Mirror of https://gitlab.nic.cz/labs/bird.git, synced 2024-11-13 22:58:42 +00:00

Faster shutdown and cleanups by freeing route attributes strictly from main loop

This commit is contained in:
Maria Matejka 2021-12-01 21:52:55 +00:00
parent 387b279f60
commit b2bac7ae91
3 changed files with 66 additions and 31 deletions

View File

@ -883,14 +883,22 @@ static inline rta *rta_clone(rta *r) {
return r; return r;
} }
void rta__free(rta *r); #define RTA_OBSOLETE_LIMIT 512
extern _Atomic u32 rta_obsolete_count;
extern event rta_cleanup_event;
static inline void rta_free(rta *r) { static inline void rta_free(rta *r) {
if (!r) if (!r)
return; return;
u32 uc = atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel); u32 uc = atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel);
if (uc == 1) if (uc > 1)
rta__free(r); return;
u32 obs = atomic_fetch_add_explicit(&rta_obsolete_count, 1, memory_order_acq_rel);
if (obs == RTA_OBSOLETE_LIMIT)
ev_send(&global_work_list, &rta_cleanup_event);
} }
rta *rta_do_cow(rta *o, linpool *lp); rta *rta_do_cow(rta *o, linpool *lp);

View File

@ -1384,38 +1384,57 @@ rta_lookup(rta *o)
return r; return r;
} }
/* NOTE(review): this span is a side-by-side diff rendering that was
 * flattened by extraction — each line carries the OLD rta__free() text
 * (left column) and the NEW rta_cleanup() text (right column) merged
 * together.  The new function batch-frees obsolete rta's from the main
 * loop: it unlinks unreferenced entries from the cache under RTA_LOCK,
 * waits for an RCU grace period, then frees them (relinking any entry
 * that was re-acquired in between). */
void static void
rta__free(rta *a) rta_cleanup(void *data UNUSED)
{ {
ASSERT(a->cached); u32 count = 0;
rta *ax[RTA_OBSOLETE_LIMIT];
RTA_LOCK; RTA_LOCK;
struct rta_cache *c = atomic_load_explicit(&rta_cache, memory_order_acquire); struct rta_cache *c = atomic_load_explicit(&rta_cache, memory_order_acquire);
if (atomic_load_explicit(&a->uc, memory_order_acquire)) for(u32 h=0; h<c->size; h++)
for(rta *a = atomic_load_explicit(&c->table[h], memory_order_acquire), *next;
a;
a = next)
{ {
/* Acquired inbetween */ next = atomic_load_explicit(&a->next, memory_order_acquire);
RTA_UNLOCK; if (atomic_load_explicit(&a->uc, memory_order_acquire) > 0)
return; continue;
/* Check if the cleanup fits in the buffer */
if (count == RTA_OBSOLETE_LIMIT)
{
ev_send(&global_work_list, &rta_cleanup_event);
goto wait;
} }
/* Relink the forward pointer */ /* Relink the forward pointer */
rta *next = atomic_load_explicit(&a->next, memory_order_acquire);
atomic_store_explicit(a->pprev, next, memory_order_release); atomic_store_explicit(a->pprev, next, memory_order_release);
/* Relink the backwards pointer */ /* Relink the backwards pointer */
if (next) if (next)
next->pprev = a->pprev; next->pprev = a->pprev;
/* Store for freeing and go to the next */
ax[count++] = a;
a = next;
}
wait:
/* Wait until nobody knows about us */ /* Wait until nobody knows about us */
synchronize_rcu(); synchronize_rcu();
u32 freed = 0;
for (u32 i=0; i<count; i++)
{
rta *a = ax[i];
/* Acquired inbetween, relink back */
if (atomic_load_explicit(&a->uc, memory_order_acquire)) if (atomic_load_explicit(&a->uc, memory_order_acquire))
{ {
/* Acquired inbetween, relink back */
rta_insert(a, c); rta_insert(a, c);
RTA_UNLOCK; continue;
return;
} }
/* Cleared to free the memory */ /* Cleared to free the memory */
/* NOTE(review): a second hunk header follows — the lines between
 * hunks (part of the function body) are NOT shown in this view, so
 * the full new rta_cleanup() body cannot be reconstructed here. */
@ -1426,10 +1445,17 @@ rta__free(rta *a)
a->cached = 0; a->cached = 0;
c->count--; c->count--;
sl_free(rta_slab(a), a); sl_free(rta_slab(a), a);
freed++;
}
atomic_fetch_sub_explicit(&rta_obsolete_count, freed, memory_order_release);
RTA_UNLOCK; RTA_UNLOCK;
} }
_Atomic u32 rta_obsolete_count;
event rta_cleanup_event = { .hook = rta_cleanup, .list = &global_work_list };
rta * rta *
rta_do_cow(rta *o, linpool *lp) rta_do_cow(rta *o, linpool *lp)
{ {

View File

@ -201,5 +201,6 @@ struct coroutine *coro_run(pool *p, void (*entry)(void *), void *data)
/*
 * coro_yield - give other coroutines a chance to run.
 *
 * Sleeps for a very short time (100 ns) via nanosleep() instead of the
 * obsolescent usleep(); the tiny sleep lets the kernel scheduler pick
 * another runnable thread.
 */
void
coro_yield(void)
{
  /* .tv_sec is zero-initialized; only the nanosecond part is set. */
  const struct timespec req = { .tv_nsec = 100 };
  nanosleep(&req, NULL);
}