
Faster shutdown and cleanups by freeing route attributes strictly from main loop

Maria Matejka 2021-12-01 21:52:55 +00:00
parent 387b279f60
commit b2bac7ae91
3 changed files with 66 additions and 31 deletions

View File

@@ -883,14 +883,22 @@ static inline rta *rta_clone(rta *r) {
   return r;
 }
 
-void rta__free(rta *r);
+#define RTA_OBSOLETE_LIMIT 512
+
+extern _Atomic u32 rta_obsolete_count;
+extern event rta_cleanup_event;
 
 static inline void rta_free(rta *r) {
   if (!r)
     return;
 
   u32 uc = atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel);
-  if (uc == 1)
-    rta__free(r);
+  if (uc > 1)
+    return;
+
+  u32 obs = atomic_fetch_add_explicit(&rta_obsolete_count, 1, memory_order_acq_rel);
+  if (obs == RTA_OBSOLETE_LIMIT)
+    ev_send(&global_work_list, &rta_cleanup_event);
 }
 
 rta *rta_do_cow(rta *o, linpool *lp);
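
Taken on its own, the new rta_free() never frees anything: dropping the last reference only increments rta_obsolete_count, and the one caller that pushes the counter past RTA_OBSOLETE_LIMIT schedules the cleanup event onto the main loop's work list. A minimal standalone sketch of this deferred-release pattern, with hypothetical names (obj_release(), schedule_cleanup()) standing in for BIRD's rta_free() and ev_send():

/* Sketch only: obj, obsolete_count and schedule_cleanup() are
 * hypothetical stand-ins, not BIRD's actual types or API. */
#include <stdatomic.h>
#include <stdint.h>

#define OBSOLETE_LIMIT 512

struct obj { _Atomic uint32_t uc; /* use count */ };

static _Atomic uint32_t obsolete_count;

void schedule_cleanup(void);  /* stand-in for ev_send(&global_work_list, ...) */

static inline void obj_release(struct obj *o)
{
  if (!o)
    return;

  /* fetch_sub returns the old value: 1 means we dropped the last reference */
  uint32_t uc = atomic_fetch_sub_explicit(&o->uc, 1, memory_order_acq_rel);
  if (uc > 1)
    return;

  /* Do not free here; just account the object as obsolete and let the
   * main loop reap it in a batch once enough of them pile up. */
  uint32_t obs = atomic_fetch_add_explicit(&obsolete_count, 1, memory_order_acq_rel);
  if (obs == OBSOLETE_LIMIT)
    schedule_cleanup();
}

Testing with == rather than >= means only the thread that crosses the threshold schedules the event, so a burst of releases does not queue redundant wakeups; the cleanup handler (in the next file) re-sends the event to itself when its batch buffer fills up before the sweep is finished.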

View File

@@ -1384,52 +1384,78 @@ rta_lookup(rta *o)
   return r;
 }
 
-void
-rta__free(rta *a)
+static void
+rta_cleanup(void *data UNUSED)
 {
-  ASSERT(a->cached);
+  u32 count = 0;
+  rta *ax[RTA_OBSOLETE_LIMIT];
+
   RTA_LOCK;
   struct rta_cache *c = atomic_load_explicit(&rta_cache, memory_order_acquire);
 
-  if (atomic_load_explicit(&a->uc, memory_order_acquire))
-  {
-    /* Acquired inbetween */
-    RTA_UNLOCK;
-    return;
-  }
-
-  /* Relink the forward pointer */
-  rta *next = atomic_load_explicit(&a->next, memory_order_acquire);
-  atomic_store_explicit(a->pprev, next, memory_order_release);
-
-  /* Relink the backwards pointer */
-  if (next)
-    next->pprev = a->pprev;
+  for (u32 h = 0; h < c->size; h++)
+    for (rta *a = atomic_load_explicit(&c->table[h], memory_order_acquire), *next;
+         a;
+         a = next)
+    {
+      next = atomic_load_explicit(&a->next, memory_order_acquire);
+
+      if (atomic_load_explicit(&a->uc, memory_order_acquire) > 0)
+        continue;
+
+      /* Check if the cleanup fits in the buffer */
+      if (count == RTA_OBSOLETE_LIMIT)
+      {
+        ev_send(&global_work_list, &rta_cleanup_event);
+        goto wait;
+      }
+
+      /* Relink the forward pointer */
+      atomic_store_explicit(a->pprev, next, memory_order_release);
+
+      /* Relink the backwards pointer */
+      if (next)
+        next->pprev = a->pprev;
+
+      /* Store for freeing and go to the next */
+      ax[count++] = a;
+      a = next;
+    }
 
+wait:
   /* Wait until nobody knows about us */
   synchronize_rcu();
 
-  if (atomic_load_explicit(&a->uc, memory_order_acquire))
+  u32 freed = 0;
+
+  for (u32 i = 0; i < count; i++)
   {
+    rta *a = ax[i];
+
     /* Acquired inbetween, relink back */
-    rta_insert(a, c);
-    RTA_UNLOCK;
-    return;
-  }
+    if (atomic_load_explicit(&a->uc, memory_order_acquire))
+    {
+      rta_insert(a, c);
+      continue;
+    }
 
-  /* Cleared to free the memory */
-  rt_unlock_hostentry(a->hostentry);
-  if (a->nh.next)
-    nexthop_free(a->nh.next);
-  ea_free(a->eattrs);
-  a->cached = 0;
-  c->count--;
-  sl_free(rta_slab(a), a);
+    /* Cleared to free the memory */
+    rt_unlock_hostentry(a->hostentry);
+    if (a->nh.next)
+      nexthop_free(a->nh.next);
+    ea_free(a->eattrs);
+    a->cached = 0;
+    c->count--;
+    sl_free(rta_slab(a), a);
+
+    freed++;
+  }
+
+  atomic_fetch_sub_explicit(&rta_obsolete_count, freed, memory_order_release);
 
   RTA_UNLOCK;
 }
 
+_Atomic u32 rta_obsolete_count;
+event rta_cleanup_event = { .hook = rta_cleanup, .list = &global_work_list };
+
 rta *
 rta_do_cow(rta *o, linpool *lp)
 {
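
The cleanup itself is now batched: under RTA_LOCK it sweeps the whole cache, unlinks up to RTA_OBSOLETE_LIMIT unreferenced attributes into a local buffer (re-sending the event to itself if more remain), waits out a single synchronize_rcu() grace period for the entire batch where the old rta__free() paid one per attribute, and then frees each entry unless its use count rose again in the meantime, in which case it is relinked. A compilable sketch of the grace-period-then-free-or-resurrect half, assuming a liburcu-style synchronize_rcu() and hypothetical node helpers (node_reinsert(), node_free()):

/* Sketch only: node, node_reinsert() and node_free() are hypothetical;
 * synchronize_rcu() here is the liburcu API, not BIRD's internal RCU. */
#include <stdatomic.h>
#include <stdint.h>
#include <urcu.h>       /* synchronize_rcu(); link with -lurcu */

struct node { _Atomic uint32_t uc; /* use count */ };

void node_reinsert(struct node *n);  /* hypothetical */
void node_free(struct node *n);      /* hypothetical */

/* Caller holds the cache lock and has already unlinked the candidates
 * (all with uc == 0 at unlink time) from the lookup structure. */
static uint32_t reap_batch(struct node *batch[], uint32_t count)
{
  /* One grace period covers the whole batch: any reader that found a
   * node before it was unlinked has now left its read-side section. */
  synchronize_rcu();

  uint32_t freed = 0;
  for (uint32_t i = 0; i < count; i++)
  {
    struct node *n = batch[i];

    /* A reader may have taken a reference before the unlink;
     * such a node is resurrected instead of freed. */
    if (atomic_load_explicit(&n->uc, memory_order_acquire))
      node_reinsert(n);
    else
    {
      node_free(n);
      freed++;
    }
  }

  return freed;  /* caller subtracts this from the obsolete counter */
}

Presumably this amortization is the "faster shutdown" of the commit message: tearing down all routes releases attributes en masse, and one grace period per batch of up to 512 is far cheaper than one per attribute.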

View File

@@ -201,5 +201,6 @@ struct coroutine *coro_run(pool *p, void (*entry)(void *), void *data)
 void
 coro_yield(void)
 {
-  usleep(100);
+  const struct timespec req = { .tv_nsec = 100 };
+  nanosleep(&req, NULL);
 }
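
Two things change here: usleep() (declared obsolescent in POSIX.1-2001 and removed in POSIX.1-2008) gives way to the portable nanosleep(), and the units shrink, since usleep(100) requested 100 microseconds while { .tv_nsec = 100 } requests 100 nanoseconds. Note also that the bare call ignores interruption by signals; a sketch of an EINTR-safe variant, with yield_ns() as a hypothetical helper:

#include <errno.h>
#include <time.h>

/* Hypothetical helper: sleep for ns nanoseconds, restarting
 * nanosleep() with the reported remainder if a signal interrupts it. */
static void yield_ns(long ns)
{
  struct timespec req = { .tv_sec = 0, .tv_nsec = ns };
  struct timespec rem;

  /* On EINTR, nanosleep() stores the unslept time in rem;
   * feeding it back preserves the total delay across signals. */
  while (nanosleep(&req, &rem) == -1 && errno == EINTR)
    req = rem;
}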