
Revert (partial) "Route attributes now use the common lockfree usecount"

This partially reverts commit d617801c31.

The common lockfree usecount doesn't work well for high-volume
structures like the eattr cache, because it expects the structure to be
cleaned up by a sweeper routine ... which is very inefficient for >1M
records.

OTOH, we need the deferred ea_free in all cases ... so we keep that.
Maria Matejka 2024-06-11 10:48:21 +02:00
parent 8e240fc44f
commit a146abc8fd
2 changed files with 60 additions and 49 deletions
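
The sweeper complaint in the commit message is easy to see in miniature. Below is a minimal sketch (not BIRD code; all names are illustrative) contrasting the sweeper-style cleanup this commit removes with the direct last-reference free it restores:

#include <stdatomic.h>
#include <stdlib.h>

struct item {
  struct item *next_hash;
  _Atomic unsigned uc;          /* use count */
};

/* Sweeper style (removed here): a cleanup event walks the whole cache
 * looking for entries whose use count reached zero. Every pass is O(n)
 * and mostly touches live entries -- painful for >1M records. */
static void
sweep(struct item **buckets, size_t nbuckets)
{
  for (size_t i = 0; i < nbuckets; i++)
    for (struct item **ip = &buckets[i]; *ip; )
      if (!atomic_load_explicit(&(*ip)->uc, memory_order_acquire))
      {
        struct item *dead = *ip;
        *ip = dead->next_hash;  /* unlink from the chain */
        free(dead);
      }
      else
        ip = &(*ip)->next_hash;
}

/* Direct style (restored here): the thread dropping the last reference
 * frees exactly that one entry; no table walk ever happens. */
static void
unref(struct item *it)
{
  if (1 == atomic_fetch_sub_explicit(&it->uc, 1, memory_order_acq_rel))
    free(it);                   /* we held the last reference */
}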

lib/route.h

@@ -253,10 +253,8 @@ enum ea_stored {
 struct ea_storage {
   struct ea_storage *next_hash;		/* Next in hash chain */
-  struct lfuc uc;				/* Use count */
+  _Atomic u32 uc;				/* Use count */
   u32 hash_key;					/* List hash */
-  /* 32 bits unused but we don't wanna squeeze the use count
-   * into 32 bits to pack with the list hash, sorry */
   ea_list l[0];					/* The list itself */
 };
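
The comment deleted in this hunk documents the layout cost being recovered: a u32 use count packs into the same 64-bit slot as the u32 hash key, while the 64-bit lfuc counter left 32 bits of padding. A hedged illustration (with_lfuc uses a plain 64-bit stand-in for BIRD's struct lfuc):

#include <stdatomic.h>
#include <stdint.h>
#include <assert.h>

struct with_lfuc { void *next_hash; _Atomic uint64_t uc; uint32_t hash_key; };
struct with_u32  { void *next_hash; _Atomic uint32_t uc; uint32_t hash_key; };

/* On common LP64 ABIs: 24 bytes vs. 16 bytes per cache entry. */
static_assert(sizeof(struct with_u32) < sizeof(struct with_lfuc),
              "the u32 use count packs with the hash key");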
@@ -548,7 +546,7 @@ static inline struct ea_storage *ea_get_storage(ea_list *r)
 static inline ea_list *ea_ref(ea_list *r)
 {
-  lfuc_lock(&ea_get_storage(r)->uc);
+  ASSERT_DIE(0 < atomic_fetch_add_explicit(&ea_get_storage(r)->uc, 1, memory_order_acq_rel));
   return r;
 }
@@ -561,25 +559,38 @@ static inline ea_list *ea_lookup(ea_list *r, u32 squash_upto, enum ea_stored oid)
   return ea_lookup_slow(r, squash_upto, oid);
 }
 
-#define ea_free_later ea_free
-extern event ea_cleanup_event;
-static inline void ea_free(ea_list *l)
+struct ea_free_deferred {
+  struct deferred_call dc;
+  ea_list *attrs;
+};
+
+void ea_free_deferred(struct deferred_call *dc);
+
+static inline ea_list *ea_free_later(ea_list *r)
 {
-  if (l)
-    lfuc_unlock(&ea_get_storage(l)->uc, &global_work_list, &ea_cleanup_event);
+  if (!r)
+    return NULL;
+
+  struct ea_free_deferred efd = {
+    .dc.hook = ea_free_deferred,
+    .attrs = r,
+  };
+
+  defer_call(&efd.dc, sizeof efd);
+  return r;
 }
 
+#define ea_free ea_free_later
+
 static inline ea_list *ea_lookup_tmp(ea_list *r, u32 squash_upto, enum ea_stored oid)
 {
-  ea_free_later(r = ea_lookup(r, squash_upto, oid));
-  return r;
+  return ea_free_later(ea_lookup(r, squash_upto, oid));
 }
 
 static inline ea_list *ea_ref_tmp(ea_list *r)
 {
   ASSERT_DIE(r->stored);
-  ea_free_later(ea_ref(r));
-  return r;
+  return ea_free_later(ea_ref(r));
 }
 
 static inline ea_list *ea_strip_to(ea_list *r, u32 strip_to)
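
Note how ea_free_later() builds efd on the stack yet passes sizeof efd to defer_call(), implying the deferred-call queue stores a copy of the whole struct, so it survives the caller's return; that is what lets a caller treat a looked-up list as valid for the rest of the current task without an explicit free. A hypothetical caller (use_attributes() is invented; the ea_* calls come from the hunk above):

#include "lib/route.h"		/* assumed location of the inlines above */

/* Hypothetical caller: the reference taken by ea_lookup_tmp() is
 * dropped automatically when the current batch of deferred calls
 * runs, so no explicit ea_free() appears here. */
static void
use_attributes(ea_list *ea, u32 squash_upto, enum ea_stored oid)
{
  ea_list *tmp = ea_lookup_tmp(ea, squash_upto, oid);

  /* ... tmp stays valid for the rest of this task ... */
  (void) tmp;
}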

nest/rt-attr.c

@@ -1227,6 +1227,8 @@ ea_list_ref(ea_list *l)
     ea_ref(l->next);
 }
 
+static void ea_free_nested(ea_list *l);
+
 static void
 ea_list_unref(ea_list *l)
 {
@@ -1247,7 +1249,7 @@ ea_list_unref(ea_list *l)
   }
 
   if (l->next)
-    lfuc_unlock(&ea_get_storage(l->next)->uc, &global_work_list, &ea_cleanup_event);
+    ea_free_nested(l->next);
 }
 
 void
@@ -1521,7 +1523,7 @@ ea_dump(ea_list *e)
 	 (e->flags & EALF_SORTED) ? 'S' : 's',
 	 (e->flags & EALF_BISECT) ? 'B' : 'b',
 	 e->stored,
-	 s ? atomic_load_explicit(&s->uc.uc, memory_order_relaxed) : 0,
+	 s ? atomic_load_explicit(&s->uc, memory_order_relaxed) : 0,
 	 s ? s->hash_key : 0);
 
   for(i=0; i<e->count; i++)
   {
@@ -1621,8 +1623,6 @@ ea_append(ea_list *to, ea_list *what)
  * rta's
  */
 
-event ea_cleanup_event;
-
 static SPINHASH(struct ea_storage) rta_hash_table;
 
 #define RTAH_KEY(a)		a->l, a->hash_key
@@ -1665,7 +1665,7 @@ ea_lookup_existing(ea_list *o, u32 squash_upto, uint h)
 	(!r || r->l->stored > ea->l->stored))
       r = ea;
 
   if (r)
-    lfuc_lock_revive(&r->uc);
+    atomic_fetch_add_explicit(&r->uc, 1, memory_order_acq_rel);
   SPINHASH_END_CHAIN(read);
 
   return r ? r->l : NULL;
@@ -1727,9 +1727,7 @@ ea_lookup_slow(ea_list *o, u32 squash_upto, enum ea_stored oid)
   r->l->flags |= huge;
   r->l->stored = oid;
   r->hash_key = h;
-  memset(&r->uc, 0, sizeof r->uc);
-  lfuc_lock_revive(&r->uc);
+  atomic_store_explicit(&r->uc, 1, memory_order_release);
 
   SPINHASH_INSERT(rta_hash_table, RTAH, r);
@@ -1738,32 +1736,38 @@
 }
 
 static void
-ea_cleanup(void *_ UNUSED)
+ea_free_locked(struct ea_storage *a)
 {
-  RTA_LOCK;
+  /* Somebody has cloned this rta inbetween. This sometimes happens. */
+  if (atomic_load_explicit(&a->uc, memory_order_acquire))
+    return;
 
-  _Bool run_again = 0;
+  SPINHASH_REMOVE(rta_hash_table, RTAH, a);
 
-  SPINHASH_WALK_FILTER(rta_hash_table, RTAH, write, rp)
-  {
-    struct ea_storage *r = *rp;
-    if (lfuc_finished(&r->uc))
-    {
-      run_again = 1;
-      SPINHASH_DO_REMOVE(rta_hash_table, RTAH, rp);
-
-      ea_list_unref(r->l);
-      if (r->l->flags & EALF_HUGE)
-        mb_free(r);
+  ea_list_unref(a->l);
+  if (a->l->flags & EALF_HUGE)
+    mb_free(a);
   else
-        sl_free(r);
-    }
-  }
-  SPINHASH_WALK_FILTER_END(write);
+    sl_free(a);
+}
 
-  if (run_again)
-    ev_send(&global_work_list, &ea_cleanup_event);
+static void
+ea_free_nested(struct ea_list *l)
+{
+  struct ea_storage *r = ea_get_storage(l);
+  if (1 == atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel))
+    ea_free_locked(r);
+}
 
+void
+ea_free_deferred(struct deferred_call *dc)
+{
+  struct ea_storage *r = ea_get_storage(SKIP_BACK(struct ea_free_deferred, dc, dc)->attrs);
+
+  if (1 != atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel))
+    return;
+
+  RTA_LOCK;
+  ea_free_locked(r);
   RTA_UNLOCK;
 }
@@ -1820,10 +1824,6 @@ rta_init(void)
   rte_src_init();
   ea_class_init();
 
-  ea_cleanup_event = (event) {
-    .hook = ea_cleanup,
-  };
-
   RTA_UNLOCK;
 
   /* These attributes are required to be first for nice "show route" output */
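
One detail worth calling out in the second file: ea_free_deferred() drops the last reference outside the lock, so a concurrent ea_lookup_existing() can revive the entry (its atomic_fetch_add) before RTA_LOCK is acquired; ea_free_locked() therefore re-checks the use count under the lock, which is what the "somebody has cloned this rta" comment is about. A self-contained sketch of that shape, with illustrative names and a pthread mutex standing in for RTA_LOCK:

#include <stdatomic.h>
#include <pthread.h>
#include <stdlib.h>

/* Illustrative stand-ins for ea_storage, RTA_LOCK, and the free path. */
struct entry { _Atomic unsigned uc; };
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void
free_locked(struct entry *e)
{
  /* Re-check under the lock: a concurrent lookup may have revived
   * the entry after our decrement hit zero. */
  if (atomic_load_explicit(&e->uc, memory_order_acquire))
    return;

  /* ... unlink from the hash table here ... */
  free(e);
}

static void
unref(struct entry *e)
{
  /* Dropping any reference but the last one is lock-free. */
  if (1 != atomic_fetch_sub_explicit(&e->uc, 1, memory_order_acq_rel))
    return;

  pthread_mutex_lock(&table_lock);
  free_locked(e);
  pthread_mutex_unlock(&table_lock);
}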