
Attributes: fix collision on free-lookup

Freeing the eattrs is tricky, as somebody else may find them
via an RTA-unlocked lookup in the meantime.
Maria Matejka 2024-06-11 13:16:50 +02:00
parent 5c8179d63a
commit 951d0422fa
2 changed files with 49 additions and 29 deletions
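
For context, the collision the message refers to is between a deferred free that believes it holds the last reference and a lookup that, without taking the RTA lock, still finds the entry in the attribute cache and takes a new reference. A minimal single-threaded sketch of that interleaving, with all names invented for illustration (this is not BIRD code):

    /* Sketch only: a simplified refcounted cache entry. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct entry { _Atomic unsigned uc; int in_hash; };

    int main(void)
    {
      struct entry e = { .uc = 1, .in_hash = 1 };

      /* Deferred free: drop what it believes is the last reference */
      unsigned prev = atomic_fetch_sub(&e.uc, 1);      /* uc: 1 -> 0 */

      /* Meanwhile a lock-free lookup finds the entry and revives it */
      if (e.in_hash)
        atomic_fetch_add(&e.uc, 1);                    /* uc: 0 -> 1 */

      /* The freer saw prev == 1, so without a robust re-check it would now
       * free an entry the lookup has just started using.  The reworked
       * ea_free_deferred() below detects this revival and backs off. */
      if (prev == 1)
        printf("freer would free, but uc is already %u again\n",
               atomic_load(&e.uc));
      return 0;
    }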


@@ -253,8 +253,9 @@ enum ea_stored {
 struct ea_storage {
   struct ea_storage *next_hash;     /* Next in hash chain */
-  _Atomic u32 uc;                   /* Use count */
+  _Atomic u64 uc;                   /* Use count */
   u32 hash_key;                     /* List hash */
+  PADDING(unused, 0, 4);            /* Sorry, we need u64 for the usecount */
   ea_list l[0];                     /* The list itself */
 };
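
The use count is widened to 64 bits because its top bits will carry a "free in progress" marker; the EA_UC_* macros introduced in the second file split the value into a 56-bit assigned count and that marker. A small standalone sketch of the layout, copying the macro definitions from the diff below and feeding them made-up values:

    #include <stdint.h>
    #include <stdio.h>

    /* Macro definitions as added by this commit */
    #define EA_UC_SUB            (1ULL << 56)
    #define EA_UC_IN_PROGRESS(x) ((x) >> 56)
    #define EA_UC_ASSIGNED(x)    ((x) & (EA_UC_SUB-1))
    #define EA_UC_DONE(x)        (EA_UC_ASSIGNED(x) == EA_UC_IN_PROGRESS(x))

    int main(void)
    {
      /* Three references assigned, one free currently in progress */
      uint64_t uc = EA_UC_SUB + 3;
      printf("assigned=%llu in_progress=%llu done=%d\n",
             (unsigned long long) EA_UC_ASSIGNED(uc),
             (unsigned long long) EA_UC_IN_PROGRESS(uc),
             (int) EA_UC_DONE(uc));               /* 3, 1, 0 */

      /* One reference left while one free is in progress: the freer is the
       * last holder, so EA_UC_DONE() is true and the object may go away. */
      uc = EA_UC_SUB + 1;
      printf("done=%d\n", (int) EA_UC_DONE(uc));  /* 1 */
      return 0;
    }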


@@ -1227,8 +1227,6 @@ ea_list_ref(ea_list *l)
     ea_ref(l->next);
 }
 
-static void ea_free_nested(ea_list *l);
-
 static void
 ea_list_unref(ea_list *l)
 {
@@ -1249,7 +1247,7 @@ ea_list_unref(ea_list *l)
   }
 
   if (l->next)
-    ea_free_nested(l->next);
+    ea_free_later(l->next);
 }
 
 void
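
Nested lists are now handed to ea_free_later() instead of the removed ea_free_nested(), so they travel through the same deferred path as the top-level list. A rough, purely hypothetical sketch of what deferring a free looks like; BIRD's actual deferred_call machinery is different, this only shows the shape of the pattern:

    #include <stdio.h>
    #include <stdlib.h>

    /* Invented minimal deferred-call queue */
    struct deferred { void (*hook)(void *); void *arg; struct deferred *next; };
    static struct deferred *queue;

    static void defer(void (*hook)(void *), void *arg)
    {
      struct deferred *d = malloc(sizeof *d);
      *d = (struct deferred) { .hook = hook, .arg = arg, .next = queue };
      queue = d;                     /* run later, outside the current unref */
    }

    static void run_deferred(void)
    {
      while (queue)
      {
        struct deferred *d = queue;
        queue = d->next;
        d->hook(d->arg);
        free(d);
      }
    }

    static void free_nested(void *l) { printf("freeing nested list %p\n", l); }

    int main(void)
    {
      int dummy;
      defer(free_nested, &dummy);    /* as ea_list_unref() now defers l->next */
      run_deferred();                /* executed later, at a safe point */
      return 0;
    }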
@@ -1735,42 +1733,63 @@ ea_lookup_slow(ea_list *o, u32 squash_upto, enum ea_stored oid)
   return r->l;
 }
 
-static void
-ea_free_locked(struct ea_storage *a)
-{
-  /* Somebody has cloned this rta inbetween. This sometimes happens. */
-  if (atomic_load_explicit(&a->uc, memory_order_acquire))
-    return;
-
-  SPINHASH_REMOVE(rta_hash_table, RTAH, a);
-
-  ea_list_unref(a->l);
-  if (a->l->flags & EALF_HUGE)
-    mb_free(a);
-  else
-    sl_free(a);
-}
-
-static void
-ea_free_nested(struct ea_list *l)
-{
-  struct ea_storage *r = ea_get_storage(l);
-  if (1 == atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel))
-    ea_free_locked(r);
-}
+#define EA_UC_SUB            (1ULL << 56)
+#define EA_UC_IN_PROGRESS(x) ((x) >> 56)
+#define EA_UC_ASSIGNED(x)    ((x) & (EA_UC_SUB-1))
+#define EA_UC_DONE(x)        (EA_UC_ASSIGNED(x) == EA_UC_IN_PROGRESS(x))
 
 void
 ea_free_deferred(struct deferred_call *dc)
 {
   struct ea_storage *r = ea_get_storage(SKIP_BACK(struct ea_free_deferred, dc, dc)->attrs);
 
-  if (1 != atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel))
-    return;
+  /* Wait until the previous free has ended */
+  u64 prev;
+  do prev = atomic_fetch_or_explicit(&r->uc, EA_UC_SUB, memory_order_acq_rel);
+  while (prev & EA_UC_SUB);
+  prev |= EA_UC_SUB;
+
+  if (!EA_UC_DONE(prev))
+  {
+    /* The easy case: somebody else holds the usecount */
+    ASSERT_DIE(EA_UC_IN_PROGRESS(prev) < EA_UC_ASSIGNED(prev));
+    atomic_fetch_sub_explicit(&r->uc, EA_UC_SUB + 1, memory_order_acq_rel);
+    return;
+  }
 
+  /* We are the last one to free this ea */
   RTA_LOCK;
-  ea_free_locked(r);
+
+  /* Trying to remove the ea from the hash */
+  SPINHASH_REMOVE(rta_hash_table, RTAH, r);
+
+  /* Somebody has cloned this rta inbetween. This sometimes happens. */
+  prev = atomic_load_explicit(&r->uc, memory_order_acquire);
+  if (!EA_UC_DONE(prev))
+  {
+    /* Reinsert the ea */
+    SPINHASH_INSERT(rta_hash_table, RTAH, r);
+    atomic_fetch_sub_explicit(&r->uc, EA_UC_SUB + 1, memory_order_acq_rel);
+    RTA_UNLOCK;
+    return;
+  }
+
+  /* Fine, wait until everybody is actually done */
+  ASSERT_DIE(atomic_load_explicit(&r->uc, memory_order_acquire) == EA_UC_SUB + 1);
+
+  /* And now we can free the object, finally */
+  ea_list_unref(r->l);
+  if (r->l->flags & EALF_HUGE)
+    mb_free(r);
+  else
+    sl_free(r);
+
   RTA_UNLOCK;
 }
 
 /**
  * rta_dump_all - dump attribute cache
  *
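
Condensing the new protocol shown above into one self-contained model: set the in-progress marker (spinning if another free holds it), back off if somebody else still holds a reference, otherwise unpublish the object, re-check whether an unlocked lookup revived it in the meantime, reinsert if so, and only then free. The names below (obj_free_deferred, cache_remove, cache_insert, obj_lookup) are invented; the real code uses SPINHASH_*, RTA_LOCK and slab/mb allocation:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EA_UC_SUB            (1ULL << 56)
    #define EA_UC_IN_PROGRESS(x) ((x) >> 56)
    #define EA_UC_ASSIGNED(x)    ((x) & (EA_UC_SUB-1))
    #define EA_UC_DONE(x)        (EA_UC_ASSIGNED(x) == EA_UC_IN_PROGRESS(x))

    struct obj { _Atomic uint64_t uc; bool in_cache; };

    static void cache_remove(struct obj *o) { o->in_cache = false; }
    static void cache_insert(struct obj *o) { o->in_cache = true; }

    /* Simplified, single-threaded equivalent of the new ea_free_deferred() */
    static void obj_free_deferred(struct obj *o)
    {
      /* Wait until any previous free attempt has ended, then mark ours */
      uint64_t prev;
      do prev = atomic_fetch_or(&o->uc, EA_UC_SUB);
      while (prev & EA_UC_SUB);
      prev |= EA_UC_SUB;

      if (!EA_UC_DONE(prev))
      {
        /* Somebody else still holds a reference: drop ours and the marker */
        atomic_fetch_sub(&o->uc, EA_UC_SUB + 1);
        return;
      }

      /* We look like the last holder: unpublish, then re-check */
      cache_remove(o);                       /* under the cache lock in BIRD */

      prev = atomic_load(&o->uc);
      if (!EA_UC_DONE(prev))
      {
        /* A lookup revived the object in the meantime: put it back */
        cache_insert(o);
        atomic_fetch_sub(&o->uc, EA_UC_SUB + 1);
        return;
      }

      printf("freeing object, final uc=%llu\n",
             (unsigned long long) atomic_load(&o->uc));  /* EA_UC_SUB + 1 */
      /* real code: ea_list_unref() plus mb_free()/sl_free() here */
    }

    /* Lookup side: take a reference if the object is still published */
    static bool obj_lookup(struct obj *o)
    {
      if (!o->in_cache)
        return false;
      atomic_fetch_add(&o->uc, 1);
      return true;
    }

    int main(void)
    {
      struct obj o = { .uc = 1, .in_cache = true };   /* one reference, cached */

      obj_lookup(&o);           /* a lookup revives the entry: uc 1 -> 2 */
      obj_free_deferred(&o);    /* not the last holder, backs off: uc -> 1 */
      obj_free_deferred(&o);    /* last holder now, unpublishes and frees */
      return 0;
    }

The essential point, as in the commit, is that removal from the hash is not final: whenever the re-check under the lock shows a revived use count, the free reinserts the entry and retreats instead of destroying memory a concurrent lookup may already be using.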