mirror of https://gitlab.nic.cz/labs/bird.git synced 2025-01-03 07:31:54 +00:00

Lock-free usecount: better debuggability

Maria Matejka 2024-01-10 09:10:03 +01:00
parent e711a991d4
commit 9624ebfd9a
2 changed files with 11 additions and 7 deletions


@@ -30,12 +30,13 @@ struct lfuc {
  * lfuc_lock - increase an atomic usecount
  * @c: the usecount structure
  */
-static inline void lfuc_lock(struct lfuc *c)
+static inline u64 lfuc_lock(struct lfuc *c)
 {
   /* Locking is trivial; somebody already holds the underlying data structure
    * so we just increase the use count. Nothing can be freed underneath our hands. */
   u64 uc = atomic_fetch_add_explicit(&c->uc, 1, memory_order_acq_rel);
   ASSERT_DIE(uc > 0);
+  return uc & (LFUC_IN_PROGRESS - 1);
 }
 /**
@@ -47,9 +48,10 @@ static inline void lfuc_lock(struct lfuc *c)
  * Handy for situations with flapping routes. Use only from the same
  * loop as which runs the prune routine.
  */
-static inline void lfuc_lock_revive(struct lfuc *c)
+static inline u64 lfuc_lock_revive(struct lfuc *c)
 {
-  UNUSED u64 uc = atomic_fetch_add_explicit(&c->uc, 1, memory_order_acq_rel);
+  u64 uc = atomic_fetch_add_explicit(&c->uc, 1, memory_order_acq_rel);
+  return uc & (LFUC_IN_PROGRESS - 1);
 }
 /**
@@ -61,7 +63,7 @@ static inline void lfuc_lock_revive(struct lfuc *c)
  * If the usecount reaches zero, a prune event is run to possibly free the object.
  * The prune event MUST use lfuc_finished() to check the object state.
  */
-static inline void lfuc_unlock(struct lfuc *c, event_list *el, event *ev)
+static inline u64 lfuc_unlock(struct lfuc *c, event_list *el, event *ev)
 {
   /* Unlocking is tricky. We do it lockless so at the same time, the prune
    * event may be running, therefore if the unlock gets us to zero, it must be
@@ -102,11 +104,13 @@ static inline void lfuc_unlock(struct lfuc *c, event_list *el, event *ev)
   /* And now, finally, simultaneously pop the in-progress indicator and the
    * usecount, possibly allowing the pruning routine to free this structure */
-  atomic_fetch_sub_explicit(&c->uc, LFUC_IN_PROGRESS + 1, memory_order_acq_rel);
+  uc = atomic_fetch_sub_explicit(&c->uc, LFUC_IN_PROGRESS + 1, memory_order_acq_rel);
   /* ... and to reduce the load a bit, the pruning routine will better wait for
    * RCU synchronization instead of a busy loop. */
   rcu_read_unlock();
+  return uc - LFUC_IN_PROGRESS - 1;
 }
 /**

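The hunks above change lfuc_lock(), lfuc_lock_revive() and lfuc_unlock() from void to u64: each now returns the usecount (with the LFUC_IN_PROGRESS bit masked out), so callers can observe the count while debugging. Below is a minimal standalone sketch of that pattern; it is an illustration only, with invented demo_* names and an arbitrary in-progress bit, not BIRD's actual lock-free usecount code (which also handles the prune event and RCU synchronization).

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct lfuc: a plain atomic usecount
 * with one high bit reserved as an "in progress" marker. */
#define DEMO_IN_PROGRESS  (UINT64_C(1) << 40)

struct demo_lfuc { _Atomic uint64_t uc; };

/* Like the patched lfuc_lock(): bump the count and return the previous
 * value with the in-progress bit masked out, so callers can log it. */
static inline uint64_t demo_lock(struct demo_lfuc *c)
{
  uint64_t uc = atomic_fetch_add_explicit(&c->uc, 1, memory_order_acq_rel);
  return uc & (DEMO_IN_PROGRESS - 1);
}

/* Simplified unlock: the real lfuc_unlock() also schedules a prune event
 * and waits for RCU; here we only show the count being returned. */
static inline uint64_t demo_unlock(struct demo_lfuc *c)
{
  uint64_t uc = atomic_fetch_sub_explicit(&c->uc, 1, memory_order_acq_rel);
  return uc - 1;
}

int main(void)
{
  struct demo_lfuc c = { .uc = 1 };   /* initial reference; the count stays > 0 while held */
  printf("locked, previous count %llu\n", (unsigned long long) demo_lock(&c));
  printf("unlocked, remaining count %llu\n", (unsigned long long) demo_unlock(&c));
  return 0;
}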

@@ -189,13 +189,13 @@ net_new_index_locked(struct netindex_hash_private *hp, const net_addr *n, pool *
 void net_lock_index(netindex_hash *h UNUSED, struct netindex *i)
 {
   // log(L_TRACE "Lock index %p", i);
-  return lfuc_lock(&i->uc);
+  lfuc_lock(&i->uc);
 }
 void net_unlock_index(netindex_hash *h, struct netindex *i)
 {
   // log(L_TRACE "Unlock index %p", i);
-  return lfuc_unlock(&i->uc, h->cleanup_list, &h->cleanup_event);
+  lfuc_unlock(&i->uc, h->cleanup_list, &h->cleanup_event);
 }
 struct netindex *
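
A hypothetical way to use the new return value for the debuggability mentioned in the commit title (a sketch, not part of this diff): the commented-out trace in net_lock_index() could log the count returned by lfuc_lock().

/* Hypothetical variant, assuming the log()/L_TRACE usage already visible
 * in the commented-out lines above; not part of this commit. */
void net_lock_index(netindex_hash *h UNUSED, struct netindex *i)
{
  u64 uc = lfuc_lock(&i->uc);
  log(L_TRACE "Lock index %p, usecount %lu", i, (unsigned long) uc);
}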