Mirror of https://gitlab.nic.cz/labs/bird.git (synced 2024-09-18 19:35:20 +00:00)

Read-write spinlocks

This commit is contained in:
Maria Matejka 2024-06-05 13:12:12 +02:00
parent 8f4a784e14
commit 4104d668d9
7 changed files with 248 additions and 5 deletions

View File

@ -291,4 +291,9 @@ static inline u64 u64_hash0(u64 v, u32 p, u64 acc)
/* Hash a 64-bit value down to u32 via the zero-seeded u64_hash0 round. */
static inline u32 u64_hash(u64 v)
{
  u64 acc = u64_hash0(v, HASH_PARAM, 0);
  return hash_value(acc);
}
/* Yield for a little while. Use only in special cases. */
void birdloop_yield(void);
#endif

View File

@ -72,7 +72,4 @@ void birdloop_remove_socket(struct birdloop *, struct birdsock *);
void birdloop_init(void);
/* Yield for a little while. Use only in special cases. */
void birdloop_yield(void);
#endif /* _BIRD_IO_LOOP_H_ */

View File

@ -9,6 +9,7 @@
#ifndef _BIRD_LOCKING_H_
#define _BIRD_LOCKING_H_
#include "lib/birdlib.h"
#include "lib/macro.h"
#include "lib/rcu.h"
@ -83,7 +84,150 @@ extern DOMAIN(the_bird) the_bird_domain;
#define ASSERT_THE_BIRD_LOCKED ({ if (!the_bird_locked()) bug("The BIRD lock must be locked here: %s:%d", __FILE__, __LINE__); })
/* Unwind stored lock state helpers */
/*
* RW spinlocks
*/
/*
 * RW spinlock state layout: all counters live in one atomic u64,
 * split into four bit fields (positions below):
 *   bits  0..19  pending readers
 *   bits 20..39  active readers
 *   bits 40..55  pending writers
 *   bits 56..63  active writer flag(s)
 */
#define RWS_READ_PENDING_POS 0
#define RWS_READ_ACTIVE_POS 20
#define RWS_WRITE_PENDING_POS 40
#define RWS_WRITE_ACTIVE_POS 56

/* One unit of each counter */
#define RWS_READ_PENDING (1ULL << RWS_READ_PENDING_POS)
#define RWS_READ_ACTIVE (1ULL << RWS_READ_ACTIVE_POS)
#define RWS_WRITE_PENDING (1ULL << RWS_WRITE_PENDING_POS)
#define RWS_WRITE_ACTIVE (1ULL << RWS_WRITE_ACTIVE_POS)

/* Masks selecting each whole field out of the packed u64 */
#define RWS_READ_PENDING_MASK (RWS_READ_ACTIVE - 1)
#define RWS_READ_ACTIVE_MASK ((RWS_WRITE_PENDING - 1) & ~(RWS_READ_ACTIVE - 1))
#define RWS_WRITE_PENDING_MASK ((RWS_WRITE_ACTIVE - 1) & ~(RWS_WRITE_PENDING - 1))
#define RWS_WRITE_ACTIVE_MASK (~(RWS_WRITE_ACTIVE - 1))

/* The lock itself is just the packed atomic state word. */
typedef struct {
  u64 _Atomic spin;
} rw_spinlock;

/* Upper bound on rw_spinlocks held simultaneously by one thread
 * (debug bookkeeping below indexes a 32-bit bitmap by this count). */
#define MAX_RWS_AT_ONCE 32

/* Per-thread debug tracking of held spinlocks (defined in lib/locking.c):
 * stack of taken locks, their acquisition times, stack depth,
 * and a bitmap marking which stack slots are write locks. */
extern _Thread_local rw_spinlock *rw_spinlocks_taken[MAX_RWS_AT_ONCE];
extern _Thread_local btime rw_spinlocks_time[MAX_RWS_AT_ONCE];
extern _Thread_local u32 rw_spinlocks_taken_cnt;
extern _Thread_local u32 rw_spinlocks_taken_write;

/* Borrowed from lib/timer.h */
btime current_time_now(void);
#ifdef DEBUGGING
/*
 * Debug bookkeeping around lock/unlock: push/pop the lock on this thread's
 * stack of held rw_spinlocks, remember whether it was taken for write,
 * check strict LIFO lock ordering, and warn when a lock was held for
 * more than a second.
 *
 * p     -- the spinlock being (un)locked
 * write -- taken/released for writing?
 * lock  -- 1 on lock, 0 on unlock
 *
 * Fix: the bit for stack slot N must be computed as (1U << N), not
 * (1 << N); with MAX_RWS_AT_ONCE == 32 the slot index reaches 31 and
 * (1 << 31) left-shifts into the sign bit of a signed int, which is
 * undefined behavior in C11 (6.5.7).
 */
static inline void rws_mark(rw_spinlock *p, _Bool write, _Bool lock)
{
  if (lock) {
    ASSERT_DIE(rw_spinlocks_taken_cnt < MAX_RWS_AT_ONCE);
    if (write)
      rw_spinlocks_taken_write |= (1U << rw_spinlocks_taken_cnt);
    else
      rw_spinlocks_taken_write &= ~(1U << rw_spinlocks_taken_cnt);
    rw_spinlocks_time[rw_spinlocks_taken_cnt] = current_time_now();
    rw_spinlocks_taken[rw_spinlocks_taken_cnt++] = p;
  }
  else {
    ASSERT_DIE(rw_spinlocks_taken_cnt > 0);
    /* Locks must be released in reverse order of acquisition */
    ASSERT_DIE(rw_spinlocks_taken[--rw_spinlocks_taken_cnt] == p);
    /* The read/write mode must match the mode it was taken in */
    ASSERT_DIE(!(rw_spinlocks_taken_write & (1U << rw_spinlocks_taken_cnt)) == !write);
    btime tdif = current_time_now() - rw_spinlocks_time[rw_spinlocks_taken_cnt];
    if (tdif > 1 S_)
      log(L_WARN "Spent an alarming time %t s in spinlock %p (%s); "
	  "if this happens often to you, please contact the developers.",
	  tdif, p, write ? "write" : "read");
  }
}
#else
/* No-op outside of debug builds */
#define rws_mark(...)
#endif
/* Initialize a rw_spinlock to the fully-unlocked state (all fields zero).
 * Relaxed ordering suffices: the lock must not be shared before init. */
static inline void rws_init(rw_spinlock *p)
{
  atomic_store_explicit(&p->spin, 0, memory_order_relaxed);
}
/*
 * Take the spinlock for reading. Multiple readers may hold the lock at
 * once; the reader spins (calling birdloop_yield() between polls) while
 * any writer is pending or active, so writers are preferred over new
 * readers. Not reentrancy-checked here; debug tracking is in rws_mark().
 */
static inline void rws_read_lock(rw_spinlock *p)
{
  /* Announce ourselves as a pending reader */
  u64 old = atomic_fetch_add_explicit(&p->spin, RWS_READ_PENDING, memory_order_acquire);

  while (1)
  {
    /* Wait until all writers end */
    while (old & (RWS_WRITE_PENDING_MASK | RWS_WRITE_ACTIVE_MASK))
    {
      birdloop_yield();
      old = atomic_load_explicit(&p->spin, memory_order_acquire);
    }

    /* Convert to active */
    old = atomic_fetch_add_explicit(&p->spin, RWS_READ_ACTIVE - RWS_READ_PENDING, memory_order_acq_rel);

    if (old & RWS_WRITE_ACTIVE_MASK)
      /* Oh but some writer was faster: roll back to pending and retry.
       * (old still holds the pre-rollback state, so the inner wait loop
       * sees the writer and yields.) */
      old = atomic_fetch_sub_explicit(&p->spin, RWS_READ_ACTIVE - RWS_READ_PENDING, memory_order_acq_rel);
    else
      /* No writers, approved */
      break;
  }

  rws_mark(p, 0, 1);
}
/* Release a read hold: pop the debug stack, then drop one active-reader
 * unit with release ordering so our reads happen-before the next writer. */
static inline void rws_read_unlock(rw_spinlock *p)
{
  rws_mark(p, 0, 0);

  u64 prev = atomic_fetch_sub_explicit(&p->spin, RWS_READ_ACTIVE, memory_order_release);

  /* There must have been at least one active reader — us */
  ASSERT_DIE(prev & RWS_READ_ACTIVE_MASK);
}
/*
 * Take the spinlock for writing (exclusive). Registers as a pending
 * writer first — which stops new readers from activating — then spins
 * (yielding) until no reader or other writer is active, and races for
 * the single RWS_WRITE_ACTIVE flag. Backs off if a reader slipped in
 * between the check and the flag set.
 */
static inline void rws_write_lock(rw_spinlock *p)
{
  /* Announce ourselves as a pending writer; blocks out new readers */
  u64 old = atomic_fetch_add_explicit(&p->spin, RWS_WRITE_PENDING, memory_order_acquire);

  /* Wait until all active readers end */
  while (1)
  {
    while (old & (RWS_READ_ACTIVE_MASK | RWS_WRITE_ACTIVE_MASK))
    {
      birdloop_yield();
      old = atomic_load_explicit(&p->spin, memory_order_acquire);
    }

    /* Mark self as active */
    u64 updated = atomic_fetch_or_explicit(&p->spin, RWS_WRITE_ACTIVE, memory_order_acquire);

    /* And it's us (the flag was clear before, so we won the race) */
    if (!(updated & RWS_WRITE_ACTIVE))
    {
      if (updated & RWS_READ_ACTIVE_MASK)
	/* But some reader was faster: back off and wait again */
	atomic_fetch_and_explicit(&p->spin, ~RWS_WRITE_ACTIVE, memory_order_release);
      else
	/* No readers, approved */
	break;
    }
  }

  /* It's us, then we aren't actually pending */
  u64 updated = atomic_fetch_sub_explicit(&p->spin, RWS_WRITE_PENDING, memory_order_acquire);
  ASSERT_DIE(updated & RWS_WRITE_PENDING_MASK);

  rws_mark(p, 1, 1);
}
/* Release the exclusive hold: pop the debug stack, then clear the
 * active-writer flag with release ordering to publish our writes. */
static inline void rws_write_unlock(rw_spinlock *p)
{
  rws_mark(p, 1, 0);

  u64 prev = atomic_fetch_and_explicit(&p->spin, ~RWS_WRITE_ACTIVE, memory_order_release);

  /* The flag must have been set — by us */
  ASSERT_DIE(prev & RWS_WRITE_ACTIVE);
}
/*
* Unwind stored lock state helpers
*/
struct locking_unwind_status {
struct lock_order *desired;
enum {

View File

@ -77,6 +77,96 @@ t_locking(void)
return 1;
}
/* Size of the array the threads cooperatively sort */
#define RWS_DATASIZE 333
/* Number of concurrent sorting threads */
#define RWS_THREADS 128

/* Shared test state: one spinlock guarding each array slot */
struct rws_test_data {
  int data[RWS_DATASIZE];
  rw_spinlock rws[RWS_DATASIZE];
};
/*
 * Thread body: concurrent bubble sort over the shared array.
 * Each slot access is guarded by its spinlock; a pass of read-locked
 * order checks is followed by a pass of write-locked adjacent swaps,
 * repeated until a check pass sees the array fully sorted.
 */
static void *
rwspin_thread_run(void *_rtd)
{
  struct rws_test_data *d = _rtd;
  _Bool sorted;

  do
  {
    sorted = 1;

    /* Read-locked pass: scan for an inversion, stop early when found */
    for (int i = 0; (i < RWS_DATASIZE-1) && sorted; i++)
    {
      rws_read_lock(&d->rws[i]);
      rws_read_lock(&d->rws[i+1]);

      ASSERT_DIE(d->data[i] >= 0);
      ASSERT_DIE(d->data[i+1] >= 0);
      if (d->data[i] > d->data[i+1])
	sorted = 0;

      rws_read_unlock(&d->rws[i+1]);
      rws_read_unlock(&d->rws[i]);
    }

    /* Write-locked pass: swap adjacent out-of-order elements */
    for (int i = 0; i < RWS_DATASIZE-1; i++)
    {
      rws_write_lock(&d->rws[i]);
      rws_write_lock(&d->rws[i+1]);

      int left = d->data[i];
      int right = d->data[i+1];
      ASSERT_DIE(left >= 0);
      ASSERT_DIE(right >= 0);

      /* Poison both slots while held so racing readers would trip
       * the >= 0 assertions above */
      d->data[i] = d->data[i+1] = -1;

      if (left > right)
      {
	d->data[i] = right;
	d->data[i+1] = left;
      }
      else
      {
	d->data[i] = left;
	d->data[i+1] = right;
      }

      rws_write_unlock(&d->rws[i+1]);
      rws_write_unlock(&d->rws[i]);
    }
  }
  while (!sorted);

  return NULL;
}
/*
 * Test: many threads bubble-sort one shared array under per-slot
 * rw spinlocks; afterwards the array must be exactly 0..RWS_DATASIZE-1.
 */
static int
t_rwspin(void)
{
  struct rws_test_data d;

  /* Reverse-ordered data and a fresh spinlock for every slot */
  for (int i = 0; i < RWS_DATASIZE; i++)
  {
    d.data[i] = RWS_DATASIZE - i - 1;
    rws_init(&d.rws[i]);
  }

  /* Start the threads */
  pthread_t threads[RWS_THREADS];
  for (int t = 0; t < RWS_THREADS; t++)
    bt_assert(pthread_create(&threads[t], NULL, rwspin_thread_run, &d) == 0);

  /* Wait for the threads */
  for (int t = 0; t < RWS_THREADS; t++)
    bt_assert(pthread_join(threads[t], NULL) == 0);

  /* The combined effort must have sorted the array */
  for (int i = 0; i < RWS_DATASIZE; i++)
    bt_assert(d.data[i] == i);

  return 1;
}
int
main(int argc, char **argv)
{
@ -84,6 +174,7 @@ main(int argc, char **argv)
bt_bird_init();
bt_test_suite(t_locking, "Testing locks");
bt_test_suite(t_rwspin, "Testing rw spinlock");
return bt_exit_value();
}

View File

@ -55,7 +55,7 @@ static inline void rcu_read_lock(void)
/* Leave an RCU read-side critical section.
 *
 * NOTE: the scraped diff lost its -/+ markers here, leaving both the old
 * call and its replacement in the text — which would decrement the nesting
 * counter twice. Per the hunk header (one-line change), only the new
 * explicit acq_rel form is kept.
 */
static inline void rcu_read_unlock(void)
{
  /* Just decrement the nesting counter; when unlocked, nobody cares */
  atomic_fetch_sub_explicit(&this_rcu_thread.ctl, RCU_NEST_CNT, memory_order_acq_rel);
}
static inline _Bool rcu_read_active(void)

View File

@ -10,6 +10,7 @@
#define _BIRD_BIRD_H_
#include "lib/birdlib.h"
#include "lib/locking.h"
#include "lib/ip.h"
#include "lib/net.h"

View File

@ -35,6 +35,11 @@
* Locking subsystem
*/
/* Per-thread rw_spinlock debug tracking (declared in lib/locking.h):
 * stack of currently held locks, their acquisition timestamps,
 * current depth, and a bitmap of which stack slots are write locks. */
_Thread_local rw_spinlock *rw_spinlocks_taken[MAX_RWS_AT_ONCE];
_Thread_local btime rw_spinlocks_time[MAX_RWS_AT_ONCE];
_Thread_local u32 rw_spinlocks_taken_cnt;
_Thread_local u32 rw_spinlocks_taken_write;

/* Per-thread lock-order state for the domain locking subsystem */
_Thread_local struct lock_order locking_stack = {};
_Thread_local struct domain_generic **last_locked = NULL;