Mirror of https://gitlab.nic.cz/labs/bird.git (synced 2025-01-03 07:31:54 +00:00)
Netindex: converted to spinlocked hash
commit fece55b810
parent 1032bf2543
lib/netindex.c: 119 changed lines
@@ -21,39 +21,38 @@ struct netindex netindex_in_progress;
 #define NETINDEX_REHASH		netindex_rehash
 #define NETINDEX_PARAMS		/8, *1, 2, 2, 4, 28
 
-HASH_DEFINE_REHASH_FN(NETINDEX, struct netindex);
+static void NETINDEX_REHASH(void *_v) {
+  netindex_spinhash *v = _v;
+  int step;
+  {
+    NH_LOCK(SKIP_BACK(netindex_hash, hash, v), _);
+    SPINHASH_REHASH_PREPARE(v,NETINDEX,struct netindex,step);
+  }
+
+  if (!step) return;
+
+  if (step > 0) SPINHASH_REHASH_UP(v,NETINDEX,struct netindex,step);
+  if (step < 0) SPINHASH_REHASH_DOWN(v,NETINDEX,struct netindex,-step);
+
+  {
+    NH_LOCK(SKIP_BACK(netindex_hash, hash, v), _);
+    SPINHASH_REHASH_FINISH(v,NETINDEX);
+  }
+}
 
 static void netindex_hash_cleanup(void *netindex_hash);
 
 static struct netindex *
-net_lock_revive_unlock(struct netindex_hash_private *hp, struct netindex *i)
+net_lock_revive_unlock(netindex_hash *h, struct netindex *i)
 {
   if (!i)
     return NULL;
 
   lfuc_lock_revive(&i->uc);
-  lfuc_unlock(&i->uc, hp->cleanup_list, &hp->cleanup_event);
+  lfuc_unlock(&i->uc, h->cleanup_list, &h->cleanup_event);
   return i;
 }
 
-void
-netindex_hash_consistency_check(struct netindex_hash_private *nh)
-{
-  uint count = 0;
-  struct netindex * _Atomic *block = atomic_load_explicit(&nh->block, memory_order_relaxed);
-  u32 block_size = atomic_load_explicit(&nh->block_size, memory_order_relaxed);
-  HASH_WALK(nh->hash, next, i)
-  {
-    ASSERT_DIE(count < nh->hash.count);
-    ASSERT_DIE(i->index < block_size);
-    ASSERT_DIE(atomic_load_explicit(&block[i->index], memory_order_relaxed) == i);
-    count++;
-  }
-  HASH_WALK_END;
-
-  ASSERT_DIE(count == nh->hash.count);
-}
-
 /*
  * Index initialization
  */
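The new rehash callback receives only the spinhash pointer, so it uses SKIP_BACK(netindex_hash, hash, v) to climb back to the enclosing netindex_hash before taking its domain lock. SKIP_BACK is BIRD's container_of-style macro; the following is a minimal, self-contained stand-in (illustrative only, not BIRD's exact definition):

#include <stddef.h>
#include <stdio.h>

/* Recover a pointer to the enclosing structure from a pointer
 * to one of its members, like the kernel's container_of(). */
#define SKIP_BACK(type, member, ptr) \
  ((type *)((char *)(ptr) - offsetof(type, member)))

struct hash { int order; };
struct outer { int id; struct hash hash; };

int main(void)
{
  struct outer o = { .id = 42 };
  struct hash *h = &o.hash;

  /* Same shape as SKIP_BACK(netindex_hash, hash, v) in the hunk above */
  struct outer *op = SKIP_BACK(struct outer, hash, h);
  printf("%d\n", op->id);   /* prints 42 */
  return 0;
}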
@@ -72,7 +71,7 @@ netindex_hash_new(pool *sp, event_list *cleanup_target, u8 type)
 
   nh->slab = net_addr_length[type] ? sl_new(nh->pool, sizeof (struct netindex) + net_addr_length[type]) : NULL;
 
-  HASH_INIT(nh->hash, nh->pool, NETINDEX_ORDER);
+  SPINHASH_INIT(nh->hash, NETINDEX, nh->pool, cleanup_target);
   atomic_store_explicit(&nh->block_size, NETINDEX_INIT_BLOCK_SIZE, memory_order_release);
   atomic_store_explicit(&nh->block,
       mb_allocz(nh->pool, NETINDEX_INIT_BLOCK_SIZE * sizeof *nh->block),
@@ -95,8 +94,6 @@ netindex_hash_cleanup(void *_nh)
   DOMAIN(attrs) dom = nh->lock;
   LOCK_DOMAIN(attrs, dom);
 
-  EXPENSIVE_CHECK(netindex_hash_consistency_check(nh));
-
   uint kept = 0;
 
   uint bs = atomic_load_explicit(&nh->block_size, memory_order_relaxed);
@@ -147,7 +144,7 @@ netindex_hash_cleanup(void *_nh)
     ASSERT_DIE(&netindex_in_progress == atomic_exchange_explicit(&block[i], NULL, memory_order_acq_rel));
 
     /* And free it from other structures */
-    HASH_REMOVE2(nh->hash, NETINDEX, nh->pool, ni);
+    SPINHASH_REMOVE(nh->hash, NETINDEX, ni);
     hmap_clear(&nh->id_map, ni->index);
 
     if (nh->slab)
@@ -156,8 +153,6 @@ netindex_hash_cleanup(void *_nh)
       mb_free(ni);
   }
 
-  EXPENSIVE_CHECK(netindex_hash_consistency_check(nh));
-
   if (kept || !nh->deleted_event)
   {
     UNLOCK_DOMAIN(attrs, dom);
@@ -170,11 +165,14 @@ netindex_hash_cleanup(void *_nh)
   event_list *t = nh->deleted_target;
 
   /* Check cleanliness */
-  HASH_WALK(nh->hash, next, i)
+  SPINHASH_WALK(nh->hash, NETINDEX, i)
     bug("Stray netindex in deleted hash");
-  HASH_WALK_END;
+  SPINHASH_WALK_END;
 
-  /* Pool free is enough to drop everything */
+  /* Cleanup the spinhash itself */
+  SPINHASH_FREE(nh->hash);
+
+  /* Pool free is enough to drop everything else */
   rp_free(nh->pool);
 
   /* And only the lock remains */
@@ -190,8 +188,6 @@ netindex_hash_delete(netindex_hash *h, event *e, event_list *t)
 {
   NH_LOCK(h, hp);
 
-  EXPENSIVE_CHECK(netindex_hash_consistency_check(nh));
-
   hp->deleted_event = e;
   hp->deleted_target = t;
 
@@ -201,15 +197,28 @@ netindex_hash_delete(netindex_hash *h, event *e, event_list *t)
 /*
  * Private index manipulation
  */
-struct netindex *
-net_find_index_fragile(struct netindex_hash_private *hp, const net_addr *n)
+static struct netindex *
+net_find_index_fragile(netindex_hash *nh, const net_addr *n)
 {
-  ASSERT_DIE(n->type == hp->net_type);
+  ASSERT_DIE(n->type == nh->net_type);
 
-  EXPENSIVE_CHECK(netindex_hash_consistency_check(hp));
-
   u32 h = net_hash(n);
-  return HASH_FIND(hp->hash, NETINDEX, h, n);
+  return SPINHASH_FIND(nh->hash, NETINDEX, h, n);
+}
+
+static _Bool
+net_validate_index(netindex_hash *h, struct netindex *ni)
+{
+  struct netindex * _Atomic *block = atomic_load_explicit(&h->block, memory_order_relaxed);
+  u32 bs = atomic_load_explicit(&h->block_size, memory_order_relaxed);
+
+  ASSERT_DIE(ni->index < bs);
+  struct netindex *bni = atomic_load_explicit(&block[ni->index], memory_order_acquire);
+  if (bni == ni)
+    return 1;
+
+  ASSERT_DIE(bni == &netindex_in_progress);
+  return 0;
 }
 
 static struct netindex *
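The lookup itself is now lock-free, so net_validate_index() double-checks with an acquire load that the index block still publishes the entry the hash returned; the address of netindex_in_progress serves as a sentinel for a slot whose entry is mid-teardown. A self-contained sketch of that validation pattern (names and layout are illustrative, not BIRD's API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct item { unsigned index; };

#define BLOCK_SIZE 8

static struct item in_progress;                /* sentinel: "slot being torn down" */
static struct item *_Atomic block[BLOCK_SIZE]; /* index -> item, atomically published */

static bool validate(struct item *it)
{
  struct item *slot = atomic_load_explicit(&block[it->index], memory_order_acquire);
  return slot == it;  /* anything else (NULL or &in_progress) means: don't trust it */
}

int main(void)
{
  struct item a = { .index = 3 };

  atomic_store_explicit(&block[a.index], &a, memory_order_release);
  printf("%d\n", (int) validate(&a));          /* 1: still published */

  atomic_store_explicit(&block[a.index], &in_progress, memory_order_release);
  printf("%d\n", (int) validate(&a));          /* 0: sentinel in the slot */
  return 0;
}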
@@ -230,7 +239,7 @@ net_new_index_locked(struct netindex_hash_private *hp, const net_addr *n)
   };
   net_copy(ni->addr, n);
 
-  HASH_INSERT2(hp->hash, NETINDEX, hp->pool, ni);
+  SPINHASH_INSERT(hp->hash, NETINDEX, ni);
 
   struct netindex * _Atomic *block = atomic_load_explicit(&hp->block, memory_order_relaxed);
   u32 bs = atomic_load_explicit(&hp->block_size, memory_order_relaxed);
@@ -257,7 +266,7 @@ net_new_index_locked(struct netindex_hash_private *hp, const net_addr *n)
   ASSERT_DIE(i < nbs);
   atomic_store_explicit(&block[i], ni, memory_order_release);
 
-  return net_lock_revive_unlock(hp, ni);
+  return ni;
 }
 
 
@@ -280,26 +289,23 @@ void net_unlock_index(netindex_hash *h, struct netindex *i)
 struct netindex *
 net_find_index(netindex_hash *h, const net_addr *n)
 {
-  NH_LOCK(h, hp);
-  struct netindex *ni = net_find_index_fragile(hp, n);
-  return (ni == &netindex_in_progress) ? NULL : net_lock_revive_unlock(hp, ni);
+  RCU_ANCHOR(u);
+  struct netindex *ni = net_find_index_fragile(h, n);
+  return (ni && net_validate_index(h, ni)) ? net_lock_revive_unlock(h, ni) : NULL;
 }
 
 struct netindex *
 net_get_index(netindex_hash *h, const net_addr *n)
 {
-  while (1)
-  {
-    NH_LOCK(h, hp);
-    struct netindex *ni = net_find_index_fragile(hp, n);
-    if (ni == &netindex_in_progress)
-      continue;
-
-    if (ni)
-      return net_lock_revive_unlock(hp, ni);
-    else
-      return net_new_index_locked(hp, n);
-  }
+  struct netindex *ni = net_find_index(h, n);
+  if (ni) return ni;
+
+  NH_LOCK(h, hp);
+
+  /* Somebody may have added one inbetween */
+  return net_lock_revive_unlock(h,
+      (net_find_index_fragile(h, n) ?:
+       net_new_index_locked(hp, n)));
 }
 
 struct netindex *
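net_get_index() is now a classic double-checked pattern: try the lock-free net_find_index() first, and only on a miss take NH_LOCK and repeat the fragile lookup before allocating, because another writer may have inserted the entry in between. A generic C11 sketch of that find-then-lock-then-recheck shape, using a pthread mutex as a stand-in for BIRD's domain lock (names are hypothetical):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t writer_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic(int *) slot;          /* stands in for the whole hash table */

static int *get_or_create(void)
{
  /* Fast path: lock-free read, like net_find_index() under RCU */
  int *v = atomic_load_explicit(&slot, memory_order_acquire);
  if (v)
    return v;

  pthread_mutex_lock(&writer_lock);

  /* Somebody may have added one in between */
  v = atomic_load_explicit(&slot, memory_order_acquire);
  if (!v)
  {
    v = malloc(sizeof *v);
    *v = 42;
    atomic_store_explicit(&slot, v, memory_order_release);
  }

  pthread_mutex_unlock(&writer_lock);
  return v;
}

int main(void)
{
  printf("%d\n", *get_or_create());  /* 42, allocated exactly once */
  return 0;
}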
@@ -320,8 +326,5 @@ net_resolve_index(netindex_hash *h, u32 i)
   if (ni == &netindex_in_progress)
     RCU_RETRY(u);
 
-  lfuc_lock_revive(&ni->uc);
-  net_unlock_index(h, ni);
-
-  return ni;
+  return net_lock_revive_unlock(h, ni);
 }
@@ -11,20 +11,22 @@
 
 #include "lib/netindex.h"
 
+typedef SPINHASH(struct netindex) netindex_spinhash;
+
 #define NETINDEX_HASH_PUBLIC \
   DOMAIN(attrs) lock;                          /* Assigned lock */ \
   event_list *cleanup_list;                    /* Cleanup event list */ \
   event cleanup_event;                         /* Cleanup event */ \
   u8 net_type;                                 /* Which NET_* is stored */ \
   uint _Atomic block_size;                     /* How big block is */ \
   struct netindex * _Atomic * _Atomic block;   /* u32 to netindex */ \
+  netindex_spinhash hash;                      /* Spinlocking hashtable */ \
 
 struct netindex_hash_private {
   struct { NETINDEX_HASH_PUBLIC; };
   struct netindex_hash_private **locked_at;
   pool *pool;
   slab *slab;
-  HASH(struct netindex) hash;
   struct hmap id_map;
   u32 block_epoch;
   event *deleted_event;
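This hunk moves the hash out of the lock-protected private fields and into NETINDEX_HASH_PUBLIC, the block of fields shared between the public and private views of the structure; the private struct embeds them through an anonymous struct so both views alias the same leading layout. A minimal sketch of that embedding trick (hypothetical names, assuming the public view is the leading part of the private one, as in this file):

#include <stdio.h>

/* Fields visible to lock-free readers */
#define THING_PUBLIC \
  int net_type; \
  unsigned block_size

typedef struct thing { THING_PUBLIC; } thing;

/* Full view, only usable while holding the lock */
struct thing_private {
  struct { THING_PUBLIC; };  /* anonymous struct: same members at same offsets */
  int writers_only_state;
};

int main(void)
{
  struct thing_private priv = { .net_type = 1, .block_size = 32 };

  /* The public view is just the leading part of the private one;
   * BIRD relies on the same layout identity when casting between views. */
  thing *pub = (thing *) &priv;
  printf("%d %u\n", pub->net_type, pub->block_size);
  return 0;
}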