0
0
mirror of https://gitlab.nic.cz/labs/bird.git synced 2025-01-18 15:01:53 +00:00

Netindex is now net-type specific

Combining all network types in one netindex wasted memory
and added needless complexity.
This commit is contained in:
Maria Matejka 2024-06-05 17:47:32 +02:00
parent 6ffab25f01
commit 8f4a784e14
6 changed files with 73 additions and 97 deletions

View File

@ -36,29 +36,23 @@ net_lock_revive_unlock(struct netindex_hash_private *hp, struct netindex *i)
void void
netindex_hash_consistency_check(struct netindex_hash_private *nh) netindex_hash_consistency_check(struct netindex_hash_private *nh)
{ {
for (uint t = 0; t < NET_MAX; t++)
{
if (!nh->net[t].hash.data)
continue;
uint count = 0; uint count = 0;
HASH_WALK(nh->net[t].hash, next, i) HASH_WALK(nh->hash, next, i)
{ {
ASSERT_DIE(count < nh->net[t].hash.count); ASSERT_DIE(count < nh->hash.count);
ASSERT_DIE(nh->net[t].block[i->index] == i); ASSERT_DIE(nh->block[i->index] == i);
count++; count++;
} }
HASH_WALK_END; HASH_WALK_END;
ASSERT_DIE(count == nh->net[t].hash.count); ASSERT_DIE(count == nh->hash.count);
}
} }
/* /*
* Index initialization * Index initialization
*/ */
netindex_hash * netindex_hash *
netindex_hash_new(pool *sp, event_list *cleanup_target) netindex_hash_new(pool *sp, event_list *cleanup_target, u8 type)
{ {
DOMAIN(attrs) dom = DOMAIN_NEW(attrs); DOMAIN(attrs) dom = DOMAIN_NEW(attrs);
LOCK_DOMAIN(attrs, dom); LOCK_DOMAIN(attrs, dom);
@ -68,6 +62,15 @@ netindex_hash_new(pool *sp, event_list *cleanup_target)
struct netindex_hash_private *nh = mb_allocz(p, sizeof *nh); struct netindex_hash_private *nh = mb_allocz(p, sizeof *nh);
nh->lock = dom; nh->lock = dom;
nh->pool = p; nh->pool = p;
nh->net_type = type;
nh->slab = net_addr_length[type] ? sl_new(nh->pool, sizeof (struct netindex) + net_addr_length[type]) : NULL;
HASH_INIT(nh->hash, nh->pool, NETINDEX_ORDER);
nh->block_size = 128;
nh->block = mb_allocz(nh->pool, nh->block_size * sizeof (struct netindex *));
hmap_init(&nh->id_map, nh->pool, 128);
nh->cleanup_list = cleanup_target; nh->cleanup_list = cleanup_target;
nh->cleanup_event = (event) { .hook = netindex_hash_cleanup, nh }; nh->cleanup_event = (event) { .hook = netindex_hash_cleanup, nh };
@ -88,10 +91,9 @@ netindex_hash_cleanup(void *_nh)
uint kept = 0; uint kept = 0;
for (uint t = 0; t < NET_MAX; t++) for (uint i = 0; i < nh->block_size; i++)
for (uint i = 0; i < nh->net[t].block_size; i++)
{ {
struct netindex *ni = nh->net[t].block[i]; struct netindex *ni = nh->block[i];
if (!ni) if (!ni)
continue; continue;
@ -99,11 +101,11 @@ netindex_hash_cleanup(void *_nh)
if (lfuc_finished(&ni->uc)) if (lfuc_finished(&ni->uc))
{ {
HASH_REMOVE2(nh->net[t].hash, NETINDEX, nh->pool, ni); HASH_REMOVE2(nh->hash, NETINDEX, nh->pool, ni);
hmap_clear(&nh->net[t].id_map, ni->index); hmap_clear(&nh->id_map, ni->index);
nh->net[t].block[i] = NULL; nh->block[i] = NULL;
if (nh->net[t].slab) if (nh->slab)
sl_free(ni); sl_free(ni);
else else
mb_free(ni); mb_free(ni);
@ -126,13 +128,9 @@ netindex_hash_cleanup(void *_nh)
event_list *t = nh->deleted_target; event_list *t = nh->deleted_target;
/* Check cleanliness */ /* Check cleanliness */
for (uint t = 0; t < NET_MAX; t++) HASH_WALK(nh->hash, next, i)
if (nh->net[t].hash.data)
{
HASH_WALK(nh->net[t].hash, next, i)
bug("Stray netindex in deleted hash"); bug("Stray netindex in deleted hash");
HASH_WALK_END; HASH_WALK_END;
}
/* Pool free is enough to drop everything */ /* Pool free is enough to drop everything */
rp_free(nh->pool); rp_free(nh->pool);
@ -145,18 +143,6 @@ netindex_hash_cleanup(void *_nh)
ev_send(t, e); ev_send(t, e);
} }
static void
netindex_hash_init(struct netindex_hash_private *hp, u8 type)
{
ASSERT_DIE(hp->net[type].block == NULL);
hp->net[type].slab = net_addr_length[type] ? sl_new(hp->pool, sizeof (struct netindex) + net_addr_length[type]) : NULL;
HASH_INIT(hp->net[type].hash, hp->pool, NETINDEX_ORDER);
hp->net[type].block_size = 128;
hp->net[type].block = mb_allocz(hp->pool, hp->net[type].block_size * sizeof (struct netindex *));
hmap_init(&hp->net[type].id_map, hp->pool, 128);
};
void void
netindex_hash_delete(netindex_hash *h, event *e, event_list *t) netindex_hash_delete(netindex_hash *h, event *e, event_list *t)
{ {
@ -176,25 +162,20 @@ netindex_hash_delete(netindex_hash *h, event *e, event_list *t)
struct netindex * struct netindex *
net_find_index_fragile_chain(struct netindex_hash_private *hp, const net_addr *n) net_find_index_fragile_chain(struct netindex_hash_private *hp, const net_addr *n)
{ {
ASSERT_DIE(n->type < NET_MAX); ASSERT_DIE(n->type == hp->net_type);
if (!hp->net[n->type].block)
return NULL;
u32 h = net_hash(n); u32 h = net_hash(n);
return HASH_FIND_CHAIN(hp->net[n->type].hash, NETINDEX, h, n); return HASH_FIND_CHAIN(hp->hash, NETINDEX, h, n);
} }
struct netindex * struct netindex *
net_find_index_fragile(struct netindex_hash_private *hp, const net_addr *n) net_find_index_fragile(struct netindex_hash_private *hp, const net_addr *n)
{ {
ASSERT_DIE(n->type < NET_MAX); ASSERT_DIE(n->type == hp->net_type);
if (!hp->net[n->type].block)
return NULL;
EXPENSIVE_CHECK(netindex_hash_consistency_check(nh)); EXPENSIVE_CHECK(netindex_hash_consistency_check(hp));
u32 h = net_hash(n); u32 h = net_hash(n);
return HASH_FIND(hp->net[n->type].hash, NETINDEX, h, n); return HASH_FIND(hp->hash, NETINDEX, h, n);
} }
static struct netindex * static struct netindex *
@ -208,14 +189,11 @@ net_new_index_locked(struct netindex_hash_private *hp, const net_addr *n)
{ {
ASSERT_DIE(!hp->deleted_event); ASSERT_DIE(!hp->deleted_event);
if (!hp->net[n->type].block) u32 i = hmap_first_zero(&hp->id_map);
netindex_hash_init(hp, n->type); hmap_set(&hp->id_map, i);
u32 i = hmap_first_zero(&hp->net[n->type].id_map); struct netindex *ni = hp->slab ?
hmap_set(&hp->net[n->type].id_map, i); sl_alloc(hp->slab) :
struct netindex *ni = hp->net[n->type].slab ?
sl_alloc(hp->net[n->type].slab) :
mb_alloc(hp->pool, n->length + sizeof *ni); mb_alloc(hp->pool, n->length + sizeof *ni);
*ni = (struct netindex) { *ni = (struct netindex) {
@ -224,21 +202,21 @@ net_new_index_locked(struct netindex_hash_private *hp, const net_addr *n)
}; };
net_copy(ni->addr, n); net_copy(ni->addr, n);
HASH_INSERT2(hp->net[n->type].hash, NETINDEX, hp->pool, ni); HASH_INSERT2(hp->hash, NETINDEX, hp->pool, ni);
while (hp->net[n->type].block_size <= i) while (hp->block_size <= i)
{ {
u32 bs = hp->net[n->type].block_size; u32 bs = hp->block_size;
struct netindex **nb = mb_alloc(hp->pool, bs * 2 * sizeof *nb); struct netindex **nb = mb_alloc(hp->pool, bs * 2 * sizeof *nb);
memcpy(nb, hp->net[n->type].block, bs * sizeof *nb); memcpy(nb, hp->block, bs * sizeof *nb);
memset(&nb[bs], 0, bs * sizeof *nb); memset(&nb[bs], 0, bs * sizeof *nb);
mb_free(hp->net[n->type].block); mb_free(hp->block);
hp->net[n->type].block = nb; hp->block = nb;
hp->net[n->type].block_size *= 2; hp->block_size *= 2;
} }
hp->net[n->type].block[i] = ni; hp->block[i] = ni;
return net_lock_revive_unlock(hp, ni); return net_lock_revive_unlock(hp, ni);
} }
@ -277,13 +255,10 @@ net_get_index(netindex_hash *h, const net_addr *n)
} }
struct netindex * struct netindex *
net_resolve_index(netindex_hash *h, u8 net_type, u32 i) net_resolve_index(netindex_hash *h, u32 i)
{ {
NH_LOCK(h, hp); NH_LOCK(h, hp);
if (i >= hp->net[net_type].block_size)
return NULL;
struct netindex *ni = hp->net[net_type].block[i]; struct netindex *ni = hp->block[i];
ASSERT_DIE(!ni || (ni->addr->type == net_type));
return net_lock_revive_unlock(hp, ni); return net_lock_revive_unlock(hp, ni);
} }

View File

@ -28,13 +28,13 @@ struct netindex {
typedef union netindex_hash netindex_hash; typedef union netindex_hash netindex_hash;
/* Initialization and teardown */ /* Initialization and teardown */
netindex_hash *netindex_hash_new(pool *, event_list *); netindex_hash *netindex_hash_new(pool *, event_list *, u8);
void netindex_hash_delete(netindex_hash *, event *, event_list *); void netindex_hash_delete(netindex_hash *, event *, event_list *);
/* Find/get/resolve index; pointer valid until end of task */ /* Find/get/resolve index; pointer valid until end of task */
struct netindex *net_find_index(netindex_hash *, const net_addr *); struct netindex *net_find_index(netindex_hash *, const net_addr *);
struct netindex *net_get_index(netindex_hash *, const net_addr *); struct netindex *net_get_index(netindex_hash *, const net_addr *);
struct netindex *net_resolve_index(netindex_hash *, u8, u32); struct netindex *net_resolve_index(netindex_hash *, u32);
/* Update use-count without allocating a handle. Take same care /* Update use-count without allocating a handle. Take same care
* to ensure that your locks and unlocks are always balanced. */ * to ensure that your locks and unlocks are always balanced. */

View File

@ -15,18 +15,17 @@
DOMAIN(attrs) lock; /* Assigned lock */ \ DOMAIN(attrs) lock; /* Assigned lock */ \
event_list *cleanup_list; /* Cleanup event list */ \ event_list *cleanup_list; /* Cleanup event list */ \
event cleanup_event; /* Cleanup event */ \ event cleanup_event; /* Cleanup event */ \
u8 net_type; /* Which NET_* is stored */ \
struct netindex_hash_private { struct netindex_hash_private {
struct { NETINDEX_HASH_PUBLIC; }; struct { NETINDEX_HASH_PUBLIC; };
struct netindex_hash_private **locked_at; struct netindex_hash_private **locked_at;
pool *pool; pool *pool;
struct {
slab *slab; slab *slab;
HASH(struct netindex) hash; HASH(struct netindex) hash;
uint block_size; uint block_size;
struct netindex **block; struct netindex **block;
struct hmap id_map; struct hmap id_map;
} net[NET_MAX];
event *deleted_event; event *deleted_event;
event_list *deleted_target; event_list *deleted_target;
}; };

View File

@ -252,7 +252,7 @@ rt_export_next_feed(struct rt_export_feeder *f)
struct netindex *ni = NULL; struct netindex *ni = NULL;
u32 mfi = atomic_load_explicit(&e->max_feed_index, memory_order_acquire); u32 mfi = atomic_load_explicit(&e->max_feed_index, memory_order_acquire);
for (; !ni && f->feed_index < mfi; f->feed_index++) for (; !ni && f->feed_index < mfi; f->feed_index++)
ni = net_resolve_index(e->netindex, e->net_type, f->feed_index); ni = net_resolve_index(e->netindex, f->feed_index);
if (!ni) if (!ni)
{ {

View File

@ -123,7 +123,7 @@ pool *rt_table_pool;
list routing_tables; list routing_tables;
list deleted_routing_tables; list deleted_routing_tables;
netindex_hash *rt_global_netindex_hash; netindex_hash *rt_global_netindex_hash[NET_MAX];
#define RT_INITIAL_ROUTES_BLOCK_SIZE 128 #define RT_INITIAL_ROUTES_BLOCK_SIZE 128
struct rt_cork rt_cork; struct rt_cork rt_cork;
@ -2884,7 +2884,7 @@ rt_setup(pool *pp, struct rtable_config *cf)
if (t->id >= rtable_max_id) if (t->id >= rtable_max_id)
rtable_max_id = t->id + 1; rtable_max_id = t->id + 1;
t->netindex = rt_global_netindex_hash; t->netindex = rt_global_netindex_hash[cf->addr_type];
atomic_store_explicit(&t->routes, mb_allocz(p, RT_INITIAL_ROUTES_BLOCK_SIZE * sizeof(net)), memory_order_relaxed); atomic_store_explicit(&t->routes, mb_allocz(p, RT_INITIAL_ROUTES_BLOCK_SIZE * sizeof(net)), memory_order_relaxed);
atomic_store_explicit(&t->routes_block_size, RT_INITIAL_ROUTES_BLOCK_SIZE, memory_order_relaxed); atomic_store_explicit(&t->routes_block_size, RT_INITIAL_ROUTES_BLOCK_SIZE, memory_order_relaxed);
@ -3025,7 +3025,9 @@ rt_init(void)
ev_init_list(&rt_cork.queue, &main_birdloop, "Route cork release"); ev_init_list(&rt_cork.queue, &main_birdloop, "Route cork release");
rt_cork.run = (event) { .hook = rt_cork_release_hook }; rt_cork.run = (event) { .hook = rt_cork_release_hook };
idm_init(&rtable_idm, rt_table_pool, 256); idm_init(&rtable_idm, rt_table_pool, 256);
rt_global_netindex_hash = netindex_hash_new(rt_table_pool, &global_event_list);
for (uint i=1; i<NET_MAX; i++)
rt_global_netindex_hash[i] = netindex_hash_new(rt_table_pool, &global_event_list, i);
} }
static _Bool static _Bool

View File

@ -1694,7 +1694,7 @@ bgp_init_prefix_table(struct bgp_ptx_private *c)
* locked while entering a loop. That's kinda stupid but i'm lazy now * locked while entering a loop. That's kinda stupid but i'm lazy now
* to rework it. */ * to rework it. */
ASSERT_DIE(!c->netindex); ASSERT_DIE(!c->netindex);
c->netindex = netindex_hash_new(c->c->pool, proto_event_list(c->c->c.proto)); c->netindex = netindex_hash_new(c->c->pool, proto_event_list(c->c->c.proto), c->c->c.net_type);
u32 len = 64; u32 len = 64;
struct bgp_prefix * _Atomic * block = mb_allocz(c->pool, len * sizeof *block); struct bgp_prefix * _Atomic * block = mb_allocz(c->pool, len * sizeof *block);