mirror of https://gitlab.nic.cz/labs/bird.git synced 2024-11-17 16:48:43 +00:00

Slab allocator can free the blocks without knowing the parent structure

Maria Matejka 2022-04-04 20:31:14 +02:00
parent 3a6eda995e
commit ebd807c0b8
13 changed files with 128 additions and 125 deletions
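
The mechanism behind the change: every slab object lives in a page-aligned block whose header, struct sl_head, now stores a back-pointer to its owning slab. Masking an object pointer down to the page boundary yields the header, and the header yields the slab, so sl_free() no longer needs the slab passed in. A minimal sketch of that lookup (the struct and macro names mirror the real ones, but the fixed PAGE_SIZE and the sl_owner() helper are illustrative assumptions; BIRD determines the page size at runtime):

#include <stdint.h>

#define PAGE_SIZE 4096UL  /* assumed constant; the real code reads the page size at runtime */

struct slab;              /* parent allocator, opaque in this sketch */

struct sl_head {          /* placed at the very start of each page */
  struct slab *slab;      /* back-pointer introduced by this commit */
  /* ... list node, bitmap of used objects, ... */
};

/* Mask an object pointer down to its page boundary to reach the header. */
#define SL_GET_HEAD(p)  ((struct sl_head *) ((uintptr_t) (p) & ~(PAGE_SIZE - 1)))

/* With the back-pointer in place, the owning slab follows from the pointer alone. */
static inline struct slab *sl_owner(void *obj)
{
  return SL_GET_HEAD(obj)->slab;
}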

View File

@@ -100,7 +100,7 @@ typedef struct slab slab;
slab *sl_new(pool *, unsigned size);
void *sl_alloc(slab *);
void *sl_allocz(slab *);
void sl_free(slab *, void *);
void sl_free(void *);
/*
* Low-level memory allocation functions, please don't use

View File

@@ -98,7 +98,7 @@ sl_allocz(slab *s)
}
void
sl_free(slab *s, void *oo)
sl_free(void *oo)
{
struct sl_obj *o = SKIP_BACK(struct sl_obj, data, oo);
@@ -170,6 +170,7 @@ static struct resclass sl_class = {
};
struct sl_head {
struct slab *slab;
node n;
u32 num_full;
u32 used_bits[0];
@@ -236,7 +237,7 @@ sl_alloc(slab *s)
struct sl_head *h;
redo:
h = HEAD(s->partial_heads);
h = SKIP_BACK(struct sl_head, n, HEAD(s->partial_heads));
if (!h->n.next)
goto no_partial;
okay:
@@ -262,7 +263,7 @@ okay:
goto redo;
no_partial:
h = HEAD(s->empty_heads);
h = SKIP_BACK(struct sl_head, n, HEAD(s->empty_heads));
if (h->n.next)
{
rem_node(&h->n);
@@ -270,12 +271,16 @@ no_partial:
s->num_empty_heads--;
goto okay;
}
h = alloc_page();
ASSERT_DIE(SL_GET_HEAD(h) == h);
#ifdef POISON
memset(h, 0xba, page_size);
#endif
ASSERT_DIE(SL_GET_HEAD(h) == h);
memset(h, 0, s->head_size);
h->slab = s;
add_head(&s->partial_heads, &h->n);
goto okay;
}
@@ -305,9 +310,10 @@ sl_allocz(slab *s)
* and returns it back to the Slab @s.
*/
void
sl_free(slab *s, void *oo)
sl_free(void *oo)
{
struct sl_head *h = SL_GET_HEAD(oo);
struct slab *s = h->slab;
#ifdef POISON
memset(oo, 0xdb, s->data_size);
@@ -347,13 +353,14 @@ static void
slab_free(resource *r)
{
slab *s = (slab *) r;
struct sl_head *h, *g;
struct sl_head *h;
node *nn, *nxt;
WALK_LIST_DELSAFE(h, g, s->empty_heads)
WALK_LIST2_DELSAFE(h, nn, nxt, s->empty_heads, n)
free_page(h);
WALK_LIST_DELSAFE(h, g, s->partial_heads)
WALK_LIST2_DELSAFE(h, nn, nxt, s->partial_heads, n)
free_page(h);
WALK_LIST_DELSAFE(h, g, s->full_heads)
WALK_LIST2_DELSAFE(h, nn, nxt, s->full_heads, n)
free_page(h);
}
@@ -363,12 +370,13 @@ slab_dump(resource *r)
slab *s = (slab *) r;
int ec=0, pc=0, fc=0;
struct sl_head *h;
node *nn;
WALK_LIST(h, s->empty_heads)
WALK_LIST2(h, nn, s->empty_heads, n)
ec++;
WALK_LIST(h, s->partial_heads)
WALK_LIST2(h, nn, s->partial_heads, n)
pc++;
WALK_LIST(h, s->full_heads)
WALK_LIST2(h, nn, s->full_heads, n)
fc++;
debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec, pc, fc, s->objs_per_slab, s->obj_size);
}
@@ -379,19 +387,20 @@ slab_memsize(resource *r)
slab *s = (slab *) r;
size_t heads = 0;
struct sl_head *h;
node *nn;
WALK_LIST(h, s->full_heads)
WALK_LIST2(h, nn, s->full_heads, n)
heads++;
size_t items = heads * s->objs_per_slab;
WALK_LIST(h, s->partial_heads)
WALK_LIST2(h, nn, s->partial_heads, n)
{
heads++;
items += h->num_full;
}
WALK_LIST(h, s->empty_heads)
WALK_LIST2(h, nn, s->empty_heads, n)
heads++;
size_t eff = items * s->data_size;
@@ -407,11 +416,12 @@ slab_lookup(resource *r, unsigned long a)
{
slab *s = (slab *) r;
struct sl_head *h;
node *nn;
WALK_LIST(h, s->partial_heads)
WALK_LIST2(h, nn, s->partial_heads, n)
if ((unsigned long) h < a && (unsigned long) h + page_size < a)
return r;
WALK_LIST(h, s->full_heads)
WALK_LIST2(h, nn, s->full_heads, n)
if ((unsigned long) h < a && (unsigned long) h + page_size < a)
return r;
return NULL;
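
The list-walking changes above (HEAD() replaced by SKIP_BACK(), WALK_LIST by WALK_LIST2) follow from the same layout change: struct sl_head now begins with the slab back-pointer, so its embedded node n no longer sits at offset zero and a node pointer cannot be treated as the head itself. Conceptually this is the container-of idiom; a generic sketch with simplified types (CONTAINER_OF and head_from_node are illustrative, not BIRD's actual macros):

#include <stddef.h>

struct node { struct node *next, *prev; };  /* embedded doubly-linked list node */

struct slab;                                /* opaque parent allocator */

struct sl_head_like {
  struct slab *slab;                        /* new first member pushes the node off offset 0 */
  struct node n;
};

/* Step back from a pointer to an embedded member to the enclosing structure,
 * which is the job SKIP_BACK() does in the real code. */
#define CONTAINER_OF(type, member, ptr) \
  ((type *) ((char *) (ptr) - offsetof(type, member)))

static inline struct sl_head_like *head_from_node(struct node *nn)
{
  return CONTAINER_OF(struct sl_head_like, n, nn);
}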

View File

@@ -17,6 +17,25 @@ static const int sizes[] = {
#define TEST_SIZE 1024 * 128
#define ITEMS(sz) TEST_SIZE / ( (sz) >> u32_log2((sz))/2 )
struct test_request {
int size;
enum strategy {
TEST_NONE,
TEST_FORWARDS,
TEST_BACKWARDS,
TEST_RANDOM,
TEST_MIXED,
TEST__MAX,
} strategy;
};
const char * const strategy_name[TEST__MAX] = {
[TEST_FORWARDS] = "forwards",
[TEST_BACKWARDS] = "backwards",
[TEST_RANDOM] = "random",
[TEST_MIXED] = "mixed",
};
static inline byte *test_alloc(slab *s, int sz, struct resmem *sliz)
{
byte *out = sl_alloc(s);
@@ -42,7 +61,7 @@ static inline void test_free(slab *s, byte *block, int sz, struct resmem *sliz)
block[p]++;
}
sl_free(s, block);
sl_free(block);
struct resmem ns = rmemsize((resource *) s);
@@ -60,118 +79,93 @@ static inline struct resmem get_memsize(slab *s)
}
static int
t_slab_forwards(const void *data)
t_slab(const void *data)
{
int sz = (intptr_t) data;
slab *s = sl_new(&root_pool, sz);
const struct test_request *tr = data;
int sz = tr->size;
slab *s = sl_new(&root_pool, sz);
struct resmem sliz = get_memsize(s);
int n = ITEMS(sz);
byte **block = mb_alloc(&root_pool, n * sizeof(*block));
for (int i = 0; i < n; i++)
block[i] = test_alloc(s, sz, &sliz);
switch (tr->strategy) {
case TEST_FORWARDS:
for (int i = 0; i < n; i++)
block[i] = test_alloc(s, sz, &sliz);
for (int i = 0; i < n; i++)
test_free(s, block[i], sz, &sliz);
for (int i = 0; i < n; i++)
test_free(s, block[i], sz, &sliz);
mb_free(block);
break;
return 1;
}
case TEST_BACKWARDS:
for (int i = 0; i < n; i++)
block[i] = test_alloc(s, sz, &sliz);
static int
t_slab_backwards(const void *data)
{
int sz = (intptr_t) data;
slab *s = sl_new(&root_pool, sz);
for (int i = n - 1; i >= 0; i--)
test_free(s, block[i], sz, &sliz);
struct resmem sliz = get_memsize(s);
break;
int n = ITEMS(sz);
byte **block = mb_alloc(&root_pool, n * sizeof(*block));
case TEST_RANDOM:
for (int i = 0; i < n; i++)
block[i] = test_alloc(s, sz, &sliz);
for (int i = 0; i < n; i++)
block[i] = test_alloc(s, sz, &sliz);
for (int i = 0; i < n; i++)
{
int pos = bt_random() % (n - i);
test_free(s, block[pos], sz, &sliz);
if (pos != n - i - 1)
block[pos] = block[n - i - 1];
}
for (int i = n - 1; i >= 0; i--)
test_free(s, block[i], sz, &sliz);
break;
mb_free(block);
case TEST_MIXED:
{
int cur = 0;
int pending = n;
return 1;
}
while (cur + pending > 0) {
int action = bt_random() % (cur + pending);
static int
t_slab_random(const void *data)
{
int sz = (intptr_t) data;
slab *s = sl_new(&root_pool, sz);
if (action < cur) {
test_free(s, block[action], sz, &sliz);
if (action != --cur)
block[action] = block[cur];
} else {
block[cur++] = test_alloc(s, sz, &sliz);
pending--;
}
}
struct resmem sliz = get_memsize(s);
break;
}
int n = ITEMS(sz);
byte **block = mb_alloc(&root_pool, n * sizeof(*block));
for (int i = 0; i < n; i++)
block[i] = test_alloc(s, sz, &sliz);
for (int i = 0; i < n; i++)
{
int pos = bt_random() % (n - i);
test_free(s, block[pos], sz, &sliz);
if (pos != n - i - 1)
block[pos] = block[n - i - 1];
default: bug("This shouldn't happen");
}
mb_free(block);
return 1;
}
static int
t_slab_mixed(const void *data)
{
int sz = (intptr_t) data;
slab *s = sl_new(&root_pool, sz);
struct resmem sliz = get_memsize(s);
int n = ITEMS(sz);
byte **block = mb_alloc(&root_pool, n * sizeof(*block));
int cur = 0;
int pending = n;
while (cur + pending > 0) {
int action = bt_random() % (cur + pending);
if (action < cur) {
test_free(s, block[action], sz, &sliz);
if (action != --cur)
block[action] = block[cur];
} else {
block[cur++] = test_alloc(s, sz, &sliz);
pending--;
}
}
mb_free(block);
return 1;
}
int main(int argc, char *argv[])
{
bt_init(argc, argv);
struct test_request tr;
for (uint i = 0; i < sizeof(sizes) / sizeof(*sizes); i++)
{
bt_test_suite_arg(t_slab_forwards, (void *) (intptr_t) sizes[i], "Slab deallocation from beginning to end, size=%d", sizes[i]);
bt_test_suite_arg(t_slab_backwards, (void *) (intptr_t) sizes[i], "Slab deallocation from end to beginning, size=%d", sizes[i]);
bt_test_suite_arg(t_slab_random, (void *) (intptr_t) sizes[i], "Slab deallocation in random order, size=%d", sizes[i]);
bt_test_suite_arg(t_slab_mixed, (void *) (intptr_t) sizes[i], "Slab deallocation in mixed order, size=%d", sizes[i]);
}
for (uint strategy = TEST_FORWARDS; strategy < TEST__MAX; strategy++)
{
tr = (struct test_request) {
.size = sizes[i],
.strategy = strategy,
};
bt_test_suite_arg(t_slab, &tr, "Slab allocator test, size=%d, strategy=%s",
tr.size, strategy_name[strategy]);
}
return bt_exit_value();
}

View File

@@ -345,7 +345,7 @@ neigh_free(neighbor *n)
{
rem_node(&n->n);
rem_node(&n->if_n);
sl_free(neigh_slab, n);
sl_free(n);
}
/**

View File

@@ -154,7 +154,7 @@ rt_prune_sources(void)
{
HASH_DO_REMOVE(src_hash, RSH, sp);
idm_free(&src_ids, src->global_id);
sl_free(rte_src_slab, src);
sl_free(src);
}
}
HASH_WALK_FILTER_END;
@@ -391,7 +391,7 @@ nexthop_free(struct nexthop *o)
while (o)
{
n = o->next;
sl_free(nexthop_slab(o), o);
sl_free(o);
o = n;
}
}
@@ -1231,7 +1231,7 @@ rta__free(rta *a)
nexthop_free(a->nh.next);
ea_free(a->eattrs);
a->cached = 0;
sl_free(rta_slab(a), a);
sl_free(a);
}
rta *

View File

@@ -475,7 +475,7 @@ fib_delete(struct fib *f, void *E)
}
if (f->fib_slab)
sl_free(f->fib_slab, E);
sl_free(E);
else
mb_free(E);

View File

@@ -631,7 +631,7 @@ rte_free(rte *e)
rt_unlock_source(e->src);
if (rta_is_cached(e->attrs))
rta_free(e->attrs);
sl_free(rte_slab, e);
sl_free(e);
}
static inline void
@@ -639,7 +639,7 @@ rte_free_quick(rte *e)
{
rt_unlock_source(e->src);
rta_free(e->attrs);
sl_free(rte_slab, e);
sl_free(e);
}
static int /* Actually better or at least as good as */
@@ -3393,7 +3393,7 @@ hc_delete_hostentry(struct hostcache *hc, pool *p, struct hostentry *he)
rem_node(&he->ln);
hc_remove(hc, he);
sl_free(hc->slab, he);
sl_free(he);
hc->hash_items--;
if (hc->hash_items < hc->hash_min)

View File

@@ -119,7 +119,7 @@ babel_get_source(struct babel_proto *p, struct babel_entry *e, u64 router_id)
}
static void
babel_expire_sources(struct babel_proto *p, struct babel_entry *e)
babel_expire_sources(struct babel_proto *p UNUSED, struct babel_entry *e)
{
struct babel_source *n, *nx;
btime now_ = current_time();
@@ -129,7 +129,7 @@ babel_expire_sources(struct babel_proto *p, struct babel_entry *e)
if (n->expires && n->expires <= now_)
{
rem_node(NODE n);
sl_free(p->source_slab, n);
sl_free(n);
}
}
}
@@ -174,7 +174,7 @@ babel_retract_route(struct babel_proto *p, struct babel_route *r)
}
static void
babel_flush_route(struct babel_proto *p, struct babel_route *r)
babel_flush_route(struct babel_proto *p UNUSED, struct babel_route *r)
{
DBG("Babel: Flush route %N router_id %lR neigh %I\n",
r->e->n.addr, r->router_id, r->neigh->addr);
@@ -185,7 +185,7 @@ babel_flush_route(struct babel_proto *p, struct babel_route *r)
if (r->e->selected == r)
r->e->selected = NULL;
sl_free(p->route_slab, r);
sl_free(r);
}
static void
@@ -336,13 +336,13 @@ found:
}
static void
babel_remove_seqno_request(struct babel_proto *p, struct babel_seqno_request *sr)
babel_remove_seqno_request(struct babel_proto *p UNUSED, struct babel_seqno_request *sr)
{
if (sr->nbr)
rem_node(&sr->nbr_node);
rem_node(NODE sr);
sl_free(p->seqno_slab, sr);
sl_free(sr);
}
static int

View File

@@ -1318,7 +1318,6 @@ babel_send_to(struct babel_iface *ifa, ip_addr dest)
static uint
babel_write_queue(struct babel_iface *ifa, list *queue)
{
struct babel_proto *p = ifa->proto;
struct babel_write_state state = { .next_hop_ip6 = ifa->addr };
if (EMPTY_LIST(*queue))
@@ -1346,7 +1345,7 @@ babel_write_queue(struct babel_iface *ifa, list *queue)
pos += len;
rem_node(NODE msg);
sl_free(p->msg_slab, msg);
sl_free(msg);
}
pos += babel_auth_add_tlvs(ifa, (struct babel_tlv *) pos, end - pos);
@@ -1507,13 +1506,13 @@ babel_process_packet(struct babel_iface *ifa,
else if (res == PARSE_IGNORE)
{
DBG("Babel: Ignoring TLV of type %d\n", tlv->type);
sl_free(p->msg_slab, msg);
sl_free(msg);
}
else /* PARSE_ERROR */
{
LOG_PKT("Bad TLV from %I via %s type %d pos %d - parse error",
saddr, ifa->iface->name, tlv->type, (int) ((byte *)tlv - (byte *)pkt));
sl_free(p->msg_slab, msg);
sl_free(msg);
break;
}
}
@@ -1525,7 +1524,7 @@ babel_process_packet(struct babel_iface *ifa,
if (tlv_data[msg->msg.type].handle_tlv)
tlv_data[msg->msg.type].handle_tlv(&msg->msg, ifa);
rem_node(NODE msg);
sl_free(p->msg_slab, msg);
sl_free(msg);
}
}

View File

@@ -508,7 +508,7 @@ bfd_remove_session(struct bfd_proto *p, struct bfd_session *s)
HASH_REMOVE(p->session_hash_id, HASH_ID, s);
HASH_REMOVE(p->session_hash_ip, HASH_IP, s);
sl_free(p->session_slab, s);
sl_free(s);
TRACE(D_EVENTS, "Session to %I removed", ip);

View File

@@ -1664,7 +1664,7 @@ bgp_free_prefix(struct bgp_channel *c, struct bgp_prefix *px)
HASH_REMOVE2(c->prefix_hash, PXH, c->pool, px);
if (c->prefix_slab)
sl_free(c->prefix_slab, px);
sl_free(px);
else
mb_free(px);
}

View File

@@ -2135,7 +2135,7 @@ ospf_hash_delete(struct top_graph *f, struct top_hash_entry *e)
if (*ee == e)
{
*ee = e->next;
sl_free(f->hash_slab, e);
sl_free(e);
if (f->hash_entries-- < f->hash_entries_min)
ospf_top_rehash(f, -HASH_LO_STEP);
return;

View File

@@ -108,14 +108,14 @@ rip_add_rte(struct rip_proto *p, struct rip_rte **rp, struct rip_rte *src)
}
static inline void
rip_remove_rte(struct rip_proto *p, struct rip_rte **rp)
rip_remove_rte(struct rip_proto *p UNUSED, struct rip_rte **rp)
{
struct rip_rte *rt = *rp;
rip_unlock_neighbor(rt->from);
*rp = rt->next;
sl_free(p->rte_slab, rt);
sl_free(rt);
}
static inline int rip_same_rte(struct rip_rte *a, struct rip_rte *b)