/*
 *	BIRD Resource Manager -- A SLAB-like Memory Allocator
 *
 *	Heavily inspired by the original SLAB paper by Jeff Bonwick.
 *
 *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
 *	(c) 2020 Maria Matejka <mq@jmq.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */

/**
 * DOC: Slabs
 *
 * Slabs are collections of memory blocks of a fixed size.
 * They support very fast allocation and freeing of such blocks, prevent memory
 * fragmentation and optimize L2 cache usage. Slabs have been invented by Jeff Bonwick
 * and published in USENIX proceedings as `The Slab Allocator: An Object-Caching Kernel
 * Memory Allocator'. Our implementation follows this article except that we don't use
 * constructors and destructors.
 *
 * When the |DEBUGGING| switch is turned on, we automatically fill all
 * newly allocated and freed blocks with a special pattern to make detection
 * of use of uninitialized or already freed memory easier.
 *
 * Example: Nodes of a FIB are allocated from a per-FIB Slab.
 */
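
/*
 * A minimal usage sketch (illustrative only; `p` stands for any resource
 * pool obtained elsewhere and `struct my_node` is a hypothetical payload):
 *
 *	slab *s = sl_new(p, sizeof(struct my_node));
 *	struct my_node *a = sl_alloc(s);	// uninitialized block
 *	struct my_node *b = sl_allocz(s);	// zeroed block
 *	sl_free(a);				// blocks go back to the Slab
 *	sl_free(b);
 *	sl_delete(s);				// releases all pages at once
 */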

#include <stdlib.h>
#include <stdint.h>

#include "nest/bird.h"
#include "lib/resource.h"
#include "lib/string.h"
#include "lib/tlists.h"

#undef FAKE_SLAB		/* Turn on if you want to debug memory allocations */

#ifdef DEBUGGING
#define POISON			/* Poison all regions after they are freed */
#endif

static void slab_free(resource *r);
static void slab_dump(struct dump_request *dreq, resource *r);
static resource *slab_lookup(resource *r, unsigned long addr);
static struct resmem slab_memsize(resource *r);


#ifdef FAKE_SLAB

/*
 *  Fake version used for debugging.
 */

struct slab {
  resource r;
  uint size;
  list objs;
};

static struct resclass sl_class = {
  "FakeSlab",
  sizeof(struct slab),
  slab_free,
  slab_dump,
  NULL,
  slab_memsize
};

struct sl_obj {
  node n;
  uintptr_t data_align[0];
  byte data[0];
};

slab *
sl_new(pool *p, uint size)
{
  slab *s = ralloc(p, &sl_class);
  s->size = size;
  init_list(&s->objs);
  return s;
}

void *
sl_alloc(slab *s)
{
  struct sl_obj *o = xmalloc(sizeof(struct sl_obj) + s->size);

  add_tail(&s->objs, &o->n);
  return o->data;
}

void *
sl_allocz(slab *s)
{
  void *obj = sl_alloc(s);
  memset(obj, 0, s->size);
  return obj;
}

void
sl_free(void *oo)
{
  SKIP_BACK_DECLARE(struct sl_obj, o, data, oo);

  rem_node(&o->n);
  xfree(o);
}

static void
slab_free(resource *r)
{
  slab *s = (slab *) r;
  struct sl_obj *o, *p;

  for(o = HEAD(s->objs); p = (struct sl_obj *) o->n.next; o = p)
    xfree(o);
}

static void
slab_dump(struct dump_request *dreq, resource *r)
{
  slab *s = (slab *) r;
  int cnt = 0;
  struct sl_obj *o;

  WALK_LIST(o, s->objs)
    cnt++;
  RDUMP("(%d objects per %d bytes)\n", cnt, s->size);
}

static struct resmem
slab_memsize(resource *r)
{
  slab *s = (slab *) r;
  size_t cnt = 0;
  struct sl_obj *o;

  WALK_LIST(o, s->objs)
    cnt++;

  return (struct resmem) {
    .effective = cnt * s->size,
    .overhead = ALLOC_OVERHEAD + sizeof(struct slab) + cnt * ALLOC_OVERHEAD,
  };
}


#else

/*
 *  Real efficient version.
 */

#define MAX_EMPTY_HEADS 1

enum sl_head_state {
  slh_empty = 2,
  slh_partial = 0,
  slh_full = 1,
} PACKED;

struct sl_head {
  struct slab *slab;
  TLIST_NODE(sl_head, struct sl_head) n;
  u16 num_full;
  enum sl_head_state state;
  u32 used_bits[0];
};

struct sl_alignment {			/* Magic structure for testing of alignment */
  byte data;
  int x[0];
  _Atomic u64 y[0];
  void *z[0];
};

#define TLIST_PREFIX sl_head
#define TLIST_TYPE struct sl_head
#define TLIST_ITEM n
#define TLIST_WANT_WALK
#define TLIST_WANT_ADD_HEAD

#include "lib/tlists.h"

struct slab {
  resource r;
  uint obj_size, head_size, head_bitfield_len;
  uint objs_per_slab, num_empty_heads, data_size;
  struct sl_head_list empty_heads, partial_heads, full_heads;
};

static struct resclass sl_class = {
  "Slab",
  sizeof(struct slab),
  slab_free,
  slab_dump,
  slab_lookup,
  slab_memsize
};

#define SL_GET_HEAD(x)	PAGE_HEAD(x)
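
/*
 * Each struct sl_head sits at the very beginning of its page, so PAGE_HEAD()
 * (a mask down to the page boundary) recovers the head, and via h->slab the
 * owning Slab, from an object pointer alone; no per-object back-pointer is
 * stored. E.g. with page_size == 4096, an object at address 0x7f..5a40 maps
 * to the head at 0x7f..5000 (addresses illustrative).
 */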

#define SL_HEAD_CHANGE_STATE(_s, _h, _from, _to) ({ \
    ASSERT_DIE(_h->state == slh_##_from); \
    sl_head_rem_node(&_s->_from##_heads, _h); \
    sl_head_add_head(&_s->_to##_heads, _h); \
    _h->state = slh_##_to; \
    })
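
/*
 * Example expansion (illustrative): SL_HEAD_CHANGE_STATE(s, h, partial, full)
 * asserts h->state == slh_partial, removes h from s->partial_heads, pushes it
 * to the head of s->full_heads and sets h->state = slh_full. Every head thus
 * lives on exactly one of the three lists, always matching its state.
 */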

/**
 * sl_new - create a new Slab
 * @p: resource pool
 * @size: block size
 *
 * This function creates a new Slab resource from which
 * objects of size @size can be allocated.
 */
slab *
sl_new(pool *p, uint size)
{
  slab *s = ralloc(p, &sl_class);
  uint align = sizeof(struct sl_alignment);
  if (align < sizeof(void *))
    align = sizeof(void *);
  s->data_size = size;
  size = (size + align - 1) / align * align;
  s->obj_size = size;

  s->head_size = sizeof(struct sl_head);

  do {
    s->objs_per_slab = (page_size - s->head_size) / size;
    s->head_bitfield_len = (s->objs_per_slab + 31) / 32;
    s->head_size = (
	sizeof(struct sl_head)
	+ sizeof(u32) * s->head_bitfield_len
	+ align - 1)
      / align * align;
  } while (s->objs_per_slab * size + s->head_size > (size_t) page_size);

  if (!s->objs_per_slab)
    bug("Slab: object too large");
  s->num_empty_heads = 0;

  return s;
}
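
/*
 * A worked example of the sizing loop above, with illustrative numbers
 * (assume page_size == 4096, sizeof(struct sl_head) == 32 and align == 8):
 * for obj_size == 64, the first pass yields objs_per_slab == (4096-32)/64
 * == 63, hence head_bitfield_len == 2 and head_size == 40; the re-check
 * 63*64 + 40 == 4072 <= 4096 holds, so the loop settles after one pass.
 * Had the enlarged head overflowed the page, the next pass would recompute
 * objs_per_slab with the bigger head_size until everything fits.
 */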

/**
 * sl_delete - destroy an existing Slab
 * @s: slab
 *
 * This function destroys the given Slab.
 */
void sl_delete(slab *s)
{
  rfree(&s->r);
}

/**
 * sl_alloc - allocate an object from Slab
 * @s: slab
 *
 * sl_alloc() allocates space for a single object from the
 * Slab and returns a pointer to the object.
 */
void *
sl_alloc(slab *s)
{
  struct sl_head *h;
  ASSERT_DIE(DG_IS_LOCKED(resource_parent(&s->r)->domain));

redo:
  if (!(h = s->partial_heads.first))
    goto no_partial;
okay:
  for (uint i=0; i<s->head_bitfield_len; i++)
    if (~h->used_bits[i])
      {
	uint pos = u32_ctz(~h->used_bits[i]);
	if (i * 32 + pos >= s->objs_per_slab)
	  break;

	h->used_bits[i] |= 1 << pos;
	h->num_full++;

	void *out = ((void *) h) + s->head_size + (i * 32 + pos) * s->obj_size;
#ifdef POISON
	memset(out, 0xcd, s->data_size);
#endif
	return out;
      }

  SL_HEAD_CHANGE_STATE(s, h, partial, full);
  goto redo;

no_partial:
  if (h = s->empty_heads.first)
    {
      SL_HEAD_CHANGE_STATE(s, h, empty, partial);
      s->num_empty_heads--;
      goto okay;
    }

  h = alloc_page();
  ASSERT_DIE(SL_GET_HEAD(h) == h);

#ifdef POISON
  memset(h, 0xba, page_size);
#endif

  memset(h, 0, s->head_size);
  h->slab = s;
  sl_head_add_head(&s->partial_heads, h);
  goto okay;
}
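
/*
 * Illustration of the free-slot scan above (values illustrative): with
 * used_bits[0] == 0x0000ffff, ~used_bits[0] == 0xffff0000 and
 * u32_ctz(0xffff0000) == 16, so slot 16 of this page is handed out and bit
 * 16 gets set. A fully used word (~w == 0) is skipped in one test, keeping
 * the scan proportional to objs_per_slab/32 words rather than to objects.
 */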

/**
 * sl_allocz - allocate an object from Slab and zero it
 * @s: slab
 *
 * sl_allocz() allocates space for a single object from the
 * Slab and returns a pointer to the object after zeroing out
 * the object memory.
 */
void *
sl_allocz(slab *s)
{
  void *obj = sl_alloc(s);
  memset(obj, 0, s->data_size);
  return obj;
}

/**
 * sl_free - return a free object back to a Slab
 * @oo: object returned by sl_alloc()
 *
 * This function frees memory associated with the object @oo
 * and returns it back to the Slab it was allocated from.
 */
void
sl_free(void *oo)
{
  struct sl_head *h = SL_GET_HEAD(oo);
  struct slab *s = h->slab;
  ASSERT_DIE(DG_IS_LOCKED(resource_parent(&s->r)->domain));

#ifdef POISON
  memset(oo, 0xdb, s->data_size);
#endif

  uint offset = oo - ((void *) h) - s->head_size;
  ASSERT_DIE(offset % s->obj_size == 0);
  uint pos = offset / s->obj_size;
  ASSERT_DIE(pos < s->objs_per_slab);

  h->used_bits[pos / 32] &= ~(1 << (pos % 32));

  if ((h->num_full-- == s->objs_per_slab) && (h->state == slh_full))
    SL_HEAD_CHANGE_STATE(s, h, full, partial);
  else if (!h->num_full)
    {
      sl_head_rem_node(&s->partial_heads, h);
      if (s->num_empty_heads >= MAX_EMPTY_HEADS)
	{
#ifdef POISON
	  memset(h, 0xde, page_size);
#endif
	  free_page(h);
	}
      else
	{
	  sl_head_add_head(&s->empty_heads, h);
	  h->state = slh_empty;
	  s->num_empty_heads++;
	}
    }
}

static void
slab_free(resource *r)
{
  slab *s = (slab *) r;

  WALK_TLIST_DELSAFE(sl_head, h, &s->empty_heads)
    free_page(h);
  WALK_TLIST_DELSAFE(sl_head, h, &s->partial_heads)
    free_page(h);
  WALK_TLIST_DELSAFE(sl_head, h, &s->full_heads)
    free_page(h);
}

static void
slab_dump(struct dump_request *dreq, resource *r)
{
  slab *s = (slab *) r;
  int ec=0, pc=0, fc=0;

  RDUMP("(%d objs per %d bytes in page)\n",
      s->objs_per_slab, s->obj_size);

  RDUMP("%*sempty:\n", dreq->indent+3, "");
  WALK_TLIST(sl_head, h, &s->empty_heads)
  {
    RDUMP("%*s%p\n", dreq->indent+6, "", h);
    ec++;
  }

  RDUMP("%*spartial:\n", dreq->indent+3, "");
  WALK_TLIST(sl_head, h, &s->partial_heads)
  {
    RDUMP("%*s%p (", dreq->indent+6, "", h);
    for (uint i=1; i<=s->head_bitfield_len; i++)
      RDUMP("%08x", h->used_bits[s->head_bitfield_len-i]);
    RDUMP(")\n");
    pc++;
  }

  RDUMP("%*sfull:\n", dreq->indent+3, "");
  WALK_TLIST(sl_head, h, &s->full_heads)
  {
    RDUMP("%*s%p\n", dreq->indent+6, "", h);
    fc++;
  }

  RDUMP("%*sempty=%d partial=%d full=%d\n", dreq->indent+3, "", ec, pc, fc);
}

static struct resmem
slab_memsize(resource *r)
{
  slab *s = (slab *) r;
  size_t heads = 0;

  WALK_TLIST(sl_head, h, &s->full_heads)
    heads++;

  size_t items = heads * s->objs_per_slab;

  WALK_TLIST(sl_head, h, &s->partial_heads)
  {
    heads++;
    items += h->num_full;
  }

  WALK_TLIST(sl_head, h, &s->empty_heads)
    heads++;

  size_t eff = items * s->data_size;

  return (struct resmem) {
    .effective = eff,
    .overhead = ALLOC_OVERHEAD + sizeof(struct slab) + heads * page_size - eff,
  };
}

static resource *
slab_lookup(resource *r, unsigned long a)
{
  slab *s = (slab *) r;

  WALK_TLIST(sl_head, h, &s->partial_heads)
    if ((unsigned long) h < a && (unsigned long) h + page_size > a)
      return r;
  WALK_TLIST(sl_head, h, &s->full_heads)
    if ((unsigned long) h < a && (unsigned long) h + page_size > a)
      return r;
  return NULL;
}

#endif