Mirror of https://gitlab.nic.cz/labs/bird.git (synced 2024-11-09 12:48:43 +00:00)
Memory pages allocator is now a global simple lockless structure
parent 3d627d09d4
commit b80823fe82
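The commit replaces the list-kept cache of free memory pages (struct free_pages, trimmed and refilled by a cleanup event) with one global lock-free stack: free_page() pushes a page onto page_stack with a compare-and-swap loop, alloc_page() pops one the same way, and an _Atomic pages_kept counter decides when the page_cleanup event should return pages to the kernel. Below is a minimal, self-contained sketch of that push/pop pattern (a Treiber stack) with illustrative names; it is not BIRD code and leaves out the RCU part the commit needs for safe reclamation.

/* Sketch only: stack_push/stack_pop are made-up names, not BIRD APIs. */
#include <stdatomic.h>
#include <stddef.h>

struct node { struct node * _Atomic next; };
static struct node * _Atomic stack_top = NULL;

static void
stack_push(struct node *n)
{
  struct node *top = atomic_load_explicit(&stack_top, memory_order_acquire);
  do atomic_store_explicit(&n->next, top, memory_order_release);
  while (!atomic_compare_exchange_strong_explicit(
	&stack_top, &top, n, memory_order_acq_rel, memory_order_acquire));
}

static struct node *
stack_pop(void)
{
  struct node *top = atomic_load_explicit(&stack_top, memory_order_acquire);
  while (top && !atomic_compare_exchange_strong_explicit(
	&stack_top, &top, atomic_load_explicit(&top->next, memory_order_acquire),
	memory_order_acq_rel, memory_order_acquire))
    ;	/* a failed CAS has reloaded top; retry until the stack is empty or we win */
  return top;
}

The delicate part is not the CAS loop but reclamation: stack_pop() dereferences top->next while another thread may be about to munmap() that very node. That is why the hunks below wrap the stack operations in rcu_read_lock()/rcu_read_unlock() and make page_cleanup() call synchronize_rcu() before unmapping anything.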
@@ -279,8 +279,8 @@ rlookup(unsigned long a)
 void
 resource_init(void)
 {
-  resource_sys_init();
   rcu_init();
+  resource_sys_init();
 
   root_pool.r.class = &pool_class;
   root_pool.name = "Root";
@@ -122,6 +122,7 @@ void buffer_realloc(void **buf, unsigned *size, unsigned need, unsigned item_size)
 /* Allocator of whole pages; for use in slabs and other high-level allocators. */
 #define PAGE_HEAD(x) ((void *) (((uintptr_t) (x)) & ~(page_size-1)))
 extern long page_size;
+extern _Atomic int pages_kept;
 void *alloc_page(void);
 void free_page(void *);
 
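PAGE_HEAD() rounds a pointer down to the beginning of its page by masking off the low bits of the address, which is only valid when page_size is a power of two (the u64_popcount() check in resource_sys_init() below guards exactly that; other page sizes take the fallback path). A tiny illustration, assuming a 4096-byte page and an arbitrary pointer value:

/* Illustration only; assumes page_size == 4096. */
#include <stdint.h>
#include <stdio.h>

long page_size = 4096;
#define PAGE_HEAD(x) ((void *) (((uintptr_t) (x)) & ~(page_size-1)))

int main(void)
{
  void *obj = (void *) (uintptr_t) 0x7f1234567abcULL;
  printf("%p -> %p\n", obj, PAGE_HEAD(obj));	/* ...7abc -> ...7000 */
  return 0;
}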
@@ -109,7 +109,6 @@ print_size(char *dsc, struct resmem vals)
 
 extern pool *rt_table_pool;
 extern pool *rta_pool;
-extern uint *pages_kept;
 
 void
 cmd_show_memory(void)
@@ -121,8 +120,9 @@ cmd_show_memory(void)
   print_size("Protocols:", rmemsize(proto_pool));
   struct resmem total = rmemsize(&root_pool);
 #ifdef HAVE_MMAP
-  print_size("Standby memory:", (struct resmem) { .overhead = page_size * *pages_kept });
-  total.overhead += page_size * *pages_kept;
+  int pages_kept = atomic_load_explicit(&pages_kept, memory_order_relaxed);
+  print_size("Standby memory:", (struct resmem) { .overhead = page_size * pages_kept });
+  total.overhead += page_size * pages_kept;
 #endif
   print_size("Total:", total);
   cli_msg(0, "");
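A side note on the replacement lines in cmd_show_memory(): in C the scope of a declared identifier begins right after its declarator, so in `int pages_kept = atomic_load_explicit(&pages_kept, memory_order_relaxed);` the `&pages_kept` inside the initializer already refers to the brand-new local int, not to the global _Atomic counter declared in the header hunk above, at least as the hunk reads here. A minimal demonstration using a differently named local (illustrative code, not part of the commit):

/* Demo of the scoping pitfall; not BIRD code. */
#include <stdatomic.h>
#include <stdio.h>

_Atomic int pages_kept = 7;

int main(void)
{
  /* Risky: the initializer's &pages_kept already names the new local:
   *   int pages_kept = atomic_load_explicit(&pages_kept, memory_order_relaxed);
   */

  /* Safe: read the global through a distinct local name. */
  int kept_now = atomic_load_explicit(&pages_kept, memory_order_relaxed);
  printf("%d\n", kept_now);	/* prints 7 */
  return 0;
}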
@@ -10,6 +10,7 @@
 #include "lib/resource.h"
 #include "lib/lists.h"
 #include "lib/event.h"
+#include "lib/rcu.h"
 
 #include <errno.h>
 #include <stdlib.h>
@@ -22,41 +23,32 @@
 long page_size = 0;
 
 #ifdef HAVE_MMAP
-#define KEEP_PAGES_MAIN_MAX 256
-#define KEEP_PAGES_MAIN_MIN 8
-#define CLEANUP_PAGES_BULK 256
+#define KEEP_PAGES_MAX 256
+#define KEEP_PAGES_MIN 8
 
-STATIC_ASSERT(KEEP_PAGES_MAIN_MIN * 4 < KEEP_PAGES_MAIN_MAX);
+STATIC_ASSERT(KEEP_PAGES_MIN * 4 < KEEP_PAGES_MAX);
 
 static _Bool use_fake = 0;
+static _Bool initialized = 0;
 
 #if DEBUGGING
 struct free_page {
   node unused[42];
-  node n;
+  struct free_page * _Atomic next;
 };
 #else
 struct free_page {
-  node n;
+  struct free_page * _Atomic next;
 };
 #endif
 
-struct free_pages {
-  list pages;
-  u16 min, max;		/* Minimal and maximal number of free pages kept */
-  uint cnt;		/* Number of empty pages */
-  event cleanup;
-};
+static struct free_page * _Atomic page_stack = NULL;
 
-static void global_free_pages_cleanup_event(void *);
+static void page_cleanup(void *);
+static event page_cleanup_event = { .hook = page_cleanup, };
+#define SCHEDULE_CLEANUP do if (initialized && !shutting_down) ev_send(&global_event_list, &page_cleanup_event); while (0)
 
-static struct free_pages global_free_pages = {
-  .min = KEEP_PAGES_MAIN_MIN,
-  .max = KEEP_PAGES_MAIN_MAX,
-  .cleanup = { .hook = global_free_pages_cleanup_event },
-};
-
-uint *pages_kept = &global_free_pages.cnt;
+_Atomic int pages_kept = 0;
 
 static void *
 alloc_sys_page(void)
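SCHEDULE_CLEANUP uses the usual do ... while (0) trick so the macro behaves as a single statement (it needs the caller's trailing semicolon and nests safely under an if), and it refuses to touch global_event_list until resource_sys_init() has set initialized. A generic illustration of why the wrapper matters (LOG_IF is a made-up macro, not from this codebase):

#include <stdio.h>

/* The do { } while (0) wrapper turns the body into one statement, so the
 * trailing ';' and a following 'else' both parse the way the caller expects. */
#define LOG_IF(cond, msg) do { if (cond) puts(msg); } while (0)

int main(void)
{
  int verbose = 0, failed = 1;

  if (failed)
    LOG_IF(verbose, "extra detail");
  else
    puts("all good");

  return 0;
}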
@@ -90,20 +82,21 @@ alloc_page(void)
 }
 
 #ifdef HAVE_MMAP
-  struct free_pages *fps = &global_free_pages;
+  rcu_read_lock();
+  struct free_page *fp = atomic_load_explicit(&page_stack, memory_order_acquire);
+  while (fp && !atomic_compare_exchange_strong_explicit(
+	&page_stack, &fp, atomic_load_explicit(&fp->next, memory_order_acquire),
+	memory_order_acq_rel, memory_order_acquire))
+    ;
+  rcu_read_unlock();
 
-  if (fps->cnt)
-  {
-    struct free_page *fp = SKIP_BACK(struct free_page, n, HEAD(fps->pages));
-    rem_node(&fp->n);
-    if ((--fps->cnt < fps->min) && !shutting_down)
-      ev_send(&global_work_list, &fps->cleanup);
+  if (!fp)
+    return alloc_sys_page();
 
-    bzero(fp, page_size);
-    return fp;
-  }
+  if (atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed) <= KEEP_PAGES_MIN)
+    SCHEDULE_CLEANUP;
 
-  return alloc_sys_page();
+  return fp;
 #endif
 }
 
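In the new alloc_page() the CAS loop pops the top of page_stack by swinging the head over to fp->next. Reading fp->next is the racy step: a concurrent page_cleanup() may already have detached fp and be about to munmap() it. The rcu_read_lock()/rcu_read_unlock() pair marks that window, and page_cleanup() (next hunk) calls synchronize_rcu() before unmapping, so the read should never hit an already unmapped page. The pop path restated with editorial comments (the code is the hunk's, the comments are not):

  rcu_read_lock();		/* cleanup must not munmap anything we might still dereference */
  struct free_page *fp = atomic_load_explicit(&page_stack, memory_order_acquire);
  while (fp && !atomic_compare_exchange_strong_explicit(
	&page_stack, &fp,
	atomic_load_explicit(&fp->next, memory_order_acquire),	/* may touch a node cleanup has stolen */
	memory_order_acq_rel, memory_order_acquire))
    ;				/* a failed CAS reloaded fp; retry until the stack is empty or we win */
  rcu_read_unlock();		/* from here on, fp (if any) is exclusively ours */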
@@ -117,45 +110,51 @@ free_page(void *ptr)
 }
 
 #ifdef HAVE_MMAP
-  struct free_pages *fps = &global_free_pages;
+  rcu_read_lock();
   struct free_page *fp = ptr;
+  struct free_page *next = atomic_load_explicit(&page_stack, memory_order_acquire);
 
-  fp->n = (node) {};
-  add_tail(&fps->pages, &fp->n);
+  do atomic_store_explicit(&fp->next, next, memory_order_release);
+  while (!atomic_compare_exchange_strong_explicit(
+	&page_stack, &next, fp,
+	memory_order_acq_rel, memory_order_acquire));
+  rcu_read_unlock();
 
-  if ((++fps->cnt > fps->max) && !shutting_down)
-    ev_send(&global_work_list, &fps->cleanup);
+  if (atomic_fetch_add_explicit(&pages_kept, 1, memory_order_relaxed) >= KEEP_PAGES_MAX)
+    SCHEDULE_CLEANUP;
 #endif
 }
 
 #ifdef HAVE_MMAP
 static void
-global_free_pages_cleanup_event(void *data UNUSED)
+page_cleanup(void *_ UNUSED)
 {
-  if (shutting_down)
+  struct free_page *stack = atomic_exchange_explicit(&page_stack, NULL, memory_order_acq_rel);
+  if (!stack)
     return;
 
-  struct free_pages *fps = &global_free_pages;
+  synchronize_rcu();
 
-  while (fps->cnt / 2 < fps->min)
-  {
-    struct free_page *fp = alloc_sys_page();
-    fp->n = (node) {};
-    add_tail(&fps->pages, &fp->n);
-    fps->cnt++;
-  }
+  do {
+    struct free_page *f = stack;
+    stack = atomic_load_explicit(&f->next, memory_order_acquire);
 
-  for (uint seen = 0; (seen < CLEANUP_PAGES_BULK) && (fps->cnt > fps->max / 2); seen++)
-  {
-    struct free_page *fp = SKIP_BACK(struct free_page, n, TAIL(fps->pages));
-    rem_node(&fp->n);
-
-    if (munmap(fp, page_size) == 0)
-      fps->cnt--;
-    else if (errno == ENOMEM)
-      add_head(&fps->pages, &fp->n);
+    if (munmap(f, page_size) == 0)
+      continue;
+    else if (errno != ENOMEM)
+      bug("munmap(%p) failed: %m", f);
     else
-      bug("munmap(%p) failed: %m", fp);
+      free_page(f);
   }
+  while (stack && (atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed) >= KEEP_PAGES_MAX / 2));
+
+  while (stack)
+  {
+    atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
+
+    struct free_page *f = stack;
+    stack = atomic_load_explicit(&f->next, memory_order_acquire);
+    free_page(f);
+  }
 }
 #endif
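page_cleanup() is the reclamation half of the pattern: one atomic_exchange() detaches the entire stack, synchronize_rcu() waits for every reader that might still hold a pointer into it, and only then are pages munmap()ed, or pushed back via free_page() when the kernel refuses (ENOMEM) or once enough pages have been trimmed. A generic sketch of this detach-then-reclaim shape, with placeholder names and an assumed external synchronize_rcu():

/* Generic sketch; node, head and reclaim_all are illustrative names. */
#include <stdatomic.h>
#include <stdlib.h>

void synchronize_rcu(void);	/* assumed to be provided by the RCU implementation */

struct node { struct node * _Atomic next; };
static struct node * _Atomic head = NULL;

void
reclaim_all(void)
{
  /* 1. Detach everything in one shot; readers may still hold the old head. */
  struct node *n = atomic_exchange_explicit(&head, NULL, memory_order_acq_rel);

  /* 2. Wait until every read-side critical section that could have seen
   *    the old head has finished. */
  synchronize_rcu();

  /* 3. Nothing can reach these nodes any more; free them at leisure. */
  while (n)
  {
    struct node *next = atomic_load_explicit(&n->next, memory_order_acquire);
    free(n);
    n = next;
  }
}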
@@ -164,17 +163,17 @@ void
 resource_sys_init(void)
 {
 #ifdef HAVE_MMAP
-  ASSERT_DIE(global_free_pages.cnt == 0);
 
   if (!(page_size = sysconf(_SC_PAGESIZE)))
     die("System page size must be non-zero");
 
   if (u64_popcount(page_size) == 1)
   {
-    struct free_pages *fps = &global_free_pages;
-
-    init_list(&fps->pages);
-    global_free_pages_cleanup_event(NULL);
+    for (int i = 0; i < (KEEP_PAGES_MIN * 2); i++)
+      free_page(alloc_page());
+
+    page_cleanup(NULL);
+    initialized = 1;
     return;
   }
 
@@ -184,4 +183,5 @@ resource_sys_init(void)
 #endif
 
   page_size = 4096;
+  initialized = 1;
 }