mirror of https://gitlab.nic.cz/labs/bird.git
Allocator parameters can be configured now.
This commit is contained in:
commit 0fc0e4618f
parent d27c2b6ec5
@@ -722,6 +722,37 @@ include "tablename.conf";;
 	Evaluates given filter expression. It is used by the developers for testing of filters.
 	</descrip>
 
+<sect>Global performance options
+<label id="perf-opts">
+
+<p>The internal scheduler and allocator can be tweaked if needed. You probably
+don't want to do this, yet if you encounter some weird performance problem,
+these knobs may be handy. For now, all the options are concentrated in the
+<cf/memory {}/ block.
+
+<descrip>
+	<tag><label id="memory-global-keep-hot">global keep hot <m/number/</tag>
+	The maximum amount of memory kept hot in the global memory storage.
+	Overflowing memory is returned to the OS; its virtual memory,
+	aka address space, is kept in the cold storage and may be reused later
+	to prevent address space fragmentation problems.
+	Aligned automatically to the system page size.
+	This knob must be higher than the following memory settings.
+	Default: 16777216 (16 MiB).
+
+	<tag><label id="memory-local-keep-hot">local keep hot <m/number/</tag>
+	The maximum amount of memory kept hot in each thread-local memory storage.
+	Overflowing memory is moved to the global hot storage.
+	Aligned automatically to the system page size.
+	This knob must be at least as high as the following setting.
+	Default: 524288 (512 KiB).
+
+	<tag><label id="memory-allocate-block">allocate block <m/number/</tag>
+	How much memory is allocated at once when neither the hot nor the cold
+	storage has any memory available.
+	Aligned automatically to the system page size.
+	Default: 131072 (128 KiB).
+</descrip>
+
 <sect>Routing table options
 <label id="rtable-opts">
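For orientation, a minimal sketch of how the new block could look in bird.conf; the values are illustrative rather than defaults, they respect the ordering rules above, and they are rounded to the page size automatically:

    memory {
      global keep hot 33554432;   # 32 MiB kept hot in total
      local keep hot 1048576;     # 1 MiB kept hot per thread
      allocate block 262144;      # mmap 256 KiB at a time
    }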
@@ -146,16 +146,19 @@ void buffer_realloc(void **buf, unsigned *size, unsigned need, unsigned item_siz
 /* Allocator of whole pages; for use in slabs and other high-level allocators. */
 #define PAGE_HEAD(x) ((void *) (((uintptr_t) (x)) & ~(page_size-1)))
 extern long page_size;
-extern _Atomic int pages_kept;
-extern _Atomic int pages_kept_locally;
-extern _Atomic int pages_kept_cold;
-extern _Atomic int pages_kept_cold_index;
+extern _Atomic uint pages_kept;
+extern _Atomic uint pages_kept_locally;
+extern _Atomic uint pages_kept_cold;
+extern _Atomic uint pages_kept_cold_index;
 void *alloc_page(void);
 void free_page(void *);
 void flush_local_pages(void);
 
 void resource_sys_init(void);
 
+struct alloc_config;
+void alloc_preconfig(struct alloc_config *);
+
 #ifdef HAVE_LIBDMALLOC
 /*
  * The standard dmalloc macros tend to produce lots of namespace
@@ -24,6 +24,12 @@ enum latency_debug_flags {
   DL_TIMERS = 0x40,
 };
 
+struct alloc_config {
+  uint keep_mem_max_global;	/* How much free memory is kept hot in total */
+  uint keep_mem_max_local;	/* How much free memory is kept hot in every thread */
+  uint at_once;			/* How much memory to allocate at once */
+};
+
 #define GLOBAL_RUNTIME_CONTENTS \
   struct timeformat tf_log;		/* Time format for the logfile */ \
   struct timeformat tf_base;		/* Time format for other purposes */ \
@@ -32,6 +38,7 @@ enum latency_debug_flags {
   u32 latency_limit;		/* Events with longer duration are logged (us) */ \
   u32 watchdog_warning;		/* I/O loop watchdog limit for warning (us) */ \
   const char *hostname;		/* Hostname */ \
+  struct alloc_config alloc;	/* Allocation settings */ \
 
 struct global_runtime { GLOBAL_RUNTIME_CONTENTS };
 extern struct global_runtime * _Atomic global_runtime;
@@ -32,13 +32,19 @@
 long page_size = 0;
 
 #ifdef HAVE_MMAP
-# define KEEP_PAGES_MAX	16384
-# define KEEP_PAGES_MIN	32
-# define KEEP_PAGES_MAX_LOCAL	128
-# define ALLOC_PAGES_AT_ONCE	32
-
-STATIC_ASSERT(KEEP_PAGES_MIN * 4 < KEEP_PAGES_MAX);
-STATIC_ASSERT(ALLOC_PAGES_AT_ONCE < KEEP_PAGES_MAX_LOCAL);
+void
+alloc_preconfig(struct alloc_config *ac)
+{
+  ac->keep_mem_max_global = 16777216;
+  ac->keep_mem_max_local = 524288;
+  ac->at_once = 131072;
+}
+
+# define ALLOC_INFO		(&(atomic_load_explicit(&global_runtime, memory_order_relaxed)->alloc))
+# define KEEP_MEM_MAX		ALLOC_INFO->keep_mem_max_global
+# define KEEP_MEM_MAX_LOCAL	ALLOC_INFO->keep_mem_max_local
+# define ALLOC_MEM_AT_ONCE	ALLOC_INFO->at_once
 
 static bool use_fake = 0;
 static bool initialized = 0;
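The net effect of this hunk: the compile-time page-count limits become byte values taken from the runtime configuration. Assuming the usual 4 KiB page size, the old local and block limits map onto the new defaults exactly (128 * 4096 = 524288 and 32 * 4096 = 131072), while the default global hot limit drops from 16384 pages (64 MiB) to 16 MiB. A stand-alone sketch of the translated check, with illustrative parameter names instead of BIRD's globals:

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch only: the old test counted pages, the new one compares the pool
     * size in bytes against a configured limit, so the limit no longer has to
     * be a compile-time constant. Names are illustrative, not BIRD's. */
    static bool
    local_pool_full(unsigned pages_kept_here, unsigned long page_size,
                    unsigned long keep_mem_max_local)
    {
      /* old check: pages_kept_here >= KEEP_PAGES_MAX_LOCAL  (a page count) */
      /* new check: pool size in bytes against the configured byte limit    */
      return (unsigned long) pages_kept_here * page_size >= keep_mem_max_local;
    }

    int main(void)
    {
      /* With 4 KiB pages, the documented default of 524288 bytes matches the
       * old KEEP_PAGES_MAX_LOCAL of 128 pages (128 * 4096 = 524288). */
      printf("127 pages full? %d\n", local_pool_full(127, 4096, 524288)); /* 0 */
      printf("128 pages full? %d\n", local_pool_full(128, 4096, 524288)); /* 1 */
      return 0;
    }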
@@ -124,8 +130,8 @@ long page_size = 0;
 
 static DOMAIN(resource) empty_pages_domain;
 static struct empty_pages *empty_pages = NULL;
-_Atomic int pages_kept_cold = 0;
-_Atomic int pages_kept_cold_index = 0;
+_Atomic uint pages_kept_cold = 0;
+_Atomic uint pages_kept_cold_index = 0;
 
 static struct free_page * _Atomic page_stack = NULL;
 static _Thread_local struct free_page * local_page_stack = NULL;
@@ -143,14 +149,14 @@ long page_size = 0;
 static event page_cleanup_event = { .hook = page_cleanup, };
 # define SCHEDULE_CLEANUP do if (initialized && !shutting_down) ev_send(&global_event_list, &page_cleanup_event); while (0)
 
-_Atomic int pages_kept = 0;
-_Atomic int pages_kept_locally = 0;
-static _Thread_local int pages_kept_here = 0;
+_Atomic uint pages_kept = 0;
+_Atomic uint pages_kept_locally = 0;
+static _Thread_local uint pages_kept_here = 0;
 
 static void *
 alloc_sys_page(void)
 {
-  void *ptr = mmap(NULL, page_size * ALLOC_PAGES_AT_ONCE, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  void *ptr = mmap(NULL, ALLOC_MEM_AT_ONCE, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 
   if (ptr == MAP_FAILED)
     die("mmap(%ld) failed: %m", (s64) page_size);
@@ -258,8 +264,8 @@ alloc_page(void)
     void *ptr = alloc_sys_page();
     ajlog(ptr, NULL, 0, AJT_ALLOC_MMAP);
 
-    for (int i=1; i<ALLOC_PAGES_AT_ONCE; i++)
-      free_page(ptr + page_size * i);
+    for (unsigned long skip = page_size; skip<ALLOC_MEM_AT_ONCE; skip += page_size)
+      free_page(ptr + skip);
 
     return ptr;
 #endif
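Because the mmap'd block is now sized in bytes rather than as a fixed page count, the carving loop walks it by byte offset. A self-contained sketch of the same idea, with a hypothetical take_page() callback standing in for BIRD's free_page():

    #include <stddef.h>
    #include <stdio.h>

    /* Hand out every page of a block except the first one, which the caller
     * keeps for itself; block_size may be any multiple of page_size. */
    static void
    carve_block(char *block, size_t block_size, size_t page_size,
                void (*take_page)(void *))
    {
      for (size_t skip = page_size; skip < block_size; skip += page_size)
        take_page(block + skip);
    }

    static void count_page(void *p) { (void) p; puts("got a spare page"); }

    int main(void)
    {
      static char block[131072];          /* stands in for the mmap'd area */
      carve_block(block, sizeof block, 4096, count_page);  /* 31 spare pages */
      return 0;
    }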
@@ -278,7 +284,7 @@ free_page(void *ptr)
 #ifdef HAVE_MMAP
   /* We primarily try to keep the pages locally. */
   struct free_page *fp = ptr;
-  if (pages_kept_here < KEEP_PAGES_MAX_LOCAL)
+  if (pages_kept_here * page_size < KEEP_MEM_MAX_LOCAL)
   {
     struct free_page *next = local_page_stack;
     atomic_store_explicit(&fp->next, next, memory_order_relaxed);
@@ -304,7 +310,7 @@ free_page(void *ptr)
   ajlog(fp, next, pk, AJT_FREE_GLOBAL_HOT);
 
   /* And if there are too many global hot free pages, we ask for page cleanup */
-  if (pk >= KEEP_PAGES_MAX)
+  if (pk * page_size >= KEEP_MEM_MAX)
     SCHEDULE_CLEANUP;
 #endif
 }
@@ -322,7 +328,7 @@ flush_local_pages(void)
   /* We first count the pages to enable consistency checking.
    * Also, we need to know the last page. */
   struct free_page *last = local_page_stack, *next;
-  int check_count = 1;
+  uint check_count = 1;
   while (next = atomic_load_explicit(&last->next, memory_order_relaxed))
   {
     check_count++;
@@ -348,7 +354,7 @@ flush_local_pages(void)
 
   /* Check the state of global page cache and maybe schedule its cleanup. */
   atomic_fetch_sub_explicit(&pages_kept_locally, check_count, memory_order_relaxed);
-  if (atomic_fetch_add_explicit(&pages_kept, check_count, memory_order_relaxed) >= KEEP_PAGES_MAX)
+  if (atomic_fetch_add_explicit(&pages_kept, check_count, memory_order_relaxed) * page_size >= KEEP_MEM_MAX)
     SCHEDULE_CLEANUP;
 }
 
@@ -362,7 +368,7 @@ page_cleanup(void *_ UNUSED)
 
   /* Pages allocated inbetween */
   uint pk = atomic_load_explicit(&pages_kept, memory_order_relaxed);
-  if (pk < KEEP_PAGES_MAX)
+  if (pk * page_size < KEEP_MEM_MAX)
     return;
 
   /* Walk the pages */
@@ -420,7 +426,7 @@ page_cleanup(void *_ UNUSED)
     UNLOCK_DOMAIN(resource, empty_pages_domain);
     count++;
   }
-  while (atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed) >= KEEP_PAGES_MAX / 2);
+  while (atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed) * page_size >= KEEP_MEM_MAX / 2);
 
   ALLOC_TRACE("Moved %u pages to cold storage, now %u cold, %u index", count,
       atomic_load_explicit(&pages_kept_cold, memory_order_relaxed),
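Reading the free_page() and page_cleanup() hunks together: a cleanup is scheduled once the global hot pool reaches the configured byte limit, and it then keeps moving pages to cold storage until roughly half of that limit stays hot. A quick back-of-the-envelope check, assuming 4 KiB pages and the documented 16 MiB default:

    #include <stdio.h>

    int main(void)
    {
      unsigned long page_size = 4096;                  /* assumed page size  */
      unsigned long keep_mem_max_global = 16777216;    /* documented default */

      /* free_page() asks for a cleanup when pk * page_size >= KEEP_MEM_MAX */
      printf("cleanup kicks in at %lu hot pages\n",
             keep_mem_max_global / page_size);          /* 4096 pages */

      /* page_cleanup() keeps draining while
       * pages_kept * page_size >= KEEP_MEM_MAX / 2 */
      printf("cleanup stops around %lu hot pages\n",
             keep_mem_max_global / 2 / page_size);      /* 2048 pages */
      return 0;
    }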
@@ -450,9 +456,12 @@ resource_sys_init(void)
   /* We assume that page size has only one bit and is between 1K and 256K (incl.).
    * Otherwise, the assumptions in lib/slab.c (sl_head's num_full range) aren't met. */
 
+  alloc_preconfig(&(atomic_load_explicit(&global_runtime, memory_order_relaxed)->alloc));
+
   empty_pages_domain = DOMAIN_NEW(resource);
   DOMAIN_SETUP(resource, empty_pages_domain, "Empty Pages", NULL);
   initialized = 1;
 
   return;
 }
@@ -462,5 +471,7 @@ resource_sys_init(void)
 #endif
 
   page_size = 4096;
+  alloc_preconfig(&(atomic_load_explicit(&global_runtime, memory_order_relaxed)->alloc));
+
   initialized = 1;
 }
@@ -21,6 +21,7 @@ CF_KEYWORDS(LOG, SYSLOG, ALL, DEBUG, TRACE, INFO, REMOTE, WARNING, ERROR, AUTH,
 CF_KEYWORDS(NAME, CONFIRM, UNDO, CHECK, TIMEOUT, DEBUG, LATENCY, LIMIT, WATCHDOG, WARNING, STATUS)
 CF_KEYWORDS(PING, WAKEUP, SOCKETS, SCHEDULING, EVENTS, TIMERS, ALLOCATOR)
 CF_KEYWORDS(GRACEFUL, RESTART, FIXED)
+CF_KEYWORDS(MEMORY, GLOBAL, LOCAL, KEEP, HOT, ALLOCATE, BLOCK)
 
 %type <i> log_mask log_mask_list log_cat cfg_timeout debug_unix latency_debug_mask latency_debug_flag latency_debug_list
 %type <t> cfg_name
@@ -156,6 +157,28 @@ latency_debug_flag:
 | TIMERS { $$ = DL_TIMERS; }
 ;
 
+conf: MEMORY '{' memory_items '}'
+{
+  if (new_config->runtime.alloc.keep_mem_max_global <= new_config->runtime.alloc.keep_mem_max_local)
+    cf_error("Global (%u) hot memory limit must be higher than local (%u)",
+	new_config->runtime.alloc.keep_mem_max_global,
+	new_config->runtime.alloc.keep_mem_max_local);
+
+  if (new_config->runtime.alloc.keep_mem_max_local < new_config->runtime.alloc.at_once)
+    cf_error("Can't allocate more memory at once (%u) than local hot limit (%u)",
+	new_config->runtime.alloc.at_once,
+	new_config->runtime.alloc.keep_mem_max_local);
+}
+
+memory_items:
+ | memory_items GLOBAL KEEP HOT NUM ';' {
+    new_config->runtime.alloc.keep_mem_max_global = BIRD_ALIGN($5, page_size); }
+ | memory_items LOCAL KEEP HOT NUM ';' {
+    new_config->runtime.alloc.keep_mem_max_local = BIRD_ALIGN($5, page_size); }
+ | memory_items ALLOCATE BLOCK NUM ';' {
+    new_config->runtime.alloc.at_once = BIRD_ALIGN($4, page_size); }
+ ;
+
 
 /* Unix specific commands */
 
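The two cf_error() checks above turn misordered limits into a configuration error instead of silent misbehavior. Assuming 4 KiB pages (so BIRD_ALIGN leaves the values unchanged), a sketch like the following would be refused at parse time by the first check, since the global hot limit must stay strictly above the local one:

    memory {
      global keep hot 262144;   # 256 KiB in total ...
      local keep hot 524288;    # ... but 512 KiB per thread: rejected
    }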
@@ -194,6 +194,8 @@ sysdep_preconfig(struct config *c)
   c->runtime.latency_limit = UNIX_DEFAULT_LATENCY_LIMIT;
   c->runtime.watchdog_warning = UNIX_DEFAULT_WATCHDOG_WARNING;
 
+  alloc_preconfig(&c->runtime.alloc);
+
 #ifdef PATH_IPROUTE_DIR
   read_iproute_table(c, PATH_IPROUTE_DIR "/rt_protos", "ipp_", 255);
   read_iproute_table(c, PATH_IPROUTE_DIR "/rt_realms", "ipr_", 0xffffffff);
@@ -557,7 +557,9 @@ void cmd_reconfig_undo_notify(void) {}
 #include "nest/bird.h"
 #include "lib/net.h"
 #include "conf/conf.h"
-void sysdep_preconfig(struct config *c UNUSED) {}
+void sysdep_preconfig(struct config *c) {
+  alloc_preconfig(&c->runtime.alloc);
+}
 
 void bird_thread_commit(struct thread_config *new);
 void sysdep_commit(struct config *new, struct config *old UNUSED)