2020-07-21 22:09:15 +00:00
|
|
|
/*
|
|
|
|
* BIRD Internet Routing Daemon -- Raw allocation
|
|
|
|
*
|
|
|
|
* (c) 2020 Maria Matejka <mq@ucw.cz>
|
|
|
|
*
|
|
|
|
* Can be freely distributed and used under the terms of the GNU GPL.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "nest/bird.h"
|
|
|
|
#include "lib/resource.h"
|
2021-11-24 16:30:13 +00:00
|
|
|
#include "lib/lists.h"
|
|
|
|
#include "lib/event.h"
|
2020-07-21 22:09:15 +00:00
|
|
|
|
2022-03-09 08:10:44 +00:00
|
|
|
#include <errno.h>
|
2020-07-21 22:09:15 +00:00
|
|
|
#include <stdlib.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
|
|
|
|
#ifdef HAVE_MMAP
|
|
|
|
#include <sys/mman.h>
|
|
|
|
#endif
|
|
|
|
|
2023-01-17 16:13:50 +00:00
|
|
|
#ifdef CONFIG_DISABLE_THP
|
|
|
|
#include <sys/prctl.h>
|
|
|
|
#endif
|
|
|
|
|
2022-03-09 08:10:44 +00:00
|
|
|
long page_size = 0;	/* System memory page size; autodetected in resource_sys_init() */
|
2021-11-24 16:30:13 +00:00
|
|
|
|
2022-03-09 08:10:44 +00:00
|
|
|
#ifdef HAVE_MMAP
#define KEEP_PAGES_MAIN_MAX	256	/* Upper bound of free pages kept hot */
#define KEEP_PAGES_MAIN_MIN	8	/* Lower bound; cleanup replenishes above this */
#define CLEANUP_PAGES_BULK	256	/* Max pages moved to the cold cache per cleanup run */

/* The cleanup hysteresis relies on min and max being far enough apart
 * (cleanup replenishes while cnt/2 < min and trims while cnt > max/2). */
STATIC_ASSERT(KEEP_PAGES_MAIN_MIN * 4 < KEEP_PAGES_MAIN_MAX);

/* Set when mmap-based allocation is unusable (strange page size);
 * then the posix_memalign() fallback is used instead. */
static _Bool use_fake = 0;
|
2021-11-24 16:30:13 +00:00
|
|
|
|
2022-03-09 08:10:44 +00:00
|
|
|
#if DEBUGGING
/* A free page kept in the hot cache, linked by a node embedded in the page
 * itself. In debugging builds the node is pushed deeper into the page by
 * unused padding — NOTE(review): presumably so that stray writes to the
 * start of a freed page are less likely to corrupt the list; confirm. */
struct free_page {
  node unused[42];
  node n;	/* Linkage in free_pages.pages */
};
#else
/* A free page kept in the hot cache; the list node lives in the page itself. */
struct free_page {
  node n;	/* Linkage in free_pages.pages */
};
#endif
|
|
|
|
|
2022-11-01 17:40:56 +00:00
|
|
|
/* How many cold page pointers fit into one keeper (empty_pages) page */
#define EP_POS_MAX ((page_size - OFFSETOF(struct empty_pages, pages)) / sizeof (void *))

/* A "keeper" page: one of the freed pages itself, reused to store pointers
 * to other cold pages whose contents have been discarded via madvise(). */
struct empty_pages {
  node n;		/* Linkage in free_pages.empty */
  uint pos;		/* Number of valid entries in pages[] */
  void *pages[0];	/* Pointers to cold free pages, up to EP_POS_MAX */
};
|
|
|
|
|
2022-03-09 08:10:44 +00:00
|
|
|
/* State of the page allocator: hot and cold free-page caches plus the
 * event keeping their sizes within bounds. */
struct free_pages {
  list pages;	/* List of (struct free_page) keeping free pages without releasing them (hot) */
  list empty;	/* List of (struct empty_pages) keeping invalidated pages mapped for us (cold) */
  u16 min, max;	/* Minimal and maximal number of free pages kept */
  uint cnt;	/* Number of free pages in list */
  event cleanup;	/* Event replenishing or trimming the hot cache */
};
|
|
|
|
|
|
|
|
static void global_free_pages_cleanup_event(void *);
static void *alloc_cold_page(void);

/* The single global page cache with its cleanup event hook */
static struct free_pages global_free_pages = {
  .min = KEEP_PAGES_MAIN_MIN,
  .max = KEEP_PAGES_MAIN_MAX,
  .cleanup = { .hook = global_free_pages_cleanup_event },
};

/* Exported pointer to the kept-page counter
 * (NOTE(review): presumably read elsewhere for memory statistics — confirm) */
uint *pages_kept = &global_free_pages.cnt;
|
|
|
|
|
|
|
|
static void *
|
|
|
|
alloc_sys_page(void)
|
2020-07-21 22:09:15 +00:00
|
|
|
{
|
2022-03-09 08:10:44 +00:00
|
|
|
void *ptr = mmap(NULL, page_size, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
2020-07-21 22:09:15 +00:00
|
|
|
|
2022-03-09 08:10:44 +00:00
|
|
|
if (ptr == MAP_FAILED)
|
|
|
|
bug("mmap(%lu) failed: %m", page_size);
|
2020-07-21 22:09:15 +00:00
|
|
|
|
2022-03-09 08:10:44 +00:00
|
|
|
return ptr;
|
2020-07-21 22:09:15 +00:00
|
|
|
}
|
|
|
|
|
2022-03-09 08:10:44 +00:00
|
|
|
extern int shutting_down; /* Shutdown requested. */

#else // ! HAVE_MMAP
/* Without mmap(), the posix_memalign() fallback is the only allocator. */
#define use_fake 1
#endif
|
|
|
|
|
2020-07-21 22:09:15 +00:00
|
|
|
/*
 * Allocate one page-aligned block of page_size bytes.
 * Served from the hot cache when possible, otherwise from the cold
 * cache or a fresh mmap() via alloc_cold_page().
 */
void *
alloc_page(void)
{
  /* If the system page allocator is goofy, we use posix_memalign to get aligned blocks of memory. */
  if (use_fake)
  {
    void *ptr = NULL;
    int err = posix_memalign(&ptr, page_size, page_size);

    if (err || !ptr)
      bug("posix_memalign(%lu) failed", (long unsigned int) page_size);

    return ptr;
  }

#ifdef HAVE_MMAP
  struct free_pages *fps = &global_free_pages;

  /* If there is any free page kept hot, we use it. */
  if (fps->cnt)
  {
    /* The page itself carries the list node; recover the page pointer from it. */
    struct free_page *fp = SKIP_BACK(struct free_page, n, HEAD(fps->pages));
    rem_node(&fp->n);

    /* If the hot-free-page cache is getting short, request the cleanup routine to replenish the cache */
    if ((--fps->cnt < fps->min) && !shutting_down)
      ev_schedule(&fps->cleanup);

    return fp;
  }
  else
    return alloc_cold_page();
}

/*
 * Get a page when the hot cache is empty: take a cold page (or an emptied
 * keeper page) if available, otherwise mmap() a new one.
 * Only compiled with HAVE_MMAP; without it, alloc_page() always takes the
 * fake-allocator branch and this function does not exist.
 */
static void *
alloc_cold_page(void)
{
  struct free_pages *fps = &global_free_pages;

  /* If there is any free page kept cold, we use that. */
  if (!EMPTY_LIST(fps->empty))
  {
    struct empty_pages *ep = HEAD(fps->empty);

    /* Either the keeper page contains at least one cold page pointer, return that */
    if (ep->pos)
      return ep->pages[--ep->pos];

    /* Or the keeper page has no more cold page pointer, return the keeper page */
    rem_node(&ep->n);
    return ep;
  }

  /* And in the worst case, allocate a new page by mmap() */
  return alloc_sys_page();
#endif
}
|
|
|
|
|
|
|
|
void
|
|
|
|
free_page(void *ptr)
|
|
|
|
{
|
2022-11-03 11:38:57 +00:00
|
|
|
/* If the system page allocator is goofy, we just free the block and care no more. */
|
2022-03-09 08:10:44 +00:00
|
|
|
if (use_fake)
|
2020-07-21 22:09:15 +00:00
|
|
|
{
|
2022-03-09 08:10:44 +00:00
|
|
|
free(ptr);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef HAVE_MMAP
|
|
|
|
struct free_pages *fps = &global_free_pages;
|
|
|
|
struct free_page *fp = ptr;
|
2021-11-24 16:30:13 +00:00
|
|
|
|
2022-11-03 11:38:57 +00:00
|
|
|
/* Otherwise, we add the free page to the hot-free-page list */
|
2022-03-09 08:10:44 +00:00
|
|
|
fp->n = (node) {};
|
|
|
|
add_tail(&fps->pages, &fp->n);
|
2021-11-24 16:30:13 +00:00
|
|
|
|
2022-11-03 11:38:57 +00:00
|
|
|
/* And if there are too many hot free pages, we ask for page cleanup */
|
2022-03-09 08:10:44 +00:00
|
|
|
if ((++fps->cnt > fps->max) && !shutting_down)
|
|
|
|
ev_schedule(&fps->cleanup);
|
2020-07-21 22:09:15 +00:00
|
|
|
#endif
|
|
|
|
}
|
2021-11-24 16:30:13 +00:00
|
|
|
|
|
|
|
#ifdef HAVE_MMAP
|
|
|
|
static void
|
2022-03-09 08:10:44 +00:00
|
|
|
global_free_pages_cleanup_event(void *data UNUSED)
|
2021-11-24 16:30:13 +00:00
|
|
|
{
|
2022-11-03 11:38:57 +00:00
|
|
|
/* Cleanup on shutdown is ignored. All pages may be kept hot, OS will take care. */
|
2022-03-09 08:10:44 +00:00
|
|
|
if (shutting_down)
|
|
|
|
return;
|
|
|
|
|
|
|
|
struct free_pages *fps = &global_free_pages;
|
|
|
|
|
2022-11-03 11:38:57 +00:00
|
|
|
/* Cleanup may get called when hot free page cache is short of pages. Replenishing. */
|
2022-03-09 08:10:44 +00:00
|
|
|
while (fps->cnt / 2 < fps->min)
|
2023-01-18 08:39:45 +00:00
|
|
|
free_page(alloc_cold_page());
|
2022-03-09 08:10:44 +00:00
|
|
|
|
2022-11-03 11:38:57 +00:00
|
|
|
/* Or the hot free page cache is too big. Moving some pages to the cold free page cache. */
|
|
|
|
for (int limit = CLEANUP_PAGES_BULK; limit && (fps->cnt > fps->max / 2); fps->cnt--, limit--)
|
2022-03-09 08:10:44 +00:00
|
|
|
{
|
|
|
|
struct free_page *fp = SKIP_BACK(struct free_page, n, TAIL(fps->pages));
|
|
|
|
rem_node(&fp->n);
|
2022-11-01 17:40:56 +00:00
|
|
|
|
2022-11-03 11:38:57 +00:00
|
|
|
/* Empty pages are stored as pointers. To store them, we need a pointer block. */
|
2022-11-01 17:40:56 +00:00
|
|
|
struct empty_pages *ep;
|
|
|
|
if (EMPTY_LIST(fps->empty) || ((ep = HEAD(fps->empty))->pos == EP_POS_MAX))
|
|
|
|
{
|
2022-11-03 11:38:57 +00:00
|
|
|
/* There is either no pointer block or the last block is full. We use this block as a pointer block. */
|
2022-11-01 17:40:56 +00:00
|
|
|
ep = (struct empty_pages *) fp;
|
|
|
|
*ep = (struct empty_pages) {};
|
|
|
|
add_head(&fps->empty, &ep->n);
|
|
|
|
}
|
2021-11-24 16:30:13 +00:00
|
|
|
else
|
2022-11-01 17:40:56 +00:00
|
|
|
{
|
2022-11-03 11:38:57 +00:00
|
|
|
/* We store this block as a pointer into the first free place
|
|
|
|
* and tell the OS that the underlying memory is trash. */
|
2022-11-01 17:40:56 +00:00
|
|
|
ep->pages[ep->pos++] = fp;
|
|
|
|
if (madvise(fp, page_size,
|
|
|
|
#ifdef CONFIG_MADV_DONTNEED_TO_FREE
|
|
|
|
MADV_DONTNEED
|
|
|
|
#else
|
|
|
|
MADV_FREE
|
|
|
|
#endif
|
|
|
|
) < 0)
|
|
|
|
bug("madvise(%p) failed: %m", fp);
|
|
|
|
}
|
2021-11-24 16:30:13 +00:00
|
|
|
}
|
2022-11-01 17:40:56 +00:00
|
|
|
|
2022-11-03 11:38:57 +00:00
|
|
|
/* If the hot free page cleanup hit the limit, re-schedule this routine
|
|
|
|
* to allow for other routines to run. */
|
|
|
|
if (fps->cnt > fps->max)
|
2022-11-01 17:40:56 +00:00
|
|
|
ev_schedule(&fps->cleanup);
|
2021-11-24 16:30:13 +00:00
|
|
|
}
|
|
|
|
#endif
|
2022-03-09 08:10:44 +00:00
|
|
|
|
|
|
|
/*
 * One-time allocator setup: disable THP if configured, detect the system
 * page size, and initialize the free-page caches. Falls back to the
 * aligned (posix_memalign) allocator with a fixed 4K page size when the
 * detected page size is unusable or mmap() is unavailable.
 */
void
resource_sys_init(void)
{
#ifdef CONFIG_DISABLE_THP
  /* Disable transparent huge pages, they do not work properly with madvise(MADV_DONTNEED) */
  if (prctl(PR_SET_THP_DISABLE, (unsigned long) 1, (unsigned long) 0, (unsigned long) 0, (unsigned long) 0) < 0)
    die("prctl(PR_SET_THP_DISABLE) failed: %m");
#endif

#ifdef HAVE_MMAP
  /* Must run before any page has been allocated. */
  ASSERT_DIE(global_free_pages.cnt == 0);

  /* Check what page size the system supports */
  if (!(page_size = sysconf(_SC_PAGESIZE)))
    die("System page size must be non-zero");

  if ((u64_popcount(page_size) == 1) && (page_size >= (1 << 10)) && (page_size <= (1 << 18)))
  {
    /* We assume that page size has only one bit and is between 1K and 256K (incl.).
     * Otherwise, the assumptions in lib/slab.c (sl_head's num_full range) aren't met. */

    struct free_pages *fps = &global_free_pages;

    init_list(&fps->pages);
    init_list(&fps->empty);
    /* Pre-fill the hot cache up to the configured minimum. */
    global_free_pages_cleanup_event(NULL);
    return;
  }

  /* Too big or strange page, use the aligned allocator instead */
  log(L_WARN "Got strange memory page size (%ld), using the aligned allocator instead", (s64) page_size);
  use_fake = 1;
#endif

  /* Fallback page size for the aligned allocator */
  page_size = 4096;
}
|