Mirror of https://gitlab.nic.cz/labs/bird.git, synced 2024-12-22 17:51:53 +00:00
Allow allocating cold pages inside RCU critical section
We have quite large critical sections and we need to allocate inside them. This is something to revise properly later on, but for now, instead of slowly but surely growing the virtual memory address space, it's better to optimize the cold page cache pickup and to count the situations where it happens inside a critical section.
parent af89c43413
commit ef63946d2e
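The gist of the change: when the hot page cache turns out to be empty while an RCU read-side critical section is active, the allocator now proceeds with the cold-cache path anyway and merely counts the event in a relaxed atomic counter that `show memory` reports later. A minimal standalone sketch of that counting pattern follows; the names (`alloc_locking_in_rcu_sketch`, `hot_cache_pop`, `in_rcu_critical_section`) are illustrative stand-ins, not BIRD's API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Counter bumped whenever the hot cache misses inside an RCU read section.
 * Relaxed ordering is enough: the value is a statistic only and never
 * synchronizes any other memory access. */
static _Atomic int alloc_locking_in_rcu_sketch = 0;

static bool in_rcu_critical_section;               /* stand-in for rcu_read_active() */

static void *hot_cache_pop(void) { return NULL; }  /* stand-in: cache depleted */

static void *
allocate(void)
{
  void *p = hot_cache_pop();
  if (p)
    return p;

  if (in_rcu_critical_section)
    atomic_fetch_add_explicit(&alloc_locking_in_rcu_sketch, 1, memory_order_relaxed);

  /* ... fall back to the cold cache or mmap(), as the real allocator does ... */
  return NULL;
}

static void
report(void)
{
  printf("Hot page cache depleted while in RCU: %d\n",
         atomic_load_explicit(&alloc_locking_in_rcu_sketch, memory_order_relaxed));
}

int
main(void)
{
  in_rcu_critical_section = true;
  allocate();
  report();
  return 0;
}

The counter is purely diagnostic, which is why every access in the diff below uses memory_order_relaxed.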
@@ -154,6 +154,7 @@ extern _Atomic int pages_kept_locally;
 extern _Atomic int pages_kept_cold;
 extern _Atomic int pages_kept_cold_index;
 extern _Atomic int pages_total;
+extern _Atomic int alloc_locking_in_rcu;
 void *alloc_page(void);
 void free_page(void *);
 void flush_local_pages(void);

@@ -147,6 +147,7 @@ cmd_show_memory(void)
   cli_msg(-1018, "%-17s " SIZE_FORMAT, "Cold free pages:", SIZE_ARGS(cold));

 #endif
+  cli_msg(-1028, "Hot page cache depleted while in RCU: %d", atomic_load_explicit(&alloc_locking_in_rcu, memory_order_relaxed));
   cli_msg(0, "");
 }

@@ -127,6 +127,7 @@ long page_size = 0;
 _Atomic int pages_kept_cold = 0;
 _Atomic int pages_kept_cold_index = 0;
 _Atomic int pages_total = 0;
+_Atomic int alloc_locking_in_rcu = 0;

 static struct free_page * _Atomic page_stack = NULL;
 static _Thread_local struct free_page * local_page_stack = NULL;

@@ -169,6 +170,27 @@ long page_size = 0;
 #define ALLOC_TRACE(fmt...) do { \
   if (atomic_load_explicit(&global_runtime, memory_order_relaxed)->latency_debug & DL_ALLOCATOR) log(L_TRACE "Allocator: " fmt, ##fmt); } while (0)

+static void *
+alloc_hot_page(struct free_page *fp) {
+  if (fp = PAGE_STACK_GET)
+  {
+    /* Reinstate the stack with the next page in list */
+    PAGE_STACK_PUT(atomic_load_explicit(&fp->next, memory_order_relaxed));
+
+    /* Update the counters */
+    UNUSED uint pk = atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
+
+    /* Release the page */
+    UNPROTECT_PAGE(fp);
+    ajlog(fp, atomic_load_explicit(&fp->next, memory_order_relaxed), pk, AJT_ALLOC_GLOBAL_HOT);
+    return fp;
+  }
+
+  /* Reinstate the stack with zero */
+  PAGE_STACK_PUT(NULL);
+  return NULL;
+}
+
 void *
 alloc_page(void)
 {

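The new alloc_hot_page() helper factors the pop-from-the-global-hot-stack step out of alloc_page() so it can also be retried after taking empty_pages_domain (see the next hunk). The PAGE_STACK_GET / PAGE_STACK_PUT macros themselves are not part of this diff; a simplified, self-contained sketch of the take-ownership-then-reinstate shape they suggest, with made-up names (stack_take, stack_put, STACK_BUSY) that are not BIRD's macros:

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct free_page { struct free_page * _Atomic next; };

static struct free_page * _Atomic page_stack = NULL;

/* Sentinel meaning "somebody else is manipulating the stack right now". */
#define STACK_BUSY  ((struct free_page *) (uintptr_t) 1)

/* Take exclusive ownership of the stack head; spin while another thread
 * holds it. This only mimics the shape of a lock-out stack; it is not
 * BIRD's actual implementation. */
static struct free_page *
stack_take(void)
{
  struct free_page *head;
  while ((head = atomic_exchange_explicit(&page_stack, STACK_BUSY,
                                          memory_order_acq_rel)) == STACK_BUSY)
    ;  /* spin: another thread owns the stack */
  return head;
}

static void
stack_put(struct free_page *head)
{
  atomic_store_explicit(&page_stack, head, memory_order_release);
}

/* Pop one hot page, or return NULL and leave the stack reinstated as empty. */
static void *
pop_hot_page(void)
{
  struct free_page *fp = stack_take();
  if (!fp)
  {
    stack_put(NULL);
    return NULL;
  }
  stack_put(atomic_load_explicit(&fp->next, memory_order_relaxed));
  return fp;
}

int
main(void)
{
  static struct free_page a, b;
  /* Seed the stack with two pages: a -> b -> NULL */
  atomic_store_explicit(&a.next, &b, memory_order_relaxed);
  atomic_store_explicit(&b.next, NULL, memory_order_relaxed);
  stack_put(&a);

  void *p1 = pop_hot_page();  /* &a */
  void *p2 = pop_hot_page();  /* &b */
  void *p3 = pop_hot_page();  /* NULL: stack depleted */
  return !(p1 && p2 && !p3);
}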
@@ -201,62 +223,68 @@ alloc_page(void)
   ASSERT_DIE(pages_kept_here == 0);

   /* If there is any free page kept hot in global storage, we use it. */
-  if (fp = PAGE_STACK_GET)
-  {
-    /* Reinstate the stack with the next page in list */
-    PAGE_STACK_PUT(atomic_load_explicit(&fp->next, memory_order_relaxed));
-
-    /* Update the counters */
-    UNUSED uint pk = atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
-
-    /* Release the page */
-    UNPROTECT_PAGE(fp);
-    ajlog(fp, atomic_load_explicit(&fp->next, memory_order_relaxed), pk, AJT_ALLOC_GLOBAL_HOT);
+  if (fp = alloc_hot_page(fp))
     return fp;
-  }
-
-  /* Reinstate the stack with zero */
-  PAGE_STACK_PUT(NULL);

   if (rcu_read_active())
   {
-    /* We can't lock and we actually shouldn't alloc either when rcu is active
-     * but that's a quest for another day. */
+    /* We shouldn't alloc when rcu is active but that's a quest for another day. */
+    atomic_fetch_add_explicit(&alloc_locking_in_rcu, 1, memory_order_relaxed);
   }
-  else
-  {
-
-    /* If there is any free page kept cold, we use that. */
+  /* If there is any free page kept cold, we warm up some of these. */
   LOCK_DOMAIN(resource, empty_pages_domain);

+  /* Threads were serialized on lock and the first one might have prepared some
+   * blocks for the rest of threads */
+  if (fp = alloc_hot_page(fp))
+  {
+    UNLOCK_DOMAIN(resource, empty_pages_domain);
+    return fp;
+  }
+
   if (empty_pages) {
     UNPROTECT_PAGE(empty_pages);
+
+    /* We flush all the pages in this block to the hot page cache
+     * and return the keeper page as allocated. */
+    ajlog(fp, empty_pages, empty_pages->pos, AJT_ALLOC_COLD_STD);
     if (empty_pages->pos)
     {
-      /* Either the keeper page contains at least one cold page pointer, return that */
-      fp = empty_pages->pages[--empty_pages->pos];
-      PROTECT_PAGE(empty_pages);
-      UNPROTECT_PAGE(fp);
-      ajlog(fp, empty_pages, empty_pages->pos, AJT_ALLOC_COLD_STD);
-      atomic_fetch_sub_explicit(&pages_kept_cold, 1, memory_order_relaxed);
+      /* Link one after another */
+      for (uint i = 0; i < empty_pages->pos - 1; i++)
+        atomic_store_explicit(
+            &((struct free_page *) empty_pages->pages[i])->next,
+            empty_pages->pages[i+1],
+            memory_order_relaxed);
+
+      /* And put into the hot page cache */
+      atomic_store_explicit(
+          &((struct free_page *) empty_pages->pages[empty_pages->pos - 1])->next,
+          PAGE_STACK_GET,
+          memory_order_release);
+      PAGE_STACK_PUT(empty_pages->pages[0]);
+
+      /* Update counters */
+      atomic_fetch_sub_explicit(&pages_kept_cold, empty_pages->pos, memory_order_relaxed);
+      atomic_fetch_add_explicit(&pages_kept, empty_pages->pos, memory_order_relaxed);
     }
-    else
-    {
+
+    /* We can then reuse the old keeper page. */
     /* Or the keeper page has no more cold page pointer, return the keeper page */
     fp = (struct free_page *) empty_pages;
     empty_pages = empty_pages->next;
     ajlog(fp, empty_pages, 0, AJT_ALLOC_COLD_KEEPER);
     atomic_fetch_sub_explicit(&pages_kept_cold_index, 1, memory_order_relaxed);

     if (!empty_pages)
       ALLOC_TRACE("Taken last page from cold storage");
   }
-    }
+
   UNLOCK_DOMAIN(resource, empty_pages_domain);

   if (fp)
     return fp;

-  }

   /* And in the worst case, allocate some new pages by mmap() */
   void *ptr = alloc_sys_page();
   ajlog(ptr, NULL, 0, AJT_ALLOC_MMAP);

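The warm-up path above links the keeper page's array of cold pages into a chain, splices the whole chain onto the hot page stack in one step, and then hands out the keeper page itself as the allocation. A simplified model of that splice, assuming (as in the diff, where empty_pages_domain is held) that a lock serializes the writers so plain release stores suffice; splice_batch and its parameters are illustrative, not BIRD's API:

#include <stdatomic.h>
#include <stddef.h>

struct free_page { struct free_page * _Atomic next; };

/* Link an array of pages one after another and splice the whole chain onto
 * the head of a stack. Assumes the caller holds a lock that serializes all
 * writers of `stack`, mirroring the role of empty_pages_domain. */
static void
splice_batch(struct free_page * _Atomic *stack, void **pages, unsigned n)
{
  if (!n)
    return;

  /* Link one after another */
  for (unsigned i = 0; i < n - 1; i++)
    atomic_store_explicit(&((struct free_page *) pages[i])->next,
                          (struct free_page *) pages[i + 1],
                          memory_order_relaxed);

  /* Attach the current stack head behind the last page of the batch... */
  atomic_store_explicit(&((struct free_page *) pages[n - 1])->next,
                        atomic_load_explicit(stack, memory_order_relaxed),
                        memory_order_release);

  /* ...and publish the first page of the batch as the new head. */
  atomic_store_explicit(stack, (struct free_page *) pages[0], memory_order_release);
}

static struct free_page * _Atomic hot_stack = NULL;

int
main(void)
{
  static struct free_page p[3];
  void *batch[3] = { &p[0], &p[1], &p[2] };
  splice_batch(&hot_stack, batch, 3);
  /* hot_stack now points at p[0], chained through p[1] and p[2]. */
  return atomic_load_explicit(&hot_stack, memory_order_relaxed) != &p[0];
}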
@@ -106,7 +106,7 @@ void do_lock(struct domain_generic *dg, struct domain_generic **lsp)
   memcpy(&stack_copy, &locking_stack, sizeof(stack_copy));
   struct domain_generic **lll = last_locked;

-  if (rcu_read_active())
+  if (rcu_read_active() && (dg->order < DOMAIN_ORDER(resource)))
     bug("Locking forbidden while RCU reader is active");

   if ((char *) lsp - (char *) &locking_stack != dg->order)
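Previously, taking any lock while an RCU reader was active was an outright bug; the relaxed check forbids only domains ordered before `resource` in the locking hierarchy, so the low-level domain guarding the cold page cache may now be taken inside a critical section. A toy illustration of the relaxed condition, with invented order values; BIRD's real DOMAIN_ORDER() constants live elsewhere and a domain's order is its offset inside the locking stack, as the last context line above shows:

#include <stdbool.h>
#include <stdio.h>

/* Invented order values for illustration only. */
enum { ORDER_HIGH_LEVEL = 0x18, ORDER_RESOURCE = 0x38 };

static bool rcu_reader_active;

static void
try_lock(unsigned order)
{
  /* Mirror of the relaxed check: only domains ordered before `resource`
   * stay forbidden while an RCU reader is active. */
  if (rcu_reader_active && (order < ORDER_RESOURCE))
  {
    fprintf(stderr, "bug: locking forbidden while RCU reader is active (order 0x%x)\n", order);
    return;
  }
  printf("order 0x%x: lock permitted under RCU\n", order);
}

int
main(void)
{
  rcu_reader_active = true;
  try_lock(ORDER_RESOURCE);    /* e.g. the cold page cache domain: now allowed */
  try_lock(ORDER_HIGH_LEVEL);  /* a higher-level domain: still a bug */
  return 0;
}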