
sysdep/unix/alloc.c: Allowing allocating new pages with locked rc lock

Katerina Kubecova 2024-12-13 12:35:02 +01:00
parent cdc17da4b8
commit 479d2c7dca
4 changed files with 50 additions and 33 deletions


@@ -150,6 +150,7 @@ extern _Atomic int pages_kept;
 extern _Atomic int pages_kept_locally;
 extern _Atomic int pages_kept_cold;
 extern _Atomic int pages_kept_cold_index;
+extern _Atomic int allocated_pages_with_rc_lock;
 void *alloc_page(void);
 void free_page(void *);
 void flush_local_pages(void);


@@ -136,6 +136,7 @@ cmd_show_memory(void)
   struct size_args cold = get_size_args(atomic_load_explicit(&pages_kept_cold, memory_order_relaxed) * page_size);
   cli_msg(-1018, "%-23s " SIZE_FORMAT, "Cold memory:", SIZE_ARGS(cold));
 #endif
+  cli_msg(-1028, "Attempt to allocate page with rc lock: %i times", atomic_load_explicit(&allocated_pages_with_rc_lock, memory_order_relaxed));
   cli_msg(0, "");
 }
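Editor's note: the counter wired up above follows the plain C11 atomics pattern, a relaxed fetch-add at the event site and a relaxed load when the statistic is printed. A minimal standalone sketch of that pattern, with hypothetical names and printf() standing in for BIRD's cli_msg():

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for the counter declared in the header above. */
static _Atomic int rc_lock_alloc_attempts = 0;

static void note_alloc_under_rcu(void)
{
  /* Relaxed ordering is enough: the value is only a statistic. */
  atomic_fetch_add_explicit(&rc_lock_alloc_attempts, 1, memory_order_relaxed);
}

int main(void)
{
  note_alloc_under_rcu();
  printf("Attempt to allocate page with rc lock: %i times\n",
         atomic_load_explicit(&rc_lock_alloc_attempts, memory_order_relaxed));
  return 0;
}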


@@ -126,6 +126,7 @@ long page_size = 0;
 static struct empty_pages *empty_pages = NULL;
 _Atomic int pages_kept_cold = 0;
 _Atomic int pages_kept_cold_index = 0;
+_Atomic int allocated_pages_with_rc_lock = 0;

 static struct free_page * _Atomic page_stack = NULL;
 static _Thread_local struct free_page * local_page_stack = NULL;
@@ -167,6 +168,27 @@ long page_size = 0;
 #define ALLOC_TRACE(fmt...) do { \
   if (atomic_load_explicit(&global_runtime, memory_order_relaxed)->latency_debug & DL_ALLOCATOR) log(L_TRACE "Allocator: " fmt, ##fmt); } while (0)

+static void *
+return_page_if_in_stack(struct free_page *fp)
+{
+  if (fp = PAGE_STACK_GET)
+  {
+    /* Reinstate the stack with the next page in list */
+    PAGE_STACK_PUT(atomic_load_explicit(&fp->next, memory_order_relaxed));
+
+    /* Update the counters */
+    UNUSED uint pk = atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
+
+    /* Release the page */
+    UNPROTECT_PAGE(fp);
+    ajlog(fp, atomic_load_explicit(&fp->next, memory_order_relaxed), pk, AJT_ALLOC_GLOBAL_HOT);
+    return fp;
+  }
+
+  /* Reinstate the stack with zero */
+  PAGE_STACK_PUT(NULL);
+  return NULL;
+}
+
 void *
 alloc_page(void)
 {
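Editor's note: the new return_page_if_in_stack() helper factors out the "pop one hot page from the global stack" step so it can be called both before and after taking the empty_pages lock, as the next hunk shows. A simplified model of that pop, with a plain mutex standing in for the PAGE_STACK_GET/PUT machinery (an assumption made only for this sketch):

#include <pthread.h>
#include <stddef.h>

struct demo_free_page { struct demo_free_page *next; };

static struct demo_free_page *demo_page_stack;
static pthread_mutex_t demo_stack_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pop one page from the shared hot stack, or return NULL if it is empty. */
static void *demo_pop_hot_page(void)
{
  pthread_mutex_lock(&demo_stack_lock);
  struct demo_free_page *fp = demo_page_stack;
  if (fp)
    demo_page_stack = fp->next;   /* reinstate the stack with the next page */
  pthread_mutex_unlock(&demo_stack_lock);
  return fp;
}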
@@ -198,62 +220,55 @@ alloc_page(void)
   ASSERT_DIE(pages_kept_here == 0);

   /* If there is any free page kept hot in global storage, we use it. */
-  if (fp = PAGE_STACK_GET)
-  {
-    /* Reinstate the stack with the next page in list */
-    PAGE_STACK_PUT(atomic_load_explicit(&fp->next, memory_order_relaxed));
-
-    /* Update the counters */
-    UNUSED uint pk = atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
-
-    /* Release the page */
-    UNPROTECT_PAGE(fp);
-    ajlog(fp, atomic_load_explicit(&fp->next, memory_order_relaxed), pk, AJT_ALLOC_GLOBAL_HOT);
+  if (fp = return_page_if_in_stack(fp))
     return fp;
-  }
-
-  /* Reinstate the stack with zero */
-  PAGE_STACK_PUT(NULL);

   if (rcu_read_active())
   {
     /* We can't lock and we actually shouldn't alloc either when rcu is active
      * but that's a quest for another day. */
+    atomic_fetch_add_explicit(&allocated_pages_with_rc_lock, 1, memory_order_relaxed);
   }
-  else
-  {

   /* If there is any free page kept cold, we use that. */
   LOCK_DOMAIN(resource, empty_pages_domain);

+  /* Threads were serialized on lock, the first one might prepare some blocks for the rest of threads */
+  if (fp = return_page_if_in_stack(fp))
+    return fp;
+
   if (empty_pages) {
     UNPROTECT_PAGE(empty_pages);
     if (empty_pages->pos)
     {
       /* Either the keeper page contains at least one cold page pointer, return that */
-      fp = empty_pages->pages[--empty_pages->pos];
       PROTECT_PAGE(empty_pages);
-      UNPROTECT_PAGE(fp);
+      for (uint i = 0; i < empty_pages->pos - 1; i++)
+      {
+        struct empty_pages *page = (struct empty_pages *) empty_pages->pages[i];
+        page->next = empty_pages->pages[i + 1];
+      }
+      struct empty_pages *page = (struct empty_pages *) empty_pages->pages[empty_pages->pos - 1];
+      page->next = NULL;
+
       ajlog(fp, empty_pages, empty_pages->pos, AJT_ALLOC_COLD_STD);
-      atomic_fetch_sub_explicit(&pages_kept_cold, 1, memory_order_relaxed);
+      atomic_fetch_sub_explicit(&pages_kept_cold, empty_pages->pos, memory_order_relaxed);
+      PAGE_STACK_GET;
+      PAGE_STACK_PUT(empty_pages->pages[0]);
     }
-    else
-    {
-      /* Or the keeper page has no more cold page pointer, return the keeper page */
-      fp = (struct free_page *) empty_pages;
-      empty_pages = empty_pages->next;
-      ajlog(fp, empty_pages, 0, AJT_ALLOC_COLD_KEEPER);
-      atomic_fetch_sub_explicit(&pages_kept_cold_index, 1, memory_order_relaxed);
-      if (!empty_pages)
-        ALLOC_TRACE("Taken last page from cold storage");
-    }
+
+    /* Or the keeper page has no more cold page pointer, return the keeper page */
+    fp = (struct free_page *) empty_pages;
+    empty_pages = empty_pages->next;
+    ajlog(fp, empty_pages, 0, AJT_ALLOC_COLD_KEEPER);
+    atomic_fetch_sub_explicit(&pages_kept_cold_index, 1, memory_order_relaxed);
+    if (!empty_pages)
+      ALLOC_TRACE("Taken last page from cold storage");
   }
   UNLOCK_DOMAIN(resource, empty_pages_domain);

   if (fp)
     return fp;
-  }

   /* And in the worst case, allocate some new pages by mmap() */
   void *ptr = alloc_sys_page();
   ajlog(ptr, NULL, 0, AJT_ALLOC_MMAP);
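Editor's note: the second return_page_if_in_stack() call, made right after LOCK_DOMAIN(), is a re-check-under-lock pattern: threads that queued on the lock may find the hot stack already refilled by whichever thread got in first, so they take a page from it instead of draining more cold storage or calling mmap(). A generic sketch of that pattern, with made-up names and a single-slot cache in place of a full page stack:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

static void *_Atomic cached_page = NULL;                 /* shared fast path */
static pthread_mutex_t slow_path_lock = PTHREAD_MUTEX_INITIALIZER;

/* Atomically take whatever is cached, leaving NULL behind. */
static void *try_fast_path(void)
{
  return atomic_exchange_explicit(&cached_page, NULL, memory_order_acq_rel);
}

void *get_page(void)
{
  void *p = try_fast_path();
  if (p)
    return p;

  pthread_mutex_lock(&slow_path_lock);
  /* Threads were serialized on the lock; the first one through may have
   * refilled the cache, so look again before falling back to malloc(). */
  p = try_fast_path();
  if (!p)
    p = malloc(4096);            /* stand-in for the mmap() fallback */
  pthread_mutex_unlock(&slow_path_lock);
  return p;
}

void put_page(void *p)
{
  /* Park the page for the next allocation if the cache slot is empty. */
  void *expected = NULL;
  if (!atomic_compare_exchange_strong_explicit(&cached_page, &expected, p,
        memory_order_acq_rel, memory_order_relaxed))
    free(p);
}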


@@ -106,7 +106,7 @@ void do_lock(struct domain_generic *dg, struct domain_generic **lsp)
   memcpy(&stack_copy, &locking_stack, sizeof(stack_copy));
   struct domain_generic **lll = last_locked;

-  if (rcu_read_active())
+  if (rcu_read_active() && (dg->order < DOMAIN_ORDER(resource)))
     bug("Locking forbidden while RCU reader is active");

   if ((char *) lsp - (char *) &locking_stack != dg->order)
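Editor's note: with this change the RCU assertion in do_lock() fires only for domains ordered below the resource domain, so a resource-order lock such as empty_pages_domain can now be taken inside an RCU read section, which the alloc_page() path above relies on. A minimal sketch of such an order-gated check, with invented order constants and a thread-local depth counter standing in for rcu_read_active():

#include <stdio.h>
#include <stdlib.h>

/* Invented lock orders; only their relative values matter here. */
enum demo_order { DEMO_ORDER_CONTROL = 16, DEMO_ORDER_RESOURCE = 32, DEMO_ORDER_RTABLE = 64 };

static _Thread_local int demo_rcu_depth;   /* > 0 inside an RCU read section */

static void demo_check_lock_order(enum demo_order order)
{
  /* Allow the resource order (and anything at or above it) while an RCU
   * reader is active; refuse everything ordered below it. */
  if (demo_rcu_depth && order < DEMO_ORDER_RESOURCE)
  {
    fprintf(stderr, "Locking forbidden while RCU reader is active\n");
    abort();
  }
  /* ... the real code would acquire the lock here ... */
}

int main(void)
{
  demo_rcu_depth = 1;                          /* pretend we are inside an RCU read section */
  demo_check_lock_order(DEMO_ORDER_RESOURCE);  /* allowed */
  demo_check_lock_order(DEMO_ORDER_CONTROL);   /* aborts */
  return 0;
}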