From 479d2c7dca33b0c6c32c6ed5bb0dc8c2a4bfa485 Mon Sep 17 00:00:00 2001
From: Katerina Kubecova
Date: Fri, 13 Dec 2024 12:35:02 +0100
Subject: [PATCH] sysdep/unix/alloc.c: Allow allocating new pages with locked
 rc lock

---
 lib/resource.h       |  1 +
 nest/cmds.c          |  1 +
 sysdep/unix/alloc.c  | 79 ++++++++++++++++++++++++++------------------
 sysdep/unix/domain.c |  2 +-
 4 files changed, 50 insertions(+), 33 deletions(-)

diff --git a/lib/resource.h b/lib/resource.h
index df5509e3..2f4a17d4 100644
--- a/lib/resource.h
+++ b/lib/resource.h
@@ -150,6 +150,7 @@ extern _Atomic int pages_kept;
 extern _Atomic int pages_kept_locally;
 extern _Atomic int pages_kept_cold;
 extern _Atomic int pages_kept_cold_index;
+extern _Atomic int allocated_pages_with_rc_lock;
 void *alloc_page(void);
 void free_page(void *);
 void flush_local_pages(void);
diff --git a/nest/cmds.c b/nest/cmds.c
index a596773c..e1a02a81 100644
--- a/nest/cmds.c
+++ b/nest/cmds.c
@@ -136,6 +136,7 @@ cmd_show_memory(void)
   struct size_args cold = get_size_args(atomic_load_explicit(&pages_kept_cold, memory_order_relaxed) * page_size);
   cli_msg(-1018, "%-23s " SIZE_FORMAT, "Cold memory:", SIZE_ARGS(cold));
 #endif
+  cli_msg(-1028, "Attempts to allocate page with rc lock: %i", atomic_load_explicit(&allocated_pages_with_rc_lock, memory_order_relaxed));
   cli_msg(0, "");
 }
diff --git a/sysdep/unix/alloc.c b/sysdep/unix/alloc.c
index 0432b9e2..ad545326 100644
--- a/sysdep/unix/alloc.c
+++ b/sysdep/unix/alloc.c
@@ -126,6 +126,7 @@ long page_size = 0;
 static struct empty_pages *empty_pages = NULL;
 _Atomic int pages_kept_cold = 0;
 _Atomic int pages_kept_cold_index = 0;
+_Atomic int allocated_pages_with_rc_lock = 0;
 
 static struct free_page * _Atomic page_stack = NULL;
 static _Thread_local struct free_page * local_page_stack = NULL;
@@ -167,6 +168,27 @@ long page_size = 0;
 #define ALLOC_TRACE(fmt...) do { \
   if (atomic_load_explicit(&global_runtime, memory_order_relaxed)->latency_debug & DL_ALLOCATOR) log(L_TRACE "Allocator: " fmt, ##fmt); } while (0)
 
+
+/* Pop the top page off the global hot stack, or return NULL if the stack
+ * is empty. Either way, the stack lock taken by PAGE_STACK_GET is
+ * released again by PAGE_STACK_PUT. */
+static void *
+return_page_if_in_stack(struct free_page *fp)
+{
+  if (fp = PAGE_STACK_GET)
+  {
+    /* Reinstate the stack with the next page in list */
+    PAGE_STACK_PUT(atomic_load_explicit(&fp->next, memory_order_relaxed));
+
+    /* Update the counters */
+    UNUSED uint pk = atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
+
+    /* Release the page */
+    UNPROTECT_PAGE(fp);
+    ajlog(fp, atomic_load_explicit(&fp->next, memory_order_relaxed), pk, AJT_ALLOC_GLOBAL_HOT);
+    return fp;
+  }
+
+  /* Reinstate the stack with zero */
+  PAGE_STACK_PUT(NULL);
+  return NULL;
+}
+
 void *
 alloc_page(void)
 {
@@ -198,62 +220,55 @@ alloc_page(void)
   ASSERT_DIE(pages_kept_here == 0);
 
   /* If there is any free page kept hot in global storage, we use it. */
-  if (fp = PAGE_STACK_GET)
-  {
-    /* Reinstate the stack with the next page in list */
-    PAGE_STACK_PUT(atomic_load_explicit(&fp->next, memory_order_relaxed));
-
-    /* Update the counters */
-    UNUSED uint pk = atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
-
-    /* Release the page */
-    UNPROTECT_PAGE(fp);
-    ajlog(fp, atomic_load_explicit(&fp->next, memory_order_relaxed), pk, AJT_ALLOC_GLOBAL_HOT);
+  if (fp = return_page_if_in_stack(fp))
     return fp;
-  }
-
-  /* Reinstate the stack with zero */
-  PAGE_STACK_PUT(NULL);
 
   if (rcu_read_active())
   {
-    /* We can't lock and we actually shouldn't alloc either when rcu is active
-     * but that's a quest for another day.
-     */
+    /* Allocating under an active RCU reader is still dubious, but the
+     * resource domain may now be locked here (see the domain.c change
+     * below), so just count the event for `show memory` instead. */
+    atomic_fetch_add_explicit(&allocated_pages_with_rc_lock, 1, memory_order_relaxed);
   }
-  else
-  {
     /* If there is any free page kept cold, we use that. */
     LOCK_DOMAIN(resource, empty_pages_domain);
+
+    /* Threads serialize on this lock; the first one through may have
+     * already refilled the hot stack for the others, so check it again.
+     * Unlike above, the domain must be unlocked before returning. */
+    if (fp = return_page_if_in_stack(fp))
+    {
+      UNLOCK_DOMAIN(resource, empty_pages_domain);
+      return fp;
+    }
+
     if (empty_pages)
     {
       UNPROTECT_PAGE(empty_pages);
       if (empty_pages->pos)
       {
-        /* Either the keeper page contains at least one cold page pointer, return that */
-        fp = empty_pages->pages[--empty_pages->pos];
         PROTECT_PAGE(empty_pages);
-        UNPROTECT_PAGE(fp);
+        /* The keeper tracks some cold pages: chain them all into a list
+         * and publish the whole list as the new hot stack in one step */
+        for (uint i = 0; i < empty_pages->pos - 1; i++)
+        {
+          struct free_page *page = (struct free_page *) empty_pages->pages[i];
+          UNPROTECT_PAGE(page);
+          atomic_store_explicit(&page->next, empty_pages->pages[i + 1], memory_order_relaxed);
+          PROTECT_PAGE(page);
+        }
 
         ajlog(fp, empty_pages, empty_pages->pos, AJT_ALLOC_COLD_STD);
-        atomic_fetch_sub_explicit(&pages_kept_cold, 1, memory_order_relaxed);
+        atomic_fetch_sub_explicit(&pages_kept_cold, empty_pages->pos, memory_order_relaxed);
+        /* The chained pages are hot now; keep pages_kept in sync */
+        atomic_fetch_add_explicit(&pages_kept, empty_pages->pos, memory_order_relaxed);
+
+        /* Take the stack lock and splice whatever free_page() pushed
+         * meanwhile under the chain tail, so no page gets dropped */
+        struct free_page *tail = (struct free_page *) empty_pages->pages[empty_pages->pos - 1];
+        UNPROTECT_PAGE(tail);
+        atomic_store_explicit(&tail->next, PAGE_STACK_GET, memory_order_relaxed);
+        PROTECT_PAGE(tail);
+        PAGE_STACK_PUT(empty_pages->pages[0]);
       }
-      else
-      {
-        /* Or the keeper page has no more cold page pointer, return the keeper page */
-        fp = (struct free_page *) empty_pages;
-        empty_pages = empty_pages->next;
-        ajlog(fp, empty_pages, 0, AJT_ALLOC_COLD_KEEPER);
-        atomic_fetch_sub_explicit(&pages_kept_cold_index, 1, memory_order_relaxed);
-        if (!empty_pages)
-          ALLOC_TRACE("Taken last page from cold storage");
-      }
+
+      /* Either way, return the keeper page itself; any cold pages it
+       * tracked are already queued on the hot stack */
+      fp = (struct free_page *) empty_pages;
+      UNPROTECT_PAGE(fp);
+      empty_pages = empty_pages->next;
+      ajlog(fp, empty_pages, 0, AJT_ALLOC_COLD_KEEPER);
+      atomic_fetch_sub_explicit(&pages_kept_cold_index, 1, memory_order_relaxed);
+      if (!empty_pages)
+        ALLOC_TRACE("Taken last page from cold storage");
     }
 
     UNLOCK_DOMAIN(resource, empty_pages_domain);
 
     if (fp)
       return fp;
-  }
-
   /* And in the worst case, allocate some new pages by mmap() */
   void *ptr = alloc_sys_page();
   ajlog(ptr, NULL, 0, AJT_ALLOC_MMAP);
diff --git a/sysdep/unix/domain.c b/sysdep/unix/domain.c
index e76ac2fb..efc6fc85 100644
--- a/sysdep/unix/domain.c
+++ b/sysdep/unix/domain.c
@@ -106,7 +106,7 @@ void do_lock(struct domain_generic *dg, struct domain_generic **lsp)
   memcpy(&stack_copy, &locking_stack, sizeof(stack_copy));
   struct domain_generic **lll = last_locked;
 
-  if (rcu_read_active())
+  if (rcu_read_active() && (dg->order < DOMAIN_ORDER(resource)))
     bug("Locking forbidden while RCU reader is active");
 
   if ((char *) lsp - (char *) &locking_stack != dg->order)
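
The cold-storage path above promotes a whole keeper page worth of cold
pages onto the hot stack in a single lock acquisition, and hands out the
keeper page itself as the allocation result. The following is a minimal
standalone sketch of that promotion; take_cold_page(), EP_POS_MAX and the
struct layouts are simplified stand-ins invented for illustration (no
atomics, no page protection, no locking), not BIRD's real definitions:

    #include <stddef.h>

    #define EP_POS_MAX 4                 /* arbitrary; BIRD derives it from the page size */

    struct free_page { struct free_page *next; };

    struct empty_pages {
      struct empty_pages *next;          /* next keeper page in cold storage */
      unsigned pos;                      /* number of cold page pointers stored */
      void *pages[EP_POS_MAX];           /* the cold pages themselves */
    };

    static struct free_page *page_stack;     /* stand-in for the global hot stack */
    static struct empty_pages *empty_pages;  /* head of cold storage */

    /* Chain every page tracked by the first keeper into a list, publish
     * the list as the new hot stack (keeping whatever was there below it),
     * then return the keeper page itself as this allocation's result. */
    static void *
    take_cold_page(void)
    {
      struct empty_pages *ep = empty_pages;
      if (!ep)
        return NULL;

      if (ep->pos)
      {
        for (unsigned i = 0; i < ep->pos - 1; i++)
          ((struct free_page *) ep->pages[i])->next = ep->pages[i + 1];

        /* The chain tail points at the previous stack top */
        ((struct free_page *) ep->pages[ep->pos - 1])->next = page_stack;
        page_stack = ep->pages[0];
      }

      empty_pages = ep->next;
      return ep;
    }

This is also why the patch subtracts empty_pages->pos from pages_kept_cold
in one step: all tracked pages change state at once, not one per allocation.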
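The two calls to return_page_if_in_stack() encode a check-lock-recheck
pattern: threads queued behind empty_pages_domain may find the hot stack
already refilled by the first thread through, and then need not touch cold
storage at all. A sketch of the same pattern with a plain mutex follows;
all names are hypothetical, and the plain int stack top is deliberately
simple rather than thread-safe (BIRD's real hot stack is manipulated with
atomic exchanges):

    #include <pthread.h>
    #include <stdlib.h>

    static void *hot[16];                 /* toy hot stack */
    static int hot_top;
    static pthread_mutex_t cold_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void *try_pop_hot(void)
    { return hot_top ? hot[--hot_top] : NULL; }

    static void *refill_and_take(void)
    { return malloc(4096); }              /* worst case: fresh memory */

    static void *alloc_page_sketch(void)
    {
      /* Fast path: no lock needed while the hot stack has pages */
      void *p = try_pop_hot();
      if (p)
        return p;

      pthread_mutex_lock(&cold_mutex);

      /* Second chance: another thread may have refilled the stack
       * while we were waiting for the mutex */
      p = try_pop_hot();
      if (!p)
        p = refill_and_take();

      pthread_mutex_unlock(&cold_mutex);
      return p;
    }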
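The domain.c change is what makes the rest of the patch legal: instead of
forbidding every lock inside an RCU read-side section, do_lock() now only
forbids domains ordered below the resource domain, so alloc_page() may
take empty_pages_domain under RCU. A sketch of that rule; the enum values
are made up for illustration, and only their relative order mirrors the
dg->order < DOMAIN_ORDER(resource) comparison:

    #include <stdbool.h>

    enum lock_order {                    /* illustrative ordering only */
      ORDER_CONTROL  = 1,
      ORDER_RESOURCE = 2,                /* empty_pages_domain lives here */
      ORDER_EVENT    = 3,
    };

    static _Thread_local unsigned rcu_nesting;  /* stand-in for rcu_read_active() */

    /* Old rule: any lock under an active RCU reader is a bug.
     * New rule: only domains ordered below the resource domain stay
     * forbidden; the resource domain itself is now allowed. */
    static bool
    lock_allowed_under_rcu(enum lock_order order)
    {
      return !rcu_nesting || order >= ORDER_RESOURCE;
    }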