diff --git a/lib/defer.h b/lib/defer.h
index 6fed6024..220a3281 100644
--- a/lib/defer.h
+++ b/lib/defer.h
@@ -54,4 +54,10 @@ defer_call(struct deferred_call *call, size_t actual_size)
   return a;
 }
 
+static inline void
+defer_expect(size_t size)
+{
+  lp_prealloc(local_deferred.lp, size);
+}
+
 #endif
diff --git a/lib/lockfree.h b/lib/lockfree.h
index 8dc8f82d..b696bfb9 100644
--- a/lib/lockfree.h
+++ b/lib/lockfree.h
@@ -130,6 +130,11 @@ static inline void lfuc_unlock(struct lfuc *c, event_list *el, event *ev)
   defer_call(&luqi.dc, sizeof luqi);
 }
 
+static inline void lfuc_unlock_expected(uint count)
+{
+  defer_expect(count * sizeof (struct lfuc_unlock_queue_item));
+}
+
 /**
  * lfuc_finished - auxiliary routine for prune event
  * @c: usecount structure
diff --git a/lib/mempool.c b/lib/mempool.c
index 5940e571..b2628031 100644
--- a/lib/mempool.c
+++ b/lib/mempool.c
@@ -141,6 +141,23 @@ lp_alloc_slow(linpool *m, uint size)
   return c->data;
 }
 
+void
+lp_prealloc(linpool *m, uint size)
+{
+  ASSERT_DIE(DG_IS_LOCKED(resource_parent(&m->r)->domain));
+
+  if (size > LP_DATA_SIZE)
+    bug("Requested block size is too big to prealloc");
+
+  byte *a = (byte *) BIRD_ALIGN((unsigned long) m->ptr, CPU_STRUCT_ALIGN);
+  byte *e = a + size;
+
+  if (e <= m->end)
+    return;
+
+  lp_alloc_slow(m, size);
+}
+
 /**
  * lp_allocu - allocate unaligned memory from a &linpool
  * @m: linear memory pool
diff --git a/lib/netindex.c b/lib/netindex.c
index 1bc43377..5e9321c3 100644
--- a/lib/netindex.c
+++ b/lib/netindex.c
@@ -302,6 +302,8 @@ void net_unlock_index(netindex_hash *h, struct netindex *i)
 struct netindex *
 net_find_index(netindex_hash *h, const net_addr *n)
 {
+  lfuc_unlock_expected(1);
+
   RCU_ANCHOR(u);
   struct netindex *ni = net_find_index_fragile(h, n);
   return (ni && net_validate_index(h, ni)) ? net_lock_revive_unlock(h, ni) : NULL;
diff --git a/lib/resource.h b/lib/resource.h
index df5509e3..4cfee819 100644
--- a/lib/resource.h
+++ b/lib/resource.h
@@ -103,6 +103,7 @@ void *lp_allocz(linpool *, unsigned size) ALLOC_SIZE(2); /* With clear */
 void lp_flush(linpool *);                  /* Free everything, but leave linpool */
 lp_state *lp_save(linpool *m);             /* Save state */
 void lp_restore(linpool *m, lp_state *p);  /* Restore state */
+void lp_prealloc(linpool *m, uint size);   /* Make sure we will be able to allocate the memory without requesting new blocks */
 
 static inline void lp_saved_cleanup(struct lp_state **lps)
 {
@@ -150,6 +151,7 @@ extern _Atomic int pages_kept;
 extern _Atomic int pages_kept_locally;
 extern _Atomic int pages_kept_cold;
 extern _Atomic int pages_kept_cold_index;
+extern _Atomic int cold_memory_failed_to_use;
 void *alloc_page(void);
 void free_page(void *);
 void flush_local_pages(void);
diff --git a/nest/cmds.c b/nest/cmds.c
index a596773c..9c55d6fb 100644
--- a/nest/cmds.c
+++ b/nest/cmds.c
@@ -136,6 +136,7 @@ cmd_show_memory(void)
   struct size_args cold = get_size_args(atomic_load_explicit(&pages_kept_cold, memory_order_relaxed) * page_size);
   cli_msg(-1018, "%-23s " SIZE_FORMAT, "Cold memory:", SIZE_ARGS(cold));
 #endif
+  cli_msg(-1028, "Failed to use cold memory: %i times", atomic_load_explicit(&cold_memory_failed_to_use, memory_order_relaxed));
   cli_msg(0, "");
 }
 
diff --git a/nest/rt-table.c b/nest/rt-table.c
index cd37d947..ab207497 100644
--- a/nest/rt-table.c
+++ b/nest/rt-table.c
@@ -750,6 +750,8 @@ enum aspa_result aspa_check(rtable *tab, const adata *path, bool force_upstream)
    * and check allowed directions */
   uint max_up = 0, min_up = 0, max_down = 0, min_down = 0;
 
+  lfuc_unlock_expected(5);  /* Deferring linpool memory reservation to avoid skipping cold memory */
+
   RT_READ(tab, tr);
 
   for (uint ap=0; aptable, tab) { u32 bs = atomic_load_explicit(&tab->routes_block_size, memory_order_acquire);
@@ -2481,6 +2485,8 @@ rt_net_feed_internal(struct rtable_reading *tr, u32 index, bool (*prefilter)(str
 struct rt_export_feed *
 rt_net_feed(rtable *t, const net_addr *a, const struct rt_pending_export *first)
 {
+  lfuc_unlock_expected(5);  /* Deferring linpool memory reservation to avoid skipping cold memory */
+
   RT_READ(t, tr);
   const struct netindex *ni = net_find_index(tr->t->netindex, a);
   return ni ? rt_net_feed_internal(tr, ni->index, NULL, NULL, first) : NULL;
@@ -2498,6 +2504,8 @@ rt_net_best(rtable *t, const net_addr *a)
 {
   rte rt = {};
 
+  lfuc_unlock_expected(5);  /* Deferring linpool memory reservation to avoid skipping cold memory */
+
   RT_READ(t, tr);
 
   struct netindex *i = net_find_index(t->netindex, a);
diff --git a/sysdep/unix/alloc.c b/sysdep/unix/alloc.c
index 0432b9e2..d7b3f095 100644
--- a/sysdep/unix/alloc.c
+++ b/sysdep/unix/alloc.c
@@ -126,6 +126,7 @@ long page_size = 0;
 static struct empty_pages *empty_pages = NULL;
 _Atomic int pages_kept_cold = 0;
 _Atomic int pages_kept_cold_index = 0;
+_Atomic int cold_memory_failed_to_use = 0;
 
 static struct free_page * _Atomic page_stack = NULL;
 static _Thread_local struct free_page * local_page_stack = NULL;
@@ -219,6 +220,7 @@ alloc_page(void)
   {
     /* We can't lock and we actually shouldn't alloc either when rcu is active
      * but that's a quest for another day. */
+    atomic_fetch_add_explicit(&cold_memory_failed_to_use, 1, memory_order_relaxed);
   }
   else
   {
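
Note (not part of the patch): a minimal sketch of the guarantee lp_prealloc() is intended to give, using the existing linpool allocator lp_alloc(); struct item and prepare_item() below are hypothetical names introduced only for illustration. Reserving the space up front means a later allocation of at most the same size is served from the block that is already present, so lp_alloc_slow() does not run inside the section that follows (provided nothing else allocates from the pool in between):

    /* Hypothetical sketch -- not part of the patch. */
    struct item { u32 key, val; };

    static struct item *
    prepare_item(linpool *lp)
    {
      /* Reserve room while requesting a new block is still acceptable. */
      lp_prealloc(lp, sizeof(struct item));

      /* ... enter a section where grabbing fresh pages is undesirable ... */

      /* Fits into the block reserved above; no lp_alloc_slow() here. */
      struct item *it = lp_alloc(lp, sizeof(struct item));
      it->key = it->val = 0;
      return it;
    }

This mirrors the chain added above: lfuc_unlock_expected() -> defer_expect() -> lp_prealloc() on the deferred-call linpool, so the lfuc_unlock() items queued under the RCU read side never force a fresh block while cold memory cannot be used.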