mirror of https://gitlab.nic.cz/labs/bird.git synced 2024-12-22 09:41:54 +00:00

alloc.c, mempool.c, netindex.c: before taking the RCU lock, make sure we will be able to allocate enough memory without requesting a new page. (Requesting new pages while the RCU lock is held causes cold memory to grow.)

Katerina Kubecova 2024-12-11 15:24:32 +01:00
parent 3f62ec5477
commit 9103eff734
8 changed files with 43 additions and 0 deletions
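The caller-side pattern the commit introduces, as a minimal sketch (it uses only identifiers that appear in the hunks below; the surrounding context is hypothetical):

  /* Reserve deferred-call space before taking the RCU read lock, so the
   * unlock bookkeeping done under the lock never has to request a new page. */
  lfuc_unlock_expected(1);   /* expect one lfuc_unlock() inside the locked section */
  RCU_ANCHOR(u);             /* RCU read-side critical section starts here */
  /* ... lookups that end in lfuc_unlock() now allocate only from the reserved space ... */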


@@ -54,4 +54,10 @@ defer_call(struct deferred_call *call, size_t actual_size)
   return a;
 }
 
+static inline void
+defer_expect(size_t size)
+{
+  lp_prealloc(local_deferred.lp, size);
+}
+
 #endif


@@ -130,6 +130,11 @@ static inline void lfuc_unlock(struct lfuc *c, event_list *el, event *ev)
   defer_call(&luqi.dc, sizeof luqi);
 }
 
+static inline void lfuc_unlock_expected(uint count)
+{
+  defer_expect(count * sizeof (struct lfuc_unlock_queue_item));
+}
+
 /**
  * lfuc_finished - auxiliary routine for prune event
  * @c: usecount structure
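Read together with the defer.h hunk above, the reservation chain looks roughly like this (a sketch inferred from these hunks only; the count 3 is an arbitrary example):

  lfuc_unlock_expected(3);
  /* expands to roughly: */
  defer_expect(3 * sizeof(struct lfuc_unlock_queue_item));
  /* which preallocates in the thread-local deferred-call linpool: */
  lp_prealloc(local_deferred.lp, 3 * sizeof(struct lfuc_unlock_queue_item));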


@@ -141,6 +141,23 @@ lp_alloc_slow(linpool *m, uint size)
   return c->data;
 }
 
+void
+lp_prealloc(linpool *m, uint size)
+{
+  ASSERT_DIE(DG_IS_LOCKED(resource_parent(&m->r)->domain));
+
+  if (size > LP_DATA_SIZE)
+    bug("Requested block size is too big to prealloc");
+
+  byte *a = (byte *) BIRD_ALIGN((unsigned long) m->ptr, CPU_STRUCT_ALIGN);
+  byte *e = a + size;
+
+  if (e <= m->end)
+    return;
+
+  lp_alloc_slow(m, size);
+}
+
 /**
  * lp_allocu - allocate unaligned memory from a &linpool
  * @m: linear memory pool
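A minimal usage sketch for the new lp_prealloc() (hypothetical caller: lp and struct my_item are made-up names; lp_alloc() is the existing linpool allocator):

  /* Reserve room while it is still safe to grow the pool ... */
  lp_prealloc(lp, sizeof(struct my_item));            /* may call lp_alloc_slow() now */
  /* ... so that a later allocation in a restricted section is served from the
   * current chunk and never has to request a new page. */
  struct my_item *it = lp_alloc(lp, sizeof(struct my_item));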


@@ -302,6 +302,8 @@ void net_unlock_index(netindex_hash *h, struct netindex *i)
 struct netindex *
 net_find_index(netindex_hash *h, const net_addr *n)
 {
+  lfuc_unlock_expected(1);
+
   RCU_ANCHOR(u);
   struct netindex *ni = net_find_index_fragile(h, n);
   return (ni && net_validate_index(h, ni)) ? net_lock_revive_unlock(h, ni) : NULL;


@@ -103,6 +103,7 @@ void *lp_allocz(linpool *, unsigned size) ALLOC_SIZE(2); /* With clear */
 void lp_flush(linpool *);                    /* Free everything, but leave linpool */
 lp_state *lp_save(linpool *m);               /* Save state */
 void lp_restore(linpool *m, lp_state *p);    /* Restore state */
+void lp_prealloc(linpool *m, uint size);     /* Make sure we will be able to allocate the memory without requesting new blocks */
 
 static inline void lp_saved_cleanup(struct lp_state **lps)
 {
@@ -150,6 +151,7 @@ extern _Atomic int pages_kept;
 extern _Atomic int pages_kept_locally;
 extern _Atomic int pages_kept_cold;
 extern _Atomic int pages_kept_cold_index;
+extern _Atomic int cold_memory_failed_to_use;
 
 void *alloc_page(void);
 void free_page(void *);
 void flush_local_pages(void);


@@ -136,6 +136,7 @@ cmd_show_memory(void)
   struct size_args cold = get_size_args(atomic_load_explicit(&pages_kept_cold, memory_order_relaxed) * page_size);
   cli_msg(-1018, "%-23s " SIZE_FORMAT, "Cold memory:", SIZE_ARGS(cold));
 #endif
+  cli_msg(-1028, "Failed to use cold memory: %i times", atomic_load_explicit(&cold_memory_failed_to_use, memory_order_relaxed));
   cli_msg(0, "");
 }
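For illustration, the new counter would show up in `show memory` output as an extra line roughly like this (the value is made up):

  Failed to use cold memory: 12 times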


@@ -750,6 +750,8 @@ enum aspa_result aspa_check(rtable *tab, const adata *path, bool force_upstream)
    * and check allowed directions */
   uint max_up = 0, min_up = 0, max_down = 0, min_down = 0;
 
+  lfuc_unlock_expected(5); /* Deferring linpool memory reservation to avoid skipping cold memory */
+
   RT_READ(tab, tr);
 
   for (uint ap=0; ap<nsz; ap++)
@@ -2269,6 +2271,8 @@ rte_import(struct rt_import_request *req, const net_addr *n, rte *new, struct rt
     return;
   }
 
+  lfuc_unlock_expected(5); /* Deferring linpool memory reservation to avoid skipping cold memory */
+
   RT_LOCKED(hook->table, tab)
   {
     u32 bs = atomic_load_explicit(&tab->routes_block_size, memory_order_acquire);
@@ -2481,6 +2485,8 @@ rt_net_feed_internal(struct rtable_reading *tr, u32 index, bool (*prefilter)(str
 struct rt_export_feed *
 rt_net_feed(rtable *t, const net_addr *a, const struct rt_pending_export *first)
 {
+  lfuc_unlock_expected(5); /* Deferring linpool memory reservation to avoid skipping cold memory */
+
   RT_READ(t, tr);
 
   const struct netindex *ni = net_find_index(tr->t->netindex, a);
   return ni ? rt_net_feed_internal(tr, ni->index, NULL, NULL, first) : NULL;
@@ -2498,6 +2504,8 @@ rt_net_best(rtable *t, const net_addr *a)
 {
   rte rt = {};
 
+  lfuc_unlock_expected(5); /* Deferring linpool memory reservation to avoid skipping cold memory */
+
   RT_READ(t, tr);
 
   struct netindex *i = net_find_index(t->netindex, a);


@@ -126,6 +126,7 @@ long page_size = 0;
 static struct empty_pages *empty_pages = NULL;
 _Atomic int pages_kept_cold = 0;
 _Atomic int pages_kept_cold_index = 0;
+_Atomic int cold_memory_failed_to_use = 0;
 
 static struct free_page * _Atomic page_stack = NULL;
 static _Thread_local struct free_page * local_page_stack = NULL;
@@ -219,6 +220,7 @@ alloc_page(void)
   {
     /* We can't lock and we actually shouldn't alloc either when rcu is active
      * but that's a quest for another day. */
+    atomic_fetch_add_explicit(&cold_memory_failed_to_use, 1, memory_order_relaxed);
   }
   else
   {