0
0
mirror of https://gitlab.nic.cz/labs/bird.git synced 2024-12-22 01:31:55 +00:00

WIP: allocating memory near the RCU lock

This commit is contained in:
Katerina Kubecova 2024-12-12 14:55:25 +01:00
parent 9103eff734
commit d219a6d792
5 changed files with 65 additions and 22 deletions

View File

@ -215,6 +215,13 @@ struct rt_export_union {
struct rt_export_request *req;
};
struct rt_feed_retry {			/* Retry state for allocating a feed inside an RCU critical section */
struct rcu_unwinder *u;			/* Unwinder used by RCU_RETRY() when the preallocated block is too small */
void *feed_block;			/* Scratch block (tmp_alloc'd outside the RCU lock) handed out by rt_alloc_feed() */
u32 feed_size;				/* Current capacity of feed_block in bytes */
u32 feed_request;			/* Size rt_alloc_feed() asked for before retrying; zero until a retry is requested */
};
struct rt_exporter {
struct lfjour journal; /* Journal for update keeping */
TLIST_LIST(rt_export_feeder) feeders; /* List of active feeder structures */
@ -227,7 +234,7 @@ struct rt_exporter {
netindex_hash *netindex; /* Table for net <-> id conversion */
void (*stopped)(struct rt_exporter *); /* Callback when exporter can stop */
void (*cleanup_done)(struct rt_exporter *, u64 end); /* Callback when cleanup has been done */
struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, u32, bool (*)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *, const struct rt_export_item *first);
struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rt_feed_retry *, u32, bool (*)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *, const struct rt_export_item *first);
void (*feed_cleanup)(struct rt_exporter *, struct rt_export_feeder *);
};
@ -236,7 +243,7 @@ extern struct rt_export_feed rt_feed_index_out_of_range;
/* Exporter API */
void rt_exporter_init(struct rt_exporter *, struct settle_config *);
struct rt_export_item *rt_exporter_push(struct rt_exporter *, const struct rt_export_item *);
struct rt_export_feed *rt_alloc_feed(uint routes, uint exports);
struct rt_export_feed *rt_alloc_feed(struct rt_feed_retry *ur, uint routes, uint exports);
void rt_exporter_shutdown(struct rt_exporter *, void (*stopped)(struct rt_exporter *));
/* Standalone feeds */
@ -297,6 +304,22 @@ static inline void rt_export_walk_cleanup(const struct rt_export_union **up)
rt_unlock_table(_tp); \
}} while (0) \
/*
 * RT_EXPORT_RETRY_ANCHOR(ur, u) -- declare a feed retry buffer `ur` together
 * with an RCU anchor `u`.
 *
 * The scratch block is allocated from the tmp pool *before* entering the RCU
 * read-side critical section, so rt_alloc_feed() never has to allocate while
 * the RCU read lock is held. When rt_alloc_feed() finds the block too small,
 * it stores the needed size into ur.feed_request and calls RCU_RETRY(ur.u);
 * on the retry pass the check below grows the block, temporarily dropping the
 * read lock for the allocation.
 *
 * NOTE(review): assumes RCU_RETRY() unwinds back past RCU_ANCHOR() so the
 * feed_request check is re-executed -- confirm against the rcu_unwinder
 * implementation.
 * NOTE(review): feed_size records only feed_request even though the block is
 * actually allocated 3/2 larger; the extra headroom is forgotten on a
 * subsequent retry -- possibly intentional, confirm.
 */
#define RT_EXPORT_RETRY_ANCHOR(ur, u) \
struct rt_feed_retry ur = { \
.feed_block = tmp_alloc(512), /* initial guess; feed_request is zero-initialized by the designated initializer */ \
.feed_size = 512, \
}; \
RCU_ANCHOR(u); \
ur.u = u; \
if (ur.feed_request > ur.feed_size) /* nonzero only after rt_alloc_feed() requested a bigger block */ \
{ \
rcu_read_unlock(); /* must not tmp_alloc() while the RCU read lock is held */ \
ur.feed_size = ur.feed_request; \
/* allocate a little bit more just for good measure */ \
ur.feed_block = tmp_alloc((ur.feed_request * 3) / 2); \
rcu_read_lock(); \
} \
static inline int rt_prefilter_net(const struct rt_prefilter *p, const net_addr *n)
{
switch (p->mode)

View File

@ -152,8 +152,8 @@ rt_export_get(struct rt_export_request *r)
}
else
{
RCU_ANCHOR(u);
feed = e->feed_net(e, u, ni->index, NULL, NULL, update);
RT_EXPORT_RETRY_ANCHOR(ur, u);
feed = e->feed_net(e, &ur, ni->index, NULL, NULL, update);
}
ASSERT_DIE(feed && (feed != &rt_feed_index_out_of_range));
@ -228,13 +228,24 @@ rt_export_processed(struct rt_export_request *r, u64 seq)
}
struct rt_export_feed *
rt_alloc_feed(uint routes, uint exports)
rt_alloc_feed(struct rt_feed_retry *ur, uint routes, uint exports)
{
struct rt_export_feed *feed;
uint size = sizeof *feed
+ routes * sizeof *feed->block + _Alignof(typeof(*feed->block))
+ exports * sizeof *feed->exports + _Alignof(typeof(*feed->exports));
if (ur)
{
if (size > ur->feed_size)
{
ur->feed_request = size;
RCU_RETRY(ur->u);
}
feed = ur->feed_block;
}
else
feed = tmp_alloc(size);
feed->count_routes = routes;
@ -249,11 +260,11 @@ rt_alloc_feed(uint routes, uint exports)
}
static struct rt_export_feed *
rt_export_get_next_feed(struct rt_export_feeder *f, struct rcu_unwinder *u)
rt_export_get_next_feed(struct rt_export_feeder *f, struct rt_feed_retry *ur)
{
for (uint retry = 0; retry < (u ? 1024 : ~0U); retry++)
for (uint retry = 0; retry < (ur ? 1024 : ~0U); retry++)
{
ASSERT_DIE(u || DOMAIN_IS_LOCKED(rtable, f->domain));
ASSERT_DIE(ur->u || DOMAIN_IS_LOCKED(rtable, f->domain));
struct rt_exporter *e = atomic_load_explicit(&f->exporter, memory_order_acquire);
if (!e)
@ -262,7 +273,7 @@ rt_export_get_next_feed(struct rt_export_feeder *f, struct rcu_unwinder *u)
return NULL;
}
struct rt_export_feed *feed = e->feed_net(e, u, f->feed_index,
struct rt_export_feed *feed = e->feed_net(e, ur, f->feed_index,
rt_net_is_feeding_feeder, f, NULL);
if (feed == &rt_feed_index_out_of_range)
{
@ -286,7 +297,7 @@ rt_export_get_next_feed(struct rt_export_feeder *f, struct rcu_unwinder *u)
return feed;
}
RCU_RETRY_FAST(u);
RCU_RETRY_FAST(ur->u);
}
struct rt_export_feed *
@ -303,8 +314,9 @@ rt_export_next_feed(struct rt_export_feeder *f)
}
else
{
RCU_ANCHOR(u);
feed = rt_export_get_next_feed(f, u);
RT_EXPORT_RETRY_ANCHOR(ur, u);
feed = rt_export_get_next_feed(f, &ur);
}
if (feed)

View File

@ -205,6 +205,7 @@ static inline rtable *rt_pub_to_pub(rtable *tab) { return tab; }
struct rtable_reading {			/* Read-side view of a table inside an RCU critical section */
rtable *t;				/* The table being read */
struct rcu_unwinder *u;			/* Unwinder supplied via RT_READ_ANCHORED(); RCU_RETRY()s restart the read */
struct rt_export_feeder f;		/* NOTE(review): newly embedded feeder -- its use is not visible in this chunk; confirm */
};
#define RT_READ_ANCHORED(_o, _i, _u) \
@ -2431,10 +2432,16 @@ rt_net_feed_index(struct rtable_reading *tr, net *n, bool (*prefilter)(struct rt
if (!ecnt && prefilter && !prefilter(f, NET_READ_BEST_ROUTE(tr, n)->rte.net))
return NULL;
feed = rt_alloc_feed(rcnt+ocnt, ecnt);
feed = rt_alloc_feed(NULL, rcnt+ocnt, ecnt);
if (rcnt)
{
rcu_read_unlock();
defer_expect(rcnt * sizeof (rte));
rcu_read_lock();
rte_feed_obtain_copy(tr, n, feed->block, rcnt);
}
if (ecnt)
{
@ -2493,9 +2500,9 @@ rt_net_feed(rtable *t, const net_addr *a, const struct rt_pending_export *first)
}
/*
 * rt_feed_net_all -- feed_net callback of the export-all exporter.
 *
 * Anchors a read-side view of the owning table on the retry structure's RCU
 * unwinder and delegates to rt_net_feed_internal() for the net at @index.
 * @_first, if set, points at the first pending export item to include.
 *
 * (This span of the diff rendering contained both the pre- and post-change
 * signature and body lines; only the post-change version is kept here.)
 */
static struct rt_export_feed *
rt_feed_net_all(struct rt_exporter *e, struct rt_feed_retry *ur, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
{
  RT_READ_ANCHORED(SKIP_BACK(rtable, export_all, e), tr, ur->u);
  return rt_net_feed_internal(tr, index, prefilter, f, SKIP_BACK(const struct rt_pending_export, it, _first));
}
@ -2523,12 +2530,12 @@ rt_net_best(rtable *t, const net_addr *a)
}
static struct rt_export_feed *
rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
rt_feed_net_best(struct rt_exporter *e, struct rt_feed_retry *ur, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
{
SKIP_BACK_DECLARE(rtable, t, export_best, e);
SKIP_BACK_DECLARE(const struct rt_pending_export, first, it, _first);
RT_READ_ANCHORED(t, tr, u);
RT_READ_ANCHORED(t, tr, ur->u);
net *n = rt_net_feed_get_net(tr, index);
if (!n)
@ -2556,7 +2563,7 @@ rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool
if (!ecnt && (!best || prefilter && !prefilter(f, best->rte.net)))
return NULL;
struct rt_export_feed *feed = rt_alloc_feed(!!best, ecnt);
struct rt_export_feed *feed = rt_alloc_feed(ur, !!best, ecnt);
if (best)
{
feed->block[0] = best->rte;

View File

@ -2010,9 +2010,9 @@ bgp_out_item_done(struct lfjour *j UNUSED, struct lfjour_item *i UNUSED)
{}
static struct rt_export_feed *
bgp_out_feed_net(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, UNUSED const struct rt_export_item *_first)
bgp_out_feed_net(struct rt_exporter *e, struct rt_feed_retry *ur, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, UNUSED const struct rt_export_item *_first)
{
ASSERT_DIE(u == NULL);
ASSERT_DIE(ur->u == NULL);
SKIP_BACK_DECLARE(struct bgp_ptx_private, c, exporter, e);
ASSERT_DIE(DOMAIN_IS_LOCKED(rtable, c->lock));
@ -2038,7 +2038,7 @@ bgp_out_feed_net(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool
if (count)
{
feed = rt_alloc_feed(count, 0);
feed = rt_alloc_feed(NULL, count, 0);
feed->ni = ni;
uint pos = 0;

View File

@ -221,6 +221,7 @@ alloc_page(void)
/* We can't lock and we actually shouldn't alloc either when rcu is active
* but that's a quest for another day. */
atomic_fetch_add_explicit(&cold_memory_failed_to_use, 1, memory_order_relaxed);
bug("bug");
}
else
{