mirror of https://gitlab.nic.cz/labs/bird.git synced 2024-12-22 01:31:55 +00:00

Dropped net_resolve_index from feeder fast path

Maria Matejka 2024-06-10 16:25:53 +02:00
parent ab1697d7a0
commit 47e493f228
6 changed files with 53 additions and 43 deletions
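
In short, as read from the hunks below: net_resolve_index() now returns the sentinel &net_index_out_of_range instead of NULL when the index is past the allocated block, and the feed_net() callbacks take a raw u32 index and return the sentinel &rt_feed_index_out_of_range once that index runs past the table. A minimal sketch of a callback following this contract; sketch_feed_net and build_feed_for are hypothetical names, not part of this commit, and the netindex / rt-export declarations shown in the diff are assumed:

static struct rt_export_feed *
sketch_feed_net(struct rt_exporter *e, struct rcu_unwinder *u, u32 index,
    const struct rt_export_item *first)
{
  /* Resolve the index ourselves; the feeder fast path no longer does it */
  struct netindex *ni = net_resolve_index(e->netindex, index);

  if (ni == &net_index_out_of_range)
    /* Index is past the end of the table: tell the feeder to stop */
    return &rt_feed_index_out_of_range;

  if (ni == NULL)
    /* Empty slot: nothing to feed here, but higher indices may still exist */
    return NULL;

  /* Otherwise build the real feed for ni (build_feed_for is hypothetical) */
  return build_feed_for(e, u, ni, first);
}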

View File

@@ -308,6 +308,8 @@ net_get_index(netindex_hash *h, const net_addr *n)
net_new_index_locked(hp, n)));
}
struct netindex net_index_out_of_range;
struct netindex *
net_resolve_index(netindex_hash *h, u32 i)
{
@@ -317,7 +319,7 @@ net_resolve_index(netindex_hash *h, u32 i)
u32 bs = atomic_load_explicit(&h->block_size, memory_order_relaxed);
if (i >= bs)
return NULL;
return &net_index_out_of_range;
struct netindex *ni = atomic_load_explicit(&block[i], memory_order_acquire);
if (ni == NULL)

View File

@@ -36,6 +36,8 @@ struct netindex *net_find_index(netindex_hash *, const net_addr *);
struct netindex *net_get_index(netindex_hash *, const net_addr *);
struct netindex *net_resolve_index(netindex_hash *, u32);
extern struct netindex net_index_out_of_range;
/* Update use-count without allocating a handle. Take same care
* to ensure that your locks and unlocks are always balanced. */
void net_lock_index(netindex_hash *h, struct netindex *i);

View File

@@ -222,10 +222,12 @@ struct rt_exporter {
netindex_hash *netindex; /* Table for net <-> id conversion */
void (*stopped)(struct rt_exporter *); /* Callback when exporter can stop */
void (*cleanup_done)(struct rt_exporter *, u64 end); /* Callback when cleanup has been done */
struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, struct netindex *, const struct rt_export_item *first);
struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, u32, const struct rt_export_item *first);
void (*feed_cleanup)(struct rt_exporter *, struct rt_export_feeder *);
};
extern struct rt_export_feed rt_feed_index_out_of_range;
/* Exporter API */
void rt_exporter_init(struct rt_exporter *, struct settle_config *);
struct rt_export_item *rt_exporter_push(struct rt_exporter *, const struct rt_export_item *);

View File

@@ -10,6 +10,8 @@
#include "nest/route.h"
#include "nest/protocol.h"
struct rt_export_feed rt_feed_index_out_of_range;
#define rtex_trace(_req, _cat, msg, args...) do { \
if ((_req)->trace_routes & _cat) \
log(L_TRACE "%s: " msg, (_req)->name, ##args); \
@@ -144,17 +146,17 @@ rt_export_get(struct rt_export_request *r)
if (r->feeder.domain.rtable)
{
LOCK_DOMAIN(rtable, r->feeder.domain);
feed = e->feed_net(e, NULL, ni, update);
feed = e->feed_net(e, NULL, ni->index, update);
UNLOCK_DOMAIN(rtable, r->feeder.domain);
}
else
{
RCU_ANCHOR(u);
feed = e->feed_net(e, u, ni, update);
feed = e->feed_net(e, u, ni->index, update);
}
bmap_set(&r->feed_map, ni->index);
ASSERT_DIE(feed);
ASSERT_DIE(feed && (feed != &rt_feed_index_out_of_range));
EXPORT_FOUND(RT_EXPORT_FEED);
}
@@ -246,7 +248,7 @@ rt_alloc_feed(uint routes, uint exports)
static struct rt_export_feed *
rt_export_get_next_feed(struct rt_export_feeder *f, struct rcu_unwinder *u)
{
while (1)
for (uint retry = 0; retry < (u ? 1024 : ~0U); retry++)
{
ASSERT_DIE(u || DOMAIN_IS_LOCKED(rtable, f->domain));
@@ -257,42 +259,35 @@ rt_export_get_next_feed(struct rt_export_feeder *f, struct rcu_unwinder *u)
return NULL;
}
struct netindex *ni = NULL;
u32 mfi = atomic_load_explicit(&e->max_feed_index, memory_order_acquire);
for (; !ni && f->feed_index < mfi; f->feed_index++)
ni = net_resolve_index(e->netindex, f->feed_index);
if (!ni)
struct rt_export_feed *feed = e->feed_net(e, u, f->feed_index, NULL);
if (feed == &rt_feed_index_out_of_range)
{
rtex_trace(f, D_ROUTES, "Nothing more to feed", f->feed_index);
f->feed_index = ~0;
return NULL;
}
#define NOT_THIS_FEED(...) { \
rtex_trace(f, D_ROUTES, __VA_ARGS__); \
f->feed_index++; \
continue; \
}
if (!feed)
NOT_THIS_FEED("Nothing found for index %u", f->feed_index);
struct netindex *ni = feed->ni;
if (!rt_prefilter_net(&f->prefilter, ni->addr))
{
rtex_trace(f, D_ROUTES, "Not feeding %N due to prefilter", ni->addr);
if (u)
RCU_RETRY_FAST(u);
else
continue;
}
NOT_THIS_FEED("Not feeding %N due to prefilter", ni->addr);
if (f->feeding && !rt_net_is_feeding_feeder(f, ni->addr))
{
rtex_trace(f, D_ROUTES, "Not feeding %N, not requested", ni->addr);
if (u)
RCU_RETRY_FAST(u);
else
continue;
}
NOT_THIS_FEED("Not feeding %N, not requested", ni->addr);
struct rt_export_feed *feed = e->feed_net(e, u, ni, NULL);
if (feed)
{
rtex_trace(f, D_ROUTES, "Feeding %u routes for %N", feed->count_routes, ni->addr);
return feed;
}
f->feed_index++;
return feed;
}
RCU_RETRY_FAST(u);
}
struct rt_export_feed *

View File

@@ -2279,11 +2279,11 @@ rt_net_feed_index(struct rtable_reading *tr, net *n, const struct rt_pending_exp
}
static struct rt_export_feed *
rt_net_feed_internal(struct rtable_reading *tr, const struct netindex *ni, const struct rt_pending_export *first)
rt_net_feed_internal(struct rtable_reading *tr, u32 index, const struct rt_pending_export *first)
{
net *n = rt_net_feed_get_net(tr, ni->index);
net *n = rt_net_feed_get_net(tr, index);
if (!n)
return NULL;
return &rt_feed_index_out_of_range;
return rt_net_feed_index(tr, n, first);
}
@@ -2293,14 +2293,14 @@ rt_net_feed(rtable *t, const net_addr *a, const struct rt_pending_export *first)
{
RT_READ(t, tr);
const struct netindex *ni = net_find_index(tr->t->netindex, a);
return ni ? rt_net_feed_internal(tr, ni, first) : NULL;
return ni ? rt_net_feed_internal(tr, ni->index, first) : NULL;
}
static struct rt_export_feed *
rt_feed_net_all(struct rt_exporter *e, struct rcu_unwinder *u, struct netindex *ni, const struct rt_export_item *_first)
rt_feed_net_all(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, const struct rt_export_item *_first)
{
RT_READ_ANCHORED(SKIP_BACK(rtable, export_all, e), tr, u);
return rt_net_feed_internal(tr, ni, SKIP_BACK(const struct rt_pending_export, it, _first));
return rt_net_feed_internal(tr, index, SKIP_BACK(const struct rt_pending_export, it, _first));
}
rte
@@ -2325,17 +2325,17 @@ rt_net_best(rtable *t, const net_addr *a)
}
static struct rt_export_feed *
rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, struct netindex *ni, const struct rt_export_item *_first)
rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, const struct rt_export_item *_first)
{
SKIP_BACK_DECLARE(rtable, t, export_best, e);
SKIP_BACK_DECLARE(const struct rt_pending_export, first, it, _first);
RT_READ_ANCHORED(t, tr, u);
net *n = rt_net_feed_get_net(tr, ni->index);
net *n = rt_net_feed_get_net(tr, index);
if (!n)
return &rt_feed_index_out_of_range;
/* No more to feed, we are fed up! */
return NULL;
const struct rt_pending_export *first_in_net, *last_in_net;
first_in_net = atomic_load_explicit(&n->best.first, memory_order_acquire);
@@ -2352,9 +2352,13 @@ rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, struct netindex
return NULL;
struct rt_export_feed *feed = rt_alloc_feed(!!best, ecnt);
feed->ni = ni;
if (best)
{
feed->block[0] = best->rte;
feed->ni = NET_TO_INDEX(best->rte.net);
}
else
feed->ni = NET_TO_INDEX((first->it.new ?: first->it.old)->net);
if (ecnt)
{

View File

@@ -1879,11 +1879,16 @@ bgp_out_item_done(struct lfjour *j, struct lfjour_item *i)
{}
static struct rt_export_feed *
bgp_out_feed_net(struct rt_exporter *e, struct rcu_unwinder *u, struct netindex *ni, const struct rt_export_item *_first)
bgp_out_feed_net(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, const struct rt_export_item *_first)
{
ASSERT_DIE(u == NULL);
SKIP_BACK_DECLARE(struct bgp_ptx_private, c, exporter, e);
ASSERT_DIE(DOMAIN_IS_LOCKED(rtable, c->lock));
struct netindex *ni = net_resolve_index(c->c->c.table->netindex, index);
if (ni == &net_index_out_of_range)
return &rt_feed_index_out_of_range;
if (ni == NULL)
return NULL;
struct rt_export_feed *feed = NULL;
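
On the consuming side, the feeder loop in rt_export_get_next_feed() above now drives feed_net() directly by index and branches on the sentinel. A condensed, hypothetical sketch of that handling (sketch_next_feed is not a real function; the real loop's locking, RCU retries, prefiltering and exact index bookkeeping are omitted):

static struct rt_export_feed *
sketch_next_feed(struct rt_exporter *e, struct rcu_unwinder *u, u32 *index)
{
  while (1)
  {
    struct rt_export_feed *feed = e->feed_net(e, u, *index, NULL);

    if (feed == &rt_feed_index_out_of_range)
      /* Ran past the table: nothing more to feed */
      return NULL;

    if (!feed)
    {
      /* Empty slot: try the next index */
      (*index)++;
      continue;
    }

    /* A real feed for feed->ni: hand it to the caller */
    return feed;
  }
}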