
Export: Next feed step-up is now internal

There is not much reason to do anything other than the netindex technique.
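
Below, as rough orientation, is a minimal standalone sketch of the stepping technique this commit internalizes: the feeder walks feed_index up to an atomically published max_feed_index, resolving each index until one maps to an allocated net. Everything prefixed toy_ (and the simplified struct netindex) is invented for this illustration; only the loop shape and the feed_index / max_feed_index roles are taken from the diff below.

/* Standalone sketch (C11). Build: cc -std=c11 sketch.c */
#include <stdatomic.h>
#include <stdio.h>

/* Simplified stand-in for BIRD's netindex: an id plus its address */
struct netindex { unsigned index; const char *addr; };

struct toy_exporter {
  struct netindex **tbl;              /* sparse index -> net table; NULL = hole */
  _Atomic unsigned max_feed_index;    /* feeding stops at this index */
};

struct toy_feeder {
  unsigned feed_index;                /* position of the feed in progress */
};

/* One step of the feed: skip holes until an index resolves, or finish. */
static struct netindex *
toy_next_feed(struct toy_exporter *e, struct toy_feeder *f)
{
  struct netindex *ni = NULL;
  unsigned mfi = atomic_load_explicit(&e->max_feed_index, memory_order_acquire);

  /* Same loop shape as the new rt_export_next_feed(): the post-increment
   * leaves feed_index one past the net just returned, ready for resuming */
  for (; !ni && f->feed_index < mfi; f->feed_index++)
    ni = e->tbl[f->feed_index];

  if (!ni)
    f->feed_index = ~0u;              /* mark the feed as finished */

  return ni;
}

int main(void)
{
  struct netindex n0 = { 0, "10.0.0.0/8" }, n2 = { 2, "192.168.0.0/16" };
  struct netindex *tbl[4] = { &n0, NULL, &n2, NULL };   /* indices 1, 3 unused */

  struct toy_exporter e = { .tbl = tbl };
  atomic_store(&e.max_feed_index, 4);

  struct toy_feeder f = { .feed_index = 0 };
  for (struct netindex *ni; (ni = toy_next_feed(&e, &f)); )
    printf("feeding index %u: %s\n", ni->index, ni->addr);

  return 0;
}

Note how the loop's post-increment also explains why the removed rt_feed_next() needed an explicit f->feed_index++ before returning: both variants must leave the cursor one past the last net handed out.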
Maria Matejka 2024-05-30 08:22:40 +02:00 committed by Katerina Kubecova
parent dd841dd472
commit a9208e602d
3 changed files with 45 additions and 61 deletions

View File

@@ -147,10 +147,7 @@ struct rt_export_request {
 #define TLIST_WANT_ADD_TAIL
   /* Feeding itself */
-  union {
-    u64 feed_index;				/* Index of the feed in progress */
-    struct rt_feeding_index *feed_index_ptr;	/* Use this when u64 is not enough */
-  };
+  u32 feed_index;				/* Index of the feed in progress */
 struct rt_feeding_request {
   struct rt_feeding_request *next;		/* Next in request chain */
   void (*done)(struct rt_feeding_request *);	/* Called when this refeed finishes */
@@ -211,11 +208,13 @@ struct rt_exporter {
   TLIST_LIST(rt_export_feeder) feeders;		/* List of active feeder structures */
   _Bool _Atomic feeders_lock;			/* Spinlock for the above list */
   u8 trace_routes;				/* Debugging flags (D_*) */
+  u8 net_type;					/* Which net this exporter provides */
+  u32 _Atomic max_feed_index;			/* Stop feeding at this index */
   const char *name;				/* Name for logging */
+  netindex_hash *netindex;			/* Table for net <-> id conversion */
   void (*stopped)(struct rt_exporter *);	/* Callback when exporter can stop */
   void (*cleanup_done)(struct rt_exporter *, u64 end);	/* Callback when cleanup has been done */
-  struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, const net_addr *, const struct rt_export_item *first);
-  const net_addr *(*feed_next)(struct rt_exporter *, struct rcu_unwinder *, struct rt_export_feeder *);
+  struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, const struct netindex *, const struct rt_export_item *first);
   void (*feed_cleanup)(struct rt_exporter *, struct rt_export_feeder *);
 };

View File

@@ -129,6 +129,8 @@ rt_export_get(struct rt_export_request *r)
       /* Is this update allowed by prefilter? */
       const net_addr *n = (update->new ?: update->old)->net;
+      struct netindex *ni = NET_TO_INDEX(n);
+
       if (!rt_prefilter_net(&r->feeder.prefilter, n))
       {
	 rtex_trace(r, D_ROUTES, "Not exporting %N due to prefilter", n);
@@ -140,9 +142,9 @@ rt_export_get(struct rt_export_request *r)
	   /* But this net shall get a feed first! */
	   rtex_trace(r, D_ROUTES, "Expediting %N feed due to pending update %lu", n, update->seq);
	   RCU_ANCHOR(u);
-	  feed = e->feed_net(e, u, n, update);
+	  feed = e->feed_net(e, u, ni, update);
-	  bmap_set(&r->feed_map, NET_TO_INDEX(n)->index);
+	  bmap_set(&r->feed_map, ni->index);
	   ASSERT_DIE(feed);
	   EXPORT_FOUND(RT_EXPORT_FEED);
@@ -226,26 +228,33 @@ rt_export_next_feed(struct rt_export_feeder *f)
	 return NULL;
       }
-      const net_addr *a = e->feed_next(e, u, f);
-      if (!a)
+      struct netindex *ni = NULL;
+      u32 mfi = atomic_load_explicit(&e->max_feed_index, memory_order_acquire);
+      for (; !ni && f->feed_index < mfi; f->feed_index++)
+	ni = net_resolve_index(e->netindex, e->net_type, f->feed_index);
+
+      if (!ni)
+      {
+	f->feed_index = ~0;
	 break;
+      }
 
-      if (!rt_prefilter_net(&f->prefilter, a))
+      if (!rt_prefilter_net(&f->prefilter, ni->addr))
       {
-	rtex_trace(f, D_ROUTES, "Not feeding %N due to prefilter", a);
+	rtex_trace(f, D_ROUTES, "Not feeding %N due to prefilter", ni->addr);
	 continue;
       }
 
-      if (f->feeding && !rt_net_is_feeding_feeder(f, a))
+      if (f->feeding && !rt_net_is_feeding_feeder(f, ni->addr))
       {
-	rtex_trace(f, D_ROUTES, "Not feeding %N, not requested", a);
+	rtex_trace(f, D_ROUTES, "Not feeding %N, not requested", ni->addr);
	 continue;
       }
 
-      struct rt_export_feed *feed = e->feed_net(e, u, a, NULL);
+      struct rt_export_feed *feed = e->feed_net(e, u, ni, NULL);
       if (feed)
       {
-	rtex_trace(f, D_ROUTES, "Feeding %u routes for %N", feed->count_routes, a);
+	rtex_trace(f, D_ROUTES, "Feeding %u routes for %N", feed->count_routes, ni->addr);
	 return feed;
       }
     }
@@ -371,7 +380,7 @@ rt_exporter_init(struct rt_exporter *e, struct settle_config *scf)
   e->journal.cleanup_done = rt_exporter_cleanup_done;
   lfjour_init(&e->journal, scf);
   ASSERT_DIE(e->feed_net);
-  ASSERT_DIE(e->feed_next);
+  ASSERT_DIE(e->netindex);
 }
 
 struct rt_export_item *

View File

@@ -1193,7 +1193,7 @@ rte_announce_to(struct rt_exporter *e, struct rt_net_pending_export *npe, const
 }
 
 static void
-rte_announce(struct rtable_private *tab, const struct netindex *i, net *net, const rte *new, const rte *old,
+rte_announce(struct rtable_private *tab, const struct netindex *i UNUSED, net *net, const rte *new, const rte *old,
	      const rte *new_best, const rte *old_best)
 {
   /* Update network count */
@ -1903,6 +1903,12 @@ rte_import(struct rt_import_request *req, const net_addr *n, rte *new, struct rt
ASSERT_DIE(atomic_compare_exchange_strong_explicit(
&tab->routes_block_size, &bs, nbs,
memory_order_acq_rel, memory_order_relaxed));
ASSERT_DIE(atomic_compare_exchange_strong_explicit(
&tab->export_all.max_feed_index, &bs, nbs,
memory_order_acq_rel, memory_order_relaxed));
ASSERT_DIE(atomic_compare_exchange_strong_explicit(
&tab->export_best.max_feed_index, &bs, nbs,
memory_order_acq_rel, memory_order_relaxed));
synchronize_rcu();
mb_free(routes);
@@ -1942,38 +1948,6 @@ rte_import(struct rt_import_request *req, const net_addr *n, rte *new, struct rt
  * Feeding
  */
 
-static const net_addr *
-rt_feed_next(struct rtable_reading *tr, struct rt_export_feeder *f)
-{
-  u32 rbs = atomic_load_explicit(&tr->t->routes_block_size, memory_order_acquire);
-  for (; f->feed_index < rbs; f->feed_index++)
-  {
-    struct netindex *ni = net_resolve_index(tr->t->netindex, tr->t->addr_type, f->feed_index);
-    if (ni)
-    {
-      f->feed_index++;
-      return ni->addr;
-    }
-  }
-
-  f->feed_index = ~0ULL;
-  return NULL;
-}
-
-static const net_addr *
-rt_feed_next_best(struct rt_exporter *e, struct rcu_unwinder *u, struct rt_export_feeder *f)
-{
-  RT_READ_ANCHORED(SKIP_BACK(rtable, priv.export_best, e), tr, u);
-  return rt_feed_next(tr, f);
-}
-
-static const net_addr *
-rt_feed_next_all(struct rt_exporter *e, struct rcu_unwinder *u, struct rt_export_feeder *f)
-{
-  RT_READ_ANCHORED(SKIP_BACK(rtable, priv.export_all, e), tr, u);
-  return rt_feed_next(tr, f);
-}
-
 static struct rt_export_feed *
 rt_alloc_feed(uint routes, uint exports)
 {
@@ -2112,10 +2086,9 @@ rt_net_feed_index(struct rtable_reading *tr, net *n, const struct rt_pending_exp
 }
 
 static struct rt_export_feed *
-rt_net_feed_internal(struct rtable_reading *tr, const net_addr *a, const struct rt_pending_export *first)
+rt_net_feed_internal(struct rtable_reading *tr, const struct netindex *ni, const struct rt_pending_export *first)
 {
-  const struct netindex *i = net_find_index(tr->t->netindex, a);
-  net *n = rt_net_feed_get_net(tr, i->index);
+  net *n = rt_net_feed_get_net(tr, ni->index);
   if (!n)
     return NULL;
@@ -2126,14 +2099,15 @@ struct rt_export_feed *
 rt_net_feed(rtable *t, const net_addr *a, const struct rt_pending_export *first)
 {
   RT_READ(t, tr);
-  return rt_net_feed_internal(tr, a, first);
+  const struct netindex *ni = net_find_index(tr->t->netindex, a);
+  return ni ? rt_net_feed_internal(tr, ni, first) : NULL;
 }
 
 static struct rt_export_feed *
-rt_feed_net_all(struct rt_exporter *e, struct rcu_unwinder *u, const net_addr *a, const struct rt_export_item *_first)
+rt_feed_net_all(struct rt_exporter *e, struct rcu_unwinder *u, const struct netindex *ni, const struct rt_export_item *_first)
 {
   RT_READ_ANCHORED(SKIP_BACK(rtable, export_all, e), tr, u);
-  return rt_net_feed_internal(tr, a, SKIP_BACK(const struct rt_pending_export, it, _first));
+  return rt_net_feed_internal(tr, ni, SKIP_BACK(const struct rt_pending_export, it, _first));
 }
 
 rte
@@ -2158,13 +2132,11 @@ rt_net_best(rtable *t, const net_addr *a)
 }
 
 static struct rt_export_feed *
-rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, const net_addr *a, const struct rt_export_item *_first)
+rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, const struct netindex *ni, const struct rt_export_item *_first)
 {
   SKIP_BACK_DECLARE(rtable, t, export_best, e);
   SKIP_BACK_DECLARE(const struct rt_pending_export, first, it, _first);
-  struct netindex *ni = NET_TO_INDEX(a);
 
   RT_READ_ANCHORED(t, tr, u);
   net *n = rt_net_feed_get_net(tr, ni->index);
@@ -2854,10 +2826,12 @@ rt_setup(pool *pp, struct rtable_config *cf)
	 .item_done = rt_cleanup_export_best,
       },
       .name = mb_sprintf(p, "%s.export-best", t->name),
+      .net_type = t->addr_type,
+      .max_feed_index = RT_INITIAL_ROUTES_BLOCK_SIZE,
+      .netindex = t->netindex,
       .trace_routes = t->debug,
       .cleanup_done = rt_cleanup_done_best,
       .feed_net = rt_feed_net_best,
-      .feed_next = rt_feed_next_best,
     };
 
   rt_exporter_init(&t->export_best, &cf->export_settle);
@@ -2870,10 +2844,12 @@ rt_setup(pool *pp, struct rtable_config *cf)
	 .item_done = rt_cleanup_export_all,
       },
       .name = mb_sprintf(p, "%s.export-all", t->name),
+      .net_type = t->addr_type,
+      .max_feed_index = RT_INITIAL_ROUTES_BLOCK_SIZE,
+      .netindex = t->netindex,
       .trace_routes = t->debug,
       .cleanup_done = rt_cleanup_done_all,
       .feed_net = rt_feed_net_all,
-      .feed_next = rt_feed_next_all,
     };
 
   rt_exporter_init(&t->export_all, &cf->export_settle);