
This also hits the cork, maybe this refactoring is too brutal at once

Maria Matejka 2024-06-15 22:55:19 +02:00
parent 6223c4066c
commit d3b4b36af5
8 changed files with 105 additions and 99 deletions
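
The change repeated across all eight files replaces separately allocated event objects with callback structures embedded in their owning structs and bound to the owner's birdloop; each handler then recovers its owner via SKIP_BACK_DECLARE (a container-of construct) instead of receiving a void pointer. Below is a minimal, self-contained sketch of that pattern. The callback type, callback_init() and callback_activate() shown here are simplified stand-ins for illustration only (the real BIRD API also takes a target birdloop and defers execution to it), and struct proto is reduced to the single relevant field.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for BIRD's callback: the real one also stores a
 * target birdloop and an activation flag. */
typedef struct callback {
  void (*hook)(struct callback *);
} callback;

static void callback_init(callback *cb, void (*hook)(callback *)) { cb->hook = hook; }
static void callback_activate(callback *cb) { cb->hook(cb); }   /* real code defers to the target loop */

/* Container-of construct, analogous to SKIP_BACK_DECLARE(type, var, member, ptr) */
#define SKIP_BACK_DECLARE(type, var, member, ptr) \
  type *var = (type *) ((char *) (ptr) - offsetof(type, member))

struct proto {
  const char *name;
  callback done;                  /* embedded, replacing the old event *event pointer */
};

static void proto_event(callback *cb)
{
  SKIP_BACK_DECLARE(struct proto, p, done, cb);   /* recover the owning proto */
  printf("running shutdown checks for %s\n", p->name);
}

int main(void)
{
  struct proto p = { .name = "demo" };
  callback_init(&p.done, proto_event);            /* diff: callback_init(&p->done, proto_event, p->loop) */
  callback_activate(&p.done);                     /* diff: replaces proto_send_event(p, p->event) */
  return 0;
}

In the hunks below, this is the substitution made for struct proto (done replaces event), struct rte_owner (prune, stop) and the hostcache (update, uncork).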


@ -87,8 +87,8 @@ struct rte_owner {
u32 hash_key;
u32 uc;
u32 debug;
struct callback *prune_callback;
event *stop;
callback prune;
callback *stop;
};
extern DOMAIN(attrs) attrs_domain;
@ -118,7 +118,7 @@ static inline void rt_lock_source(struct rte_src *src)
static inline void rt_unlock_source(struct rte_src *src)
{
lfuc_unlock(&src->uc, src->owner->prune_callback);
lfuc_unlock(&src->uc, &src->owner->prune);
}
#ifdef RT_SOURCE_DEBUG
@ -129,8 +129,8 @@ static inline void rt_unlock_source(struct rte_src *src)
#define rt_unlock_source(x) ( log(L_INFO "Unlock source %uG at %s:%d", (x)->global_id, __FILE__, __LINE__), _rt_unlock_source_internal(x) )
#endif
void rt_init_sources(struct rte_owner *, const char *name, event_list *list);
void rt_destroy_sources(struct rte_owner *, event *);
void rt_init_sources(struct rte_owner *, const char *name, struct birdloop *loop);
void rt_destroy_sources(struct rte_owner *, callback *);
void rt_dump_sources(struct rte_owner *);


@ -1239,7 +1239,7 @@ mpls_announce_fec(struct mpls_fec_map *m, struct mpls_fec *fec, ea_list *src)
/* The same hostentry, but different dependent table */
SKIP_BACK_DECLARE(struct hostentry_adata, head, ad, heea->u.ad);
struct hostentry *he = head->he;
ea_set_hostentry(&e.attrs, m->channel->table, he->owner, he->addr, he->link,
ea_set_hostentry(&e.attrs, m->channel->table, he->owner->tab, he->addr, he->link,
HOSTENTRY_LABEL_COUNT(head), head->labels);
}
else


@ -395,7 +395,7 @@ neigh_unlink(neighbor *n)
proto_neigh_rem_node(&p->neighbors, n);
if ((p->proto_state == PS_DOWN) && EMPTY_TLIST(proto_neigh, &p->neighbors))
proto_send_event(p, p->event);
callback_activate(&p->done);
n->proto = NULL;


@ -656,7 +656,7 @@ channel_check_stopped(struct channel *c)
ASSERT_DIE(!rt_export_feed_active(&c->reimporter));
channel_set_state(c, CS_DOWN);
proto_send_event(c->proto, c->proto->event);
callback_activate(&c->proto->done);
break;
case CS_PAUSE:
@ -819,7 +819,7 @@ channel_do_down(struct channel *c)
/* Schedule protocol shutdown */
if (proto_is_done(c->proto))
proto_send_event(c->proto, c->proto->event);
callback_activate(&c->proto->done);
}
void
@ -1182,9 +1182,9 @@ proto_loop_stopped(void *ptr)
static void
proto_event(void *ptr)
proto_event(callback *cb)
{
struct proto *p = ptr;
SKIP_BACK_DECLARE(struct proto, p, done, cb);
if (p->do_stop)
{
@ -1249,7 +1249,7 @@ proto_init(struct proto_config *c, struct proto *after)
p->vrf = c->vrf;
proto_add_after(&global_proto_list, p, after);
p->event = ev_new_init(proto_pool, proto_event, p);
callback_init(&p->done, proto_event, p->loop);
PD(p, "Initializing%s", p->disabled ? " [disabled]" : "");
@ -1269,6 +1269,8 @@ proto_start(struct proto *p)
{
p->loop = birdloop_new(proto_pool, p->cf->loop_order, p->cf->loop_max_latency, "Protocol %s", p->cf->name);
p->pool = birdloop_pool(p->loop);
ASSERT_DIE(!callback_is_active(&p->done));
p->done.target = p->loop;
}
else
p->pool = rp_newf(proto_pool, the_bird_domain.the_bird, "Protocol %s", p->cf->name);
@ -1680,7 +1682,7 @@ proto_rethink_goal(struct proto *p)
OBSREF_CLEAR(p->global_config);
proto_remove_channels(p);
proto_rem_node(&global_proto_list, p);
rfree(p->event);
callback_cancel(&p->done);
mb_free(p->message);
mb_free(p);
if (!nc)
@ -2181,7 +2183,7 @@ proto_do_start(struct proto *p)
{
p->active = 1;
rt_init_sources(&p->sources, p->name, proto_event_list(p));
rt_init_sources(&p->sources, p->name, p->loop);
if (!p->sources.class)
p->sources.class = &default_rte_owner_class;
@ -2226,10 +2228,10 @@ proto_do_stop(struct proto *p)
p->pool_up = NULL;
proto_stop_channels(p);
rt_destroy_sources(&p->sources, p->event);
rt_destroy_sources(&p->sources, &p->done);
p->do_stop = 1;
proto_send_event(p, p->event);
callback_activate(&p->done);
}
static void
@ -2239,7 +2241,7 @@ proto_do_down(struct proto *p)
/* Shutdown is finished in the protocol event */
if (proto_is_done(p))
proto_send_event(p, p->event);
callback_activate(&p->done);
}


@ -141,7 +141,7 @@ struct proto {
as the protocol enters the STOP / DOWN state */
pool *pool_inloop; /* Pool containing local objects which need to be freed
before the protocol's birdloop actually stops, like olocks */
event *event; /* Protocol event */
callback done; /* Protocol shutdown checker */
timer *restart_timer; /* Timer to restart the protocol from limits */
event *restart_event; /* Event to restart/shutdown the protocol from limits */
struct birdloop *loop; /* BIRDloop running this protocol */


@ -361,7 +361,7 @@ extern uint rtable_max_id;
_Atomic u32 routes_block_size; /* Size of the route object pointer block */ \
struct f_trie * _Atomic trie; /* Trie of prefixes defined in fib */ \
event *nhu_event; /* Nexthop updater */ \
event *hcu_event; /* Hostcache updater */ \
callback shutdown_finished; /* Shutdown finisher */ \
struct rt_exporter export_all; /* Route export journal for all routes */ \
struct rt_exporter export_best; /* Route export journal for best routes */ \
@ -449,8 +449,10 @@ LOBJ_UNLOCK_CLEANUP(rtable, rtable);
extern struct rt_cork {
_Atomic uint active;
event_list queue;
event run;
struct rt_cork_callbacks {
struct rt_cork_callbacks *_Atomic next;
callback *uncork_block[0];
} *_Atomic callbacks;
} rt_cork;
static inline void rt_cork_acquire(void)
@ -464,7 +466,7 @@ static inline void rt_cork_release(void)
ev_send(&global_work_list, &rt_cork.run);
}
static inline _Bool rt_cork_check(event *e)
static inline _Bool rt_cork_check(callback *cb)
{
int corked = (atomic_load_explicit(&rt_cork.active, memory_order_acquire) > 0);
if (corked)
@ -641,7 +643,7 @@ struct hostentry {
ip_addr link; /* (link-local) IP address of host, used as gw
if host is directly attached */
rtable *tab; /* Dependent table, part of key */
rtable *owner; /* Nexthop owner table */
struct hostcache *owner; /* Nexthop owner hostcache (use with care) */
struct hostentry *next; /* Next in hash chain */
unsigned hash_key; /* Hash key */
u32 igp_metric; /* Chosen route IGP metric */
@ -658,11 +660,14 @@ struct hostcache {
unsigned hash_order, hash_shift;
unsigned hash_max, hash_min;
unsigned hash_items;
u8 corked; /* Stuck by cork */
linpool *lp; /* Linpool for trie */
struct f_trie *trie; /* Trie of prefixes that might affect hostentries */
list hostentries; /* List of all hostentries */
struct rt_export_request req; /* Notifier */
event source_event;
callback update; /* Hostcache updater */
callback uncork; /* Hostcache uncorker */
};
struct rt_digestor {


@ -150,7 +150,7 @@ static void
ea_gen_hostentry_freed(const eattr *ea)
{
struct hostentry_adata *had = (struct hostentry_adata *) ea->u.ptr;
lfuc_unlock(&had->he->uc, birdloop_event_list(had->he->owner->loop), had->he->owner->hcu_event);
lfuc_unlock(&had->he->uc, &had->he->owner->update);
}
struct ea_class ea_gen_hostentry = {
@ -244,7 +244,7 @@ rt_get_source_o(struct rte_owner *p, u32 id)
if (p->stop)
bug("Stopping route owner asked for another source.");
ASSERT_DIE(birdloop_inside(p->list->loop));
ASSERT_DIE(birdloop_inside(p->prune.target));
struct rte_src *src = rt_find_source(p, id);
@ -327,16 +327,10 @@ rt_find_source_global(u32 id)
return src;
}
static inline void
rt_done_sources(struct rte_owner *o)
{
ev_send(o->list, o->stop);
}
void
rt_prune_sources(void *data)
rt_prune_sources(callback *cb)
{
struct rte_owner *o = data;
SKIP_BACK_DECLARE(struct rte_owner, o, prune, cb);
HASH_WALK_FILTER(o->hash, next, src, sp)
{
@ -365,13 +359,13 @@ rt_prune_sources(void *data)
if (o->stop && !o->uc)
{
rfree(o->prune);
callback_cancel(&o->prune);
RTA_UNLOCK;
if (o->debug & D_EVENTS)
log(L_TRACE "%s: all rte_src's pruned, scheduling stop event", o->name);
rt_done_sources(o);
callback_activate(o->stop);
}
else
RTA_UNLOCK;
@ -402,23 +396,22 @@ rt_dump_sources(struct rte_owner *o)
}
void
rt_init_sources(struct rte_owner *o, const char *name, event_list *list)
rt_init_sources(struct rte_owner *o, const char *name, struct birdloop *loop)
{
RTA_LOCK;
HASH_INIT(o->hash, rta_pool, RSH_INIT_ORDER);
o->hash_key = random_u32();
o->uc = 0;
o->name = name;
o->prune = ev_new_init(rta_pool, rt_prune_sources, o);
callback_init(&o->prune, rt_prune_sources, loop);
o->stop = NULL;
o->list = list;
RTA_UNLOCK;
if (o->debug & D_EVENTS)
log(L_TRACE "%s: initialized rte_src owner", o->name);
}
void
rt_destroy_sources(struct rte_owner *o, event *done)
rt_destroy_sources(struct rte_owner *o, callback *done)
{
o->stop = done;
@ -427,11 +420,7 @@ rt_destroy_sources(struct rte_owner *o, event *done)
if (o->debug & D_EVENTS)
log(L_TRACE "%s: rte_src owner destroy requested, already clean, scheduling stop event", o->name);
RTA_LOCK;
rfree(o->prune);
RTA_UNLOCK;
rt_done_sources(o);
callback_activate(o->stop);
}
else
if (o->debug & D_EVENTS)
@ -1386,9 +1375,9 @@ ea_show_hostentry(const struct adata *ad, byte *buf, uint size)
uint s = 0;
if (ipa_nonzero(had->he->link) && !ipa_equal(had->he->link, had->he->addr))
s = bsnprintf(buf, size, "via %I %I table %s", had->he->addr, had->he->link, had->he->owner->name);
s = bsnprintf(buf, size, "via %I %I table %s", had->he->addr, had->he->link, had->he->owner->tab->name);
else
s = bsnprintf(buf, size, "via %I table %s", had->he->addr, had->he->owner->name);
s = bsnprintf(buf, size, "via %I table %s", had->he->addr, had->he->owner->tab->name);
uint lc = HOSTENTRY_LABEL_COUNT(had);
if (!lc)
@ -1632,7 +1621,7 @@ static SPINHASH(struct ea_storage) rta_hash_table;
#define RTAH_REHASH rta_rehash
#define RTAH_PARAMS /8, *2, 2, 2, 12, 28
static void RTAH_REHASH(void *_ UNUSED) {
static void RTAH_REHASH(callback *_ UNUSED) {
int step;
RTA_LOCK;
@ -1838,7 +1827,7 @@ rta_init(void)
for (uint i=0; i<ARRAY_SIZE(ea_slab_sizes); i++)
ea_slab[i] = sl_new(rta_pool, ea_slab_sizes[i]);
SPINHASH_INIT(rta_hash_table, RTAH, rta_pool, &global_work_list);
SPINHASH_INIT(rta_hash_table, RTAH, rta_pool, &main_birdloop);
rte_src_init();
ea_class_init();
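
The rt_prune_sources() / rt_destroy_sources() changes above turn the owner's stop notification into a plain callback handoff: destroy records the caller's completion callback, and whichever prune pass drives the use count to zero fires it. A rough, self-contained sketch of that lifecycle, with the same simplified stand-ins as before (the real code additionally takes the attrs-domain lock and walks a hash of rte_src entries), might look like this:

#include <stddef.h>
#include <stdio.h>

typedef struct callback { void (*hook)(struct callback *); } callback;
static void callback_activate(callback *cb) { cb->hook(cb); }

#define SKIP_BACK(type, member, ptr) \
  ((type *) ((char *) (ptr) - offsetof(type, member)))

struct owner_sketch {
  const char *name;
  unsigned uc;                  /* live route sources */
  callback prune;               /* embedded prune callback (formerly a heap-allocated event) */
  callback *stop;               /* completion callback handed in by the destroy call */
};

static void owner_prune(callback *cb)
{
  struct owner_sketch *o = SKIP_BACK(struct owner_sketch, prune, cb);
  /* ... walk the sources, dropping unused ones and decrementing o->uc ... */
  if (o->stop && !o->uc)
  {
    printf("%s: all sources pruned, running stop callback\n", o->name);
    callback_activate(o->stop);          /* replaces ev_send(o->list, o->stop) */
  }
}

static void owner_destroy(struct owner_sketch *o, callback *done)
{
  o->stop = done;
  if (!o->uc)
    callback_activate(o->stop);          /* already clean, finish immediately */
  /* otherwise a later prune pass fires o->stop once o->uc reaches zero */
}

static void stopped(callback *cb) { (void) cb; puts("owner fully stopped"); }

int main(void)
{
  struct owner_sketch o = { .name = "demo", .prune = { owner_prune } };
  callback done = { stopped };
  owner_destroy(&o, &done);              /* uc is zero, so the stop callback runs at once */
  return 0;
}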


@ -129,8 +129,8 @@ struct rt_cork rt_cork;
/* Data structures for export journal */
static void rt_free_hostcache(struct rtable_private *tab);
static void rt_hcu_uncork(void *_tab);
static void rt_update_hostcache(void *tab);
static void rt_hcu_update(callback *);
static void rt_hcu_uncork(callback *);
static void rt_next_hop_update(void *_tab);
static void rt_nhu_uncork(void *_tab);
static inline void rt_next_hop_resolve_rte(rte *r);
@ -3068,7 +3068,7 @@ rt_setup(pool *pp, struct rtable_config *cf)
if (t->id >= rtable_max_id)
rtable_max_id = t->id + 1;
t->netindex = netindex_hash_new(birdloop_pool(t->loop), birdloop_event_list(t->loop), cf->addr_type);
t->netindex = netindex_hash_new(birdloop_pool(t->loop), t->loop, cf->addr_type);
atomic_store_explicit(&t->routes, mb_allocz(p, RT_INITIAL_ROUTES_BLOCK_SIZE * sizeof(net)), memory_order_relaxed);
atomic_store_explicit(&t->routes_block_size, RT_INITIAL_ROUTES_BLOCK_SIZE, memory_order_relaxed);
@ -4319,11 +4319,10 @@ rt_unlock_table_priv(struct rtable_private *r, const char *file, uint line)
}
static void
rt_shutdown_finished(void *tab_)
rt_shutdown_finished(struct callback *cb)
{
rtable *t = tab_;
RT_LOCK(t, tab);
birdloop_stop_self(t->loop, rt_delete, t);
RT_LOCK(SKIP_BACK(rtable, shutdown_finished, cb), tab);
birdloop_stop_self(tab->loop, rt_delete, tab);
}
static void
@ -4332,6 +4331,8 @@ rt_shutdown(void *tab_)
rtable *t = tab_;
RT_LOCK(t, tab);
callback_init(&tab->shutdown_finished, rt_shutdown_finished, tab->loop);
if (tab->export_digest)
{
rtex_export_unsubscribe(&tab->export_digest->req);
@ -4345,9 +4346,7 @@ rt_shutdown(void *tab_)
rt_exporter_shutdown(&tab->export_best, NULL);
rt_exporter_shutdown(&tab->export_all, NULL);
netindex_hash_delete(tab->netindex,
ev_new_init(tab->rp, rt_shutdown_finished, tab),
birdloop_event_list(tab->loop));
netindex_hash_delete(tab->netindex, &tab->shutdown_finished);
}
static void
@ -4495,13 +4494,9 @@ rt_commit(struct config *new, struct config *old)
rt_check_cork_low(tab);
if (tab->hcu_event)
{
if (ev_get_list(tab->hcu_event) == &rt_cork.queue)
ev_postpone(tab->hcu_event);
/* Stop the hostcache updater */
if (rt_export_get_state(&tab->hostcache->req) != TES_DOWN)
rtex_export_unsubscribe(&tab->hostcache->req);
}
rt_unlock_table(tab);
}
@ -4637,6 +4632,9 @@ hc_notify_export(void *_hc)
RT_EXPORT_WALK(&hc->req, u)
{
if (callback_is_active(&hc->update))
continue;
const net_addr *n = NULL;
switch (u->kind)
{
@ -4676,9 +4674,6 @@ hc_notify_export(void *_hc)
continue;
RT_LOCK(hc->tab, tab);
if (ev_active(tab->hcu_event))
continue;
if (!trie_match_net(hc->trie, n))
{
/* No interest in this update, mark seen only */
@ -4693,12 +4688,12 @@ hc_notify_export(void *_hc)
hc->req.name, n, NET_TO_INDEX(n)->index);
if ((rt_export_get_state(&hc->req) == TES_READY)
&& !ev_active(tab->hcu_event))
&& !callback_is_active(&hc->update))
{
if (hc->req.trace_routes & D_EVENTS)
log(L_TRACE "%s requesting HCU", hc->req.name);
ev_send_loop(tab->loop, tab->hcu_event);
callback_activate(&hc->update);
}
}
@ -4722,12 +4717,11 @@ rt_init_hostcache(struct rtable_private *tab)
hc->trie = f_new_trie(hc->lp, 0);
hc->tab = RT_PUB(tab);
tab->hcu_event = ev_new_init(tab->rp, rt_update_hostcache, tab);
tab->hcu_uncork_event = ev_new_init(tab->rp, rt_hcu_uncork, tab);
tab->hostcache = hc;
ev_send_loop(tab->loop, tab->hcu_event);
rt_lock_table(tab);
callback_init(&hc->update, rt_hcu_update, tab->loop);
callback_activate(&hc->update);
}
static void
@ -4877,20 +4871,9 @@ done:
}
static void
rt_hcu_uncork(void *_tab)
rt_update_hostcache(struct hostcache *hc, rtable **nhu_pending)
{
RT_LOCKED((rtable *) _tab, tab)
ev_send_loop(tab->loop, tab->hcu_event);
}
static void
rt_update_hostcache(void *data)
{
rtable **nhu_pending;
RT_LOCKED((rtable *) data, tab)
{
struct hostcache *hc = tab->hostcache;
RT_LOCK(hc->tab, tab);
/* Finish initialization */
if (!hc->req.name)
@ -4913,19 +4896,6 @@ rt_update_hostcache(void *data)
rtex_export_subscribe(&tab->export_best, &hc->req);
}
/* Shutdown shortcut */
if (rt_export_get_state(&hc->req) == TES_DOWN)
return;
if (rt_cork_check(tab->hcu_uncork_event))
{
rt_trace(tab, D_STATES, "Hostcache update corked");
return;
}
/* Destination schedule map */
nhu_pending = tmp_allocz(sizeof(rtable *) * rtable_max_id);
struct hostentry *he;
node *n, *x;
@ -4947,12 +4917,52 @@ rt_update_hostcache(void *data)
}
}
static void
rt_hcu_update(struct callback *cb)
{
SKIP_BACK_DECLARE(struct hostcache, hc, update, cb);
/* Still corked, do nothing */
if (hc->corked)
return;
/* Shutdown shortcut */
if (hc->req.name && (rt_export_get_state(&hc->req) == TES_DOWN))
{
RT_LOCK(hc->tab, tab);
rt_unlock_table(tab);
return;
}
/* Cork check */
if (rt_cork_check(&hc->uncork))
{
hc->corked = 1;
rt_trace(tab, D_STATES, "Hostcache update corked");
return;
}
/* Destination schedule map */
rtable **nhu_pending = tmp_allocz(sizeof(rtable *) * rtable_max_id);
/* Find which destinations we have to ping */
rt_update_hostcache(hc, &nhu_pending);
/* And do the pinging */
for (uint i=0; i<rtable_max_id; i++)
if (nhu_pending[i])
RT_LOCKED(nhu_pending[i], dst)
rt_schedule_nhu(dst);
}
static void
rt_hcu_uncork(struct callback *cb)
{
SKIP_BACK_DECLARE(struct hostcache, hc, uncork, cb);
hc->corked = 0;
callback_activate(&hc->update);
}
static struct hostentry *
rt_get_hostentry(struct rtable_private *tab, ip_addr a, ip_addr ll, rtable *dep)
{
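
To close the loop with the commit message: rt_hcu_update() and rt_hcu_uncork() above are an instance of the cork pattern that now also hits the hostcache. The update first calls rt_cork_check() with its uncork callback; while the cork is held it marks itself corked and bails out, and releasing the cork activates the registered callback, which clears the flag and re-activates the update. A compressed, self-contained sketch of that flow, with simplified single-threaded stand-ins for rt_cork and callback (the real rt_cork uses an atomic counter and a lock-free list of uncork callbacks), could look like this:

#include <stdio.h>

typedef struct callback { void (*hook)(struct callback *); } callback;
static void callback_activate(callback *cb) { cb->hook(cb); }

static struct {
  unsigned active;              /* cork counter */
  callback *queued;             /* one registered uncork callback, for the sketch */
} rt_cork;

/* Returns nonzero and remembers the uncork callback if the cork is held. */
static int rt_cork_check(callback *uncork)
{
  if (!rt_cork.active)
    return 0;
  rt_cork.queued = uncork;
  return 1;
}

static void rt_cork_release(void)
{
  if (--rt_cork.active == 0 && rt_cork.queued)
    callback_activate(rt_cork.queued);
}

/* Hostcache-like consumer: the update bails out while corked, the uncork retries. */
static struct hc_sketch { int corked; callback update, uncork; } hc;

static void hc_update(callback *cb)
{
  (void) cb;
  if (hc.corked)
    return;                              /* still waiting for the uncork */
  if (rt_cork_check(&hc.uncork))
  {
    hc.corked = 1;
    puts("hostcache update corked");
    return;
  }
  puts("hostcache update runs");
}

static void hc_uncork(callback *cb)
{
  (void) cb;
  hc.corked = 0;
  callback_activate(&hc.update);         /* re-run the deferred update */
}

int main(void)
{
  hc.update.hook = hc_update;
  hc.uncork.hook = hc_uncork;

  rt_cork.active = 1;                    /* cork held */
  callback_activate(&hc.update);         /* prints "hostcache update corked" */
  rt_cork_release();                     /* uncork fires, the update finally runs */
  return 0;
}

Run as written, the sketch prints the corked message first and the actual update only after rt_cork_release(), which is the ordering the new corked flag and uncork callback enforce for the hostcache.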