mirror of https://gitlab.nic.cz/labs/bird.git synced 2024-12-22 09:41:54 +00:00

Table cork now uses callbacks and direct flush to uncork

Maria Matejka 2024-06-15 23:31:44 +02:00
parent 83045e9a1f
commit d6233b4de0
5 changed files with 78 additions and 54 deletions
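
For orientation, here is a minimal, self-contained sketch of the accounting this commit introduces, as far as it can be read from the diff below: rt_cork.active becomes a 64-bit counter whose low 44 bits count held corks and whose bits from RT_UNCORKING up count uncorks in progress, and the releasing thread flushes the uncork queue directly once every remaining cork is an uncork in flight. The names cork_acquire and cork_release are stand-ins, and the printf replaces the real flush of rt_cork.queue under the RCU-synchronized domain lock.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RT_UNCORKING (1ULL << 44)   /* bits 44 and up count uncorks in progress */

static _Atomic uint64_t cork_active;

static void cork_acquire(void)
{
  /* Low 44 bits count currently held corks */
  atomic_fetch_add_explicit(&cork_active, 1, memory_order_acq_rel);
}

static void cork_release(void)
{
  /* Announce an uncork in progress in the high bits */
  uint64_t upd = atomic_fetch_add_explicit(&cork_active, RT_UNCORKING,
      memory_order_acq_rel) + RT_UNCORKING;

  /* Every remaining cork is an uncork in flight -> safe to flush directly */
  if ((upd >> 44) == (upd & (RT_UNCORKING - 1)))
    printf("last cork released, flushing the uncork queue\n");

  /* Drop both the uncork-in-progress mark and the cork itself */
  atomic_fetch_sub_explicit(&cork_active, RT_UNCORKING + 1, memory_order_acq_rel);
}

int main(void)
{
  cork_acquire();
  cork_acquire();
  cork_release();   /* one cork still held: no flush */
  cork_release();   /* last one: prints the flush message */
  return 0;
}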

View File

@@ -342,6 +342,11 @@ rt_net_is_feeding_request(struct rt_export_request *req, const net_addr *n)
*/
struct rt_uncork_callback {
event ev;
callback cb;
};
struct rt_export_hook;
extern uint rtable_max_id;
@@ -390,8 +395,8 @@ struct rtable_private {
* obstacle from this routing table.
*/
struct rt_export_request best_req; /* Internal request from best route announcement cleanup */
struct event *nhu_uncork_event; /* Helper event to schedule NHU on uncork */
struct event *hcu_uncork_event; /* Helper event to schedule HCU on uncork */
struct rt_uncork_callback nhu_uncork; /* Helper event to schedule NHU on uncork */
struct rt_uncork_callback hcu_uncork; /* Helper event to schedule HCU on uncork */
struct timer *prune_timer; /* Timer for periodic pruning / GC */
struct event *prune_event; /* Event for prune execution */
btime last_rt_change; /* Last time when route changed */
@@ -447,10 +452,12 @@ LOBJ_UNLOCK_CLEANUP(rtable, rtable);
#define RT_PUB(tab) SKIP_BACK(rtable, priv, tab)
#define RT_UNCORKING (1ULL << 44)
extern struct rt_cork {
_Atomic uint active;
_Atomic u64 active;
DOMAIN(resource) dom;
event_list queue;
event run;
} rt_cork;
static inline void rt_cork_acquire(void)
@@ -460,20 +467,55 @@ static inline void rt_cork_acquire(void)
static inline void rt_cork_release(void)
{
if (atomic_fetch_sub_explicit(&rt_cork.active, 1, memory_order_acq_rel) == 1)
ev_send(&global_work_list, &rt_cork.run);
u64 upd = atomic_fetch_add_explicit(&rt_cork.active, RT_UNCORKING, memory_order_acq_rel) + RT_UNCORKING;
/* Actually released? */
if ((upd >> 44) == (upd & (RT_UNCORKING - 1)))
{
LOCK_DOMAIN(resource, rt_cork.dom);
synchronize_rcu();
ev_run_list(&rt_cork.queue);
UNLOCK_DOMAIN(resource, rt_cork.dom);
}
atomic_fetch_sub_explicit(&rt_cork.active, RT_UNCORKING + 1, memory_order_acq_rel);
}
static inline _Bool rt_cork_check(event *e)
void rt_cork_send_callback(void *_data);
static inline _Bool rt_cork_check(struct rt_uncork_callback *rcc)
{
int corked = (atomic_load_explicit(&rt_cork.active, memory_order_acquire) > 0);
if (corked)
ev_send(&rt_cork.queue, e);
/* Wait until all uncorks have finished */
while (1)
{
rcu_read_lock();
if (atomic_load_explicit(&rt_cork.active, memory_order_acquire) == 0)
ev_send(&global_work_list, &rt_cork.run);
/* Not corked */
u64 corked = atomic_load_explicit(&rt_cork.active, memory_order_acquire);
if (!corked)
{
rcu_read_unlock();
return 0;
}
return corked;
/* Yes, corked */
if (corked < RT_UNCORKING)
{
if (!rcc->ev.hook)
{
rcc->ev.hook = rt_cork_send_callback;
rcc->ev.data = rcc;
}
ev_send(&rt_cork.queue, &rcc->ev);
rcu_read_unlock();
return 1;
}
/* In progress, retry */
rcu_read_unlock();
birdloop_yield();
}
}
struct rt_pending_export {
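
The reworked rt_cork_check() above reads three situations off that same counter: zero means not corked; a nonzero value below RT_UNCORKING means corked with no uncork running yet, so the caller's rt_uncork_callback event is queued; anything at or above RT_UNCORKING means a release is currently flushing the queue, so the caller yields and re-checks. A small sketch of just that classification, with a hypothetical classify() helper standing in for the real RCU and event handling:

#include <stdint.h>
#include <stdio.h>

#define RT_UNCORKING (1ULL << 44)

/* Mirrors the three branches of rt_cork_check(), without RCU or events */
static const char *classify(uint64_t active)
{
  if (!active)
    return "not corked: proceed";
  if (active < RT_UNCORKING)
    return "corked: enqueue the uncork callback and back off";
  return "uncork in progress: yield and re-check";
}

int main(void)
{
  printf("%s\n", classify(0));
  printf("%s\n", classify(3));                 /* three corks held */
  printf("%s\n", classify(1 + RT_UNCORKING));  /* last cork being released */
  return 0;
}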

View File

@@ -129,10 +129,10 @@ struct rt_cork rt_cork;
/* Data structures for export journal */
static void rt_free_hostcache(struct rtable_private *tab);
static void rt_hcu_uncork(void *_tab);
static void rt_hcu_uncork(callback *);
static void rt_update_hostcache(void *tab);
static void rt_next_hop_update(void *_tab);
static void rt_nhu_uncork(void *_tab);
static void rt_nhu_uncork(callback *);
static inline void rt_next_hop_resolve_rte(rte *r);
static inline void rt_flowspec_resolve_rte(rte *r, struct channel *c);
static void rt_refresh_trace(struct rtable_private *tab, struct rt_import_hook *ih, const char *msg);
@@ -140,7 +140,6 @@ static void rt_kick_prune_timer(struct rtable_private *tab);
static void rt_prune_table(void *_tab);
static void rt_check_cork_low(struct rtable_private *tab);
static void rt_check_cork_high(struct rtable_private *tab);
static void rt_cork_release_hook(void *);
static void rt_shutdown(void *);
static void rt_delete(void *);
@@ -3085,7 +3084,7 @@ rt_setup(pool *pp, struct rtable_config *cf)
hmap_set(&t->id_map, 0);
t->nhu_event = ev_new_init(p, rt_next_hop_update, t);
t->nhu_uncork_event = ev_new_init(p, rt_nhu_uncork, t);
callback_init(&t->nhu_uncork.cb, rt_nhu_uncork, t->loop);
t->prune_timer = tm_new_init(p, rt_prune_timer, t, 0, 0);
t->prune_event = ev_new_init(p, rt_prune_table, t);
t->last_rt_change = t->gc_time = current_time();
@@ -3213,7 +3212,7 @@ rt_init(void)
init_list(&routing_tables);
init_list(&deleted_routing_tables);
ev_init_list(&rt_cork.queue, &main_birdloop, "Route cork release");
rt_cork.run = (event) { .hook = rt_cork_release_hook };
rt_cork.dom = DOMAIN_NEW_RCU_SYNC(resource);
idm_init(&rtable_idm, rt_table_pool, 256);
ea_register_init(&ea_roa_aggregated);
@@ -3425,14 +3424,11 @@ rt_prune_table(void *_tab)
}
}
static void
rt_cork_release_hook(void *data UNUSED)
void
rt_cork_send_callback(void *_rcc)
{
do birdloop_yield();
while (
!atomic_load_explicit(&rt_cork.active, memory_order_acquire) &&
ev_run_list(&rt_cork.queue)
);
struct rt_uncork_callback *rcc = _rcc;
callback_activate(&rcc->cb);
}
/**
@@ -4098,9 +4094,9 @@ rt_next_hop_update_net(struct rtable_private *tab, struct netindex *ni, net *n)
}
static void
rt_nhu_uncork(void *_tab)
rt_nhu_uncork(callback *cb)
{
RT_LOCKED((rtable *) _tab, tab)
RT_LOCKED(SKIP_BACK(rtable, priv.nhu_uncork.cb, cb), tab)
{
ASSERT_DIE(tab->nhu_corked);
ASSERT_DIE(tab->nhu_state == 0);
@@ -4128,7 +4124,7 @@ rt_next_hop_update(void *_tab)
return;
/* Check corkedness */
if (rt_cork_check(tab->nhu_uncork_event))
if (rt_cork_check(&tab->nhu_uncork))
{
rt_trace(tab, D_STATES, "Next hop updater corked");
@@ -4724,7 +4720,7 @@ rt_init_hostcache(struct rtable_private *tab)
hc->tab = RT_PUB(tab);
tab->hcu_event = ev_new_init(tab->rp, rt_update_hostcache, tab);
tab->hcu_uncork_event = ev_new_init(tab->rp, rt_hcu_uncork, tab);
callback_init(&tab->hcu_uncork.cb, rt_hcu_uncork, tab->loop);
tab->hostcache = hc;
ev_send_loop(tab->loop, tab->hcu_event);
@@ -4877,10 +4873,10 @@ done:
}
static void
rt_hcu_uncork(void *_tab)
rt_hcu_uncork(callback *cb)
{
RT_LOCKED((rtable *) _tab, tab)
ev_send_loop(tab->loop, tab->hcu_event);
SKIP_BACK_DECLARE(rtable, tab, priv.hcu_uncork.cb, cb);
ev_send_loop(tab->loop, tab->hcu_event);
}
static void
@@ -4917,7 +4913,7 @@ rt_update_hostcache(void *data)
if (rt_export_get_state(&hc->req) == TES_DOWN)
return;
if (rt_cork_check(tab->hcu_uncork_event))
if (rt_cork_check(&tab->hcu_uncork))
{
rt_trace(tab, D_STATES, "Hostcache update corked");
return;
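
Both uncork hooks in this file recover their table from the bare callback pointer via SKIP_BACK / SKIP_BACK_DECLARE, BIRD's container_of-style macro. Below is a self-contained sketch of that recovery pattern; my_table and the trimmed struct callback are hypothetical stand-ins, not the real rtable and loop machinery:

#include <stddef.h>
#include <stdio.h>

/* Trimmed stand-in for BIRD's callback: only the hook matters here */
struct callback { void (*hook)(struct callback *); };

/* Stand-in for a table embedding its rt_uncork_callback */
struct my_table {
  const char *name;
  struct { struct callback cb; } nhu_uncork;
};

/* SKIP_BACK boils down to container_of: step back from the member address */
#define SKIP_BACK(type, member, ptr) \
  ((type *)((char *)(ptr) - offsetof(type, member)))

static void nhu_uncork_hook(struct callback *cb)
{
  struct my_table *tab = SKIP_BACK(struct my_table, nhu_uncork.cb, cb);
  printf("next hop update uncorked for table %s\n", tab->name);
}

int main(void)
{
  struct my_table t = { .name = "master4", .nhu_uncork.cb.hook = nhu_uncork_hook };

  /* Roughly what callback_activate() ends up doing in the table's loop */
  t.nhu_uncork.cb.hook(&t.nhu_uncork.cb);
  return 0;
}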

View File

@@ -573,8 +573,6 @@ bgp_down(struct bgp_proto *p)
bgp_close(p);
}
rfree(p->uncork_do_ev);
p->neigh = NULL;
BGP_TRACE(D_EVENTS, "Down");
@@ -1732,8 +1730,7 @@ bgp_start(struct proto *P)
p->last_rx_update = 0;
p->event = ev_new_init(p->p.pool, bgp_decision, p);
p->uncork_main_ev = ev_new_init(p->p.pool, bgp_uncork_main, p);
p->uncork_do_ev = ev_new_init(p->p.pool, bgp_do_uncork, p);
callback_init(&p->uncork.cb, bgp_do_uncork, p->p.loop);
p->startup_timer = tm_new_init(p->p.pool, bgp_startup_timeout, p, 0, 0);
p->gr_timer = tm_new_init(p->p.pool, bgp_graceful_restart_timeout, p, 0, 0);

View File

@@ -364,8 +364,7 @@ struct bgp_proto {
struct bgp_listen_request listen; /* Shared listening socket */
struct bfd_request *bfd_req; /* BFD request, if BFD is used */
struct birdsock *postponed_sk; /* Postponed incoming socket for dynamic BGP */
event *uncork_main_ev; /* Uncork event for mainloop */
event *uncork_do_ev; /* Uncork event to actually uncork */
struct rt_uncork_callback uncork; /* Uncork hook */
struct bgp_stats stats; /* BGP statistics */
btime last_established; /* Last time of enter/leave of established state */
btime last_rx_update; /* Last time of RX update */
@@ -711,8 +710,7 @@ void bgp_schedule_packet(struct bgp_conn *conn, struct bgp_channel *c, int type)
void bgp_kick_tx(void *vconn);
void bgp_tx(struct birdsock *sk);
int bgp_rx(struct birdsock *sk, uint size);
void bgp_uncork_main(void *vp);
void bgp_do_uncork(void *vp);
void bgp_do_uncork(callback *);
const char * bgp_error_dsc(unsigned code, unsigned subcode);
void bgp_log_error(struct bgp_proto *p, u8 class, char *msg, unsigned code, unsigned subcode, byte *data, unsigned len);

View File

@@ -3441,9 +3441,10 @@ bgp_rx_packet(struct bgp_conn *conn, byte *pkt, uint len)
}
void
bgp_do_uncork(void *vp)
bgp_do_uncork(callback *cb)
{
struct bgp_proto *p = vp;
SKIP_BACK_DECLARE(struct bgp_proto, p, uncork.cb, cb);
ASSERT_DIE(birdloop_inside(p->p.loop));
ASSERT_DIE(p->p.active_loops--);
@@ -3459,16 +3460,6 @@ bgp_do_uncork(void *vp)
}
}
void
bgp_uncork_main(void *vp)
{
/* The uncork event is run from &main_birdloop and there is no useful way how
* to assign the target loop to it, thus we have to lock it ourselves. */
struct bgp_proto *p = vp;
ev_send_loop(p->p.loop, p->uncork_do_ev);
}
/**
* bgp_rx - handle received data
* @sk: socket
@@ -3493,7 +3484,7 @@ bgp_rx(sock *sk, uint size)
{
if ((conn->state == BS_CLOSE) || (conn->sk != sk))
return 0;
if ((conn->state == BS_ESTABLISHED) && rt_cork_check(conn->bgp->uncork_main_ev))
if ((conn->state == BS_ESTABLISHED) && rt_cork_check(&conn->bgp->uncork))
{
sk_pause_rx(p->p.loop, sk);
p->p.active_loops++;