mirror of https://gitlab.nic.cz/labs/bird.git

Kernel protocol now uses out_table resync mechanism for periodic scanning

parent 73ee8e18b3
commit f23f014ef3

nest/proto.c  10
@@ -542,8 +542,12 @@ channel_reset_import(struct channel *c)
 static void
 channel_reset_export(struct channel *c)
 {
+  if (c->out_flush_refeed)
+    rt_out_flush(c);
+
   /* Just free the routes */
   rt_prune_sync(c->out_table, 1);
+  bmap_reset(&c->out_seen_map, 1024);
 }
 
 /* Called by protocol to activate in_table */
@@ -584,6 +588,10 @@ channel_do_start(struct channel *c)
 
   bmap_init(&c->export_map, c->proto->pool, 1024);
   bmap_init(&c->export_reject_map, c->proto->pool, 1024);
+
+  if (c->out_table)
+    bmap_init(&c->out_seen_map, c->proto->pool, 1024);
+
   memset(&c->stats, 0, sizeof(struct proto_stats));
 
   channel_reset_limit(&c->rx_limit);
@@ -618,6 +626,8 @@ channel_do_flush(struct channel *c)
   /* This have to be done in here, as channel pool is freed before channel_do_down() */
   bmap_free(&c->export_map);
   bmap_free(&c->export_reject_map);
+  if (c->out_table)
+    bmap_free(&c->out_seen_map);
   c->in_table = NULL;
   c->reload_event = NULL;
   c->out_table = NULL;
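The bmap_* calls used in the three hunks above (bmap_init, bmap_reset, bmap_free, and later bmap_set/bmap_test) operate on BIRD's growable bitmaps keyed by route id. As a rough, self-contained illustration of what such a bitmap does — not BIRD's actual lib/bitmap implementation, and with hypothetical xbmap_* names — a minimal sketch:

/* Minimal sketch of a growable bitmap keyed by u32 ids (illustrative only). */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct xbmap { uint32_t *data; size_t words; };

static void xbmap_init(struct xbmap *b, size_t bits)
{
  b->words = (bits + 31) / 32;
  b->data = calloc(b->words, sizeof(uint32_t));
}

static void xbmap_grow(struct xbmap *b, size_t words)
{
  if (words <= b->words) return;
  b->data = realloc(b->data, words * sizeof(uint32_t));
  memset(b->data + b->words, 0, (words - b->words) * sizeof(uint32_t));
  b->words = words;
}

static void xbmap_set(struct xbmap *b, uint32_t id)
{
  xbmap_grow(b, id / 32 + 1);
  b->data[id / 32] |= 1u << (id % 32);
}

static int xbmap_test(const struct xbmap *b, uint32_t id)
{
  if (id / 32 >= b->words) return 0;
  return !!(b->data[id / 32] & (1u << (id % 32)));
}

static void xbmap_reset(struct xbmap *b, size_t bits)
{
  /* Shrink back to the initial size and clear all bits */
  b->words = (bits + 31) / 32;
  b->data = realloc(b->data, b->words * sizeof(uint32_t));
  memset(b->data, 0, b->words * sizeof(uint32_t));
}

int main(void)
{
  struct xbmap seen;
  xbmap_init(&seen, 1024);
  xbmap_set(&seen, 42);
  printf("42: %d, 43: %d\n", xbmap_test(&seen, 42), xbmap_test(&seen, 43));
  xbmap_reset(&seen, 1024);
  printf("after reset, 42: %d\n", xbmap_test(&seen, 42));
  free(seen.data);
  return 0;
}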
@@ -509,6 +509,7 @@ struct channel {
   const struct filter *out_filter;    /* Output filter */
   struct bmap export_map;             /* Keeps track which routes were really exported */
   struct bmap export_reject_map;      /* Keeps track which routes were rejected by export filter */
+  struct bmap out_seen_map;           /* Keeps track which routes have been seen by out_sync */
   struct channel_limit rx_limit;      /* Receive limit (for in_keep_filtered) */
   struct channel_limit in_limit;      /* Input limit */
   struct channel_limit out_limit;     /* Output limit */
@@ -546,6 +547,7 @@ struct channel {
   u8 reload_pending;                  /* Reloading and another reload is scheduled */
   u8 refeed_pending;                  /* Refeeding and another refeed is scheduled */
   u8 rpki_reload;                     /* RPKI changes trigger channel reload */
+  u8 out_flush_refeed;                /* Feed by withdrawals on export reset */
 
   list net_feed;                      /* Active net feeders (struct channel_net_feed) */
 
@@ -367,8 +367,12 @@ void rt_feed_channel_abort(struct channel *c);
 int rt_reload_channel(struct channel *c);
 void rt_reload_channel_abort(struct channel *c);
 void rt_prune_sync(rtable *t, int all);
-int rte_update_out(struct channel *c, rte *new, rte *old, struct rte_storage **old_stored, int refeed);
+int rte_update_out(struct channel *c, rte *new, rte *old, struct rte_storage **old_stored, u32 id, int refeed);
 struct rtable_config *rt_new_table(struct symbol *s, uint addr_type);
+void rt_out_sync_start(struct channel *c);
+_Bool rt_out_sync_mark(struct channel *c, struct rte_export *e);
+void rt_out_sync_finish(struct channel *c);
+void rt_out_flush(struct channel *c);
 
 
 /* Default limit for ECMP next hops, defined in sysdep code */
@@ -691,7 +691,7 @@ rt_notify_merged(struct channel *c, struct rte_export_internal *e)
   /* Prepare new merged route */
   if (e->new_best)
   {
-    ep->new_id = e->new->id;
+    ep->new_id = e->new_best->id;
     if (!rt_export_merged(c, e->net, &ep->new, rte_update_pool, 0))
       ep->new.attrs = NULL;
   }
@@ -774,7 +774,7 @@ rte_export_store(struct channel *c, struct rte_export_internal *e)
   /* Apply export table */
   if (c->out_table)
   {
-    if (!rte_update_out(c, &(e->pub.new), &(e->pub.old), &(e->old_stored), e->refeed))
+    if (!rte_update_out(c, &(e->pub.new), &(e->pub.old), &(e->old_stored), e->pub.new_id, e->refeed))
       return 0;
   }
   else if (c->out_filter != FILTER_ACCEPT)
@@ -2471,7 +2471,7 @@ rt_prune_sync(rtable *t, int all)
  */
 
 int
-rte_update_out(struct channel *c, rte *new, rte *old, struct rte_storage **old_stored, int refeed)
+rte_update_out(struct channel *c, rte *new, rte *old, struct rte_storage **old_stored, u32 id, int refeed)
 {
   struct rtable *tab = c->out_table;
   struct rte_storage **pos;
@@ -2533,6 +2533,7 @@ rte_update_out(struct channel *c, rte *new, rte *old, struct rte_storage **old_s
   struct rte_storage *e = rte_store(new, net);
   e->sender = c;
   e->lastmod = current_time();
+  e->id = id;
   e->next = *pos;
   *pos = e;
   tab->rt_count++;
@@ -2545,6 +2546,79 @@ drop_withdraw:
   return 0;
 }
 
+void
+rt_out_sync_start(struct channel *c)
+{
+  ASSERT_DIE(c->out_table);
+  ASSERT_DIE(c->ra_mode != RA_ANY);
+  bmap_reset(&c->out_seen_map, 1024);
+}
+
+_Bool
+rt_out_sync_mark(struct channel *c, struct rte_export *e)
+{
+  ASSERT_DIE(c->out_table);
+  ASSERT_DIE(c->ra_mode != RA_ANY);
+
+  net *n = net_find(c->out_table, e->old.net);
+  if (!n || !n->routes)
+    return 1;
+
+  e->new = rte_copy(n->routes);
+  e->new_id = n->routes->id;
+
+  if (bmap_test(&c->out_seen_map, n->routes->id))
+    return 0;
+
+  bmap_set(&c->out_seen_map, n->routes->id);
+  return 1;
+}
+
+void
+rt_out_sync_finish(struct channel *c)
+{
+  ASSERT_DIE(c->out_table);
+  ASSERT_DIE(c->ra_mode != RA_ANY);
+
+  FIB_WALK(&c->out_table->fib, net, n)
+  {
+    if (!n->routes)
+      continue;
+
+    if (!bmap_test(&c->out_seen_map, n->routes->id))
+    {
+      struct rte_export ex = {
+        .new_id = n->routes->id,
+        .new = rte_copy(n->routes),
+      };
+
+      c->proto->rt_notify(c, &ex);
+    }
+  }
+  FIB_WALK_END;
+  bmap_reset(&c->out_seen_map, 1024);
+}
+
+void
+rt_out_flush(struct channel *c)
+{
+  ASSERT_DIE(c->out_table);
+  ASSERT_DIE(c->ra_mode != RA_ANY);
+
+  FIB_WALK(&c->out_table->fib, net, n)
+  {
+    if (!n->routes)
+      continue;
+
+    struct rte_export ex = {
+      .old_id = n->routes->id,
+      .old = rte_copy(n->routes),
+    };
+
+    c->proto->rt_notify(c, &ex);
+  }
+  FIB_WALK_END;
+}
+
 /*
  * Hostcache
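Taken together, the four functions added above give the out_table a mark-and-sweep resync cycle: rt_out_sync_start() clears the out_seen_map, rt_out_sync_mark() marks the best route of each prefix reported by a scan (and reports duplicates), rt_out_sync_finish() walks the whole table and re-announces whatever was not marked, and rt_out_flush() withdraws everything. The toy program below mirrors that cycle with plain arrays instead of BIRD's fib and bmap structures; all names in it are hypothetical and it is only a sketch of the mechanism:

/* Toy mark-and-sweep resync over route ids (illustration of the mechanism only). */
#include <stdbool.h>
#include <stdio.h>

#define N_ROUTES 8

static bool seen[N_ROUTES];            /* analog of c->out_seen_map */
static bool exported[N_ROUTES] = {     /* analog of the out_table contents */
  true, true, false, true, false, true, true, false
};

static void sync_start(void)
{
  for (int i = 0; i < N_ROUTES; i++)
    seen[i] = false;                   /* like rt_out_sync_start(): reset the bitmap */
}

/* Called once per route found by the scan.
 * Returns false when the route was already seen (duplicate report). */
static bool sync_mark(int id)
{
  if (!exported[id])
    return true;                       /* not ours -> caller will delete it */
  if (seen[id])
    return false;                      /* already seen in this scan */
  seen[id] = true;
  return true;
}

static void sync_finish(void)
{
  /* Anything exported but not reported by the scan is missing downstream:
   * re-announce it (rt_out_sync_finish() does this via the rt_notify hook). */
  for (int id = 0; id < N_ROUTES; id++)
    if (exported[id] && !seen[id])
      printf("reinstall route %d\n", id);
}

int main(void)
{
  sync_start();
  int kernel_scan[] = { 0, 3, 3, 6 };  /* ids reported by a pretend kernel dump */
  for (unsigned i = 0; i < sizeof(kernel_scan) / sizeof(kernel_scan[0]); i++)
    if (!sync_mark(kernel_scan[i]))
      printf("route %d reported twice, ignoring\n", kernel_scan[i]);
  sync_finish();                       /* expect: reinstall 1 and 5 */
  return 0;
}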
@@ -68,14 +68,12 @@
  */
 
 pool *krt_pool;
-static linpool *krt_filter_lp;
 static list krt_proto_list;
 
 void
 krt_io_init(void)
 {
   krt_pool = rp_new(&root_pool, "Kernel Syncer");
-  krt_filter_lp = lp_new_default(krt_pool);
   init_list(&krt_proto_list);
   krt_sys_io_init();
 }
@@ -538,61 +536,6 @@ krt_dump(struct proto *P)
  * Routes
  */
 
-static inline int
-krt_is_installed(struct krt_proto *p, net *n)
-{
-  return n->routes && bmap_test(&p->p.main_channel->export_map, n->routes->id);
-}
-
-static void
-krt_flush_routes(struct krt_proto *p)
-{
-  struct rtable *t = p->p.main_channel->table;
-
-  KRT_TRACE(p, D_EVENTS, "Flushing kernel routes");
-  FIB_WALK(&t->fib, net, n)
-  {
-    if (krt_is_installed(p, n))
-    {
-      struct rte_export e = {
-        .old = rte_copy(n->routes),
-        .old_id = n->routes->id,
-      };
-
-      /* FIXME: this does not work if gw is changed in export filter */
-      krt_replace_rte(p, &e);
-    }
-  }
-  FIB_WALK_END;
-}
-
-static _Bool
-krt_export_net(struct krt_proto *p, net *net, rte *rt)
-{
-  struct channel *c = p->p.main_channel;
-  const struct filter *filter = c->out_filter;
-
-  if (c->ra_mode == RA_MERGED)
-    return rt_export_merged(c, net, rt, krt_filter_lp, 1);
-
-  if (!rte_is_valid(net->routes))
-    return 0;
-
-  if (filter == FILTER_REJECT)
-    return 0;
-
-  /* We could run krt_preexport() here, but it is already handled by krt_is_installed() */
-  *rt = rte_copy(net->routes);
-
-  if (filter == FILTER_ACCEPT)
-    return 1;
-
-  if (f_run(filter, rt, krt_filter_lp, FF_SILENT) > F_ACCEPT)
-    return 0;
-
-  return 1;
-}
-
 static int
 krt_same_dest(rte *k, rte *e)
 {
@@ -644,24 +587,14 @@ krt_got_route(struct krt_proto *p, rte *e, s8 src)
   if (!p->ready)
     goto ignore;
 
-  net *n = net_find(p->p.main_channel->table, e->net);
-
-  if (!n || !krt_is_installed(p, n))
-    goto delete;
-
-  /* Rejected by filters */
-  if (!krt_export_net(p, n, &ex.new))
-    goto delete;
-
-  /* Route to this destination was already seen. Strange, but it happens... */
-  if (bmap_test(&p->seen_map, n->routes->id))
+  if (!rt_out_sync_mark(p->p.main_channel, &ex))
     goto aseen;
 
-  /* Mark route as seen */
-  bmap_set(&p->seen_map, n->routes->id);
+  if (!ex.new.attrs)
+    goto delete;
 
   /* TODO: There also may be changes in route eattrs, we ignore that for now. */
-  if (!bmap_test(&p->sync_map, n->routes->id) || !krt_same_dest(e, &ex.new))
+  if (!bmap_test(&p->sync_map, ex.new_id) || !krt_same_dest(e, &ex.new))
     goto update;
 
   goto seen;
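After this change krt_got_route() no longer touches the main table: rt_out_sync_mark() answers both questions at once — whether the prefix is exported at all (it fills ex.new from the out table, otherwise ex.new.attrs stays NULL) and whether it was already reported during this scan. A compact restatement of the resulting decision chain, as a self-contained sketch with hypothetical names (illustration only, not the actual code path):

/* Decision chain of the reworked krt_got_route(), restated without gotos. */
#include <stdbool.h>
#include <stdio.h>

enum verdict { IGNORE, ALREADY_SEEN, DELETE, UPDATE, SEEN };

static enum verdict classify(bool proto_ready,
                             bool mark_ok,        /* rt_out_sync_mark() returned 1 */
                             bool have_export,    /* ex.new.attrs != NULL */
                             bool in_sync_map,    /* kernel already has this id */
                             bool same_dest)      /* kernel next hop matches ours */
{
  if (!proto_ready)
    return IGNORE;          /* before the first feed, leave kernel routes alone */
  if (!mark_ok)
    return ALREADY_SEEN;    /* duplicate report for an id marked earlier */
  if (!have_export)
    return DELETE;          /* prefix not in the out table -> remove from kernel */
  if (!in_sync_map || !same_dest)
    return UPDATE;          /* not written yet, or next hop differs -> rewrite */
  return SEEN;              /* kernel route matches what we exported */
}

int main(void)
{
  printf("%d\n", classify(true, true, false, false, false));  /* DELETE */
  printf("%d\n", classify(true, true, true, true, false));    /* UPDATE */
  printf("%d\n", classify(true, true, true, true, true));     /* SEEN */
  return 0;
}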
@@ -680,7 +613,6 @@ ignore:
 
 update:
   krt_trace_in(p, &ex.new, "updating");
-  ex.new_id = n->routes->id;
   krt_replace_rte(p, &ex);
   goto done;
 
@@ -690,39 +622,23 @@ delete:
   goto done;
 
 done:
-  lp_flush(krt_filter_lp);
   return;
 }
 
 static void
 krt_init_scan(struct krt_proto *p)
 {
-  bmap_reset(&p->seen_map, 1024);
+  rt_out_sync_start(p->p.main_channel);
 }
 
 static void
 krt_prune(struct krt_proto *p)
 {
-  struct rtable *t = p->p.main_channel->table;
+  KRT_TRACE(p, D_EVENTS, "Sync finished, pruning table %s", p->p.main_channel->table->name);
 
-  KRT_TRACE(p, D_EVENTS, "Pruning table %s", t->name);
-  FIB_WALK(&t->fib, net, n)
-  {
-    if (p->ready && krt_is_installed(p, n) && !bmap_test(&p->seen_map, n->routes->id))
-    {
-      struct rte_export ex = {
-        .new_id = n->routes->id
-      };
-
-      if (krt_export_net(p, n, &ex.new))
-      {
-        krt_trace_in(p, &ex.new, "installing");
-        krt_replace_rte(p, &ex);
-      }
-
-      lp_flush(krt_filter_lp);
-    }
-  }
-  FIB_WALK_END;
+  p->pruning = 1;
+  rt_out_sync_finish(p->p.main_channel);
+  p->pruning = 0;
 
 #ifdef KRT_ALLOW_LEARN
   if (KRT_CF->learn)
@@ -897,7 +813,7 @@ krt_rt_notify(struct channel *ch, struct rte_export *e)
     return;
 #endif
 
-  if (p->initialized)   /* Before first scan we don't touch the routes */
+  if (p->initialized || p->ready && p->pruning)   /* Before first scan we don't touch the routes */
     krt_replace_rte(p, e);
 }
 
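Two details of the krt_prune()/krt_rt_notify() pair above are easy to miss: && binds tighter than ||, so the new condition is equivalent to p->initialized || (p->ready && p->pruning); and the pruning flag is presumably what lets the re-announcements generated by rt_out_sync_finish() reach the kernel even before the first scan has marked the protocol as initialized (an inference from the diff, not a statement by the author). A self-contained toy of that hand-off, with hypothetical names:

/* Toy version of the prune/notify hand-off guarded by a 'pruning' flag. */
#include <stdbool.h>
#include <stdio.h>

struct kproto { bool initialized, ready, pruning; };

static void notify(struct kproto *p, int id)
{
  /* Same condition as the diff, with explicit parentheses for clarity */
  if (p->initialized || (p->ready && p->pruning))
    printf("write route %d to kernel\n", id);
  else
    printf("drop notification for route %d\n", id);
}

/* Stand-in for rt_out_sync_finish(): re-announce everything not seen in the scan */
static void sync_finish(struct kproto *p)
{
  notify(p, 1);
  notify(p, 5);
}

static void prune(struct kproto *p)
{
  p->pruning = true;
  sync_finish(p);      /* notifications arrive while the flag is set */
  p->pruning = false;
}

int main(void)
{
  struct kproto p = { .initialized = false, .ready = true, .pruning = false };
  notify(&p, 7);       /* before the first scan: dropped */
  prune(&p);           /* during prune: written */
  return 0;
}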
@@ -994,6 +910,7 @@ krt_init(struct proto_config *CF)
   // struct krt_config *cf = (void *) CF;
 
   p->p.main_channel = proto_add_channel(&p->p, proto_cf_main_channel(CF));
+  p->p.main_channel->out_flush_refeed = 1;
 
   p->p.preexport = krt_preexport;
   p->p.rt_notify = krt_rt_notify;
@@ -1022,12 +939,11 @@ krt_start(struct proto *P)
   }
 
-  /* If it is needed, setup out table automagically */
-  if (!TRIVIAL_FILTER(p->p.main_channel->out_filter))
+  /* For now, setup out table always.
+  if (!TRIVIAL_FILTER(p->p.main_channel->out_filter)) */
   channel_setup_out_table(p->p.main_channel);
 
 
   bmap_init(&p->sync_map, p->p.pool, 1024);
-  bmap_init(&p->seen_map, p->p.pool, 1024);
   add_tail(&krt_proto_list, &p->krt_node);
 
 #ifdef KRT_ALLOW_LEARN
@@ -1057,7 +973,7 @@ krt_shutdown(struct proto *P)
 
   /* FIXME we should flush routes even when persist during reconfiguration */
   if (p->initialized && !KRT_CF->persist && (P->down_code != PDC_CMD_GR_DOWN))
-    krt_flush_routes(p);
+    rt_out_flush(p->p.main_channel);
 
   p->ready = 0;
   p->initialized = 0;
@@ -1073,12 +989,9 @@ krt_shutdown(struct proto *P)
 }
 
 static int
-krt_channel_reconfigure(struct channel *C, struct channel_config *CC, int *import_changed UNUSED, int *export_changed)
+krt_channel_reconfigure(struct channel *C UNUSED, struct channel_config *CC UNUSED, int *import_changed UNUSED, int *export_changed)
 {
-  if (!*export_changed)
-    return 1;
-
-  return (TRIVIAL_FILTER(C->out_filter) == TRIVIAL_FILTER(CC->out_filter));
+  return !*export_changed;
 }
 
 static int
@@ -60,11 +60,11 @@ struct krt_proto {
 #endif
 
   struct bmap sync_map;      /* Keeps track which exported routes were successfully written to kernel */
-  struct bmap seen_map;      /* Routes seen during last periodic scan */
   node krt_node;             /* Node in krt_proto_list */
   byte af;                   /* Kernel address family (AF_*) */
   byte ready;                /* Initial feed has been finished */
   byte initialized;          /* First scan has been finished */
+  byte pruning;              /* Pruning */
   byte reload;               /* Next scan is doing reload */
 };