mirror of https://gitlab.nic.cz/labs/bird.git synced 2024-12-22 17:51:53 +00:00

Revert "Special table hooks rectified."

This reverts commit 44f26c49f9.
Maria Matejka 2022-07-12 14:46:06 +02:00
parent 44f26c49f9
commit 1df20989c1
16 changed files with 846 additions and 682 deletions

View File

@ -109,15 +109,6 @@ add_head(list *l, node *n)
l->head = n; l->head = n;
} }
LIST_INLINE void
self_link(node *n)
{
ASSUME(n->prev == NULL);
ASSUME(n->next == NULL);
n->prev = n->next = n;
}
/** /**
* insert_node - insert a node to a list * insert_node - insert a node to a list
* @n: a new list node * @n: a new list node

View File

@ -78,7 +78,6 @@ typedef union list { /* In fact two overlayed nodes */
#define LIST_INLINE #define LIST_INLINE
void add_tail(list *, node *); void add_tail(list *, node *);
void add_head(list *, node *); void add_head(list *, node *);
void self_link(node *);
void rem_node(node *); void rem_node(node *);
void add_tail_list(list *, list *); void add_tail_list(list *, list *);
void init_list(list *); void init_list(list *);

View File

@ -644,12 +644,12 @@ r_args:
} }
| r_args IMPORT TABLE channel_arg { | r_args IMPORT TABLE channel_arg {
if (!$4->in_table) cf_error("No import table in channel %s.%s", $4->proto->name, $4->name); if (!$4->in_table) cf_error("No import table in channel %s.%s", $4->proto->name, $4->name);
rt_show_add_table($$, $4->in_table->tab); rt_show_add_table($$, $4->in_table);
$$->tables_defined_by = RSD_TDB_DIRECT; $$->tables_defined_by = RSD_TDB_DIRECT;
} }
| r_args EXPORT TABLE channel_arg { | r_args EXPORT TABLE channel_arg {
if (!$4->out_table) cf_error("No export table in channel %s.%s", $4->proto->name, $4->name); if (!$4->out_table) cf_error("No export table in channel %s.%s", $4->proto->name, $4->name);
rt_show_add_table($$, $4->out_table->tab); rt_show_add_table($$, $4->out_table);
$$->tables_defined_by = RSD_TDB_DIRECT; $$->tables_defined_by = RSD_TDB_DIRECT;
} }
| r_args FILTER filter { | r_args FILTER filter {
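(Usage note, not part of the patch: the grammar above is what makes "show route import table <proto>.<channel>" and "show route export table <proto>.<channel>" work from the CLI. With hypothetical names, "show route import table bgp1.ipv4" dumps that channel's import table, and the quoted cf_error() strings are what the parser prints when the channel has no such table configured.)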

View File

@ -47,7 +47,7 @@ static char *c_states[] = { "DOWN", "START", "UP", "STOP", "RESTART" };
extern struct protocol proto_unix_iface; extern struct protocol proto_unix_iface;
static void channel_aux_request_refeed(struct channel_aux_table *cat); static void channel_request_reload(struct channel *c);
static void proto_shutdown_loop(timer *); static void proto_shutdown_loop(timer *);
static void proto_rethink_goal(struct proto *p); static void proto_rethink_goal(struct proto *p);
static char *proto_state_name(struct proto *p); static char *proto_state_name(struct proto *p);
@ -88,9 +88,7 @@ channel_export_log_state_change(struct rt_export_request *req, u8 state)
switch (state) switch (state)
{ {
case TES_FEEDING: case TES_FEEDING:
if (c->out_table) if (c->proto->feed_begin)
rt_refresh_begin(&c->out_table->push);
else if (c->proto->feed_begin)
c->proto->feed_begin(c, !c->refeeding); c->proto->feed_begin(c, !c->refeeding);
break; break;
case TES_READY: case TES_READY:
@ -181,7 +179,6 @@ proto_find_channel_by_name(struct proto *p, const char *n)
} }
rte * channel_preimport(struct rt_import_request *req, rte *new, rte *old); rte * channel_preimport(struct rt_import_request *req, rte *new, rte *old);
rte * channel_in_preimport(struct rt_import_request *req, rte *new, rte *old);
void rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe); void rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
void rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe); void rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
@ -298,10 +295,14 @@ static void
channel_roa_in_changed(struct rt_subscription *s) channel_roa_in_changed(struct rt_subscription *s)
{ {
struct channel *c = s->data; struct channel *c = s->data;
int active = c->reload_event && ev_active(c->reload_event);
CD(c, "Reload triggered by RPKI change"); CD(c, "Reload triggered by RPKI change%s", active ? " - already active" : "");
channel_request_reload(c); if (!active)
channel_request_reload(c);
else
c->reload_pending = 1;
} }
static void static void
@ -443,6 +444,7 @@ channel_start_import(struct channel *c)
.dump_req = channel_dump_import_req, .dump_req = channel_dump_import_req,
.log_state_change = channel_import_log_state_change, .log_state_change = channel_import_log_state_change,
.preimport = channel_preimport, .preimport = channel_preimport,
.rte_modify = c->proto->rte_modify,
}; };
ASSERT(c->channel_state == CS_UP); ASSERT(c->channel_state == CS_UP);
@ -461,8 +463,7 @@ channel_start_export(struct channel *c)
{ {
if (c->out_req.hook) if (c->out_req.hook)
{ {
c->restart_export = 1; log(L_WARN "%s.%s: Attempted to start channel's already started export", c->proto->name, c->name);
log(L_WARN "%s.%s: Fast channel export restart", c->proto->name, c->name);
return; return;
} }
@ -513,7 +514,7 @@ channel_check_stopped(struct channel *c)
switch (c->channel_state) switch (c->channel_state)
{ {
case CS_STOP: case CS_STOP:
if (c->out_req.hook || c->in_req.hook || c->out_table || c->in_table) if (c->out_req.hook || c->in_req.hook)
return; return;
channel_set_state(c, CS_DOWN); channel_set_state(c, CS_DOWN);
@ -540,6 +541,9 @@ channel_import_stopped(struct rt_import_request *req)
req->hook = NULL; req->hook = NULL;
if (c->in_table)
rt_prune_sync(c->in_table, 1);
mb_free(c->in_req.name); mb_free(c->in_req.name);
c->in_req.name = NULL; c->in_req.name = NULL;
@ -562,16 +566,14 @@ channel_export_stopped(struct rt_export_request *req)
return; return;
} }
/* Free the routes from out_table */
if (c->out_table)
rt_prune_sync(c->out_table, 1);
mb_free(c->out_req.name); mb_free(c->out_req.name);
c->out_req.name = NULL; c->out_req.name = NULL;
if (c->restart_export) channel_check_stopped(c);
{
c->restart_export = 0;
channel_start_export(c);
}
else
channel_check_stopped(c);
} }
static void static void
@ -593,296 +595,72 @@ channel_feed_end(struct channel *c)
return; return;
} }
if (c->out_table) if (c->proto->feed_end)
rt_refresh_end(&c->out_table->push);
else if (c->proto->feed_end)
c->proto->feed_end(c); c->proto->feed_end(c);
if (c->refeed_pending) if (c->refeed_pending)
rt_stop_export(req, channel_export_stopped); rt_stop_export(req, channel_export_stopped);
}
#define CHANNEL_AUX_TABLE_DUMP_REQ(inout, imex, pgimex, pushget) static void \
channel_##inout##_##pushget##_dump_req(struct rt_##pgimex##_request *req) { \
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, pushget, req); \
debug(" Channel %s.%s " #imex " table " #pushget " request %p\n", cat->c->proto->name, cat->c->name, req); }
CHANNEL_AUX_TABLE_DUMP_REQ(in, import, import, push)
CHANNEL_AUX_TABLE_DUMP_REQ(in, import, export, get)
CHANNEL_AUX_TABLE_DUMP_REQ(out, export, import, push)
CHANNEL_AUX_TABLE_DUMP_REQ(out, export, export, get)
#undef CHANNEL_AUX_TABLE_DUMP_REQ
static uint channel_aux_imex(struct channel_aux_table *cat)
{
if (cat->c->in_table == cat)
return 0;
else if (cat->c->out_table == cat)
return 1;
else else
bug("Channel aux table must be in_table or out_table"); c->refeeding = 0;
}
/* Called by protocol for reload from in_table */
void
channel_schedule_reload(struct channel *c)
{
ASSERT(c->in_req.hook);
rt_reload_channel_abort(c);
ev_schedule_work(c->reload_event);
} }
static void static void
channel_aux_stopped(void *data) channel_reload_loop(void *ptr)
{ {
struct channel_aux_table *cat = data; struct channel *c = ptr;
struct channel *c = cat->c;
if (channel_aux_imex(cat)) /* Start reload */
c->out_table = NULL; if (!c->reload_active)
else c->reload_pending = 0;
c->in_table = NULL;
mb_free(cat); if (!rt_reload_channel(c))
return channel_check_stopped(c);
}
static void
channel_aux_import_stopped(struct rt_import_request *req)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, push, req);
ASSERT_DIE(cat->stop);
}
static void
channel_aux_export_stopped(struct rt_export_request *req)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, push, req);
req->hook = NULL;
if (cat->refeed_pending && !cat->stop)
{ {
cat->refeed_pending = 0; ev_schedule_work(c->reload_event);
rt_request_export(cat->tab, req);
}
else
ASSERT_DIE(cat->stop);
}
static void
channel_aux_stop(struct channel_aux_table *cat)
{
cat->stop = 1;
rt_stop_import(&cat->push, channel_aux_import_stopped);
rt_stop_export(&cat->get, channel_aux_export_stopped);
rt_lock_table(cat->tab);
cat->tab->deleted = channel_aux_stopped;
cat->tab->del_data = cat;
rt_unlock_table(cat->tab);
}
static void
channel_push_log_state_change(struct rt_import_request *req, u8 state)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, push, req);
const char *imex = channel_aux_imex(cat) ? "export" : "import";
CD(cat->c, "Channel %s table import state changed to %s", imex, rt_import_state_name(state));
}
static void
channel_get_log_state_change(struct rt_export_request *req, u8 state)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
const char *imex = channel_aux_imex(cat) ? "export" : "import";
CD(cat->c, "Channel %s table export state changed to %s", imex, rt_export_state_name(state));
switch (state)
{
case TES_FEEDING:
if (imex && cat->c->proto->feed_begin)
cat->c->proto->feed_begin(cat->c, !cat->c->refeeding);
else if (!imex)
rt_refresh_begin(&cat->c->in_req);
break;
case TES_READY:
if (imex && cat->c->proto->feed_end)
cat->c->proto->feed_end(cat->c);
else if (!imex)
rt_refresh_end(&cat->c->in_req);
if (cat->refeed_pending)
rt_stop_export(&cat->get, channel_aux_export_stopped);
break;
}
}
void rte_update_direct(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
static void
channel_in_export_one_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
if (!rpe->new && !rpe->old)
return; return;
rte n0;
struct rte_src *src = rpe->new ? rpe->new->rte.src : rpe->old->rte.src;
rte_update_direct(cat->c, net, RTES_CLONE(rpe->new, &n0), src);
}
static void
channel_in_export_one_best(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
if (!rpe->new && !rpe->old)
return;
rte n0;
struct rte_src *src = rpe->old_best ? rpe->old_best->rte.src : rpe->new_best->rte.src;
rte_update_direct(cat->c, net, RTES_CLONE(rpe->new_best, &n0), src);
}
static void
channel_in_export_bulk_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
for (uint i=0; i<count; i++)
{
rte n0 = *feed[i];
rte_update_direct(cat->c, net, &n0, n0.src);
} }
}
static void /* Restart reload */
channel_in_export_bulk_best(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, rte **feed, uint count) if (c->reload_pending)
{ channel_request_reload(c);
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
if (!count)
return;
rte n0 = *feed[0];
rte_update_direct(cat->c, net, &n0, n0.src);
}
void do_rt_notify_direct(struct channel *c, const net_addr *net, rte *new, const rte *old);
static void
channel_out_export_one_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
rte n0;
do_rt_notify_direct(cat->c, net, RTES_CLONE(rpe->new, &n0), RTES_OR_NULL(rpe->old));
}
static void
channel_out_export_one_best(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
rte n0;
do_rt_notify_direct(cat->c, net, RTES_CLONE(rpe->new_best, &n0), RTES_OR_NULL(rpe->old_best));
}
static void
channel_out_export_bulk(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
if (cat->c->ra_mode != RA_ANY)
ASSERT_DIE(count <= 1);
for (uint i=0; i<count; i++)
{
rte n0 = *feed[i];
do_rt_notify_direct(cat->c, net, &n0, NULL);
}
} }
/* Called by protocol to activate in_table */ /* Called by protocol to activate in_table */
void void
channel_setup_in_table(struct channel *c, int best) channel_setup_in_table(struct channel *c)
{ {
int nlen = sizeof("import") + strlen(c->name) + strlen(c->proto->name) + 3; struct rtable_config *cf = mb_allocz(c->proto->pool, sizeof(struct rtable_config));
struct { cf->name = "import";
struct channel_aux_table cat; cf->addr_type = c->net_type;
struct rtable_config tab_cf; cf->internal = 1;
char name[0];
} *cat = mb_allocz(c->proto->pool, sizeof(*cat) + nlen);
bsprintf(cat->name, "%s.%s.import", c->proto->name, c->name); c->in_table = rt_setup(c->proto->pool, cf);
cat->tab_cf.name = cat->name; c->reload_event = ev_new_init(c->proto->pool, channel_reload_loop, c);
cat->tab_cf.addr_type = c->net_type;
c->in_table = &cat->cat;
c->in_table->push = (struct rt_import_request) {
.name = cat->name,
.trace_routes = c->debug | c->proto->debug,
.dump_req = channel_in_push_dump_req,
.log_state_change = channel_push_log_state_change,
.preimport = channel_in_preimport,
};
c->in_table->get = (struct rt_export_request) {
.name = cat->name,
.trace_routes = c->debug | c->proto->debug,
.dump_req = channel_in_get_dump_req,
.log_state_change = channel_get_log_state_change,
.export_one = best ? channel_in_export_one_best : channel_in_export_one_any,
.export_bulk = best ? channel_in_export_bulk_best : channel_in_export_bulk_any,
};
c->in_table->c = c;
c->in_table->tab = rt_setup(c->proto->pool, &cat->tab_cf);
self_link(&c->in_table->tab->n);
rt_request_import(c->in_table->tab, &c->in_table->push);
rt_request_export(c->in_table->tab, &c->in_table->get);
} }
/* Called by protocol to activate out_table */ /* Called by protocol to activate out_table */
void void
channel_setup_out_table(struct channel *c) channel_setup_out_table(struct channel *c)
{ {
int nlen = sizeof("export") + strlen(c->name) + strlen(c->proto->name) + 3; struct rtable_config *cf = mb_allocz(c->proto->pool, sizeof(struct rtable_config));
cf->name = "export";
cf->addr_type = c->net_type;
cf->internal = 1;
struct { c->out_table = rt_setup(c->proto->pool, cf);
struct channel_aux_table cat;
struct rtable_config tab_cf;
char name[0];
} *cat = mb_allocz(c->proto->pool, sizeof(*cat) + nlen);
bsprintf(cat->name, "%s.%s.export", c->proto->name, c->name);
cat->tab_cf.name = cat->name;
cat->tab_cf.addr_type = c->net_type;
c->out_table = &cat->cat;
c->out_table->push = (struct rt_import_request) {
.name = cat->name,
.trace_routes = c->debug | c->proto->debug,
.dump_req = channel_out_push_dump_req,
.log_state_change = channel_push_log_state_change,
};
c->out_table->get = (struct rt_export_request) {
.name = cat->name,
.trace_routes = c->debug | c->proto->debug,
.dump_req = channel_out_get_dump_req,
.log_state_change = channel_get_log_state_change,
.export_one = (c->ra_mode == RA_ANY) ? channel_out_export_one_any : channel_out_export_one_best,
.export_bulk = channel_out_export_bulk,
};
c->out_table->c = c;
c->out_table->tab = rt_setup(c->proto->pool, &cat->tab_cf);
self_link(&c->out_table->tab->n);
rt_request_import(c->out_table->tab, &c->out_table->push);
rt_request_export(c->out_table->tab, &c->out_table->get);
} }
static void
channel_aux_request_refeed(struct channel_aux_table *cat)
{
cat->refeed_pending = 1;
rt_stop_export(&cat->get, channel_aux_export_stopped);
}
static void static void
channel_do_start(struct channel *c) channel_do_start(struct channel *c)
@ -908,12 +686,16 @@ channel_do_up(struct channel *c)
static void static void
channel_do_pause(struct channel *c) channel_do_pause(struct channel *c)
{ {
/* Need to abort feeding */
if (c->reload_event)
{
ev_postpone(c->reload_event);
rt_reload_channel_abort(c);
}
/* Stop export */ /* Stop export */
if (c->out_req.hook) if (c->out_req.hook)
{
rt_stop_export(&c->out_req, channel_export_stopped); rt_stop_export(&c->out_req, channel_export_stopped);
c->refeeding = 0;
}
channel_roa_unsubscribe_all(c); channel_roa_unsubscribe_all(c);
@ -924,13 +706,6 @@ channel_do_pause(struct channel *c)
static void static void
channel_do_stop(struct channel *c) channel_do_stop(struct channel *c)
{ {
/* Drop auxiliary tables */
if (c->in_table)
channel_aux_stop(c->in_table);
if (c->out_table)
channel_aux_stop(c->out_table);
/* Stop import */ /* Stop import */
if (c->in_req.hook) if (c->in_req.hook)
rt_stop_import(&c->in_req, channel_import_stopped); rt_stop_import(&c->in_req, channel_import_stopped);
@ -941,13 +716,16 @@ channel_do_stop(struct channel *c)
CALL(c->channel->shutdown, c); CALL(c->channel->shutdown, c);
channel_roa_unsubscribe_all(c); /* This have to be done in here, as channel pool is freed before channel_do_down() */
c->in_table = NULL;
c->reload_event = NULL;
c->out_table = NULL;
} }
static void static void
channel_do_down(struct channel *c) channel_do_down(struct channel *c)
{ {
ASSERT(!c->out_req.hook && !c->in_req.hook && !c->out_table && !c->in_table); ASSERT(!c->reload_active);
c->proto->active_channels--; c->proto->active_channels--;
@ -955,11 +733,13 @@ channel_do_down(struct channel *c)
memset(&c->import_stats, 0, sizeof(struct channel_import_stats)); memset(&c->import_stats, 0, sizeof(struct channel_import_stats));
memset(&c->export_stats, 0, sizeof(struct channel_export_stats)); memset(&c->export_stats, 0, sizeof(struct channel_export_stats));
CALL(c->channel->cleanup, c); c->in_table = NULL;
c->reload_event = NULL;
c->out_table = NULL;
/* This have to be done in here, as channel pool is freed before channel_do_down() */ /* The in_table and out_table are going to be freed by freeing their resource pools. */
bmap_free(&c->export_map);
bmap_free(&c->export_reject_map); CALL(c->channel->cleanup, c);
/* Schedule protocol shutddown */ /* Schedule protocol shutddown */
if (proto_is_done(c->proto)) if (proto_is_done(c->proto))
@ -989,7 +769,7 @@ channel_set_state(struct channel *c, uint state)
break; break;
case CS_UP: case CS_UP:
ASSERT(cs == CS_DOWN || cs == CS_START || cs == CS_PAUSE); ASSERT(cs == CS_DOWN || cs == CS_START);
if (cs == CS_DOWN) if (cs == CS_DOWN)
channel_do_start(c); channel_do_start(c);
@ -1039,8 +819,8 @@ channel_set_state(struct channel *c, uint state)
* completed, it will switch back to ES_READY. This function can be called * completed, it will switch back to ES_READY. This function can be called
* even when feeding is already running, in that case it is restarted. * even when feeding is already running, in that case it is restarted.
*/ */
static void void
channel_request_table_feeding(struct channel *c) channel_request_feeding(struct channel *c)
{ {
ASSERT(c->out_req.hook); ASSERT(c->out_req.hook);
@ -1048,18 +828,7 @@ channel_request_table_feeding(struct channel *c)
rt_stop_export(&c->out_req, channel_export_stopped); rt_stop_export(&c->out_req, channel_export_stopped);
} }
void static void
channel_request_feeding(struct channel *c)
{
ASSERT(c->out_req.hook);
if (c->out_table)
channel_aux_request_refeed(c->out_table);
else
channel_request_table_feeding(c);
}
void
channel_request_reload(struct channel *c) channel_request_reload(struct channel *c)
{ {
ASSERT(c->in_req.hook); ASSERT(c->in_req.hook);
@ -1067,29 +836,14 @@ channel_request_reload(struct channel *c)
CD(c, "Reload requested"); CD(c, "Reload requested");
if (c->in_table) c->proto->reload_routes(c);
channel_aux_request_refeed(c->in_table);
else
c->proto->reload_routes(c);
}
void /*
channel_refresh_begin(struct channel *c) * Should this be done before reload_routes() hook?
{ * Perhaps, but routes are updated asynchronously.
CD(c, "Channel route refresh begin"); */
if (c->in_table) channel_reset_limit(c, &c->rx_limit, PLD_RX);
rt_refresh_begin(&c->in_table->push); channel_reset_limit(c, &c->in_limit, PLD_IN);
else
rt_refresh_begin(&c->in_req);
}
void
channel_refresh_end(struct channel *c)
{
if (c->in_table)
rt_refresh_end(&c->in_table->push);
else
rt_refresh_end(&c->in_req);
} }
const struct channel_class channel_basic = { const struct channel_class channel_basic = {
@ -1247,7 +1001,7 @@ channel_reconfigure(struct channel *c, struct channel_config *cf)
channel_request_reload(c); channel_request_reload(c);
if (export_changed) if (export_changed)
channel_request_table_feeding(c); channel_request_feeding(c);
done: done:
CD(c, "Reconfigured"); CD(c, "Reconfigured");
@ -1960,7 +1714,7 @@ protos_dump_all(void)
WALK_LIST(p, proto_list) WALK_LIST(p, proto_list)
{ {
#define DPF(x) (p->x ? " " #x : "") #define DPF(x) (p->x ? " " #x : "")
debug(" protocol %s (%p) state %s with %d active channels flags: %s%s%s%s\n", debug(" protocol %s (%p) state %s with %d active channels flags: %s%s%s%s%s\n",
p->name, p, p_states[p->proto_state], p->active_channels, p->name, p, p_states[p->proto_state], p->active_channels,
DPF(disabled), DPF(active), DPF(do_start), DPF(do_stop), DPF(reconfiguring)); DPF(disabled), DPF(active), DPF(do_start), DPF(do_stop), DPF(reconfiguring));
#undef DPF #undef DPF
@ -1976,20 +1730,6 @@ protos_dump_all(void)
debug("\tChannel state: %s/%s/%s\n", c_states[c->channel_state], debug("\tChannel state: %s/%s/%s\n", c_states[c->channel_state],
c->in_req.hook ? rt_import_state_name(rt_import_get_state(c->in_req.hook)) : "-", c->in_req.hook ? rt_import_state_name(rt_import_get_state(c->in_req.hook)) : "-",
c->out_req.hook ? rt_export_state_name(rt_export_get_state(c->out_req.hook)) : "-"); c->out_req.hook ? rt_export_state_name(rt_export_get_state(c->out_req.hook)) : "-");
if (c->in_table)
{
debug("\tInput aux table:\n");
rt_dump_hooks(c->in_table->tab);
rt_dump(c->in_table->tab);
debug("\tEnd of input aux table.\n");
}
if (c->out_table)
{
debug("\tOutput aux table:\n");
rt_dump_hooks(c->in_table->tab);
rt_dump(c->in_table->tab);
debug("\tEnd of output aux table.\n");
}
} }
if (p->proto->dump && (p->proto_state != PS_DOWN)) if (p->proto->dump && (p->proto_state != PS_DOWN))
@ -2411,11 +2151,11 @@ channel_show_stats(struct channel *c)
cli_msg(-1006, " Routes: %u imported, %u exported, %u preferred", cli_msg(-1006, " Routes: %u imported, %u exported, %u preferred",
in_routes, out_routes, SRI(pref)); in_routes, out_routes, SRI(pref));
cli_msg(-1006, " Route change stats: received rejected filtered ignored limited accepted"); cli_msg(-1006, " Route change stats: received rejected filtered ignored RX limit IN limit accepted");
cli_msg(-1006, " Import updates: %10u %10u %10u %10u %10u %10u", cli_msg(-1006, " Import updates: %10u %10u %10u %10u %10u %10u %10u",
SCI(updates_received), SCI(updates_invalid), SCI(updates_received), SCI(updates_invalid),
SCI(updates_filtered), SRI(updates_ignored), SCI(updates_filtered), SRI(updates_ignored),
SCI(updates_limited_rx) + SCI(updates_limited_in), SCI(updates_limited_rx), SCI(updates_limited_in),
SRI(updates_accepted)); SRI(updates_accepted));
cli_msg(-1006, " Import withdraws: %10u %10u --- %10u --- %10u", cli_msg(-1006, " Import withdraws: %10u %10u --- %10u --- %10u",
SCI(withdraws_received), SCI(withdraws_invalid), SCI(withdraws_received), SCI(withdraws_invalid),

View File

@ -207,6 +207,7 @@ struct proto {
int (*rte_recalculate)(struct rtable *, struct network *, struct rte *, struct rte *, struct rte *); int (*rte_recalculate)(struct rtable *, struct network *, struct rte *, struct rte *, struct rte *);
int (*rte_better)(struct rte *, struct rte *); int (*rte_better)(struct rte *, struct rte *);
int (*rte_mergable)(struct rte *, struct rte *); int (*rte_mergable)(struct rte *, struct rte *);
struct rte *(*rte_modify)(struct rte *, struct linpool *);
void (*rte_insert)(struct network *, struct rte *); void (*rte_insert)(struct network *, struct rte *);
void (*rte_remove)(struct network *, struct rte *); void (*rte_remove)(struct network *, struct rte *);
u32 (*rte_igp_metric)(struct rte *); u32 (*rte_igp_metric)(struct rte *);
@ -543,29 +544,24 @@ struct channel {
u8 reloadable; /* Hook reload_routes() is allowed on the channel */ u8 reloadable; /* Hook reload_routes() is allowed on the channel */
u8 gr_lock; /* Graceful restart mechanism should wait for this channel */ u8 gr_lock; /* Graceful restart mechanism should wait for this channel */
u8 gr_wait; /* Route export to channel is postponed until graceful restart */ u8 gr_wait; /* Route export to channel is postponed until graceful restart */
u8 restart_export; /* Route export should restart as soon as it stops */
btime last_state_change; /* Time of last state transition */ btime last_state_change; /* Time of last state transition */
struct channel_aux_table *in_table; /* Internal table for received routes */ struct rtable *in_table; /* Internal table for received routes */
struct event *reload_event; /* Event responsible for reloading from in_table */
struct fib_iterator reload_fit; /* FIB iterator in in_table used during reloading */
struct rte_storage *reload_next_rte; /* Route iterator in in_table used during reloading */
u8 reload_active; /* Iterator reload_fit is linked */
u8 reload_pending; /* Reloading and another reload is scheduled */ u8 reload_pending; /* Reloading and another reload is scheduled */
u8 refeed_pending; /* Refeeding and another refeed is scheduled */ u8 refeed_pending; /* Refeeding and another refeed is scheduled */
u8 rpki_reload; /* RPKI changes trigger channel reload */ u8 rpki_reload; /* RPKI changes trigger channel reload */
struct channel_aux_table *out_table; /* Internal table for exported routes */ struct rtable *out_table; /* Internal table for exported routes */
list roa_subscriptions; /* List of active ROA table subscriptions based on filters roa_check() */ list roa_subscriptions; /* List of active ROA table subscriptions based on filters roa_check() */
}; };
struct channel_aux_table {
struct channel *c;
struct rt_import_request push;
struct rt_export_request get;
rtable *tab;
u8 stop;
u8 refeed_pending;
};
/* /*
* Channel states * Channel states
@ -631,7 +627,7 @@ struct channel *proto_add_channel(struct proto *p, struct channel_config *cf);
int proto_configure_channel(struct proto *p, struct channel **c, struct channel_config *cf); int proto_configure_channel(struct proto *p, struct channel **c, struct channel_config *cf);
void channel_set_state(struct channel *c, uint state); void channel_set_state(struct channel *c, uint state);
void channel_setup_in_table(struct channel *c, int best); void channel_setup_in_table(struct channel *c);
void channel_setup_out_table(struct channel *c); void channel_setup_out_table(struct channel *c);
void channel_schedule_reload(struct channel *c); void channel_schedule_reload(struct channel *c);
@ -640,9 +636,6 @@ static inline void channel_open(struct channel *c) { channel_set_state(c, CS_UP)
static inline void channel_close(struct channel *c) { channel_set_state(c, CS_STOP); } static inline void channel_close(struct channel *c) { channel_set_state(c, CS_STOP); }
void channel_request_feeding(struct channel *c); void channel_request_feeding(struct channel *c);
void channel_request_reload(struct channel *c);
void channel_refresh_begin(struct channel *c);
void channel_refresh_end(struct channel *c);
void *channel_config_new(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto); void *channel_config_new(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto);
void *channel_config_get(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto); void *channel_config_get(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto);
int channel_reconfigure(struct channel *c, struct channel_config *cf); int channel_reconfigure(struct channel *c, struct channel_config *cf);

View File

@ -150,6 +150,7 @@ struct rtable_config {
int gc_max_ops; /* Maximum number of operations before GC is run */ int gc_max_ops; /* Maximum number of operations before GC is run */
int gc_min_time; /* Minimum time between two consecutive GC runs */ int gc_min_time; /* Minimum time between two consecutive GC runs */
byte sorted; /* Routes of network are sorted according to rte_better() */ byte sorted; /* Routes of network are sorted according to rte_better() */
byte internal; /* Internal table of a protocol */
btime min_settle_time; /* Minimum settle time for notifications */ btime min_settle_time; /* Minimum settle time for notifications */
btime max_settle_time; /* Maximum settle time for notifications */ btime max_settle_time; /* Maximum settle time for notifications */
}; };
@ -171,8 +172,10 @@ typedef struct rtable {
struct hmap id_map; struct hmap id_map;
struct hostcache *hostcache; struct hostcache *hostcache;
struct rtable_config *config; /* Configuration of this table */ struct rtable_config *config; /* Configuration of this table */
void (*deleted)(void *); /* Table should free itself. Call this when it is done. */ struct config *deleted; /* Table doesn't exist in current configuration,
void *del_data; * delete as soon as use_count becomes 0 and remove
* obstacle from this routing table.
*/
struct event *rt_event; /* Routing table event */ struct event *rt_event; /* Routing table event */
btime last_rt_change; /* Last time when route changed */ btime last_rt_change; /* Last time when route changed */
btime base_settle_time; /* Start time of rtable settling interval */ btime base_settle_time; /* Start time of rtable settling interval */
@ -181,6 +184,7 @@ typedef struct rtable {
byte prune_state; /* Table prune state, 1 -> scheduled, 2-> running */ byte prune_state; /* Table prune state, 1 -> scheduled, 2-> running */
byte hcu_scheduled; /* Hostcache update is scheduled */ byte hcu_scheduled; /* Hostcache update is scheduled */
byte nhu_state; /* Next Hop Update state */ byte nhu_state; /* Next Hop Update state */
byte internal; /* This table is internal for some other object */
struct fib_iterator prune_fit; /* Rtable prune FIB iterator */ struct fib_iterator prune_fit; /* Rtable prune FIB iterator */
struct fib_iterator nhu_fit; /* Next Hop Update FIB iterator */ struct fib_iterator nhu_fit; /* Next Hop Update FIB iterator */
struct tbf rl_pipe; /* Rate limiting token buffer for pipe collisions */ struct tbf rl_pipe; /* Rate limiting token buffer for pipe collisions */
@ -245,7 +249,6 @@ typedef struct rte {
u8 generation; /* If this route import is based on other previously exported route, u8 generation; /* If this route import is based on other previously exported route,
this value should be 1 + MAX(generation of the parent routes). this value should be 1 + MAX(generation of the parent routes).
Otherwise the route is independent and this value is zero. */ Otherwise the route is independent and this value is zero. */
u8 stale_cycle; /* Auxiliary value for route refresh */
} rte; } rte;
struct rte_storage { struct rte_storage {
@ -253,11 +256,13 @@ struct rte_storage {
struct rte rte; /* Route data */ struct rte rte; /* Route data */
}; };
#define RTES_CLONE(r, l) ((r) ? (((*(l)) = (r)->rte), (l)) : NULL) #define RTE_COPY(r, l) ((r) ? (((*(l)) = (r)->rte), (l)) : NULL)
#define RTES_OR_NULL(r) ((r) ? &((r)->rte) : NULL) #define RTE_OR_NULL(r) ((r) ? &((r)->rte) : NULL)
#define REF_FILTERED 2 /* Route is rejected by import filter */ #define REF_FILTERED 2 /* Route is rejected by import filter */
#define REF_USE_STALE 4 /* Do not reset route's stale_cycle to the actual value */ #define REF_STALE 4 /* Route is stale in a refresh cycle */
#define REF_DISCARD 8 /* Route is scheduled for discard */
#define REF_MODIFY 16 /* Route is scheduled for modify */
/* Route is valid for propagation (may depend on other flags in the future), accepts NULL */ /* Route is valid for propagation (may depend on other flags in the future), accepts NULL */
static inline int rte_is_valid(const rte *r) { return r && !(r->flags & REF_FILTERED); } static inline int rte_is_valid(const rte *r) { return r && !(r->flags & REF_FILTERED); }
@ -278,6 +283,7 @@ struct rt_import_request {
/* Preimport is called when the @new route is just-to-be inserted, replacing @old. /* Preimport is called when the @new route is just-to-be inserted, replacing @old.
* Return a route (may be different or modified in-place) to continue or NULL to withdraw. */ * Return a route (may be different or modified in-place) to continue or NULL to withdraw. */
struct rte *(*preimport)(struct rt_import_request *req, struct rte *new, struct rte *old); struct rte *(*preimport)(struct rt_import_request *req, struct rte *new, struct rte *old);
struct rte *(*rte_modify)(struct rte *, struct linpool *);
}; };
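To illustrate the preimport contract documented just above, here is a minimal hypothetical hook; example_preimport and example_limit_hit are illustrative names only (assuming the declarations from this header), not code from the patch:

  static struct rte *
  example_preimport(struct rt_import_request *req, struct rte *new, struct rte *old)
  {
    /* Refusing a newly appearing route: returning NULL tells the import core
     * to withdraw instead of storing it. example_limit_hit() is a made-up
     * predicate standing in for any local policy. */
    if (new && !old && example_limit_hit(req))
      return NULL;

    /* Continue with the import; the route may also be modified in place. */
    return new;
  }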
struct rt_import_hook { struct rt_import_hook {
@ -297,10 +303,6 @@ struct rt_import_hook {
btime last_state_change; /* Time of last state transition */ btime last_state_change; /* Time of last state transition */
u8 import_state; /* IS_* */ u8 import_state; /* IS_* */
u8 stale_set; /* Set this stale_cycle to imported routes */
u8 stale_valid; /* Routes with this stale_cycle and bigger are considered valid */
u8 stale_pruned; /* Last prune finished when this value was set at stale_valid */
u8 stale_pruning; /* Last prune started when this value was set at stale_valid */
void (*stopped)(struct rt_import_request *); /* Stored callback when import is stopped */ void (*stopped)(struct rt_import_request *); /* Stored callback when import is stopped */
}; };
@ -453,9 +455,9 @@ void *net_route(rtable *tab, const net_addr *n);
int net_roa_check(rtable *tab, const net_addr *n, u32 asn); int net_roa_check(rtable *tab, const net_addr *n, u32 asn);
int rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter); int rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter);
rte *rt_export_merged(struct channel *c, rte ** feed, uint count, linpool *pool, int silent); rte *rt_export_merged(struct channel *c, rte ** feed, uint count, linpool *pool, int silent);
void rt_refresh_begin(rtable *t, struct rt_import_request *);
void rt_refresh_begin(struct rt_import_request *); void rt_refresh_end(rtable *t, struct rt_import_request *);
void rt_refresh_end(struct rt_import_request *); void rt_modify_stale(rtable *t, struct rt_import_request *);
void rt_schedule_prune(rtable *t); void rt_schedule_prune(rtable *t);
void rte_dump(struct rte_storage *); void rte_dump(struct rte_storage *);
void rte_free(struct rte_storage *, rtable *); void rte_free(struct rte_storage *, rtable *);
@ -464,9 +466,15 @@ void rt_dump(rtable *);
void rt_dump_all(void); void rt_dump_all(void);
void rt_dump_hooks(rtable *); void rt_dump_hooks(rtable *);
void rt_dump_hooks_all(void); void rt_dump_hooks_all(void);
int rt_reload_channel(struct channel *c);
void rt_reload_channel_abort(struct channel *c);
void rt_refeed_channel(struct channel *c);
void rt_prune_sync(rtable *t, int all); void rt_prune_sync(rtable *t, int all);
int rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
int rte_update_out(struct channel *c, const net_addr *n, rte *new, const rte *old, struct rte_storage **old_exported);
struct rtable_config *rt_new_table(struct symbol *s, uint addr_type); struct rtable_config *rt_new_table(struct symbol *s, uint addr_type);
/* Default limit for ECMP next hops, defined in sysdep code */ /* Default limit for ECMP next hops, defined in sysdep code */
extern const int rt_default_ecmp; extern const int rt_default_ecmp;
@ -781,7 +789,6 @@ void rta__free(rta *r);
static inline void rta_free(rta *r) { if (r && !--r->uc) rta__free(r); } static inline void rta_free(rta *r) { if (r && !--r->uc) rta__free(r); }
rta *rta_do_cow(rta *o, linpool *lp); rta *rta_do_cow(rta *o, linpool *lp);
static inline rta * rta_cow(rta *r, linpool *lp) { return rta_is_cached(r) ? rta_do_cow(r, lp) : r; } static inline rta * rta_cow(rta *r, linpool *lp) { return rta_is_cached(r) ? rta_do_cow(r, lp) : r; }
static inline void rta_uncache(rta *r) { r->cached = 0; r->uc = 0; }
void rta_dump(rta *); void rta_dump(rta *);
void rta_dump_all(void); void rta_dump_all(void);
void rta_show(struct cli *, rta *); void rta_show(struct cli *, rta *);

View File

@ -1245,7 +1245,8 @@ rta_do_cow(rta *o, linpool *lp)
memcpy(*nhn, nho, nexthop_size(nho)); memcpy(*nhn, nho, nexthop_size(nho));
nhn = &((*nhn)->next); nhn = &((*nhn)->next);
} }
rta_uncache(r); r->cached = 0;
r->uc = 0;
return r; return r;
} }

View File

@ -95,10 +95,7 @@ rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, int primary
} }
if (d->verbose) if (d->verbose)
{
cli_printf(c, -1008, "\tInternal route ID: %uL %uG %uS", e->src->private_id, e->src->global_id, e->stale_cycle);
rta_show(c, a); rta_show(c, a);
}
} }
static uint static uint
@ -106,7 +103,7 @@ rte_feed_count(net *n)
{ {
uint count = 0; uint count = 0;
for (struct rte_storage *e = n->routes; e; e = e->next) for (struct rte_storage *e = n->routes; e; e = e->next)
if (rte_is_valid(RTES_OR_NULL(e))) if (rte_is_valid(RTE_OR_NULL(e)))
count++; count++;
return count; return count;
} }
@ -116,7 +113,7 @@ rte_feed_obtain(net *n, rte **feed, uint count)
{ {
uint i = 0; uint i = 0;
for (struct rte_storage *e = n->routes; e; e = e->next) for (struct rte_storage *e = n->routes; e; e = e->next)
if (rte_is_valid(RTES_OR_NULL(e))) if (rte_is_valid(RTE_OR_NULL(e)))
{ {
ASSERT_DIE(i < count); ASSERT_DIE(i < count);
feed[i++] = &e->rte; feed[i++] = &e->rte;

View File

@ -50,6 +50,7 @@ pool *rt_table_pool;
static linpool *rte_update_pool; static linpool *rte_update_pool;
list routing_tables; list routing_tables;
list deleted_routing_tables;
static void rt_free_hostcache(rtable *tab); static void rt_free_hostcache(rtable *tab);
static void rt_notify_hostcache(rtable *tab, net *net); static void rt_notify_hostcache(rtable *tab, net *net);
@ -385,11 +386,9 @@ rte_mergable(rte *pri, rte *sec)
static void static void
rte_trace(const char *name, const rte *e, int dir, const char *msg) rte_trace(const char *name, const rte *e, int dir, const char *msg)
{ {
log(L_TRACE "%s %c %s %N src %uL %uG %uS %s%s", log(L_TRACE "%s %c %s %N %uL %uG %s",
name, dir, msg, e->net, name, dir, msg, e->net, e->src->private_id, e->src->global_id,
e->src->private_id, e->src->global_id, e->stale_cycle, rta_dest_name(e->attrs->dest));
rta_dest_name(e->attrs->dest),
rte_is_filtered(e) ? " (filtered)" : "");
} }
static inline void static inline void
@ -428,7 +427,7 @@ rte_feed_count(net *n)
{ {
uint count = 0; uint count = 0;
for (struct rte_storage *e = n->routes; e; e = e->next) for (struct rte_storage *e = n->routes; e; e = e->next)
if (rte_is_valid(RTES_OR_NULL(e))) if (rte_is_valid(RTE_OR_NULL(e)))
count++; count++;
return count; return count;
} }
@ -438,7 +437,7 @@ rte_feed_obtain(net *n, struct rte **feed, uint count)
{ {
uint i = 0; uint i = 0;
for (struct rte_storage *e = n->routes; e; e = e->next) for (struct rte_storage *e = n->routes; e; e = e->next)
if (rte_is_valid(RTES_OR_NULL(e))) if (rte_is_valid(RTE_OR_NULL(e)))
{ {
ASSERT_DIE(i < count); ASSERT_DIE(i < count);
feed[i++] = &e->rte; feed[i++] = &e->rte;
@ -509,11 +508,10 @@ export_filter(struct channel *c, rte *rt, int silent)
return export_filter_(c, rt, rte_update_pool, silent); return export_filter_(c, rt, rte_update_pool, silent);
} }
void do_rt_notify_direct(struct channel *c, const net_addr *net, rte *new, const rte *old);
static void static void
do_rt_notify(struct channel *c, const net_addr *net, rte *new, const rte *old) do_rt_notify(struct channel *c, const net_addr *net, rte *new, const rte *old)
{ {
struct proto *p = c->proto;
struct channel_export_stats *stats = &c->export_stats; struct channel_export_stats *stats = &c->export_stats;
if (c->refeeding && new) if (c->refeeding && new)
@ -530,31 +528,28 @@ do_rt_notify(struct channel *c, const net_addr *net, rte *new, const rte *old)
if (!new && old) if (!new && old)
CHANNEL_LIMIT_POP(c, OUT); CHANNEL_LIMIT_POP(c, OUT);
/* Store route export state */
if (old)
bmap_clear(&c->export_map, old->id);
if (new)
bmap_set(&c->export_map, new->id);
/* Apply export table */ /* Apply export table */
struct rte_storage *old_exported = NULL;
if (c->out_table) if (c->out_table)
rte_import(&c->out_table->push, net, new, old ? old->src : new->src); {
else if (!rte_update_out(c, net, new, old, &old_exported))
do_rt_notify_direct(c, net, new, old); {
} channel_rte_trace_out(D_ROUTES, c, new, "idempotent");
return;
void }
do_rt_notify_direct(struct channel *c, const net_addr *net, rte *new, const rte *old) }
{
struct proto *p = c->proto;
struct channel_export_stats *stats = &c->export_stats;
if (new) if (new)
stats->updates_accepted++; stats->updates_accepted++;
else else
stats->withdraws_accepted++; stats->withdraws_accepted++;
if (old)
bmap_clear(&c->export_map, old->id);
if (new)
bmap_set(&c->export_map, new->id);
if (p->debug & D_ROUTES) if (p->debug & D_ROUTES)
{ {
if (new && old) if (new && old)
@ -565,7 +560,10 @@ do_rt_notify_direct(struct channel *c, const net_addr *net, rte *new, const rte
channel_rte_trace_out(D_ROUTES, c, old, "removed"); channel_rte_trace_out(D_ROUTES, c, old, "removed");
} }
p->rt_notify(p, c, net, new, old); p->rt_notify(p, c, net, new, old_exported ? &old_exported->rte : old);
if (c->out_table && old_exported)
rte_free(old_exported, c->out_table);
} }
static void static void
@ -786,7 +784,7 @@ rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_
rte n0; rte n0;
if (rpe->new_best != rpe->old_best) if (rpe->new_best != rpe->old_best)
rt_notify_basic(c, net, RTES_CLONE(rpe->new_best, &n0), RTES_OR_NULL(rpe->old_best)); rt_notify_basic(c, net, RTE_COPY(rpe->new_best, &n0), RTE_OR_NULL(rpe->old_best));
/* Drop the old stored rejection if applicable. /* Drop the old stored rejection if applicable.
* new->id == old->id happens when updating hostentries. */ * new->id == old->id happens when updating hostentries. */
@ -801,7 +799,7 @@ rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pend
rte n0; rte n0;
if (rpe->new != rpe->old) if (rpe->new != rpe->old)
rt_notify_basic(c, net, RTES_CLONE(rpe->new, &n0), RTES_OR_NULL(rpe->old)); rt_notify_basic(c, net, RTE_COPY(rpe->new, &n0), RTE_OR_NULL(rpe->old));
/* Drop the old stored rejection if applicable. /* Drop the old stored rejection if applicable.
* new->id == old->id happens when updating hostentries. */ * new->id == old->id happens when updating hostentries. */
@ -976,10 +974,6 @@ rte_recalculate(struct rt_import_hook *c, net *net, rte *new, struct rte_src *sr
rte *old_best = old_best_stored ? &old_best_stored->rte : NULL; rte *old_best = old_best_stored ? &old_best_stored->rte : NULL;
rte *old = NULL; rte *old = NULL;
/* Set the stale cycle unless already set */
if (new && !(new->flags & REF_USE_STALE))
new->stale_cycle = c->stale_set;
/* Find and remove original route from the same protocol */ /* Find and remove original route from the same protocol */
struct rte_storage **before_old = rte_find(net, src); struct rte_storage **before_old = rte_find(net, src);
@ -1005,7 +999,8 @@ rte_recalculate(struct rt_import_hook *c, net *net, rte *new, struct rte_src *sr
if (new && rte_same(old, new)) if (new && rte_same(old, new))
{ {
/* No changes, ignore the new route and refresh the old one */ /* No changes, ignore the new route and refresh the old one */
old->stale_cycle = new->stale_cycle;
old->flags &= ~(REF_STALE | REF_DISCARD | REF_MODIFY);
if (!rte_is_filtered(new)) if (!rte_is_filtered(new))
{ {
@ -1143,23 +1138,18 @@ rte_recalculate(struct rt_import_hook *c, net *net, rte *new, struct rte_src *sr
new_stored->rte.id = old->id; new_stored->rte.id = old->id;
} }
_Bool nb = (new_stored == net->routes);
_Bool ob = (old_best == old);
/* Log the route change */ /* Log the route change */
if (new_ok && old_ok) if (new_ok)
{ rt_rte_trace_in(D_ROUTES, req, &new_stored->rte, new_stored == net->routes ? "added [best]" : "added");
const char *best_indicator[2][2] = { { "updated", "updated [-best]" }, { "updated [+best]", "updated [best]" } };
rt_rte_trace_in(D_ROUTES, req, &new_stored->rte, best_indicator[nb][ob]);
}
else if (new_ok)
rt_rte_trace_in(D_ROUTES, req, &new_stored->rte,
(!net->routes->next || !rte_is_ok(&net->routes->next->rte)) ? "added [sole]" :
nb ? "added [best]" : "added");
else if (old_ok) else if (old_ok)
rt_rte_trace_in(D_ROUTES, req, old, {
(!net->routes || !rte_is_ok(&net->routes->rte)) ? "removed [sole]" : if (old != old_best)
ob ? "removed [best]" : "removed"); rt_rte_trace_in(D_ROUTES, req, old, "removed");
else if (net->routes && rte_is_ok(&net->routes->rte))
rt_rte_trace_in(D_ROUTES, req, old, "removed [replaced]");
else
rt_rte_trace_in(D_ROUTES, req, old, "removed [sole]");
}
/* Propagate the route change */ /* Propagate the route change */
rte_announce(table, net, new_stored, old_stored, rte_announce(table, net, new_stored, old_stored,
@ -1207,15 +1197,12 @@ channel_preimport(struct rt_import_request *req, rte *new, rte *old)
{ {
struct channel *c = SKIP_BACK(struct channel, in_req, req); struct channel *c = SKIP_BACK(struct channel, in_req, req);
if (!c->in_table) if (new && !old)
{ if (CHANNEL_LIMIT_PUSH(c, RX))
if (new && !old) return NULL;
if (CHANNEL_LIMIT_PUSH(c, RX))
return NULL;
if (!new && old) if (!new && old)
CHANNEL_LIMIT_POP(c, RX); CHANNEL_LIMIT_POP(c, RX);
}
int new_in = new && !rte_is_filtered(new); int new_in = new && !rte_is_filtered(new);
int old_in = old && !rte_is_filtered(old); int old_in = old && !rte_is_filtered(old);
@ -1236,22 +1223,7 @@ channel_preimport(struct rt_import_request *req, rte *new, rte *old)
return new; return new;
} }
rte * static void rte_update_direct(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
channel_in_preimport(struct rt_import_request *req, rte *new, rte *old)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, push, req);
if (new && !old)
if (CHANNEL_LIMIT_PUSH(cat->c, RX))
return NULL;
if (!new && old)
CHANNEL_LIMIT_POP(cat->c, RX);
return new;
}
void rte_update_direct(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
void void
rte_update(struct channel *c, const net_addr *n, rte *new, struct rte_src *src) rte_update(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
@ -1261,14 +1233,13 @@ rte_update(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
ASSERT(c->channel_state == CS_UP); ASSERT(c->channel_state == CS_UP);
if (c->in_table && !rte_update_in(c, n, new, src))
return;
if (c->in_table) return rte_update_direct(c, n, new, src);
rte_import(&c->in_table->push, n, new, src);
else
rte_update_direct(c, n, new, src);
} }
void static void
rte_update_direct(struct channel *c, const net_addr *n, rte *new, struct rte_src *src) rte_update_direct(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
{ {
const struct filter *filter = c->in_filter; const struct filter *filter = c->in_filter;
@ -1352,6 +1323,24 @@ rte_discard(net *net, rte *old) /* Non-filtered route deletion, used during garb
rte_update_unlock(); rte_update_unlock();
} }
/* Modify existing route by protocol hook, used for long-lived graceful restart */
static inline void
rte_modify(net *net, rte *old)
{
rte_update_lock();
rte *new = old->sender->req->rte_modify(old, rte_update_pool);
if (new != old)
{
if (new)
new->flags = old->flags & ~REF_MODIFY;
rte_recalculate(old->sender, net, new, old->src);
}
rte_update_unlock();
}
/* Check rtable for best route to given net whether it would be exported do p */ /* Check rtable for best route to given net whether it would be exported do p */
int int
rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter) rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter)
@ -1430,9 +1419,6 @@ rt_request_import(rtable *tab, struct rt_import_request *req)
hook->req = req; hook->req = req;
hook->table = tab; hook->table = tab;
if (!hook->stale_set)
hook->stale_set = hook->stale_valid = hook->stale_pruning = hook->stale_pruned = 1;
rt_set_import_state(hook, TIS_UP); rt_set_import_state(hook, TIS_UP);
hook->n = (node) {}; hook->n = (node) {};
@ -1513,41 +1499,20 @@ rt_stop_export(struct rt_export_request *req, void (*stopped)(struct rt_export_r
* routes to the routing table (by rte_update()). After that, all protocol * routes to the routing table (by rte_update()). After that, all protocol
* routes (more precisely routes with @c as @sender) not sent during the * routes (more precisely routes with @c as @sender) not sent during the
* refresh cycle but still in the table from the past are pruned. This is * refresh cycle but still in the table from the past are pruned. This is
* implemented by setting rte->stale_cycle to req->stale_set in rte_update() * implemented by marking all related routes as stale by REF_STALE flag in
* and then dropping all routes with old stale_cycle values in table prune loop. */ * rt_refresh_begin(), then marking all related stale routes with REF_DISCARD
* flag in rt_refresh_end() and then removing such routes in the prune loop.
*/
void void
rt_refresh_begin(struct rt_import_request *req) rt_refresh_begin(rtable *t, struct rt_import_request *req)
{ {
struct rt_import_hook *hook = req->hook; FIB_WALK(&t->fib, net, n)
ASSERT_DIE(hook); {
for (struct rte_storage *e = n->routes; e; e = e->next)
ASSERT_DIE(hook->stale_set == hook->stale_valid); if (e->rte.sender == req->hook)
e->rte.flags |= REF_STALE;
/* If the pruning routine is too slow */ }
if ((hook->stale_pruned < hook->stale_valid) && (hook->stale_pruned + 128 < hook->stale_valid) FIB_WALK_END;
|| (hook->stale_pruned > hook->stale_valid) && (hook->stale_pruned > hook->stale_valid + 128))
{
log(L_WARN "Route refresh flood in table %s", hook->table->name);
FIB_WALK(&hook->table->fib, net, n)
{
for (struct rte_storage *e = n->routes; e; e = e->next)
if (e->rte.sender == req->hook)
e->rte.stale_cycle = 0;
}
FIB_WALK_END;
hook->stale_set = 1;
hook->stale_valid = 0;
hook->stale_pruned = 0;
}
else if (!++hook->stale_set)
{
/* Let's reserve the stale_cycle zero value for always-invalid routes */
hook->stale_set = 1;
hook->stale_valid = 0;
}
if (req->trace_routes & D_STATES)
log(L_TRACE "%s: route refresh begin [%u]", req->name, hook->stale_set);
} }
/** /**
@ -1559,18 +1524,43 @@ rt_refresh_begin(struct rt_import_request *req)
* hook. See rt_refresh_begin() for description of refresh cycles. * hook. See rt_refresh_begin() for description of refresh cycles.
*/ */
void void
rt_refresh_end(struct rt_import_request *req) rt_refresh_end(rtable *t, struct rt_import_request *req)
{ {
struct rt_import_hook *hook = req->hook; int prune = 0;
ASSERT_DIE(hook);
hook->stale_valid++; FIB_WALK(&t->fib, net, n)
ASSERT_DIE(hook->stale_set == hook->stale_valid); {
for (struct rte_storage *e = n->routes; e; e = e->next)
if ((e->rte.sender == req->hook) && (e->rte.flags & REF_STALE))
{
e->rte.flags |= REF_DISCARD;
prune = 1;
}
}
FIB_WALK_END;
rt_schedule_prune(hook->table); if (prune)
rt_schedule_prune(t);
}
if (req->trace_routes & D_STATES) void
log(L_TRACE "%s: route refresh end [%u]", req->name, hook->stale_valid); rt_modify_stale(rtable *t, struct rt_import_request *req)
{
int prune = 0;
FIB_WALK(&t->fib, net, n)
{
for (struct rte_storage *e = n->routes; e; e = e->next)
if ((e->rte.sender == req->hook) && (e->rte.flags & REF_STALE) && !(e->rte.flags & REF_FILTERED))
{
e->rte.flags |= REF_MODIFY;
prune = 1;
}
}
FIB_WALK_END;
if (prune)
rt_schedule_prune(t);
} }
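Putting the two restored entry points together, a protocol-side refresh cycle is expected to look roughly like this (a sketch only; c, its table and the per-route arguments are assumed placeholders, not code from the patch):

  /* Mark all routes previously imported through c->in_req as stale. */
  rt_refresh_begin(c->table, &c->in_req);

  /* Re-announce the current routes; identical re-sent routes have their
   * REF_STALE flag cleared in rte_recalculate(). */
  rte_update(c, net, new_rte, src);   /* repeated for every refreshed route */

  /* Anything still stale gets REF_DISCARD and the prune loop removes it. */
  rt_refresh_end(c->table, &c->in_req);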
/** /**
@ -1623,6 +1613,9 @@ rt_dump_all(void)
WALK_LIST2(t, n, routing_tables, n) WALK_LIST2(t, n, routing_tables, n)
rt_dump(t); rt_dump(t);
WALK_LIST2(t, n, deleted_routing_tables, n)
rt_dump(t);
} }
void void
@ -1665,6 +1658,9 @@ rt_dump_hooks_all(void)
WALK_LIST2(t, n, routing_tables, n) WALK_LIST2(t, n, routing_tables, n)
rt_dump_hooks(t); rt_dump_hooks(t);
WALK_LIST2(t, n, deleted_routing_tables, n)
rt_dump_hooks(t);
} }
static inline void static inline void
@ -1800,7 +1796,9 @@ rt_free(resource *_r)
DBG("Deleting routing table %s\n", r->name); DBG("Deleting routing table %s\n", r->name);
ASSERT_DIE(r->use_count == 0); ASSERT_DIE(r->use_count == 0);
ASSERT_DIE(r->deleted);
if (r->internal)
return;
r->config->table = NULL; r->config->table = NULL;
rem_node(&r->n); rem_node(&r->n);
@ -1855,17 +1853,20 @@ rt_setup(pool *pp, struct rtable_config *cf)
fib_init(&t->fib, p, t->addr_type, sizeof(net), OFFSETOF(net, n), 0, NULL); fib_init(&t->fib, p, t->addr_type, sizeof(net), OFFSETOF(net, n), 0, NULL);
init_list(&t->imports); if (!(t->internal = cf->internal))
init_list(&t->exports); {
hmap_init(&t->id_map, p, 1024); init_list(&t->imports);
hmap_set(&t->id_map, 0); init_list(&t->exports);
hmap_init(&t->id_map, p, 1024);
hmap_set(&t->id_map, 0);
init_list(&t->subscribers); init_list(&t->subscribers);
t->rt_event = ev_new_init(p, rt_event, t); t->rt_event = ev_new_init(p, rt_event, t);
t->last_rt_change = t->gc_time = current_time(); t->last_rt_change = t->gc_time = current_time();
t->rl_pipe = (struct tbf) TBF_DEFAULT_LOG_LIMITS; t->rl_pipe = (struct tbf) TBF_DEFAULT_LOG_LIMITS;
}
return t; return t;
} }
@ -1883,6 +1884,7 @@ rt_init(void)
rt_table_pool = rp_new(&root_pool, "Routing tables"); rt_table_pool = rp_new(&root_pool, "Routing tables");
rte_update_pool = lp_new_default(rt_table_pool); rte_update_pool = lp_new_default(rt_table_pool);
init_list(&routing_tables); init_list(&routing_tables);
init_list(&deleted_routing_tables);
} }
@ -1923,13 +1925,6 @@ rt_prune_table(rtable *tab)
WALK_LIST2(ih, n, tab->imports, n) WALK_LIST2(ih, n, tab->imports, n)
if (ih->import_state == TIS_STOP) if (ih->import_state == TIS_STOP)
rt_set_import_state(ih, TIS_FLUSHING); rt_set_import_state(ih, TIS_FLUSHING);
else if ((ih->stale_valid != ih->stale_pruning) && (ih->stale_pruning == ih->stale_pruned))
{
ih->stale_pruning = ih->stale_valid;
if (ih->req->trace_routes & D_STATES)
log(L_TRACE "%s: table prune after refresh begin [%u]", ih->req->name, ih->stale_pruning);
}
FIB_ITERATE_INIT(fit, &tab->fib); FIB_ITERATE_INIT(fit, &tab->fib);
tab->prune_state = 2; tab->prune_state = 2;
@ -1941,11 +1936,7 @@ again:
rescan: rescan:
for (struct rte_storage *e=n->routes; e; e=e->next) for (struct rte_storage *e=n->routes; e; e=e->next)
{ {
struct rt_import_hook *s = e->rte.sender; if ((e->rte.sender->import_state == TIS_FLUSHING) || (e->rte.flags & REF_DISCARD))
if ((s->import_state == TIS_FLUSHING) ||
(e->rte.stale_cycle < s->stale_valid) ||
(e->rte.stale_cycle > s->stale_set))
{ {
if (limit <= 0) if (limit <= 0)
{ {
@ -1957,6 +1948,21 @@ again:
rte_discard(n, &e->rte); rte_discard(n, &e->rte);
limit--; limit--;
goto rescan;
}
if (e->rte.flags & REF_MODIFY)
{
if (limit <= 0)
{
FIB_ITERATE_PUT(fit);
ev_schedule(tab->rt_event);
return;
}
rte_modify(n, &e->rte);
limit--;
goto rescan; goto rescan;
} }
} }
@ -1992,13 +1998,6 @@ again:
mb_free(ih); mb_free(ih);
rt_unlock_table(tab); rt_unlock_table(tab);
} }
else if (ih->stale_pruning != ih->stale_pruned)
{
ih->stale_pruned = ih->stale_pruning;
if (ih->req->trace_routes & D_STATES)
log(L_TRACE "%s: table prune after refresh end [%u]", ih->req->name, ih->stale_pruned);
}
} }
void void
@ -2205,10 +2204,7 @@ rt_next_hop_update_net(rtable *tab, net *n)
for (int i=0; i<count; i++) for (int i=0; i<count; i++)
{ {
_Bool nb = (new == updates[i].new), ob = (old_best == updates[i].old); _Bool nb = (new == updates[i].new), ob = (old_best == updates[i].old);
const char *best_indicator[2][2] = { const char *best_indicator[2][2] = { { "updated", "updated [-best]" }, { "updated [+best]", "updated [best]" } };
{ "autoupdated", "autoupdated [-best]" },
{ "autoupdated [+best]", "autoupdated [best]" }
};
rt_rte_trace_in(D_ROUTES, updates[i].new->rte.sender->req, &updates[i].new->rte, best_indicator[nb][ob]); rt_rte_trace_in(D_ROUTES, updates[i].new->rte.sender->req, &updates[i].new->rte, best_indicator[nb][ob]);
rte_announce_i(tab, n, updates[i].new, updates[i].old, new, old_best); rte_announce_i(tab, n, updates[i].new, updates[i].old, new, old_best);
} }
@ -2312,12 +2308,11 @@ rt_unlock_table(rtable *r)
{ {
if (!--r->use_count && r->deleted) if (!--r->use_count && r->deleted)
{ {
void *del_data = r->del_data; struct config *conf = r->deleted;
void (*deleted)(void *) = r->deleted;
/* Delete the routing table by freeing its pool */ /* Delete the routing table by freeing its pool */
rt_shutdown(r); rt_shutdown(r);
deleted(del_data); config_del_obstacle(conf);
} }
} }
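For orientation, the use_count/deleted handshake above assumes the usual lock discipline (sketch only; tab is an assumed rtable pointer):

  rt_lock_table(tab);     /* pin the table while something still references it */
  /* ... use tab, possibly across deferred events ... */
  rt_unlock_table(tab);   /* the last unlock of a table whose configuration was
                           * removed frees it and drops the old config obstacle,
                           * as implemented above */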
@ -2328,8 +2323,6 @@ rt_find_table_config(struct config *cf, char *name)
return (sym && (sym->class == SYM_TABLE)) ? sym->table : NULL; return (sym && (sym->class == SYM_TABLE)) ? sym->table : NULL;
} }
static void rt_config_del_obstacle(void *data) { config_del_obstacle(data); }
/** /**
* rt_commit - commit new routing table configuration * rt_commit - commit new routing table configuration
* @new: new configuration * @new: new configuration
@ -2368,10 +2361,9 @@ rt_commit(struct config *new, struct config *old)
else else
{ {
DBG("\t%s: deleted\n", o->name); DBG("\t%s: deleted\n", o->name);
-	  rt_lock_table(ot);
-	  ot->deleted = rt_config_del_obstacle;
-	  ot->del_data = old;
+	  ot->deleted = old;
	  config_add_obstacle(old);
+	  rt_lock_table(ot);
rt_unlock_table(ot); rt_unlock_table(ot);
} }
} }
@ -2449,6 +2441,285 @@ done:
} }
/*
* Import table
*/
int
rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
{
struct rtable *tab = c->in_table;
net *net;
if (new)
net = net_get(tab, n);
else
{
net = net_find(tab, n);
if (!net)
goto drop_withdraw;
}
/* Find the old rte */
struct rte_storage **pos = rte_find(net, src);
if (*pos)
{
rte *old = &(*pos)->rte;
if (new && rte_same(old, new))
{
/* Refresh the old rte, continue with update to main rtable */
if (old->flags & (REF_STALE | REF_DISCARD | REF_MODIFY))
{
old->flags &= ~(REF_STALE | REF_DISCARD | REF_MODIFY);
return 1;
}
goto drop_update;
}
if (!new)
CHANNEL_LIMIT_POP(c, RX);
/* Move iterator if needed */
if (*pos == c->reload_next_rte)
c->reload_next_rte = (*pos)->next;
/* Remove the old rte */
struct rte_storage *del = *pos;
*pos = (*pos)->next;
rte_free(del, tab);
tab->rt_count--;
}
else if (new)
{
if (CHANNEL_LIMIT_PUSH(c, RX))
{
/* Required by rte_trace_in() */
new->net = n;
channel_rte_trace_in(D_FILTERS, c, new, "ignored [limit]");
goto drop_update;
}
}
else
goto drop_withdraw;
if (!new)
{
if (!net->routes)
fib_delete(&tab->fib, net);
return 1;
}
/* Insert the new rte */
struct rte_storage *e = rte_store(new, net, tab);
e->rte.lastmod = current_time();
e->next = *pos;
*pos = e;
tab->rt_count++;
return 1;
drop_update:
c->import_stats.updates_received++;
c->in_req.hook->stats.updates_ignored++;
if (!net->routes)
fib_delete(&tab->fib, net);
return 0;
drop_withdraw:
c->import_stats.withdraws_received++;
c->in_req.hook->stats.withdraws_ignored++;
return 0;
}
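The restored rte_update_in() acts as a gate in front of the main table: it returns 1 when the update should continue into the regular import path and 0 when the import table has swallowed it (duplicate update, RX limit hit, or withdraw of an unknown route). A minimal caller-side sketch, assuming rte_update_direct() as the next stage (as used by rt_reload_channel() below); the wrapper itself is illustrative, not part of this commit:

/* Sketch only: gate the main-table update on the import-table result. */
static void
channel_update_sketch(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
{
  if (c->in_table && !rte_update_in(c, n, new, src))
    return;				/* swallowed by the import table */

  rte_update_direct(c, n, new, src);	/* continue into the main routing table */
}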
int
rt_reload_channel(struct channel *c)
{
struct rtable *tab = c->in_table;
struct fib_iterator *fit = &c->reload_fit;
int max_feed = 64;
ASSERT(c->channel_state == CS_UP);
if (!c->reload_active)
{
FIB_ITERATE_INIT(fit, &tab->fib);
c->reload_active = 1;
}
do {
for (struct rte_storage *e = c->reload_next_rte; e; e = e->next)
{
if (max_feed-- <= 0)
{
c->reload_next_rte = e;
debug("%s channel reload burst split (max_feed=%d)", c->proto->name, max_feed);
return 0;
}
rte r = e->rte;
rte_update_direct(c, r.net, &r, r.src);
}
c->reload_next_rte = NULL;
FIB_ITERATE_START(&tab->fib, fit, net, n)
{
if (c->reload_next_rte = n->routes)
{
FIB_ITERATE_PUT_NEXT(fit, &tab->fib);
break;
}
}
FIB_ITERATE_END;
}
while (c->reload_next_rte);
c->reload_active = 0;
return 1;
}
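rt_reload_channel() deliberately replays at most max_feed (64) routes per call and parks its fib_iterator and reload_next_rte so the walk can resume later; the caller is expected to keep rescheduling itself until the function returns 1. A hedged sketch of such a driver event (the reload_event field is assumed here; channel_reload_loop() in proto.c follows this pattern):

/* Sketch only: drive the burst-limited reload from an event. */
static void
channel_reload_loop_sketch(void *ptr)
{
  struct channel *c = ptr;

  if (!rt_reload_channel(c))		/* budget exhausted, more routes remain */
  {
    ev_schedule(c->reload_event);	/* assumed event; reschedule and yield */
    return;
  }

  /* the whole in_table has been replayed; finish the reload here */
}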
void
rt_reload_channel_abort(struct channel *c)
{
if (c->reload_active)
{
/* Unlink the iterator */
fit_get(&c->in_table->fib, &c->reload_fit);
c->reload_next_rte = NULL;
c->reload_active = 0;
}
}
void
rt_prune_sync(rtable *t, int all)
{
struct fib_iterator fit;
FIB_ITERATE_INIT(&fit, &t->fib);
again:
FIB_ITERATE_START(&t->fib, &fit, net, n)
{
struct rte_storage *e, **ee = &n->routes;
while (e = *ee)
{
if (all || (e->rte.flags & (REF_STALE | REF_DISCARD)))
{
*ee = e->next;
rte_free(e, t);
t->rt_count--;
}
else
ee = &e->next;
}
if (all || !n->routes)
{
FIB_ITERATE_PUT(&fit);
fib_delete(&t->fib, n);
goto again;
}
}
FIB_ITERATE_END;
}
/*
* Export table
*/
int
rte_update_out(struct channel *c, const net_addr *n, rte *new, const rte *old0, struct rte_storage **old_exported)
{
struct rtable *tab = c->out_table;
struct rte_src *src;
net *net;
if (new)
{
net = net_get(tab, n);
src = new->src;
}
else
{
net = net_find(tab, n);
src = old0->src;
if (!net)
goto drop;
}
/* Find the old rte */
struct rte_storage **pos = (c->ra_mode == RA_ANY) ? rte_find(net, src) : &net->routes;
struct rte_storage *old = NULL;
if (old = *pos)
{
if (new && rte_same(&(*pos)->rte, new))
goto drop;
/* Remove the old rte */
*pos = old->next;
*old_exported = old;
tab->rt_count--;
}
if (!new)
{
if (!old)
goto drop;
if (!net->routes)
fib_delete(&tab->fib, net);
return 1;
}
/* Insert the new rte */
struct rte_storage *e = rte_store(new, net, tab);
e->rte.lastmod = current_time();
e->next = *pos;
*pos = e;
tab->rt_count++;
return 1;
drop:
return 0;
}
void
rt_refeed_channel(struct channel *c)
{
if (!c->out_table)
{
channel_request_feeding(c);
return;
}
ASSERT_DIE(c->ra_mode != RA_ANY);
c->proto->feed_begin(c, 0);
FIB_WALK(&c->out_table->fib, net, n)
{
if (!n->routes)
continue;
rte e = n->routes->rte;
c->proto->rt_notify(c->proto, c, n->n.addr, &e, NULL);
}
FIB_WALK_END;
c->proto->feed_end(c);
}
/* /*
* Hostcache * Hostcache
*/ */
@ -2267,44 +2267,30 @@ bgp_rte_recalculate(rtable *table, net *net, rte *new, rte *old, rte *old_best)
return !old_suppressed; return !old_suppressed;
} }
-void
-bgp_rte_modify_stale(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
+rte *
+bgp_rte_modify_stale(struct rte *r, struct linpool *pool)
{
-  struct bgp_channel *c = SKIP_BACK(struct bgp_channel, stale_feed, req);
-  do {
-    rte *r = feed[--count];
-    if (r->sender != c->c.in_req.hook)
-      continue;
-    /* A new route, do not mark as stale */
-    if (r->stale_cycle == c->c.in_req.hook->stale_set)
-      continue;
-    eattr *ea = ea_find(r->attrs->eattrs, EA_CODE(PROTOCOL_BGP, BA_COMMUNITY));
-    const struct adata *ad = ea ? ea->u.ptr : NULL;
-    uint flags = ea ? ea->flags : BAF_PARTIAL;
-    rte e0 = *r;
-    e0.flags |= REF_USE_STALE;
-    if (ad && int_set_contains(ad, BGP_COMM_NO_LLGR))
-      rte_import(&c->c.in_req, n, NULL, r->src);
-    else if (ad && int_set_contains(ad, BGP_COMM_LLGR_STALE))
-      rte_import(&c->c.in_req, n, &e0, r->src);
-    else {
-      rta *a = e0.attrs = rta_do_cow(r->attrs, bgp_linpool);
-      bgp_set_attr_ptr(&(a->eattrs), bgp_linpool, BA_COMMUNITY, flags,
-          int_set_add(bgp_linpool, ad, BGP_COMM_LLGR_STALE));
-      e0.pflags |= BGP_REF_STALE;
-      rte_import(&c->c.in_req, n, &e0, r->src);
-      lp_flush(bgp_linpool);
-    }
-  } while (count);
+  eattr *ea = ea_find(r->attrs->eattrs, EA_CODE(PROTOCOL_BGP, BA_COMMUNITY));
+  const struct adata *ad = ea ? ea->u.ptr : NULL;
+  uint flags = ea ? ea->flags : BAF_PARTIAL;
+  if (ad && int_set_contains(ad, BGP_COMM_NO_LLGR))
+    return NULL;
+  if (ad && int_set_contains(ad, BGP_COMM_LLGR_STALE))
+    return r;
+  rta *a = rta_do_cow(r->attrs, pool);
+  _Thread_local static rte e0;
+  e0 = *r;
+  e0.attrs = a;
+  bgp_set_attr_ptr(&(a->eattrs), pool, BA_COMMUNITY, flags,
+      int_set_add(pool, ad, BGP_COMM_LLGR_STALE));
+  e0.pflags |= BGP_REF_STALE;
+  return &e0;
}
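Both the removed bulk-export variant and the restored per-route variant apply the same long-lived graceful restart policy to each route: a NO_LLGR community means the route must not be retained, an already present LLGR_STALE community means it is kept unchanged, and otherwise LLGR_STALE is attached and the route is kept as stale. A self-contained sketch of just that decision (types and helper names are hypothetical, not part of this commit):

#include <stdbool.h>

enum llgr_action { LLGR_WITHDRAW, LLGR_KEEP, LLGR_MARK_STALE };

static enum llgr_action
llgr_decide(bool has_no_llgr, bool has_llgr_stale)
{
  if (has_no_llgr)		/* BGP_COMM_NO_LLGR: drop instead of retaining */
    return LLGR_WITHDRAW;

  if (has_llgr_stale)		/* already marked: keep unchanged */
    return LLGR_KEEP;

  return LLGR_MARK_STALE;	/* otherwise attach BGP_COMM_LLGR_STALE and keep */
}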
@ -140,15 +140,6 @@ static void bgp_update_bfd(struct bgp_proto *p, const struct bfd_options *bfd);
static int bgp_incoming_connection(sock *sk, uint dummy UNUSED); static int bgp_incoming_connection(sock *sk, uint dummy UNUSED);
static void bgp_listen_sock_err(sock *sk UNUSED, int err); static void bgp_listen_sock_err(sock *sk UNUSED, int err);
static void bgp_graceful_restart_feed(struct bgp_channel *c);
static inline void channel_refresh_end_reload(struct channel *c)
{
channel_refresh_end(c);
if (c->in_table)
channel_request_reload(c);
}
/** /**
* bgp_open - open a BGP instance * bgp_open - open a BGP instance
* @p: BGP instance * @p: BGP instance
@ -784,25 +775,25 @@ bgp_handle_graceful_restart(struct bgp_proto *p)
{ {
case BGP_GRS_NONE: case BGP_GRS_NONE:
c->gr_active = BGP_GRS_ACTIVE; c->gr_active = BGP_GRS_ACTIVE;
-      channel_refresh_begin(&c->c);
+      rt_refresh_begin(c->c.table, &c->c.in_req);
break; break;
case BGP_GRS_ACTIVE: case BGP_GRS_ACTIVE:
-      channel_refresh_end(&c->c);
-      channel_refresh_begin(&c->c);
+      rt_refresh_end(c->c.table, &c->c.in_req);
+      rt_refresh_begin(c->c.table, &c->c.in_req);
break; break;
case BGP_GRS_LLGR: case BGP_GRS_LLGR:
-      channel_refresh_begin(&c->c);
-      bgp_graceful_restart_feed(c);
+      rt_refresh_begin(c->c.table, &c->c.in_req);
+      rt_modify_stale(c->c.table, &c->c.in_req);
break; break;
} }
} }
else else
{ {
/* Just flush the routes */ /* Just flush the routes */
-    channel_refresh_begin(&c->c);
-    channel_refresh_end(&c->c);
+    rt_refresh_begin(c->c.table, &c->c.in_req);
+    rt_refresh_end(c->c.table, &c->c.in_req);
} }
/* Reset bucket and prefix tables */ /* Reset bucket and prefix tables */
@ -820,50 +811,6 @@ bgp_handle_graceful_restart(struct bgp_proto *p)
tm_start(p->gr_timer, p->conn->remote_caps->gr_time S); tm_start(p->gr_timer, p->conn->remote_caps->gr_time S);
} }
static void
bgp_graceful_restart_feed_done(struct rt_export_request *req)
{
req->hook = NULL;
}
static void
bgp_graceful_restart_feed_dump_req(struct rt_export_request *req)
{
struct bgp_channel *c = SKIP_BACK(struct bgp_channel, stale_feed, req);
debug(" BGP-GR %s.%s export request %p\n", c->c.proto->name, c->c.name, req);
}
static void
bgp_graceful_restart_feed_log_state_change(struct rt_export_request *req, u8 state)
{
struct bgp_channel *c = SKIP_BACK(struct bgp_channel, stale_feed, req);
struct bgp_proto *p = (void *) c->c.proto;
BGP_TRACE(D_EVENTS, "Long-lived graceful restart export state changed to %s", rt_export_state_name(state));
if (state == TES_READY)
rt_stop_export(req, bgp_graceful_restart_feed_done);
}
static void
bgp_graceful_restart_drop_export(struct rt_export_request *req UNUSED, const net_addr *n UNUSED, struct rt_pending_export *rpe UNUSED)
{ /* Nothing to do */ }
static void
bgp_graceful_restart_feed(struct bgp_channel *c)
{
c->stale_feed = (struct rt_export_request) {
.name = "BGP-GR",
.trace_routes = c->c.debug | c->c.proto->debug,
.dump_req = bgp_graceful_restart_feed_dump_req,
.log_state_change = bgp_graceful_restart_feed_log_state_change,
.export_bulk = bgp_rte_modify_stale,
.export_one = bgp_graceful_restart_drop_export,
};
rt_request_export(c->c.table, &c->stale_feed);
}
/** /**
* bgp_graceful_restart_done - finish active BGP graceful restart * bgp_graceful_restart_done - finish active BGP graceful restart
* @c: BGP channel * @c: BGP channel
@ -886,11 +833,8 @@ bgp_graceful_restart_done(struct bgp_channel *c)
if (!p->gr_active_num) if (!p->gr_active_num)
BGP_TRACE(D_EVENTS, "Neighbor graceful restart done"); BGP_TRACE(D_EVENTS, "Neighbor graceful restart done");
if (c->stale_feed.hook)
rt_stop_export(&c->stale_feed, bgp_graceful_restart_feed_done);
tm_stop(c->stale_timer); tm_stop(c->stale_timer);
-  channel_refresh_end_reload(&c->c);
+  rt_refresh_end(c->c.table, &c->c.in_req);
} }
/** /**
@ -932,7 +876,7 @@ bgp_graceful_restart_timeout(timer *t)
/* Channel is in GR, and supports LLGR -> start LLGR */ /* Channel is in GR, and supports LLGR -> start LLGR */
c->gr_active = BGP_GRS_LLGR; c->gr_active = BGP_GRS_LLGR;
tm_start(c->stale_timer, c->stale_time S); tm_start(c->stale_timer, c->stale_time S);
-      bgp_graceful_restart_feed(c);
+      rt_modify_stale(c->c.table, &c->c.in_req);
} }
} }
else else
@ -970,7 +914,10 @@ bgp_refresh_begin(struct bgp_channel *c)
{ log(L_WARN "%s: BEGIN-OF-RR received before END-OF-RIB, ignoring", p->p.name); return; } { log(L_WARN "%s: BEGIN-OF-RR received before END-OF-RIB, ignoring", p->p.name); return; }
c->load_state = BFS_REFRESHING; c->load_state = BFS_REFRESHING;
-  channel_refresh_begin(&c->c);
+  rt_refresh_begin(c->c.table, &c->c.in_req);
+  if (c->c.in_table)
+    rt_refresh_begin(c->c.in_table, &c->c.in_req);
} }
/** /**
@ -991,7 +938,10 @@ bgp_refresh_end(struct bgp_channel *c)
{ log(L_WARN "%s: END-OF-RR received without prior BEGIN-OF-RR, ignoring", p->p.name); return; } { log(L_WARN "%s: END-OF-RR received without prior BEGIN-OF-RR, ignoring", p->p.name); return; }
c->load_state = BFS_NONE; c->load_state = BFS_NONE;
-  channel_refresh_end_reload(&c->c);
+  rt_refresh_end(c->c.table, &c->c.in_req);
+  if (c->c.in_table)
+    rt_prune_sync(c->c.in_table, 0);
} }
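The rt_refresh_begin()/rt_refresh_end() calls restored above implement the refresh cycle visible elsewhere in this diff: begin marks the channel's existing routes REF_STALE, a route re-advertised during the cycle has the flag cleared again (see the REF_STALE handling in rte_update_in()), and end turns the routes still marked stale into REF_DISCARD so the next prune drops them. A toy model of that bookkeeping, with simplified stand-in types and flag names (illustrative only, not part of this commit):

struct toy_route { unsigned flags; };
#define TOY_STALE   1u
#define TOY_DISCARD 2u

static void refresh_begin(struct toy_route *r, unsigned n)
{ for (unsigned i = 0; i < n; i++) r[i].flags |= TOY_STALE; }

static void route_readvertised(struct toy_route *r)
{ r->flags &= ~TOY_STALE; }		/* seen again during the cycle */

static void refresh_end(struct toy_route *r, unsigned n)
{
  for (unsigned i = 0; i < n; i++)
    if (r[i].flags & TOY_STALE)
      r[i].flags |= TOY_DISCARD;	/* not re-advertised: prune it later */
}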
@ -1458,9 +1408,12 @@ bgp_reload_routes(struct channel *C)
struct bgp_proto *p = (void *) C->proto; struct bgp_proto *p = (void *) C->proto;
struct bgp_channel *c = (void *) C; struct bgp_channel *c = (void *) C;
-  ASSERT(p->conn && (p->route_refresh));
+  ASSERT(p->conn && (p->route_refresh || c->c.in_table));
-  bgp_schedule_packet(p->conn, c, PKT_ROUTE_REFRESH);
+  if (c->c.in_table)
+    channel_schedule_reload(C);
+  else
+    bgp_schedule_packet(p->conn, c, PKT_ROUTE_REFRESH);
} }
static void static void
@ -1740,6 +1693,7 @@ bgp_init(struct proto_config *CF)
P->rte_better = bgp_rte_better; P->rte_better = bgp_rte_better;
P->rte_mergable = bgp_rte_mergable; P->rte_mergable = bgp_rte_mergable;
P->rte_recalculate = cf->deterministic_med ? bgp_rte_recalculate : NULL; P->rte_recalculate = cf->deterministic_med ? bgp_rte_recalculate : NULL;
P->rte_modify = bgp_rte_modify_stale;
P->rte_igp_metric = bgp_rte_igp_metric; P->rte_igp_metric = bgp_rte_igp_metric;
p->cf = cf; p->cf = cf;
@ -1802,7 +1756,7 @@ bgp_channel_start(struct channel *C)
bgp_init_prefix_table(c); bgp_init_prefix_table(c);
if (c->cf->import_table) if (c->cf->import_table)
-    channel_setup_in_table(C, 0);
+    channel_setup_in_table(C);
if (c->cf->export_table) if (c->cf->export_table)
channel_setup_out_table(C); channel_setup_out_table(C);
@ -366,7 +366,6 @@ struct bgp_channel {
timer *stale_timer; /* Long-lived stale timer for LLGR */ timer *stale_timer; /* Long-lived stale timer for LLGR */
u32 stale_time; /* Stored LLGR stale time from last session */ u32 stale_time; /* Stored LLGR stale time from last session */
struct rt_export_request stale_feed; /* Feeder request for stale route modification */
u8 add_path_rx; /* Session expects receive of ADD-PATH extended NLRI */ u8 add_path_rx; /* Session expects receive of ADD-PATH extended NLRI */
u8 add_path_tx; /* Session expects transmit of ADD-PATH extended NLRI */ u8 add_path_tx; /* Session expects transmit of ADD-PATH extended NLRI */
@ -586,7 +585,7 @@ void bgp_free_prefix(struct bgp_channel *c, struct bgp_prefix *bp);
int bgp_rte_better(struct rte *, struct rte *); int bgp_rte_better(struct rte *, struct rte *);
int bgp_rte_mergable(rte *pri, rte *sec); int bgp_rte_mergable(rte *pri, rte *sec);
int bgp_rte_recalculate(rtable *table, net *net, rte *new, rte *old, rte *old_best); int bgp_rte_recalculate(rtable *table, net *net, rte *new, rte *old, rte *old_best);
-void bgp_rte_modify_stale(struct rt_export_request *, const net_addr *, struct rt_pending_export *, rte **, uint);
+struct rte *bgp_rte_modify_stale(struct rte *r, struct linpool *pool);
u32 bgp_rte_igp_metric(struct rte *); u32 bgp_rte_igp_metric(struct rte *);
void bgp_rt_notify(struct proto *P, struct channel *C, const net_addr *n, rte *new, const rte *old); void bgp_rt_notify(struct proto *P, struct channel *C, const net_addr *n, rte *new, const rte *old);
int bgp_preexport(struct channel *, struct rte *); int bgp_preexport(struct channel *, struct rte *);
@ -2695,7 +2695,7 @@ bgp_rx_route_refresh(struct bgp_conn *conn, byte *pkt, uint len)
{ {
case BGP_RR_REQUEST: case BGP_RR_REQUEST:
BGP_TRACE(D_PACKETS, "Got ROUTE-REFRESH"); BGP_TRACE(D_PACKETS, "Got ROUTE-REFRESH");
-      channel_request_feeding(&c->c);
+      rt_refeed_channel(&c->c);
break; break;
case BGP_RR_BEGIN: case BGP_RR_BEGIN:
@ -661,9 +661,9 @@ rpki_handle_cache_response_pdu(struct rpki_cache *cache, const struct pdu_cache_
* a refresh cycle. * a refresh cycle.
*/ */
if (cache->p->roa4_channel) if (cache->p->roa4_channel)
-    rt_refresh_begin(&cache->p->roa4_channel->in_req);
+    rt_refresh_begin(cache->p->roa4_channel->table, &cache->p->roa4_channel->in_req);
  if (cache->p->roa6_channel)
-    rt_refresh_begin(&cache->p->roa6_channel->in_req);
+    rt_refresh_begin(cache->p->roa6_channel->table, &cache->p->roa6_channel->in_req);
cache->p->refresh_channels = 1; cache->p->refresh_channels = 1;
} }
@ -819,9 +819,9 @@ rpki_handle_end_of_data_pdu(struct rpki_cache *cache, const struct pdu_end_of_da
{ {
cache->p->refresh_channels = 0; cache->p->refresh_channels = 0;
if (cache->p->roa4_channel) if (cache->p->roa4_channel)
-      rt_refresh_end(&cache->p->roa4_channel->in_req);
+      rt_refresh_end(cache->p->roa4_channel->table, &cache->p->roa4_channel->in_req);
    if (cache->p->roa6_channel)
-      rt_refresh_end(&cache->p->roa6_channel->in_req);
+      rt_refresh_end(cache->p->roa6_channel->table, &cache->p->roa6_channel->in_req);
} }
cache->last_update = current_time(); cache->last_update = current_time();
@ -285,24 +285,249 @@ krt_metric(rte *a)
} }
static inline int static inline int
-krt_rte_better(rte *a, rte *b)
+krt_same_key(rte *a, rte *b)
{
-  return (krt_metric(a) > krt_metric(b));
+  return (krt_metric(a) == krt_metric(b));
}
static inline int
krt_uptodate(rte *a, rte *b)
{
return (a->attrs == b->attrs);
}
static void
krt_learn_announce_update(struct krt_proto *p, rte *e)
{
rte e0 = {
.attrs = rta_clone(e->attrs),
.src = p->p.main_source,
};
rte_update(p->p.main_channel, e->net, &e0, p->p.main_source);
}
static void
krt_learn_announce_delete(struct krt_proto *p, net_addr *n)
{
rte_update(p->p.main_channel, n, NULL, p->p.main_source);
} }
/* Called when alien route is discovered during scan */ /* Called when alien route is discovered during scan */
static void static void
-krt_learn_rte(struct krt_proto *p, rte *e)
+krt_learn_scan(struct krt_proto *p, rte *e)
{
-  e->src = rt_get_source(&p->p, krt_metric(e));
-  rte_update(p->p.main_channel, e->net, e, e->src);
+  net *n = net_get(p->krt_table, e->net);
+  struct rte_storage *m, **mm;
struct rte_storage *ee = rte_store(e, n, p->krt_table);
for(mm = &n->routes; m = *mm; mm = &m->next)
if (krt_same_key(&m->rte, e))
break;
if (m)
{
if (krt_uptodate(&m->rte, e))
{
krt_trace_in_rl(&rl_alien, p, e, "[alien] seen");
rte_free(ee, p->krt_table);
m->rte.pflags |= KRT_REF_SEEN;
}
else
{
krt_trace_in(p, e, "[alien] updated");
*mm = m->next;
rte_free(m, p->krt_table);
m = NULL;
}
}
else
krt_trace_in(p, e, "[alien] created");
if (!m)
{
ee->next = n->routes;
n->routes = ee;
ee->rte.pflags |= KRT_REF_SEEN;
}
}
static void
krt_learn_prune(struct krt_proto *p)
{
struct fib *fib = &p->krt_table->fib;
struct fib_iterator fit;
KRT_TRACE(p, D_EVENTS, "Pruning inherited routes");
FIB_ITERATE_INIT(&fit, fib);
again:
FIB_ITERATE_START(fib, &fit, net, n)
{
struct rte_storage *e, **ee, *best, **pbest, *old_best;
/*
* Note that old_best may be NULL even if there was an old best route in
* the previous step, because it might be replaced in krt_learn_scan().
* But in that case there is a new valid best route.
*/
old_best = NULL;
best = NULL;
pbest = NULL;
ee = &n->routes;
while (e = *ee)
{
if (e->rte.pflags & KRT_REF_BEST)
old_best = e;
if (!(e->rte.pflags & KRT_REF_SEEN))
{
*ee = e->next;
rte_free(e, p->krt_table);
continue;
}
if (!best || krt_metric(&best->rte) > krt_metric(&e->rte))
{
best = e;
pbest = ee;
}
e->rte.pflags &= ~(KRT_REF_SEEN | KRT_REF_BEST);
ee = &e->next;
}
if (!n->routes)
{
DBG("%I/%d: deleting\n", n->n.prefix, n->n.pxlen);
if (old_best)
krt_learn_announce_delete(p, n->n.addr);
FIB_ITERATE_PUT(&fit);
fib_delete(fib, n);
goto again;
}
best->rte.pflags |= KRT_REF_BEST;
*pbest = best->next;
best->next = n->routes;
n->routes = best;
if ((best != old_best) || p->reload)
{
DBG("%I/%d: announcing (metric=%d)\n", n->n.prefix, n->n.pxlen, krt_metric(&best->rte));
krt_learn_announce_update(p, &best->rte);
}
else
DBG("%I/%d: uptodate (metric=%d)\n", n->n.prefix, n->n.pxlen, krt_metric(&best->rte));
}
FIB_ITERATE_END;
p->reload = 0;
}
static void
krt_learn_async(struct krt_proto *p, rte *e, int new)
{
net *n = net_get(p->krt_table, e->net);
struct rte_storage *g, **gg, *best, **bestp, *old_best;
ASSERT(!e->attrs->cached);
e->attrs->pref = p->p.main_channel->preference;
struct rte_storage *ee = rte_store(e, n, p->krt_table);
old_best = n->routes;
for(gg=&n->routes; g = *gg; gg = &g->next)
if (krt_same_key(&g->rte, e))
break;
if (new)
{
if (g)
{
if (krt_uptodate(&g->rte, e))
{
krt_trace_in(p, e, "[alien async] same");
rte_free(ee, p->krt_table);
return;
}
krt_trace_in(p, e, "[alien async] updated");
*gg = g->next;
rte_free(g, p->krt_table);
}
else
krt_trace_in(p, e, "[alien async] created");
ee->next = n->routes;
n->routes = ee;
}
else if (!g)
{
krt_trace_in(p, e, "[alien async] delete failed");
rte_free(ee, p->krt_table);
return;
}
else
{
krt_trace_in(p, e, "[alien async] removed");
*gg = g->next;
rte_free(ee, p->krt_table);
rte_free(g, p->krt_table);
}
best = n->routes;
bestp = &n->routes;
for(gg=&n->routes; g=*gg; gg=&g->next)
{
if (krt_metric(&best->rte) > krt_metric(&g->rte))
{
best = g;
bestp = gg;
}
g->rte.pflags &= ~KRT_REF_BEST;
}
if (best)
{
best->rte.pflags |= KRT_REF_BEST;
*bestp = best->next;
best->next = n->routes;
n->routes = best;
}
if (best != old_best)
{
DBG("krt_learn_async: distributing change\n");
if (best)
krt_learn_announce_update(p, &best->rte);
else
krt_learn_announce_delete(p, n->n.addr);
}
} }
static void static void
krt_learn_init(struct krt_proto *p) krt_learn_init(struct krt_proto *p)
{ {
if (KRT_CF->learn) if (KRT_CF->learn)
-    channel_setup_in_table(p->p.main_channel, 1);
+  {
struct rtable_config *cf = mb_allocz(p->p.pool, sizeof(struct rtable_config));
cf->name = "Inherited";
cf->addr_type = p->p.net_type;
cf->internal = 1;
p->krt_table = rt_setup(p->p.pool, cf);
}
}
static void
krt_dump(struct proto *P)
{
struct krt_proto *p = (struct krt_proto *) P;
if (!KRT_CF->learn)
return;
debug("KRT: Table of inheritable routes\n");
rt_dump(p->krt_table);
} }
#endif #endif
@ -322,7 +547,7 @@ rte_feed_count(net *n)
{ {
uint count = 0; uint count = 0;
for (struct rte_storage *e = n->routes; e; e = e->next) for (struct rte_storage *e = n->routes; e; e = e->next)
-    if (rte_is_valid(RTES_OR_NULL(e)))
+    if (rte_is_valid(RTE_OR_NULL(e)))
count++; count++;
return count; return count;
} }
@ -332,7 +557,7 @@ rte_feed_obtain(net *n, rte **feed, uint count)
{ {
uint i = 0; uint i = 0;
for (struct rte_storage *e = n->routes; e; e = e->next) for (struct rte_storage *e = n->routes; e; e = e->next)
-    if (rte_is_valid(RTES_OR_NULL(e)))
+    if (rte_is_valid(RTE_OR_NULL(e)))
{ {
ASSERT_DIE(i < count); ASSERT_DIE(i < count);
feed[i++] = &e->rte; feed[i++] = &e->rte;
@ -418,7 +643,7 @@ krt_got_route(struct krt_proto *p, rte *e, s8 src)
case KRT_SRC_ALIEN: case KRT_SRC_ALIEN:
if (KRT_CF->learn) if (KRT_CF->learn)
-      krt_learn_rte(p, e);
+      krt_learn_scan(p, e);
else else
krt_trace_in_rl(&rl_alien, p, e, "[alien] ignored"); krt_trace_in_rl(&rl_alien, p, e, "[alien] ignored");
return; return;
@ -487,11 +712,6 @@ static void
krt_init_scan(struct krt_proto *p) krt_init_scan(struct krt_proto *p)
{ {
bmap_reset(&p->seen_map, 1024); bmap_reset(&p->seen_map, 1024);
#ifdef KRT_ALLOW_LEARN
if (KRT_CF->learn)
channel_refresh_begin(p->p.main_channel);
#endif
} }
static void static void
@ -519,7 +739,7 @@ krt_prune(struct krt_proto *p)
#ifdef KRT_ALLOW_LEARN #ifdef KRT_ALLOW_LEARN
if (KRT_CF->learn) if (KRT_CF->learn)
-    channel_refresh_end(p->p.main_channel);
+    krt_learn_prune(p);
#endif #endif
if (p->ready) if (p->ready)
@ -561,7 +781,7 @@ krt_got_route_async(struct krt_proto *p, rte *e, int new, s8 src)
case KRT_SRC_ALIEN: case KRT_SRC_ALIEN:
if (KRT_CF->learn) if (KRT_CF->learn)
{ {
-      krt_learn_rte(p, e);
+      krt_learn_async(p, e, new);
return; return;
} }
#endif #endif
@ -807,7 +1027,6 @@ krt_init(struct proto_config *CF)
p->p.if_notify = krt_if_notify; p->p.if_notify = krt_if_notify;
p->p.reload_routes = krt_reload_routes; p->p.reload_routes = krt_reload_routes;
p->p.feed_end = krt_feed_end; p->p.feed_end = krt_feed_end;
p->p.rte_better = krt_rte_better;
krt_sys_init(p); krt_sys_init(p);
return &p->p; return &p->p;
@ -963,4 +1182,7 @@ struct protocol proto_unix_kernel = {
.reconfigure = krt_reconfigure, .reconfigure = krt_reconfigure,
.copy_config = krt_copy_config, .copy_config = krt_copy_config,
.get_attr = krt_get_attr, .get_attr = krt_get_attr,
#ifdef KRT_ALLOW_LEARN
.dump = krt_dump,
#endif
}; };
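krt_learn_prune() and krt_learn_async(), reinstated above, both end with the same idiom: scan the per-net list of inherited routes with a pointer-to-pointer cursor, remember the entry with the lowest kernel metric, then unlink it and relink it at the head so n->routes always points at the best alien route. A self-contained sketch of that idiom with simplified stand-in types (illustrative only, not part of this commit):

struct ent { struct ent *next; int metric; };

static struct ent *
relink_best(struct ent **head)
{
  struct ent *best = *head, **bestp = head;

  for (struct ent **ee = head, *e; (e = *ee); ee = &e->next)
    if (e->metric < best->metric)	/* lower kernel metric wins */
    {
      best = e;
      bestp = ee;
    }

  if (best)
  {
    *bestp = best->next;	/* unlink from its current position */
    best->next = *head;		/* relink as the new list head */
    *head = best;
  }

  return best;
}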
@ -51,6 +51,10 @@ struct krt_proto {
struct proto p; struct proto p;
struct krt_state sys; /* Sysdep state */ struct krt_state sys; /* Sysdep state */
#ifdef KRT_ALLOW_LEARN
struct rtable *krt_table; /* Internal table of inherited routes */
#endif
#ifndef CONFIG_ALL_TABLES_AT_ONCE #ifndef CONFIG_ALL_TABLES_AT_ONCE
timer *scan_timer; timer *scan_timer;
#endif #endif