mirror of https://gitlab.nic.cz/labs/bird.git

Merge remote-tracking branch 'origin/partial-reload' into thread-next

commit 28bb5694f8
@@ -81,6 +81,7 @@ CF_DECLS
   const struct filter *f;
   struct f_tree *e;
   struct f_trie *trie;
+  const struct f_trie *const_trie;
   struct f_val v;
   struct password_item *p;
   struct rt_show_data *ra;

@@ -977,7 +977,8 @@ inherited from templates can be updated by new definitions.
	<cf/roa_check()/ operator). In contrast to other filter operators,
	this status for the same route may change as the content of ROA tables
	changes. When this option is active, BIRD activates automatic reload of
-	affected channels whenever ROA tables are updated (after a short settle
+	the appropriate subset of prefixes imported or exported by the channels
+	whenever ROA tables are updated (after a short settle
	time). When disabled, route reloads have to be requested manually. The
	option is ignored if <cf/roa_check()/ is not used in channel filters.
	Note that for BGP channels, automatic reload requires
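For context, this is how the channel option appears in a BIRD 2 configuration. A minimal sketch, assuming a ROA table named r4 and a BGP protocol named peer1 (both names illustrative, not from this patch):

  roa4 table r4;

  protocol bgp peer1 {
    ipv4 {
      import filter {
        if roa_check(r4, net, bgp_path.last) = ROA_INVALID then reject;
        accept;
      };
      rpki reload on;   # re-evaluate affected prefixes on ROA updates
    };
  }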
@@ -1261,7 +1262,7 @@ This argument can be omitted if there exists only a single instance.
	Enable, disable or restart a given protocol instance, instances matching
	the <cf><m/pattern/</cf> or <cf/all/ instances.

-	<tag><label id="cli-reload">reload [in|out] <m/name/|"<m/pattern/"|all</tag>
+	<tag><label id="cli-reload">reload [in|out] (<m/name/|"<m/pattern/"|all) [partial prefix]</tag>
	Reload a given protocol instance, that means re-import routes from the
	protocol instance and re-export preferred routes to the instance. If
	<cf/in/ or <cf/out/ options are used, the command is restricted to one

@@ -1272,6 +1273,9 @@ This argument can be omitted if there exists only a single instance.
	propagates the old set of routes. For example when <cf/configure soft/
	command was used to change filters.

	If the <cf/partial prefix/ option is used, only routes matching the given
	prefix set are reloaded. BGP performs a partial reload only if its import
	table is enabled; otherwise the partial reload is refused.

	Re-export always succeeds, but re-import is protocol-dependent and might
	fail (for example, if BGP neighbor does not support route-refresh
	extension). In that case, re-export is also skipped. Note that for the
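A usage sketch of the extended command; the protocol name is illustrative and the prefix set uses the ordinary filter literal syntax:

  bird> reload in peer1 partial [ 192.0.2.0/24+ ]
  peer1: reloading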
lib/fib.h

@@ -94,17 +94,16 @@ void fit_copy(struct fib *f, struct fib_iterator *dst, struct fib_iterator *src)
	uint count_ = (fib)->hash_size; \
	uint hpos_ = (it)->hash; \
	type *z; \
-	for(;;) { \
-	  if (!fn_) \
-	    { \
-	      if (++hpos_ >= count_) \
-	        break; \
-	      fn_ = (fib)->hash_table[hpos_]; \
-	      continue; \
-	    } \
+	for(;;fn_ = fn_->next) { \
+	  while (!fn_ && ++hpos_ < count_) \
+	    fn_ = (fib)->hash_table[hpos_]; \
+	  if (hpos_ >= count_) \
+	    break; \
	  z = fib_node_to_user(fib, fn_);

-#define FIB_ITERATE_END fn_ = fn_->next; } } while(0)
+#define FIB_ITERATE_END } } while(0)

#define FIB_ITERATE_PUT(it) fit_put(it, fn_)
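For orientation, a minimal sketch of the iterator protocol these macros implement, mirroring their call sites later in this patch (rt_feed_by_fib, OSPF's rt_sync); variable names are illustrative:

  struct fib_iterator fit;
  FIB_ITERATE_INIT(&fit, &tab->fib);
  FIB_ITERATE_START(&tab->fib, &fit, net, n)
  {
    if (need_a_break)   /* hypothetical suspension condition */
    {
      FIB_ITERATE_PUT(&fit);  /* store the position; the next FIB_ITERATE_START resumes here */
      return;
    }
    /* ... process net *n ... */
  }
  FIB_ITERATE_END;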

@@ -107,6 +107,44 @@ proto_postconfig(void)
  this_proto = NULL;
}

static void
channel_reload_out_done_main(void *_prr)
{
  struct proto_reload_request *prr = _prr;
  ASSERT_THE_BIRD_LOCKED;

  rfree(prr->trie->lp);
}

static inline void
proto_call_cmd_reload(struct proto_spec ps, int dir, const struct f_trie *trie)
{
  struct proto_reload_request *prr = cfg_alloc(sizeof *prr);
  *prr = (struct proto_reload_request) {
    .trie = trie,
    .dir = dir,
    .counter = 1,
  };
  if (trie)
  {
    /* The CLI linpool is handed over to the trie, because the trie is needed
     * for longer than the linpool would otherwise live in the CLI.
     * The linpool is freed in channel_reload_out_done_main(). */
    ASSERT_DIE(this_cli->parser_pool == prr->trie->lp);
    rmove(this_cli->parser_pool, &root_pool);
    this_cli->parser_pool = lp_new(this_cli->pool);
    prr->ev = (event) {
      .data = prr,
      .hook = channel_reload_out_done_main,
    };
  }

  proto_apply_cmd(ps, proto_cmd_reload, 1, (uintptr_t) prr);
  /* This function held one reference to the trie, so drop it here,
   * the same way the protocols drop theirs. */
  if (trie)
    if (atomic_fetch_sub_explicit(&prr->counter, 1, memory_order_acq_rel) == 1)
      ev_send_loop(&main_birdloop, &prr->ev);
}

#define DIRECT_CFG ((struct rt_dev_config *) this_proto)
@@ -120,7 +158,7 @@ CF_KEYWORDS(PASSWORD, KEY, FROM, PASSIVE, TO, ID, EVENTS, PACKETS, PROTOCOLS, CH
CF_KEYWORDS(ALGORITHM, KEYED, HMAC, MD5, SHA1, SHA256, SHA384, SHA512, BLAKE2S128, BLAKE2S256, BLAKE2B256, BLAKE2B512)
CF_KEYWORDS(PRIMARY, STATS, COUNT, FOR, IN, COMMANDS, PREEXPORT, NOEXPORT, EXPORTED, GENERATE)
CF_KEYWORDS(BGP, PASSWORDS, DESCRIPTION)
-CF_KEYWORDS(RELOAD, IN, OUT, MRTDUMP, MESSAGES, RESTRICT, MEMORY, CLASS, DSCP)
+CF_KEYWORDS(RELOAD, IN, OUT, MRTDUMP, MESSAGES, RESTRICT, MEMORY, CLASS, DSCP, PARTIAL)
CF_KEYWORDS(TIMEFORMAT, ISO, SHORT, LONG, ROUTE, PROTOCOL, BASE, LOG, S, MS, US)
CF_KEYWORDS(GRACEFUL, RESTART, WAIT, MAX, AS)
CF_KEYWORDS(MIN, IDLE, RX, TX, INTERVAL, MULTIPLIER, PASSIVE)

@@ -151,6 +189,7 @@ CF_ENUM_PX(T_ENUM_AF, AF_, AFI_, IPV4, IPV6)
%type <net_ptr> r_args_for
%type <t> channel_sym
%type <c> channel_arg
+%type <const_trie> partial_opt

CF_GRAMMAR
@@ -897,18 +936,28 @@ CF_CLI(DUMP FILTER ALL,,, [[Dump all filters in linearized form]])
CF_CLI(EVAL, term, <expr>, [[Evaluate an expression]])
{ cmd_eval(f_linearize($2, 1)); } ;

partial_opt:
  PARTIAL term {
    struct f_val val;
    if (f_eval(f_linearize($2, 1), &val) > F_RETURN) cf_error("Runtime error");
    if (val.type != T_PREFIX_SET) cf_error("Partial spec must be prefix set");
    $$ = val.val.ti;
  }
 | /* empty */ { $$ = NULL; }
 ;

CF_CLI(DISABLE, proto_patt opttext, (<protocol> | \"<pattern>\" | all) [message], [[Disable protocol]])
{ proto_apply_cmd($2, proto_cmd_disable, 1, (uintptr_t) $3); } ;
CF_CLI(ENABLE, proto_patt opttext, (<protocol> | \"<pattern>\" | all) [message], [[Enable protocol]])
{ proto_apply_cmd($2, proto_cmd_enable, 1, (uintptr_t) $3); } ;
CF_CLI(RESTART, proto_patt opttext, (<protocol> | \"<pattern>\" | all) [message], [[Restart protocol]])
{ proto_apply_cmd($2, proto_cmd_restart, 1, (uintptr_t) $3); } ;
-CF_CLI(RELOAD, proto_patt, <protocol> | \"<pattern>\" | all, [[Reload protocol]])
-{ proto_apply_cmd($2, proto_cmd_reload, 1, CMD_RELOAD); } ;
-CF_CLI(RELOAD IN, proto_patt, <protocol> | \"<pattern>\" | all, [[Reload protocol (just imported routes)]])
-{ proto_apply_cmd($3, proto_cmd_reload, 1, CMD_RELOAD_IN); } ;
-CF_CLI(RELOAD OUT, proto_patt, <protocol> | \"<pattern>\" | all, [[Reload protocol (just exported routes)]])
-{ proto_apply_cmd($3, proto_cmd_reload, 1, CMD_RELOAD_OUT); } ;
+CF_CLI(RELOAD, proto_patt partial_opt, (<protocol> | \"<pattern>\" | all) [partial <prefix set>], [[Reload protocol]])
+{ proto_call_cmd_reload($2, CMD_RELOAD, $3); } ;
+CF_CLI(RELOAD IN, proto_patt partial_opt, <protocol> | \"<pattern>\" | all, [[Reload protocol (just imported routes)]])
+{ proto_call_cmd_reload($3, CMD_RELOAD_IN, $4); } ;
+CF_CLI(RELOAD OUT, proto_patt partial_opt, <protocol> | \"<pattern>\" | all, [[Reload protocol (just exported routes)]])
+{ proto_call_cmd_reload($3, CMD_RELOAD_OUT, $4); } ;

CF_CLI_HELP(DEBUG, ..., [[Control protocol debugging via BIRD logs]])
CF_CLI(DEBUG, debug_args, (<protocol> | <channel> | \"<pattern>\" | all) (all | off | { states|routes|filters|interfaces|events|packets [, ...] }), [[Control protocol debugging via BIRD logs]])

nest/proto.c

@@ -51,11 +51,15 @@ static char *proto_state_name(struct proto *p);
static void channel_init_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf);
static void channel_update_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf);
static void channel_reset_limit(struct channel *c, struct limit *l, int dir);
+static int channel_refeed_prefilter(const struct rt_prefilter *p, const net_addr *n);
+static int channel_import_prefilter(const struct rt_prefilter *p, const net_addr *n);
static void channel_feed_end(struct channel *c);
static void channel_stop_export(struct channel *c);
static void channel_export_stopped(struct rt_export_request *req);
static void channel_refeed_stopped(struct rt_export_request *req);
static void channel_check_stopped(struct channel *c);
+static void channel_reload_in_done(struct channel_import_request *cir);
+static void channel_request_partial_reload(struct channel *c, struct channel_import_request *cir);

static inline int proto_is_done(struct proto *p)
{ return (p->proto_state == PS_DOWN) && proto_is_inactive(p); }

@@ -337,14 +341,109 @@ proto_remove_channels(struct proto *p)
    proto_remove_channel(p, c);
}

/**
 * # Automatic ROA reloads
 *
 * Route origin authorizations may (and do) change over time by updates via
 * our RPKI protocols. This then manifests in ROA tables. As roa_check()
 * is always evaluated against the specific contents of a ROA table at a
 * specific moment in time, its result may change after updates of the ROA
 * table and must therefore be re-evaluated any time the result may have
 * changed.
 *
 * To enable this mechanism, there are auxiliary tools integrated in BIRD
 * to automatically re-evaluate all filters that may get a different outcome
 * after a ROA change.
 *
 * The ROA Subscription Data Structure (struct roa_subscription) is the
 * connector between the channel and the ROA table, keeping track of
 * unprocessed changes and initiating the reloads. The modus operandi is
 * as follows:
 *
 * Init 1. Check whether the filter uses ROA at all.
 * Init 2. Request exports from the ROA table.
 * Init 3. Allocate a trie.
 *
 * Export from ROA: This may affect all routes for prefixes matching the ROA
 * prefix, disregarding its maxlen. Thus we mark these routes in the request's
 * auxiliary trie. Then we ping the settle timer to wait a reasonable amount of
 * time before actually requesting a channel reload.
 *
 * The settle timer fires when nothing has pinged it for the 'min' time, or
 * the 'max' time has elapsed since the first ping. It then:
 *
 * - requests a partial channel import / export reload based on the trie
 * - allocates a new trie
 *
 * As the import/export reload uses the auxiliary trie to prefilter prefixes,
 * the trie must be freed after the reload is done, which is ensured in the
 * .done() hook of the reimport/reexport request.
 *
 * # Channel export refeed
 *
 * The request, either by ROA or from CLI, is enqueued to the channel and an
 * auxiliary export hook is requested from the table. This way, the ordinary
 * updates can flow uninterrupted while the refeed gets prefiltered by the
 * given trie (if given). When the auxiliary export hook finishes, its .done()
 * hook is called so that the requestor can do their cleanup.
 *
 * While refeeding, special care must be taken about route changes inside the
 * table. For this, an auxiliary trie is allocated to keep track of already
 * refed nets, avoiding unnecessary repeated re-evaluation of filters.
 *
 * # Channel import reload from import table
 *
 * When the import table is on, the channel keeps the original version of each
 * route in the table together with the actual version after filters, in the
 * form of an additional layer of route attributes underneath the actual
 * version. This makes it exceptionally simple to get the original version of
 * a route directly from the table by an ordinary export which strips all the
 * newer layers.
 *
 * Then, by processing all these auxiliary exports, the channel basically
 * re-imports all the routes into the table back again, re-evaluating the
 * filters and ROA checks.
 *
 * # Channel import reload from protocols
 *
 * When the import table is off, the protocol gets the reimport request
 * directly via the .reload_routes() hook and must do its internal route
 * reload instead. The protocol may not support it; in such a case, this
 * function returns 0, indicating that no partial reload is going to happen.
 * It's then up to the developer's or user's discretion to run a full reload
 * instead.
 *
 * # Caveats, FIXME's, TODO's and other kinds of hell
 *
 * The partial reexport uses a trie to track state for single prefixes. This
 * may do crazy things if a partial reload were to be performed on any table
 * other than plain IPv4 or IPv6. Network types like VPNv6 or Flowspec may
 * cause some crashes. This is currently not checked anywhere.
 *
 * Anyway, we decided to split the table FIB structure to carry only a mapping
 * between a prefix and a locally-unique ID, and after this update is done
 * (probably also in v2), the tracking tries may be easily replaced by
 * bitfields, thereby fixing this bug.
 *
 * We also probably didn't do a proper analysis of the implemented algorithm
 * for reexports, so if there is somebody willing to formally prove that we
 * both won't miss any update and won't reexport more than needed, you're
 * welcome to submit such a proof.
 *
 * We wish you a pleasant reading, analyzing and bugfixing experience.
 *
 * Kata, Maria and the BIRD Team
 */
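To make the .reload_routes() contract above concrete, here is a minimal sketch of a protocol hook under this scheme, condensed from krt_reload_routes() later in this patch; the protocol struct and the scheduling call are illustrative, not part of the patch:

  static int
  example_reload_routes(struct channel *C, struct channel_import_request *cir)
  {
    struct example_proto *p = (void *) C->proto;

    if (cir->trie)
    {
      /* This protocol cannot reload just a subset of routes:
       * release the request and refuse, letting the caller
       * fall back to a full reload. */
      cir->done(cir);
      return 0;
    }

    example_schedule_full_resend(p);  /* hypothetical protocol-internal full reload */
    cir->done(cir);
    return 1;
  }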

struct roa_subscription {
  node roa_node;
  struct settle settle;
  struct channel *c;
  struct rt_export_request req;
  struct channel_feeding_request cfr[2];
  struct f_trie *trie;
};

static void
channel_roa_in_reload_done(struct channel_import_request *req)
{
  rfree(req->trie->lp);
}

static void
channel_roa_in_changed(struct settle *se)
{

@@ -352,7 +451,21 @@ channel_roa_in_changed(struct settle *se)
  struct channel *c = s->c;

  CD(c, "Reload triggered by RPKI change");
-  channel_request_reload(c);
+  struct channel_import_request *cir = lp_alloc(s->trie->lp, sizeof *cir);
+  *cir = (struct channel_import_request) {
+    .trie = s->trie,
+    .done = channel_roa_in_reload_done,
+  };
+
+  s->trie = f_new_trie(lp_new(c->proto->pool), 0);
+
+  channel_request_partial_reload(c, cir);
}

static void
channel_roa_out_reload_done(struct channel_feeding_request *req)
{
  rfree(req->trie->lp);
}

static void

@@ -363,31 +476,38 @@ channel_roa_out_changed(struct settle *se)

  CD(c, "Feeding triggered by RPKI change");

-  /* Refeed already pending */
-  if ((s->cfr[0].state == CFRS_PENDING) || (s->cfr[1].state == CFRS_PENDING))
-    return;
+  /* Setup feeding request */
+  struct channel_feeding_request *cfr = lp_alloc(s->trie->lp, sizeof *cfr);
+  *cfr = (struct channel_feeding_request) {
+    .type = CFRT_AUXILIARY,
+    .trie = s->trie,
+    .done = channel_roa_out_reload_done,
+  };

-  /* First refeed inactive */
-  if (s->cfr[0].state == CFRS_INACTIVE)
-  {
-    s->cfr[0].type = CFRT_AUXILIARY;
-    channel_request_feeding(c, &s->cfr[0]);
-  }
-  else
-  {
-    /* Second refeed MUST be inactive */
-    ASSERT_DIE(s->cfr[1].state == CFRS_INACTIVE);
-    s->cfr[1].type = CFRT_AUXILIARY;
-    channel_request_feeding(c, &s->cfr[1]);
-  }
+  /* Prepare new trie */
+  s->trie = f_new_trie(lp_new(c->proto->pool), 0);
+
+  /* Actually request the feed */
+  channel_request_feeding(c, cfr);
}

static void
-channel_export_one_roa(struct rt_export_request *req, const net_addr *net UNUSED, struct rt_pending_export *first)
+channel_export_one_roa(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
{
  struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);

  /* TODO: use the information about what roa has changed */
  switch (net->type)
  {
    case NET_ROA4:
      trie_add_prefix(s->trie, net, net_pxlen(net), 32);
      break;
    case NET_ROA6:
      trie_add_prefix(s->trie, net, net_pxlen(net), 128);
      break;
    default:
      bug("ROA table sent us a non-roa export");
  }

  settle_kick(&s->settle, s->c->proto->loop);

  rpe_mark_seen_all(req->hook, first, NULL, NULL);

@@ -434,6 +554,7 @@ channel_roa_subscribe(struct channel *c, rtable *tab, int dir)
  *s = (struct roa_subscription) {
    .settle = SETTLE_INIT(&c->roa_settle, dir ? channel_roa_in_changed : channel_roa_out_changed, NULL),
    .c = c,
+    .trie = f_new_trie(lp_new(c->proto->pool), 0),
    .req = {
      .name = mb_sprintf(c->proto->pool, "%s.%s.roa-%s.%s",
          c->proto->name, c->name, dir ? "in" : "out", tab->name),

@@ -464,6 +585,7 @@ channel_roa_unsubscribed(struct rt_export_request *req)
static void
channel_roa_unsubscribe(struct roa_subscription *s)
{
+  rfree(s->trie->lp);
  rt_stop_export(&s->req, channel_roa_unsubscribed);
  settle_cancel(&s->settle);
}
@@ -571,8 +693,10 @@ channel_start_export(struct channel *c)
    .list = proto_work_list(c->proto),
    .pool = p,
    .feed_block_size = c->feed_block_size,
-    .addr = c->out_subprefix,
-    .addr_mode = c->out_subprefix ? TE_ADDR_IN : TE_ADDR_NONE,
+    .prefilter = {
+      .mode = c->out_subprefix ? TE_ADDR_IN : TE_ADDR_NONE,
+      .addr = c->out_subprefix,
+    },
    .trace_routes = c->debug | c->proto->debug,
    .dump_req = channel_dump_export_req,
    .log_state_change = channel_export_log_state_change,

@@ -694,6 +818,8 @@ channel_refeed_stopped(struct rt_export_request *req)
static void
channel_init_feeding(struct channel *c)
{
  int no_trie = 0;

  for (struct channel_feeding_request *cfrp = c->refeed_pending; cfrp; cfrp = cfrp->next)
    if (cfrp->type == CFRT_DIRECT)
    {

@@ -701,14 +827,66 @@ channel_init_feeding(struct channel *c)
      channel_stop_export(c);
      return;
    }
    else if (!cfrp->trie)
      no_trie = 1;

  /* No direct feeding, running auxiliary refeed. */
  c->refeeding = c->refeed_pending;
  c->refeed_pending = NULL;
  c->refeed_trie = f_new_trie(lp_new(c->proto->pool), 0);

  if (no_trie)
  {
    c->refeed_req.prefilter.mode = TE_ADDR_NONE;
    c->refeed_req.prefilter.hook = NULL;
  }
  else
  {
    c->refeed_req.prefilter.mode = TE_ADDR_HOOK;
    c->refeed_req.prefilter.hook = channel_refeed_prefilter;
  }

  rt_request_export(c->table, &c->refeed_req);
}

static int
channel_refeed_prefilter(const struct rt_prefilter *p, const net_addr *n)
{
  const struct channel *c =
    SKIP_BACK(struct channel, refeed_req,
	SKIP_BACK(struct rt_export_request, prefilter, p)
    );

  ASSERT_DIE(c->refeeding);
  for (struct channel_feeding_request *cfr = c->refeeding; cfr; cfr = cfr->next)
    if (!cfr->trie || trie_match_net(cfr->trie, n))
      return 1;
  return 0;
}

int
channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n)
{
  for (struct channel_import_request *cir = cir_head; cir; cir = cir->next)
  {
    if (!cir->trie || trie_match_net(cir->trie, n))
      return 1;
  }
  return 0;
}

static int
channel_import_prefilter(const struct rt_prefilter *p, const net_addr *n)
{
  const struct channel *c =
    SKIP_BACK(struct channel, reload_req,
	SKIP_BACK(struct rt_export_request, prefilter, p)
    );
  ASSERT_DIE(c->importing);

  return channel_import_request_prefilter(c->importing, n);
}

static void
channel_feed_end(struct channel *c)
{

@@ -742,8 +920,7 @@ channel_feed_end(struct channel *c)
  for (struct channel_feeding_request *cfr = c->refeeding, *next = cfr ? cfr->next : NULL;
       cfr;
       (cfr = next), (next = next ? next->next : NULL))
-    if (cfr->flags & CFRF_DYNAMIC)
-      mb_free(cfr);
+    CALL(cfr->done, cfr);

  /* Drop the refeed batch */
  c->refeeding = NULL;

@@ -760,9 +937,15 @@ channel_feed_end(struct channel *c)

/* Called by protocol for reload from in_table */
void
-channel_schedule_reload(struct channel *c)
+channel_schedule_reload(struct channel *c, struct channel_import_request *cir)
{
  ASSERT(c->in_req.hook);
  int no_trie = 0;
  if (cir)
  {
    cir->next = c->import_pending;
    c->import_pending = cir;
  }

  if (c->reload_req.hook)
  {

@@ -771,7 +954,29 @@ channel_schedule_reload(struct channel *c)
    return;
  }

  rt_refresh_begin(&c->in_req);
  /* If there is any full-reload request, we can disregard all partials */
  for (struct channel_import_request *last = cir; last && no_trie == 0;)
  {
    if (!last->trie)
      no_trie = 1;
    last = last->next;
  }

  /* Activating pending imports */
  c->importing = c->import_pending;
  c->import_pending = NULL;

  if (no_trie)
  {
    c->reload_req.prefilter.mode = TE_ADDR_NONE;
    c->reload_req.prefilter.hook = NULL;
  }
  else
  {
    c->reload_req.prefilter.mode = TE_ADDR_HOOK;
    c->reload_req.prefilter.hook = channel_import_prefilter;
  }

  rt_request_export(c->table, &c->reload_req);
}

@@ -982,7 +1187,11 @@ channel_set_state(struct channel *c, uint state)
void
channel_request_feeding(struct channel *c, struct channel_feeding_request *cfr)
{
-  ASSERT(c->out_req.hook);
+  ASSERT_DIE(c->out_req.hook);
+
+  CD(c, "Feeding requested (%s)",
+      cfr->type == CFRT_DIRECT ? "direct" :
+      (cfr->trie ? "partial" : "auxiliary"));

  /* Enqueue the request */
  cfr->next = c->refeed_pending;

@@ -993,12 +1202,21 @@ channel_request_feeding(struct channel *c, struct channel_feeding_request *cfr)
  channel_init_feeding(c);
}

static void
channel_feeding_request_done_dynamic(struct channel_feeding_request *req)
{
  mb_free(req);
}

void
channel_request_feeding_dynamic(struct channel *c, enum channel_feeding_request_type type)
{
-  struct channel_feeding_request *req = mb_allocz(c->proto->pool, sizeof *req);
-  req->type = type;
-  req->flags |= CFRF_DYNAMIC;
+  struct channel_feeding_request *req = mb_alloc(c->proto->pool, sizeof *req);
+  *req = (struct channel_feeding_request) {
+    .type = type,
+    .done = channel_feeding_request_done_dynamic,
+  };

  channel_request_feeding(c, req);
}

@@ -1012,6 +1230,12 @@ channel_stop_export(struct channel *c)
  rt_stop_export(&c->out_req, channel_export_stopped);
}

static void
channel_import_request_done_dynamic(struct channel_import_request *req)
{
  mb_free(req);
}

static void
channel_request_reload(struct channel *c)
{

@@ -1019,11 +1243,28 @@ channel_request_reload(struct channel *c)
  ASSERT(channel_reloadable(c));

  CD(c, "Reload requested");
  struct channel_import_request *cir = mb_alloc(c->proto->pool, sizeof *cir);
  cir->trie = NULL;
  cir->done = channel_import_request_done_dynamic;

  if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
-    channel_schedule_reload(c);
-  else
-    c->proto->reload_routes(c);
+    channel_schedule_reload(c, cir);
+  else if (!c->proto->reload_routes(c, cir))
+    bug("Channel %s.%s refused full import reload.", c->proto->name, c->name);
}

static void
channel_request_partial_reload(struct channel *c, struct channel_import_request *cir)
{
  ASSERT(c->in_req.hook);
  ASSERT(channel_reloadable(c));

  CD(c, "Partial import reload requested");

  if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
    channel_schedule_reload(c, cir);
  else if (!c->proto->reload_routes(c, cir))
    cli_msg(-15, "%s.%s: partial reload refused, please run full reload instead", c->proto->name, c->name);
}

const struct channel_class channel_basic = {

@@ -1150,7 +1391,7 @@ channel_reconfigure(struct channel *c, struct channel_config *cf)
  // c->ra_mode = cf->ra_mode;
  c->merge_limit = cf->merge_limit;
  c->preference = cf->preference;
-  c->out_req.addr = c->out_subprefix = cf->out_subprefix;
+  c->out_req.prefilter.addr = c->out_subprefix = cf->out_subprefix;
  c->debug = cf->debug;
  c->in_req.trace_routes = c->out_req.trace_routes = c->debug | c->proto->debug;
  c->rpki_reload = cf->rpki_reload;

@@ -2622,11 +2863,37 @@ proto_cmd_restart(struct proto *p, uintptr_t arg, int cnt UNUSED)
  cli_msg(-12, "%s: restarted", p->name);
}

-void
-proto_cmd_reload(struct proto *p, uintptr_t dir, int cnt UNUSED)
-{
-  struct channel *c;
+struct channel_cmd_reload_feeding_request {
+  struct channel_feeding_request cfr;
+  struct proto_reload_request *prr;
+};
+
+struct channel_cmd_reload_import_request {
+  struct channel_import_request cir;
+  struct proto_reload_request *prr;
+};
+
+static void
+channel_reload_out_done(struct channel_feeding_request *cfr)
+{
+  struct channel_cmd_reload_feeding_request *ccrfr = SKIP_BACK(struct channel_cmd_reload_feeding_request, cfr, cfr);
+  if (atomic_fetch_sub_explicit(&ccrfr->prr->counter, 1, memory_order_acq_rel) == 1)
+    ev_send_loop(&main_birdloop, &ccrfr->prr->ev);
+}
+
+static void
+channel_reload_in_done(struct channel_import_request *cir)
+{
+  struct channel_cmd_reload_import_request *ccrir = SKIP_BACK(struct channel_cmd_reload_import_request, cir, cir);
+  if (atomic_fetch_sub_explicit(&ccrir->prr->counter, 1, memory_order_acq_rel) == 1)
+    ev_send_loop(&main_birdloop, &ccrir->prr->ev);
+}
+
+void
+proto_cmd_reload(struct proto *p, uintptr_t _prr, int cnt UNUSED)
+{
+  struct proto_reload_request *prr = (void *) _prr;
+  struct channel *c;
  if (p->disabled)
  {
    cli_msg(-8, "%s: already disabled", p->name);

@@ -2638,7 +2905,7 @@ proto_cmd_reload(struct proto *p, uintptr_t dir, int cnt UNUSED)
    return;

  /* All channels must support reload */
-  if (dir != CMD_RELOAD_OUT)
+  if (prr->dir != CMD_RELOAD_OUT)
    WALK_LIST(c, p->channels)
      if ((c->channel_state == CS_UP) && !channel_reloadable(c))
      {
@@ -2649,16 +2916,56 @@ proto_cmd_reload(struct proto *p, uintptr_t dir, int cnt UNUSED)
  log(L_INFO "Reloading protocol %s", p->name);

  /* re-importing routes */
-  if (dir != CMD_RELOAD_OUT)
+  if (prr->dir != CMD_RELOAD_OUT)
    WALK_LIST(c, p->channels)
      if (c->channel_state == CS_UP)
-        channel_request_reload(c);
+      {
+        if (prr->trie)
+        {
+          /* Increase the refeed counter */
+          atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed);
+          ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
+
+          struct channel_cmd_reload_import_request *req = lp_alloc(prr->trie->lp, sizeof *req);
+          *req = (struct channel_cmd_reload_import_request) {
+            .cir = {
+              .done = channel_reload_in_done,
+              .trie = prr->trie,
+            },
+            .prr = prr,
+          };
+          channel_request_partial_reload(c, &req->cir);
+        }
+        else
+          channel_request_reload(c);
+      }

  /* re-exporting routes */
-  if (dir != CMD_RELOAD_IN)
+  if (prr->dir != CMD_RELOAD_IN)
    WALK_LIST(c, p->channels)
-      if (c->channel_state == CS_UP)
-        channel_request_feeding_dynamic(c, CFRT_AUXILIARY);
+      if ((c->channel_state == CS_UP) && (c->out_req.hook))
+        if (prr->trie)
+        {
+          /* Increase the refeed counter */
+          atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed);
+          ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
+
+          /* Actually request the feeding */
+          struct channel_cmd_reload_feeding_request *req = lp_alloc(prr->trie->lp, sizeof *req);
+          *req = (struct channel_cmd_reload_feeding_request) {
+            .cfr = {
+              .type = CFRT_AUXILIARY,
+              .done = channel_reload_out_done,
+              .trie = prr->trie,
+            },
+            .prr = prr,
+          };
+
+          channel_request_feeding(c, &req->cfr);
+        }
+        else
+          channel_request_feeding_dynamic(c, CFRT_AUXILIARY);

  cli_msg(-15, "%s: reloading", p->name);
}

@@ -125,6 +125,12 @@ struct proto_config {
  /* Protocol-specific data follow... */
};

struct channel_import_request {
  struct channel_import_request *next;            /* Next in request chain */
  void (*done)(struct channel_import_request *);  /* Called when import finishes */
  const struct f_trie *trie;                      /* Reload only matching nets */
};

#define TLIST_PREFIX proto
#define TLIST_TYPE struct proto
#define TLIST_ITEM n

@@ -196,7 +202,7 @@ struct proto {

  void (*rt_notify)(struct proto *, struct channel *, const net_addr *net, struct rte *new, const struct rte *old);
  int (*preexport)(struct channel *, struct rte *rt);
-  void (*reload_routes)(struct channel *);
+  int (*reload_routes)(struct channel *, struct channel_import_request *cir);
  void (*feed_begin)(struct channel *);
  void (*feed_end)(struct channel *);

@@ -278,6 +284,14 @@ struct proto *proto_iterate_named(struct symbol *sym, struct protocol *proto, st

#define PROTO_WALK_CMD(sym,pr,p) for(struct proto *p = NULL; p = proto_iterate_named(sym, pr, p); )

/* Request from CLI to reload multiple protocols */
struct proto_reload_request {
  const struct f_trie *trie;  /* Trie to apply */
  _Atomic uint counter;       /* How many channels remaining */
  uint dir;                   /* Direction of reload */
  event ev;                   /* Event to run when finished */
};
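The counter discipline implied by this struct, as used by proto_call_cmd_reload() and the channel done-hooks earlier in this patch: the counter starts at 1 for the issuer's own reference, each participating channel adds one, and whoever decrements it to zero fires the cleanup event. A condensed sketch of the call sites:

  /* Issuer: one reference for itself */
  prr->counter = 1;

  /* For each channel that takes part in the reload */
  atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed);

  /* In every done-hook, and once by the issuer when it is finished */
  if (atomic_fetch_sub_explicit(&prr->counter, 1, memory_order_acq_rel) == 1)
    ev_send_loop(&main_birdloop, &prr->ev);  /* last reference frees the trie linpool */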

#define PROTO_ENTER_FROM_MAIN(p) ({ \
  ASSERT_DIE(birdloop_inside(&main_birdloop)); \
  struct birdloop *_loop = (p)->loop; \

@@ -572,6 +586,8 @@ struct channel {
  struct f_trie *refeed_trie;                     /* Auxiliary refeed trie */
  struct channel_feeding_request *refeeding;      /* Refeeding the channel */
  struct channel_feeding_request *refeed_pending; /* Scheduled refeeds */
+  struct channel_import_request *importing;       /* Importing the channel */
+  struct channel_import_request *import_pending;  /* Scheduled imports */

  uint feed_block_size;                           /* How many routes to feed at once */

@@ -669,26 +685,26 @@ void proto_remove_channel(struct proto *p, struct channel *c);
int proto_configure_channel(struct proto *p, struct channel **c, struct channel_config *cf);

void channel_set_state(struct channel *c, uint state);
-void channel_schedule_reload(struct channel *c);
+void channel_schedule_reload(struct channel *c, struct channel_import_request *cir);
+int channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n);

static inline void channel_init(struct channel *c) { channel_set_state(c, CS_START); }
static inline void channel_open(struct channel *c) { channel_set_state(c, CS_UP); }
static inline void channel_close(struct channel *c) { channel_set_state(c, CS_STOP); }

struct channel_feeding_request {
-  struct channel_feeding_request *next;
+  struct channel_feeding_request *next;            /* Next in request chain */
+  void (*done)(struct channel_feeding_request *);  /* Called when refeed finishes */
+  const struct f_trie *trie;                       /* Reload only matching nets */
  PACKED enum channel_feeding_request_type {
-    CFRT_DIRECT = 1,
-    CFRT_AUXILIARY,
+    CFRT_DIRECT = 1,    /* Refeed by export restart */
+    CFRT_AUXILIARY,     /* Refeed by auxiliary request */
  } type;
  PACKED enum {
-    CFRS_INACTIVE = 0,
-    CFRS_PENDING,
-    CFRS_RUNNING,
+    CFRS_INACTIVE = 0,  /* Inactive request */
+    CFRS_PENDING,       /* Request enqueued, do not touch */
+    CFRS_RUNNING,       /* Request active, do not touch */
  } state;
  PACKED enum {
    CFRF_DYNAMIC = 1,
  } flags;
};

struct channel *channel_from_export_request(struct rt_export_request *req);

@@ -696,12 +712,29 @@ void channel_request_feeding(struct channel *c, struct channel_feeding_request *
void channel_request_feeding_dynamic(struct channel *c, enum channel_feeding_request_type);

static inline int channel_net_is_refeeding(struct channel *c, const net_addr *n)
-{ return (c->refeeding && c->refeed_trie && !trie_match_net(c->refeed_trie, n)); }
+{
+  /* Not refeeding if not refeeding at all */
+  if (!c->refeeding || !c->refeed_trie)
+    return 0;
+
+  /* Not refeeding if already refed */
+  if (trie_match_net(c->refeed_trie, n))
+    return 0;
+
+  /* Refeeding if matching any request */
+  for (struct channel_feeding_request *cfr = c->refeeding; cfr; cfr = cfr->next)
+    if (!cfr->trie || trie_match_net(cfr->trie, n))
+      return 1;
+
+  /* Not matching any request */
+  return 0;
+}
static inline void channel_net_mark_refed(struct channel *c, const net_addr *n)
{
  ASSERT_DIE(c->refeeding && c->refeed_trie);
  trie_add_prefix(c->refeed_trie, n, n->pxlen, n->pxlen);
}

void *channel_config_new(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto);
void *channel_config_get(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto);
int channel_reconfigure(struct channel *c, struct channel_config *cf);

nest/route.h

@@ -22,6 +22,8 @@
#include "lib/io-loop.h"
#include "lib/settle.h"

+#include "filter/data.h"
+
#include <stdatomic.h>

struct ea_list;

@@ -251,6 +253,23 @@ struct rte_storage {

/* Table-channel connections */

struct rt_prefilter {
  union {
    const struct f_trie *trie;
    const net_addr *addr;  /* Network prefilter address */
    int (*hook)(const struct rt_prefilter *, const net_addr *);
  };
  /* Network prefilter mode (TE_ADDR_*) */
  enum {
    TE_ADDR_NONE = 0,  /* No address matching */
    TE_ADDR_EQUAL,     /* Exact query - show route <addr> */
    TE_ADDR_FOR,       /* Longest prefix match - show route for <addr> */
    TE_ADDR_IN,        /* Interval query - show route in <addr> */
    TE_ADDR_TRIE,      /* Query defined by trie */
    TE_ADDR_HOOK,      /* Query processed by supplied custom hook */
  } mode;
} PACKED;

struct rt_import_request {
  struct rt_import_hook *hook;  /* The table part of importer */
  char *name;

@@ -300,11 +319,10 @@ struct rt_pending_export {

struct rt_export_request {
  struct rt_export_hook *hook;  /* Table part of the export */
-  const char *name;
-  const net_addr *addr;  /* Network prefilter address */
+  char *name;
  u8 trace_routes;
-  u8 addr_mode;          /* Network prefilter mode (TE_ADDR_*) */
  uint feed_block_size;  /* How many routes to feed at once */
+  struct rt_prefilter prefilter;

  event_list *list;  /* Where to schedule export events */
  pool *pool;        /* Pool to use for allocations */

@@ -327,6 +345,21 @@ struct rt_export_request {
  void (*log_state_change)(struct rt_export_request *req, u8);
};

static inline int rt_prefilter_net(const struct rt_prefilter *p, const net_addr *n)
{
  switch (p->mode)
  {
    case TE_ADDR_NONE:  return 1;
    case TE_ADDR_IN:    return net_in_netX(n, p->addr);
    case TE_ADDR_EQUAL: return net_equal(n, p->addr);
    case TE_ADDR_FOR:   return net_in_netX(p->addr, n);
    case TE_ADDR_TRIE:  return trie_match_net(p->trie, n);
    case TE_ADDR_HOOK:  return p->hook(p, n);
  }

  bug("Crazy prefilter application attempt failed wildly.");
}

struct rt_export_hook {
  node n;
  struct rt_exporter *table;  /* The connected table */

@@ -396,12 +429,6 @@ struct rt_table_export_hook {
#define TES_STOP 4
#define TES_MAX 5

-/* Value of addr_mode */
-#define TE_ADDR_NONE 0   /* No address matching */
-#define TE_ADDR_EQUAL 1  /* Exact query - show route <addr> */
-#define TE_ADDR_FOR 2    /* Longest prefix match - show route for <addr> */
-#define TE_ADDR_IN 3     /* Interval query - show route in <addr> */

#define TFT_FIB 1
#define TFT_TRIE 2

@@ -285,14 +285,14 @@ rt_show_cont(struct rt_show_data *d)
  }

  d->req = (struct rt_export_request) {
-    .addr = d->addr,
+    .prefilter.addr = d->addr,
    .name = "CLI Show Route",
    .list = &global_work_list,
    .pool = c->pool,
    .export_bulk = rt_show_net_export_bulk,
    .dump_req = rt_show_dump_req,
    .log_state_change = rt_show_log_state_change,
-    .addr_mode = d->addr_mode,
+    .prefilter.mode = d->addr_mode,
  };

  d->table_counter++;

@@ -1260,27 +1260,12 @@ rte_export(struct rt_table_export_hook *th, struct rt_pending_export *rpe)

  const net_addr *n = rpe->new_best ? rpe->new_best->rte.net : rpe->old_best->rte.net;

-  switch (hook->req->addr_mode)
-  {
-    case TE_ADDR_NONE:
-      break;
-
-    case TE_ADDR_IN:
-      if (!net_in_netX(n, hook->req->addr))
-        goto ignore;
-      break;
-
-    case TE_ADDR_EQUAL:
-      if (!net_equal(n, hook->req->addr))
-        goto ignore;
-      break;
-
-    case TE_ADDR_FOR:
-      bug("Continuous export of best prefix match not implemented yet.");
-
-    default:
-      bug("Strange table export address mode: %d", hook->req->addr_mode);
-  }
+  /* Check export eligibility of this net */
+  if (!rt_prefilter_net(&hook->req->prefilter, n))
+    goto ignore;
+
+  if (hook->req->prefilter.mode == TE_ADDR_FOR)
+    bug("Continuous export of best prefix match not implemented yet.");

  if (rpe->new)
    hook->stats.updates_received++;

@@ -2231,20 +2216,22 @@ rt_table_export_start_feed(struct rtable_private *tab, struct rt_table_export_ho
  struct rt_export_request *req = hook->h.req;

  /* stats zeroed by mb_allocz */
-  switch (req->addr_mode)
+  switch (req->prefilter.mode)
  {
    case TE_ADDR_IN:
      if (tab->trie && net_val_match(tab->addr_type, NB_IP))
      {
        hook->walk_state = mb_allocz(hook->h.pool, sizeof (struct f_trie_walk_state));
        hook->walk_lock = rt_lock_trie(tab);
-        trie_walk_init(hook->walk_state, tab->trie, req->addr);
+        trie_walk_init(hook->walk_state, tab->trie, req->prefilter.addr);
        hook->h.event.hook = rt_feed_by_trie;
        hook->walk_last.type = 0;
        break;
      }
      /* fall through */
    case TE_ADDR_NONE:
+    case TE_ADDR_TRIE:
+    case TE_ADDR_HOOK:
      FIB_ITERATE_INIT(&hook->feed_fit, &tab->fib);
      hook->h.event.hook = rt_feed_by_fib;
      break;

@@ -2330,7 +2317,7 @@ rt_table_export_stop_locked(struct rt_export_hook *hh)
      rt_trace(tab, D_EVENTS, "Stopping export hook %s must wait for uncorking", hook->h.req->name);
      return 0;
    case TES_FEEDING:
-      switch (hh->req->addr_mode)
+      switch (hh->req->prefilter.mode)
      {
        case TE_ADDR_IN:
          if (hook->walk_lock)

@@ -2343,8 +2330,13 @@ rt_table_export_stop_locked(struct rt_export_hook *hh)
          }
          /* fall through */
        case TE_ADDR_NONE:
+        case TE_ADDR_HOOK:
+        case TE_ADDR_TRIE:
          fit_get(&tab->fib, &hook->feed_fit);
          break;
+        case TE_ADDR_EQUAL:
+        case TE_ADDR_FOR:
+          break;
      }
      break;

@@ -4425,7 +4417,7 @@ rt_feed_by_fib(void *data)

  FIB_ITERATE_START(&tab->fib, fit, net, n)
  {
-    if ((c->h.req->addr_mode == TE_ADDR_NONE) || net_in_netX(n->n.addr, c->h.req->addr))
+    if (rt_prefilter_net(&c->h.req->prefilter, n->n.addr))
    {
      if (!rt_prepare_feed(c, n, &block))
      {

@@ -4436,6 +4428,8 @@ rt_feed_by_fib(void *data)
        return;
      }
    }
+    else
+      req_trace(c->h.req, D_ROUTES, "Feeding %N rejected by prefilter", n->n.addr);
  }
  FIB_ITERATE_END;
}

@@ -4500,9 +4494,9 @@ rt_feed_equal(void *data)
  RT_LOCKED(RT_PUB(SKIP_BACK(struct rtable_private, exporter, c->table)), tab)
  {
    ASSERT_DIE(atomic_load_explicit(&c->h.export_state, memory_order_relaxed) == TES_FEEDING);
-    ASSERT_DIE(c->h.req->addr_mode == TE_ADDR_EQUAL);
+    ASSERT_DIE(c->h.req->prefilter.mode == TE_ADDR_EQUAL);

-    if (n = net_find(tab, c->h.req->addr))
+    if (n = net_find(tab, c->h.req->prefilter.addr))
      ASSERT_DIE(rt_prepare_feed(c, n, &block));
  }

@@ -4522,9 +4516,9 @@ rt_feed_for(void *data)
  RT_LOCKED(RT_PUB(SKIP_BACK(struct rtable_private, exporter, c->table)), tab)
  {
    ASSERT_DIE(atomic_load_explicit(&c->h.export_state, memory_order_relaxed) == TES_FEEDING);
-    ASSERT_DIE(c->h.req->addr_mode == TE_ADDR_FOR);
+    ASSERT_DIE(c->h.req->prefilter.mode == TE_ADDR_FOR);

-    if (n = net_route(tab, c->h.req->addr))
+    if (n = net_route(tab, c->h.req->prefilter.addr))
      ASSERT_DIE(rt_prepare_feed(c, n, &block));
  }

@@ -1910,16 +1910,18 @@ bgp_out_table_feed(void *data)

  int max = 512;

-  const net_addr *neq = (hook->h.req->addr_mode == TE_ADDR_EQUAL) ? hook->h.req->addr : NULL;
+  const net_addr *neq = (hook->h.req->prefilter.mode == TE_ADDR_EQUAL) ? hook->h.req->prefilter.addr : NULL;
  const net_addr *cand = NULL;

  do {
    HASH_WALK_ITER(c->prefix_hash, PXH, n, hook->hash_iter)
    {
-      switch (hook->h.req->addr_mode)
+      switch (hook->h.req->prefilter.mode)
      {
+        case TE_ADDR_HOOK:
+        case TE_ADDR_TRIE:
        case TE_ADDR_IN:
-          if (!net_in_netX(n->net, hook->h.req->addr))
+          if (!rt_prefilter_net(&hook->h.req->prefilter, n->net))
            continue;
          /* fall through */
        case TE_ADDR_NONE:

@@ -1931,7 +1933,7 @@ bgp_out_table_feed(void *data)
        case TE_ADDR_FOR:
          if (!neq)
          {
-            if (net_in_netX(hook->h.req->addr, n->net) && (!cand || (n->net->length > cand->length)))
+            if (net_in_netX(hook->h.req->prefilter.addr, n->net) && (!cand || (n->net->length > cand->length)))
              cand = n->net;
            continue;
          }

@@ -1584,18 +1584,30 @@ bgp_update_bfd(struct bgp_proto *p, const struct bfd_options *bfd)
  }
}

-static void
-bgp_reload_routes(struct channel *C)
+static int
+bgp_reload_routes(struct channel *C, struct channel_import_request *cir)
{
  struct bgp_proto *p = (void *) C->proto;
  struct bgp_channel *c = (void *) C;

  /* Ignore non-BGP channels */
  if (C->class != &channel_bgp)
-    return;
+  {
+    cir->done(cir);
+    return 1;
+  }

+  if (cir->trie)
+  {
+    /* Partial reload is not supported without the import table */
+    cir->done(cir);
+    return 0;
+  }
+
+  /* We do not need the cir anymore, and later we would not be able to detect when to free it. */
+  cir->done(cir);

  ASSERT(p->conn && p->route_refresh);
  bgp_schedule_packet(p->conn, c, PKT_ROUTE_REFRESH);
+  return 1;
}

static void

@@ -109,7 +109,7 @@
#include "lib/macro.h"

static int ospf_preexport(struct channel *C, rte *new);
-static void ospf_reload_routes(struct channel *C);
+static int ospf_reload_routes(struct channel *C, struct channel_import_request *cir);
static int ospf_rte_better(const rte *new, const rte *old);
static u32 ospf_rte_igp_metric(const rte *rt);
static void ospf_disp(timer *timer);

@@ -432,16 +432,19 @@ ospf_schedule_rtcalc(struct ospf_proto *p)
  p->calcrt = 1;
}

-static void
-ospf_reload_routes(struct channel *C)
+static int
+ospf_reload_routes(struct channel *C, struct channel_import_request *cir)
{
  struct ospf_proto *p = (struct ospf_proto *) C->proto;
  cir->next = p->cir;
  p->cir = cir;

  if (p->calcrt == 2)
-    return;
+    return 1;

  OSPF_TRACE(D_EVENTS, "Scheduling routing table calculation with route reload");
  p->calcrt = 2;
  return 1;
}

@@ -219,6 +219,7 @@ struct ospf_proto
  slist lsal;  /* List of all LSA's */
  int calcrt;  /* Routing table calculation scheduled?
                  0=no, 1=normal, 2=forced reload */
+  struct channel_import_request *cir;  /* Struct with trie for partial reload */
  list iface_list;  /* List of OSPF interfaces (struct ospf_iface) */
  list area_list;   /* List of OSPF areas (struct ospf_area) */
  int areano;       /* Number of areas I belong to */

@@ -1700,7 +1700,8 @@ ospf_rt_spf(struct ospf_proto *p)
  rt_sync(p);
  lp_flush(p->nhpool);

-  p->calcrt = 0;
+  if (p->cir == NULL)  /* If there is no more cir waiting for reload */
+    p->calcrt = 0;
}

@@ -2020,11 +2021,16 @@ rt_sync(struct ospf_proto *p)

  OSPF_TRACE(D_EVENTS, "Starting routing table synchronization");

  struct channel_import_request *cir = p->cir;
  p->cir = NULL;

  DBG("Now syncing my rt table with nest's\n");
  FIB_ITERATE_INIT(&fit, fib);
again1:
  FIB_ITERATE_START(fib, &fit, ort, nf)
  {
    if (cir && !channel_import_request_prefilter(cir, nf->fn.addr))
      continue;
    /* Sanity check of next-hop addresses, failure should not happen */
    if (nf->n.type && nf->n.nhs)
    {

@@ -2099,7 +2105,6 @@ again1:
      DBG("Mod rte type %d - %N via %I on iface %s, met %d\n",
          a0.source, nf->fn.addr, a0.gw, a0.iface ? a0.iface->name : "(none)", nf->n.metric1);
      */

      rte_update(p->p.main_channel, nf->fn.addr, &e0, p->p.main_source);
    }
  }

@@ -2125,6 +2130,13 @@ again1:
  }
  FIB_ITERATE_END;

  while (cir)
  {
    struct channel_import_request *next = cir->next;
    cir->done(cir);
    cir = next;
  }

  WALK_LIST(oa, p->area_list)
  {
    /* Cleanup ASBR hash tables */

@@ -96,13 +96,37 @@ pipe_preexport(struct channel *C, rte *e)
  return 0;
}

-static void
-pipe_reload_routes(struct channel *C)
+void
+pipe_import_by_refeed_free(struct channel_feeding_request *cfr)
+{
+  struct import_to_export_reload *reload = SKIP_BACK(struct import_to_export_reload, cfr, cfr);
+  reload->cir->done(reload->cir);
+}
+
+static int
+pipe_reload_routes(struct channel *C, struct channel_import_request *cir)
{
  struct pipe_proto *p = (void *) C->proto;

-  /* Route reload on one channel is just refeed on the other */
-  channel_request_feeding_dynamic((C == p->pri) ? p->sec : p->pri, CFRT_DIRECT);
+  if (cir->trie)
+  {
+    struct import_to_export_reload *reload = lp_alloc(cir->trie->lp, sizeof *reload);
+    *reload = (struct import_to_export_reload) {
+      .cir = cir,
+      .cfr = {
+        .type = CFRT_AUXILIARY,
+        .done = pipe_import_by_refeed_free,
+        .trie = cir->trie,
+      },
+    };
+    channel_request_feeding((C == p->pri) ? p->sec : p->pri, &reload->cfr);
+  }
+  else
+  {
+    /* Route reload on one channel is just refeed on the other */
+    channel_request_feeding_dynamic((C == p->pri) ? p->sec : p->pri, CFRT_DIRECT);
+    cir->done(cir);
+  }
+  return 1;
}

static void

@@ -28,3 +28,8 @@ struct pipe_proto {
#define PIPE_FL_RR_BEGIN_PENDING 1  /* Route refresh should start with the first route notified */

#endif

struct import_to_export_reload {
  struct channel_import_request *cir;  /* We cannot free this struct before the reload finishes. */
  struct channel_feeding_request cfr;  /* The request we actually need - the import was changed to a feed of the other side. */
};

@@ -968,6 +968,9 @@ rip_timer(timer *t)

  FIB_ITERATE_INIT(&fit, &p->rtable);

  struct channel_import_request *cir = p->cir;
  p->cir = NULL;

loop:
  FIB_ITERATE_START(&p->rtable, &fit, struct rip_entry, en)
  {

@@ -989,14 +992,13 @@ rip_timer(timer *t)
  }

  /* Propagating eventual change */
-  if (changed || p->rt_reload)
+  if ((changed || p->rt_reload) && (cir == NULL || channel_import_request_prefilter(cir, en->n.addr)))
  {
    /*
     * We have to restart the iteration because there may be a cascade of
     * synchronous events rip_announce_rte() -> nest table change ->
     * rip_rt_notify() -> p->rtable change, invalidating hidden variables.
     */

    FIB_ITERATE_PUT_NEXT(&fit, &p->rtable);
    rip_announce_rte(p, en);
    goto loop;

@@ -1047,7 +1049,19 @@ rip_timer(timer *t)
    }
  }

-  tm_start(p->timer, MAX(next - now_, 100 MS));
+  while (cir)
+  {
+    struct channel_import_request *next_cir = cir->next;
+    cir->done(cir);
+    cir = next_cir;
+  }
+  if (p->cir)
+  {
+    p->rt_reload = 1;
+    rip_kick_timer(p);
+  }
+  else
+    tm_start(p->timer, MAX(next - now_, 100 MS));
}

static inline void

@@ -1148,17 +1162,21 @@ rip_trigger_update(struct rip_proto *p)
 * RIP protocol glue
 */

-static void
-rip_reload_routes(struct channel *C)
+static int
+rip_reload_routes(struct channel *C, struct channel_import_request *cir)
{
  struct rip_proto *p = (struct rip_proto *) C->proto;

  cir->next = p->cir;
  p->cir = cir;

  if (p->rt_reload)
-    return;
+    return 1;

  TRACE(D_EVENTS, "Scheduling route reload");
  p->rt_reload = 1;
  rip_kick_timer(p);
  return 1;
}

static struct rte_owner_class rip_rte_owner_class;

@@ -103,6 +103,7 @@ struct rip_proto

  struct tbf log_pkt_tbf;  /* TBF for packet messages */
  struct tbf log_rte_tbf;  /* TBF for RTE messages */
+  struct channel_import_request *cir;  /* Trie for partial reload */
};

struct rip_iface

@@ -172,6 +172,25 @@ static_mark_all(struct static_proto *p)
  ev_schedule(p->event);
}

static void
static_mark_partial(struct static_proto *p, struct channel_import_request *cir)
{
  struct static_config *cf = (void *) p->p.cf;
  struct static_route *r;

  WALK_LIST(r, cf->routes)
    if (r->state == SRS_CLEAN && trie_match_net(cir->trie, r->net))
    {
      r->state = SRS_DIRTY;
      BUFFER_PUSH(p->marked) = r;
    }

  if (!ev_active(p->event))
    ev_schedule(p->event);

  cir->done(cir);
}

static void
static_announce_marked(void *P)

@@ -395,14 +414,19 @@ static_bfd_notify(struct bfd_request *req)
    static_mark_rte(p, r->mp_head);
}

-static void
-static_reload_routes(struct channel *C)
+static int
+static_reload_routes(struct channel *C, struct channel_import_request *cir)
{
  struct static_proto *p = (void *) C->proto;

  TRACE(D_EVENTS, "Scheduling route reload");

-  static_mark_all(p);
+  if (cir->trie)
+    static_mark_partial(p, cir);
+  else
+    static_mark_all(p);
+
+  return 1;
}

static int

@@ -768,11 +768,18 @@ krt_if_notify(struct proto *P, uint flags, struct iface *iface UNUSED)
  krt_scan_timer_kick(p);
}

-static void
-krt_reload_routes(struct channel *C)
+static int
+krt_reload_routes(struct channel *C, struct channel_import_request *cir)
{
  struct krt_proto *p = (void *) C->proto;

  if (cir->trie)
  {
    /* Partial reload is not supported, refuse */
    cir->done(cir);
    return 0;
  }

  /* Although we keep learned routes in krt_table, we rather schedule a scan */

  if (KRT_CF->learn)

@@ -780,6 +787,9 @@ krt_reload_routes(struct channel *C)
    p->reload = 1;
    krt_scan_timer_kick(p);
  }

  cir->done(cir);
  return 1;
}

static void krt_cleanup(struct krt_proto *p);