Automatic ROA reloads on channel import
This includes updating OSPF, Pipe and RIP to enable partial route reload directly from the protocols' internal tables.
parent 4e9725e825
commit 9680bf68e4
conf/confbase.Y

@@ -81,6 +81,7 @@ CF_DECLS
   const struct filter *f;
   struct f_tree *e;
   struct f_trie *trie;
+  const struct f_trie *const_trie;
   struct f_val v;
   struct password_item *p;
   struct rt_show_data *ra;
doc/bird.sgml

@@ -979,7 +979,8 @@ inherited from templates can be updated by new definitions.
 	<cf/roa_check()/ operator). In contrast to other filter operators,
 	this status for the same route may change as the content of ROA tables
 	changes. When this option is active, BIRD activates automatic reload of
-	affected channels whenever ROA tables are updated (after a short settle
+	the appropriate subset of prefixes imported or exported by the channels
+	whenever ROA tables are updated (after a short settle
 	time). When disabled, route reloads have to be requested manually. The
 	option is ignored if <cf/roa_check()/ is not used in channel filters.
 	Note that for BGP channels, automatic reload requires
@@ -1263,7 +1264,7 @@ This argument can be omitted if there exists only a single instance.
 	Enable, disable or restart a given protocol instance, instances matching
 	the <cf><m/pattern/</cf> or <cf/all/ instances.

-	<tag><label id="cli-reload">reload [in|out] <m/name/|"<m/pattern/"|all</tag>
+	<tag><label id="cli-reload">reload [in|out] (<m/name/|"<m/pattern/"|all) [partial prefix]</tag>
 	Reload a given protocol instance, that means re-import routes from the
 	protocol instance and re-export preferred routes to the instance. If
 	<cf/in/ or <cf/out/ options are used, the command is restricted to one
@@ -1274,6 +1275,9 @@ This argument can be omitted if there exists only a single instance.
 	propagates the old set of routes. For example when <cf/configure soft/
 	command was used to change filters.

+	If the <cf/partial prefix/ option is used, only the matching routes are reloaded.
+	BGP does a partial reload only if it has an import table enabled; otherwise, the partial reload is refused.
+
 	Re-export always succeeds, but re-import is protocol-dependent and might
 	fail (for example, if BGP neighbor does not support route-refresh
 	extension). In that case, re-export is also skipped. Note that for the
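For illustration, a partial reload restricted to a prefix set might look like this on the BIRD CLI (hypothetical session: protocol name and prefixes are invented; the reply text comes from proto_cmd_reload() below):

    bird> reload in bgp1 partial [ 192.0.2.0/24+ ]
    bgp1: reloading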
nest/config.Y

@@ -107,6 +107,44 @@ proto_postconfig(void)
   this_proto = NULL;
 }

+static void
+channel_reload_out_done_main(void *_prr)
+{
+  struct proto_reload_request *prr = _prr;
+  ASSERT_THE_BIRD_LOCKED;
+
+  rfree(prr->trie->lp);
+}
+
+static inline void
+proto_call_cmd_reload(struct proto_spec ps, int dir, const struct f_trie *trie)
+{
+  struct proto_reload_request *prr = cfg_alloc(sizeof *prr);
+  *prr = (struct proto_reload_request) {
+    .trie = trie,
+    .dir = dir,
+    .counter = 1,
+  };
+  if (trie)
+  {
+    /* The CLI linpool is moved to the trie, because the trie is needed for longer
+     * than the linpool would exist in the CLI. The linpool is freed in channel_reload_out_done_main.
+     */
+    ASSERT_DIE(this_cli->parser_pool == prr->trie->lp);
+    rmove(this_cli->parser_pool, &root_pool);
+    this_cli->parser_pool = lp_new(this_cli->pool);
+    prr->ev = (event) {
+      .data = prr,
+      .hook = channel_reload_out_done_main,
+    };
+  }
+
+  proto_apply_cmd(ps, proto_cmd_reload, 1, (uintptr_t) prr);
+  /* This function held a reference to the trie, so it is dropped here as well as in the protocols */
+  if (trie)
+    if (atomic_fetch_sub_explicit(&prr->counter, 1, memory_order_acq_rel) == 1)
+      ev_send_loop(&main_birdloop, &prr->ev);
+}

 #define DIRECT_CFG ((struct rt_dev_config *) this_proto)
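The counter discipline above deserves a closer look: the request starts with one reference held by the CLI itself, every participating channel adds one, and whoever drops the count to zero fires the cleanup event. A minimal standalone sketch of that pattern (plain C with stdatomic; no BIRD types, all names invented):

    #include <stdatomic.h>
    #include <stdio.h>

    struct reload_ctx {
      _Atomic unsigned counter;              /* references: requestor + active channels */
      void (*finished)(struct reload_ctx *); /* runs exactly once, after the last unref */
    };

    static void ctx_unref(struct reload_ctx *ctx)
    {
      if (atomic_fetch_sub_explicit(&ctx->counter, 1, memory_order_acq_rel) == 1)
        ctx->finished(ctx);                  /* we were the last holder */
    }

    static void all_done(struct reload_ctx *ctx)
    {
      (void) ctx;
      puts("all channels reloaded; free the trie now");
    }

    int main(void)
    {
      /* counter starts at 1: the requestor's own reference keeps the hook
         from firing while channels are still being enlisted */
      struct reload_ctx ctx = { .counter = 1, .finished = all_done };

      for (int channel = 0; channel < 3; channel++)
      {
        atomic_fetch_add_explicit(&ctx.counter, 1, memory_order_relaxed);
        ctx_unref(&ctx);                     /* the channel finishing its reload */
      }

      ctx_unref(&ctx);                       /* drop the requestor's reference */
      return 0;
    }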
@@ -120,7 +158,7 @@ CF_KEYWORDS(PASSWORD, KEY, FROM, PASSIVE, TO, ID, EVENTS, PACKETS, PROTOCOLS, CH
 CF_KEYWORDS(ALGORITHM, KEYED, HMAC, MD5, SHA1, SHA256, SHA384, SHA512, BLAKE2S128, BLAKE2S256, BLAKE2B256, BLAKE2B512)
 CF_KEYWORDS(PRIMARY, STATS, COUNT, FOR, IN, COMMANDS, PREEXPORT, NOEXPORT, EXPORTED, GENERATE)
 CF_KEYWORDS(BGP, PASSWORDS, DESCRIPTION)
-CF_KEYWORDS(RELOAD, IN, OUT, MRTDUMP, MESSAGES, RESTRICT, MEMORY, CLASS, DSCP)
+CF_KEYWORDS(RELOAD, IN, OUT, MRTDUMP, MESSAGES, RESTRICT, MEMORY, CLASS, DSCP, PARTIAL)
 CF_KEYWORDS(TIMEFORMAT, ISO, SHORT, LONG, ROUTE, PROTOCOL, BASE, LOG, S, MS, US)
 CF_KEYWORDS(GRACEFUL, RESTART, WAIT, MAX, AS)
 CF_KEYWORDS(MIN, IDLE, RX, TX, INTERVAL, MULTIPLIER, PASSIVE)

@@ -151,6 +189,7 @@ CF_ENUM_PX(T_ENUM_AF, AF_, AFI_, IPV4, IPV6)
 %type <net_ptr> r_args_for
 %type <t> channel_sym
 %type <c> channel_arg
+%type <const_trie> partial_opt

 CF_GRAMMAR
@@ -892,18 +931,28 @@ CF_CLI(DUMP FILTER ALL,,, [[Dump all filters in linearized form]])
 CF_CLI(EVAL, term, <expr>, [[Evaluate an expression]])
 { cmd_eval(f_linearize($2, 1)); } ;

+partial_opt:
+  PARTIAL term {
+    struct f_val val;
+    if (f_eval(f_linearize($2, 1), &val) > F_RETURN) cf_error("Runtime error");
+    if (val.type != T_PREFIX_SET) cf_error("Partial spec must be prefix set");
+    $$ = val.val.ti;
+  }
+ | /* empty */ { $$ = NULL; }
+ ;
+
 CF_CLI(DISABLE, proto_patt opttext, (<protocol> | \"<pattern>\" | all) [message], [[Disable protocol]])
 { proto_apply_cmd($2, proto_cmd_disable, 1, (uintptr_t) $3); } ;
 CF_CLI(ENABLE, proto_patt opttext, (<protocol> | \"<pattern>\" | all) [message], [[Enable protocol]])
 { proto_apply_cmd($2, proto_cmd_enable, 1, (uintptr_t) $3); } ;
 CF_CLI(RESTART, proto_patt opttext, (<protocol> | \"<pattern>\" | all) [message], [[Restart protocol]])
 { proto_apply_cmd($2, proto_cmd_restart, 1, (uintptr_t) $3); } ;
-CF_CLI(RELOAD, proto_patt, <protocol> | \"<pattern>\" | all, [[Reload protocol]])
-{ proto_apply_cmd($2, proto_cmd_reload, 1, CMD_RELOAD); } ;
-CF_CLI(RELOAD IN, proto_patt, <protocol> | \"<pattern>\" | all, [[Reload protocol (just imported routes)]])
-{ proto_apply_cmd($3, proto_cmd_reload, 1, CMD_RELOAD_IN); } ;
-CF_CLI(RELOAD OUT, proto_patt, <protocol> | \"<pattern>\" | all, [[Reload protocol (just exported routes)]])
-{ proto_apply_cmd($3, proto_cmd_reload, 1, CMD_RELOAD_OUT); } ;
+CF_CLI(RELOAD, proto_patt partial_opt, (<protocol> | \"<pattern>\" | all) [partial <prefix set>], [[Reload protocol]])
+{ proto_call_cmd_reload($2, CMD_RELOAD, $3); } ;
+CF_CLI(RELOAD IN, proto_patt partial_opt, <protocol> | \"<pattern>\" | all, [[Reload protocol (just imported routes)]])
+{ proto_call_cmd_reload($3, CMD_RELOAD_IN, $4); } ;
+CF_CLI(RELOAD OUT, proto_patt partial_opt, <protocol> | \"<pattern>\" | all, [[Reload protocol (just exported routes)]])
+{ proto_call_cmd_reload($3, CMD_RELOAD_OUT, $4); } ;

 CF_CLI_HELP(DEBUG, ..., [[Control protocol debugging via BIRD logs]])
 CF_CLI(DEBUG, debug_args, (<protocol> | <channel> | \"<pattern>\" | all) (all | off | { states|routes|filters|interfaces|events|packets [, ...] }), [[Control protocol debugging via BIRD logs]])
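Note on the grammar: the partial argument is evaluated immediately while the command is parsed (f_eval() of the linearized term), so it must be a constant expression yielding a prefix set, e.g. [ 10.0.0.0/8+ ]; anything else is rejected with "Partial spec must be prefix set".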
nest/proto.c (310 changed lines)
@@ -52,11 +52,14 @@ static void channel_init_limit(struct channel *c, struct limit *l, int dir, stru
 static void channel_update_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf);
 static void channel_reset_limit(struct channel *c, struct limit *l, int dir);
 static int channel_refeed_prefilter(const struct rt_prefilter *p, const net_addr *n);
+static int channel_import_prefilter(const struct rt_prefilter *p, const net_addr *n);
 static void channel_feed_end(struct channel *c);
 static void channel_stop_export(struct channel *c);
 static void channel_export_stopped(struct rt_export_request *req);
 static void channel_refeed_stopped(struct rt_export_request *req);
 static void channel_check_stopped(struct channel *c);
+static void channel_reload_in_done(struct channel_import_request *cir);
+static void channel_request_partial_reload(struct channel *c, struct channel_import_request *cir);

 static inline int proto_is_done(struct proto *p)
 { return (p->proto_state == PS_DOWN) && proto_is_inactive(p); }
@@ -339,15 +342,109 @@ proto_remove_channels(struct proto *p)
     proto_remove_channel(p, c);
 }

+/**
+ * # Automatic ROA reloads
+ *
+ * Route origin authorizations may (and do) change over time by updates via
+ * our RPKI protocols. This then manifests in ROA tables. As the roa_check()
+ * is always executed on a specific contents of ROA table in a specific moment
+ * of time, its value may switch after updates in the ROA table and therefore
+ * must be re-evaluated any time the result may have changed.
+ *
+ * To enable this mechanism, there are auxiliary tools integrated in BIRD
+ * to automatically re-evaluate all filters that may get a different outcome
+ * after ROA change.
+ *
+ * ROA Subscription Data Structure (struct roa_subscription) is the connector
+ * between the channel and the ROA table, keeping track about unprocessed
+ * changes and initiating the reloads. The modus operandi is as follows:
+ *
+ * Init 1. Check whether the filter uses ROA at all.
+ * Init 2. Request exports from the ROA table
+ * Init 3. Allocate a trie
+ *
+ * Export from ROA: This may affect all routes for prefixes matching the ROA
+ * prefix, disregarding its maxlen. Thus we mark these routes in the request's
+ * auxiliary trie. Then we ping the settle timer to wait a reasonable amount of
+ * time before actually requesting channel reload.
+ *
+ * Settle timer fires when nothing has pinged it for the 'min' time, or 'max'
+ * time has elapsed since the first ping. It then:
+ *
+ * - requests partial channel import / export reload based on the trie
+ * - allocates a new trie
+ *
+ * As the import/export reload uses the auxiliary trie to prefilter prefixes,
+ * the trie must be freed after the reload is done, which is ensured in the
+ * .done() hook of the reimport/reexport request.
+ *
+ * # Channel export refeed
+ *
+ * The request, either by ROA or from CLI, is enqueued to the channel and an
+ * auxiliary export hook is requested from the table. This way, the ordinary
+ * updates can flow uninterrupted while refeed gets prefiltered by the given
+ * trie (if given). When the auxiliary export hook finishes, the .done() hook
+ * is then called for the requestor to do their cleanup.
+ *
+ * While refeeding, special care must be taken about route changes inside the
+ * table. For this, an auxiliary trie is allocated to keep track about already
+ * refed net, to avoid unnecessary multiple re-evaluation of filters.
+ *
+ * # Channel import reload from import table
+ *
+ * When the import table is on, the channel keeps the original version of the route
+ * in the table together with the actual version after filters, in a form of
+ * an additional layer of route attributes underneath the actual version. This makes
+ * it exceptionally simple to get the original version of the route directly
+ * from the table by an ordinary export which strips all the newer layers.
+ *
+ * Then, by processing all these auxiliary exports, the channel basically re-imports
+ * all the routes into the table back again, re-evaluating the filters and ROA checks.
+ *
+ * # Channel import reload from protocols
+ *
+ * When the import table is off, the protocol gets the reimport request directly
+ * via the .reload_routes() hook and must do its internal route reload instead.
+ * The protocol may not support it and in such case, this function returns 0
+ * indicating that no partial reload is going to happen. It's then on the
+ * developer's or user's discretion to run a full reload instead.
+ *
+ * # Caveats, FIXME's, TODO's and other kinds of hell
+ *
+ * The partial reexport uses a trie to track state for single prefixes. This
+ * may do crazy things if a partial reload was to be performed on any other
+ * table than plain IPv6 or IPv4. Network types like VPNv6 or Flowspec may
+ * cause some crashes. This is currently not checked anywhere.
+ *
+ * Anyway, we decided to split the table FIB structure to carry only a mapping
+ * between a prefix and a locally-unique ID, and after this update is done
+ * (probably also in v2), the tracking tries may be easily replaced by
+ * bitfields, therefore fixing this bug.
+ *
+ * We also probably didn't do a proper analysis of the implemented algorithm
+ * for reexports, so if there is somebody willing to formally prove that we
+ * both won't miss any update and won't reexport more than needed, you're welcome
+ * to submit such a proof.
+ *
+ * We wish you a pleasant reading, analyzing and bugfixing experience.
+ *
+ * Kata, Maria and the BIRD Team
+ */
+
 struct roa_subscription {
   node roa_node;
   struct settle settle;
   struct channel *c;
   struct rt_export_request req;
-  struct f_trie* trie;
-  struct channel_feeding_request cfr[2];
+  struct f_trie *trie;
 };

+static void
+channel_roa_in_reload_done(struct channel_import_request *req)
+{
+  rfree(req->trie->lp);
+}
+
 static void
 channel_roa_in_changed(struct settle *se)
 {
@@ -355,7 +452,15 @@ channel_roa_in_changed(struct settle *se)
   struct channel *c = s->c;

   CD(c, "Reload triggered by RPKI change");
-  channel_request_reload(c);
+  struct channel_import_request *cir = lp_alloc(s->trie->lp, sizeof *cir);
+  *cir = (struct channel_import_request) {
+    .trie = s->trie,
+    .done = channel_roa_in_reload_done,
+  };
+
+  s->trie = f_new_trie(lp_new(c->proto->pool), 0);
+
+  channel_request_partial_reload(c, cir);
 }

 static void
@@ -372,15 +477,19 @@ channel_roa_out_changed(struct settle *se)

   CD(c, "Feeding triggered by RPKI change");

   /* Setup feeding request */
   struct channel_feeding_request *cfr = lp_alloc(s->trie->lp, sizeof *cfr);
   *cfr = (struct channel_feeding_request) {
     .type = CFRT_AUXILIARY,
     .trie = s->trie,
+    .done = channel_roa_out_reload_done,
   };
-  channel_request_feeding(c, cfr);

+  /* Prepare new trie */
   s->trie = f_new_trie(lp_new(c->proto->pool), 0);
+
+  /* Actually request the feed */
+  channel_request_feeding(c, cfr);
 }

 static void
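Both settle hooks share one shape: dirty prefixes pile up in a trie, and when the timer finally fires, the whole batch is handed to the reload while a fresh trie takes its place, so updates arriving during the reload are not lost. A standalone sketch of just that hand-over (plain C; every name is illustrative, this is not BIRD API):

    #include <stdio.h>
    #include <stdlib.h>

    struct batch { int n_prefixes; };          /* stands in for the f_trie */

    struct subscription {
      struct batch *dirty;                     /* accumulates since last reload */
      void (*reload)(struct batch *);          /* consumer must free the batch */
    };

    static void roa_changed(struct subscription *s)
    {
      s->dirty->n_prefixes++;                  /* mark the affected prefix */
      /* ... and ping the settle timer here ... */
    }

    static void settle_fired(struct subscription *s)
    {
      struct batch *b = s->dirty;
      s->dirty = calloc(1, sizeof *s->dirty);  /* fresh batch, armed immediately */
      s->reload(b);                            /* the done() hook frees it */
    }

    static void do_reload(struct batch *b)
    {
      printf("partial reload of %d prefixes\n", b->n_prefixes);
      free(b);
    }

    int main(void)
    {
      struct subscription s = { calloc(1, sizeof(struct batch)), do_reload };
      roa_changed(&s); roa_changed(&s);
      settle_fired(&s);                        /* reloads a batch of 2 */
      roa_changed(&s);
      settle_fired(&s);                        /* reloads a batch of 1 */
      free(s.dirty);
      return 0;
    }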
@@ -623,7 +732,6 @@ channel_start_export(struct channel *c)
   c->refeed_req.dump_req = channel_dump_refeed_req;
   c->refeed_req.log_state_change = channel_refeed_log_state_change;
   c->refeed_req.mark_seen = channel_rpe_mark_seen_refeed;
-  c->refeed_req.prefilter.hook = channel_refeed_prefilter;

   DBG("%s.%s: Channel start export req=%p\n", c->proto->name, c->name, &c->out_req);
   rt_request_export(c->table, &c->out_req);
@@ -709,6 +817,8 @@ channel_refeed_stopped(struct rt_export_request *req)
 static void
 channel_init_feeding(struct channel *c)
 {
+  int no_trie = 0;
+
   for (struct channel_feeding_request *cfrp = c->refeed_pending; cfrp; cfrp = cfrp->next)
     if (cfrp->type == CFRT_DIRECT)
     {
@@ -716,12 +826,25 @@ channel_init_feeding(struct channel *c)
       channel_stop_export(c);
       return;
     }
+    else if (!cfrp->trie)
+      no_trie = 1;

   /* No direct feeding, running auxiliary refeed. */
   c->refeeding = c->refeed_pending;
   c->refeed_pending = NULL;
   c->refeed_trie = f_new_trie(lp_new(c->proto->pool), 0);

+  if (no_trie)
+  {
+    c->refeed_req.prefilter.mode = TE_ADDR_NONE;
+    c->refeed_req.prefilter.hook = NULL;
+  }
+  else
+  {
+    c->refeed_req.prefilter.mode = TE_ADDR_HOOK;
+    c->refeed_req.prefilter.hook = channel_refeed_prefilter;
+  }
+
   rt_request_export(c->table, &c->refeed_req);
 }
@@ -733,13 +856,35 @@ channel_refeed_prefilter(const struct rt_prefilter *p, const net_addr *n)
       SKIP_BACK(struct rt_export_request, prefilter, p)
     );

   ASSERT_DIE(c->refeeding);
   for (struct channel_feeding_request *cfr = c->refeeding; cfr; cfr = cfr->next)
     if (!cfr->trie || trie_match_net(cfr->trie, n))
       return 1;

   return 0;
 }

+int
+channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n)
+{
+  for (struct channel_import_request *cir = cir_head; cir; cir = cir->next)
+  {
+    if (!cir->trie || trie_match_net(cir->trie, n))
+      return 1;
+  }
+  return 0;
+}
+
+static int
+channel_import_prefilter(const struct rt_prefilter *p, const net_addr *n)
+{
+  const struct channel *c =
+    SKIP_BACK(struct channel, reload_req,
+      SKIP_BACK(struct rt_export_request, prefilter, p)
+    );
+  ASSERT_DIE(c->importing);
+
+  return channel_import_request_prefilter(c->importing, n);
+}
+
 static void
 channel_feed_end(struct channel *c)
@@ -791,9 +936,15 @@ channel_feed_end(struct channel *c)

 /* Called by protocol for reload from in_table */
 void
-channel_schedule_reload(struct channel *c)
+channel_schedule_reload(struct channel *c, struct channel_import_request *cir)
 {
   ASSERT(c->in_req.hook);
+  int no_trie = 0;
+  if (cir)
+  {
+    cir->next = c->import_pending;
+    c->import_pending = cir;
+  }

   if (c->reload_req.hook)
   {
@@ -802,7 +953,29 @@ channel_schedule_reload(struct channel *c)
     return;
   }

-  rt_refresh_begin(&c->in_req);
+  /* If there is any full-reload request, we can disregard all partials */
+  for (struct channel_import_request *last = cir; last && no_trie==0;)
+  {
+    if (!last->trie)
+      no_trie = 1;
+    last = last->next;
+  }
+
+  /* activating pending imports */
+  c->importing = c->import_pending;
+  c->import_pending = NULL;
+
+  if (no_trie)
+  {
+    c->reload_req.prefilter.mode = TE_ADDR_NONE;
+    c->reload_req.prefilter.hook = NULL;
+  }
+  else
+  {
+    c->reload_req.prefilter.mode = TE_ADDR_HOOK;
+    c->reload_req.prefilter.hook = channel_import_prefilter;
+  }
+
   rt_request_export(c->table, &c->reload_req);
 }
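From a caller's perspective, queueing a partial import reload follows the allocation pattern of channel_roa_in_changed() above. A hedged sketch for a single prefix; trie_add_prefix() and its (l, h) prefix-length bounds are an assumption about BIRD's trie API, and my_reload_done()/request_reload_of_net() are invented for illustration:

    /* Sketch only -- shape mirrors channel_roa_in_changed() above. */
    static void my_reload_done(struct channel_import_request *cir)
    {
      rfree(cir->trie->lp);             /* the trie's linpool carries the request, too */
    }

    static void request_reload_of_net(struct channel *c, const net_addr *n)
    {
      linpool *lp = lp_new(c->proto->pool);
      struct f_trie *trie = f_new_trie(lp, 0);
      trie_add_prefix(trie, n, n->pxlen, n->pxlen);   /* match just this prefix */

      struct channel_import_request *cir = lp_alloc(lp, sizeof *cir);
      *cir = (struct channel_import_request) {
        .trie = trie,
        .done = my_reload_done,
      };
      channel_request_partial_reload(c, cir);   /* static in proto.c; shown for shape */
    }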
@@ -1013,7 +1186,11 @@ channel_set_state(struct channel *c, uint state)
 void
 channel_request_feeding(struct channel *c, struct channel_feeding_request *cfr)
 {
-  ASSERT(c->out_req.hook);
+  ASSERT_DIE(c->out_req.hook);
+
+  CD(c, "Feeding requested (%s)",
+      cfr->type == CFRT_DIRECT ? "direct" :
+      (cfr->trie ? "partial" : "auxiliary"));

   /* Enqueue the request */
   cfr->next = c->refeed_pending;
@@ -1052,6 +1229,12 @@ channel_stop_export(struct channel *c)
   rt_stop_export(&c->out_req, channel_export_stopped);
 }

+static void
+channel_import_request_done_dynamic(struct channel_import_request *req)
+{
+  mb_free(req);
+}
+
 static void
 channel_request_reload(struct channel *c)
 {
@@ -1059,11 +1242,28 @@ channel_request_reload(struct channel *c)
   ASSERT(channel_reloadable(c));

   CD(c, "Reload requested");
+  struct channel_import_request* cir = mb_alloc(c->proto->pool, sizeof *cir);
+  cir->trie = NULL;
+  cir->done = channel_import_request_done_dynamic;

   if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
-    channel_schedule_reload(c);
-  else
-    c->proto->reload_routes(c);
+    channel_schedule_reload(c, cir);
+  else if (! c->proto->reload_routes(c, cir))
+    bug("Channel %s.%s refused full import reload.", c->proto->name, c->name);
 }

+static void
+channel_request_partial_reload(struct channel *c, struct channel_import_request *cir)
+{
+  ASSERT(c->in_req.hook);
+  ASSERT(channel_reloadable(c));
+
+  CD(c, "Partial import reload requested");
+
+  if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
+    channel_schedule_reload(c, cir);
+  else if (! c->proto->reload_routes(c, cir))
+    cli_msg(-15, "%s.%s: partial reload refused, please run full reload instead", c->proto->name, c->name);
+}
+
 const struct channel_class channel_basic = {
@@ -2655,11 +2855,37 @@ proto_cmd_restart(struct proto *p, uintptr_t arg, int cnt UNUSED)
   cli_msg(-12, "%s: restarted", p->name);
 }

-void
-proto_cmd_reload(struct proto *p, uintptr_t dir, int cnt UNUSED)
-{
-  struct channel *c;
+struct channel_cmd_reload_feeding_request {
+  struct channel_feeding_request cfr;
+  struct proto_reload_request *prr;
+};
+
+struct channel_cmd_reload_import_request {
+  struct channel_import_request cir;
+  struct proto_reload_request *prr;
+};
+
+static void
+channel_reload_out_done(struct channel_feeding_request *cfr)
+{
+  struct channel_cmd_reload_feeding_request *ccrfr = SKIP_BACK(struct channel_cmd_reload_feeding_request, cfr, cfr);
+  if (atomic_fetch_sub_explicit(&ccrfr->prr->counter, 1, memory_order_acq_rel) == 1)
+    ev_send_loop(&main_birdloop, &ccrfr->prr->ev);
+}
+
+static void
+channel_reload_in_done(struct channel_import_request *cir)
+{
+  struct channel_cmd_reload_import_request *ccrir = SKIP_BACK(struct channel_cmd_reload_import_request, cir, cir);
+  if (atomic_fetch_sub_explicit(&ccrir->prr->counter, 1, memory_order_acq_rel) == 1)
+    ev_send_loop(&main_birdloop, &ccrir->prr->ev);
+}
+
+void
+proto_cmd_reload(struct proto *p, uintptr_t _prr, int cnt UNUSED)
+{
+  struct proto_reload_request *prr = (void *) _prr;
+  struct channel *c;

   if (p->disabled)
   {
     cli_msg(-8, "%s: already disabled", p->name);
@@ -2671,7 +2897,7 @@ proto_cmd_reload(struct proto *p, uintptr_t dir, int cnt UNUSED)
     return;

   /* All channels must support reload */
-  if (dir != CMD_RELOAD_OUT)
+  if (prr->dir != CMD_RELOAD_OUT)
     WALK_LIST(c, p->channels)
       if ((c->channel_state == CS_UP) && !channel_reloadable(c))
       {
@@ -2682,16 +2908,56 @@ proto_cmd_reload(struct proto *p, uintptr_t dir, int cnt UNUSED)
   log(L_INFO "Reloading protocol %s", p->name);

   /* re-importing routes */
-  if (dir != CMD_RELOAD_OUT)
+  if (prr->dir != CMD_RELOAD_OUT)
     WALK_LIST(c, p->channels)
       if (c->channel_state == CS_UP)
-        channel_request_reload(c);
+      {
+        if (prr->trie)
+        {
+          /* Increase the refeed counter */
+          atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed);
+          ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
+
+          struct channel_cmd_reload_import_request *req = lp_alloc(prr->trie->lp, sizeof *req);
+          *req = (struct channel_cmd_reload_import_request) {
+            .cir = {
+              .done = channel_reload_in_done,
+              .trie = prr->trie,
+            },
+            .prr = prr,
+          };
+          channel_request_partial_reload(c, &req->cir);
+        }
+        else
+          channel_request_reload(c);
+      }

   /* re-exporting routes */
-  if (dir != CMD_RELOAD_IN)
+  if (prr->dir != CMD_RELOAD_IN)
     WALK_LIST(c, p->channels)
-      if (c->channel_state == CS_UP)
-        channel_request_feeding_dynamic(c, CFRT_AUXILIARY);
+      if ((c->channel_state == CS_UP) && (c->out_req.hook))
+        if (prr->trie)
+        {
+          /* Increase the refeed counter */
+          atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed);
+          ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
+
+          /* Actually request the feeding */
+          struct channel_cmd_reload_feeding_request *req = lp_alloc(prr->trie->lp, sizeof *req);
+          *req = (struct channel_cmd_reload_feeding_request) {
+            .cfr = {
+              .type = CFRT_AUXILIARY,
+              .done = channel_reload_out_done,
+              .trie = prr->trie,
+            },
+            .prr = prr,
+          };
+
+          channel_request_feeding(c, &req->cfr);
+        }
+        else
+          channel_request_feeding_dynamic(c, CFRT_AUXILIARY);

   cli_msg(-15, "%s: reloading", p->name);
 }
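Both done() hooks above recover their wrapper via SKIP_BACK, BIRD's variant of container_of: given a pointer to an embedded member, step back to the enclosing struct. A standalone model of the trick (the real BIRD macro also type-checks the member; this reduced form is for illustration only):

    #include <stddef.h>
    #include <stdio.h>

    #define SKIP_BACK(type, member, ptr) \
      ((type *) ((char *) (ptr) - offsetof(type, member)))

    struct inner { int x; };
    struct outer { int tag; struct inner in; };

    int main(void)
    {
      struct outer o = { .tag = 42 };
      struct inner *ip = &o.in;          /* only the member pointer is passed around */
      struct outer *op = SKIP_BACK(struct outer, in, ip);
      printf("%d\n", op->tag);           /* prints 42 */
      return 0;
    }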
nest/protocol.h

@@ -125,6 +125,12 @@ struct proto_config {
   /* Protocol-specific data follow... */
 };

+struct channel_import_request {
+  struct channel_import_request *next;            /* Next in request chain */
+  void (*done)(struct channel_import_request *);  /* Called when import finishes */
+  const struct f_trie *trie;                      /* Reload only matching nets */
+};
+
 #define TLIST_PREFIX proto
 #define TLIST_TYPE struct proto
 #define TLIST_ITEM n
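A note on the lifetime contract here: done() is called exactly once, when the (possibly partial) import finishes, and it owns the cleanup. In this commit that means mb_free() for the dynamically allocated full-reload request (channel_import_request_done_dynamic) and rfree() of the trie's linpool for partial ones, since that linpool also carries the request itself.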
@@ -194,7 +200,7 @@ struct proto {

   void (*rt_notify)(struct proto *, struct channel *, const net_addr *net, struct rte *new, const struct rte *old);
   int (*preexport)(struct channel *, struct rte *rt);
-  void (*reload_routes)(struct channel *);
+  int (*reload_routes)(struct channel *, struct channel_import_request *cir);
   void (*feed_begin)(struct channel *);
   void (*feed_end)(struct channel *);

@@ -280,6 +286,14 @@ struct proto *proto_iterate_named(struct symbol *sym, struct protocol *proto, st

 #define PROTO_WALK_CMD(sym,pr,p) for(struct proto *p = NULL; p = proto_iterate_named(sym, pr, p); )

+/* Request from CLI to reload multiple protocols */
+struct proto_reload_request {
+  const struct f_trie *trie;  /* Trie to apply */
+  _Atomic uint counter;       /* How many channels remaining */
+  uint dir;                   /* Direction of reload */
+  event ev;                   /* Event to run when finished */
+};
+
 #define PROTO_ENTER_FROM_MAIN(p) ({ \
   ASSERT_DIE(birdloop_inside(&main_birdloop)); \
   struct birdloop *_loop = (p)->loop; \
@@ -573,6 +587,8 @@ struct channel {
   struct f_trie *refeed_trie;                      /* Auxiliary refeed trie */
   struct channel_feeding_request *refeeding;       /* Refeeding the channel */
   struct channel_feeding_request *refeed_pending;  /* Scheduled refeeds */
+  struct channel_import_request *importing;        /* Importing the channel */
+  struct channel_import_request *import_pending;   /* Scheduled imports */

   uint feed_block_size;                            /* How many routes to feed at once */

@@ -669,7 +685,8 @@ struct channel *proto_add_channel(struct proto *p, struct channel_config *cf);
 int proto_configure_channel(struct proto *p, struct channel **c, struct channel_config *cf);

 void channel_set_state(struct channel *c, uint state);
-void channel_schedule_reload(struct channel *c);
+void channel_schedule_reload(struct channel *c, struct channel_import_request *cir);
+int channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n);

 static inline void channel_init(struct channel *c) { channel_set_state(c, CS_START); }
 static inline void channel_open(struct channel *c) { channel_set_state(c, CS_UP); }
@@ -678,7 +695,7 @@ static inline void channel_close(struct channel *c) { channel_set_state(c, CS_ST
 struct channel_feeding_request {
   struct channel_feeding_request *next;            /* Next in request chain */
   void (*done)(struct channel_feeding_request *);  /* Called when refeed finishes */
-  struct f_trie *trie;                             /* Reload only matching nets */
+  const struct f_trie *trie;                       /* Reload only matching nets */
   PACKED enum channel_feeding_request_type {
     CFRT_DIRECT = 1,   /* Refeed by export restart */
     CFRT_AUXILIARY,    /* Refeed by auxiliary request */
nest/rt.h (34 changed lines)
@@ -253,6 +253,23 @@ struct rte_storage {

 /* Table-channel connections */

+struct rt_prefilter {
+  union {
+    const struct f_trie *trie;
+    const net_addr *addr;  /* Network prefilter address */
+    int (*hook)(const struct rt_prefilter *, const net_addr *);
+  };
+  /* Network prefilter mode (TE_ADDR_*) */
+  enum {
+    TE_ADDR_NONE = 0,  /* No address matching */
+    TE_ADDR_EQUAL,     /* Exact query - show route <addr> */
+    TE_ADDR_FOR,       /* Longest prefix match - show route for <addr> */
+    TE_ADDR_IN,        /* Interval query - show route in <addr> */
+    TE_ADDR_TRIE,      /* Query defined by trie */
+    TE_ADDR_HOOK,      /* Query processed by supplied custom hook */
+  } mode;
+} PACKED;
+
 struct rt_import_request {
   struct rt_import_hook *hook;  /* The table part of importer */
   char *name;
@@ -300,23 +317,6 @@ struct rt_pending_export {
   u64 seq;  /* Sequential ID (table-local) of the pending export */
 };

-struct rt_prefilter {
-  union {
-    const struct f_trie *trie;
-    const net_addr *addr;  /* Network prefilter address */
-    int (*hook)(const struct rt_prefilter *, const net_addr *);
-  };
-  /* Network prefilter mode (TE_ADDR_*) */
-  enum {
-    TE_ADDR_NONE = 0,  /* No address matching */
-    TE_ADDR_EQUAL,     /* Exact query - show route <addr> */
-    TE_ADDR_FOR,       /* Longest prefix match - show route for <addr> */
-    TE_ADDR_IN,        /* Interval query - show route in <addr> */
-    TE_ADDR_TRIE,      /* Query defined by trie */
-    TE_ADDR_HOOK,      /* Query processed by supplied custom hook */
-  } mode;
-} PACKED;
-
 struct rt_export_request {
   struct rt_export_hook *hook;  /* Table part of the export */
   char *name;
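For orientation, a hedged sketch of how a test against this struct presumably dispatches on mode. The real helper, rt_prefilter_net() (used by the BGP hunk below), lives elsewhere in the tree; the helper names net_equal and net_in_netX are assumptions about BIRD's lib/net.h:

    /* Sketch only -- not the actual rt_prefilter_net() implementation. */
    static inline int prefilter_matches(const struct rt_prefilter *p, const net_addr *n)
    {
      switch (p->mode)
      {
        case TE_ADDR_NONE:  return 1;                        /* no filtering at all */
        case TE_ADDR_EQUAL: return net_equal(n, p->addr);    /* exact prefix */
        case TE_ADDR_FOR:   return net_in_netX(p->addr, n);  /* n covers the queried addr */
        case TE_ADDR_IN:    return net_in_netX(n, p->addr);  /* n inside the interval */
        case TE_ADDR_TRIE:  return trie_match_net(p->trie, n);
        case TE_ADDR_HOOK:  return p->hook(p, n);            /* custom, e.g. the channel
                                                                import/refeed prefilters */
      }
      return 0;
    }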
proto/bgp/packets.c

@@ -1904,6 +1904,7 @@ bgp_out_table_feed(void *data)
   {
     switch (hook->h.req->prefilter.mode)
     {
+      case TE_ADDR_HOOK:
       case TE_ADDR_TRIE:
       case TE_ADDR_IN:
        if (!rt_prefilter_net(&hook->h.req->prefilter, n->net))
proto/bgp/bgp.c

@@ -1560,18 +1560,30 @@ bgp_update_bfd(struct bgp_proto *p, const struct bfd_options *bfd)
   }
 }

-static void
-bgp_reload_routes(struct channel *C)
+static int
+bgp_reload_routes(struct channel *C, struct channel_import_request *cir)
 {
   struct bgp_proto *p = (void *) C->proto;
   struct bgp_channel *c = (void *) C;

   /* Ignore non-BGP channels */
   if (C->class != &channel_bgp)
-    return;
+  {
+    cir->done(cir);
+    return 1;
+  }
+
+  if (cir->trie)
+  {
+    cir->done(cir);
+    return 0;
+  }
+  /* We do not need the cir anymore, and later we would not be able to detect when to free it. */
+  cir->done(cir);

   ASSERT(p->conn && p->route_refresh);
   bgp_schedule_packet(p->conn, c, PKT_ROUTE_REFRESH);
+  return 1;
 }

 static void
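The refusal path above reflects a protocol limitation rather than a bug: a BGP route refresh always makes the neighbor resend its full set of routes, so there is no wire-level way to re-import just a prefix subset. Presumably that is why the documentation restricts partial reload to channels with an import table, where the pre-filter routes are kept locally and can be trie-filtered by the table-driven path in nest/proto.c; here the hook returns 0 and the CLI suggests a full reload instead.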
proto/ospf/ospf.c

@@ -109,7 +109,7 @@
 #include "lib/macro.h"

 static int ospf_preexport(struct channel *C, rte *new);
-static void ospf_reload_routes(struct channel *C);
+static int ospf_reload_routes(struct channel *C, struct channel_import_request *cir);
 static int ospf_rte_better(const rte *new, const rte *old);
 static u32 ospf_rte_igp_metric(const rte *rt);
 static void ospf_disp(timer *timer);

@@ -432,16 +432,19 @@ ospf_schedule_rtcalc(struct ospf_proto *p)
   p->calcrt = 1;
 }

-static void
-ospf_reload_routes(struct channel *C)
+static int
+ospf_reload_routes(struct channel *C, struct channel_import_request *cir)
 {
   struct ospf_proto *p = (struct ospf_proto *) C->proto;
+  cir->next = p->cir;
+  p->cir = cir;

   if (p->calcrt == 2)
-    return;
+    return 1;

   OSPF_TRACE(D_EVENTS, "Scheduling routing table calculation with route reload");
   p->calcrt = 2;
+  return 1;
 }
proto/ospf/ospf.h

@@ -219,6 +219,7 @@ struct ospf_proto
   slist lsal;      /* List of all LSA's */
   int calcrt;      /* Routing table calculation scheduled?
                       0=no, 1=normal, 2=forced reload */
+  struct channel_import_request *cir;  /* Struct with trie for partial reload */
   list iface_list; /* List of OSPF interfaces (struct ospf_iface) */
   list area_list;  /* List of OSPF areas (struct ospf_area) */
   int areano;      /* Number of area I belong to */
proto/ospf/rt.c

@@ -1700,7 +1700,8 @@ ospf_rt_spf(struct ospf_proto *p)
   rt_sync(p);
   lp_flush(p->nhpool);

-  p->calcrt = 0;
+  if (p->cir == NULL)  /* If there is no more cir waiting for reload */
+    p->calcrt = 0;
 }

@@ -2020,11 +2021,16 @@ rt_sync(struct ospf_proto *p)

   OSPF_TRACE(D_EVENTS, "Starting routing table synchronization");

+  struct channel_import_request *cir = p->cir;
+  p->cir = NULL;
+
   DBG("Now syncing my rt table with nest's\n");
   FIB_ITERATE_INIT(&fit, fib);
 again1:
   FIB_ITERATE_START(fib, &fit, ort, nf)
   {
+    if (cir && !channel_import_request_prefilter(cir, nf->fn.addr))
+      continue;
     /* Sanity check of next-hop addresses, failure should not happen */
     if (nf->n.type && nf->n.nhs)
     {
@@ -2099,7 +2105,6 @@ again1:
       DBG("Mod rte type %d - %N via %I on iface %s, met %d\n",
          a0.source, nf->fn.addr, a0.gw, a0.iface ? a0.iface->name : "(none)", nf->n.metric1);
       */
-
       rte_update(p->p.main_channel, nf->fn.addr, &e0, p->p.main_source);
     }
   }
@@ -2125,6 +2130,13 @@ again1:
   }
   FIB_ITERATE_END;

+  while(cir)
+  {
+    struct channel_import_request *next = cir->next;
+    cir->done(cir);
+    cir = next;
+  }
+
   WALK_LIST(oa, p->area_list)
   {
     /* Cleanup ASBR hash tables */
proto/pipe/pipe.c

@@ -96,13 +96,37 @@ pipe_preexport(struct channel *C, rte *e)
   return 0;
 }

-static void
-pipe_reload_routes(struct channel *C)
+void
+pipe_import_by_refeed_free(struct channel_feeding_request *cfr)
 {
+  struct import_to_export_reload *reload = SKIP_BACK(struct import_to_export_reload, cfr, cfr);
+  reload->cir->done(reload->cir);
+}
+
+static int
+pipe_reload_routes(struct channel *C, struct channel_import_request *cir)
+{
   struct pipe_proto *p = (void *) C->proto;

-  /* Route reload on one channel is just refeed on the other */
-  channel_request_feeding_dynamic((C == p->pri) ? p->sec : p->pri, CFRT_DIRECT);
+  if (cir->trie)
+  {
+    struct import_to_export_reload *reload = lp_alloc(cir->trie->lp, sizeof *reload);
+    *reload = (struct import_to_export_reload) {
+      .cir = cir,
+      .cfr = {
+        .type = CFRT_AUXILIARY,
+        .done = pipe_import_by_refeed_free,
+        .trie = cir->trie,
+      },
+    };
+    channel_request_feeding((C == p->pri) ? p->sec : p->pri, &reload->cfr);
+  }
+  else
+  {
+    /* Route reload on one channel is just refeed on the other */
+    channel_request_feeding_dynamic((C == p->pri) ? p->sec : p->pri, CFRT_DIRECT);
+    cir->done(cir);
+  }
+  return 1;
 }

 static void
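Design note: a pipe has no neighbor to ask, so an import reload on one side is implemented as an export refeed from the other side. The import_to_export_reload wrapper (added to pipe.h below) exists only to keep the original cir alive until that converted refeed finishes; its done() hook then runs via pipe_import_by_refeed_free().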
proto/pipe/pipe.h

@@ -28,3 +28,8 @@ struct pipe_proto {
 #define PIPE_FL_RR_BEGIN_PENDING 1  /* Route refresh should start with the first route notified */

 #endif
+
+struct import_to_export_reload {
+  struct channel_import_request *cir;  /* We can not free this struct before reload finishes. */
+  struct channel_feeding_request cfr;  /* New request we actually need - import was changed to feed the other side. */
+};
proto/rip/rip.c

@@ -968,6 +968,9 @@ rip_timer(timer *t)

   FIB_ITERATE_INIT(&fit, &p->rtable);

+  struct channel_import_request *cir = p->cir;
+  p->cir = NULL;
+
 loop:
   FIB_ITERATE_START(&p->rtable, &fit, struct rip_entry, en)
   {
@@ -989,14 +992,13 @@ rip_timer(timer *t)
     }

     /* Propagating eventual change */
-    if (changed || p->rt_reload)
+    if ((changed || p->rt_reload) && (cir == NULL || channel_import_request_prefilter(cir, en->n.addr)))
     {
       /*
        * We have to restart the iteration because there may be a cascade of
        * synchronous events rip_announce_rte() -> nest table change ->
        * rip_rt_notify() -> p->rtable change, invalidating hidden variables.
        */
-
       FIB_ITERATE_PUT_NEXT(&fit, &p->rtable);
       rip_announce_rte(p, en);
       goto loop;
@@ -1047,7 +1049,19 @@ rip_timer(timer *t)
     }
   }

-  tm_start(p->timer, MAX(next - now_, 100 MS));
+  while(cir)
+  {
+    struct channel_import_request *next_cir = cir->next;
+    cir->done(cir);
+    cir = next_cir;
+  }
+  if (p->cir)
+  {
+    p->rt_reload = 1;
+    rip_kick_timer(p);
+  }
+  else
+    tm_start(p->timer, MAX(next - now_, 100 MS));
 }

 static inline void
@@ -1148,17 +1162,21 @@ rip_trigger_update(struct rip_proto *p)
  * RIP protocol glue
  */

-static void
-rip_reload_routes(struct channel *C)
+static int
+rip_reload_routes(struct channel *C, struct channel_import_request *cir)
 {
   struct rip_proto *p = (struct rip_proto *) C->proto;

+  cir->next = p->cir;
+  p->cir = cir;
+
   if (p->rt_reload)
-    return;
+    return 1;

   TRACE(D_EVENTS, "Scheduling route reload");
   p->rt_reload = 1;
   rip_kick_timer(p);
+  return 1;
 }

 static struct rte_owner_class rip_rte_owner_class;
proto/rip/rip.h

@@ -103,6 +103,7 @@ struct rip_proto

   struct tbf log_pkt_tbf;  /* TBF for packet messages */
   struct tbf log_rte_tbf;  /* TBF for RTE messages */
+  struct channel_import_request *cir;  /* Trie for partial reload */
 };

 struct rip_iface
proto/static/static.c

@@ -172,6 +172,25 @@ static_mark_all(struct static_proto *p)
   ev_schedule(p->event);
 }

+static void
+static_mark_partial(struct static_proto *p, struct channel_import_request *cir)
+{
+  struct static_config *cf = (void *) p->p.cf;
+  struct static_route *r;
+
+  WALK_LIST(r, cf->routes)
+    if (r->state == SRS_CLEAN && trie_match_net(cir->trie, r->net))
+    {
+      r->state = SRS_DIRTY;
+      BUFFER_PUSH(p->marked) = r;
+    }
+
+  if (!ev_active(p->event))
+    ev_schedule(p->event);
+
+  cir->done(cir);
+}
+
 static void
 static_announce_marked(void *P)

@@ -395,14 +414,19 @@ static_bfd_notify(struct bfd_request *req)
     static_mark_rte(p, r->mp_head);
 }

-static void
-static_reload_routes(struct channel *C)
+static int
+static_reload_routes(struct channel *C, struct channel_import_request *cir)
 {
   struct static_proto *p = (void *) C->proto;

   TRACE(D_EVENTS, "Scheduling route reload");

-  static_mark_all(p);
+  if (cir->trie)
+    static_mark_partial(p, cir);
+  else
+    static_mark_all(p);
+
+  return 1;
 }

 static int
sysdep/unix/krt.c

@@ -766,11 +766,18 @@ krt_if_notify(struct proto *P, uint flags, struct iface *iface UNUSED)
     krt_scan_timer_kick(p);
 }

-static void
-krt_reload_routes(struct channel *C)
+static int
+krt_reload_routes(struct channel *C, struct channel_import_request *cir)
 {
   struct krt_proto *p = (void *) C->proto;

+  if (cir->trie)
+  {
+    cir->done(cir);
+    return 0;
+  }
+
   /* Although we keep learned routes in krt_table, we rather schedule a scan */

   if (KRT_CF->learn)
@@ -778,6 +785,9 @@ krt_reload_routes(struct channel *C)
     p->reload = 1;
     krt_scan_timer_kick(p);
   }
+
+  cir->done(cir);
+  return 1;
 }

 static void krt_cleanup(struct krt_proto *p);
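Same story as BGP without an import table: the kernel protocol learns routes by scanning, and a scan is inherently all-or-nothing, so a trie-restricted request is refused (return 0) and only full reloads are accepted. The channel layer then falls back per the contract described in the nest/proto.c comment above.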