Mirror of https://gitlab.nic.cz/labs/bird.git (synced 2024-12-23 02:01:55 +00:00)
fixup! fixup! fixup! fixup! partial import seems working

commit 723092115f (parent 283ea03419)
@@ -1275,7 +1275,7 @@ This argument can be omitted if there exists only a single instance.
 command was used to change filters.

 If partial <prefix> option is used, only corresponding routes are reloaded.
-Protocol BGP does partial reload only if it has locked table, otherwise partial reload for BGP is refused.
+Protocol BGP does partial reload only if it has _import_ table enabled, otherwise partial reload for BGP is refused.

 Re-export always succeeds, but re-import is protocol-dependent and might
 fail (for example, if BGP neighbor does not support route-refresh
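Note: the paragraph above makes partial reload on BGP conditional on the channel having an import table enabled. A minimal configuration sketch with the import table switched on follows; the protocol name, peer address, AS numbers and the exact option spelling (taken from the BIRD 2.x "import table" channel switch) are assumptions, not part of this commit.

    # Sketch only: names, addresses and option spelling are assumptions.
    protocol bgp peer1 {
        local as 65000;
        neighbor 192.0.2.1 as 65001;
        ipv4 {
            import table on;   # keep received routes so a partial reload can be served
            import all;
            export all;
        };
    }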
lib/fib.h (11 changed lines)

@@ -94,17 +94,16 @@ void fit_copy(struct fib *f, struct fib_iterator *dst, struct fib_iterator *src)
   uint count_ = (fib)->hash_size; \
   uint hpos_ = (it)->hash; \
   type *z; \
-  for(;;) { \
-    if (!fn_) \
+  for(;;fn_ = fn_->next) { \
+    while (!fn_ && ++hpos_ < count_) \
       { \
-        if (++hpos_ >= count_) \
-          break; \
         fn_ = (fib)->hash_table[hpos_]; \
-        continue; \
       } \
+    if (hpos_ >= count_) \
+      break; \
     z = fib_node_to_user(fib, fn_);

-#define FIB_ITERATE_END fn_ = fn_->next; } } while(0)
+#define FIB_ITERATE_END } } while(0)

 #define FIB_ITERATE_PUT(it) fit_put(it, fn_)
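Note: the rewritten macro advances the node pointer in the for-header and uses an inner while loop to skip empty hash buckets, so FIB_ITERATE_END no longer has to advance the node itself. The following self-contained sketch reproduces the same control flow over a plain chained hash table; the types and names are illustrative stand-ins, not BIRD's.

    #include <stdio.h>

    /* A minimal chained hash table, just enough to show the loop shape. */
    struct node  { int value; struct node *next; };
    struct table { struct node **buckets; unsigned hash_size; };

    /* Visit every node, mirroring the new FIB_ITERATE control flow:
     * the for-header advances within a bucket, the inner while skips
     * empty buckets, and the walk ends once the bucket index runs out. */
    static void walk_all(struct table *t)
    {
      unsigned hpos = 0;
      struct node *fn = t->buckets[0];

      for (;; fn = fn->next)
      {
        while (!fn && ++hpos < t->hash_size)
          fn = t->buckets[hpos];

        if (hpos >= t->hash_size)
          break;

        printf("bucket %u: %d\n", hpos, fn->value);
      }
    }

    int main(void)
    {
      struct node a = { 1, NULL }, b = { 2, &a }, c = { 3, NULL };
      struct node *buckets[4] = { &b, NULL, &c, NULL };
      struct table t = { buckets, 4 };

      walk_all(&t);
      return 0;
    }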
@@ -107,6 +107,15 @@ proto_postconfig(void)
   this_proto = NULL;
 }

+static void
+channel_reload_out_done_main(void *_prr)
+{
+  struct proto_reload_request *prr = _prr;
+  ASSERT_THE_BIRD_LOCKED;
+
+  rfree(prr->trie->lp);
+}
+
 static inline void
 proto_call_cmd_reload(struct proto_spec ps, int dir, const struct f_trie *trie)
 {
@@ -114,19 +123,23 @@ proto_call_cmd_reload(struct proto_spec ps, int dir, const struct f_trie *trie)
   *prr = (struct proto_reload_request) {
     .trie = trie,
     .dir = dir,
-    .counter =1,
+    .counter = 1,
   };
   if (trie)
   {
+    /* The CLI linpool is moved under the trie because the trie is needed for longer
+       than the linpool would otherwise live in the CLI. It is freed in channel_reload_out_done_main. */
     ASSERT_DIE(this_cli->parser_pool == prr->trie->lp);
     rmove(this_cli->parser_pool, &root_pool);
     this_cli->parser_pool = lp_new(this_cli->pool);
     prr->ev = (event) {
       .data = prr,
+      .hook = channel_reload_out_done_main,
     };
   }

   proto_apply_cmd(ps, proto_cmd_reload, 1, (uintptr_t) prr);
+  /* This function holds its own reference to the trie, so it is released here as well as in the protocols. */
   if (trie)
     if (atomic_fetch_sub_explicit(&prr->counter, 1, memory_order_acq_rel) == 1)
       ev_send_loop(&main_birdloop, &prr->ev);
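Note: counter acts as a reference count on the shared reload request. It starts at 1 for the issuing CLI, and the cleanup event is sent only by whoever performs the decrement that observes the old value 1. How the other references are taken is not visible in this hunk, so the sketch below only illustrates the last-reference-frees pattern itself; the request struct and the cleanup function are stand-ins, not BIRD's.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for a shared reload request kept alive by several holders. */
    struct reload_request {
      atomic_uint counter;   /* number of outstanding references */
      const char *payload;   /* data that must stay alive until the last drop */
    };

    static void cleanup(struct reload_request *r)
    {
      printf("freeing request with payload '%s'\n", r->payload);
      free(r);
    }

    /* Drop one reference; the caller that observes the old value 1 was the
     * last holder and runs the cleanup (BIRD instead sends an event to the
     * main loop, but the ownership rule is the same). */
    static void request_unref(struct reload_request *r)
    {
      if (atomic_fetch_sub_explicit(&r->counter, 1, memory_order_acq_rel) == 1)
        cleanup(r);
    }

    int main(void)
    {
      struct reload_request *r = malloc(sizeof *r);
      atomic_init(&r->counter, 1);      /* the issuer's own reference */
      r->payload = "prefix trie";

      atomic_fetch_add_explicit(&r->counter, 1, memory_order_acq_rel); /* a worker takes a reference */
      request_unref(r);                 /* worker is done */
      request_unref(r);                 /* issuer is done: counter hits zero, cleanup runs */
      return 0;
    }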
nest/proto.c (47 changed lines)
@@ -60,8 +60,6 @@ static void channel_refeed_stopped(struct rt_export_request *req);
 static void channel_check_stopped(struct channel *c);
 static void channel_reload_in_done(struct channel_import_request *cir);
 static void channel_request_partial_reload(struct channel *c, struct channel_import_request *cir);
-void print_trie_node(const struct f_trie_node4 *t, int i);
-

 static inline int proto_is_done(struct proto *p)
 { return (p->proto_state == PS_DOWN) && proto_is_inactive(p); }

@@ -90,6 +88,7 @@ channel_export_log_state_change(struct rt_export_request *req, u8 state)
 {
   struct channel *c = SKIP_BACK(struct channel, out_req, req);
   CD(c, "Channel export state changed to %s", rt_export_state_name(state));
+
   switch (state)
   {
     case TES_FEEDING:

@@ -107,6 +106,7 @@ channel_refeed_log_state_change(struct rt_export_request *req, u8 state)
 {
   struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
   CD(c, "Channel export state changed to %s", rt_export_state_name(state));
+
   switch (state)
   {
     case TES_FEEDING:
@@ -290,6 +290,7 @@ proto_add_channel(struct proto *p, struct channel_config *cf)
   add_tail(&p->channels, &c->n);
+
   CD(c, "Connected to table %s", c->table->name);

   return c;
 }

@@ -341,6 +342,7 @@ proto_remove_channels(struct proto *p)
     proto_remove_channel(p, c);
 }

+
 struct roa_subscription {
   node roa_node;
   struct settle settle;

@@ -406,6 +408,7 @@ static void
 channel_export_one_roa(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
 {
   struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
+
   switch (net->type)
   {
   case NET_ROA4:

@@ -417,6 +420,7 @@ channel_export_one_roa(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
   default:
     bug("ROA table sent us a non-roa export");
   }
+
   settle_kick(&s->settle, s->c->proto->loop);

   rpe_mark_seen_all(req->hook, first, NULL, NULL);

@@ -428,6 +432,7 @@ channel_dump_roa_req(struct rt_export_request *req)
   struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
   struct channel *c = s->c;
   struct rtable_private *tab = SKIP_BACK(struct rtable_private, exporter.e, req->hook->table);
+
   debug(" Channel %s.%s ROA %s change notifier from table %s request %p\n",
     c->proto->name, c->name,
     (s->settle.hook == channel_roa_in_changed) ? "import" : "export",

@@ -439,6 +444,7 @@ channel_roa_is_subscribed(struct channel *c, rtable *tab, int dir)
 {
   void (*hook)(struct settle *) =
     dir ? channel_roa_in_changed : channel_roa_out_changed;
+
   struct roa_subscription *s;
   node *n;
@@ -457,6 +463,7 @@ channel_roa_subscribe(struct channel *c, rtable *tab, int dir)
     return;

   struct roa_subscription *s = mb_allocz(c->proto->pool, sizeof(struct roa_subscription));
+
   *s = (struct roa_subscription) {
     .settle = SETTLE_INIT(&c->roa_settle, dir ? channel_roa_in_changed : channel_roa_out_changed, NULL),
     .c = c,

@@ -471,6 +478,7 @@ channel_roa_subscribe(struct channel *c, rtable *tab, int dir)
       .export_one = channel_export_one_roa,
     },
   };
+
   add_tail(&c->roa_subscriptions, &s->roa_node);
   rt_request_export(tab, &s->req);
 }

@@ -714,6 +722,7 @@ channel_refeed_stopped(struct rt_export_request *req)
   struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
+
   req->hook = NULL;

   channel_feed_end(c);
 }

@@ -721,6 +730,7 @@ static void
 channel_init_feeding(struct channel *c)
 {
   int no_trie = 0;
+
   for (struct channel_feeding_request *cfrp = c->refeed_pending; cfrp; cfrp = cfrp->next)
     if (cfrp->type == CFRT_DIRECT)
     {
@@ -757,6 +767,7 @@ channel_refeed_prefilter(const struct rt_prefilter *p, const net_addr *n)
     SKIP_BACK(struct channel, refeed_req,
       SKIP_BACK(struct rt_export_request, prefilter, p)
     );
+
   ASSERT_DIE(c->refeeding);
   for (struct channel_feeding_request *cfr = c->refeeding; cfr; cfr = cfr->next)
     if (!cfr->trie || trie_match_net(cfr->trie, n))

@@ -765,7 +776,7 @@ channel_refeed_prefilter(const struct rt_prefilter *p, const net_addr *n)
 }

 int
-import_prefilter_for_protocols(struct channel_import_request *cir_head, const net_addr *n)
+channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n)
 {
   for (struct channel_import_request *cir = cir_head; cir; cir = cir->next)
   {

@@ -783,12 +794,8 @@ channel_import_prefilter(const struct rt_prefilter *p, const net_addr *n)
       SKIP_BACK(struct rt_export_request, prefilter, p)
     );
   ASSERT_DIE(c->importing);
-  for (struct channel_import_request *cir = c->importing; cir; cir = cir->next)
-  {
-    if (!cir->trie || trie_match_net(cir->trie, n))
-      return 1;
-  }
-  return 0;
+
+  return channel_import_request_prefilter(c->importing, n);
 }

 static void
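Note: the renamed channel_import_request_prefilter() answers one question for the protocols below: is a given network covered by at least one pending import request? A request without a trie means "reload everything"; otherwise its trie is consulted. The sketch below mirrors that decision with a plain IPv4 prefix standing in for BIRD's f_trie and trie_match_net().

    #include <stdio.h>

    /* Simplified stand-ins: the "trie" is just a single IPv4 prefix here. */
    struct prefix { unsigned addr; int len; };
    struct import_request {
      const struct prefix *trie;          /* NULL means full reload */
      struct import_request *next;
    };

    static int prefix_match(const struct prefix *p, unsigned addr)
    {
      unsigned mask = p->len ? ~0u << (32 - p->len) : 0;
      return (addr & mask) == (p->addr & mask);
    }

    /* Accept the address if any pending request covers it. */
    static int import_request_prefilter(const struct import_request *head, unsigned addr)
    {
      for (const struct import_request *r = head; r; r = r->next)
        if (!r->trie || prefix_match(r->trie, addr))
          return 1;
      return 0;
    }

    int main(void)
    {
      struct prefix p10 = { 0x0A000000u, 8 };              /* 10.0.0.0/8 */
      struct import_request partial = { &p10, NULL };

      printf("%d\n", import_request_prefilter(&partial, 0x0A010203u)); /* 10.1.2.3    -> 1 */
      printf("%d\n", import_request_prefilter(&partial, 0xC0A80001u)); /* 192.168.0.1 -> 0 */
      return 0;
    }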
@@ -857,14 +864,16 @@ channel_schedule_reload(struct channel *c, struct channel_import_request *cir)
     c->reload_pending = 1;
     return;
   }
-  struct channel_import_request *last = cir;
-  while (last)
+
+  /* If there is any full-reload request, we can disregard all partials */
+  for (struct channel_import_request *last = cir; last && no_trie == 0;)
   {
     if (!last->trie)
       no_trie = 1;
     last = last->next;
   }

+  /* Activating pending imports */
   c->importing = c->import_pending;
   c->import_pending = NULL;
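Note: the loop above only needs to know whether any queued request asks for a full reload; once such a request is found, the per-prefix tries can be disregarded for this round. A compact sketch of that collapse step, with a stand-in request type:

    #include <stdio.h>

    /* trie == NULL marks a full-reload request. */
    struct request { const void *trie; struct request *next; };

    /* Return 1 if the batch contains at least one full-reload request,
     * in which case per-prefix filtering can be skipped entirely. */
    static int batch_wants_full_reload(const struct request *head)
    {
      for (const struct request *r = head; r; r = r->next)
        if (!r->trie)
          return 1;
      return 0;
    }

    int main(void)
    {
      int dummy_trie;
      struct request full = { NULL, NULL };
      struct request partial = { &dummy_trie, &full };

      printf("%d\n", batch_wants_full_reload(&partial));  /* 1: the chain ends in a full request */
      return 0;
    }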
@@ -1090,6 +1099,7 @@ void
 channel_request_feeding(struct channel *c, struct channel_feeding_request *cfr)
 {
   ASSERT_DIE(c->out_req.hook);
+
   CD(c, "Feeding requested (%s)",
       cfr->type == CFRT_DIRECT ? "direct" :
       (cfr->trie ? "partial" : "auxiliary"));

@@ -1151,7 +1161,7 @@ channel_request_reload(struct channel *c)
   if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
     channel_schedule_reload(c, cir);
   else if (! c->proto->reload_routes(c, cir))
-    bug( "Partial reload was refused. Maybe you tried partial reload on BGP with unlocked table?");
+    bug("Channel %s.%s refused full import reload.", c->proto->name, c->name);
 }

 static void

@@ -1165,7 +1175,7 @@ channel_request_partial_reload(struct channel *c, struct channel_import_request *cir)
   if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
     channel_schedule_reload(c, cir);
   else if (! c->proto->reload_routes(c, cir))
-    cli_msg(-15, "Partial reload was refused. Maybe you tried partial reload on BGP with unlocked table?");
+    cli_msg(-15, "%s.%s: partial reload refused, please run full reload instead", c->proto->name, c->name);
 }

 const struct channel_class channel_basic = {
@@ -2767,15 +2777,6 @@ struct channel_cmd_reload_import_request {
   struct proto_reload_request *prr;
 };

-static void
-channel_reload_out_done_main(void *_prr)
-{
-  struct proto_reload_request *prr = _prr;
-  ASSERT_THE_BIRD_LOCKED;
-
-  rfree(prr->trie->lp);
-}
-
 static void
 channel_reload_out_done(struct channel_feeding_request *cfr)
 {

@@ -2796,8 +2797,6 @@ void
 proto_cmd_reload(struct proto *p, uintptr_t _prr, int cnt UNUSED)
 {
   struct proto_reload_request *prr = (void *) _prr;
-  if (prr->trie)
-    prr->ev.hook = channel_reload_out_done_main;
   struct channel *c;
   if (p->disabled)
   {
@@ -686,7 +686,7 @@ int proto_configure_channel(struct proto *p, struct channel **c, struct channel_config *cf);

 void channel_set_state(struct channel *c, uint state);
 void channel_schedule_reload(struct channel *c, struct channel_import_request *cir);
-int import_prefilter_for_protocols(struct channel_import_request *cir_head, const net_addr *n);
+int channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n);

 static inline void channel_init(struct channel *c) { channel_set_state(c, CS_START); }
 static inline void channel_open(struct channel *c) { channel_set_state(c, CS_UP); }
@@ -1251,7 +1251,6 @@ rpe_next(struct rt_pending_export *rpe, struct rte_src *src)
 }

 static struct rt_pending_export * rt_next_export_fast(struct rt_pending_export *last);
-
 static int
 rte_export(struct rt_table_export_hook *th, struct rt_pending_export *rpe)
 {

@@ -2213,6 +2212,7 @@ rt_table_export_start_feed(struct rtable_private *tab, struct rt_table_export_hook *hook)
 {
   struct rt_exporter *re = &tab->exporter.e;
   struct rt_export_request *req = hook->h.req;
+
   /* stats zeroed by mb_allocz */
   switch (req->prefilter.mode)
   {
@@ -1568,7 +1568,12 @@ bgp_reload_routes(struct channel *C, struct channel_import_request *cir)

   /* Ignore non-BGP channels */
   if (C->class != &channel_bgp)
+  {
+    if (cir)
+      cir->done(cir);
     return 1;
+  }

   if (cir)
   {
     if (cir->trie)

@@ -1576,6 +1581,7 @@ bgp_reload_routes(struct channel *C, struct channel_import_request *cir)
       cir->done(cir);
       return 0;
     }
+    /* We do not need cir anymore and later we will not be able to detect when to free it. */
     cir->done(cir);
   }
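Note: the added braces make sure cir->done(cir) also runs on the early "not a BGP channel" exit. Once a reload_routes hook is handed a request it owns it and must signal completion on every path, including refusals, or the issuer never learns the request is finished. A generic sketch of that contract follows; the request type, callback and return-value convention are illustrative, not BIRD's exact API.

    #include <stdio.h>

    struct import_request;
    typedef void (*done_fn)(struct import_request *);

    struct import_request {
      done_fn done;    /* completion callback installed by the issuer */
      int partial;     /* nonzero if only a subset should be reloaded */
    };

    static void reload_done(struct import_request *req)
    {
      printf("request %p finished\n", (void *) req);
    }

    /* A reload hook must call req->done() on every exit path it takes,
     * including refusals, so the issuer can release its bookkeeping. */
    static int example_reload_routes(struct import_request *req, int channel_supported)
    {
      if (!channel_supported)
      {
        if (req)
          req->done(req);   /* ignored, but still completed from the issuer's view */
        return 1;
      }

      if (req && req->partial)
      {
        req->done(req);     /* partial reload not supported here: refuse */
        return 0;
      }

      if (req)
        req->done(req);     /* full reload accepted; completed immediately in this sketch */
      return 1;
    }

    int main(void)
    {
      struct import_request full = { reload_done, 0 };
      struct import_request part = { reload_done, 1 };

      printf("full: %d\n", example_reload_routes(&full, 1));
      printf("partial: %d\n", example_reload_routes(&part, 1));
      return 0;
    }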
@@ -1611,6 +1611,7 @@ bgp_decode_nlri_ip4(struct bgp_parse_state *s, byte *pos, uint len, ea_list *a)
     net_normalize_ip4(&net);
+
     // XXXX validate prefix

     bgp_rte_update(s, (net_addr *) &net, path_id, a);
   }
 }

@@ -1695,6 +1696,7 @@ bgp_decode_nlri_ip6(struct bgp_parse_state *s, byte *pos, uint len, ea_list *a)
     net_normalize_ip6(&net);
+
     // XXXX validate prefix

     bgp_rte_update(s, (net_addr *) &net, path_id, a);
   }
 }
@@ -441,10 +441,7 @@ ospf_reload_routes(struct channel *C, struct channel_import_request *cir)
     p->cir = cir;
   }
   if (p->calcrt == 2)
-  {
-    /*todo*/
     return 1;
-  }

   OSPF_TRACE(D_EVENTS, "Scheduling routing table calculation with route reload");
   p->calcrt = 2;

@@ -474,9 +471,7 @@ ospf_disp(timer * timer)

   /* Calculate routing table */
   if (p->calcrt)
-  {
     ospf_rt_spf(p);
-  }

   /* Cleanup after graceful restart */
   if (p->gr_cleanup)
@@ -608,7 +608,6 @@ spfa_process_prefixes(struct ospf_proto *p, struct ospf_area *oa)
   }
 }

-
 /* RFC 2328 16.1. calculating shortest paths for an area */
 static void
 ospf_rt_spfa(struct ospf_area *oa)

@@ -616,10 +615,12 @@ ospf_rt_spfa(struct ospf_area *oa)
   struct ospf_proto *p = oa->po;
   struct top_hash_entry *act;
   node *n;
+
   if (oa->rt == NULL)
     return;
   if (oa->rt->lsa.age == LSA_MAXAGE)
     return;
+
   OSPF_TRACE(D_EVENTS, "Starting routing table calculation for area %R", oa->areaid);

   /* 16.1. (1) */

@@ -642,6 +643,7 @@ ospf_rt_spfa(struct ospf_area *oa)

     DBG("Working on LSA: rt: %R, id: %R, type: %u\n",
         act->lsa.rt, act->lsa.id, act->lsa_type);
+
     act->color = INSPF;
     switch (act->lsa_type)
     {

@@ -1698,7 +1700,7 @@ ospf_rt_spf(struct ospf_proto *p)
   rt_sync(p);
   lp_flush(p->nhpool);

-  if (p->cir == NULL)
+  if (p->cir == NULL) /* If there is no more cir waiting for reload */
     p->calcrt = 0;
 }
@@ -2027,6 +2029,8 @@ rt_sync(struct ospf_proto *p)
 again1:
   FIB_ITERATE_START(fib, &fit, ort, nf)
   {
+    if (cir && !channel_import_request_prefilter(cir, nf->fn.addr))
+      continue;
     /* Sanity check of next-hop addresses, failure should not happen */
     if (nf->n.type && nf->n.nhs)
     {

@@ -2066,8 +2070,6 @@ again1:
         EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, &nf->n.nhs->ad);

       if (reload || ort_changed(nf, &eattrs.l))
-      {
-        if (cir == NULL || import_prefilter_for_protocols(cir, nf->fn.addr))
         {
           nf->old_metric1 = nf->n.metric1;
           nf->old_metric2 = nf->n.metric2;

@@ -2106,7 +2108,6 @@ again1:
           rte_update(p->p.main_channel, nf->fn.addr, &e0, p->p.main_source);
         }
       }
-      }
       else if (nf->old_ea)
       {
         /* Remove the route */
@@ -992,9 +992,7 @@ rip_timer(timer *t)
       }

       /* Propagating eventual change */
-      if (changed || p->rt_reload)
-      {
-        if (cir == NULL || import_prefilter_for_protocols(cir, en->n.addr))
+      if ((changed || p->rt_reload) && (cir == NULL || channel_import_request_prefilter(cir, en->n.addr)))
         {
           /*
            * We have to restart the iteration because there may be a cascade of

@@ -1005,7 +1003,6 @@ rip_timer(timer *t)
           rip_announce_rte(p, en);
           goto loop;
         }
-      }

       /* Checking stale entries for garbage collection timeout */
       if (en->valid == RIP_ENTRY_STALE)
@@ -418,7 +418,9 @@ static int
 static_reload_routes(struct channel *C, struct channel_import_request *cir)
 {
   struct static_proto *p = (void *) C->proto;
+
   TRACE(D_EVENTS, "Scheduling route reload");
+
   if (cir && cir->trie)
     static_mark_partial(p, cir);
   else
@@ -1,46 +0,0 @@
-log "bird.log" all;
-
-debug protocols all;
-debug channels all;
-debug tables all;
-
-ipv4 table master1;
-ipv4 table master2;
-
-protocol device {
-  scan time 10;
-}
-
-protocol static static1 {
-  ipv4 { table master1; };
-  route 10.0.0.0/16 unreachable;
-  route 12.0.0.0/16 unreachable;
-  route 127.0.0.0/8 unreachable;
-  route 192.0.0.0/8 unreachable;
-  route 192.168.0.0/16 unreachable;
-  route 195.113.26.206/32 unreachable;
-  route 1.1.1.1/32 unreachable;
-}
-
-ipv4 table ct_4;
-protocol pipe {
-  table master1;
-  peer table master2;
-  import filter {
-    print net;
-    accept;
-  };
-  export filter {
-    print net;
-    accept;
-  };
-}
-
-protocol rip rip4 {
-  ipv4 {
-    export all;
-  };
-  interface "ve0";
-  interface "ve1", "ve2" { metric 1; mode multicast; };
-}
-
@@ -767,10 +767,18 @@ krt_if_notify(struct proto *P, uint flags, struct iface *iface UNUSED)
 }

 static int
-krt_reload_routes(struct channel *C, struct channel_import_request *cir UNUSED)
+krt_reload_routes(struct channel *C, struct channel_import_request *cir)
 {
   struct krt_proto *p = (void *) C->proto;

+  if (cir && cir->trie)
+  {
+    cir->done(cir);
+    return 0;
+  }
+  if (cir)
+    cir->done(cir);
+
   /* Although we keep learned routes in krt_table, we rather schedule a scan */

   if (KRT_CF->learn)

@@ -1037,5 +1045,3 @@ krt_build(void)
     &ea_krt_metric,
   );
 }
-
-