0
0
mirror of https://gitlab.nic.cz/labs/bird.git synced 2024-12-22 09:41:54 +00:00

fixup! fixup! fixup! fixup! partial import seems working

This commit is contained in:
Katerina Kubecova 2023-11-01 17:28:08 +01:00
parent 283ea03419
commit 723092115f
14 changed files with 107 additions and 133 deletions

View File

@ -1275,7 +1275,7 @@ This argument can be omitted if there exists only a single instance.
command was used to change filters.
If partial <prefix> option is used, only corresponding routes are reloaded.
Protocol BGP does partial reload only if it has locked table, otherwise partial reload for BGP is refused.
Protocol BGP does partial reload only if it has _import_ table enabled, otherwise partial reload for BGP is refused.
Re-export always succeeds, but re-import is protocol-dependent and might
fail (for example, if BGP neighbor does not support route-refresh

View File

@ -94,17 +94,16 @@ void fit_copy(struct fib *f, struct fib_iterator *dst, struct fib_iterator *src)
uint count_ = (fib)->hash_size; \
uint hpos_ = (it)->hash; \
type *z; \
for(;;) { \
if (!fn_) \
for(;;fn_ = fn_->next) { \
while (!fn_ && ++hpos_ < count_) \
{ \
if (++hpos_ >= count_) \
break; \
fn_ = (fib)->hash_table[hpos_]; \
continue; \
} \
if (hpos_ >= count_) \
break; \
z = fib_node_to_user(fib, fn_);
#define FIB_ITERATE_END fn_ = fn_->next; } } while(0)
#define FIB_ITERATE_END } } while(0)
#define FIB_ITERATE_PUT(it) fit_put(it, fn_)

View File

@ -107,6 +107,15 @@ proto_postconfig(void)
this_proto = NULL;
}
/*
 * Main-loop completion hook for a reload request: once every protocol has
 * dropped its reference, release the linpool that backs the request's
 * prefix trie. Must run with the BIRD lock held.
 */
static void
channel_reload_out_done_main(void *data)
{
  ASSERT_THE_BIRD_LOCKED;

  struct proto_reload_request *req = data;
  rfree(req->trie->lp);
}
static inline void
proto_call_cmd_reload(struct proto_spec ps, int dir, const struct f_trie *trie)
{
@ -114,19 +123,23 @@ proto_call_cmd_reload(struct proto_spec ps, int dir, const struct f_trie *trie)
*prr = (struct proto_reload_request) {
.trie = trie,
.dir = dir,
.counter =1,
.counter = 1,
};
if (trie)
{
/* The CLI linpool is moved to the trie, because the trie is needed for longer
than the linpool would otherwise exist in the CLI. The linpool is freed in channel_reload_out_done_main */
ASSERT_DIE(this_cli->parser_pool == prr->trie->lp);
rmove(this_cli->parser_pool, &root_pool);
this_cli->parser_pool = lp_new(this_cli->pool);
prr->ev = (event) {
.data = prr,
.hook = channel_reload_out_done_main,
};
}
proto_apply_cmd(ps, proto_cmd_reload, 1, (uintptr_t) prr);
/* This function used a reference to this trie, so the reference is dropped here as well as in the protocols */
if (trie)
if (atomic_fetch_sub_explicit(&prr->counter, 1, memory_order_acq_rel) == 1)
ev_send_loop(&main_birdloop, &prr->ev);

View File

@ -60,8 +60,6 @@ static void channel_refeed_stopped(struct rt_export_request *req);
static void channel_check_stopped(struct channel *c);
static void channel_reload_in_done(struct channel_import_request *cir);
static void channel_request_partial_reload(struct channel *c, struct channel_import_request *cir);
void print_trie_node(const struct f_trie_node4 *t, int i);
/* A protocol is fully done only when it has reached PS_DOWN
 * and nothing of it remains active. */
static inline int proto_is_done(struct proto *p)
{
  if (p->proto_state != PS_DOWN)
    return 0;

  return proto_is_inactive(p);
}
@ -90,6 +88,7 @@ channel_export_log_state_change(struct rt_export_request *req, u8 state)
{
struct channel *c = SKIP_BACK(struct channel, out_req, req);
CD(c, "Channel export state changed to %s", rt_export_state_name(state));
switch (state)
{
case TES_FEEDING:
@ -107,6 +106,7 @@ channel_refeed_log_state_change(struct rt_export_request *req, u8 state)
{
struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
CD(c, "Channel export state changed to %s", rt_export_state_name(state));
switch (state)
{
case TES_FEEDING:
@ -290,6 +290,7 @@ proto_add_channel(struct proto *p, struct channel_config *cf)
add_tail(&p->channels, &c->n);
CD(c, "Connected to table %s", c->table->name);
return c;
}
@ -341,6 +342,7 @@ proto_remove_channels(struct proto *p)
proto_remove_channel(p, c);
}
struct roa_subscription {
node roa_node;
struct settle settle;
@ -406,6 +408,7 @@ static void
channel_export_one_roa(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
{
struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
switch (net->type)
{
case NET_ROA4:
@ -417,6 +420,7 @@ channel_export_one_roa(struct rt_export_request *req, const net_addr *net, struc
default:
bug("ROA table sent us a non-roa export");
}
settle_kick(&s->settle, s->c->proto->loop);
rpe_mark_seen_all(req->hook, first, NULL, NULL);
@ -428,6 +432,7 @@ channel_dump_roa_req(struct rt_export_request *req)
struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
struct channel *c = s->c;
struct rtable_private *tab = SKIP_BACK(struct rtable_private, exporter.e, req->hook->table);
debug(" Channel %s.%s ROA %s change notifier from table %s request %p\n",
c->proto->name, c->name,
(s->settle.hook == channel_roa_in_changed) ? "import" : "export",
@ -439,6 +444,7 @@ channel_roa_is_subscribed(struct channel *c, rtable *tab, int dir)
{
void (*hook)(struct settle *) =
dir ? channel_roa_in_changed : channel_roa_out_changed;
struct roa_subscription *s;
node *n;
@ -457,6 +463,7 @@ channel_roa_subscribe(struct channel *c, rtable *tab, int dir)
return;
struct roa_subscription *s = mb_allocz(c->proto->pool, sizeof(struct roa_subscription));
*s = (struct roa_subscription) {
.settle = SETTLE_INIT(&c->roa_settle, dir ? channel_roa_in_changed : channel_roa_out_changed, NULL),
.c = c,
@ -471,6 +478,7 @@ channel_roa_subscribe(struct channel *c, rtable *tab, int dir)
.export_one = channel_export_one_roa,
},
};
add_tail(&c->roa_subscriptions, &s->roa_node);
rt_request_export(tab, &s->req);
}
@ -714,6 +722,7 @@ channel_refeed_stopped(struct rt_export_request *req)
struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
req->hook = NULL;
channel_feed_end(c);
}
@ -721,6 +730,7 @@ static void
channel_init_feeding(struct channel *c)
{
int no_trie = 0;
for (struct channel_feeding_request *cfrp = c->refeed_pending; cfrp; cfrp = cfrp->next)
if (cfrp->type == CFRT_DIRECT)
{
@ -757,6 +767,7 @@ channel_refeed_prefilter(const struct rt_prefilter *p, const net_addr *n)
SKIP_BACK(struct channel, refeed_req,
SKIP_BACK(struct rt_export_request, prefilter, p)
);
ASSERT_DIE(c->refeeding);
for (struct channel_feeding_request *cfr = c->refeeding; cfr; cfr = cfr->next)
if (!cfr->trie || trie_match_net(cfr->trie, n))
@ -765,7 +776,7 @@ channel_refeed_prefilter(const struct rt_prefilter *p, const net_addr *n)
}
int
import_prefilter_for_protocols(struct channel_import_request *cir_head, const net_addr *n)
channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n)
{
for (struct channel_import_request *cir = cir_head; cir; cir = cir->next)
{
@ -783,12 +794,8 @@ channel_import_prefilter(const struct rt_prefilter *p, const net_addr *n)
SKIP_BACK(struct rt_export_request, prefilter, p)
);
ASSERT_DIE(c->importing);
for (struct channel_import_request *cir = c->importing; cir; cir = cir->next)
{
if (!cir->trie || trie_match_net(cir->trie, n))
return 1;
}
return 0;
return channel_import_request_prefilter(c->importing, n);
}
static void
@ -857,14 +864,16 @@ channel_schedule_reload(struct channel *c, struct channel_import_request *cir)
c->reload_pending = 1;
return;
}
struct channel_import_request *last = cir;
while (last)
/* If there is any full-reload request, we can disregard all partials */
for (struct channel_import_request *last = cir; last && no_trie==0;)
{
if (!last->trie)
no_trie = 1;
last = last->next;
last = last->next;
}
/* activating pending imports */
c->importing = c->import_pending;
c->import_pending = NULL;
@ -1090,6 +1099,7 @@ void
channel_request_feeding(struct channel *c, struct channel_feeding_request *cfr)
{
ASSERT_DIE(c->out_req.hook);
CD(c, "Feeding requested (%s)",
cfr->type == CFRT_DIRECT ? "direct" :
(cfr->trie ? "partial" : "auxiliary"));
@ -1151,7 +1161,7 @@ channel_request_reload(struct channel *c)
if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
channel_schedule_reload(c, cir);
else if (! c->proto->reload_routes(c, cir))
bug( "Partial reload was refused. Maybe you tried partial reload on BGP with unlocked table?");
bug("Channel %s.%s refused full import reload.", c->proto->name, c->name);
}
static void
@ -1165,7 +1175,7 @@ channel_request_partial_reload(struct channel *c, struct channel_import_request
if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
channel_schedule_reload(c, cir);
else if (! c->proto->reload_routes(c, cir))
cli_msg(-15, "Partial reload was refused. Maybe you tried partial reload on BGP with unlocked table?");
cli_msg(-15, "%s.%s: partial reload refused, please run full reload instead", c->proto->name, c->name);
}
const struct channel_class channel_basic = {
@ -2767,15 +2777,6 @@ struct channel_cmd_reload_import_request {
struct proto_reload_request *prr;
};
/* Main-loop hook finishing a reload request: frees the linpool backing
 * the request's prefix trie. NOTE(review): presumably invoked via the
 * request's event once all reference holders are done — confirm against
 * the counter handling in proto_call_cmd_reload. */
static void
channel_reload_out_done_main(void *_prr)
{
struct proto_reload_request *prr = _prr;
/* Must only run on the main loop with the BIRD lock held */
ASSERT_THE_BIRD_LOCKED;
rfree(prr->trie->lp);
}
static void
channel_reload_out_done(struct channel_feeding_request *cfr)
{
@ -2796,8 +2797,6 @@ void
proto_cmd_reload(struct proto *p, uintptr_t _prr, int cnt UNUSED)
{
struct proto_reload_request *prr = (void *) _prr;
if (prr->trie)
prr->ev.hook = channel_reload_out_done_main;
struct channel *c;
if (p->disabled)
{

View File

@ -686,7 +686,7 @@ int proto_configure_channel(struct proto *p, struct channel **c, struct channel_
void channel_set_state(struct channel *c, uint state);
void channel_schedule_reload(struct channel *c, struct channel_import_request *cir);
int import_prefilter_for_protocols(struct channel_import_request *cir_head, const net_addr *n);
int channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n);
static inline void channel_init(struct channel *c) { channel_set_state(c, CS_START); }
static inline void channel_open(struct channel *c) { channel_set_state(c, CS_UP); }

View File

@ -1251,7 +1251,6 @@ rpe_next(struct rt_pending_export *rpe, struct rte_src *src)
}
static struct rt_pending_export * rt_next_export_fast(struct rt_pending_export *last);
static int
rte_export(struct rt_table_export_hook *th, struct rt_pending_export *rpe)
{
@ -2213,6 +2212,7 @@ rt_table_export_start_feed(struct rtable_private *tab, struct rt_table_export_ho
{
struct rt_exporter *re = &tab->exporter.e;
struct rt_export_request *req = hook->h.req;
/* stats zeroed by mb_allocz */
switch (req->prefilter.mode)
{

View File

@ -1568,7 +1568,12 @@ bgp_reload_routes(struct channel *C, struct channel_import_request *cir)
/* Ignore non-BGP channels */
if (C->class != &channel_bgp)
{
if (cir)
cir->done(cir);
return 1;
}
if (cir)
{
if (cir->trie)
@ -1576,6 +1581,7 @@ bgp_reload_routes(struct channel *C, struct channel_import_request *cir)
cir->done(cir);
return 0;
}
/* We do not need cir anymore, and later we would not be able to detect when to free it. */
cir->done(cir);
}

View File

@ -1611,6 +1611,7 @@ bgp_decode_nlri_ip4(struct bgp_parse_state *s, byte *pos, uint len, ea_list *a)
net_normalize_ip4(&net);
// XXXX validate prefix
bgp_rte_update(s, (net_addr *) &net, path_id, a);
}
}
@ -1695,6 +1696,7 @@ bgp_decode_nlri_ip6(struct bgp_parse_state *s, byte *pos, uint len, ea_list *a)
net_normalize_ip6(&net);
// XXXX validate prefix
bgp_rte_update(s, (net_addr *) &net, path_id, a);
}
}

View File

@ -441,10 +441,7 @@ ospf_reload_routes(struct channel *C, struct channel_import_request *cir)
p->cir = cir;
}
if (p->calcrt == 2)
{
/*todo*/
return 1;
}
OSPF_TRACE(D_EVENTS, "Scheduling routing table calculation with route reload");
p->calcrt = 2;
@ -474,9 +471,7 @@ ospf_disp(timer * timer)
/* Calculate routing table */
if (p->calcrt)
{
ospf_rt_spf(p);
}
/* Cleanup after graceful restart */
if (p->gr_cleanup)

View File

@ -608,7 +608,6 @@ spfa_process_prefixes(struct ospf_proto *p, struct ospf_area *oa)
}
}
/* RFC 2328 16.1. calculating shortest paths for an area */
static void
ospf_rt_spfa(struct ospf_area *oa)
@ -616,10 +615,12 @@ ospf_rt_spfa(struct ospf_area *oa)
struct ospf_proto *p = oa->po;
struct top_hash_entry *act;
node *n;
if (oa->rt == NULL)
return;
if (oa->rt->lsa.age == LSA_MAXAGE)
return;
OSPF_TRACE(D_EVENTS, "Starting routing table calculation for area %R", oa->areaid);
/* 16.1. (1) */
@ -642,6 +643,7 @@ ospf_rt_spfa(struct ospf_area *oa)
DBG("Working on LSA: rt: %R, id: %R, type: %u\n",
act->lsa.rt, act->lsa.id, act->lsa_type);
act->color = INSPF;
switch (act->lsa_type)
{
@ -1698,7 +1700,7 @@ ospf_rt_spf(struct ospf_proto *p)
rt_sync(p);
lp_flush(p->nhpool);
if (p->cir == NULL)
if (p->cir == NULL) /* If there is no more cir waiting for reload */
p->calcrt = 0;
}
@ -2027,6 +2029,8 @@ rt_sync(struct ospf_proto *p)
again1:
FIB_ITERATE_START(fib, &fit, ort, nf)
{
if (cir && !channel_import_request_prefilter(cir, nf->fn.addr))
continue;
/* Sanity check of next-hop addresses, failure should not happen */
if (nf->n.type && nf->n.nhs)
{
@ -2067,44 +2071,41 @@ again1:
if (reload || ort_changed(nf, &eattrs.l))
{
if (cir == NULL || import_prefilter_for_protocols(cir, nf->fn.addr))
{
nf->old_metric1 = nf->n.metric1;
nf->old_metric2 = nf->n.metric2;
nf->old_tag = nf->n.tag;
nf->old_rid = nf->n.rid;
nf->old_metric1 = nf->n.metric1;
nf->old_metric2 = nf->n.metric2;
nf->old_tag = nf->n.tag;
nf->old_rid = nf->n.rid;
eattrs.a[eattrs.l.count++] =
EA_LITERAL_EMBEDDED(&ea_ospf_metric1, 0, nf->n.metric1);
if (nf->n.type == RTS_OSPF_EXT2)
eattrs.a[eattrs.l.count++] =
EA_LITERAL_EMBEDDED(&ea_ospf_metric1, 0, nf->n.metric1);
if (nf->n.type == RTS_OSPF_EXT2)
eattrs.a[eattrs.l.count++] =
EA_LITERAL_EMBEDDED(&ea_ospf_metric2, 0, nf->n.metric2);
if ((nf->n.type == RTS_OSPF_EXT1) || (nf->n.type == RTS_OSPF_EXT2))
eattrs.a[eattrs.l.count++] =
EA_LITERAL_EMBEDDED(&ea_ospf_tag, 0, nf->n.tag);
EA_LITERAL_EMBEDDED(&ea_ospf_metric2, 0, nf->n.metric2);
if ((nf->n.type == RTS_OSPF_EXT1) || (nf->n.type == RTS_OSPF_EXT2))
eattrs.a[eattrs.l.count++] =
EA_LITERAL_EMBEDDED(&ea_ospf_router_id, 0, nf->n.rid);
EA_LITERAL_EMBEDDED(&ea_ospf_tag, 0, nf->n.tag);
ASSERT_DIE(ARRAY_SIZE(eattrs.a) >= eattrs.l.count);
eattrs.a[eattrs.l.count++] =
EA_LITERAL_EMBEDDED(&ea_ospf_router_id, 0, nf->n.rid);
ea_list *eal = ea_lookup(&eattrs.l, 0);
ea_free(nf->old_ea);
nf->old_ea = eal;
ASSERT_DIE(ARRAY_SIZE(eattrs.a) >= eattrs.l.count);
rte e0 = {
.attrs = eal,
.src = p->p.main_source,
};
ea_list *eal = ea_lookup(&eattrs.l, 0);
ea_free(nf->old_ea);
nf->old_ea = eal;
/*
DBG("Mod rte type %d - %N via %I on iface %s, met %d\n",
a0.source, nf->fn.addr, a0.gw, a0.iface ? a0.iface->name : "(none)", nf->n.metric1);
*/
rte_update(p->p.main_channel, nf->fn.addr, &e0, p->p.main_source);
}
rte e0 = {
.attrs = eal,
.src = p->p.main_source,
};
/*
DBG("Mod rte type %d - %N via %I on iface %s, met %d\n",
a0.source, nf->fn.addr, a0.gw, a0.iface ? a0.iface->name : "(none)", nf->n.metric1);
*/
rte_update(p->p.main_channel, nf->fn.addr, &e0, p->p.main_source);
}
}
else if (nf->old_ea)

View File

@ -992,19 +992,16 @@ rip_timer(timer *t)
}
/* Propagating eventual change */
if (changed || p->rt_reload)
if ((changed || p->rt_reload) && (cir == NULL || channel_import_request_prefilter(cir, en->n.addr)))
{
if (cir == NULL || import_prefilter_for_protocols(cir, en->n.addr))
{
/*
* We have to restart the iteration because there may be a cascade of
* synchronous events rip_announce_rte() -> nest table change ->
* rip_rt_notify() -> p->rtable change, invalidating hidden variables.
*/
FIB_ITERATE_PUT_NEXT(&fit, &p->rtable);
rip_announce_rte(p, en);
goto loop;
}
/*
* We have to restart the iteration because there may be a cascade of
* synchronous events rip_announce_rte() -> nest table change ->
* rip_rt_notify() -> p->rtable change, invalidating hidden variables.
*/
FIB_ITERATE_PUT_NEXT(&fit, &p->rtable);
rip_announce_rte(p, en);
goto loop;
}
/* Checking stale entries for garbage collection timeout */

View File

@ -418,7 +418,9 @@ static int
static_reload_routes(struct channel *C, struct channel_import_request *cir)
{
struct static_proto *p = (void *) C->proto;
TRACE(D_EVENTS, "Scheduling route reload");
if (cir && cir->trie)
static_mark_partial(p, cir);
else

View File

@ -1,46 +0,0 @@
log "bird.log" all;
debug protocols all;
debug channels all;
debug tables all;
ipv4 table master1;
ipv4 table master2;
protocol device {
scan time 10;
}
protocol static static1 {
ipv4 { table master1; };
route 10.0.0.0/16 unreachable;
route 12.0.0.0/16 unreachable;
route 127.0.0.0/8 unreachable;
route 192.0.0.0/8 unreachable;
route 192.168.0.0/16 unreachable;
route 195.113.26.206/32 unreachable;
route 1.1.1.1/32 unreachable;
}
ipv4 table ct_4;
protocol pipe {
table master1;
peer table master2;
import filter {
print net;
accept;
};
export filter {
print net;
accept;
};
}
protocol rip rip4 {
ipv4 {
export all;
};
interface "ve0";
interface "ve1", "ve2" { metric 1; mode multicast; };
}

View File

@ -767,10 +767,18 @@ krt_if_notify(struct proto *P, uint flags, struct iface *iface UNUSED)
}
static int
krt_reload_routes(struct channel *C, struct channel_import_request *cir UNUSED)
krt_reload_routes(struct channel *C, struct channel_import_request *cir)
{
struct krt_proto *p = (void *) C->proto;
if (cir && cir->trie)
{
cir->done(cir);
return 0;
}
if (cir)
cir->done(cir);
/* Although we keep learned routes in krt_table, we rather schedule a scan */
if (KRT_CF->learn)
@ -1037,5 +1045,3 @@ krt_build(void)
&ea_krt_metric,
);
}