Nest: Allow batch route import

commit 95ea723bb7 (parent 1675ecbb71)

nest/route.h (72 lines changed)
@@ -325,6 +325,78 @@ void rte_update(struct channel *c, net_addr *net, struct rte *rte);
  */
 void rte_withdraw(struct channel *c, net_addr *net, struct rte_src *src);
 
+/* Single route update order */
+struct rte_update {
+  struct rte_update *next;              /* Internal single-linked list */
+  struct rte_src *src;                  /* Key: rte_src */
+  rte *rte;                             /* Value: the route itself */
+  enum rte_update_flags {
+    RUF_IGNORE = 1,                     /* Ignore this update */
+  } flags;
+  net_addr n[0];                        /* Key: net */
+};
+
+struct rte_update_batch {
+  struct linpool *lp;                   /* Linpool to allocate the batch from */
+  struct rte_update *first, **last;     /* Single route update order list */
+};
+
+/**
+ * rte_update_init - prepare a route update batch
+ *
+ * To import or withdraw more than one route, pack the updates into one batch
+ * and execute them all at once. This function prepares such a batch.
+ */
+struct rte_update_batch * rte_update_init(void);
+
+/**
+ * rte_update_get - prepare a route import
+ *
+ * @batch: the batch to put this import in
+ * @n: network address
+ * @src: the route source identifier (NULL for default)
+ *
+ * This function returns a structure describing one route import. Fill in only
+ * the @rte member of the returned structure (the pointer is already set) and
+ * set the flags. The route attributes must stay valid until rte_update_commit()
+ * is called; ensure this either by calling rta_lookup() or by allocating them
+ * from the batch->lp linpool.
+ */
+struct rte_update * rte_update_get(struct rte_update_batch *batch, net_addr *n, struct rte_src *src);
+
+/**
+ * rte_withdraw_get - prepare a route withdraw
+ *
+ * @batch: the batch to put this withdraw in
+ * @n: network address
+ * @src: the route source identifier (NULL for default)
+ *
+ * This function registers a withdraw. You may only set the flags in the
+ * returned structure.
+ */
+struct rte_update * rte_withdraw_get(struct rte_update_batch *batch, net_addr *n, struct rte_src *src);
+
+/**
+ * rte_update_commit - execute all the prepared updates
+ *
+ * @batch: batch to commit
+ * @c: channel to send the updates to
+ *
+ * This function executes all the prepared updates on the given channel.
+ */
+void rte_update_commit(struct rte_update_batch *batch, struct channel *c);
+
+/**
+ * rte_update_cancel - cancel the prepared updates
+ *
+ * @batch: batch to cancel
+ *
+ * In case of error, you may want to send no updates at all. This frees all
+ * memory allocated for the batch, together with the batch itself.
+ */
+void rte_update_cancel(struct rte_update_batch *batch);
+
 extern list routing_tables;
 struct config;
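
For orientation only (this example is not part of the commit), the call
sequence described by the comments above looks roughly like this from a
protocol's point of view; the channel `ch`, the prefixes `n1` and `n2` and the
attributes `a` are hypothetical and assumed to be supplied by the caller, and
only declarations from nest/route.h are used:

    #include "nest/route.h"

    /* Sketch: queue one import and one withdraw, then send both at once. */
    static void
    example_batch_usage(struct channel *ch, net_addr *n1, net_addr *n2, rta *a)
    {
      struct rte_update_batch *batch = rte_update_init();

      /* Import: fill in the preallocated rte and leave the flags at zero */
      struct rte_update *ru = rte_update_get(batch, n1, NULL);
      ru->rte->attrs = rta_lookup(a);   /* cached rta stays valid until commit */
      ru->rte->pflags = 0;

      /* Withdraw: nothing to fill in */
      rte_withdraw_get(batch, n2, NULL);

      /* Execute all prepared updates on the channel */
      rte_update_commit(batch, ch);
    }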

nest/rt-table.c (101 lines changed)
@@ -1373,6 +1373,17 @@ rte_unhide_dummy_routes(net *net, rte **dummy)
     }
 }
 
+static struct rte_src *
+rte_fix_src(struct channel *c, rte *new)
+{
+  if (new->attrs->src)
+    return new->attrs->src;
+  else if (rta_is_cached(new->attrs))
+    bug("src is NULL in cached RTA");
+  else
+    return new->attrs->src = c->proto->main_source;
+}
+
 static void
 rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
 {
@@ -1398,6 +1409,8 @@ rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
       if (!new->pref)
         new->pref = c->preference;
 
+      src = rte_fix_src(c, new);
+
       stats->imp_updates_received++;
       if (!rte_validate(new))
         {
@@ -1449,9 +1462,12 @@ rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
     }
   else
     {
+      if (!src)
+        src = c->proto->main_source;
+
       stats->imp_withdraws_received++;
 
-      if (!(nn = net_find(c->table, n)) || !src)
+      if (!(nn = net_find(c->table, n)))
         {
           stats->imp_withdraws_ignored++;
           rte_update_unlock();
@@ -1477,18 +1493,89 @@ rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
       rte_update_unlock();
     }
 
+struct rte_update_batch *
+rte_update_init(void)
+{
+  rte_update_lock();
+  struct rte_update_batch *batch = lp_alloc(rte_update_pool, sizeof(struct rte_update_batch));
+  batch->lp = rte_update_pool;
+  batch->first = NULL;
+  batch->last = &(batch->first);
+  return batch;
+}
+
 static int rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
 
+void
+rte_update_commit(struct rte_update_batch *batch, struct channel *c)
+{
+  ASSERT(batch->lp);
+
+  _Bool have_in_table = c->in_table;
+
+  for (struct rte_update *ru = batch->first; ru; ru = ru->next)
+    if ((ru->flags & RUF_IGNORE) ||
+        have_in_table && !rte_update_in(c, ru->n, ru->rte, ru->src))
+      rte_free_quick(ru->rte);
+    else
+      rte_update2(c, ru->n, ru->rte, ru->src);
+
+  batch->lp = NULL;
+  rte_update_unlock();
+}
+
+void
+rte_update_cancel(struct rte_update_batch *batch)
+{
+  ASSERT(batch->lp);
+
+  for (struct rte_update *ru = batch->first; ru; ru = ru->next)
+    if (ru->rte)
+      rte_free_quick(ru->rte);
+
+  batch->lp = NULL;
+  rte_update_unlock();
+}
+
+struct rte_update *
+rte_withdraw_get(struct rte_update_batch *batch, net_addr *n, struct rte_src *src)
+{
+  struct rte_update *ru = lp_alloc(batch->lp, sizeof(struct rte_update) + n->length);
+
+  *(batch->last) = ru;
+  batch->last = &(ru->next);
+  ru->next = NULL;
+
+  net_copy(ru->n, n);
+  ru->src = src;
+  ru->rte = NULL;
+  ru->flags = 0;
+  return ru;
+}
+
 void
 rte_withdraw(struct channel *c, net_addr *n, struct rte_src *src)
 {
-  if (!src)
-    src = c->proto->main_source;
-
-  if (!c->in_table || rte_update_in(c, n, NULL, src))
+  if (c->in_table && !rte_update_in(c, n, NULL, src ?: c->proto->main_source))
+    return;
+
   rte_update2(c, n, NULL, src ?: c->proto->main_source);
 }
 
+struct rte_update *
+rte_update_get(struct rte_update_batch *batch, net_addr *n, struct rte_src *src)
+{
+  struct rte_update *ru = rte_withdraw_get(batch, n, src);
+
+  rte *e = sl_alloc(rte_slab);
+
+  e->id = 0;
+  e->flags = 0;
+  e->pref = 0;
+
+  ru->rte = e;
+
+  return ru;
+}
+
 void
 rte_update(struct channel *c, net_addr *n, struct rte *new)
 {
@@ -1504,7 +1591,9 @@ rte_update(struct channel *c, net_addr *n, struct rte *new)
       e->attrs->src = c->proto->main_source;
     }
 
-  if (!c->in_table || rte_update_in(c, n, e, e->attrs->src))
+  if (c->in_table && !rte_update_in(c, n, e, e->attrs->src))
+    rte_free_quick(e);
+  else
     rte_update2(c, n, e, e->attrs->src);
 }
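
Each rte_update above is allocated from the batch linpool with the prefix
copied into the trailing net_addr n[0] member, so one allocation holds both
the list node and its key; rte_update_commit() then walks the list in
insertion order and frees any entry that carries RUF_IGNORE or is rejected by
the channel's in_table check instead of announcing it. As a hypothetical
illustration (not part of the commit), a caller can therefore drop a single
queued import without cancelling the whole batch:

    /* Sketch: `ru` was returned earlier by rte_update_get() on this batch.
     * Commit will free its preallocated rte and skip the announcement,
     * while the remaining entries are still processed normally. */
    ru->flags = RUF_IGNORE;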
@@ -453,6 +453,7 @@ struct bgp_parse_state {
   u32 last_id;
   struct rte_src *last_src;
   rta *cached_rta;
+  struct rte_update_batch *update_batch;
 };
 
 #define BGP_PORT 179

@@ -483,6 +484,9 @@ static inline uint bgp_max_packet_length(struct bgp_conn *conn)
 static inline void
 bgp_parse_error(struct bgp_parse_state *s, uint subcode)
 {
+  if (s->update_batch)
+    rte_update_cancel(s->update_batch);
+
   s->err_subcode = subcode;
   longjmp(s->err_jmpbuf, 1);
 }

@@ -1304,7 +1304,7 @@ bgp_rte_update(struct bgp_parse_state *s, net_addr *n, u32 path_id, rta *a0)
   if (!a0)
   {
     /* Route withdraw */
-    rte_withdraw(&(s->channel->c), n, s->last_src);
+    rte_withdraw_get(s->update_batch, n, s->last_src);
     return;
   }
 

@@ -1319,12 +1319,12 @@ bgp_rte_update(struct bgp_parse_state *s, net_addr *n, u32 path_id, rta *a0)
     a0->eattrs = ea;
   }
 
-  rte e0 = {
-    .attrs = rta_clone(s->cached_rta),
-    .u.bgp.stale = -1,
-  };
-
-  rte_update(&(s->channel->c), n, &e0);
+  struct rte_update *ru = rte_update_get(s->update_batch, n, s->last_src);
+  ru->rte->pflags = 0;
+  ru->rte->attrs = rta_clone(s->cached_rta);
+  ru->rte->u.bgp.suppressed = 0;
+  ru->rte->u.bgp.stale = -1;
+  return;
 }
 
 static void

@@ -2388,7 +2388,7 @@ bgp_decode_nlri(struct bgp_parse_state *s, u32 afi, byte *nlri, uint len, ea_lis
 
   if (ea)
   {
-    a = allocz(RTA_MAX_SIZE);
+    a = lp_allocz(s->update_batch->lp, RTA_MAX_SIZE);
 
     a->source = RTS_BGP;
     a->scope = SCOPE_UNIVERSE;

@@ -2491,6 +2491,8 @@ bgp_rx_update(struct bgp_conn *conn, byte *pkt, uint len)
       !s.mp_reach_len && !s.mp_unreach_len && s.mp_unreach_af)
     { bgp_rx_end_mark(&s, s.mp_unreach_af); goto done; }
 
+  s.update_batch = rte_update_init();
+
   if (s.ip_unreach_len)
     bgp_decode_nlri(&s, BGP_AF_IPV4, s.ip_unreach_nlri, s.ip_unreach_len, NULL, NULL, 0);
 

@@ -2505,6 +2507,9 @@ bgp_rx_update(struct bgp_conn *conn, byte *pkt, uint len)
     bgp_decode_nlri(&s, s.mp_reach_af, s.mp_reach_nlri, s.mp_reach_len,
                     ea, s.mp_next_hop_data, s.mp_next_hop_len);
 
+  rte_update_commit(s.update_batch, &s.channel->c);
+  s.update_batch = NULL;
+
 done:
   rta_free(s.cached_rta);
   lp_flush(s.pool);
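
The BGP hooks above also show the intended error handling: the batch is
created at the start of bgp_rx_update(), attribute data is drawn from its
linpool, and bgp_parse_error() cancels the batch before the longjmp so nothing
half-built gets announced. Since rte_update_init() takes the route update lock
that only rte_update_commit() or rte_update_cancel() release, every batch has
to end in one of those two calls. A rough sketch of the same pattern for a
protocol with ordinary error returns (the names below are hypothetical, not
from the commit):

    /* Sketch: build a batch, bail out with rte_update_cancel() on error. */
    static int
    example_announce(struct channel *ch, net_addr *n, rta *a)
    {
      struct rte_update_batch *batch = rte_update_init();

      if (!a)                       /* some validation failed */
      {
        rte_update_cancel(batch);   /* frees the batch and everything queued in it */
        return 0;
      }

      struct rte_update *ru = rte_update_get(batch, n, NULL);
      ru->rte->attrs = rta_lookup(a);
      ru->rte->pflags = 0;

      rte_update_commit(batch, ch);
      return 1;
    }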
@@ -2015,6 +2015,8 @@ rt_sync(struct ospf_proto *p)
 
   OSPF_TRACE(D_EVENTS, "Starting routing table synchronization");
 
+  struct rte_update_batch *rub = rte_update_init();
+
   DBG("Now syncing my rt table with nest's\n");
   FIB_ITERATE_INIT(&fit, fib);
 again1:

@@ -2052,29 +2054,27 @@ again1:
 
       if (reload || ort_changed(nf, &a0))
       {
-        rte e0 = {
-          .attrs = rta_lookup(&a0),
-          .u.ospf.metric1 = nf->old_metric1 = nf->n.metric1,
-          .u.ospf.metric2 = nf->old_metric2 = nf->n.metric2,
-          .u.ospf.tag = nf->old_tag = nf->n.tag,
-          .u.ospf.router_id = nf->old_rid = nf->n.rid,
-          .pflags = EA_ID_FLAG(EA_OSPF_METRIC1) | EA_ID_FLAG(EA_OSPF_ROUTER_ID),
-        };
+        struct rte_update *ru = rte_update_get(rub, nf->fn.addr, NULL);
+        ru->rte->attrs = rta_lookup(&a0);
 
         rta_free(nf->old_rta);
-        nf->old_rta = rta_clone(e0.attrs);
+        nf->old_rta = rta_clone(ru->rte->attrs);
+
+        ru->rte->u.ospf.metric1 = nf->old_metric1 = nf->n.metric1;
+        ru->rte->u.ospf.metric2 = nf->old_metric2 = nf->n.metric2;
+        ru->rte->u.ospf.tag = nf->old_tag = nf->n.tag;
+        ru->rte->u.ospf.router_id = nf->old_rid = nf->n.rid;
+        ru->rte->pflags = EA_ID_FLAG(EA_OSPF_METRIC1) | EA_ID_FLAG(EA_OSPF_ROUTER_ID);
 
         if (nf->n.type == RTS_OSPF_EXT2)
-          e0.pflags |= EA_ID_FLAG(EA_OSPF_METRIC2);
+          ru->rte->pflags |= EA_ID_FLAG(EA_OSPF_METRIC2);
 
         /* Perhaps onfly if tag is non-zero? */
         if ((nf->n.type == RTS_OSPF_EXT1) || (nf->n.type == RTS_OSPF_EXT2))
-          e0.pflags |= EA_ID_FLAG(EA_OSPF_TAG);
+          ru->rte->pflags |= EA_ID_FLAG(EA_OSPF_TAG);
 
         DBG("Mod rte type %d - %N via %I on iface %s, met %d\n",
             a0.source, nf->fn.addr, a0.gw, a0.iface ? a0.iface->name : "(none)", nf->n.metric1);
-
-        rte_update(p->p.main_channel, nf->fn.addr, &e0);
       }
     }
   else if (nf->old_rta)

@@ -2083,7 +2083,7 @@ again1:
       rta_free(nf->old_rta);
       nf->old_rta = NULL;
 
-      rte_withdraw(p->p.main_channel, nf->fn.addr, NULL);
+      rte_withdraw_get(rub, nf->fn.addr, NULL);
     }
 
   /* Remove unused rt entry, some special entries are persistent */

@@ -2099,6 +2099,8 @@ again1:
   }
   FIB_ITERATE_END;
 
+  rte_update_commit(rub, p->p.main_channel);
+
   WALK_LIST(oa, p->area_list)
   {
     /* Cleanup ASBR hash tables */
@@ -160,17 +160,25 @@ perf_loop(void *data)
 
   clock_gettime(CLOCK_MONOTONIC, &ts_generated);
 
-  for (uint i=0; i<N; i++)
-  {
-    rte e0 = { .attrs = p->data[i].a, };
-    rte_update(P->main_channel, &(p->data[i].net), &e0);
+  struct rte_update_batch *rub = rte_update_init();
+
+  for (uint i=0; i<N; i++) {
+    struct rte_update *ru = rte_update_get(rub, &(p->data[i].net), NULL);
+    ru->rte->attrs = p->data[i].a;
+    ru->rte->pflags = 0;
   }
 
+  rte_update_commit(rub, P->main_channel);
+
   clock_gettime(CLOCK_MONOTONIC, &ts_update);
 
+  rub = rte_update_init();
+
   if (!p->keep)
     for (uint i=0; i<N; i++)
-      rte_withdraw(P->main_channel, &(p->data[i].net), NULL);
+      rte_withdraw_get(rub, &(p->data[i].net), NULL);
+
+  rte_update_commit(rub, P->main_channel);
 
   clock_gettime(CLOCK_MONOTONIC, &ts_withdraw);
 
@@ -50,10 +50,9 @@
 static linpool *static_lp;
 
 static void
-static_announce_rte(struct static_proto *p, struct static_route *r)
+static_announce_rte(struct static_proto *p, struct rte_update_batch *rub, struct static_route *r)
 {
-  rta *a = allocz(RTA_MAX_SIZE);
-  a->src = p->p.main_source;
+  rta *a = lp_allocz(rub->lp, RTA_MAX_SIZE);
   a->source = RTS_STATIC;
   a->scope = SCOPE_UNIVERSE;
   a->dest = r->dest;

@@ -68,7 +67,7 @@ static_announce_rte(struct static_proto *p, struct static_route *r)
       if (!r2->active)
         continue;
 
-      struct nexthop *nh = allocz(NEXTHOP_MAX_SIZE);
+      struct nexthop *nh = lp_allocz(rub->lp, NEXTHOP_MAX_SIZE);
       nh->gw = r2->via;
       nh->iface = r2->neigh->iface;
       nh->flags = r2->onlink ? RNF_ONLINK : 0;

@@ -99,24 +98,22 @@ static_announce_rte(struct static_proto *p, struct static_route *r)
     return;
 
   /* We skip rta_lookup() here */
-  rte e0 = { .attrs = a }, *e = &e0;
+  struct rte_update *ru = rte_update_get(rub, r->net, NULL);
+  ru->rte->pflags = 0;
+  ru->rte->attrs = a;
 
   if (r->cmds)
-    f_eval_rte(r->cmds, &e, static_lp);
-
-  rte_update(p->p.main_channel, r->net, e);
+    f_eval_rte(r->cmds, &(ru->rte), rub->lp);
 
   r->state = SRS_CLEAN;
 
-  if (r->cmds)
-    lp_flush(static_lp);
-
   return;
 
 withdraw:
   if (r->state == SRS_DOWN)
     return;
 
-  rte_withdraw(p->p.main_channel, r->net, NULL);
+  rte_withdraw_get(rub, r->net, NULL);
   r->state = SRS_DOWN;
 }

@@ -138,10 +135,12 @@ static_announce_marked(void *P)
 {
   struct static_proto *p = P;
 
+  struct rte_update_batch *rub = rte_update_init();
   BUFFER_WALK(p->marked, r)
-    static_announce_rte(P, r);
+    static_announce_rte(p, rub, r);
 
   BUFFER_FLUSH(p->marked);
+  rte_update_commit(rub, p->p.main_channel);
 }
 
 static void

@@ -196,7 +195,7 @@ fail:
 }
 
 static void
-static_add_rte(struct static_proto *p, struct static_route *r)
+static_add_rte(struct static_proto *p, struct rte_update_batch *rub, struct static_route *r)
 {
   if (r->dest == RTD_UNICAST)
   {

@@ -224,7 +223,7 @@ static_add_rte(struct static_proto *p, struct static_route *r)
     }
   }
 
-  static_announce_rte(p, r);
+  static_announce_rte(p, rub, r);
 }
 
 static void

@@ -246,10 +245,10 @@ static_reset_rte(struct static_proto *p UNUSED, struct static_route *r)
 }
 
 static void
-static_remove_rte(struct static_proto *p, struct static_route *r)
+static_remove_rte(struct static_proto *p, struct rte_update_batch *rub, struct static_route *r)
 {
   if (r->state)
-    rte_withdraw(p->p.main_channel, r->net, NULL);
+    rte_withdraw_get(rub, r->net, NULL);
 
   static_reset_rte(p, r);
 }

@@ -312,14 +311,14 @@ static_same_rte(struct static_route *or, struct static_route *nr)
 }
 
 static void
-static_reconfigure_rte(struct static_proto *p, struct static_route *or, struct static_route *nr)
+static_reconfigure_rte(struct static_proto *p, struct rte_update_batch *rub, struct static_route *or, struct static_route *nr)
 {
   if ((or->state == SRS_CLEAN) && !static_same_rte(or, nr))
     nr->state = SRS_DIRTY;
   else
     nr->state = or->state;
 
-  static_add_rte(p, nr);
+  static_add_rte(p, rub, nr);
   static_reset_rte(p, or);
 }

@@ -427,8 +426,12 @@ static_start(struct proto *P)
   /* We have to go UP before routes could be installed */
   proto_notify_state(P, PS_UP);
 
+  struct rte_update_batch *rub = rte_update_init();
+
   WALK_LIST(r, cf->routes)
-    static_add_rte(p, r);
+    static_add_rte(p, rub, r);
+
+  rte_update_commit(rub, p->p.main_channel);
 
   return PS_UP;
 }

@@ -508,6 +511,8 @@ static_reconfigure(struct proto *P, struct proto_config *CF)
   if (!proto_configure_channel(P, &P->main_channel, proto_cf_main_channel(CF)))
     return 0;
 
+  struct rte_update_batch *rub = rte_update_init();
+
   p->p.cf = CF;
 
   /* Reset route lists in neighbor entries */

@@ -520,10 +525,13 @@ static_reconfigure(struct proto *P, struct proto_config *CF)
   for (or = HEAD(o->routes), nr = HEAD(n->routes);
        NODE_VALID(or) && NODE_VALID(nr) && net_equal(or->net, nr->net);
        or = NODE_NEXT(or), nr = NODE_NEXT(nr))
-    static_reconfigure_rte(p, or, nr);
+    static_reconfigure_rte(p, rub, or, nr);
 
   if (!NODE_VALID(or) && !NODE_VALID(nr))
+  {
+    rte_update_commit(rub, p->p.main_channel);
     return 1;
+  }
 
   /* Reconfigure remaining routes, sort them to find matching pairs */
   struct static_route *or2, *nr2, **orbuf, **nrbuf;

@@ -551,18 +559,20 @@ static_reconfigure(struct proto *P, struct proto_config *CF)
   {
     int x = net_compare(orbuf[orpos]->net, nrbuf[nrpos]->net);
     if (x < 0)
-      static_remove_rte(p, orbuf[orpos++]);
+      static_remove_rte(p, rub, orbuf[orpos++]);
     else if (x > 0)
-      static_add_rte(p, nrbuf[nrpos++]);
+      static_add_rte(p, rub, nrbuf[nrpos++]);
     else
-      static_reconfigure_rte(p, orbuf[orpos++], nrbuf[nrpos++]);
+      static_reconfigure_rte(p, rub, orbuf[orpos++], nrbuf[nrpos++]);
   }
 
   while (orpos < ornum)
-    static_remove_rte(p, orbuf[orpos++]);
+    static_remove_rte(p, rub, orbuf[orpos++]);
 
   while (nrpos < nrnum)
-    static_add_rte(p, nrbuf[nrpos++]);
+    static_add_rte(p, rub, nrbuf[nrpos++]);
 
+  rte_update_commit(rub, p->p.main_channel);
+
   xfree(orbuf);
   xfree(nrbuf);
@@ -296,21 +296,18 @@ krt_uptodate(rte *a, rte *b)
 }
 
 static void
-krt_learn_announce_update(struct krt_proto *p, rte *e)
+krt_learn_announce_update(struct rte_update_batch *rub, rte *e)
 {
-  rte e0 = {
-    .attrs = rta_clone(e->attrs),
-    .pflags = EA_ID_FLAG(EA_KRT_SOURCE) | EA_ID_FLAG(EA_KRT_METRIC),
-    .u.krt = e->u.krt,
-  };
-
-  rte_update(p->p.main_channel, e->net->n.addr, &e0);
+  struct rte_update *ru = rte_update_get(rub, e->net->n.addr, NULL);
+  ru->rte->attrs = rta_clone(e->attrs);
+  ru->rte->pflags = EA_ID_FLAG(EA_KRT_SOURCE) | EA_ID_FLAG(EA_KRT_METRIC);
+  ru->rte->u.krt = e->u.krt;
 }
 
 static void
-krt_learn_announce_delete(struct krt_proto *p, net_addr *n)
+krt_learn_announce_delete(struct rte_update_batch *rub, net *n)
 {
-  rte_withdraw(p->p.main_channel, n, NULL);
+  rte_withdraw_get(rub, n->n.addr, NULL);
 }
 
 /* Called when alien route is discovered during scan */

@@ -360,6 +357,8 @@ krt_learn_prune(struct krt_proto *p)
 
   KRT_TRACE(p, D_EVENTS, "Pruning inherited routes");
 
+  struct rte_update_batch *rub = rte_update_init();
+
   FIB_ITERATE_INIT(&fit, fib);
 again:
   FIB_ITERATE_START(fib, &fit, net, n)

@@ -402,7 +401,7 @@ again:
     {
       DBG("%I/%d: deleting\n", n->n.prefix, n->n.pxlen);
       if (old_best)
-        krt_learn_announce_delete(p, n->n.addr);
+        krt_learn_announce_delete(rub, n);
 
       FIB_ITERATE_PUT(&fit);
       fib_delete(fib, n);

@@ -417,13 +416,15 @@ again:
       if ((best != old_best) || p->reload)
         {
           DBG("%I/%d: announcing (metric=%d)\n", n->n.prefix, n->n.pxlen, best->u.krt.metric);
-          krt_learn_announce_update(p, best);
+          krt_learn_announce_update(rub, best);
         }
       else
         DBG("%I/%d: uptodate (metric=%d)\n", n->n.prefix, n->n.pxlen, best->u.krt.metric);
     }
   FIB_ITERATE_END;
 
+  rte_update_commit(rub, p->p.main_channel);
+
   p->reload = 0;
 }
 

@@ -494,14 +495,16 @@ krt_learn_async(struct krt_proto *p, rte *e, int new)
       n->routes = best;
     }
 
+  struct rte_update_batch *rub = rte_update_init();
   if (best != old_best)
     {
       DBG("krt_learn_async: distributing change\n");
       if (best)
-        krt_learn_announce_update(p, best);
+        krt_learn_announce_update(rub, best);
      else
-        krt_learn_announce_delete(p, n->n.addr);
+        krt_learn_announce_delete(rub, n);
     }
+  rte_update_commit(rub, p->p.main_channel);
 }
 
 static void