0
0
mirror of https://gitlab.nic.cz/labs/bird.git synced 2024-12-22 09:41:54 +00:00

Split route data structure to storage (ro) / manipulation (rw) structures.

This should help a lot with keeping the route machinery and allocations
clean. This commit also changes the behaviour of rte_update() with cached
rta: calling rte_update() now preserves the caller's rta reference count.
This commit is contained in:
Maria Matejka 2020-04-11 00:35:52 +02:00 committed by Maria Matejka
parent a9d468c590
commit d24a6ad4c7
28 changed files with 892 additions and 1008 deletions

View File

@ -519,14 +519,14 @@
{
STATIC_ATTR;
ACCESS_RTE;
struct rta *rta = (*fs->rte)->attrs;
struct rta *rta = fs->rte->attrs;
switch (sa.sa_code)
{
case SA_FROM: RESULT(sa.f_type, ip, rta->from); break;
case SA_GW: RESULT(sa.f_type, ip, rta->nh.gw); break;
case SA_NET: RESULT(sa.f_type, net, (*fs->rte)->net->n.addr); break;
case SA_PROTO: RESULT(sa.f_type, s, (*fs->rte)->src->proto->name); break;
case SA_NET: RESULT(sa.f_type, net, fs->rte->net); break;
case SA_PROTO: RESULT(sa.f_type, s, fs->rte->src->proto->name); break;
case SA_SOURCE: RESULT(sa.f_type, i, rta->source); break;
case SA_SCOPE: RESULT(sa.f_type, i, rta->scope); break;
case SA_DEST: RESULT(sa.f_type, i, rta->dest); break;
@ -549,7 +549,7 @@
f_rta_cow(fs);
{
struct rta *rta = (*fs->rte)->attrs;
struct rta *rta = fs->rte->attrs;
switch (sa.sa_code)
{
@ -561,7 +561,7 @@
{
ip_addr ip = v1.val.ip;
struct iface *ifa = ipa_is_link_local(ip) ? rta->nh.iface : NULL;
neighbor *n = neigh_find((*fs->rte)->src->proto, ip, ifa, 0);
neighbor *n = neigh_find(fs->rte->src->proto, ip, ifa, 0);
if (!n || (n->scope == SCOPE_HOST))
runtime( "Invalid gw address" );
@ -1194,7 +1194,7 @@
struct rtable *table = rtc->table;
ACCESS_RTE;
ACCESS_EATTRS;
const net_addr *net = (*fs->rte)->net->n.addr;
const net_addr *net = fs->rte->net;
/* We ignore temporary attributes, probably not a problem here */
/* 0x02 is a value of BA_AS_PATH, we don't want to include BGP headers */

View File

@ -76,10 +76,7 @@ struct filter_state {
struct filter_stack *stack;
/* The route we are processing. This may be NULL to indicate no route available. */
struct rte **rte;
/* The old rta to be freed after filters are done. */
struct rta *old_rta;
struct rte *rte;
/* Cached pointer to ea_list */
struct ea_list **eattrs;
@ -87,11 +84,11 @@ struct filter_state {
/* Linpool for adata allocation */
struct linpool *pool;
/* Buffer for log output */
struct buffer buf;
/* Filter execution flags */
int flags;
/* Buffer for log output */
struct buffer buf;
};
_Thread_local static struct filter_state filter_state;
@ -101,15 +98,7 @@ void (*bt_assert_hook)(int result, const struct f_line_item *assert);
static inline void f_cache_eattrs(struct filter_state *fs)
{
fs->eattrs = &((*fs->rte)->attrs->eattrs);
}
static inline void f_rte_cow(struct filter_state *fs)
{
if (!((*fs->rte)->flags & REF_COW))
return;
*fs->rte = rte_cow(*fs->rte);
fs->eattrs = &(fs->rte->attrs->eattrs);
}
/*
@ -118,22 +107,16 @@ static inline void f_rte_cow(struct filter_state *fs)
static void
f_rta_cow(struct filter_state *fs)
{
if (!rta_is_cached((*fs->rte)->attrs))
if (!rta_is_cached(fs->rte->attrs))
return;
/* Prepare to modify rte */
f_rte_cow(fs);
/* Store old rta to free it later, it stores reference from rte_cow() */
fs->old_rta = (*fs->rte)->attrs;
/*
* Get shallow copy of rta. Fields eattrs and nexthops of rta are shared
* with fs->old_rta (they will be copied when the cached rta will be obtained
* at the end of f_run()), also the lock of hostentry is inherited (we
* suppose hostentry is not changed by filters).
*/
(*fs->rte)->attrs = rta_do_cow((*fs->rte)->attrs, fs->pool);
fs->rte->attrs = rta_do_cow(fs->rte->attrs, fs->pool);
/* Re-cache the ea_list */
f_cache_eattrs(fs);
@ -241,29 +224,15 @@ interpret(struct filter_state *fs, const struct f_line *line, struct f_val *val)
/**
* f_run - run a filter for a route
* @filter: filter to run
* @rte: route being filtered, may be modified
* @rte: route being filtered; must be writable
* @tmp_pool: all filter allocations go from this pool
* @flags: flags
*
* If filter needs to modify the route, there are several
* posibilities. @rte might be read-only (with REF_COW flag), in that
* case rw copy is obtained by rte_cow() and @rte is replaced. If
* @rte is originally rw, it may be directly modified (and it is never
* copied).
*
* The returned rte may reuse the (possibly cached, cloned) rta, or
* (if rta was modified) contains a modified uncached rta, which
* uses parts allocated from @tmp_pool and parts shared from original
* rta. There is one exception - if @rte is rw but contains a cached
* rta and that is modified, rta in returned rte is also cached.
*
* Ownership of cached rtas is consistent with rte, i.e.
* if a new rte is returned, it has its own clone of cached rta
* (and cached rta of read-only source rte is intact), if rte is
* modified in place, old cached rta is possibly freed.
* If filter needs to modify the attributes, it allocates a local
* shallow copy of them on @tmp_pool.
*/
enum filter_return
f_run(const struct filter *filter, struct rte **rte, struct linpool *tmp_pool, int flags)
f_run(const struct filter *filter, struct rte *rte, struct linpool *tmp_pool, int flags)
{
if (filter == FILTER_ACCEPT)
return F_ACCEPT;
@ -271,7 +240,6 @@ f_run(const struct filter *filter, struct rte **rte, struct linpool *tmp_pool, i
if (filter == FILTER_REJECT)
return F_REJECT;
int rte_cow = ((*rte)->flags & REF_COW);
DBG( "Running filter `%s'...", filter->name );
/* Initialize the filter state */
@ -287,32 +255,6 @@ f_run(const struct filter *filter, struct rte **rte, struct linpool *tmp_pool, i
/* Run the interpreter itself */
enum filter_return fret = interpret(&filter_state, filter->root, NULL);
if (filter_state.old_rta) {
/*
* Cached rta was modified and filter_state->rte contains now an uncached one,
* sharing some part with the cached one. The cached rta should
* be freed (if rte was originally COW, filter_state->old_rta is a clone
* obtained during rte_cow()).
*
* This also implements the exception mentioned in f_run()
* description. The reason for this is that rta reuses parts of
* filter_state->old_rta, and these may be freed during rta_free(filter_state->old_rta).
* This is not the problem if rte was COW, because original rte
* also holds the same rta.
*/
if (!rte_cow) {
/* Cache the new attrs */
(*filter_state.rte)->attrs = rta_lookup((*filter_state.rte)->attrs);
/* Drop cached ea_list pointer */
filter_state.eattrs = NULL;
}
/* Uncache the old attrs and drop the pointer as it is invalid now. */
rta_free(filter_state.old_rta);
filter_state.old_rta = NULL;
}
/* Process the filter output, log it and return */
if (fret < F_ACCEPT) {
if (!(filter_state.flags & FF_SILENT))
@ -337,7 +279,7 @@ f_run(const struct filter *filter, struct rte **rte, struct linpool *tmp_pool, i
*/
enum filter_return
f_eval_rte(const struct f_line *expr, struct rte **rte, struct linpool *tmp_pool)
f_eval_rte(const struct f_line *expr, struct rte *rte, struct linpool *tmp_pool)
{
filter_state = (struct filter_state) {
.stack = &filter_stack,
@ -347,8 +289,7 @@ f_eval_rte(const struct f_line *expr, struct rte **rte, struct linpool *tmp_pool
LOG_BUFFER_INIT(filter_state.buf);
ASSERT(!((*rte)->flags & REF_COW));
ASSERT(!rta_is_cached((*rte)->attrs));
ASSERT(!rta_is_cached(rte->attrs));
return interpret(&filter_state, expr, NULL);
}

View File

@ -51,8 +51,8 @@ struct filter {
struct rte;
enum filter_return f_run(const struct filter *filter, struct rte **rte, struct linpool *tmp_pool, int flags);
enum filter_return f_eval_rte(const struct f_line *expr, struct rte **rte, struct linpool *tmp_pool);
enum filter_return f_run(const struct filter *filter, struct rte *rte, struct linpool *tmp_pool, int flags);
enum filter_return f_eval_rte(const struct f_line *expr, struct rte *rte, struct linpool *tmp_pool);
uint f_eval_int(const struct f_line *expr);
enum filter_return f_eval_buf(const struct f_line *expr, struct linpool *tmp_pool, buffer *buf);

View File

@ -72,7 +72,7 @@ static inline int u64_cmp(u64 i1, u64 i2)
#define NORET __attribute__((noreturn))
#define UNUSED __attribute__((unused))
#define PACKED __attribute__((packed))
#define NONNULL(...) __attribute__((nonnull((__VA_ARGS__))))
#define NONNULL(...) __attribute__((nonnull(__VA_ARGS__)))
#define USE_RESULT __attribute__((warn_unused_result))
#ifndef HAVE_THREAD_LOCAL

View File

@ -892,8 +892,8 @@ channel_reconfigure(struct channel *c, struct channel_config *cf)
c->out_limit = cf->out_limit;
// c->ra_mode = cf->ra_mode;
c->merge_limit = cf->merge_limit;
c->preference = cf->preference;
c->merge_limit = cf->merge_limit;
c->debug = cf->debug;
c->in_keep_filtered = cf->in_keep_filtered;
c->rpki_reload = cf->rpki_reload;

View File

@ -78,7 +78,7 @@ struct protocol {
int (*shutdown)(struct proto *); /* Stop the instance */
void (*cleanup)(struct proto *); /* Called after shutdown when protocol became hungry/down */
void (*get_status)(struct proto *, byte *buf); /* Get instance status (for `show protocols' command) */
void (*get_route_info)(struct rte *, byte *buf); /* Get route information (for `show route' command) */
void (*get_route_info)(struct rte *, struct rte_storage *, byte *); /* Get route information (for `show route' command) */
int (*get_attr)(const struct eattr *, byte *buf, int buflen); /* ASCIIfy dynamic attribute (returns GA_*) */
void (*show_proto_info)(struct proto *); /* Show protocol info (for `show protocols all' command) */
void (*copy_config)(struct proto_config *, struct proto_config *); /* Copy config from given protocol instance */
@ -213,7 +213,7 @@ struct proto {
void (*ifa_notify)(struct proto *, unsigned flags, struct ifa *a);
void (*rt_notify)(struct channel *, struct rte_export *);
void (*neigh_notify)(struct neighbor *neigh);
int (*preexport)(struct proto *, struct rte *rt);
int (*preexport)(struct channel *, struct rte *rt);
void (*reload_routes)(struct channel *);
void (*feed_begin)(struct channel *, int initial);
void (*feed_end)(struct channel *);
@ -228,12 +228,12 @@ struct proto {
* rte_remove Called whenever a rte is removed from the routing table.
*/
int (*rte_recalculate)(struct rtable *, struct network *, struct rte *, struct rte *, struct rte *);
int (*rte_better)(struct rte *, struct rte *);
int (*rte_mergable)(struct rte *, struct rte *);
struct rte * (*rte_modify)(struct rte *, struct linpool *);
void (*rte_insert)(struct network *, struct rte *);
void (*rte_remove)(struct network *, struct rte *);
int (*rte_recalculate)(struct rtable *, struct network *, struct rte_storage *, struct rte_storage *, struct rte_storage *);
int (*rte_better)(struct rte_storage *, struct rte_storage *);
int (*rte_mergable)(struct rte_storage *, struct rte_storage *);
struct rta *(*rte_modify)(struct rte_storage *, struct linpool *);
void (*rte_insert)(struct network *, struct rte_storage *);
void (*rte_remove)(struct network *, struct rte_storage *);
/* Hic sunt protocol-specific data */
};
@ -541,7 +541,7 @@ struct channel {
struct rtable *in_table; /* Internal table for received routes */
struct event *reload_event; /* Event responsible for reloading from in_table */
struct fib_iterator reload_fit; /* FIB iterator in in_table used during reloading */
struct rte *reload_next_rte; /* Route iterator in in_table used during reloading */
struct rte_storage *reload_next_rte; /* Route iterator in in_table used during reloading */
u8 reload_active; /* Iterator reload_fit is linked */
u8 reload_pending; /* Reloading and another reload is scheduled */
u8 refeed_pending; /* Refeeding and another refeed is scheduled */

View File

@ -196,7 +196,7 @@ struct rt_subscription {
#define NHU_DIRTY 3
typedef struct network {
struct rte *routes; /* Available routes for this network */
struct rte_storage *routes; /* Available routes for this network */
struct fib_node n; /* FIB flags reserved for kernel syncer */
} net;
@ -228,40 +228,60 @@ struct hostentry {
};
typedef struct rte {
struct rte *next;
struct rta *attrs; /* Attributes of this route */
const net_addr *net; /* Network this RTE belongs to */
struct rte_src *src; /* Route source that created the route */
} rte;
struct rte_storage {
struct rte_storage *next; /* Next in chain */
struct rta *attrs; /* Attributes of this route */
net *net; /* Network this RTE belongs to */
struct rte_src *src; /* Route source that created the route */
struct channel *sender; /* Channel used to send the route to the routing table */
struct rta *attrs; /* Attributes of this route */
u32 id; /* Table specific route id */
byte flags; /* Flags (REF_...) */
byte pflags; /* Protocol-specific flags */
btime lastmod; /* Last modified */
} rte;
/* Route export structure. Protocols get this structure as an information about
* new routes on the channel. */
struct rte_export {
net_addr *net; /* Network information */
rte *new; /* New route (NULL for withdraw) */
struct rte_src *new_src; /* New route src (kept if route is rejected by preexport or filter) */
rte *old; /* Old route (only if export table is on) */
struct rte_src *old_src; /* Old route src */
};
#define REF_COW 1 /* Copy this rte on write */
#define REF_FILTERED 2 /* Route is rejected by import filter */
#define REF_STALE 4 /* Route is stale in a refresh cycle */
#define REF_DISCARD 8 /* Route is scheduled for discard */
#define REF_MODIFY 16 /* Route is scheduled for modify */
/* Route is valid for propagation (may depend on other flags in the future), accepts NULL */
static inline int rte_is_valid(rte *r) { return r && !(r->flags & REF_FILTERED); }
static inline int rte_is_valid(const struct rte_storage *r) { return r && !(r->flags & REF_FILTERED); }
/* Route just has REF_FILTERED flag */
static inline int rte_is_filtered(rte *r) { return !!(r->flags & REF_FILTERED); }
static inline int rte_is_filtered(const struct rte_storage *r) { return !!(r->flags & REF_FILTERED); }
/* Route export structure. Protocols get this structure as an information about
* new routes on the channel.
*
* In new:
* if all fields are set then the route is to be exported,
* if all fields are set but attrs == NULL then this route has been filtered out,
* if all fields are NULL then there is no route at all to announce.
*
* In old:
* if all fields are set then this is the complete previously exported route,
* if all fields are set but attrs == NULL then this route was exported last time
* but we don't run filters on it once more to get the modified version,
* if all fields are NULL then there was no route at all to withdraw.
*/
struct rte_export {
rte new; /* New route */
rte old; /* Old route */
u32 new_id, old_id; /* Table specific route id for channel-private use */
};
#define RTE_EXPORT_NEW_OK(ep) (!!ep->new.attrs)
#define RTE_EXPORT_OLD_OK(ep) (!!ep->old.net)
#define RTE_EXPORT_IS_OK(ep) (RTE_EXPORT_NEW_OK(ep) || RTE_EXPORT_OLD_OK(ep))
/* Types of route announcement, also used as flags */
#define RA_UNDEF 0 /* Undefined RA type */
@ -279,19 +299,18 @@ static inline int rte_is_filtered(rte *r) { return !!(r->flags & REF_FILTERED);
/**
* rte_update - enter a new update to a routing table
* @c: channel doing the update
* @net: network address
* @rte: a &rte representing the new route
*
* This function imports a new route to the appropriate table (via the channel).
* Table keys are @net (obligatory) and @rte->attrs->src.
* Both the @net and @rte pointers can be local.
* Table keys are @rte->net (obligatory) and @rte->src.
* The @rte pointer can be local as well as @rte->net. The @rte->src must be
* either the protocol's main_source, or looked-up by rt_get_source().
* The @rte pointer must be writable.
*
* The route attributes (@rte->attrs) are obligatory. They can be also allocated
* locally. Anyway, if you use an already-cached attribute object, you shall
* call rta_clone() on that object yourself. (This semantics may change in future.)
*
* If the route attributes are local, you may set @rte->attrs->src to NULL, then
* the protocol's default route source will be supplied.
* For an update, the route attributes (@rte->attrs) are obligatory.
* They can be also allocated locally. If you use an already-cached
* attribute object, this function returns keeping your reference
* for yourself. No attributes means withdraw.
*
* When rte_update() gets a route, it automatically validates it. This includes
* checking for validity of the given network and next hop addresses and also
@ -305,21 +324,11 @@ static inline int rte_is_filtered(rte *r) { return !!(r->flags & REF_FILTERED);
* All memory used for temporary allocations is taken from a special linpool
* @rte_update_pool and freed when rte_update() finishes.
*/
void rte_update(struct channel *c, const net_addr *net, struct rte *rte);
/**
* rte_withdraw - withdraw a route from a routing table
* @c: channel doing the withdraw
* @net: network address
* @src: the route source identifier
*
* This function withdraws a previously announced route from the table.
* No import filter is called. This function is idempotent. If no route
* is found under the given key, it does nothing.
*
* If @src is NULL, the protocol's default route source is used.
*/
void rte_withdraw(struct channel *c, const net_addr *net, struct rte_src *src);
void rte_update(struct channel *c, struct rte *rte) NONNULL(1,2);
static inline void rte_withdraw(struct channel *c, const net_addr *net, struct rte_src *src)
{
rte e = { .net = net, .src = src}; rte_update(c, &e);
}
extern list routing_tables;
struct config;
@ -338,18 +347,18 @@ static inline net *net_find_valid(rtable *tab, const net_addr *addr)
static inline net *net_get(rtable *tab, const net_addr *addr) { return (net *) fib_get(&tab->fib, addr); }
void *net_route(rtable *tab, const net_addr *n);
int net_roa_check(rtable *tab, const net_addr *n, u32 asn);
rte *rte_find(net *net, struct rte_src *src);
rte *rt_export_merged(struct channel *c, net *net, rte **rt_free, linpool *pool, int silent);
struct rte_storage *rte_find(net *net, struct rte_src *src);
_Bool rt_export_merged(struct channel *c, net *net, rte *best, linpool *pool, int silent);
void rt_refresh_begin(rtable *t, struct channel *c);
void rt_refresh_end(rtable *t, struct channel *c);
void rt_modify_stale(rtable *t, struct channel *c);
void rt_schedule_prune(rtable *t);
void rte_dump(rte *);
void rte_free(rte *);
rte *rte_do_cow(rte *);
rte *rte_store(rte *);
static inline rte * rte_cow(rte *r) { return (r->flags & REF_COW) ? rte_do_cow(r) : r; }
rte *rte_cow_rta(rte *r, linpool *lp);
void rte_dump(struct rte_storage *);
void rte_free(struct rte_storage *);
struct rte_storage *rte_store(const rte *, net *n);
void rte_copy_metadata(struct rte_storage *dest, struct rte_storage *src);
static inline rte rte_copy(const struct rte_storage *r)
{ return (rte) { .attrs = r->attrs, .net = r->net->n.addr, .src = r->src }; }
void rt_dump(rtable *);
void rt_dump_all(void);
int rt_feed_channel(struct channel *c);
@ -358,7 +367,7 @@ void rt_feed_channel_abort(struct channel *c);
int rt_reload_channel(struct channel *c);
void rt_reload_channel_abort(struct channel *c);
void rt_prune_sync(rtable *t, int all);
int rte_update_out(struct channel *c, const net_addr *n, struct rte_src *src, rte *new, rte **old_exported, int refeed);
int rte_update_out(struct channel *c, rte *new, rte *old, struct rte_storage **old_stored, int refeed);
struct rtable_config *rt_new_table(struct symbol *s, uint addr_type);
@ -679,7 +688,7 @@ void rta_dump(rta *);
void rta_dump_all(void);
void rta_show(struct cli *, rta *);
u32 rt_get_igp_metric(rte *rt);
u32 rt_get_igp_metric(struct rta *);
struct hostentry * rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep);
void rta_apply_hostentry(rta *a, struct hostentry *he, mpls_label_stack *mls);

View File

@ -85,9 +85,10 @@ dev_ifa_notify(struct proto *P, uint flags, struct ifa *ad)
};
rte e0 = {
.src = rt_get_source(P, ad->iface->index),
.net = net,
.attrs = &a0,
};
rte_update(c, net, &e0);
rte_update(c, &e0);
}
}

View File

@ -37,16 +37,16 @@ rt_show_get_kernel(struct rt_show_data *d)
}
static void
rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, int primary)
rt_show_rte(struct cli *c, byte *ia, struct rte *e, struct rte_storage *er, struct rt_show_data *d)
{
byte from[IPA_MAX_TEXT_LENGTH+8];
byte tm[TM_DATETIME_BUFFER_SIZE], info[256];
rta *a = e->attrs;
int sync_error = d->kernel ? krt_get_sync_error(d->kernel, e) : 0;
void (*get_route_info)(struct rte *, byte *buf);
int sync_error = d->kernel ? krt_get_sync_error(d->kernel, er->id) : 0;
void (*get_route_info)(struct rte *, struct rte_storage *, byte *);
struct nexthop *nh;
tm_format_time(tm, &config->tf_route, e->lastmod);
tm_format_time(tm, &config->tf_route, er->lastmod);
if (ipa_nonzero(a->from) && !ipa_equal(a->from, a->nh.gw))
bsprintf(from, " from %I", a->from);
else
@ -58,7 +58,7 @@ rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, int primary
get_route_info = e->src->proto->proto->get_route_info;
if (get_route_info)
get_route_info(e, info);
get_route_info(e, er, info);
else
bsprintf(info, " (%d)", a->pref);
@ -66,7 +66,7 @@ rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, int primary
rt_show_table(c, d);
cli_printf(c, -1007, "%-20s %s [%s %s%s]%s%s", ia, rta_dest_name(a->dest),
e->src->proto->name, tm, from, primary ? (sync_error ? " !" : " *") : "", info);
e->src->proto->name, tm, from, (er == er->net->routes) ? (sync_error ? " !" : " *") : "", info);
if (a->dest == RTD_UNICAST)
for (nh = &(a->nh); nh; nh = nh->next)
@ -101,7 +101,6 @@ rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, int primary
static void
rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
{
rte *e, *ee;
byte ia[NET_MAX_TEXT_LENGTH+1];
struct channel *ec = d->tab->export_channel;
@ -114,9 +113,9 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
bsnprintf(ia, sizeof(ia), "%N", n->n.addr);
for (e = n->routes; e; e = e->next)
for (struct rte_storage *er = n->routes; er; er = er->next)
{
if (rte_is_filtered(e) != d->filtered)
if (rte_is_filtered(er) != d->filtered)
continue;
d->rt_counter++;
@ -126,7 +125,7 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
if (pass)
continue;
ee = e;
struct rte e = rte_copy(er);
/* Export channel is down, do not try to export routes to it */
if (ec && (ec->export_state == ES_DOWN))
@ -134,7 +133,7 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
if (d->export_mode == RSEM_EXPORTED)
{
if (!bmap_test(&ec->export_map, ee->id))
if (!bmap_test(&ec->export_map, er->id))
goto skip;
// if (ec->ra_mode != RA_ANY)
@ -143,17 +142,14 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
else if ((d->export_mode == RSEM_EXPORT) && (ec->ra_mode == RA_MERGED))
{
/* Special case for merged export */
rte *rt_free;
e = rt_export_merged(ec, n, &rt_free, c->show_pool, 1);
pass = 1;
if (!e)
{ e = ee; goto skip; }
if (!rt_export_merged(ec, n, &e, c->show_pool, 1))
goto skip;
}
else if (d->export_mode)
{
struct proto *ep = ec->proto;
int ic = ep->preexport ? ep->preexport(ep, e) : 0;
int ic = ep->preexport ? ep->preexport(ec, &e) : 0;
if (ec->ra_mode == RA_OPTIMAL || ec->ra_mode == RA_MERGED)
pass = 1;
@ -179,24 +175,19 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
}
}
if (d->show_protocol && (d->show_protocol != e->src->proto))
if (d->show_protocol && (d->show_protocol != er->src->proto))
goto skip;
if (f_run(d->filter, &e, c->show_pool, 0) > F_ACCEPT)
goto skip;
if (d->stats < 2)
rt_show_rte(c, ia, e, d, (e->net->routes == ee));
rt_show_rte(c, ia, &e, er, d);
d->show_counter++;
ia[0] = 0;
skip:
if (e != ee)
{
rte_free(e);
e = ee;
}
lp_flush(c->show_pool);
if (d->primary_only)

File diff suppressed because it is too large Load Diff

View File

@ -671,12 +671,13 @@ babel_announce_rte(struct babel_proto *p, struct babel_entry *e)
a0.nh.flags = RNF_ONLINK;
rte e0 = {
.net = e->n.addr,
.src = p->p.main_source,
.attrs = &a0,
};
e->unreachable = 0;
rte_update(c, e->n.addr, &e0);
rte_update(c, &e0);
}
else if (e->valid && (e->router_id != p->router_id))
{
@ -689,12 +690,13 @@ babel_announce_rte(struct babel_proto *p, struct babel_entry *e)
};
rte e0 = {
.net = e->n.addr,
.src = p->p.main_source,
.attrs = &a0,
};
e->unreachable = 1;
rte_update(c, e->n.addr, &e0);
rte_update(c, &e0);
}
else
{
@ -1873,12 +1875,12 @@ babel_dump(struct proto *P)
}
static void
babel_get_route_info(rte *rte, byte *buf)
babel_get_route_info(struct rte *e, struct rte_storage *er UNUSED, byte *buf)
{
u64 rid;
memcpy(&rid, ea_find(rte->attrs->eattrs, EA_BABEL_ROUTER_ID)->u.ptr->data, sizeof(u64));
buf += bsprintf(buf, " (%d/%d) [%lR]", rte->attrs->pref,
ea_find(rte->attrs->eattrs, EA_BABEL_METRIC)->u.data, rid);
memcpy(&rid, ea_find(e->attrs->eattrs, EA_BABEL_ROUTER_ID)->u.ptr->data, sizeof(u64));
buf += bsprintf(buf, " (%d/%d) [%lR]", e->attrs->pref,
ea_find(e->attrs->eattrs, EA_BABEL_METRIC)->u.data, rid);
}
static int
@ -2100,10 +2102,10 @@ babel_kick_timer(struct babel_proto *p)
static int
babel_preexport(struct proto *P, struct rte *new)
babel_preexport(struct channel *c, struct rte *new)
{
/* Reject our own unreachable routes */
if ((new->attrs->dest == RTD_UNREACHABLE) && (new->src->proto == P))
if ((new->attrs->dest == RTD_UNREACHABLE) && (new->src->proto == c->proto))
return -1;
return 0;
@ -2119,15 +2121,15 @@ babel_rt_notify(struct channel *c, struct rte_export *export)
struct babel_proto *p = (void *) c->proto;
struct babel_entry *e;
if (export->new)
if (export->new.attrs)
{
/* Update */
uint rt_metric = ea_get_int(export->new->attrs->eattrs, EA_BABEL_METRIC, 0);
uint rt_seqno = ea_get_int(export->new->attrs->eattrs, EA_BABEL_SEQNO, p->update_seqno);
uint rt_metric = ea_get_int(export->new.attrs->eattrs, EA_BABEL_METRIC, 0);
uint rt_seqno = ea_get_int(export->new.attrs->eattrs, EA_BABEL_SEQNO, p->update_seqno);
u64 rt_router_id;
eattr *ea;
if (ea = ea_find(export->new->attrs->eattrs, EA_BABEL_ROUTER_ID))
if (ea = ea_find(export->new.attrs->eattrs, EA_BABEL_ROUTER_ID))
memcpy(&rt_router_id, ea->u.ptr->data, sizeof(u64));
else
rt_router_id = p->router_id;
@ -2135,11 +2137,11 @@ babel_rt_notify(struct channel *c, struct rte_export *export)
if (rt_metric > BABEL_INFINITY)
{
log(L_WARN "%s: Invalid babel_metric value %u for route %N",
p->p.name, rt_metric, export->net);
p->p.name, rt_metric, export->new.net);
rt_metric = BABEL_INFINITY;
}
e = babel_get_entry(p, export->net);
e = babel_get_entry(p, export->new.net);
/* Activate triggered updates */
if ((e->valid != BABEL_ENTRY_VALID) ||
@ -2157,7 +2159,7 @@ babel_rt_notify(struct channel *c, struct rte_export *export)
else
{
/* Withdraw */
e = babel_find_entry(p, export->net);
e = babel_find_entry(p, export->old.net);
if (!e || e->valid != BABEL_ENTRY_VALID)
return;
@ -2171,7 +2173,7 @@ babel_rt_notify(struct channel *c, struct rte_export *export)
}
static int
babel_rte_better(struct rte *new, struct rte *old)
babel_rte_better(struct rte_storage *new, struct rte_storage *old)
{
uint new_metric = ea_find(new->attrs->eattrs, EA_BABEL_SEQNO)->u.data;
uint old_metric = ea_find(old->attrs->eattrs, EA_BABEL_SEQNO)->u.data;

View File

@ -333,26 +333,26 @@ bgp_aigp_set_metric(struct linpool *pool, const struct adata *ad, u64 metric)
}
int
bgp_total_aigp_metric_(rte *e, u64 *metric, const struct adata **ad)
bgp_total_aigp_metric_(struct rta *a, u64 *metric, const struct adata **ad)
{
eattr *a = ea_find(e->attrs->eattrs, EA_CODE(PROTOCOL_BGP, BA_AIGP));
if (!a)
eattr *ea = ea_find(a->eattrs, EA_CODE(PROTOCOL_BGP, BA_AIGP));
if (!ea)
return 0;
const byte *b = bgp_aigp_get_tlv(a->u.ptr, BGP_AIGP_METRIC);
const byte *b = bgp_aigp_get_tlv(ea->u.ptr, BGP_AIGP_METRIC);
if (!b)
return 0;
u64 aigp = get_u64(b + 3);
u64 step = e->attrs->igp_metric;
u64 step = a->igp_metric;
if (!rte_resolvable(e) || (step >= IGP_METRIC_UNKNOWN))
if (!rta_resolvable(a) || (step >= IGP_METRIC_UNKNOWN))
step = BGP_AIGP_MAX;
if (!step)
step = 1;
*ad = a->u.ptr;
*ad = ea->u.ptr;
*metric = aigp + step;
if (*metric < aigp)
*metric = BGP_AIGP_MAX;
@ -361,12 +361,12 @@ bgp_total_aigp_metric_(rte *e, u64 *metric, const struct adata **ad)
}
static inline int
bgp_init_aigp_metric(rte *e, u64 *metric, const struct adata **ad)
bgp_init_aigp_metric(rta *a, u64 *metric, const struct adata **ad)
{
if (e->attrs->source == RTS_BGP)
if (a->source == RTS_BGP)
return 0;
*metric = rt_get_igp_metric(e);
*metric = rt_get_igp_metric(a);
*ad = NULL;
return *metric < IGP_METRIC_UNKNOWN;
}
@ -896,7 +896,7 @@ bgp_decode_large_community(struct bgp_parse_state *s, uint code UNUSED, uint fla
static void
bgp_export_mpls_label_stack(struct bgp_export_state *s, eattr *a)
{
net_addr *n = s->route->net->n.addr;
const net_addr *n = s->route->net;
u32 *labels = (u32 *) a->u.ptr->data;
uint lnum = a->u.ptr->length / 4;
@ -1617,7 +1617,7 @@ bgp_free_prefix_table(struct bgp_channel *c)
}
static struct bgp_prefix *
bgp_get_prefix(struct bgp_channel *c, net_addr *net, u32 path_id)
bgp_get_prefix(struct bgp_channel *c, const net_addr *net, u32 path_id)
{
u32 hash = net_hash(net) ^ u32_hash(path_id);
struct bgp_prefix *px = HASH_FIND(c->prefix_hash, PXH, net, path_id, hash);
@ -1661,10 +1661,10 @@ bgp_free_prefix(struct bgp_channel *c, struct bgp_prefix *px)
*/
int
bgp_preexport(struct proto *P, rte *e)
bgp_preexport(struct channel *c, rte *e)
{
struct proto *SRC = e->src->proto;
struct bgp_proto *p = (struct bgp_proto *) P;
struct bgp_proto *p = (struct bgp_proto *) (c->proto);
struct bgp_proto *src = (SRC->proto == &proto_bgp) ? (struct bgp_proto *) SRC : NULL;
/* Reject our routes */
@ -1689,11 +1689,11 @@ bgp_preexport(struct proto *P, rte *e)
}
/* Handle well-known communities, RFC 1997 */
struct eattr *c;
struct eattr *com;
if (p->cf->interpret_communities &&
(c = ea_find(e->attrs->eattrs, EA_CODE(PROTOCOL_BGP, BA_COMMUNITY))))
(com = ea_find(e->attrs->eattrs, EA_CODE(PROTOCOL_BGP, BA_COMMUNITY))))
{
const struct adata *d = c->u.ptr;
const struct adata *d = com->u.ptr;
/* Do not export anywhere */
if (int_set_contains(d, BGP_COMM_NO_ADVERTISE))
@ -1773,8 +1773,8 @@ bgp_update_attrs(struct bgp_proto *p, struct bgp_channel *c, rte *e, ea_list *at
/* AIGP attribute - accumulate local metric or originate new one */
u64 metric;
if (s.local_next_hop &&
(bgp_total_aigp_metric_(e, &metric, &ad) ||
(c->cf->aigp_originate && bgp_init_aigp_metric(e, &metric, &ad))))
(bgp_total_aigp_metric_(e->attrs, &metric, &ad) ||
(c->cf->aigp_originate && bgp_init_aigp_metric(e->attrs, &metric, &ad))))
{
ad = bgp_aigp_set_metric(pool, ad, metric);
bgp_set_attr_ptr(&attrs, pool, BA_AIGP, 0, ad);
@ -1840,23 +1840,28 @@ bgp_rt_notify(struct channel *C, struct rte_export *e)
struct bgp_prefix *px;
u32 path;
if (e->new)
const net_addr *n;
if (e->new.attrs)
{
struct ea_list *attrs = bgp_update_attrs(p, c, e->new, e->new->attrs->eattrs, bgp_linpool2);
struct ea_list *attrs = bgp_update_attrs(p, c, &(e->new), e->new.attrs->eattrs, bgp_linpool2);
/* If attributes are invalid, we fail back to withdraw */
buck = attrs ? bgp_get_bucket(c, attrs) : bgp_get_withdraw_bucket(c);
path = e->new_src->global_id;
path = e->new.src->global_id;
lp_flush(bgp_linpool2);
n = e->new.net;
}
else
{
buck = bgp_get_withdraw_bucket(c);
path = e->old_src->global_id;
path = e->old.src->global_id;
n = e->old.net;
}
px = bgp_get_prefix(c, e->net, c->add_path_tx ? path : 0);
px = bgp_get_prefix(c, n, c->add_path_tx ? path : 0);
add_tail(&buck->prefixes, &px->buck_node);
bgp_schedule_packet(p->conn, c, PKT_UPDATE);
@ -1864,7 +1869,7 @@ bgp_rt_notify(struct channel *C, struct rte_export *e)
static inline u32
bgp_get_neighbor(rte *r)
bgp_get_neighbor(struct rte_storage *r)
{
eattr *e = ea_find(r->attrs->eattrs, EA_CODE(PROTOCOL_BGP, BA_AS_PATH));
u32 as;
@ -1878,7 +1883,7 @@ bgp_get_neighbor(rte *r)
}
static inline int
rte_stale(rte *r)
rte_stale(struct rte_storage *r)
{
if (r->pflags & BGP_REF_STALE)
return 1;
@ -1901,7 +1906,7 @@ rte_stale(rte *r)
}
int
bgp_rte_better(rte *new, rte *old)
bgp_rte_better(struct rte_storage *new, struct rte_storage *old)
{
struct bgp_proto *new_bgp = (struct bgp_proto *) new->src->proto;
struct bgp_proto *old_bgp = (struct bgp_proto *) old->src->proto;
@ -1917,8 +1922,8 @@ bgp_rte_better(rte *new, rte *old)
return 1;
/* RFC 4271 9.1.2.1. Route resolvability test */
n = rte_resolvable(new);
o = rte_resolvable(old);
n = rta_resolvable(new->attrs);
o = rta_resolvable(old->attrs);
if (n > o)
return 1;
if (n < o)
@ -1943,8 +1948,8 @@ bgp_rte_better(rte *new, rte *old)
return 0;
/* RFC 7311 4.1 - Apply AIGP metric */
u64 n2 = bgp_total_aigp_metric(new);
u64 o2 = bgp_total_aigp_metric(old);
u64 n2 = bgp_total_aigp_metric(new->attrs);
u64 o2 = bgp_total_aigp_metric(old->attrs);
if (n2 < o2)
return 1;
if (n2 > o2)
@ -2046,7 +2051,7 @@ bgp_rte_better(rte *new, rte *old)
int
bgp_rte_mergable(rte *pri, rte *sec)
bgp_rte_mergable(struct rte_storage *pri, struct rte_storage *sec)
{
struct bgp_proto *pri_bgp = (struct bgp_proto *) pri->src->proto;
struct bgp_proto *sec_bgp = (struct bgp_proto *) sec->src->proto;
@ -2059,7 +2064,7 @@ bgp_rte_mergable(rte *pri, rte *sec)
return 0;
/* RFC 4271 9.1.2.1. Route resolvability test */
if (rte_resolvable(pri) != rte_resolvable(sec))
if (rta_resolvable(pri->attrs) != rta_resolvable(sec->attrs))
return 0;
/* Start with local preferences */
@ -2122,23 +2127,23 @@ bgp_rte_mergable(rte *pri, rte *sec)
static inline int
same_group(rte *r, u32 lpref, u32 lasn)
same_group(struct rte_storage *r, u32 lpref, u32 lasn)
{
return (r->attrs->pref == lpref) && (bgp_get_neighbor(r) == lasn);
}
static inline int
use_deterministic_med(rte *r)
use_deterministic_med(struct rte_storage *r)
{
struct proto *P = r->src->proto;
return (P->proto == &proto_bgp) && ((struct bgp_proto *) P)->cf->deterministic_med;
}
int
bgp_rte_recalculate(rtable *table, net *net, rte *new, rte *old, rte *old_best)
bgp_rte_recalculate(rtable *table, net *net, struct rte_storage *new, struct rte_storage *old, struct rte_storage *old_best)
{
rte *r, *s;
rte *key = new ? new : old;
struct rte_storage *r, *s;
struct rte_storage *key = new ? new : old;
u32 lpref = key->attrs->pref;
u32 lasn = bgp_get_neighbor(key);
int old_suppressed = old ? !!(old->pflags & BGP_REF_SUPPRESSED) : 0;
@ -2261,25 +2266,31 @@ bgp_rte_recalculate(rtable *table, net *net, rte *new, rte *old, rte *old_best)
return !old_suppressed;
}
struct rte *
bgp_rte_modify_stale(struct rte *r, struct linpool *pool)
struct rta *
bgp_rte_modify_stale(struct rte_storage *r, struct linpool *pool)
{
eattr *a = ea_find(r->attrs->eattrs, EA_CODE(PROTOCOL_BGP, BA_COMMUNITY));
const struct adata *ad = a ? a->u.ptr : NULL;
uint flags = a ? a->flags : BAF_PARTIAL;
eattr *ea = ea_find(r->attrs->eattrs, EA_CODE(PROTOCOL_BGP, BA_COMMUNITY));
const struct adata *ad = ea ? ea->u.ptr : NULL;
uint flags = ea ? ea->flags : BAF_PARTIAL;
if (ad && int_set_contains(ad, BGP_COMM_NO_LLGR))
return NULL;
if (ad && int_set_contains(ad, BGP_COMM_LLGR_STALE))
return r;
return r->attrs;
r = rte_cow_rta(r, pool);
bgp_set_attr_ptr(&(r->attrs->eattrs), pool, BA_COMMUNITY, flags,
_Thread_local static struct {
rta a;
u32 labels[MPLS_MAX_LABEL_STACK];
} aloc;
struct rta *a = &(aloc.a);
memcpy(a, r->attrs, rta_size(r->attrs));
bgp_set_attr_ptr(&(a->eattrs), pool, BA_COMMUNITY, flags,
int_set_add(pool, ad, BGP_COMM_LLGR_STALE));
r->pflags |= BGP_REF_STALE;
return r;
return a;
}
@ -2356,7 +2367,7 @@ bgp_get_attr(const eattr *a, byte *buf, int buflen)
}
void
bgp_get_route_info(rte *e, byte *buf)
bgp_get_route_info(struct rte *e, struct rte_storage *er, byte *buf)
{
eattr *p = ea_find(e->attrs->eattrs, EA_CODE(PROTOCOL_BGP, BA_AS_PATH));
eattr *o = ea_find(e->attrs->eattrs, EA_CODE(PROTOCOL_BGP, BA_ORIGIN));
@ -2364,20 +2375,20 @@ bgp_get_route_info(rte *e, byte *buf)
buf += bsprintf(buf, " (%d", e->attrs->pref);
if (e->pflags & BGP_REF_SUPPRESSED)
if (er->pflags & BGP_REF_SUPPRESSED)
buf += bsprintf(buf, "-");
if (rte_stale(e))
if (rte_stale(er))
buf += bsprintf(buf, "s");
u64 metric = bgp_total_aigp_metric(e);
u64 metric = bgp_total_aigp_metric(e->attrs);
if (metric < BGP_AIGP_MAX)
{
buf += bsprintf(buf, "/%lu", metric);
}
else if (e->attrs->igp_metric)
{
if (!rte_resolvable(e))
if (!rta_resolvable(e->attrs))
buf += bsprintf(buf, "/-");
else if (e->attrs->igp_metric >= IGP_METRIC_UNKNOWN)
buf += bsprintf(buf, "/?");

View File

@ -517,9 +517,9 @@ struct rte_source *bgp_find_source(struct bgp_proto *p, u32 path_id);
struct rte_source *bgp_get_source(struct bgp_proto *p, u32 path_id);
static inline int
rte_resolvable(rte *rt)
rta_resolvable(rta *a)
{
return rt->attrs->dest == RTD_UNICAST;
return a->dest == RTD_UNICAST;
}
@ -582,26 +582,26 @@ void bgp_init_prefix_table(struct bgp_channel *c);
void bgp_free_prefix_table(struct bgp_channel *c);
void bgp_free_prefix(struct bgp_channel *c, struct bgp_prefix *bp);
int bgp_rte_better(struct rte *, struct rte *);
int bgp_rte_mergable(rte *pri, rte *sec);
int bgp_rte_recalculate(rtable *table, net *net, rte *new, rte *old, rte *old_best);
struct rte *bgp_rte_modify_stale(struct rte *r, struct linpool *pool);
int bgp_rte_better(struct rte_storage *, struct rte_storage *);
int bgp_rte_mergable(struct rte_storage *pri, struct rte_storage *sec);
int bgp_rte_recalculate(rtable *table, net *net, struct rte_storage *new, struct rte_storage *old, struct rte_storage *old_best);
struct rta *bgp_rte_modify_stale(struct rte_storage *r, struct linpool *pool);
void bgp_rt_notify(struct channel *C, struct rte_export *e);
int bgp_preexport(struct proto *, struct rte *);
int bgp_preexport(struct channel *, struct rte *);
int bgp_get_attr(const struct eattr *e, byte *buf, int buflen);
void bgp_get_route_info(struct rte *, byte *buf);
int bgp_total_aigp_metric_(rte *e, u64 *metric, const struct adata **ad);
void bgp_get_route_info(struct rte *, struct rte_storage *, byte *);
int bgp_total_aigp_metric_(rta *a, u64 *metric, const struct adata **ad);
#define BGP_AIGP_METRIC 1
#define BGP_AIGP_MAX U64(0xffffffffffffffff)
static inline u64
bgp_total_aigp_metric(rte *r)
bgp_total_aigp_metric(rta *a)
{
u64 metric = BGP_AIGP_MAX;
const struct adata *ad;
bgp_total_aigp_metric_(r, &metric, &ad);
bgp_total_aigp_metric_(a, &metric, &ad);
return metric;
}

View File

@ -1359,11 +1359,12 @@ bgp_rte_update(struct bgp_parse_state *s, net_addr *n, u32 path_id, rta *a0)
}
rte e0 = {
.attrs = rta_clone(s->cached_rta),
.net = n,
.src = s->last_src,
.attrs = s->cached_rta,
};
rte_update(&(s->channel->c), n, &e0);
rte_update(&(s->channel->c), &e0);
}
static void

View File

@ -457,12 +457,12 @@ mrt_rib_table_entry_bgp_attrs(struct mrt_table_dump_state *s, rte *r)
return;
fail:
mrt_log(s, "Attribute list too long for %N", r->net->n.addr);
mrt_log(s, "Attribute list too long for %N", r->net);
}
#endif
static void
mrt_rib_table_entry(struct mrt_table_dump_state *s, rte *r)
mrt_rib_table_entry(struct mrt_table_dump_state *s, rte *r, struct rte_storage *er)
{
buffer *b = &s->buf;
uint peer = 0;
@ -481,7 +481,7 @@ mrt_rib_table_entry(struct mrt_table_dump_state *s, rte *r)
/* Peer Index and Originated Time */
mrt_put_u16(b, peer);
mrt_put_u32(b, (r->lastmod + s->time_offset) TO_S);
mrt_put_u32(b, (er->lastmod + s->time_offset) TO_S);
/* Path Identifier */
if (s->add_path)
@ -509,8 +509,7 @@ mrt_rib_table_dump(struct mrt_table_dump_state *s, net *n, int add_path)
mrt_init_message(&s->buf, MRT_TABLE_DUMP_V2, subtype);
mrt_rib_table_header(s, n->n.addr);
rte *rt, *rt0;
for (rt0 = n->routes; rt = rt0; rt0 = rt0->next)
for (struct rte_storage *rt = n->routes; rt; rt = rt->next)
{
if (rte_is_filtered(rt))
continue;
@ -522,11 +521,10 @@ mrt_rib_table_dump(struct mrt_table_dump_state *s, net *n, int add_path)
continue;
}
if (f_run(s->filter, &rt, s->linpool, 0) <= F_ACCEPT)
mrt_rib_table_entry(s, rt);
rte e = rte_copy(rt);
if (rt != rt0)
rte_free(rt);
if (f_run(s->filter, &e, s->linpool, 0) <= F_ACCEPT)
mrt_rib_table_entry(s, &e, rt);
lp_flush(s->linpool);
}

View File

@ -107,9 +107,9 @@
#include <stdlib.h>
#include "ospf.h"
static int ospf_preexport(struct proto *P, rte *new);
static int ospf_preexport(struct channel *c, rte *new);
static void ospf_reload_routes(struct channel *C);
static int ospf_rte_better(struct rte *new, struct rte *old);
static int ospf_rte_better(struct rte_storage *new, struct rte_storage *old);
static void ospf_disp(timer *timer);
@ -382,7 +382,7 @@ ospf_init(struct proto_config *CF)
/* If new is better return 1 */
static int
ospf_rte_better(struct rte *new, struct rte *old)
ospf_rte_better(struct rte_storage *new, struct rte_storage *old)
{
u32 new_metric1 = ea_get_int(new->attrs->eattrs, EA_OSPF_METRIC1, LSINFINITY);
@ -471,13 +471,13 @@ ospf_disp(timer * timer)
* import to the filters.
*/
static int
ospf_preexport(struct proto *P, rte *e)
ospf_preexport(struct channel *c, rte *e)
{
struct ospf_proto *p = (struct ospf_proto *) P;
struct ospf_proto *p = (struct ospf_proto *) (c->proto);
struct ospf_area *oa = ospf_main_area(p);
/* Reject our own routes */
if (e->src->proto == P)
if (e->src->proto == c->proto)
return -1;
/* Do not export routes to stub areas */
@ -552,7 +552,7 @@ ospf_get_status(struct proto *P, byte * buf)
}
static void
ospf_get_route_info(rte * rte, byte * buf)
ospf_get_route_info(rte * rte, struct rte_storage * er UNUSED, byte * buf)
{
char *type = "<bug>";

View File

@ -2098,15 +2098,16 @@ again1:
rte e0 = {
.attrs = rta_lookup(&a0),
.src = p->p.main_source,
.net = nf->fn.addr,
};
rta_free(nf->old_rta);
nf->old_rta = rta_clone(e0.attrs);
nf->old_rta = e0.attrs;
DBG("Mod rte type %d - %N via %I on iface %s, met %d\n",
a0.source, nf->fn.addr, a0.gw, a0.iface ? a0.iface->name : "(none)", nf->n.metric1);
rte_update(p->p.main_channel, nf->fn.addr, &e0);
rte_update(p->p.main_channel, &e0);
}
}
else if (nf->old_rta)

View File

@ -1317,9 +1317,9 @@ ospf_rt_notify(struct channel *ch, struct rte_export *e)
if ((p->areano == 1) && oa_is_nssa(HEAD(p->area_list)))
oa = HEAD(p->area_list);
if (!e->new)
if (!e->new.attrs)
{
nf = fib_find(&p->rtf, e->net);
nf = fib_find(&p->rtf, e->old.net);
if (!nf || !nf->external_rte)
return;
@ -1337,7 +1337,7 @@ ospf_rt_notify(struct channel *ch, struct rte_export *e)
ASSERT(p->asbr);
/* Get route attributes */
rta *a = e->new->attrs;
rta *a = e->new.attrs;
eattr *m1a = ea_find(a->eattrs, EA_OSPF_METRIC1);
eattr *m2a = ea_find(a->eattrs, EA_OSPF_METRIC2);
uint m1 = m1a ? m1a->u.data : 0;
@ -1346,14 +1346,14 @@ ospf_rt_notify(struct channel *ch, struct rte_export *e)
if (m1 > LSINFINITY)
{
log(L_WARN "%s: Invalid ospf_metric1 value %u for route %N",
p->p.name, m1, e->net);
p->p.name, m1, e->new.net);
m1 = LSINFINITY;
}
if (m2 > LSINFINITY)
{
log(L_WARN "%s: Invalid ospf_metric2 value %u for route %N",
p->p.name, m2, e->net);
p->p.name, m2, e->new.net);
m2 = LSINFINITY;
}
@ -1377,12 +1377,12 @@ ospf_rt_notify(struct channel *ch, struct rte_export *e)
if (ipa_zero(fwd))
{
log(L_ERR "%s: Cannot find forwarding address for NSSA-LSA %N",
p->p.name, e->net);
p->p.name, e->new.net);
return;
}
}
nf = fib_get(&p->rtf, e->net);
nf = fib_get(&p->rtf, e->new.net);
ospf_originate_ext_lsa(p, oa, nf, LSA_M_EXPORT, metric, ebit, fwd, tag, 1, p->vpn_pe);
nf->external_rte = 1;
}

View File

@ -165,8 +165,9 @@ perf_loop(void *data)
rte e0 = {
.attrs = p->data[i].a,
.src = p->p.main_source,
.net = &(p->data[i].net),
};
rte_update(P->main_channel, &(p->data[i].net), &e0);
rte_update(P->main_channel, &e0);
}
clock_gettime(CLOCK_MONOTONIC, &ts_update);
@ -177,6 +178,9 @@ perf_loop(void *data)
clock_gettime(CLOCK_MONOTONIC, &ts_withdraw);
for (uint i=0; i<N; i++)
rta_free(p->data[i].a);
s64 gentime = timediff(&ts_begin, &ts_generated);
s64 updatetime = timediff(&ts_generated, &ts_update);
s64 withdrawtime = timediff(&ts_update, &ts_withdraw);

View File

@ -53,48 +53,54 @@ pipe_rt_notify(struct channel *src_ch, struct rte_export *export)
struct pipe_proto *p = (void *) src_ch->proto;
struct channel *dst = (src_ch == p->pri) ? p->sec : p->pri;
if (!export->new && !export->old)
if (!RTE_EXPORT_IS_OK(export))
return;
const net_addr *net = export->new.attrs ? export->new.net : export->old.net;
if (dst->table->pipe_busy)
{
log(L_ERR "Pipe loop detected when sending %N to table %s",
export->net, dst->table->name);
net, dst->table->name);
return;
}
if (export->new)
if (export->new.attrs)
{
rta *a = alloca(rta_size(export->new->attrs));
memcpy(a, export->new->attrs, rta_size(export->new->attrs));
rta *a = alloca(rta_size(export->new.attrs));
memcpy(a, export->new.attrs, rta_size(export->new.attrs));
a->cached = 0;
a->uc = 0;
a->hostentry = NULL;
rte e0 = {
.attrs = rta_lookup(a),
.attrs = a,
.src = export->new.src,
.net = net,
};
src_ch->table->pipe_busy = 1;
rte_update(dst, export->net, &e0);
rte_update(dst, &e0);
src_ch->table->pipe_busy = 0;
}
else
{
src_ch->table->pipe_busy = 1;
rte_withdraw(dst, export->net, export->old_src);
rte_withdraw(dst, net, export->old.src);
src_ch->table->pipe_busy = 0;
}
}
static int
pipe_preexport(struct proto *P, rte *e)
pipe_preexport(struct channel *src_ch, rte *e UNUSED)
{
struct proto *pp = e->sender->proto;
struct pipe_proto *p = (void *) src_ch->proto;
struct channel *dst = (src_ch == p->pri) ? p->sec : p->pri;
if (pp == P)
return -1; /* Avoid local loops automatically */
/* Avoid pipe loops */
if (dst->table->pipe_busy)
return -1;
return 0;
}

View File

@ -385,18 +385,17 @@ radv_trigger_valid(struct radv_config *cf)
}
static inline int
radv_net_match_trigger(struct radv_config *cf, net_addr *n)
radv_net_match_trigger(struct radv_config *cf, const net_addr *n)
{
return radv_trigger_valid(cf) && net_equal(n, &cf->trigger);
}
int
radv_preexport(struct proto *P, rte *new)
radv_preexport(struct channel *c, rte *new)
{
// struct radv_proto *p = (struct radv_proto *) P;
struct radv_config *cf = (struct radv_config *) (P->cf);
struct radv_config *cf = (struct radv_config *) (c->proto->cf);
if (radv_net_match_trigger(cf, new->net->n.addr))
if (radv_net_match_trigger(cf, new->net))
return RIC_PROCESS;
if (cf->propagate_routes)
@ -413,10 +412,12 @@ radv_rt_notify(struct channel *ch, struct rte_export *e)
struct radv_route *rt;
eattr *ea;
if (radv_net_match_trigger(cf, e->net))
const net_addr *net = e->new.attrs ? e->new.net : e->old.net;
if (radv_net_match_trigger(cf, net))
{
u8 old_active = p->active;
p->active = !!e->new;
p->active = !!e->new.attrs;
if (p->active == old_active)
return;
@ -440,15 +441,15 @@ radv_rt_notify(struct channel *ch, struct rte_export *e)
* And yes, we exclude the trigger route on purpose.
*/
if (e->new)
if (e->new.attrs)
{
/* Update */
ea = ea_find(e->new->attrs->eattrs, EA_RA_PREFERENCE);
ea = ea_find(e->new.attrs->eattrs, EA_RA_PREFERENCE);
uint preference = ea ? ea->u.data : RA_PREF_MEDIUM;
uint preference_set = !!ea;
ea = ea_find(e->new->attrs->eattrs, EA_RA_LIFETIME);
ea = ea_find(e->new.attrs->eattrs, EA_RA_LIFETIME);
uint lifetime = ea ? ea->u.data : 0;
uint lifetime_set = !!ea;
@ -457,14 +458,14 @@ radv_rt_notify(struct channel *ch, struct rte_export *e)
(preference != RA_PREF_HIGH))
{
log(L_WARN "%s: Invalid ra_preference value %u on route %N",
p->p.name, preference, e->net);
p->p.name, preference, e->new.net);
preference = RA_PREF_MEDIUM;
preference_set = 1;
lifetime = 0;
lifetime_set = 1;
}
rt = fib_get(&p->routes, e->net);
rt = fib_get(&p->routes, e->new.net);
/* Ignore update if nothing changed */
if (rt->valid &&
@ -487,7 +488,7 @@ radv_rt_notify(struct channel *ch, struct rte_export *e)
else
{
/* Withdraw */
rt = fib_find(&p->routes, e->net);
rt = fib_find(&p->routes, e->old.net);
if (!rt || !rt->valid)
return;

View File

@ -210,9 +210,10 @@ rip_announce_rte(struct rip_proto *p, struct rip_entry *en)
rte e0 = {
.attrs = &a0,
.src = p->p.main_source,
.net = en->n.addr,
};
rte_update(p->p.main_channel, en->n.addr, &e0);
rte_update(p->p.main_channel, &e0);
}
else
rte_withdraw(p->p.main_channel, en->n.addr, p->p.main_source);
@ -316,10 +317,10 @@ rip_rt_notify(struct channel *ch, struct rte_export *e)
struct rip_entry *en;
int old_metric;
if (e->new)
if (e->new.attrs)
{
/* Update */
rta *a = e->new->attrs;
rta *a = e->new.attrs;
u32 rt_tag = ea_get_int(a->eattrs, EA_RIP_TAG, 0);
u32 rt_metric = ea_get_int(a->eattrs, EA_RIP_METRIC, 1);
struct iface *rt_from = (struct iface *) ea_get_int(a->eattrs, EA_RIP_FROM, 0);
@ -327,14 +328,14 @@ rip_rt_notify(struct channel *ch, struct rte_export *e)
if (rt_metric > p->infinity)
{
log(L_WARN "%s: Invalid rip_metric value %u for route %N",
p->p.name, rt_metric, e->net);
p->p.name, rt_metric, e->new.net);
rt_metric = p->infinity;
}
if (rt_tag > 0xffff)
{
log(L_WARN "%s: Invalid rip_tag value %u for route %N",
p->p.name, rt_tag, e->net);
p->p.name, rt_tag, e->new.net);
rt_metric = p->infinity;
rt_tag = 0;
}
@ -346,7 +347,7 @@ rip_rt_notify(struct channel *ch, struct rte_export *e)
* collection.
*/
en = fib_get(&p->rtable, e->net);
en = fib_get(&p->rtable, e->new.net);
old_metric = en->valid ? en->metric : -1;
@ -360,7 +361,7 @@ rip_rt_notify(struct channel *ch, struct rte_export *e)
else
{
/* Withdraw */
en = fib_find(&p->rtable, e->net);
en = fib_find(&p->rtable, e->old.net);
if (!en || en->valid != RIP_ENTRY_VALID)
return;
@ -1083,7 +1084,7 @@ rip_reload_routes(struct channel *C)
}
static int
rip_rte_better(struct rte *new, struct rte *old)
rip_rte_better(struct rte_storage *new, struct rte_storage *old)
{
u32 new_metric = ea_get_int(new->attrs->eattrs, EA_RIP_METRIC, 1);
u32 old_metric = ea_get_int(old->attrs->eattrs, EA_RIP_METRIC, 1);
@ -1185,7 +1186,7 @@ rip_reconfigure(struct proto *P, struct proto_config *CF)
}
static void
rip_get_route_info(rte *rte, byte *buf)
rip_get_route_info(rte *rte, struct rte_storage *er UNUSED, byte *buf)
{
u32 rt_metric = ea_get_int(rte->attrs->eattrs, EA_RIP_METRIC, 1);
u32 rt_tag = ea_get_int(rte->attrs->eattrs, EA_RIP_TAG, 0);

View File

@ -130,9 +130,10 @@ rpki_table_add_roa(struct rpki_cache *cache, struct channel *channel, const net_
rte e0 = {
.attrs = &a0,
.src = p->p.main_source,
.net = &pfxr->n,
};
rte_update(channel, &pfxr->n, &e0);
rte_update(channel, &e0);
}
void

View File

@ -101,27 +101,16 @@ static_announce_rte(struct static_proto *p, struct static_route *r)
if (r->state == SRS_CLEAN)
return;
/* We skip rta_lookup() here */
rte e0 = {
.attrs = a,
.src = static_get_source(p, r->index),
}, *e = &e0;
.net = r->net,
};
if (r->cmds)
{
/* Create a temporary table node */
e->net = alloca(sizeof(net) + r->net->length);
memset(e->net, 0, sizeof(net) + r->net->length);
net_copy(e->net->n.addr, r->net);
f_eval_rte(r->cmds, &e0, static_lp);
/* Evaluate the filter */
f_eval_rte(r->cmds, &e, static_lp);
/* Remove the temporary node */
e->net = NULL;
}
rte_update(p->p.main_channel, r->net, e);
rte_update(p->p.main_channel, &e0);
r->state = SRS_CLEAN;
if (r->cmds)
@ -133,7 +122,7 @@ withdraw:
if (r->state == SRS_DOWN)
return;
rte_withdraw(p->p.main_channel, r->net, p->p.main_source);
rte_withdraw(p->p.main_channel, r->net, static_get_source(p, r->index));
r->state = SRS_DOWN;
}
@ -413,7 +402,7 @@ static_reload_routes(struct channel *C)
}
static int
static_rte_better(rte *new, rte *old)
static_rte_better(struct rte_storage *new, struct rte_storage *old)
{
u32 n = ea_get_int(new->attrs->eattrs, EA_GEN_IGP_METRIC, IGP_METRIC_UNKNOWN);
u32 o = ea_get_int(old->attrs->eattrs, EA_GEN_IGP_METRIC, IGP_METRIC_UNKNOWN);
@ -421,7 +410,7 @@ static_rte_better(rte *new, rte *old)
}
static int
static_rte_mergable(rte *pri, rte *sec)
static_rte_mergable(struct rte_storage *pri, struct rte_storage *sec)
{
u32 a = ea_get_int(pri->attrs->eattrs, EA_GEN_IGP_METRIC, IGP_METRIC_UNKNOWN);
u32 b = ea_get_int(sec->attrs->eattrs, EA_GEN_IGP_METRIC, IGP_METRIC_UNKNOWN);
@ -719,7 +708,7 @@ static_copy_config(struct proto_config *dest, struct proto_config *src)
}
static void
static_get_route_info(rte *rte, byte *buf)
static_get_route_info(rte *rte, struct rte_storage *es UNUSED, byte *buf)
{
eattr *a = ea_find(rte->attrs->eattrs, EA_GEN_IGP_METRIC);
if (a)

View File

@ -199,7 +199,6 @@ sockaddr_fill_dl(struct sockaddr_dl *sa, struct iface *ifa)
static int
krt_send_route(struct krt_proto *p, int cmd, rte *e)
{
net *net = e->net;
rta *a = e->attrs;
static int msg_seq;
struct iface *j, *i = a->nh.iface;
@ -208,7 +207,7 @@ krt_send_route(struct krt_proto *p, int cmd, rte *e)
char *body = (char *)msg.buf;
sockaddr gate, mask, dst;
DBG("krt-sock: send %I/%d via %I\n", net->n.prefix, net->n.pxlen, a->gw);
DBG("krt-sock: send %N via %I\n", e->net, a->gw);
bzero(&msg,sizeof (struct rt_msghdr));
msg.rtm.rtm_version = RTM_VERSION;
@ -218,7 +217,7 @@ krt_send_route(struct krt_proto *p, int cmd, rte *e)
msg.rtm.rtm_flags = RTF_UP | RTF_PROTO1;
/* XXXX */
if (net_pxlen(net->n.addr) == net_max_prefix_length[net->n.addr->type])
if (net_pxlen(e->net) == net_max_prefix_length[e->net->type])
msg.rtm.rtm_flags |= RTF_HOST;
else
msg.rtm.rtm_addrs |= RTA_NETMASK;
@ -260,7 +259,7 @@ krt_send_route(struct krt_proto *p, int cmd, rte *e)
int af = AF_UNSPEC;
switch (net->n.addr->type) {
switch (e->net->type) {
case NET_IP4:
af = AF_INET;
break;
@ -268,12 +267,12 @@ krt_send_route(struct krt_proto *p, int cmd, rte *e)
af = AF_INET6;
break;
default:
log(L_ERR "KRT: Not sending route %N to kernel", net->n.addr);
log(L_ERR "KRT: Not sending route %N to kernel", e->net);
return -1;
}
sockaddr_fill(&dst, af, net_prefix(net->n.addr), NULL, 0);
sockaddr_fill(&mask, af, net_pxmask(net->n.addr), NULL, 0);
sockaddr_fill(&dst, af, net_prefix(e->net), NULL, 0);
sockaddr_fill(&mask, af, net_pxmask(e->net), NULL, 0);
switch (a->dest)
{
@ -303,7 +302,7 @@ krt_send_route(struct krt_proto *p, int cmd, rte *e)
#if __OpenBSD__
/* Keeping temporarily old code for OpenBSD */
struct ifa *addr = (net->n.addr->type == NET_IP4) ? i->addr4 : (i->addr6 ?: i->llv6);
struct ifa *addr = (e->net->type == NET_IP4) ? i->addr4 : (i->addr6 ?: i->llv6);
if (!addr)
{
@ -339,7 +338,7 @@ krt_send_route(struct krt_proto *p, int cmd, rte *e)
msg.rtm.rtm_msglen = l;
if ((l = write(p->sys.sk->fd, (char *)&msg, l)) < 0) {
log(L_ERR "KRT: Error sending route %N to kernel: %m", net->n.addr);
log(L_ERR "KRT: Error sending route %N to kernel: %m", e->net);
return -1;
}
@ -347,22 +346,24 @@ krt_send_route(struct krt_proto *p, int cmd, rte *e)
}
void
krt_replace_rte(struct krt_proto *p, rte *new, rte *old)
krt_replace_rte(struct krt_proto *p, struct rte_export *e)
{
int err = 0;
if (old)
krt_send_route(p, RTM_DELETE, old);
if (RTE_EXPORT_OLD_OK(e))
krt_send_route(p, RTM_DELETE, &e->old);
if (new)
err = krt_send_route(p, RTM_ADD, new);
if (RTE_EXPORT_NEW_OK(e))
err = krt_send_route(p, RTM_ADD, &e->new);
if (new)
/* There is no need to update this bit for the old route. It is used solely together
* with the bit in export map in channel. */
if (RTE_EXPORT_NEW_OK(e))
{
if (err < 0)
bmap_clear(&p->sync_map, new->id);
bmap_clear(&p->sync_map, e->new_id);
else
bmap_set(&p->sync_map, new->id);
bmap_set(&p->sync_map, e->new_id);
}
}
@ -374,7 +375,6 @@ krt_read_route(struct ks_msg *msg, struct krt_proto *p, int scan)
/* p is NULL iff KRT_SHARED_SOCKET and !scan */
int ipv6;
net *net;
sockaddr dst, gate, mask;
ip_addr idst, igate, imask;
net_addr ndst;
@ -491,8 +491,6 @@ krt_read_route(struct ks_msg *msg, struct krt_proto *p, int scan)
else
src = KRT_SRC_KERNEL;
net = net_get(p->p.main_channel->table, &ndst);
rta a = {
.source = RTS_INHERIT,
.scope = SCOPE_UNIVERSE,
@ -519,7 +517,7 @@ krt_read_route(struct ks_msg *msg, struct krt_proto *p, int scan)
if (!a.nh.iface)
{
log(L_ERR "KRT: Received route %N with unknown ifindex %u",
net->n.addr, msg->rtm.rtm_index);
&ndst, msg->rtm.rtm_index);
return;
}
@ -542,19 +540,20 @@ krt_read_route(struct ks_msg *msg, struct krt_proto *p, int scan)
return;
log(L_ERR "KRT: Received route %N with strange next-hop %I",
net->n.addr, a.nh.gw);
&ndst, a.nh.gw);
return;
}
}
done:;
rte e0 = {}, *e = &e0;
e->attrs = &a;
e->net = net;
rte e0 = {
.attrs = &a,
.net = &ndst,
};
ea_list *ea = alloca(sizeof(ea_list) + 1 * sizeof(eattr));
*ea = (ea_list) { .count = 1, .next = e->attrs->eattrs };
e->attrs->eattrs = ea;
*ea = (ea_list) { .count = 1, .next = a.eattrs };
a.eattrs = ea;
ea->attrs[0] = (eattr) {
.id = EA_KRT_SOURCE,
@ -563,9 +562,9 @@ krt_read_route(struct ks_msg *msg, struct krt_proto *p, int scan)
};
if (scan)
krt_got_route(p, e, src);
krt_got_route(p, &e0, src);
else
krt_got_route_async(p, e, new, src);
krt_got_route_async(p, &e0, new, src);
}
static void

View File

@ -105,7 +105,7 @@ struct nl_parse_state
int scan;
int merge;
net *net;
net_addr *net;
rta *attrs;
struct krt_proto *proto;
s8 new;
@ -1236,7 +1236,7 @@ static int
nl_send_route(struct krt_proto *p, rte *e, int op, int dest, struct nexthop *nh)
{
eattr *ea;
net *net = e->net;
const net_addr *net = e->net;
rta *a = e->attrs;
ea_list *eattrs = a->eattrs;
int bufsize = 128 + KRT_METRICS_MAX*8 + nh_bufsize(&(a->nh));
@ -1251,7 +1251,7 @@ nl_send_route(struct krt_proto *p, rte *e, int op, int dest, struct nexthop *nh)
int rsize = sizeof(*r) + bufsize;
r = alloca(rsize);
DBG("nl_send_route(%N,op=%x)\n", net->n.addr, op);
DBG("nl_send_route(%N,op=%x)\n", net, op);
bzero(&r->h, sizeof(r->h));
bzero(&r->r, sizeof(r->r));
@ -1260,7 +1260,7 @@ nl_send_route(struct krt_proto *p, rte *e, int op, int dest, struct nexthop *nh)
r->h.nlmsg_flags = op | NLM_F_REQUEST | NLM_F_ACK;
r->r.rtm_family = p->af;
r->r.rtm_dst_len = net_pxlen(net->n.addr);
r->r.rtm_dst_len = net_pxlen(net);
r->r.rtm_protocol = RTPROT_BIRD;
r->r.rtm_scope = RT_SCOPE_NOWHERE;
#ifdef HAVE_MPLS_KERNEL
@ -1272,7 +1272,7 @@ nl_send_route(struct krt_proto *p, rte *e, int op, int dest, struct nexthop *nh)
* 2) Never use RTA_PRIORITY
*/
u32 label = net_mpls(net->n.addr);
u32 label = net_mpls(net);
nl_add_attr_mpls(&r->h, rsize, RTA_DST, 1, &label);
r->r.rtm_scope = RT_SCOPE_UNIVERSE;
r->r.rtm_type = RTN_UNICAST;
@ -1280,12 +1280,12 @@ nl_send_route(struct krt_proto *p, rte *e, int op, int dest, struct nexthop *nh)
else
#endif
{
nl_add_attr_ipa(&r->h, rsize, RTA_DST, net_prefix(net->n.addr));
nl_add_attr_ipa(&r->h, rsize, RTA_DST, net_prefix(net));
/* Add source address for IPv6 SADR routes */
if (net->n.addr->type == NET_IP6_SADR)
if (net->type == NET_IP6_SADR)
{
net_addr_ip6_sadr *a = (void *) &net->n.addr;
net_addr_ip6_sadr *a = (void *) &net;
nl_add_attr_ip6(&r->h, rsize, RTA_SRC, a->src_prefix);
r->r.rtm_src_len = a->src_pxlen;
}
@ -1427,7 +1427,7 @@ nl_replace_rte(struct krt_proto *p, rte *e)
void
krt_replace_rte(struct krt_proto *p, rte *new, rte *old)
krt_replace_rte(struct krt_proto *p, struct rte_export *e)
{
int err = 0;
@ -1443,30 +1443,32 @@ krt_replace_rte(struct krt_proto *p, rte *new, rte *old)
* old route value, so we do not try to optimize IPv6 ECMP reconfigurations.
*/
if (krt_ipv4(p) && old && new)
if (krt_ipv4(p) && RTE_EXPORT_NEW_OK(e) && RTE_EXPORT_OLD_OK(e))
{
err = nl_replace_rte(p, new);
err = nl_replace_rte(p, &e->new);
}
else
{
if (old)
nl_delete_rte(p, old);
if (RTE_EXPORT_OLD_OK(e))
nl_delete_rte(p, &e->old);
if (new)
err = nl_add_rte(p, new);
if (RTE_EXPORT_NEW_OK(e))
err = nl_add_rte(p, &e->new);
}
if (new)
/* There is no need to update this bit for the old route. It is used solely together
* with the bit in export map in channel. */
if (RTE_EXPORT_NEW_OK(e))
{
if (err < 0)
bmap_clear(&p->sync_map, new->id);
bmap_clear(&p->sync_map, e->new_id);
else
bmap_set(&p->sync_map, new->id);
bmap_set(&p->sync_map, e->new_id);
}
}
static int
nl_mergable_route(struct nl_parse_state *s, net *net, struct krt_proto *p, uint priority, uint krt_type, uint rtm_family)
nl_mergable_route(struct nl_parse_state *s, const net_addr *net, struct krt_proto *p, uint priority, uint krt_type, uint rtm_family)
{
/* Route merging is used for IPv6 scans */
if (!s->scan || (rtm_family != AF_INET6))
@ -1662,9 +1664,7 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
net6_prefix(&src), net6_pxlen(&src));
}
net *net = net_get(p->p.main_channel->table, n);
if (s->net && !nl_mergable_route(s, net, p, priority, i->rtm_type, i->rtm_family))
if (s->net && !nl_mergable_route(s, n, p, priority, i->rtm_type, i->rtm_family))
nl_announce_route(s);
rta *ra = lp_allocz(s->pool, RTA_MAX_SIZE);
@ -1686,7 +1686,7 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
struct nexthop *nh = nl_parse_multipath(s, p, a[RTA_MULTIPATH], i->rtm_family);
if (!nh)
{
log(L_ERR "KRT: Received strange multipath route %N", net->n.addr);
log(L_ERR "KRT: Received strange multipath route %N", n);
return;
}
@ -1700,7 +1700,7 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
ra->nh.iface = if_find_by_index(oif);
if (!ra->nh.iface)
{
log(L_ERR "KRT: Received route %N with unknown ifindex %u", net->n.addr, oif);
log(L_ERR "KRT: Received route %N with unknown ifindex %u", n, oif);
return;
}
@ -1727,7 +1727,7 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
(ra->nh.flags & RNF_ONLINK) ? NEF_ONLINK : 0);
if (!nbr || (nbr->scope == SCOPE_HOST))
{
log(L_ERR "KRT: Received route %N with strange next-hop %I", net->n.addr,
log(L_ERR "KRT: Received route %N with strange next-hop %I", n,
ra->nh.gw);
return;
}
@ -1823,29 +1823,29 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
{
u32 metrics[KRT_METRICS_MAX];
ea_list *ea = lp_alloc(s->pool, sizeof(ea_list) + KRT_METRICS_MAX * sizeof(eattr));
int t, n = 0;
int t, na = 0;
if (nl_parse_metrics(a[RTA_METRICS], metrics, ARRAY_SIZE(metrics)) < 0)
{
log(L_ERR "KRT: Received route %N with strange RTA_METRICS attribute", net->n.addr);
log(L_ERR "KRT: Received route %N with strange RTA_METRICS attribute", n);
return;
}
for (t = 1; t < KRT_METRICS_MAX; t++)
if (metrics[0] & (1 << t))
{
ea->attrs[n].id = EA_CODE(PROTOCOL_KERNEL, KRT_METRICS_OFFSET + t);
ea->attrs[n].flags = 0;
ea->attrs[n].type = EAF_TYPE_INT; /* FIXME: Some are EAF_TYPE_BITFIELD */
ea->attrs[n].u.data = metrics[t];
n++;
ea->attrs[na].id = EA_CODE(PROTOCOL_KERNEL, KRT_METRICS_OFFSET + t);
ea->attrs[na].flags = 0;
ea->attrs[na].type = EAF_TYPE_INT; /* FIXME: Some are EAF_TYPE_BITFIELD */
ea->attrs[na].u.data = metrics[t];
na++;
}
if (n > 0)
if (na > 0)
{
ea->next = ra->eattrs;
ea->flags = EALF_SORTED;
ea->count = n;
ea->count = na;
ra->eattrs = ea;
}
}
@ -1861,7 +1861,9 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
if (!s->net)
{
/* Store the new route */
s->net = net;
s->net = lp_alloc(s->pool, n->length);
net_copy(s->net, n);
s->attrs = ra;
s->proto = p;
s->new = new;

View File

@ -251,14 +251,14 @@ static inline void
krt_trace_in(struct krt_proto *p, rte *e, char *msg)
{
if (p->p.debug & D_PACKETS)
log(L_TRACE "%s: %N: %s", p->p.name, e->net->n.addr, msg);
log(L_TRACE "%s: %N: %s", p->p.name, e->net, msg);
}
static inline void
krt_trace_in_rl(struct tbf *f, struct krt_proto *p, rte *e, char *msg)
{
if (p->p.debug & D_PACKETS)
log_rl(f, L_TRACE "%s: %N: %s", p->p.name, e->net->n.addr, msg);
log_rl(f, L_TRACE "%s: %N: %s", p->p.name, e->net, msg);
}
/*
@ -278,33 +278,34 @@ static struct tbf rl_alien = TBF_DEFAULT_LOG_LIMITS;
*/
static inline u32
krt_metric(rte *a)
krt_metric(struct rte_storage *a)
{
eattr *ea = ea_find(a->attrs->eattrs, EA_KRT_METRIC);
return ea ? ea->u.data : 0;
}
static inline int
krt_same_key(rte *a, rte *b)
krt_same_key(struct rte_storage *a, struct rte_storage *b)
{
return (krt_metric(a) == krt_metric(b));
}
static inline int
krt_uptodate(rte *a, rte *b)
krt_uptodate(struct rte_storage *a, struct rte_storage *b)
{
return (a->attrs == b->attrs);
}
static void
krt_learn_announce_update(struct krt_proto *p, rte *e)
krt_learn_announce_update(struct krt_proto *p, struct rte_storage *e)
{
rte e0 = {
.attrs = rta_clone(e->attrs),
.attrs = e->attrs,
.src = p->p.main_source,
.net = e->net->n.addr,
};
rte_update(p->p.main_channel, e->net->n.addr, &e0);
rte_update(p->p.main_channel, &e0);
}
static void
@ -317,21 +318,20 @@ krt_learn_announce_delete(struct krt_proto *p, net_addr *n)
static void
krt_learn_scan(struct krt_proto *p, rte *e)
{
net *n0 = e->net;
net *n = net_get(&p->krt_table, n0->n.addr);
rte *m, **mm;
e = rte_store(e);
net *n = net_get(&p->krt_table, e->net);
struct rte_storage *er = rte_store(e, n);
struct rte_storage *m, **mm;
for(mm=&n->routes; m = *mm; mm=&m->next)
if (krt_same_key(m, e))
if (krt_same_key(m, er))
break;
if (m)
{
if (krt_uptodate(m, e))
if (krt_uptodate(m, er))
{
krt_trace_in_rl(&rl_alien, p, e, "[alien] seen");
rte_free(e);
rte_free(er);
m->pflags |= KRT_REF_SEEN;
}
else
@ -344,11 +344,12 @@ krt_learn_scan(struct krt_proto *p, rte *e)
}
else
krt_trace_in(p, e, "[alien] created");
if (!m)
{
e->next = n->routes;
n->routes = e;
e->pflags |= KRT_REF_SEEN;
er->next = n->routes;
n->routes = er;
er->pflags |= KRT_REF_SEEN;
}
}
@ -364,7 +365,7 @@ krt_learn_prune(struct krt_proto *p)
again:
FIB_ITERATE_START(fib, &fit, net, n)
{
rte *e, **ee, *best, **pbest, *old_best;
struct rte_storage *e, **ee, *best, **pbest, *old_best;
/*
* Note that old_best may be NULL even if there was an old best route in
@ -429,27 +430,28 @@ again:
static void
krt_learn_async(struct krt_proto *p, rte *e, int new)
{
net *n0 = e->net;
net *n = net_get(&p->krt_table, n0->n.addr);
rte *g, **gg, *best, **bestp, *old_best;
net *n = net_get(&p->krt_table, e->net);
struct rte_storage *g, **gg, *best, **bestp, *old_best;
ASSERT(!e->attrs->cached);
e->attrs->pref = p->p.main_channel->preference;
e = rte_store(e);
struct rte_storage *er = rte_store(e, n);
old_best = n->routes;
for(gg=&n->routes; g = *gg; gg = &g->next)
if (krt_same_key(g, e))
if (krt_same_key(g, er))
break;
if (new)
{
if (g)
{
if (krt_uptodate(g, e))
if (krt_uptodate(g, er))
{
krt_trace_in(p, e, "[alien async] same");
rte_free(e);
rte_free(er);
return;
}
krt_trace_in(p, e, "[alien async] updated");
@ -459,20 +461,20 @@ krt_learn_async(struct krt_proto *p, rte *e, int new)
else
krt_trace_in(p, e, "[alien async] created");
e->next = n->routes;
n->routes = e;
er->next = n->routes;
n->routes = er;
}
else if (!g)
{
krt_trace_in(p, e, "[alien async] delete failed");
rte_free(e);
rte_free(er);
return;
}
else
{
krt_trace_in(p, e, "[alien async] removed");
*gg = g->next;
rte_free(e);
rte_free(er);
rte_free(g);
}
best = n->routes;
@ -552,50 +554,43 @@ krt_flush_routes(struct krt_proto *p)
{
if (krt_is_installed(p, n))
{
struct rte_export e = {
.old = rte_copy(n->routes),
.old_id = n->routes->id,
};
/* FIXME: this does not work if gw is changed in export filter */
krt_replace_rte(p, NULL, n->routes);
krt_replace_rte(p, &e);
}
}
FIB_WALK_END;
}
static struct rte *
krt_export_net(struct krt_proto *p, net *net, rte **rt_free)
static _Bool
krt_export_net(struct krt_proto *p, net *net, rte *rt)
{
struct channel *c = p->p.main_channel;
const struct filter *filter = c->out_filter;
rte *rt;
if (c->ra_mode == RA_MERGED)
return rt_export_merged(c, net, rt_free, krt_filter_lp, 1);
return rt_export_merged(c, net, rt, krt_filter_lp, 1);
rt = net->routes;
*rt_free = NULL;
if (!rte_is_valid(rt))
return NULL;
if (!rte_is_valid(net->routes))
return 0;
if (filter == FILTER_REJECT)
return NULL;
return 0;
/* We could run krt_preexport() here, but it is already handled by krt_is_installed() */
*rt = rte_copy(net->routes);
if (filter == FILTER_ACCEPT)
goto accept;
return 1;
if (f_run(filter, &rt, krt_filter_lp, FF_SILENT) > F_ACCEPT)
goto reject;
if (f_run(filter, rt, krt_filter_lp, FF_SILENT) > F_ACCEPT)
return 0;
accept:
if (rt != net->routes)
*rt_free = rt;
return rt;
reject:
if (rt != net->routes)
rte_free(rt);
return NULL;
return 1;
}
static int
@ -620,8 +615,10 @@ krt_same_dest(rte *k, rte *e)
void
krt_got_route(struct krt_proto *p, rte *e, s8 src)
{
rte *new = NULL, *rt_free = NULL;
net *n = e->net;
struct rte_export ex = {
.old = *e,
};
#ifdef KRT_ALLOW_LEARN
switch (src)
@ -647,24 +644,24 @@ krt_got_route(struct krt_proto *p, rte *e, s8 src)
if (!p->ready)
goto ignore;
if (!krt_is_installed(p, n))
net *n = net_find(p->p.main_channel->table, e->net);
if (!n || !krt_is_installed(p, n))
goto delete;
new = krt_export_net(p, n, &rt_free);
/* Rejected by filters */
if (!new)
if (!krt_export_net(p, n, &ex.new))
goto delete;
/* Route to this destination was already seen. Strange, but it happens... */
if (bmap_test(&p->seen_map, new->id))
if (bmap_test(&p->seen_map, n->routes->id))
goto aseen;
/* Mark route as seen */
bmap_set(&p->seen_map, new->id);
bmap_set(&p->seen_map, n->routes->id);
/* TODO: There also may be changes in route eattrs, we ignore that for now. */
if (!bmap_test(&p->sync_map, new->id) || !krt_same_dest(e, new))
if (!bmap_test(&p->sync_map, n->routes->id) || !krt_same_dest(e, &ex.new))
goto update;
goto seen;
@ -682,19 +679,17 @@ ignore:
goto done;
update:
krt_trace_in(p, new, "updating");
krt_replace_rte(p, new, e);
krt_trace_in(p, &ex.new, "updating");
ex.new_id = n->routes->id;
krt_replace_rte(p, &ex);
goto done;
delete:
krt_trace_in(p, e, "deleting");
krt_replace_rte(p, NULL, e);
krt_replace_rte(p, &ex);
goto done;
done:
if (rt_free)
rte_free(rt_free);
lp_flush(krt_filter_lp);
}
@ -714,18 +709,16 @@ krt_prune(struct krt_proto *p)
{
if (p->ready && krt_is_installed(p, n) && !bmap_test(&p->seen_map, n->routes->id))
{
rte *rt_free = NULL;
rte *new = krt_export_net(p, n, &rt_free);
struct rte_export ex = {
.new_id = n->routes->id
};
if (new)
if (krt_export_net(p, n, &ex.new))
{
krt_trace_in(p, new, "installing");
krt_replace_rte(p, new, NULL);
krt_trace_in(p, &ex.new, "installing");
krt_replace_rte(p, &ex);
}
if (rt_free)
rte_free(rt_free);
lp_flush(krt_filter_lp);
}
}
@ -753,7 +746,8 @@ krt_got_route_async(struct krt_proto *p, rte *e, int new, s8 src)
if (new)
{
krt_trace_in(p, e, "[redirect] deleting");
krt_replace_rte(p, NULL, e);
struct rte_export ex = { .old = *e };
krt_replace_rte(p, &ex);
}
/* If !new, it is probably echo of our deletion */
break;
@ -873,10 +867,9 @@ krt_scan_timer_kick(struct krt_proto *p)
*/
static int
krt_preexport(struct proto *P, rte *e)
krt_preexport(struct channel *c, rte *e)
{
// struct krt_proto *p = (struct krt_proto *) P;
if (e->src->proto == P)
if (e->src->proto == c->proto)
return -1;
if (!krt_capable(e))
@ -900,12 +893,12 @@ krt_rt_notify(struct channel *ch, struct rte_export *e)
* but if we processed the update as usual, we would send withdraw to the
* kernel, which would remove the new imported route instead.
*/
if (!e->new && (e->new_src->proto == ch->proto))
if (!e->new.attrs && (e->new.src->proto == ch->proto))
return;
#endif
if (p->initialized) /* Before first scan we don't touch the routes */
krt_replace_rte(p, e->new, e->old);
krt_replace_rte(p, e);
}
static void

View File

@ -83,11 +83,11 @@ void krt_got_route(struct krt_proto *p, struct rte *e, s8 src);
void krt_got_route_async(struct krt_proto *p, struct rte *e, int new, s8 src);
static inline int
krt_get_sync_error(struct krt_proto *p, struct rte *e)
krt_get_sync_error(struct krt_proto *p, u32 id)
{
return (p->p.proto_state == PS_UP) &&
bmap_test(&p->p.main_channel->export_map, e->id) &&
!bmap_test(&p->sync_map, e->id);
bmap_test(&p->p.main_channel->export_map, id) &&
!bmap_test(&p->sync_map, id);
}
/* Values for rte->u.krt_sync.src */
@ -143,7 +143,7 @@ void krt_sys_copy_config(struct krt_config *, struct krt_config *);
int krt_capable(rte *e);
void krt_do_scan(struct krt_proto *);
void krt_replace_rte(struct krt_proto *p, rte *new, rte *old);
void krt_replace_rte(struct krt_proto *p, struct rte_export *e);
int krt_sys_get_attr(const eattr *a, byte *buf, int buflen);