
Squashing the route attribute structure into one level.

For now, all route attributes are stored as eattrs in an ea_list. This
should make route manipulation easier, and it also allows for a layered
approach to route attributes, where updates from filters will be stored
as an overlay over the previous version.
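
As a rough illustration of the layering this enables (a hypothetical helper, not code from this commit; ea_set_attr_u32(), ea_find() and the ea_list ->next chain are the pieces the patch below actually touches):

/* Hypothetical sketch: an update becomes a fresh, uncached ea_list layered
 * over the previous (possibly cached) one via ->next.  Readers such as
 * ea_find() walk the chain, so the newest layer wins for each attribute. */
static ea_list *
overlay_set_u32(ea_list *prev, const struct ea_class *cls, u32 val)
{
  ea_list *head = NULL;
  ea_set_attr_u32(&head, cls, 0, val);	/* builds a one-attribute list */
  head->next = prev;			/* older layers stay visible below */
  return head;
}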
Maria Matejka 2022-05-30 12:03:03 +02:00
parent 950775f6fa
commit 938742decc
26 changed files with 352 additions and 497 deletions

View File

@ -212,7 +212,7 @@
* m4_dnl NEVER_CONSTANT-> don't generate pre-interpretation code at all
* m4_dnl ACCESS_RTE -> check that route is available, also NEVER_CONSTANT
* m4_dnl ACCESS_EATTRS -> pre-cache the eattrs; use only with ACCESS_RTE
* m4_dnl f_rta_cow(fs) -> function to call before any change to route should be done
* m4_dnl f_rte_cow(fs) -> function to call before any change to route should be done
*
* m4_dnl If you are stymied, see FI_CALL or FI_CONSTANT or just search for
* m4_dnl the mentioned macros in this file to see what is happening there in the wild.
@ -578,7 +578,7 @@
STATIC_ATTR;
ARG_TYPE(1, sa.type);
f_rta_cow(fs);
f_rte_cow(fs);
{
union {
struct nexthop_adata nha;
@ -741,7 +741,7 @@
if (da->type >= EAF_TYPE__MAX)
bug("Unsupported attribute type");
f_rta_cow(fs);
f_rte_cow(fs);
switch (da->type) {
case T_OPAQUE:
@ -770,7 +770,7 @@
ACCESS_RTE;
ACCESS_EATTRS;
f_rta_cow(fs);
f_rte_cow(fs);
ea_unset_attr(fs->eattrs, 1, da);
}

View File

@ -76,9 +76,6 @@ struct filter_state {
/* The route we are processing. This may be NULL to indicate no route available. */
struct rte **rte;
/* The old rta to be freed after filters are done. */
struct rta *old_rta;
/* Cached pointer to ea_list */
struct ea_list **eattrs;
@ -99,7 +96,7 @@ void (*bt_assert_hook)(int result, const struct f_line_item *assert);
static inline void f_cache_eattrs(struct filter_state *fs)
{
fs->eattrs = &((*fs->rte)->attrs->eattrs);
fs->eattrs = &((*fs->rte)->attrs);
}
static inline void f_rte_cow(struct filter_state *fs)
@ -110,33 +107,6 @@ static inline void f_rte_cow(struct filter_state *fs)
*fs->rte = rte_cow(*fs->rte);
}
/*
* rta_cow - prepare rta for modification by filter
*/
static void
f_rta_cow(struct filter_state *fs)
{
if (!rta_is_cached((*fs->rte)->attrs))
return;
/* Prepare to modify rte */
f_rte_cow(fs);
/* Store old rta to free it later, it stores reference from rte_cow() */
fs->old_rta = (*fs->rte)->attrs;
/*
* Get shallow copy of rta. Fields eattrs and nexthops of rta are shared
* with fs->old_rta (they will be copied when the cached rta will be obtained
* at the end of f_run()), also the lock of hostentry is inherited (we
* suppose hostentry is not changed by filters).
*/
(*fs->rte)->attrs = rta_do_cow((*fs->rte)->attrs, tmp_linpool);
/* Re-cache the ea_list */
f_cache_eattrs(fs);
}
static struct tbf rl_runtime_err = TBF_DEFAULT_LOG_LIMITS;
/**
@ -289,7 +259,7 @@ f_run(const struct filter *filter, struct rte **rte, int flags)
/* Run the interpreter itself */
enum filter_return fret = interpret(&filter_state, filter->root, NULL);
if (filter_state.old_rta) {
if (filter_state.eattrs && !ea_is_cached(*filter_state.eattrs)) {
/*
* Cached rta was modified and filter_state->rte contains now an uncached one,
* sharing some part with the cached one. The cached rta should
@ -302,6 +272,10 @@ f_run(const struct filter *filter, struct rte **rte, int flags)
* This is not the problem if rte was COW, because original rte
* also holds the same rta.
*/
ea_list *cached = *filter_state.eattrs;
while (cached && !ea_is_cached(cached))
cached = cached->next;
if (!rte_cow) {
/* Cache the new attrs */
(*filter_state.rte)->attrs = rta_lookup((*filter_state.rte)->attrs);
@ -311,8 +285,7 @@ f_run(const struct filter *filter, struct rte **rte, int flags)
}
/* Uncache the old attrs and drop the pointer as it is invalid now. */
rta_free(filter_state.old_rta);
filter_state.old_rta = NULL;
rta_free(cached);
}
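(The search above can be read as a tiny helper; the restatement below is hypothetical, only ea_is_cached() and the ->next chain come from this commit.)

/* Hypothetical restatement of the loop above: find the first cached layer
 * of a possibly layered attribute list, so that its reference can be
 * dropped once the new list has been cached or copied. */
static ea_list *
ea_cached_tail(ea_list *e)
{
  while (e && !ea_is_cached(e))
    e = e->next;
  return e;
}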
/* Process the filter output, log it and return */

View File

@ -21,7 +21,7 @@ typedef struct rte {
struct network *net; /* Network this RTE belongs to */
struct rte_src *src; /* Route source that created the route */
struct channel *sender; /* Channel used to send the route to the routing table */
struct rta *attrs; /* Attributes of this route */
struct ea_list *attrs; /* Attributes of this route */
u32 id; /* Table specific route id */
byte flags; /* Flags (REF_...) */
byte pflags; /* Protocol-specific flags */
@ -90,14 +90,6 @@ struct nexthop_adata {
#define RNF_ONLINK 0x1 /* Gateway is onlink regardless of IP ranges */
typedef struct rta {
struct rta *next, **pprev; /* Hash chain */
u32 uc; /* Use count */
u32 hash_key; /* Hash over important fields */
struct ea_list *eattrs; /* Extended Attribute chain */
u16 cached:1; /* Are attributes cached? */
} rta;
#define RTS_STATIC 1 /* Normal static route */
#define RTS_INHERIT 2 /* Route inherited from kernel */
#define RTS_DEVICE 3 /* Device route */
@ -153,6 +145,10 @@ typedef struct eattr {
#define EA_BIT_GET(ea) ((ea) >> 24)
typedef struct ea_list {
struct ea_list *next_hash; /* Next in hash chain */
struct ea_list **pprev_hash; /* Previous in hash chain */
u32 uc; /* Use count */
u32 hash_key; /* List hash */
struct ea_list *next; /* In case we have an override list */
byte flags; /* Flags: EALF_... */
byte rfu;
@ -162,7 +158,7 @@ typedef struct ea_list {
#define EALF_SORTED 1 /* Attributes are sorted by code */
#define EALF_BISECT 2 /* Use interval bisection for searching */
#define EALF_CACHED 4 /* Attributes belonging to cached rta */
#define EALF_CACHED 4 /* List is cached */
struct ea_class {
#define EA_CLASS_INSIDE \
@ -317,7 +313,7 @@ ea_copy_attr(ea_list **to, ea_list *from, const struct ea_class *def)
/* Preference: first-order comparison */
extern struct ea_class ea_gen_preference;
static inline u32 rt_get_preference(rte *rt)
{ return ea_get_int(rt->attrs->eattrs, &ea_gen_preference, 0); }
{ return ea_get_int(rt->attrs, &ea_gen_preference, 0); }
/* IGP metric: second-order comparison */
extern struct ea_class ea_gen_igp_metric;
@ -332,7 +328,7 @@ extern struct ea_class ea_gen_from;
* To be superseded in a near future by something more informative. */
extern struct ea_class ea_gen_source;
static inline u32 rt_get_source_attr(rte *rt)
{ return ea_get_int(rt->attrs->eattrs, &ea_gen_source, 0); }
{ return ea_get_int(rt->attrs, &ea_gen_source, 0); }
/* Flowspec validation result */
#define FLOWSPEC_UNKNOWN 0
@ -341,7 +337,7 @@ static inline u32 rt_get_source_attr(rte *rt)
extern struct ea_class ea_gen_flowspec_valid;
static inline u32 rt_get_flowspec_valid(rte *rt)
{ return ea_get_int(rt->attrs->eattrs, &ea_gen_flowspec_valid, FLOWSPEC_UNKNOWN); }
{ return ea_get_int(rt->attrs, &ea_gen_flowspec_valid, FLOWSPEC_UNKNOWN); }
/* Next hop: For now, stored as adata */
extern struct ea_class ea_gen_nexthop;
@ -382,7 +378,7 @@ int nexthop_is_sorted(struct nexthop_adata *x);
/* Route has regular, reachable nexthop (i.e. not RTD_UNREACHABLE and like) */
static inline int rte_is_reachable(rte *r)
{
eattr *nhea = ea_find(r->attrs->eattrs, &ea_gen_nexthop);
eattr *nhea = ea_find(r->attrs, &ea_gen_nexthop);
if (!nhea)
return 0;
@ -404,21 +400,23 @@ static inline int nhea_dest(eattr *nhea)
static inline int rte_dest(rte *r)
{
return nhea_dest(ea_find(r->attrs->eattrs, &ea_gen_nexthop));
return nhea_dest(ea_find(r->attrs, &ea_gen_nexthop));
}
void rta_init(void);
#define rta_size(...) (sizeof(rta))
#define RTA_MAX_SIZE (sizeof(rta))
rta *rta_lookup(rta *); /* Get rta equivalent to this one, uc++ */
static inline int rta_is_cached(rta *r) { return r->cached; }
static inline rta *rta_clone(rta *r) { r->uc++; return r; }
void rta__free(rta *r);
static inline void rta_free(rta *r) { if (r && !--r->uc) rta__free(r); }
rta *rta_do_cow(rta *o, linpool *lp);
static inline rta * rta_cow(rta *r, linpool *lp) { return rta_is_cached(r) ? rta_do_cow(r, lp) : r; }
void rta_dump(rta *);
void rta_dump_all(void);
void rta_show(struct cli *, rta *);
ea_list *ea_lookup(ea_list *); /* Get a cached (and normalized) variant of this attribute list */
static inline int ea_is_cached(ea_list *r) { return r->flags & EALF_CACHED; }
static inline ea_list *ea_clone(ea_list *r) { r->uc++; return r; }
void ea__free(ea_list *r);
static inline void ea_free(ea_list *r) { if (r && !--r->uc) ea__free(r); }
void ea_dump(ea_list *);
void ea_dump_all(void);
void ea_show_list(struct cli *, ea_list *);
#define rta_lookup ea_lookup
#define rta_is_cached ea_is_cached
#define rta_clone ea_clone
#define rta_free ea_free
#endif
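
Taken together, the header now routes all attribute caching through the ea_* calls. A minimal usage sketch, assuming a channel and source are at hand (the function name and the chosen attributes are illustrative; the individual calls all appear elsewhere in this diff):

/* Hypothetical example of the new attribute lifecycle: build an uncached
 * list, intern it via ea_lookup(), and hand the cached reference straight
 * to a temporary route, as the dev and babel hunks below do. */
static void
example_announce(struct channel *c, struct rte_src *src, const net_addr *n)
{
  ea_list *ea = NULL;
  ea_set_attr_u32(&ea, &ea_gen_preference, 0, c->preference);
  ea_set_attr_u32(&ea, &ea_gen_source, 0, RTS_STATIC);

  rte *e = rte_get_temp(ea_lookup(ea), src);	/* cached list, uc = 1 */
  e->pflags = 0;
  rte_update2(c, n, e, src);
}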

View File

@ -849,7 +849,7 @@ CF_CLI(DUMP INTERFACES,,, [[Dump interface information]])
CF_CLI(DUMP NEIGHBORS,,, [[Dump neighbor cache]])
{ neigh_dump_all(); cli_msg(0, ""); } ;
CF_CLI(DUMP ATTRIBUTES,,, [[Dump attribute cache]])
{ rta_dump_all(); cli_msg(0, ""); } ;
{ ea_dump_all(); cli_msg(0, ""); } ;
CF_CLI(DUMP ROUTES,,, [[Dump routing table]])
{ rt_dump_all(); cli_msg(0, ""); } ;
CF_CLI(DUMP PROTOCOLS,,, [[Dump protocol information]])

View File

@ -174,7 +174,6 @@ struct ea_class ea_gen_flowspec_valid = {
pool *rta_pool;
static slab *rta_slab;
static slab *rte_src_slab;
static struct idm src_ids;
@ -943,17 +942,6 @@ ea_list_unref(ea_list *l)
}
}
static inline void
ea_free(ea_list *o)
{
if (o)
{
ea_list_unref(o);
ASSERT(!o->next);
mb_free(o);
}
}
void
ea_format_bitfield(const struct eattr *a, byte *buf, int bufsize, const char **names, int min, int max)
{
@ -1050,7 +1038,7 @@ ea_show_lc_set(struct cli *c, const struct adata *ad, byte *pos, byte *buf, byte
* If the protocol defining the attribute provides its own
* get_attr() hook, it's consulted first.
*/
void
static void
ea_show(struct cli *c, const eattr *e)
{
const struct adata *ad = (e->type & EAF_EMBEDDED) ? NULL : e->u.ptr;
@ -1141,10 +1129,11 @@ ea_dump(ea_list *e)
}
while (e)
{
debug("[%c%c%c]",
debug("[%c%c%c] uc=%d h=%08x",
(e->flags & EALF_SORTED) ? 'S' : 's',
(e->flags & EALF_BISECT) ? 'B' : 'b',
(e->flags & EALF_CACHED) ? 'C' : 'c');
(e->flags & EALF_CACHED) ? 'C' : 'c',
e->uc, e->hash_key);
for(i=0; i<e->count; i++)
{
eattr *a = &e->attrs[i];
@ -1236,12 +1225,12 @@ static uint rta_cache_count;
static uint rta_cache_size = 32;
static uint rta_cache_limit;
static uint rta_cache_mask;
static rta **rta_hash_table;
static ea_list **rta_hash_table;
static void
rta_alloc_hash(void)
{
rta_hash_table = mb_allocz(rta_pool, sizeof(rta *) * rta_cache_size);
rta_hash_table = mb_allocz(rta_pool, sizeof(ea_list *) * rta_cache_size);
if (rta_cache_size < 32768)
rta_cache_limit = rta_cache_size * 2;
else
@ -1249,44 +1238,14 @@ rta_alloc_hash(void)
rta_cache_mask = rta_cache_size - 1;
}
static inline uint
rta_hash(rta *a)
{
return ea_hash(a->eattrs);
}
static inline int
rta_same(rta *x, rta *y)
{
return ea_same(x->eattrs, y->eattrs);
}
static rta *
rta_copy(rta *o)
{
rta *r = sl_alloc(rta_slab);
memcpy(r, o, rta_size(o));
r->uc = 1;
if (!r->eattrs)
return r;
uint elen = ea_list_size(o->eattrs);
r->eattrs = mb_alloc(rta_pool, elen);
ea_list_copy(r->eattrs, o->eattrs, elen);
ea_list_ref(r->eattrs);
r->eattrs->flags |= EALF_CACHED;
return r;
}
static inline void
rta_insert(rta *r)
rta_insert(ea_list *r)
{
uint h = r->hash_key & rta_cache_mask;
r->next = rta_hash_table[h];
if (r->next)
r->next->pprev = &r->next;
r->pprev = &rta_hash_table[h];
r->next_hash = rta_hash_table[h];
if (r->next_hash)
r->next_hash->pprev_hash = &r->next_hash;
r->pprev_hash = &rta_hash_table[h];
rta_hash_table[h] = r;
}
@ -1295,8 +1254,8 @@ rta_rehash(void)
{
uint ohs = rta_cache_size;
uint h;
rta *r, *n;
rta **oht = rta_hash_table;
ea_list *r, *n;
ea_list **oht = rta_hash_table;
rta_cache_size = 2*rta_cache_size;
DBG("Rehashing rta cache from %d to %d entries.\n", ohs, rta_cache_size);
@ -1304,7 +1263,7 @@ rta_rehash(void)
for(h=0; h<ohs; h++)
for(r=oht[h]; r; r=n)
{
n = r->next;
n = r->next_hash;
rta_insert(r);
}
mb_free(oht);
@ -1323,24 +1282,29 @@ rta_rehash(void)
* The extended attribute lists attached to the &rta are automatically
* converted to the normalized form.
*/
rta *
rta_lookup(rta *o)
ea_list *
ea_lookup(ea_list *o)
{
rta *r;
ea_list *r;
uint h;
ASSERT(!o->cached);
if (o->eattrs)
o->eattrs = ea_normalize(o->eattrs);
ASSERT(!ea_is_cached(o));
o = ea_normalize(o);
h = ea_hash(o);
h = rta_hash(o);
for(r=rta_hash_table[h & rta_cache_mask]; r; r=r->next)
if (r->hash_key == h && rta_same(r, o))
return rta_clone(r);
for(r=rta_hash_table[h & rta_cache_mask]; r; r=r->next_hash)
if (r->hash_key == h && ea_same(r, o))
return ea_clone(r);
r = rta_copy(o);
uint elen = ea_list_size(o);
r = mb_alloc(rta_pool, elen);
ea_list_copy(r, o, elen);
ea_list_ref(r);
r->flags |= EALF_CACHED;
r->hash_key = h;
r->cached = 1;
r->uc = 1;
rta_insert(r);
if (++rta_cache_count > rta_cache_limit)
@ -1350,46 +1314,17 @@ rta_lookup(rta *o)
}
void
rta__free(rta *a)
ea__free(ea_list *a)
{
ASSERT(rta_cache_count && a->cached);
ASSERT(rta_cache_count && ea_is_cached(a));
rta_cache_count--;
*a->pprev = a->next;
if (a->next)
a->next->pprev = a->pprev;
ea_free(a->eattrs);
a->cached = 0;
sl_free(a);
}
*a->pprev_hash = a->next_hash;
if (a->next_hash)
a->next_hash->pprev_hash = a->pprev_hash;
rta *
rta_do_cow(rta *o, linpool *lp)
{
rta *r = lp_alloc(lp, rta_size(o));
memcpy(r, o, rta_size(o));
r->cached = 0;
r->uc = 0;
return r;
}
/**
* rta_dump - dump route attributes
* @a: attribute structure to dump
*
* This function takes a &rta and dumps its contents to the debug output.
*/
void
rta_dump(rta *a)
{
debug("uc=%d h=%04x",
a->uc, a->hash_key);
if (!a->cached)
debug(" !CACHED");
if (a->eattrs)
{
debug(" EA: ");
ea_dump(a->eattrs);
}
ASSERT(!a->next);
ea_list_unref(a);
mb_free(a);
}
/**
@ -1399,26 +1334,23 @@ rta_dump(rta *a)
* to the debug output.
*/
void
rta_dump_all(void)
ea_dump_all(void)
{
rta *a;
uint h;
debug("Route attribute cache (%d entries, rehash at %d):\n", rta_cache_count, rta_cache_limit);
for(h=0; h<rta_cache_size; h++)
for(a=rta_hash_table[h]; a; a=a->next)
for (uint h=0; h < rta_cache_size; h++)
for (ea_list *a = rta_hash_table[h]; a; a = a->next_hash)
{
debug("%p ", a);
rta_dump(a);
ea_dump(a);
debug("\n");
}
debug("\n");
}
void
rta_show(struct cli *c, rta *a)
ea_show_list(struct cli *c, ea_list *eal)
{
for(ea_list *eal = a->eattrs; eal; eal=eal->next)
for( ; eal; eal=eal->next)
for(int i=0; i<eal->count; i++)
ea_show(c, &eal->attrs[i]);
}
@ -1434,8 +1366,6 @@ rta_init(void)
{
rta_pool = rp_new(&root_pool, "Attributes");
rta_slab = sl_new(rta_pool, sizeof(rta));
rta_alloc_hash();
rte_src_init();
ea_class_init();

View File

@ -71,7 +71,6 @@ dev_ifa_notify(struct proto *P, uint flags, struct ifa *ad)
}
else if (flags & IF_CHANGE_UP)
{
rta *a;
rte *e;
DBG("dev_if_notify: %s:%I going up\n", ad->iface->name, ad->ip);
@ -82,18 +81,17 @@ dev_ifa_notify(struct proto *P, uint flags, struct ifa *ad)
/* Use iface ID as local source ID */
struct rte_src *src = rt_get_source(P, ad->iface->index);
rta a0 = {};
ea_list *ea = NULL;
struct nexthop_adata nhad = {
.nh = { .iface = ad->iface, },
.ad = { .length = (void *) NEXTHOP_NEXT(&nhad.nh) - (void *) nhad.ad.data, },
};
ea_set_attr_u32(&a0.eattrs, &ea_gen_preference, 0, c->preference);
ea_set_attr_u32(&a0.eattrs, &ea_gen_source, 0, RTS_DEVICE);
ea_set_attr_data(&a0.eattrs, &ea_gen_nexthop, 0, nhad.ad.data, nhad.ad.length);
ea_set_attr_u32(&ea, &ea_gen_preference, 0, c->preference);
ea_set_attr_u32(&ea, &ea_gen_source, 0, RTS_DEVICE);
ea_set_attr_data(&ea, &ea_gen_nexthop, 0, nhad.ad.data, nhad.ad.length);
a = rta_lookup(&a0);
e = rte_get_temp(a, src);
e = rte_get_temp(rta_lookup(ea), src);
e->pflags = 0;
rte_update2(c, net, e, src);
}

View File

@ -42,23 +42,23 @@ rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, int primary
{
byte from[IPA_MAX_TEXT_LENGTH+8];
byte tm[TM_DATETIME_BUFFER_SIZE], info[256];
rta *a = e->attrs;
ea_list *a = e->attrs;
int sync_error = d->kernel ? krt_get_sync_error(d->kernel, e) : 0;
void (*get_route_info)(struct rte *, byte *buf);
eattr *nhea = ea_find(a->eattrs, &ea_gen_nexthop);
eattr *nhea = ea_find(a, &ea_gen_nexthop);
struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
int dest = NEXTHOP_IS_REACHABLE(nhad) ? RTD_UNICAST : nhad->dest;
tm_format_time(tm, &config->tf_route, e->lastmod);
ip_addr a_from = ea_get_ip(a->eattrs, &ea_gen_from, IPA_NONE);
ip_addr a_from = ea_get_ip(a, &ea_gen_from, IPA_NONE);
if (ipa_nonzero(a_from) && (!nhad || !ipa_equal(a_from, nhad->nh.gw)))
bsprintf(from, " from %I", a_from);
else
from[0] = 0;
/* Need to normalize the extended attributes */
if (d->verbose && !rta_is_cached(a) && a->eattrs)
a->eattrs = ea_normalize(a->eattrs);
if (d->verbose && !rta_is_cached(a) && a)
a = ea_normalize(a);
get_route_info = e->src->proto->proto->get_route_info;
if (get_route_info)
@ -99,7 +99,7 @@ rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, int primary
}
if (d->verbose)
rta_show(c, a);
ea_show_list(c, a);
}
static void

View File

@ -564,7 +564,7 @@ rte_find(net *net, struct rte_src *src)
* the protocol.
*/
rte *
rte_get_temp(rta *a, struct rte_src *src)
rte_get_temp(ea_list *a, struct rte_src *src)
{
rte *e = sl_alloc(rte_slab);
@ -608,15 +608,15 @@ rte_do_cow(rte *r)
* Result: a pointer to the new writable &rte with writable &rta.
*/
rte *
rte_cow_rta(rte *r, linpool *lp)
rte_cow_rta(rte *r)
{
if (!rta_is_cached(r->attrs))
return r;
r = rte_cow(r);
rta *a = rta_do_cow(r->attrs, lp);
/* This looks stupid but should DWIW. */
rta_free(r->attrs);
r->attrs = a;
return r;
}
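A minimal caller sketch of the simplified interface (hypothetical function and metric; the same cow-then-set pattern appears in rt_export_merged() below):

/* Hypothetical caller: make the route writable, then modify one attribute
 * directly on its ea_list -- no separate rta copy or linpool is needed. */
static rte *
example_bump_metric(rte *r, u32 metric)
{
  r = rte_cow_rta(r);
  ea_set_attr_u32(&r->attrs, &ea_gen_igp_metric, 0, metric);
  return r;
}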
@ -971,7 +971,7 @@ rt_export_merged(struct channel *c, net *net, rte **rt_free, linpool *pool, int
if (rte_is_reachable(rt))
{
eattr *nhea = ea_find(rt->attrs->eattrs, &ea_gen_nexthop);
eattr *nhea = ea_find(rt->attrs, &ea_gen_nexthop);
ASSERT_DIE(nhea);
if (nhs)
@ -987,13 +987,13 @@ rt_export_merged(struct channel *c, net *net, rte **rt_free, linpool *pool, int
if (nhs)
{
eattr *nhea = ea_find(best->attrs->eattrs, &ea_gen_nexthop);
eattr *nhea = ea_find(best->attrs, &ea_gen_nexthop);
ASSERT_DIE(nhea);
nhs = nexthop_merge(nhs, (struct nexthop_adata *) nhea->u.ptr, c->merge_limit, pool);
best = rte_cow_rta(best, pool);
ea_set_attr(&best->attrs->eattrs,
best = rte_cow_rta(best);
ea_set_attr(&best->attrs,
EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, &nhs->ad));
}
@ -1177,7 +1177,7 @@ rte_validate(rte *e)
return 0;
}
eattr *nhea = ea_find(e->attrs->eattrs, &ea_gen_nexthop);
eattr *nhea = ea_find(e->attrs, &ea_gen_nexthop);
int dest = nhea_dest(nhea);
if (net_type_match(n->n.addr, NB_DEST) == !dest)
@ -1808,7 +1808,7 @@ rte_dump(rte *e)
net *n = e->net;
debug("%-1N ", n->n.addr);
debug("PF=%02x ", e->pflags);
rta_dump(e->attrs);
ea_dump(e->attrs);
debug("\n");
}
@ -2416,21 +2416,21 @@ ea_set_hostentry(ea_list **to, struct rtable *dep, struct rtable *tab, ip_addr g
static void
rta_apply_hostentry(rta *a, struct hostentry_adata *head)
rta_apply_hostentry(ea_list **to, struct hostentry_adata *head)
{
struct hostentry *he = head->he;
u32 *labels = head->labels;
u32 lnum = (u32 *) (head->ad.data + head->ad.length) - labels;
ea_set_attr_u32(&a->eattrs, &ea_gen_igp_metric, 0, he->igp_metric);
ea_set_attr_u32(to, &ea_gen_igp_metric, 0, he->igp_metric);
if (!he->src)
{
ea_set_dest(&a->eattrs, 0, RTD_UNREACHABLE);
ea_set_dest(to, 0, RTD_UNREACHABLE);
return;
}
eattr *he_nh_ea = ea_find(he->src->eattrs, &ea_gen_nexthop);
eattr *he_nh_ea = ea_find(he->src, &ea_gen_nexthop);
ASSERT_DIE(he_nh_ea);
struct nexthop_adata *nhad = (struct nexthop_adata *) he_nh_ea->u.ptr;
@ -2439,7 +2439,7 @@ rta_apply_hostentry(rta *a, struct hostentry_adata *head)
if ((idest != RTD_UNICAST) ||
!lnum && he->nexthop_linkable)
{ /* Just link the nexthop chain, no label append happens. */
ea_copy_attr(&a->eattrs, he->src->eattrs, &ea_gen_nexthop);
ea_copy_attr(to, he->src, &ea_gen_nexthop);
return;
}
@ -2466,7 +2466,7 @@ rta_apply_hostentry(rta *a, struct hostentry_adata *head)
.dest = RTD_UNREACHABLE,
};
ea_set_attr_data(&a->eattrs, &ea_gen_nexthop, 0, &nha.ad.data, nha.ad.length);
ea_set_attr_data(to, &ea_gen_nexthop, 0, &nha.ad.data, nha.ad.length);
return;
}
@ -2500,22 +2500,22 @@ rta_apply_hostentry(rta *a, struct hostentry_adata *head)
/* Fix final length */
new->ad.length = (void *) dest - (void *) new->ad.data;
ea_set_attr(&a->eattrs, EA_LITERAL_DIRECT_ADATA(
ea_set_attr(to, EA_LITERAL_DIRECT_ADATA(
&ea_gen_nexthop, 0, &new->ad));
}
static inline struct hostentry_adata *
rta_next_hop_outdated(rta *a)
rta_next_hop_outdated(ea_list *a)
{
/* First retrieve the hostentry */
eattr *heea = ea_find(a->eattrs, &ea_gen_hostentry);
eattr *heea = ea_find(a, &ea_gen_hostentry);
if (!heea)
return NULL;
struct hostentry_adata *head = (struct hostentry_adata *) heea->u.ptr;
/* If no nexthop is present, we have to create one */
eattr *a_nh_ea = ea_find(a->eattrs, &ea_gen_nexthop);
eattr *a_nh_ea = ea_find(a, &ea_gen_nexthop);
if (!a_nh_ea)
return head;
@ -2526,10 +2526,10 @@ rta_next_hop_outdated(rta *a)
return NEXTHOP_IS_REACHABLE(nhad) ? head : NULL;
/* Comparing our nexthop with the hostentry nexthop */
eattr *he_nh_ea = ea_find(head->he->src->eattrs, &ea_gen_nexthop);
eattr *he_nh_ea = ea_find(head->he->src, &ea_gen_nexthop);
return (
(ea_get_int(a->eattrs, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN) != head->he->igp_metric) ||
(ea_get_int(a, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN) != head->he->igp_metric) ||
(!head->he->nexthop_linkable) ||
(!he_nh_ea != !a_nh_ea) ||
(he_nh_ea && a_nh_ea && !adata_same(he_nh_ea->u.ptr, a_nh_ea->u.ptr)))
@ -2543,13 +2543,12 @@ rt_next_hop_update_rte(rtable *tab UNUSED, rte *old)
if (!head)
return NULL;
rta a = *old->attrs;
a.cached = 0;
rta_apply_hostentry(&a, head);
ea_list *ea = old->attrs;
rta_apply_hostentry(&ea, head);
rte *e = sl_alloc(rte_slab);
memcpy(e, old, sizeof(rte));
e->attrs = rta_lookup(&a);
e->attrs = rta_lookup(ea);
rt_lock_source(e->src);
return e;
@ -2579,23 +2578,23 @@ net_flow_has_dst_prefix(const net_addr *n)
}
static inline int
rta_as_path_is_empty(rta *a)
rta_as_path_is_empty(ea_list *a)
{
eattr *e = ea_find(a->eattrs, "bgp_path");
eattr *e = ea_find(a, "bgp_path");
return !e || (as_path_getlen(e->u.ptr) == 0);
}
static inline u32
rta_get_first_asn(rta *a)
rta_get_first_asn(ea_list *a)
{
eattr *e = ea_find(a->eattrs, "bgp_path");
eattr *e = ea_find(a, "bgp_path");
u32 asn;
return (e && as_path_get_first_regular(e->u.ptr, &asn)) ? asn : 0;
}
int
rt_flowspec_check(rtable *tab_ip, rtable *tab_flow, const net_addr *n, rta *a, int interior)
rt_flowspec_check(rtable *tab_ip, rtable *tab_flow, const net_addr *n, ea_list *a, int interior)
{
ASSERT(rt_is_ip(tab_ip));
ASSERT(rt_is_flow(tab_flow));
@ -2632,13 +2631,13 @@ rt_flowspec_check(rtable *tab_ip, rtable *tab_flow, const net_addr *n, rta *a, i
return 0;
/* Find ORIGINATOR_ID values */
u32 orig_a = ea_get_int(a->eattrs, "bgp_originator_id", 0);
u32 orig_b = ea_get_int(rb->attrs->eattrs, "bgp_originator_id", 0);
u32 orig_a = ea_get_int(a, "bgp_originator_id", 0);
u32 orig_b = ea_get_int(rb->attrs, "bgp_originator_id", 0);
/* Originator is either ORIGINATOR_ID (if present), or BGP neighbor address (if not) */
if ((orig_a != orig_b) || (!orig_a && !orig_b && !ipa_equal(
ea_get_ip(a->eattrs, &ea_gen_from, IPA_NONE),
ea_get_ip(rb->attrs->eattrs, &ea_gen_from, IPA_NONE)
ea_get_ip(a, &ea_gen_from, IPA_NONE),
ea_get_ip(rb->attrs, &ea_gen_from, IPA_NONE)
)))
return 0;
@ -2691,15 +2690,12 @@ rt_flowspec_update_rte(rtable *tab, rte *r)
if (old == valid)
return NULL;
rta *a = alloca(RTA_MAX_SIZE);
memcpy(a, r->attrs, rta_size(r->attrs));
a->cached = 0;
ea_set_attr_u32(&a->eattrs, &ea_gen_flowspec_valid, 0, valid);
ea_list *a = r->attrs;
ea_set_attr_u32(&a, &ea_gen_flowspec_valid, 0, valid);
rte *new = sl_alloc(rte_slab);
memcpy(new, r, sizeof(rte));
new->attrs = rta_lookup(a);
new->attrs = ea_lookup(a);
return new;
#else
@ -3506,7 +3502,7 @@ if_local_addr(ip_addr a, struct iface *i)
u32
rt_get_igp_metric(rte *rt)
{
eattr *ea = ea_find(rt->attrs->eattrs, "igp_metric");
eattr *ea = ea_find(rt->attrs, "igp_metric");
if (ea)
return ea->u.data;
@ -3523,7 +3519,7 @@ rt_get_igp_metric(rte *rt)
static int
rt_update_hostentry(rtable *tab, struct hostentry *he)
{
rta *old_src = he->src;
ea_list *old_src = he->src;
int direct = 0;
int pxlen = 0;
@ -3538,10 +3534,10 @@ rt_update_hostentry(rtable *tab, struct hostentry *he)
if (n)
{
rte *e = n->routes;
rta *a = e->attrs;
ea_list *a = e->attrs;
pxlen = n->n.addr->pxlen;
if (ea_find(a->eattrs, &ea_gen_hostentry))
if (ea_find(a, &ea_gen_hostentry))
{
/* Recursive route should not depend on another recursive route */
log(L_WARN "Next hop address %I resolvable through recursive route for %N",
@ -3549,7 +3545,7 @@ rt_update_hostentry(rtable *tab, struct hostentry *he)
goto done;
}
eattr *nhea = ea_find(a->eattrs, &ea_gen_nexthop);
eattr *nhea = ea_find(a, &ea_gen_nexthop);
ASSERT_DIE(nhea);
struct nexthop_adata *nhad = (void *) nhea->u.ptr;

View File

@ -143,7 +143,7 @@ struct hostentry {
struct hostentry *next; /* Next in hash chain */
unsigned hash_key; /* Hash key */
unsigned uc; /* Use count */
struct rta *src; /* Source rta entry */
ea_list *src; /* Source attributes */
byte nexthop_linkable; /* Nexthop list is completely non-device */
u32 igp_metric; /* Chosen route IGP metric */
};
@ -185,7 +185,7 @@ static inline net *net_get(rtable *tab, const net_addr *addr) { return (net *) f
net *net_get(rtable *tab, const net_addr *addr);
net *net_route(rtable *tab, const net_addr *n);
rte *rte_find(net *net, struct rte_src *src);
rte *rte_get_temp(struct rta *, struct rte_src *src);
rte *rte_get_temp(ea_list *, struct rte_src *src);
void rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
/* rte_update() moved to protocol.h to avoid dependency conflicts */
int rt_examine(rtable *t, net_addr *a, struct proto *p, const struct filter *filter);
@ -198,7 +198,7 @@ void rte_dump(rte *);
void rte_free(rte *);
rte *rte_do_cow(rte *);
static inline rte * rte_cow(rte *r) { return (r->flags & REF_COW) ? rte_do_cow(r) : r; }
rte *rte_cow_rta(rte *r, linpool *lp);
rte *rte_cow_rta(rte *r);
void rt_dump(rtable *);
void rt_dump_all(void);
int rt_feed_channel(struct channel *c);
@ -290,18 +290,7 @@ struct hostentry_adata {
void
ea_set_hostentry(ea_list **to, struct rtable *dep, struct rtable *tab, ip_addr gw, ip_addr ll, u32 lnum, u32 labels[lnum]);
/*
struct hostentry * rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep);
void rta_apply_hostentry(rta *a, struct hostentry *he, u32 lnum, u32 labels[lnum]);
static inline void
rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr gw, ip_addr ll, u32 lnum, u32 labels[lnum])
{
rta_apply_hostentry(a, rt_get_hostentry(tab, gw, ll, dep), lnum, labels);
}
*/
int rt_flowspec_check(rtable *tab_ip, rtable *tab_flow, const net_addr *n, rta *a, int interior);
int rt_flowspec_check(rtable *tab_ip, rtable *tab_flow, const net_addr *n, ea_list *a, int interior);
/*

View File

@ -677,10 +677,7 @@ babel_announce_rte(struct babel_proto *p, struct babel_entry *e)
}
};
rta a0 = { .eattrs = &eattrs.l, };
rta *a = rta_lookup(&a0);
rte *rte = rte_get_temp(a, p->p.main_source);
rte *rte = rte_get_temp(rta_lookup(&eattrs.l), p->p.main_source);
e->unreachable = 0;
rte_update2(c, e->n.addr, rte, p->p.main_source);
@ -688,14 +685,13 @@ babel_announce_rte(struct babel_proto *p, struct babel_entry *e)
else if (e->valid && (e->router_id != p->router_id))
{
/* Unreachable */
rta a0 = {};
ea_list *ea = NULL;
ea_set_attr_u32(&a0.eattrs, &ea_gen_preference, 0, 1);
ea_set_attr_u32(&a0.eattrs, &ea_gen_source, 0, RTS_BABEL);
ea_set_dest(&a0.eattrs, 0, RTD_UNREACHABLE);
ea_set_attr_u32(&ea, &ea_gen_preference, 0, 1);
ea_set_attr_u32(&ea, &ea_gen_source, 0, RTS_BABEL);
ea_set_dest(&ea, 0, RTD_UNREACHABLE);
rta *a = rta_lookup(&a0);
rte *rte = rte_get_temp(a, p->p.main_source);
rte *rte = rte_get_temp(rta_lookup(ea), p->p.main_source);
rte->pflags = 0;
e->unreachable = 1;
@ -2025,13 +2021,13 @@ static void
babel_get_route_info(rte *rte, byte *buf)
{
u64 rid = 0;
eattr *e = ea_find(rte->attrs->eattrs, &ea_babel_router_id);
eattr *e = ea_find(rte->attrs, &ea_babel_router_id);
if (e)
memcpy(&rid, e->u.ptr->data, sizeof(u64));
buf += bsprintf(buf, " (%d/%d) [%lR]",
rt_get_preference(rte),
ea_get_int(rte->attrs->eattrs, &ea_babel_metric, BABEL_INFINITY), rid);
ea_get_int(rte->attrs, &ea_babel_metric, BABEL_INFINITY), rid);
}
static void
@ -2263,7 +2259,7 @@ babel_preexport(struct proto *P, struct rte *new)
return 0;
/* Reject our own unreachable routes */
eattr *ea = ea_find(new->attrs->eattrs, &ea_gen_nexthop);
eattr *ea = ea_find(new->attrs, &ea_gen_nexthop);
struct nexthop_adata *nhad = (void *) ea->u.ptr;
if (!NEXTHOP_IS_REACHABLE(nhad))
return -1;
@ -2286,13 +2282,13 @@ babel_rt_notify(struct proto *P, struct channel *c UNUSED, struct network *net,
{
/* Update */
uint rt_seqno;
uint rt_metric = ea_get_int(new->attrs->eattrs, &ea_babel_metric, 0);
uint rt_metric = ea_get_int(new->attrs, &ea_babel_metric, 0);
u64 rt_router_id = 0;
if (new->src->proto == P)
{
rt_seqno = ea_get_int(new->attrs->eattrs, &ea_babel_seqno, 0);
eattr *e = ea_find(new->attrs->eattrs, &ea_babel_router_id);
rt_seqno = ea_get_int(new->attrs, &ea_babel_seqno, 0);
eattr *e = ea_find(new->attrs, &ea_babel_router_id);
if (e)
memcpy(&rt_router_id, e->u.ptr->data, sizeof(u64));
}
@ -2343,8 +2339,8 @@ babel_rt_notify(struct proto *P, struct channel *c UNUSED, struct network *net,
static int
babel_rte_better(struct rte *new, struct rte *old)
{
uint new_metric = ea_get_int(new->attrs->eattrs, &ea_babel_metric, BABEL_INFINITY);
uint old_metric = ea_get_int(old->attrs->eattrs, &ea_babel_metric, BABEL_INFINITY);
uint new_metric = ea_get_int(new->attrs, &ea_babel_metric, BABEL_INFINITY);
uint old_metric = ea_get_int(old->attrs, &ea_babel_metric, BABEL_INFINITY);
return new_metric < old_metric;
}
@ -2352,7 +2348,7 @@ babel_rte_better(struct rte *new, struct rte *old)
static u32
babel_rte_igp_metric(struct rte *rt)
{
return ea_get_int(rt->attrs->eattrs, &ea_babel_metric, BABEL_INFINITY);
return ea_get_int(rt->attrs, &ea_babel_metric, BABEL_INFINITY);
}

View File

@ -374,7 +374,7 @@ bgp_aigp_set_metric(struct linpool *pool, const struct adata *ad, u64 metric)
int
bgp_total_aigp_metric_(rte *e, u64 *metric, const struct adata **ad)
{
eattr *a = ea_find(e->attrs->eattrs, BGP_EA_ID(BA_AIGP));
eattr *a = ea_find(e->attrs, BGP_EA_ID(BA_AIGP));
if (!a)
return 0;
@ -1492,13 +1492,13 @@ loop:
}
void
bgp_finish_attrs(struct bgp_parse_state *s, rta *a)
bgp_finish_attrs(struct bgp_parse_state *s, ea_list **to)
{
/* AIGP test here instead of in bgp_decode_aigp() - we need to know channel */
if (BIT32_TEST(s->attrs_seen, BA_AIGP) && !s->channel->cf->aigp)
{
REPORT("Discarding AIGP attribute received on non-AIGP session");
bgp_unset_attr(&a->eattrs, BA_AIGP);
bgp_unset_attr(to, BA_AIGP);
}
}
@ -1735,14 +1735,14 @@ bgp_preexport(struct proto *P, rte *e)
/* Generally, this should be handled when path is received, but we check it
also here as rr_cluster_id may be undefined or different in src. */
if (p->rr_cluster_id && bgp_cluster_list_loopy(p, e->attrs->eattrs))
if (p->rr_cluster_id && bgp_cluster_list_loopy(p, e->attrs))
return -1;
}
/* Handle well-known communities, RFC 1997 */
struct eattr *c;
if (p->cf->interpret_communities &&
(c = ea_find(e->attrs->eattrs, BGP_EA_ID(BA_COMMUNITY))))
(c = ea_find(e->attrs, BGP_EA_ID(BA_COMMUNITY))))
{
const struct adata *d = c->u.ptr;
@ -1893,7 +1893,7 @@ bgp_rt_notify(struct proto *P, struct channel *C, net *n, rte *new, rte *old)
if (new)
{
struct ea_list *attrs = bgp_update_attrs(p, c, new, new->attrs->eattrs, tmp_linpool);
struct ea_list *attrs = bgp_update_attrs(p, c, new, new->attrs, tmp_linpool);
/* Error during attribute processing */
if (!attrs)
@ -1919,7 +1919,7 @@ bgp_rt_notify(struct proto *P, struct channel *C, net *n, rte *new, rte *old)
static inline u32
bgp_get_neighbor(rte *r)
{
eattr *e = ea_find(r->attrs->eattrs, BGP_EA_ID(BA_AS_PATH));
eattr *e = ea_find(r->attrs, BGP_EA_ID(BA_AS_PATH));
u32 as;
if (e && as_path_get_first_regular(e->u.ptr, &as))
@ -1940,7 +1940,7 @@ rte_stale(rte *r)
return 0;
/* If staleness is unknown, compute and cache it */
eattr *a = ea_find(r->attrs->eattrs, BGP_EA_ID(BA_COMMUNITY));
eattr *a = ea_find(r->attrs, BGP_EA_ID(BA_COMMUNITY));
if (a && int_set_contains(a->u.ptr, BGP_COMM_LLGR_STALE))
{
r->pflags |= BGP_REF_STALE;
@ -1986,8 +1986,8 @@ bgp_rte_better(rte *new, rte *old)
return 1;
/* Start with local preferences */
x = ea_find(new->attrs->eattrs, BGP_EA_ID(BA_LOCAL_PREF));
y = ea_find(old->attrs->eattrs, BGP_EA_ID(BA_LOCAL_PREF));
x = ea_find(new->attrs, BGP_EA_ID(BA_LOCAL_PREF));
y = ea_find(old->attrs, BGP_EA_ID(BA_LOCAL_PREF));
n = x ? x->u.data : new_bgp->cf->default_local_pref;
o = y ? y->u.data : old_bgp->cf->default_local_pref;
if (n > o)
@ -2006,8 +2006,8 @@ bgp_rte_better(rte *new, rte *old)
/* RFC 4271 9.1.2.2. a) Use AS path lengths */
if (new_bgp->cf->compare_path_lengths || old_bgp->cf->compare_path_lengths)
{
x = ea_find(new->attrs->eattrs, BGP_EA_ID(BA_AS_PATH));
y = ea_find(old->attrs->eattrs, BGP_EA_ID(BA_AS_PATH));
x = ea_find(new->attrs, BGP_EA_ID(BA_AS_PATH));
y = ea_find(old->attrs, BGP_EA_ID(BA_AS_PATH));
n = x ? as_path_getlen(x->u.ptr) : AS_PATH_MAXLEN;
o = y ? as_path_getlen(y->u.ptr) : AS_PATH_MAXLEN;
if (n < o)
@ -2017,8 +2017,8 @@ bgp_rte_better(rte *new, rte *old)
}
/* RFC 4271 9.1.2.2. b) Use origins */
x = ea_find(new->attrs->eattrs, BGP_EA_ID(BA_ORIGIN));
y = ea_find(old->attrs->eattrs, BGP_EA_ID(BA_ORIGIN));
x = ea_find(new->attrs, BGP_EA_ID(BA_ORIGIN));
y = ea_find(old->attrs, BGP_EA_ID(BA_ORIGIN));
n = x ? x->u.data : ORIGIN_INCOMPLETE;
o = y ? y->u.data : ORIGIN_INCOMPLETE;
if (n < o)
@ -2040,8 +2040,8 @@ bgp_rte_better(rte *new, rte *old)
if (new_bgp->cf->med_metric || old_bgp->cf->med_metric ||
(bgp_get_neighbor(new) == bgp_get_neighbor(old)))
{
x = ea_find(new->attrs->eattrs, BGP_EA_ID(BA_MULTI_EXIT_DISC));
y = ea_find(old->attrs->eattrs, BGP_EA_ID(BA_MULTI_EXIT_DISC));
x = ea_find(new->attrs, BGP_EA_ID(BA_MULTI_EXIT_DISC));
y = ea_find(old->attrs, BGP_EA_ID(BA_MULTI_EXIT_DISC));
n = x ? x->u.data : new_bgp->cf->default_med;
o = y ? y->u.data : old_bgp->cf->default_med;
if (n < o)
@ -2066,8 +2066,8 @@ bgp_rte_better(rte *new, rte *old)
/* RFC 4271 9.1.2.2. f) Compare BGP identifiers */
/* RFC 4456 9. a) Use ORIGINATOR_ID instead of local neighbor ID */
x = ea_find(new->attrs->eattrs, BGP_EA_ID(BA_ORIGINATOR_ID));
y = ea_find(old->attrs->eattrs, BGP_EA_ID(BA_ORIGINATOR_ID));
x = ea_find(new->attrs, BGP_EA_ID(BA_ORIGINATOR_ID));
y = ea_find(old->attrs, BGP_EA_ID(BA_ORIGINATOR_ID));
n = x ? x->u.data : new_bgp->remote_id;
o = y ? y->u.data : old_bgp->remote_id;
@ -2084,8 +2084,8 @@ bgp_rte_better(rte *new, rte *old)
return 0;
/* RFC 4456 9. b) Compare cluster list lengths */
x = ea_find(new->attrs->eattrs, BGP_EA_ID(BA_CLUSTER_LIST));
y = ea_find(old->attrs->eattrs, BGP_EA_ID(BA_CLUSTER_LIST));
x = ea_find(new->attrs, BGP_EA_ID(BA_CLUSTER_LIST));
y = ea_find(old->attrs, BGP_EA_ID(BA_CLUSTER_LIST));
n = x ? int_set_get_size(x->u.ptr) : 0;
o = y ? int_set_get_size(y->u.ptr) : 0;
if (n < o)
@ -2119,8 +2119,8 @@ bgp_rte_mergable(rte *pri, rte *sec)
return 0;
/* Start with local preferences */
x = ea_find(pri->attrs->eattrs, BGP_EA_ID(BA_LOCAL_PREF));
y = ea_find(sec->attrs->eattrs, BGP_EA_ID(BA_LOCAL_PREF));
x = ea_find(pri->attrs, BGP_EA_ID(BA_LOCAL_PREF));
y = ea_find(sec->attrs, BGP_EA_ID(BA_LOCAL_PREF));
p = x ? x->u.data : pri_bgp->cf->default_local_pref;
s = y ? y->u.data : sec_bgp->cf->default_local_pref;
if (p != s)
@ -2129,8 +2129,8 @@ bgp_rte_mergable(rte *pri, rte *sec)
/* RFC 4271 9.1.2.2. a) Use AS path lengths */
if (pri_bgp->cf->compare_path_lengths || sec_bgp->cf->compare_path_lengths)
{
x = ea_find(pri->attrs->eattrs, BGP_EA_ID(BA_AS_PATH));
y = ea_find(sec->attrs->eattrs, BGP_EA_ID(BA_AS_PATH));
x = ea_find(pri->attrs, BGP_EA_ID(BA_AS_PATH));
y = ea_find(sec->attrs, BGP_EA_ID(BA_AS_PATH));
p = x ? as_path_getlen(x->u.ptr) : AS_PATH_MAXLEN;
s = y ? as_path_getlen(y->u.ptr) : AS_PATH_MAXLEN;
@ -2142,8 +2142,8 @@ bgp_rte_mergable(rte *pri, rte *sec)
}
/* RFC 4271 9.1.2.2. b) Use origins */
x = ea_find(pri->attrs->eattrs, BGP_EA_ID(BA_ORIGIN));
y = ea_find(sec->attrs->eattrs, BGP_EA_ID(BA_ORIGIN));
x = ea_find(pri->attrs, BGP_EA_ID(BA_ORIGIN));
y = ea_find(sec->attrs, BGP_EA_ID(BA_ORIGIN));
p = x ? x->u.data : ORIGIN_INCOMPLETE;
s = y ? y->u.data : ORIGIN_INCOMPLETE;
if (p != s)
@ -2153,8 +2153,8 @@ bgp_rte_mergable(rte *pri, rte *sec)
if (pri_bgp->cf->med_metric || sec_bgp->cf->med_metric ||
(bgp_get_neighbor(pri) == bgp_get_neighbor(sec)))
{
x = ea_find(pri->attrs->eattrs, BGP_EA_ID(BA_MULTI_EXIT_DISC));
y = ea_find(sec->attrs->eattrs, BGP_EA_ID(BA_MULTI_EXIT_DISC));
x = ea_find(pri->attrs, BGP_EA_ID(BA_MULTI_EXIT_DISC));
y = ea_find(sec->attrs, BGP_EA_ID(BA_MULTI_EXIT_DISC));
p = x ? x->u.data : pri_bgp->cf->default_med;
s = y ? y->u.data : sec_bgp->cf->default_med;
if (p != s)
@ -2320,7 +2320,7 @@ bgp_rte_recalculate(rtable *table, net *net, rte *new, rte *old, rte *old_best)
struct rte *
bgp_rte_modify_stale(struct rte *r, struct linpool *pool)
{
eattr *a = ea_find(r->attrs->eattrs, BGP_EA_ID(BA_COMMUNITY));
eattr *a = ea_find(r->attrs, BGP_EA_ID(BA_COMMUNITY));
const struct adata *ad = a ? a->u.ptr : NULL;
uint flags = a ? a->flags : BAF_PARTIAL;
@ -2330,8 +2330,8 @@ bgp_rte_modify_stale(struct rte *r, struct linpool *pool)
if (ad && int_set_contains(ad, BGP_COMM_LLGR_STALE))
return r;
r = rte_cow_rta(r, pool);
bgp_set_attr_ptr(&(r->attrs->eattrs), BA_COMMUNITY, flags,
r = rte_cow_rta(r);
bgp_set_attr_ptr(&(r->attrs), BA_COMMUNITY, flags,
int_set_add(pool, ad, BGP_COMM_LLGR_STALE));
r->pflags |= BGP_REF_STALE;
@ -2388,8 +2388,8 @@ bgp_process_as4_attrs(ea_list **attrs, struct linpool *pool)
void
bgp_get_route_info(rte *e, byte *buf)
{
eattr *p = ea_find(e->attrs->eattrs, BGP_EA_ID(BA_AS_PATH));
eattr *o = ea_find(e->attrs->eattrs, BGP_EA_ID(BA_ORIGIN));
eattr *p = ea_find(e->attrs, BGP_EA_ID(BA_AS_PATH));
eattr *o = ea_find(e->attrs, BGP_EA_ID(BA_ORIGIN));
u32 origas;
buf += bsprintf(buf, " (%d", rt_get_preference(e));

View File

@ -67,10 +67,10 @@ struct bgp_af_desc {
u8 no_igp;
const char *name;
uint (*encode_nlri)(struct bgp_write_state *s, struct bgp_bucket *buck, byte *buf, uint size);
void (*decode_nlri)(struct bgp_parse_state *s, byte *pos, uint len, rta *a);
void (*decode_nlri)(struct bgp_parse_state *s, byte *pos, uint len, ea_list *a);
void (*update_next_hop)(struct bgp_export_state *s, eattr *nh, ea_list **to);
uint (*encode_next_hop)(struct bgp_write_state *s, eattr *nh, byte *buf, uint size);
void (*decode_next_hop)(struct bgp_parse_state *s, byte *pos, uint len, rta *a);
void (*decode_next_hop)(struct bgp_parse_state *s, byte *pos, uint len, ea_list **to);
};
@ -461,7 +461,7 @@ struct bgp_parse_state {
/* Cached state for bgp_rte_update() */
u32 last_id;
struct rte_src *last_src;
rta *cached_rta;
ea_list *cached_ea;
};
#define BGP_PORT 179
@ -519,7 +519,7 @@ struct rte_source *bgp_get_source(struct bgp_proto *p, u32 path_id);
static inline int
rte_resolvable(rte *rt)
{
eattr *nhea = ea_find(rt->attrs->eattrs, &ea_gen_nexthop);
eattr *nhea = ea_find(rt->attrs, &ea_gen_nexthop);
struct nexthop_adata *nhad = (void *) nhea->u.ptr;
return NEXTHOP_IS_REACHABLE(nhad) || (nhad->dest != RTD_UNREACHABLE);
}
@ -551,7 +551,7 @@ int bgp_encode_mp_reach_mrt(struct bgp_write_state *s, eattr *a, byte *buf, uint
int bgp_encode_attrs(struct bgp_write_state *s, ea_list *attrs, byte *buf, byte *end);
ea_list * bgp_decode_attrs(struct bgp_parse_state *s, byte *data, uint len);
void bgp_finish_attrs(struct bgp_parse_state *s, rta *a);
void bgp_finish_attrs(struct bgp_parse_state *s, ea_list **to);
void bgp_init_bucket_table(struct bgp_channel *c);
void bgp_free_bucket_table(struct bgp_channel *c);

View File

@ -943,7 +943,7 @@ bgp_rx_open(struct bgp_conn *conn, byte *pkt, uint len)
#define MISMATCHED_AF " - mismatched address family (%I for %s)"
static void
bgp_apply_next_hop(struct bgp_parse_state *s, rta *a, ip_addr gw, ip_addr ll)
bgp_apply_next_hop(struct bgp_parse_state *s, ea_list **to, ip_addr gw, ip_addr ll)
{
struct bgp_proto *p = s->proto;
struct bgp_channel *c = s->channel;
@ -966,7 +966,7 @@ bgp_apply_next_hop(struct bgp_parse_state *s, rta *a, ip_addr gw, ip_addr ll)
if (nbr->scope == SCOPE_HOST)
WITHDRAW(BAD_NEXT_HOP " - address %I is local", nbr->addr);
ea_set_attr_u32(&a->eattrs, &ea_gen_igp_metric, 0, c->cf->cost);
ea_set_attr_u32(to, &ea_gen_igp_metric, 0, c->cf->cost);
struct nexthop_adata nhad = {
.nh = {
@ -977,7 +977,7 @@ bgp_apply_next_hop(struct bgp_parse_state *s, rta *a, ip_addr gw, ip_addr ll)
.length = sizeof nhad - sizeof nhad.ad,
},
};
ea_set_attr_data(&a->eattrs, &ea_gen_nexthop, 0, nhad.ad.data, nhad.ad.length);
ea_set_attr_data(to, &ea_gen_nexthop, 0, nhad.ad.data, nhad.ad.length);
}
else /* GW_RECURSIVE */
{
@ -988,21 +988,21 @@ bgp_apply_next_hop(struct bgp_parse_state *s, rta *a, ip_addr gw, ip_addr ll)
if (s->mpls)
{
u32 labels[BGP_MPLS_MAX];
ea_set_hostentry(&a->eattrs, c->c.table, tab, gw, ll, BGP_MPLS_MAX, labels);
ea_set_hostentry(to, c->c.table, tab, gw, ll, BGP_MPLS_MAX, labels);
}
else
ea_set_hostentry(&a->eattrs, c->c.table, tab, gw, ll, 0, NULL);
ea_set_hostentry(to, c->c.table, tab, gw, ll, 0, NULL);
}
}
static void
bgp_apply_mpls_labels(struct bgp_parse_state *s, rta *a, u32 lnum, u32 labels[lnum])
bgp_apply_mpls_labels(struct bgp_parse_state *s, ea_list **to, u32 lnum, u32 labels[lnum])
{
if (lnum > MPLS_MAX_LABEL_STACK)
{
REPORT("Too many MPLS labels ($u)", lnum);
ea_set_dest(&a->eattrs, 0, RTD_UNREACHABLE);
ea_set_dest(to, 0, RTD_UNREACHABLE);
return;
}
@ -1012,7 +1012,7 @@ bgp_apply_mpls_labels(struct bgp_parse_state *s, rta *a, u32 lnum, u32 labels[ln
if (s->channel->cf->gw_mode == GW_DIRECT)
{
eattr *e = ea_find(a->eattrs, &ea_gen_nexthop);
eattr *e = ea_find(*to, &ea_gen_nexthop);
struct {
struct nexthop_adata nhad;
u32 labels[MPLS_MAX_LABEL_STACK];
@ -1025,7 +1025,7 @@ bgp_apply_mpls_labels(struct bgp_parse_state *s, rta *a, u32 lnum, u32 labels[ln
}
else /* GW_RECURSIVE */
{
eattr *e = ea_find(a->eattrs, &ea_gen_hostentry);
eattr *e = ea_find(*to, &ea_gen_hostentry);
ASSERT_DIE(e);
struct hostentry_adata *head = (void *) e->u.ptr;
memcpy(&head->labels, labels, lnum * sizeof(u32));
@ -1034,24 +1034,24 @@ bgp_apply_mpls_labels(struct bgp_parse_state *s, rta *a, u32 lnum, u32 labels[ln
}
static void
bgp_apply_flow_validation(struct bgp_parse_state *s, const net_addr *n, rta *a)
bgp_apply_flow_validation(struct bgp_parse_state *s, const net_addr *n, ea_list **to)
{
struct bgp_channel *c = s->channel;
uint valid = rt_flowspec_check(c->base_table, c->c.table, n, a, s->proto->is_interior);
uint valid = rt_flowspec_check(c->base_table, c->c.table, n, *to, s->proto->is_interior);
/* Invalidate cached rta */
if (s->cached_rta)
if (s->cached_ea)
{
/* Hasn't changed */
if (valid == ea_get_int(s->cached_rta->eattrs, &ea_gen_flowspec_valid, FLOWSPEC_UNKNOWN))
if (valid == ea_get_int(s->cached_ea, &ea_gen_flowspec_valid, FLOWSPEC_UNKNOWN))
return;
rta_free(s->cached_rta);
s->cached_rta = NULL;
rta_free(s->cached_ea);
s->cached_ea = NULL;
}
/* Set the value */
ea_set_attr_u32(&a->eattrs, &ea_gen_flowspec_valid, 0, valid);
ea_set_attr_u32(to, &ea_gen_flowspec_valid, 0, valid);
}
static int
@ -1105,13 +1105,13 @@ bgp_use_gateway(struct bgp_export_state *s)
{
struct bgp_proto *p = s->proto;
struct bgp_channel *c = s->channel;
rta *ra = s->route->attrs;
ea_list *ra = s->route->attrs;
/* Handle next hop self option - also applies to gateway */
if (c->cf->next_hop_self && bgp_match_src(s, c->cf->next_hop_self))
return NULL;
eattr *nhea = ea_find(ra->eattrs, &ea_gen_nexthop);
eattr *nhea = ea_find(ra, &ea_gen_nexthop);
if (!nhea)
return NULL;
@ -1229,7 +1229,7 @@ bgp_encode_next_hop_ip(struct bgp_write_state *s, eattr *a, byte *buf, uint size
}
static void
bgp_decode_next_hop_ip(struct bgp_parse_state *s, byte *data, uint len, rta *a)
bgp_decode_next_hop_ip(struct bgp_parse_state *s, byte *data, uint len, ea_list **to)
{
struct bgp_channel *c = s->channel;
struct adata *ad = lp_alloc_adata(s->pool, 32);
@ -1270,8 +1270,8 @@ bgp_decode_next_hop_ip(struct bgp_parse_state *s, byte *data, uint len, rta *a)
// XXXX validate next hop
bgp_set_attr_ptr(&(a->eattrs), BA_NEXT_HOP, 0, ad);
bgp_apply_next_hop(s, a, nh[0], nh[1]);
bgp_set_attr_ptr(to, BA_NEXT_HOP, 0, ad);
bgp_apply_next_hop(s, to, nh[0], nh[1]);
}
static uint
@ -1309,7 +1309,7 @@ bgp_encode_next_hop_vpn(struct bgp_write_state *s, eattr *a, byte *buf, uint siz
}
static void
bgp_decode_next_hop_vpn(struct bgp_parse_state *s, byte *data, uint len, rta *a)
bgp_decode_next_hop_vpn(struct bgp_parse_state *s, byte *data, uint len, ea_list **to)
{
struct bgp_channel *c = s->channel;
struct adata *ad = lp_alloc_adata(s->pool, 32);
@ -1351,8 +1351,8 @@ bgp_decode_next_hop_vpn(struct bgp_parse_state *s, byte *data, uint len, rta *a)
// XXXX validate next hop
bgp_set_attr_ptr(&(a->eattrs), BA_NEXT_HOP, 0, ad);
bgp_apply_next_hop(s, a, nh[0], nh[1]);
bgp_set_attr_ptr(to, BA_NEXT_HOP, 0, ad);
bgp_apply_next_hop(s, to, nh[0], nh[1]);
}
@ -1364,7 +1364,7 @@ bgp_encode_next_hop_none(struct bgp_write_state *s UNUSED, eattr *a UNUSED, byte
}
static void
bgp_decode_next_hop_none(struct bgp_parse_state *s UNUSED, byte *data UNUSED, uint len UNUSED, rta *a UNUSED)
bgp_decode_next_hop_none(struct bgp_parse_state *s UNUSED, byte *data UNUSED, uint len UNUSED, ea_list **to UNUSED)
{
/*
* Although we expect no next hop and RFC 7606 7.11 states that attribute
@ -1389,15 +1389,15 @@ bgp_update_next_hop_none(struct bgp_export_state *s UNUSED, eattr *a, ea_list **
*/
static void
bgp_rte_update(struct bgp_parse_state *s, const net_addr *n, u32 path_id, rta *a0)
bgp_rte_update(struct bgp_parse_state *s, const net_addr *n, u32 path_id, ea_list *a0)
{
if (path_id != s->last_id)
{
s->last_src = rt_get_source(&s->proto->p, path_id);
s->last_id = path_id;
rta_free(s->cached_rta);
s->cached_rta = NULL;
ea_free(s->cached_ea);
s->cached_ea = NULL;
}
if (!a0)
@ -1412,16 +1412,10 @@ bgp_rte_update(struct bgp_parse_state *s, const net_addr *n, u32 path_id, rta *a
}
/* Prepare cached route attributes */
if (s->cached_rta == NULL)
{
/* Workaround for rta_lookup() breaking eattrs */
ea_list *ea = a0->eattrs;
s->cached_rta = rta_lookup(a0);
a0->eattrs = ea;
}
if (s->cached_ea == NULL)
s->cached_ea = ea_lookup(a0);
rta *a = rta_clone(s->cached_rta);
rte *e = rte_get_temp(a, s->last_src);
rte *e = rte_get_temp(rta_clone(s->cached_ea), s->last_src);
e->pflags = 0;
rte_update3(&s->channel->c, n, e, s->last_src);
@ -1447,7 +1441,7 @@ bgp_encode_mpls_labels(struct bgp_write_state *s UNUSED, const adata *mpls, byte
}
static void
bgp_decode_mpls_labels(struct bgp_parse_state *s, byte **pos, uint *len, uint *pxlen, rta *a)
bgp_decode_mpls_labels(struct bgp_parse_state *s, byte **pos, uint *len, uint *pxlen, ea_list **to)
{
u32 labels[BGP_MPLS_MAX];
u32 label;
@ -1469,15 +1463,15 @@ bgp_decode_mpls_labels(struct bgp_parse_state *s, byte **pos, uint *len, uint *p
}
while (!(label & BGP_MPLS_BOS));
if (!a)
if (!*to)
return;
/* Update next hop entry in rta */
bgp_apply_mpls_labels(s, a, lnum, labels);
bgp_apply_mpls_labels(s, to, lnum, labels);
/* Attributes were changed, invalidate cached entry */
rta_free(s->cached_rta);
s->cached_rta = NULL;
rta_free(s->cached_ea);
s->cached_ea = NULL;
return;
}
@ -1520,7 +1514,7 @@ bgp_encode_nlri_ip4(struct bgp_write_state *s, struct bgp_bucket *buck, byte *bu
}
static void
bgp_decode_nlri_ip4(struct bgp_parse_state *s, byte *pos, uint len, rta *a)
bgp_decode_nlri_ip4(struct bgp_parse_state *s, byte *pos, uint len, ea_list *a)
{
while (len)
{
@ -1546,7 +1540,7 @@ bgp_decode_nlri_ip4(struct bgp_parse_state *s, byte *pos, uint len, rta *a)
/* Decode MPLS labels */
if (s->mpls)
bgp_decode_mpls_labels(s, &pos, &len, &l, a);
bgp_decode_mpls_labels(s, &pos, &len, &l, &a);
if (l > IP4_MAX_PREFIX_LENGTH)
bgp_parse_error(s, 10);
@ -1605,7 +1599,7 @@ bgp_encode_nlri_ip6(struct bgp_write_state *s, struct bgp_bucket *buck, byte *bu
}
static void
bgp_decode_nlri_ip6(struct bgp_parse_state *s, byte *pos, uint len, rta *a)
bgp_decode_nlri_ip6(struct bgp_parse_state *s, byte *pos, uint len, ea_list *a)
{
while (len)
{
@ -1631,7 +1625,7 @@ bgp_decode_nlri_ip6(struct bgp_parse_state *s, byte *pos, uint len, rta *a)
/* Decode MPLS labels */
if (s->mpls)
bgp_decode_mpls_labels(s, &pos, &len, &l, a);
bgp_decode_mpls_labels(s, &pos, &len, &l, &a);
if (l > IP6_MAX_PREFIX_LENGTH)
bgp_parse_error(s, 10);
@ -1693,7 +1687,7 @@ bgp_encode_nlri_vpn4(struct bgp_write_state *s, struct bgp_bucket *buck, byte *b
}
static void
bgp_decode_nlri_vpn4(struct bgp_parse_state *s, byte *pos, uint len, rta *a)
bgp_decode_nlri_vpn4(struct bgp_parse_state *s, byte *pos, uint len, ea_list *a)
{
while (len)
{
@ -1719,7 +1713,7 @@ bgp_decode_nlri_vpn4(struct bgp_parse_state *s, byte *pos, uint len, rta *a)
/* Decode MPLS labels */
if (s->mpls)
bgp_decode_mpls_labels(s, &pos, &len, &l, a);
bgp_decode_mpls_labels(s, &pos, &len, &l, &a);
/* Decode route distinguisher */
if (l < 64)
@ -1790,7 +1784,7 @@ bgp_encode_nlri_vpn6(struct bgp_write_state *s, struct bgp_bucket *buck, byte *b
}
static void
bgp_decode_nlri_vpn6(struct bgp_parse_state *s, byte *pos, uint len, rta *a)
bgp_decode_nlri_vpn6(struct bgp_parse_state *s, byte *pos, uint len, ea_list *a)
{
while (len)
{
@ -1816,7 +1810,7 @@ bgp_decode_nlri_vpn6(struct bgp_parse_state *s, byte *pos, uint len, rta *a)
/* Decode MPLS labels */
if (s->mpls)
bgp_decode_mpls_labels(s, &pos, &len, &l, a);
bgp_decode_mpls_labels(s, &pos, &len, &l, &a);
/* Decode route distinguisher */
if (l < 64)
@ -1877,7 +1871,7 @@ bgp_encode_nlri_flow4(struct bgp_write_state *s, struct bgp_bucket *buck, byte *
}
static void
bgp_decode_nlri_flow4(struct bgp_parse_state *s, byte *pos, uint len, rta *a)
bgp_decode_nlri_flow4(struct bgp_parse_state *s, byte *pos, uint len, ea_list *a)
{
while (len)
{
@ -1930,7 +1924,7 @@ bgp_decode_nlri_flow4(struct bgp_parse_state *s, byte *pos, uint len, rta *a)
/* Apply validation procedure per RFC 8955 (6) */
if (a && s->channel->cf->validate)
bgp_apply_flow_validation(s, n, a);
bgp_apply_flow_validation(s, n, &a);
bgp_rte_update(s, n, path_id, a);
}
@ -1969,7 +1963,7 @@ bgp_encode_nlri_flow6(struct bgp_write_state *s, struct bgp_bucket *buck, byte *
}
static void
bgp_decode_nlri_flow6(struct bgp_parse_state *s, byte *pos, uint len, rta *a)
bgp_decode_nlri_flow6(struct bgp_parse_state *s, byte *pos, uint len, ea_list *a)
{
while (len)
{
@ -2022,7 +2016,7 @@ bgp_decode_nlri_flow6(struct bgp_parse_state *s, byte *pos, uint len, rta *a)
/* Apply validation procedure per RFC 8955 (6) */
if (a && s->channel->cf->validate)
bgp_apply_flow_validation(s, n, a);
bgp_apply_flow_validation(s, n, &a);
bgp_rte_update(s, n, path_id, a);
}
@ -2472,7 +2466,6 @@ static inline void
bgp_decode_nlri(struct bgp_parse_state *s, u32 afi, byte *nlri, uint len, ea_list *ea, byte *nh, uint nh_len)
{
struct bgp_channel *c = bgp_get_channel(s->proto, afi);
rta *a = NULL;
if (!c)
DISCARD(BAD_AFI, BGP_AFI(afi), BGP_SAFI(afi));
@ -2493,26 +2486,22 @@ bgp_decode_nlri(struct bgp_parse_state *s, u32 afi, byte *nlri, uint len, ea_lis
if (ea)
{
a = allocz(RTA_MAX_SIZE);
ea_set_attr_data(&ea, &ea_gen_from, 0, &s->proto->remote_ip, sizeof(ip_addr));
ea_set_attr_u32(&ea, &ea_gen_preference, 0, c->c.preference);
ea_set_attr_u32(&ea, &ea_gen_source, 0, RTS_BGP);
a->eattrs = ea;
ea_set_attr_data(&a->eattrs, &ea_gen_from, 0, &s->proto->remote_ip, sizeof(ip_addr));
ea_set_attr_u32(&a->eattrs, &ea_gen_preference, 0, c->c.preference);
ea_set_attr_u32(&a->eattrs, &ea_gen_source, 0, RTS_BGP);
c->desc->decode_next_hop(s, nh, nh_len, a);
bgp_finish_attrs(s, a);
c->desc->decode_next_hop(s, nh, nh_len, &ea);
bgp_finish_attrs(s, &ea);
/* Handle withdraw during next hop decoding */
if (s->err_withdraw)
a = NULL;
ea = NULL;
}
c->desc->decode_nlri(s, nlri, len, a);
c->desc->decode_nlri(s, nlri, len, ea);
rta_free(s->cached_rta);
s->cached_rta = NULL;
rta_free(s->cached_ea);
s->cached_ea = NULL;
}
static void
@ -2617,7 +2606,7 @@ bgp_rx_update(struct bgp_conn *conn, byte *pkt, uint len)
ea, s.mp_next_hop_data, s.mp_next_hop_len);
done:
rta_free(s.cached_rta);
rta_free(s.cached_ea);
lp_restore(tmp_linpool, &tmpp);
return;
}

View File

@ -423,7 +423,7 @@ mrt_rib_table_header(struct mrt_table_dump_state *s, net_addr *n)
static void
mrt_rib_table_entry_bgp_attrs(struct mrt_table_dump_state *s, rte *r)
{
struct ea_list *eattrs = r->attrs->eattrs;
struct ea_list *eattrs = r->attrs;
buffer *b = &s->buf;
if (!eattrs)

View File

@ -387,7 +387,7 @@ ospf_init(struct proto_config *CF)
static int
ospf_rte_better(struct rte *new, struct rte *old)
{
u32 new_metric1 = ea_get_int(new->attrs->eattrs, &ea_ospf_metric1, LSINFINITY);
u32 new_metric1 = ea_get_int(new->attrs, &ea_ospf_metric1, LSINFINITY);
if (new_metric1 == LSINFINITY)
return 0;
@ -400,13 +400,13 @@ ospf_rte_better(struct rte *new, struct rte *old)
if (ns == RTS_OSPF_EXT2)
{
u32 old_metric2 = ea_get_int(old->attrs->eattrs, &ea_ospf_metric2, LSINFINITY);
u32 new_metric2 = ea_get_int(new->attrs->eattrs, &ea_ospf_metric2, LSINFINITY);
u32 old_metric2 = ea_get_int(old->attrs, &ea_ospf_metric2, LSINFINITY);
u32 new_metric2 = ea_get_int(new->attrs, &ea_ospf_metric2, LSINFINITY);
if (new_metric2 < old_metric2) return 1;
if (new_metric2 > old_metric2) return 0;
}
u32 old_metric1 = ea_get_int(old->attrs->eattrs, &ea_ospf_metric1, LSINFINITY);
u32 old_metric1 = ea_get_int(old->attrs, &ea_ospf_metric1, LSINFINITY);
if (new_metric1 < old_metric1)
return 1;
@ -419,7 +419,7 @@ ospf_rte_igp_metric(struct rte *rt)
if (rt_get_source_attr(rt) == RTS_OSPF_EXT2)
return IGP_METRIC_UNKNOWN;
return ea_get_int(rt->attrs->eattrs, &ea_ospf_metric1, LSINFINITY);
return ea_get_int(rt->attrs, &ea_ospf_metric1, LSINFINITY);
}
void
@ -535,7 +535,7 @@ ospf_shutdown(struct proto *P)
/* Cleanup locked rta entries */
FIB_WALK(&p->rtf, ort, nf)
{
rta_free(nf->old_rta);
ea_free(nf->old_ea);
}
FIB_WALK_END;
@ -592,18 +592,18 @@ ospf_get_route_info(rte * rte, byte * buf)
}
buf += bsprintf(buf, " %s", type);
buf += bsprintf(buf, " (%d/%d", rt_get_preference(rte), ea_get_int(rte->attrs->eattrs, &ea_ospf_metric1, LSINFINITY));
buf += bsprintf(buf, " (%d/%d", rt_get_preference(rte), ea_get_int(rte->attrs, &ea_ospf_metric1, LSINFINITY));
if (source == RTS_OSPF_EXT2)
buf += bsprintf(buf, "/%d", ea_get_int(rte->attrs->eattrs, &ea_ospf_metric2, LSINFINITY));
buf += bsprintf(buf, "/%d", ea_get_int(rte->attrs, &ea_ospf_metric2, LSINFINITY));
buf += bsprintf(buf, ")");
if (source == RTS_OSPF_EXT1 || source == RTS_OSPF_EXT2)
{
eattr *ea = ea_find(rte->attrs->eattrs, &ea_ospf_tag);
eattr *ea = ea_find(rte->attrs, &ea_ospf_tag);
if (ea && (ea->u.data > 0))
buf += bsprintf(buf, " [%x]", ea->u.data);
}
eattr *ea = ea_find(rte->attrs->eattrs, &ea_ospf_router_id);
eattr *ea = ea_find(rte->attrs, &ea_ospf_router_id);
if (ea)
buf += bsprintf(buf, " [%R]", ea->u.data);
}

View File

@ -1977,17 +1977,17 @@ add_cand(struct ospf_area *oa, struct top_hash_entry *en, struct top_hash_entry
}
static inline int
ort_changed(ort *nf, rta *nr)
ort_changed(ort *nf, ea_list *nr)
{
rta *or = nf->old_rta;
ea_list *or = nf->old_ea;
if (!or ||
(nf->n.metric1 != nf->old_metric1) || (nf->n.metric2 != nf->old_metric2) ||
(nf->n.tag != nf->old_tag) || (nf->n.rid != nf->old_rid))
return 1;
eattr *nhea_n = ea_find(nr->eattrs, &ea_gen_nexthop);
eattr *nhea_o = ea_find(or->eattrs, &ea_gen_nexthop);
eattr *nhea_n = ea_find(nr, &ea_gen_nexthop);
eattr *nhea_o = ea_find(or, &ea_gen_nexthop);
if (!nhea_n != !nhea_o)
return 1;
@ -2000,8 +2000,8 @@ ort_changed(ort *nf, rta *nr)
return 1;
}
if ( ea_get_int(nr->eattrs, &ea_gen_source, 0)
!= ea_get_int(or->eattrs, &ea_gen_source, 0))
if ( ea_get_int(nr, &ea_gen_source, 0)
!= ea_get_int(or, &ea_gen_source, 0))
return 1;
return 0;
@ -2047,9 +2047,6 @@ again1:
if (nf->n.type) /* Add the route */
{
rta a0 = {
};
struct {
ea_list l;
eattr a[7];
@ -2066,7 +2063,7 @@ again1:
eattrs.a[eattrs.l.count++] =
EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, &nf->n.nhs->ad);
if (reload || ort_changed(nf, &a0))
if (reload || ort_changed(nf, &eattrs.l))
{
nf->old_metric1 = nf->n.metric1;
nf->old_metric2 = nf->n.metric2;
@ -2088,24 +2085,25 @@ again1:
EA_LITERAL_EMBEDDED(&ea_ospf_router_id, 0, nf->n.rid);
ASSERT_DIE(ARRAY_SIZE(eattrs.a) >= eattrs.l.count);
a0.eattrs = &eattrs.l;
rta *a = rta_lookup(&a0);
ea_list *a = rta_lookup(&eattrs.l);
rte *e = rte_get_temp(a, p->p.main_source);
rta_free(nf->old_rta);
nf->old_rta = rta_clone(a);
rta_free(nf->old_ea);
nf->old_ea = rta_clone(a);
/*
DBG("Mod rte type %d - %N via %I on iface %s, met %d\n",
a0.source, nf->fn.addr, a0.gw, a0.iface ? a0.iface->name : "(none)", nf->n.metric1);
*/
rte_update(&p->p, nf->fn.addr, e);
}
}
else if (nf->old_rta)
else if (nf->old_ea)
{
/* Remove the route */
rta_free(nf->old_rta);
nf->old_rta = NULL;
rta_free(nf->old_ea);
nf->old_ea = NULL;
rte_update(&p->p, nf->fn.addr, NULL);
}


@ -78,7 +78,7 @@ typedef struct ort
*/
orta n;
u32 old_metric1, old_metric2, old_tag, old_rid;
rta *old_rta;
ea_list *old_ea;
u32 lsa_id;
u8 external_rte;
u8 area_net;


@ -1337,9 +1337,9 @@ ospf_rt_notify(struct proto *P, struct channel *ch UNUSED, net *n, rte *new, rte
ASSERT(p->asbr);
/* Get route attributes */
rta *a = new->attrs;
eattr *m1a = ea_find(a->eattrs, &ea_ospf_metric1);
eattr *m2a = ea_find(a->eattrs, &ea_ospf_metric2);
ea_list *a = new->attrs;
eattr *m1a = ea_find(a, &ea_ospf_metric1);
eattr *m2a = ea_find(a, &ea_ospf_metric2);
uint m1 = m1a ? m1a->u.data : 0;
uint m2 = m2a ? m2a->u.data : 10000;
@ -1363,10 +1363,10 @@ ospf_rt_notify(struct proto *P, struct channel *ch UNUSED, net *n, rte *new, rte
uint ebit = m2a || !m1a;
uint metric = ebit ? m2 : m1;
uint tag = ea_get_int(a->eattrs, &ea_ospf_tag, 0);
uint tag = ea_get_int(a, &ea_ospf_tag, 0);
ip_addr fwd = IPA_NONE;
eattr *nhea = ea_find(a->eattrs, &ea_gen_nexthop);
eattr *nhea = ea_find(a, &ea_gen_nexthop);
if (nhea)
{
struct nexthop_adata *nhad = (struct nexthop_adata *) nhea->u.ptr;


@ -85,7 +85,7 @@ random_net_ip4(void)
}
struct perf_random_routes {
struct rta *a;
ea_list *a;
net_addr net;
};
@ -142,10 +142,10 @@ perf_loop(void *data)
*((net_addr_ip4 *) &(p->data[i].net)) = random_net_ip4();
if (!p->attrs_per_rte || !(i % p->attrs_per_rte)) {
struct rta a0 = {};
ea_list *ea = NULL;
ea_set_attr_u32(&a0.eattrs, &ea_gen_preference, 0, p->p.main_channel->preference);
ea_set_attr_u32(&a0.eattrs, &ea_gen_source, 0, RTS_PERF);
ea_set_attr_u32(&ea, &ea_gen_preference, 0, p->p.main_channel->preference);
ea_set_attr_u32(&ea, &ea_gen_source, 0, RTS_PERF);
struct nexthop_adata nhad = {
.nh.iface = p->ifa->iface,
@ -153,10 +153,10 @@ perf_loop(void *data)
.nh.weight = 1,
};
ea_set_attr_data(&a0.eattrs, &ea_gen_nexthop, 0,
ea_set_attr_data(&ea, &ea_gen_nexthop, 0,
&nhad.ad.data, sizeof nhad - sizeof nhad.ad);
p->data[i].a = rta_lookup(&a0);
p->data[i].a = rta_lookup(ea);
}
else
p->data[i].a = rta_clone(p->data[i-1].a);
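The allocation side of the commit follows one pattern as well: instead of zeroing an rta and filling its eattrs field, a protocol now grows an ea_list from NULL and hands it straight to rta_lookup() or rte_get_temp(). A condensed sketch of that pattern as used in the hunk above and in the rpki and static hunks below; channel, iface and src are illustrative placeholders, not names from this commit:
ea_list *ea = NULL;
ea_set_attr_u32(&ea, &ea_gen_preference, 0, channel->preference);
ea_set_attr_u32(&ea, &ea_gen_source, 0, RTS_STATIC);
struct nexthop_adata nhad = {
  .nh.iface = iface,
  .nh.weight = 1,
};
ea_set_attr_data(&ea, &ea_gen_nexthop, 0,
                 &nhad.ad.data, sizeof nhad - sizeof nhad.ad);
/* cached route: look the list up first; temporary route: pass ea directly */
rte *e = rte_get_temp(rta_lookup(ea), src);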


@ -55,7 +55,6 @@ pipe_rt_notify(struct proto *P, struct channel *src_ch, net *n, rte *new, rte *o
struct rte_src *src;
rte *e;
rta *a;
if (!new && !old)
return;
@ -71,11 +70,8 @@ pipe_rt_notify(struct proto *P, struct channel *src_ch, net *n, rte *new, rte *o
{
src = new->src;
a = alloca(rta_size(new->attrs));
memcpy(a, new->attrs, rta_size(new->attrs));
a->cached = 0;
ea_unset_attr(&a->eattrs, 0, &ea_gen_hostentry);
ea_list *a = new->attrs;
ea_unset_attr(&a, 0, &ea_gen_hostentry);
e = rte_get_temp(a, src);
e->pflags = new->pflags;


@ -447,11 +447,11 @@ radv_rt_notify(struct proto *P, struct channel *ch UNUSED, net *n, rte *new, rte
{
/* Update */
ea = ea_find(new->attrs->eattrs, &ea_radv_preference);
ea = ea_find(new->attrs, &ea_radv_preference);
uint preference = ea ? ea->u.data : RA_PREF_MEDIUM;
uint preference_set = !!ea;
ea = ea_find(new->attrs->eattrs, &ea_radv_lifetime);
ea = ea_find(new->attrs, &ea_radv_lifetime);
uint lifetime = ea ? ea->u.data : 0;
uint lifetime_set = !!ea;


@ -151,8 +151,6 @@ rip_announce_rte(struct rip_proto *p, struct rip_entry *en)
if (rt)
{
/* Update */
rta a0 = {};
struct {
ea_list l;
eattr a[3];
@ -164,7 +162,8 @@ rip_announce_rte(struct rip_proto *p, struct rip_entry *en)
EA_LITERAL_EMBEDDED(&ea_rip_metric, 0, rt->metric),
},
};
a0.eattrs = &ea_block.l;
ea_list *ea = &ea_block.l;
u16 rt_tag = rt->tag;
struct iface *rt_from = NULL;
@ -203,7 +202,7 @@ rip_announce_rte(struct rip_proto *p, struct rip_entry *en)
nhad->ad.length = ((void *) nh - (void *) nhad->ad.data);
ea_set_attr(&a0.eattrs,
ea_set_attr(&ea,
EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0,
&(nexthop_sort(nhad, tmp_linpool)->ad)));
}
@ -217,22 +216,21 @@ rip_announce_rte(struct rip_proto *p, struct rip_entry *en)
.nh.iface = rt->from->ifa->iface,
};
ea_set_attr_data(&a0.eattrs, &ea_gen_nexthop, 0,
ea_set_attr_data(&ea, &ea_gen_nexthop, 0,
&nhad.ad.data, sizeof nhad - sizeof nhad.ad);
ea_set_attr_data(&a0.eattrs, &ea_gen_from, 0, &rt->from->nbr->addr, sizeof(ip_addr));
ea_set_attr_data(&ea, &ea_gen_from, 0, &rt->from->nbr->addr, sizeof(ip_addr));
}
ea_set_attr_u32(&a0.eattrs, &ea_rip_tag, 0, rt_tag);
ea_set_attr_u32(&ea, &ea_rip_tag, 0, rt_tag);
struct rip_iface_adata riad = {
.ad = { .length = sizeof(struct rip_iface_adata) - sizeof(struct adata) },
.iface = rt_from,
};
ea_set_attr(&a0.eattrs,
ea_set_attr(&ea,
EA_LITERAL_DIRECT_ADATA(&ea_rip_from, 0, &riad.ad));
rta *a = rta_lookup(&a0);
rte *e = rte_get_temp(a, p->p.main_source);
rte *e = rte_get_temp(rta_lookup(ea), p->p.main_source);
rte_update(&p->p, en->n.addr, e);
}
@ -345,9 +343,9 @@ rip_rt_notify(struct proto *P, struct channel *ch UNUSED, struct network *net, s
if (new)
{
/* Update */
u32 rt_tag = ea_get_int(new->attrs->eattrs, &ea_rip_tag, 0);
u32 rt_metric = ea_get_int(new->attrs->eattrs, &ea_rip_metric, 1);
const eattr *rie = ea_find(new->attrs->eattrs, &ea_rip_from);
u32 rt_tag = ea_get_int(new->attrs, &ea_rip_tag, 0);
u32 rt_metric = ea_get_int(new->attrs, &ea_rip_metric, 1);
const eattr *rie = ea_find(new->attrs, &ea_rip_from);
struct iface *rt_from = rie ? ((struct rip_iface_adata *) rie->u.ptr)->iface : NULL;
if (rt_metric > p->infinity)
@ -381,7 +379,7 @@ rip_rt_notify(struct proto *P, struct channel *ch UNUSED, struct network *net, s
en->tag = rt_tag;
en->from = (new->src->proto == P) ? rt_from : NULL;
eattr *nhea = ea_find(new->attrs->eattrs, &ea_gen_nexthop);
eattr *nhea = ea_find(new->attrs, &ea_gen_nexthop);
if (nhea)
{
struct nexthop_adata *nhad = (struct nexthop_adata *) nhea->u.ptr;
@ -1120,8 +1118,8 @@ rip_rte_better(struct rte *new, struct rte *old)
ASSERT_DIE(new->src == old->src);
struct rip_proto *p = (struct rip_proto *) new->src->proto;
u32 new_metric = ea_get_int(new->attrs->eattrs, &ea_rip_metric, p->infinity);
u32 old_metric = ea_get_int(old->attrs->eattrs, &ea_rip_metric, p->infinity);
u32 new_metric = ea_get_int(new->attrs, &ea_rip_metric, p->infinity);
u32 old_metric = ea_get_int(old->attrs, &ea_rip_metric, p->infinity);
return new_metric < old_metric;
}
@ -1129,7 +1127,7 @@ rip_rte_better(struct rte *new, struct rte *old)
static u32
rip_rte_igp_metric(struct rte *rt)
{
return ea_get_int(rt->attrs->eattrs, &ea_rip_metric, IGP_METRIC_UNKNOWN);
return ea_get_int(rt->attrs, &ea_rip_metric, IGP_METRIC_UNKNOWN);
}
static void
@ -1230,8 +1228,8 @@ static void
rip_get_route_info(rte *rte, byte *buf)
{
struct rip_proto *p = (struct rip_proto *) rte->src->proto;
u32 rt_metric = ea_get_int(rte->attrs->eattrs, &ea_rip_metric, p->infinity);
u32 rt_tag = ea_get_int(rte->attrs->eattrs, &ea_rip_tag, 0);
u32 rt_metric = ea_get_int(rte->attrs, &ea_rip_metric, p->infinity);
u32 rt_tag = ea_get_int(rte->attrs, &ea_rip_tag, 0);
buf += bsprintf(buf, " (%d/%d)", rt_get_preference(rte), rt_metric);


@ -120,13 +120,11 @@ rpki_table_add_roa(struct rpki_cache *cache, struct channel *channel, const net_
{
struct rpki_proto *p = cache->p;
rta a0 = {};
ea_list *ea = NULL;
ea_set_attr_u32(&ea, &ea_gen_preference, 0, channel->preference);
ea_set_attr_u32(&ea, &ea_gen_source, 0, RTS_RPKI);
ea_set_attr_u32(&a0.eattrs, &ea_gen_preference, 0, channel->preference);
ea_set_attr_u32(&a0.eattrs, &ea_gen_source, 0, RTS_RPKI);
rta *a = rta_lookup(&a0);
rte *e = rte_get_temp(a, p->p.main_source);
rte *e = rte_get_temp(rta_lookup(ea), p->p.main_source);
e->pflags = 0;


@ -53,10 +53,10 @@ static inline struct rte_src * static_get_source(struct static_proto *p, uint i)
static void
static_announce_rte(struct static_proto *p, struct static_route *r)
{
rta *a = allocz(RTA_MAX_SIZE);
ea_list *ea = NULL;
struct rte_src *src = static_get_source(p, r->index);
ea_set_attr_u32(&a->eattrs, &ea_gen_preference, 0, p->p.main_channel->preference);
ea_set_attr_u32(&a->eattrs, &ea_gen_source, 0, RTS_STATIC);
ea_set_attr_u32(&ea, &ea_gen_preference, 0, p->p.main_channel->preference);
ea_set_attr_u32(&ea, &ea_gen_source, 0, RTS_STATIC);
if (r->dest == RTD_UNICAST)
{
@ -92,7 +92,7 @@ static_announce_rte(struct static_proto *p, struct static_route *r)
nh = NEXTHOP_NEXT(nh);
}
ea_set_attr_data(&a->eattrs, &ea_gen_nexthop, 0,
ea_set_attr_data(&ea, &ea_gen_nexthop, 0,
nhad->ad.data, (void *) nh - (void *) nhad->ad.data);
}
@ -102,19 +102,19 @@ static_announce_rte(struct static_proto *p, struct static_route *r)
u32 *labels = r->mls ? (void *) r->mls->data : NULL;
u32 lnum = r->mls ? r->mls->length / sizeof(u32) : 0;
ea_set_hostentry(&a->eattrs, p->p.main_channel->table, tab,
ea_set_hostentry(&ea, p->p.main_channel->table, tab,
r->via, IPA_NONE, lnum, labels);
}
else if (r->dest)
ea_set_dest(&a->eattrs, 0, r->dest);
ea_set_dest(&ea, 0, r->dest);
/* Already announced */
if (r->state == SRS_CLEAN)
return;
/* We skip rta_lookup() here */
rte *e = rte_get_temp(a, src);
rte *e = rte_get_temp(ea, src);
e->pflags = 0;
if (r->cmds)
@ -407,16 +407,16 @@ static_reload_routes(struct channel *C)
static int
static_rte_better(rte *new, rte *old)
{
u32 n = ea_get_int(new->attrs->eattrs, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN);
u32 o = ea_get_int(old->attrs->eattrs, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN);
u32 n = ea_get_int(new->attrs, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN);
u32 o = ea_get_int(old->attrs, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN);
return n < o;
}
static int
static_rte_mergable(rte *pri, rte *sec)
{
u32 a = ea_get_int(pri->attrs->eattrs, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN);
u32 b = ea_get_int(sec->attrs->eattrs, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN);
u32 a = ea_get_int(pri->attrs, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN);
u32 b = ea_get_int(sec->attrs, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN);
return a == b;
}
@ -710,7 +710,7 @@ static_copy_config(struct proto_config *dest, struct proto_config *src)
static void
static_get_route_info(rte *rte, byte *buf)
{
eattr *a = ea_find(rte->attrs->eattrs, &ea_gen_igp_metric);
eattr *a = ea_find(rte->attrs, &ea_gen_igp_metric);
u32 pref = rt_get_preference(rte);
if (a)
buf += bsprintf(buf, " (%d/%u)", pref, a->u.data);


@ -111,7 +111,7 @@ struct nl_parse_state
int merge;
net *net;
rta *attrs;
ea_list *attrs;
struct krt_proto *proto;
s8 new;
s8 krt_src;
@ -1407,7 +1407,7 @@ HASH_DEFINE_REHASH_FN(RTH, struct krt_proto)
int
krt_capable(rte *e)
{
eattr *ea = ea_find(e->attrs->eattrs, &ea_gen_nexthop);
eattr *ea = ea_find(e->attrs, &ea_gen_nexthop);
if (!ea)
return 0;
@ -1441,8 +1441,7 @@ nl_send_route(struct krt_proto *p, rte *e, int op, int dest, struct nexthop_adat
{
eattr *ea;
net *net = e->net;
rta *a = e->attrs;
ea_list *eattrs = a->eattrs;
ea_list *eattrs = e->attrs;
int bufsize = 128 + KRT_METRICS_MAX*8 + (nh ? nh_bufsize(nh) : 0);
u32 priority = 0;
@ -1590,10 +1589,10 @@ dest:
static inline int
nl_add_rte(struct krt_proto *p, rte *e)
{
rta *a = e->attrs;
ea_list *ea = e->attrs;
int err = 0;
eattr *nhea = ea_find(a->eattrs, &ea_gen_nexthop);
eattr *nhea = ea_find(ea, &ea_gen_nexthop);
struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
if (krt_ecmp6(p) && nhad && NEXTHOP_IS_REACHABLE(nhad) && !NEXTHOP_ONE(nhad))
@ -1641,8 +1640,7 @@ nl_delete_rte(struct krt_proto *p, rte *e)
static inline int
nl_replace_rte(struct krt_proto *p, rte *e)
{
rta *a = e->attrs;
eattr *nhea = ea_find(a->eattrs, &ea_gen_nexthop);
eattr *nhea = ea_find(e->attrs, &ea_gen_nexthop);
struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
return nl_send_route(p, e, NL_OP_REPLACE,
NEXTHOP_IS_REACHABLE(nhad) ? RTD_UNICAST : nhad->dest, nhad);
@ -1713,14 +1711,14 @@ nl_announce_route(struct nl_parse_state *s)
e->net = s->net;
EA_LOCAL_LIST(2) ea = {
.l = { .count = 2, .next = e->attrs->eattrs },
.l = { .count = 2, .next = e->attrs },
.a = {
EA_LITERAL_EMBEDDED(&ea_krt_source, 0, s->krt_proto),
EA_LITERAL_EMBEDDED(&ea_krt_metric, 0, s->krt_metric),
},
};
e->attrs->eattrs = &ea.l;
e->attrs = &ea.l;
if (s->scan)
krt_got_route(s->proto, e, s->krt_src);
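The EA_LOCAL_LIST block above shows the other recurring move: rather than editing the route's attribute list in place, a small stack-allocated list is chained in front of it through .next, so the added attributes shadow the ones below. A sketch of just that step; krt_src_value and metric_value stand in for the state fields used in the hunk above:
EA_LOCAL_LIST(2) ea = {
  .l = { .count = 2, .next = e->attrs },   /* the old list becomes the lower layer */
  .a = {
    EA_LITERAL_EMBEDDED(&ea_krt_source, 0, krt_src_value),
    EA_LITERAL_EMBEDDED(&ea_krt_metric, 0, metric_value),
  },
};
e->attrs = &ea.l;   /* lookups now hit the new layer first */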
@ -1888,8 +1886,8 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
if (s->net && !nl_mergable_route(s, net, p, priority, i->rtm_type, i->rtm_family))
nl_announce_route(s);
rta *ra = lp_allocz(s->pool, RTA_MAX_SIZE);
ea_set_attr_u32(&ra->eattrs, &ea_gen_source, 0, RTS_INHERIT);
ea_list *ra = NULL;
ea_set_attr_u32(&ra, &ea_gen_source, 0, RTS_INHERIT);
if (a[RTA_FLOW])
s->rta_flow = rta_get_u32(a[RTA_FLOW]);
@ -1914,7 +1912,7 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
if (!nh)
SKIP("strange RTA_MULTIPATH\n");
ea_set_attr(&ra->eattrs, EA_LITERAL_DIRECT_ADATA(
ea_set_attr(&ra, EA_LITERAL_DIRECT_ADATA(
&ea_gen_nexthop, 0, &nh->ad));
break;
}
@ -2000,20 +1998,20 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
}
if (i->rtm_scope != def_scope)
ea_set_attr(&ra->eattrs,
ea_set_attr(&ra,
EA_LITERAL_EMBEDDED(&ea_krt_scope, 0, i->rtm_scope));
if (a[RTA_PREFSRC])
{
ip_addr ps = rta_get_ipa(a[RTA_PREFSRC]);
ea_set_attr(&ra->eattrs,
ea_set_attr(&ra,
EA_LITERAL_STORE_ADATA(&ea_krt_prefsrc, 0, &ps, sizeof(ps)));
}
/* Can be set per-route or per-nexthop */
if (s->rta_flow)
ea_set_attr(&ra->eattrs,
ea_set_attr(&ra,
EA_LITERAL_EMBEDDED(&ea_krt_realm, 0, s->rta_flow));
if (a[RTA_METRICS])
@ -2027,7 +2025,7 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
for (uint t = 1; t < KRT_METRICS_MAX; t++)
if (metrics[0] & (1 << t))
ea_set_attr(&ra->eattrs,
ea_set_attr(&ra,
EA_LITERAL_EMBEDDED(&ea_krt_metrics[t], 0, metrics[t]));
}
@ -2045,7 +2043,7 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
s->net = net;
s->attrs = ra;
ea_set_attr_data(&ra->eattrs, &ea_gen_nexthop, 0,
ea_set_attr_data(&ra, &ea_gen_nexthop, 0,
nhad.ad.data, nhad.ad.length);
s->proto = p;
@ -2058,17 +2056,17 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
else
{
/* Merge next hops with the stored route */
eattr *nhea = ea_find(s->attrs->eattrs, &ea_gen_nexthop);
eattr *nhea = ea_find(s->attrs, &ea_gen_nexthop);
struct nexthop_adata *nhad_old = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
if (nhad_old)
ea_set_attr(&s->attrs->eattrs,
ea_set_attr(&s->attrs,
EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0,
&(nexthop_merge(nhad_old, &nhad.nhad,
KRT_CF->merge_paths, s->pool)->ad)
));
else
ea_set_attr_data(&s->attrs->eattrs, &ea_gen_nexthop, 0,
ea_set_attr_data(&s->attrs, &ea_gen_nexthop, 0,
nhad.ad.data, nhad.ad.length);
}
}


@ -286,7 +286,7 @@ static struct tbf rl_alien = TBF_DEFAULT_LOG_LIMITS;
static inline u32
krt_metric(rte *a)
{
eattr *ea = ea_find(a->attrs->eattrs, &ea_krt_metric);
eattr *ea = ea_find(a->attrs, &ea_krt_metric);
return ea ? ea->u.data : 0;
}
@ -306,8 +306,7 @@ static void
krt_learn_announce_update(struct krt_proto *p, rte *e)
{
net *n = e->net;
rta *aa = rta_clone(e->attrs);
rte *ee = rte_get_temp(aa, p->p.main_source);
rte *ee = rte_get_temp(ea_clone(e->attrs), p->p.main_source);
rte_update(&p->p, n->n.addr, ee);
}
@ -437,10 +436,9 @@ krt_learn_async(struct krt_proto *p, rte *e, int new)
net *n = net_get(p->krt_table, n0->n.addr);
rte *g, **gg, *best, **bestp, *old_best;
ASSERT(!e->attrs->cached);
ea_set_attr_u32(&e->attrs->eattrs, &ea_gen_preference, 0, p->p.main_channel->preference);
e->attrs = rta_lookup(e->attrs);
ea_list *ea = e->attrs;
ea_set_attr_u32(&ea, &ea_gen_preference, 0, p->p.main_channel->preference);
e->attrs = rta_lookup(ea);
old_best = n->routes;
for(gg=&n->routes; g = *gg; gg = &g->next)
@ -606,10 +604,10 @@ reject:
static int
krt_same_dest(rte *k, rte *e)
{
rta *ka = k->attrs, *ea = e->attrs;
ea_list *ka = k->attrs, *ea = e->attrs;
eattr *nhea_k = ea_find(ka->eattrs, &ea_gen_nexthop);
eattr *nhea_e = ea_find(ea->eattrs, &ea_gen_nexthop);
eattr *nhea_k = ea_find(ka, &ea_gen_nexthop);
eattr *nhea_e = ea_find(ea, &ea_gen_nexthop);
return (!nhea_k == !nhea_e) && adata_same(nhea_k->u.ptr, nhea_e->u.ptr);
}