From 3fa23c3b532d2e1756360a566a3abc140cae17f1 Mon Sep 17 00:00:00 2001
From: Maria Matejka
Date: Thu, 4 Apr 2024 12:01:35 +0200
Subject: [PATCH] Cached route attributes now have explicitly marked layers

Also, the rta_* functions were renamed to ea_* functions.
---
 lib/route.h                   | 52 ++++++++++++++++++++++++++---------
 nest/mpls.c                   |  2 +-
 nest/rt-attr.c                | 43 +++++++++++++++--------------
 nest/rt-show.c                |  6 ++--
 nest/rt-table.c               | 44 ++++++++++++++--------------
 proto/aggregator/aggregator.c |  9 ++----
 proto/bgp/packets.c           |  8 +++---
 proto/ospf/rt.c               |  4 +--
 proto/perf/perf.c             |  4 +--
 9 files changed, 96 insertions(+), 76 deletions(-)

diff --git a/lib/route.h b/lib/route.h
index 37f0db3d..d965ed2a 100644
--- a/lib/route.h
+++ b/lib/route.h
@@ -235,11 +235,22 @@ typedef struct eattr {
 typedef struct ea_list {
   struct ea_list *next;			/* In case we have an override list */
   byte flags;				/* Flags: EALF_... */
-  byte rfu;
+  byte stored:5;			/* enum ea_stored */
+  byte rfu:3;
   word count;				/* Number of attributes */
   eattr attrs[0];			/* Attribute definitions themselves */
 } ea_list;
 
+enum ea_stored {
+  EALS_NONE = 0,			/* This is a temporary ea_list */
+  EALS_PREIMPORT = 1,			/* State when route entered rte_update() */
+  EALS_FILTERED = 2,			/* State after filters */
+  EALS_IN_TABLE = 3,			/* State in table */
+  EALS_KEY = 4,				/* EA list used as key */
+  EALS_CUSTOM = 0x10,			/* OR this with custom values */
+  EALS_MAX = 0x20,
+};
+
 struct ea_storage {
   struct ea_storage *next_hash;		/* Next in hash chain */
   struct ea_storage **pprev_hash;	/* Previous in hash chain */
@@ -250,7 +261,6 @@ struct ea_storage {
 
 #define EALF_SORTED 1			/* Attributes are sorted by code */
 #define EALF_BISECT 2			/* Use interval bisection for searching */
-#define EALF_CACHED 4			/* List is cached */
 #define EALF_HUGE   8			/* List is too big to fit into slab */
 
 struct ea_class {
@@ -332,12 +342,12 @@ static inline eattr *ea_find_by_name(ea_list *l, const char *name)
 eattr *ea_walk(struct ea_walk_state *s, uint id, uint max);
 void ea_dump(ea_list *);
 int ea_same(ea_list *x, ea_list *y);	/* Test whether two ea_lists are identical */
-uint ea_hash(ea_list *e);	/* Calculate 16-bit hash value */
+uint ea_hash(ea_list *e);	/* Calculate attributes hash value */
 ea_list *ea_append(ea_list *to, ea_list *what);
 void ea_format_bitfield(const struct eattr *a, byte *buf, int bufsize,
 			const char **names, int min, int max);
 
 /* Normalize ea_list; allocates the result from tmp_linpool */
-ea_list *ea_normalize(ea_list *e, int overlay);
+ea_list *ea_normalize(ea_list *e, u32 upto);
 
 uint ea_list_size(ea_list *);
 void ea_list_copy(ea_list *dest, ea_list *src, uint size);
@@ -522,18 +532,39 @@ static inline int rte_dest(const rte *r)
 }
 
 void rta_init(void);
-ea_list *ea_lookup(ea_list *, int overlay);	/* Get a cached (and normalized) variant of this attribute list */
-static inline int ea_is_cached(const ea_list *r) { return r->flags & EALF_CACHED; }
+
+ea_list *ea_lookup_slow(ea_list *r, u32 squash_upto, enum ea_stored oid);
+
 static inline struct ea_storage *ea_get_storage(ea_list *r)
 {
-  ASSERT_DIE(ea_is_cached(r));
+  ASSERT_DIE(r->stored);
   return SKIP_BACK(struct ea_storage, l[0], r);
 }
 
-static inline ea_list *ea_clone(ea_list *r) {
+static inline ea_list *ea_ref(ea_list *r)
+{
   ASSERT_DIE(0 < atomic_fetch_add_explicit(&ea_get_storage(r)->uc, 1, memory_order_acq_rel));
   return r;
 }
+
+static inline ea_list *ea_lookup(ea_list *r, u32 squash_upto, enum ea_stored oid)
+{
+  ASSERT_DIE(oid);
+  if ((r->stored == oid) || BIT32_TEST(&squash_upto, r->stored))
+    return ea_ref(r);
+  else
+    return ea_lookup_slow(r, squash_upto, oid);
+}
+
+static inline ea_list *ea_strip_to(ea_list *r, u32 strip_to)
+{
+  ASSERT_DIE(strip_to);
+  while (r && !BIT32_TEST(&strip_to, r->stored))
+    r = r->next;
+
+  return r;
+}
+
 void ea__free(struct ea_storage *r);
 static inline void ea_free(ea_list *l) {
   if (!l) return;
@@ -545,9 +576,4 @@ void ea_dump(ea_list *);
 void ea_dump_all(void);
 void ea_show_list(struct cli *, ea_list *);
 
-#define rta_lookup	ea_lookup
-#define rta_is_cached	ea_is_cached
-#define rta_clone	ea_clone
-#define rta_free	ea_free
-
 #endif
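
The fast path of the new ea_lookup() above is decided purely from the layer
numbering: a list already stored at the requested layer, or at a layer the
caller allows to be squashed, only gets its reference count bumped, and
everything else falls through to ea_lookup_slow(). Below is a minimal
standalone model of that decision; the BIT32_* definitions are stand-ins
assumed to mirror lib/bitops.h and reproduce only the semantics used here.

#include <stdio.h>
#include <stdint.h>

enum ea_stored {
  EALS_NONE = 0, EALS_PREIMPORT = 1, EALS_FILTERED = 2,
  EALS_IN_TABLE = 3, EALS_KEY = 4, EALS_CUSTOM = 0x10, EALS_MAX = 0x20,
};

/* Assumed stand-ins; the real macros live in lib/bitops.h and may
 * also handle u32 arrays. */
#define BIT32_VAL(p)		(1U << (p))
#define BIT32_TEST(v, p)	(*(v) & BIT32_VAL(p))

/* Returns 1 where ea_lookup() would just ea_ref() the list,
 * 0 where it has to call ea_lookup_slow(). */
static int fast_path(uint32_t stored, uint32_t squash_upto, uint32_t oid)
{
  return (stored == oid) || BIT32_TEST(&squash_upto, stored);
}

int main(void)
{
  /* The rte_store() case below: prefilter and filtered layers may be
   * squashed into the in-table layer. */
  uint32_t squash = BIT32_VAL(EALS_PREIMPORT) | BIT32_VAL(EALS_FILTERED);

  printf("%d\n", fast_path(EALS_IN_TABLE, squash, EALS_IN_TABLE)); /* 1: already stored there */
  printf("%d\n", fast_path(EALS_FILTERED, squash, EALS_IN_TABLE)); /* 1: squashable layer */
  printf("%d\n", fast_path(EALS_NONE, squash, EALS_IN_TABLE));     /* 0: temporary list */
  return 0;
}

Temporary lists (EALS_NONE) can only hit the fast path if a caller sets
bit 0 in its squash mask, which no call site in this patch does.
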
diff --git a/nest/mpls.c b/nest/mpls.c
index 5400bcba..eceaeba4 100644
--- a/nest/mpls.c
+++ b/nest/mpls.c
@@ -1225,7 +1225,7 @@ mpls_get_key_attrs(struct mpls_fec_map *m, ea_list *src)
     ea.a[ea.l.count++] = EA_LITERAL_EMBEDDED(&ea_gen_source, 0, m->mpls_rts);
 
   PUT_ATTR(&ea_gen_mpls_class);
-  return ea_get_storage(ea_lookup(&ea.l, 0));
+  return ea_get_storage(ea_lookup(&ea.l, 0, EALS_KEY));
 }
 
 static void
diff --git a/nest/rt-attr.c b/nest/rt-attr.c
index 99eddc7b..52fb8568 100644
--- a/nest/rt-attr.c
+++ b/nest/rt-attr.c
@@ -1027,7 +1027,7 @@ ea_sort(ea_list *e)
  * a given &ea_list after merging with ea_merge().
  */
 static unsigned
-ea_scan(const ea_list *e, int overlay)
+ea_scan(const ea_list *e, u32 upto)
 {
   unsigned cnt = 0;
 
@@ -1035,7 +1035,7 @@
   {
     cnt += e->count;
     e = e->next;
-    if (e && overlay && ea_is_cached(e))
+    if (e && BIT32_TEST(&upto, e->stored))
       break;
   }
   return sizeof(ea_list) + sizeof(eattr)*cnt;
@@ -1056,7 +1056,7 @@
  * by calling ea_sort().
  */
 static void
-ea_merge(ea_list *e, ea_list *t, int overlay)
+ea_merge(ea_list *e, ea_list *t, u32 upto)
 {
   eattr *d = t->attrs;
 
@@ -1070,7 +1070,7 @@
 
     d += e->count;
     e = e->next;
-    if (e && overlay && ea_is_cached(e))
+    if (e && BIT32_TEST(&upto, e->stored))
       break;
   }
 
@@ -1078,22 +1078,22 @@
 }
 
 ea_list *
-ea_normalize(ea_list *e, int overlay)
+ea_normalize(ea_list *e, u32 upto)
 {
 #if 0
   debug("(normalize)");
   ea_dump(e);
   debug(" ----> ");
 #endif
-  ea_list *t = tmp_alloc(ea_scan(e, overlay));
-  ea_merge(e, t, overlay);
+  ea_list *t = tmp_allocz(ea_scan(e, upto));
+  ea_merge(e, t, upto);
   ea_sort(t);
 #if 0
   ea_dump(t);
   debug("\n");
 #endif
-  return t->count ? t : t->next;
+  return t;
 }
 
 static _Bool
@@ -1214,10 +1214,7 @@ ea_list_ref(ea_list *l)
   }
 
   if (l->next)
-  {
-    ASSERT_DIE(ea_is_cached(l->next));
-    ea_clone(l->next);
-  }
+    ea_ref(l->next);
 }
 
 static void ea_free_nested(ea_list *l);
@@ -1503,11 +1500,11 @@ ea_dump(ea_list *e)
   }
   while (e)
   {
-    struct ea_storage *s = ea_is_cached(e) ? ea_get_storage(e) : NULL;
-    debug("[%c%c%c] uc=%d h=%08x",
+    struct ea_storage *s = e->stored ? ea_get_storage(e) : NULL;
+    debug("[%c%c] overlay=%d uc=%d h=%08x",
 	  (e->flags & EALF_SORTED) ? 'S' : 's',
 	  (e->flags & EALF_BISECT) ? 'B' : 'b',
-	  (e->flags & EALF_CACHED) ? 'C' : 'c',
+	  e->stored,
 	  s ? atomic_load_explicit(&s->uc, memory_order_relaxed) : 0,
 	  s ? s->hash_key : 0);
     for(i=0; i<e->count; i++)
@@ -1553,7 +1550,7 @@ ea_dump(ea_list *e)
  * ea_hash() takes an extended attribute list and calculated a hopefully
 * uniformly distributed hash value from its contents.
  */
-inline uint
+uint
 ea_hash(ea_list *e)
 {
   const u64 mul = 0x68576150f3d6847;
@@ -1670,19 +1667,22 @@ rta_rehash(void)
 * converted to the normalized form.
  */
 ea_list *
-ea_lookup(ea_list *o, int overlay)
+ea_lookup_slow(ea_list *o, u32 squash_upto, enum ea_stored oid)
 {
   struct ea_storage *r;
   uint h;
 
-  ASSERT(!ea_is_cached(o));
-  o = ea_normalize(o, overlay);
+  ASSERT(o->stored != oid);
+  ASSERT(oid);
+  o = ea_normalize(o, squash_upto);
   h = ea_hash(o);
 
+  squash_upto |= BIT32_VAL(oid);
+
   RTA_LOCK;
   for(r=rta_hash_table[h & rta_cache_mask]; r; r=r->next_hash)
-    if (r->hash_key == h && ea_same(r->l, o))
+    if (r->hash_key == h && ea_same(r->l, o) && BIT32_TEST(&squash_upto, r->l->stored))
     {
       atomic_fetch_add_explicit(&r->uc, 1, memory_order_acq_rel);
       RTA_UNLOCK;
@@ -1705,7 +1705,8 @@
   ea_list_copy(r->l, o, elen);
   ea_list_ref(r->l);
 
-  r->l->flags |= EALF_CACHED | huge;
+  r->l->flags |= huge;
+  r->l->stored = oid;
   r->hash_key = h;
   atomic_store_explicit(&r->uc, 1, memory_order_release);
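
ea_normalize() takes the same kind of layer mask: ea_scan() and ea_merge()
flatten the override chain only until they reach a stored layer listed in
`upto`, which then survives as the ->next underlay of the result. A reduced
sketch of that stop condition follows; the struct is a stand-in, not BIRD's
real ea_list, and BIT32_* are assumed equivalents of the lib/bitops.h macros.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define BIT32_VAL(p)		(1U << (p))
#define BIT32_TEST(v, p)	(*(v) & BIT32_VAL(p))

struct layer {
  struct layer *next;	/* underlying (older) layer */
  uint32_t stored;	/* enum ea_stored id, 0 = temporary */
  unsigned count;	/* number of attributes in this layer */
};

/* Mirrors the loop in ea_scan(): how many attributes would the
 * flattened list contain? */
static unsigned scan(const struct layer *e, uint32_t upto)
{
  unsigned cnt = 0;
  while (e)
  {
    cnt += e->count;
    e = e->next;
    if (e && BIT32_TEST(&upto, e->stored))
      break;	/* keep this stored layer underneath, don't inline it */
  }
  return cnt;
}

int main(void)
{
  struct layer filtered = { NULL, 2 /* EALS_FILTERED */, 5 };
  struct layer tmp = { &filtered, 0 /* EALS_NONE */, 2 };

  printf("%u\n", scan(&tmp, 0));		/* 7: everything squashed */
  printf("%u\n", scan(&tmp, BIT32_VAL(2)));	/* 2: filtered layer kept below */
  return 0;
}

Note also the switch from tmp_alloc() to tmp_allocz() together with the
unconditional `return t`: the normalized result now always exists as its
own, possibly empty, layer instead of collapsing into its underlay.
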
diff --git a/nest/rt-show.c b/nest/rt-show.c
index e705c253..1c978d72 100644
--- a/nest/rt-show.c
+++ b/nest/rt-show.c
@@ -54,9 +54,9 @@ rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, int primary
   else
     from[0] = 0;
 
-  /* Need to normalize the extended attributes */
-  if (d->verbose && !rta_is_cached(a) && a)
-    a = ea_normalize(a, 0);
+  /* Need to normalize the attributes for dumping */
+  if (d->verbose && !a->stored)
+    a = ea_normalize(a, EALS_NONE);
 
   get_route_info = e->src->owner->class ? e->src->owner->class->get_route_info : NULL;
   if (get_route_info)
diff --git a/nest/rt-table.c b/nest/rt-table.c
index 20c57ad7..e1bce674 100644
--- a/nest/rt-table.c
+++ b/nest/rt-table.c
@@ -494,10 +494,7 @@ rte_store(const rte *r, struct netindex *i, struct rtable_private *tab)
 
   rt_lock_source(e->src);
 
-  if (ea_is_cached(e->attrs))
-    e->attrs = rta_clone(e->attrs);
-  else
-    e->attrs = rta_lookup(e->attrs, 1);
+  e->attrs = ea_lookup(e->attrs, BIT32_ALL(EALS_PREIMPORT, EALS_FILTERED), EALS_IN_TABLE);
 
 #if 0
   debug("(store) %N ", i->addr);
@@ -524,7 +521,7 @@ rte_free(struct rte_storage *e, struct rtable_private *tab)
 
   rt_unlock_source(e->rte.src);
 
-  rta_free(e->rte.attrs);
+  ea_free(e->rte.attrs);
   sl_free(e);
 }
@@ -1456,7 +1453,7 @@ rte_same(const rte *x, const rte *y)
   return (x == y) || (
     (x->attrs == y->attrs) ||
-    ((!(x->attrs->flags & EALF_CACHED) || !(y->attrs->flags & EALF_CACHED)) && ea_same(x->attrs, y->attrs))
+    ((!x->attrs->stored || !y->attrs->stored) && ea_same(x->attrs, y->attrs))
   ) &&
     x->src == y->src &&
     rte_is_filtered(x) == rte_is_filtered(y);
@@ -1719,14 +1716,11 @@ rte_update(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
 
   ASSERT(c->channel_state == CS_UP);
 
-  ea_list *ea_tmp[2] = {};
+  ea_list *ea_prefilter = NULL, *ea_postfilter = NULL;
 
-  /* The import reloader requires prefilter routes to be the first layer */
+  /* Storing prefilter routes as an explicit layer */
   if (new && (c->in_keep & RIK_PREFILTER))
-    ea_tmp[0] = new->attrs =
-      (ea_is_cached(new->attrs) && !new->attrs->next) ?
-      ea_clone(new->attrs) :
-      ea_lookup(new->attrs, 0);
+    ea_prefilter = new->attrs = ea_lookup(new->attrs, 0, EALS_PREIMPORT);
 
 #if 0
   debug("%s.%s -(prefilter)-> %s: %N ", c->proto->name, c->name, c->table->name, n);
@@ -1769,8 +1763,8 @@ rte_update(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
 
   if (new)
   {
-    ea_tmp[1] = new->attrs =
-      ea_is_cached(new->attrs) ? ea_clone(new->attrs) : ea_lookup(new->attrs, !!ea_tmp[0]);
+    ea_postfilter = new->attrs = ea_lookup(new->attrs,
+	ea_prefilter ? BIT32_ALL(EALS_PREIMPORT) : 0, EALS_FILTERED);
 
     if (net_is_flow(n))
       rt_flowspec_resolve_rte(new, c);
@@ -1798,9 +1792,8 @@ rte_update(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
 
   /* Now the route attributes are kept by the in-table cached version
    * and we may drop the local handles */
-  for (uint k = 0; k < ARRAY_SIZE(ea_tmp); k++)
-    if (ea_tmp[k])
-      ea_free(ea_tmp[k]);
+  ea_free(ea_prefilter);
+  ea_free(ea_postfilter);
 }
 
 void
@@ -3138,7 +3131,10 @@ rt_next_hop_update_rte(const rte *old, rte *new)
   if (!head)
     return 0;
 
+  /* Get the state of the route just before nexthop was resolved */
   *new = *old;
+  new->attrs = ea_strip_to(new->attrs, BIT32_ALL(EALS_PREIMPORT, EALS_FILTERED));
+
   RT_LOCKED(head->he->owner, tab)
     rta_apply_hostentry(tab, &new->attrs, head);
   return 1;
@@ -3229,7 +3225,7 @@ rt_flowspec_check(rtable *tab_ip, rtable *tab_flow, const net_addr *n, ea_list *
   if (nb)
   {
     rb = RTE_COPY_VALID(RTE_OR_NULL(nb->routes));
-    rta_clone(rb.attrs);
+    ea_ref(rb.attrs);
     net_copy(&nau.n, nb->routes->rte.net);
     rb.net = &nau.n;
   }
@@ -3310,6 +3306,7 @@ rt_flowspec_update_rte(rtable *tab, const rte *r, rte *new)
     return 0;
 
   *new = *r;
+  new->attrs = ea_strip_to(new->attrs, BIT32_ALL(EALS_PREIMPORT, EALS_FILTERED));
   ea_set_attr_u32(&new->attrs, &ea_gen_flowspec_valid, 0, valid);
   return 1;
 #else
@@ -4101,8 +4098,7 @@ void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *n
     rte new = rte_init_from(feed[i]);
 
     /* Strip the later attribute layers */
-    while (new.attrs->next)
-      new.attrs = new.attrs->next;
+    new.attrs = ea_strip_to(new.attrs, BIT32_ALL(EALS_PREIMPORT));
 
     /* And reload the route */
     rte_update(c, net, &new, new.src);
@@ -4203,7 +4199,7 @@ hc_new_hostentry(struct hostcache *hc, pool *p, ip_addr a, ip_addr ll, rtable *d
 static void
 hc_delete_hostentry(struct hostcache *hc, pool *p, struct hostentry *he)
 {
-  rta_free(he->src);
+  ea_free(he->src);
 
   rem_node(&he->ln);
   hc_remove(hc, he);
@@ -4288,7 +4284,7 @@ rt_free_hostcache(struct rtable_private *tab)
   WALK_LIST(n, hc->hostentries)
   {
     struct hostentry *he = SKIP_BACK(struct hostentry, ln, n);
-    rta_free(he->src);
+    ea_free(he->src);
 
     if (!lfuc_finished(&he->uc))
       log(L_ERR "Hostcache is not empty in table %s", tab->name);
@@ -4384,7 +4380,7 @@ rt_update_hostentry(struct rtable_private *tab, struct hostentry *he)
 	  direct++;
       }
 
-      he->src = rta_clone(a);
+      he->src = ea_ref(a);
       he->nexthop_linkable = !direct;
       he->igp_metric = rt_get_igp_metric(&e->rte);
     }
@@ -4393,7 +4389,7 @@ done:
   /* Add a prefix range to the trie */
   trie_add_prefix(tab->hostcache->trie, &he_addr, pxlen, he_addr.pxlen);
 
-  rta_free(old_src);
+  ea_free(old_src);
   return old_src != he->src;
 }
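
Both rt_next_hop_update_rte() and the import reloader above use
ea_strip_to() to rewind a stored chain to an earlier layer instead of
walking ->next by hand. A standalone sketch of that walk; BIT32_ALL is
assumed to expand to the OR of the listed per-layer bits, so a hypothetical
two-argument stand-in is used here.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define BIT32_VAL(p)		(1U << (p))
#define BIT32_TEST(v, p)	(*(v) & BIT32_VAL(p))
#define BIT32_ALL_2(a, b)	(BIT32_VAL(a) | BIT32_VAL(b))	/* assumed shape of BIT32_ALL */

enum { EALS_PREIMPORT = 1, EALS_FILTERED = 2, EALS_IN_TABLE = 3 };

struct layer { struct layer *next; uint32_t stored; const char *name; };

/* Same logic as the ea_strip_to() inline in lib/route.h */
static struct layer *strip_to(struct layer *r, uint32_t mask)
{
  while (r && !BIT32_TEST(&mask, r->stored))
    r = r->next;
  return r;
}

int main(void)
{
  struct layer pre = { NULL, EALS_PREIMPORT, "preimport" };
  struct layer flt = { &pre, EALS_FILTERED, "filtered" };
  struct layer tab = { &flt, EALS_IN_TABLE, "in-table" };

  /* Next-hop update: drop the in-table layer, keep the filtered state. */
  printf("%s\n", strip_to(&tab, BIT32_ALL_2(EALS_PREIMPORT, EALS_FILTERED))->name);

  /* Import reload: rewind all the way to the prefilter state. */
  printf("%s\n", strip_to(&tab, BIT32_VAL(EALS_PREIMPORT))->name);
  return 0;
}
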
diff --git a/proto/aggregator/aggregator.c b/proto/aggregator/aggregator.c
index c13b93ad..cc1b2862 100644
--- a/proto/aggregator/aggregator.c
+++ b/proto/aggregator/aggregator.c
@@ -265,10 +265,7 @@ aggregator_rt_notify(struct proto *P, struct channel *src_ch, const net_addr *ne
   }
 
   /* Store the route attributes */
-  if (rta_is_cached(new->attrs))
-    rta_clone(new->attrs);
-  else
-    new->attrs = rta_lookup(new->attrs, 0);
+  new->attrs = ea_lookup(new->attrs, 0, EALS_KEY);
 
   /* Insert the new route into the bucket */
   struct aggregator_route *arte = sl_alloc(p->route_slab);
@@ -296,7 +293,7 @@ aggregator_rt_notify(struct proto *P, struct channel *src_ch, const net_addr *ne
       old_bucket->count--;
 
       HASH_REMOVE2(p->routes, AGGR_RTE, p->p.pool, old_route);
-      rta_free(old_route->rte.attrs);
+      ea_free(old_route->rte.attrs);
       sl_free(old_route);
     }
@@ -408,7 +405,7 @@ aggregator_shutdown(struct proto *P)
       b->rte = arte->next_rte;
       b->count--;
       HASH_REMOVE(p->routes, AGGR_RTE, arte);
-      rta_free(arte->rte.attrs);
+      ea_free(arte->rte.attrs);
       sl_free(arte);
     }
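
With the rta_is_cached() branching gone, every stored list is reference
counted the same way, and call sites shrink to the ea_lookup() / ea_ref() /
ea_free() triple seen in aggregator_rt_notify(). A reduced model of that
lifecycle, using the same C11 atomics as the uc field of struct ea_storage;
the printf stands in for the real ea__free() teardown.

#include <stdio.h>
#include <stdatomic.h>
#include <stddef.h>

struct storage { _Atomic unsigned uc; };

static struct storage *ref(struct storage *s)
{
  atomic_fetch_add_explicit(&s->uc, 1, memory_order_acq_rel);
  return s;
}

static void unref(struct storage *s)
{
  if (!s)
    return;	/* like ea_free(), NULL handles are tolerated */
  if (atomic_fetch_sub_explicit(&s->uc, 1, memory_order_acq_rel) == 1)
    printf("last reference gone, would call ea__free()\n");
}

int main(void)
{
  struct storage s = { 1 };	/* ea_lookup_slow() stores a new entry with uc = 1 */

  struct storage *mine = ref(&s);	/* aggregator keeps its own reference */
  unref(mine);				/* the HASH_REMOVE2() path drops it */
  unref(&s);				/* the table drops the original one */
  return 0;
}
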
diff --git a/proto/bgp/packets.c b/proto/bgp/packets.c
index ce722645..52c08289 100644
--- a/proto/bgp/packets.c
+++ b/proto/bgp/packets.c
@@ -1541,7 +1541,7 @@ bgp_rte_update(struct bgp_parse_state *s, const net_addr *n, u32 path_id, ea_lis
 
   /* Prepare cached route attributes */
   if (!s->mpls && (s->cached_ea == NULL))
-    a0 = s->cached_ea = ea_lookup(a0, 0);
+    a0 = s->cached_ea = ea_lookup(a0, 0, EALS_CUSTOM);
 
   rte e0 = {
     .attrs = a0,
@@ -1600,7 +1600,7 @@ bgp_decode_mpls_labels(struct bgp_parse_state *s, byte **pos, uint *len, uint *p
     bgp_apply_mpls_labels(s, to, lnum, labels);
 
   /* Attributes were changed, invalidate cached entry */
-  rta_free(s->cached_ea);
+  ea_free(s->cached_ea);
   s->cached_ea = NULL;
 
   return;
@@ -2717,7 +2717,7 @@ bgp_decode_nlri(struct bgp_parse_state *s, u32 afi, byte *nlri, uint len, ea_lis
 
   c->desc->decode_nlri(s, nlri, len, ea);
 
-  rta_free(s->cached_ea);
+  ea_free(s->cached_ea);
   s->cached_ea = NULL;
 
   rt_unlock_source(s->last_src);
@@ -2826,7 +2826,7 @@ bgp_rx_update(struct bgp_conn *conn, byte *pkt, uint len)
 		     ea, s.mp_next_hop_data, s.mp_next_hop_len);
 
 done:
-  rta_free(s.cached_ea);
+  ea_free(s.cached_ea);
   lp_restore(tmp_linpool, tmpp);
   return;
 }
diff --git a/proto/ospf/rt.c b/proto/ospf/rt.c
index 59ae6cf0..8f28eeb1 100644
--- a/proto/ospf/rt.c
+++ b/proto/ospf/rt.c
@@ -2097,7 +2097,7 @@ again1:
 
       ASSERT_DIE(ARRAY_SIZE(eattrs.a) >= eattrs.l.count);
 
-      ea_list *eal = ea_lookup(&eattrs.l, 0);
+      ea_list *eal = ea_lookup(&eattrs.l, 0, EALS_CUSTOM);
       ea_free(nf->old_ea);
       nf->old_ea = eal;
 
@@ -2116,7 +2116,7 @@ again1:
     else if (nf->old_ea)
     {
       /* Remove the route */
-      rta_free(nf->old_ea);
+      ea_free(nf->old_ea);
       nf->old_ea = NULL;
 
       rte_update(p->p.main_channel, nf->fn.addr, NULL, p->p.main_source);
diff --git a/proto/perf/perf.c b/proto/perf/perf.c
index 227b51be..eb524970 100644
--- a/proto/perf/perf.c
+++ b/proto/perf/perf.c
@@ -156,10 +156,10 @@ perf_loop(void *data)
       ea_set_attr_data(&ea, &ea_gen_nexthop, 0,
 		       &nhad.ad.data, sizeof nhad - sizeof nhad.ad);
 
-      p->data[i].a = rta_lookup(ea, 0);
+      p->data[i].a = ea_lookup(ea, 0, EALS_CUSTOM);
     }
     else
-      p->data[i].a = rta_clone(p->data[i-1].a);
+      p->data[i].a = ea_ref(p->data[i-1].a);
   }
 
   clock_gettime(CLOCK_MONOTONIC, &ts_generated);
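
Protocols with no predefined slot (the BGP parse-time cache, OSPF and perf
above) store at EALS_CUSTOM. The numbering is deliberately tight: `byte
stored:5` holds ids up to 0x1F, hence EALS_MAX = 0x20, every id indexes one
bit of a u32 squash/strip mask, and 0x10..0x1F remain for custom layers. A
compile-time check of those invariants follows; reading the custom range as
"OR EALS_CUSTOM with a protocol-chosen value below 0x10" is an
interpretation of the comment in lib/route.h, not something this patch
enforces.

#include <assert.h>

enum ea_stored {
  EALS_NONE = 0, EALS_PREIMPORT = 1, EALS_FILTERED = 2,
  EALS_IN_TABLE = 3, EALS_KEY = 4, EALS_CUSTOM = 0x10, EALS_MAX = 0x20,
};

/* `byte stored:5` can hold at most 31, hence EALS_MAX = 0x20 */
static_assert(EALS_MAX - 1 <= 0x1F, "layer id must fit the 5-bit field");

/* every layer id must index one bit of a u32 squash/strip mask */
static_assert(EALS_MAX <= 32, "layer id must be usable with BIT32_*");

/* sixteen ids are left for custom layers */
static_assert(EALS_MAX - EALS_CUSTOM == 16, "custom layer range");

int main(void) { return 0; }
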