Mirror of https://gitlab.nic.cz/labs/bird.git

Properly consted routes inside table

Maria Matejka 2023-07-03 20:38:24 +02:00
parent 206b33fd0b
commit a920b5111c
5 changed files with 84 additions and 62 deletions
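
What the commit does, in short: a route stored in a table is now exposed to everyone as a const struct rte. The only members the table legitimately rewrites in place, pflags and stale_cycle, are pulled out into the RTE_IN_TABLE_WRITABLE macro, kept as the first members of struct rte, and aliased by an anonymous struct inside a union in struct rte_storage, so they remain writable on the storage object without any cast. For the few spots where the table fills in a route it has just stored (lastmod, id), the new RTES_WRITE macro hands out a non-const view. The following is a minimal, self-contained sketch of that layout trick with the member set trimmed down to two fields plus an id; the real definitions are in the hunks below.

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;
typedef uint32_t u32;
typedef unsigned char byte;

/* Members a table may still modify on an already stored route. They must
 * stay the first members of struct rte so that the anonymous struct in
 * struct rte_storage below overlays exactly these fields. */
#define RTE_IN_TABLE_WRITABLE \
  byte pflags;      /* protocol-specific flags; may change in-table */ \
  u8 stale_cycle    /* route refresh counter; may change in-table */

typedef struct rte {
  RTE_IN_TABLE_WRITABLE;
  u32 id;           /* stands in for all members that are immutable once stored */
} rte;

struct rte_storage {
  struct rte_storage *next;
  union {
    struct { RTE_IN_TABLE_WRITABLE; };  /* writable alias of the leading members */
    const struct rte rte;               /* read-only view of the whole route */
  };
};

/* Table-internal escape hatch: a writable view of a route the table owns.
 * Dropping const is fine here because the storage object itself is never
 * defined const. */
#define RTES_WRITE(r) (((r) != ((struct rte_storage *) 0)) ? ((struct rte *) &(r)->rte) : NULL)

int main(void)
{
  struct rte_storage s = { 0 };

  RTES_WRITE(&s)->id = 42;  /* table code filling a just-stored route */
  s.stale_cycle = 3;        /* in-table-writable member, no cast needed */
  /* s.rte.id = 7;             would not compile: the rte member is const */

  printf("id=%u stale=%u\n", (unsigned) s.rte.id, (unsigned) s.rte.stale_cycle);
  return 0;
}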


@@ -21,20 +21,24 @@ struct network;
struct proto;
struct cli;
struct rtable_private;
struct rte_storage;
#define RTE_IN_TABLE_WRITABLE \
byte pflags; /* Protocol-specific flags; may change in-table (!) */ \
u8 stale_cycle; /* Auxiliary value for route refresh; may change in-table (!) */ \
typedef struct rte {
RTE_IN_TABLE_WRITABLE;
byte flags; /* Table-specific flags */
u8 generation; /* If this route import is based on other previously exported route,
this value should be 1 + MAX(generation of the parent routes).
Otherwise the route is independent and this value is zero. */
u32 id; /* Table specific route id */
struct ea_list *attrs; /* Attributes of this route */
const net_addr *net; /* Network this RTE belongs to */
struct rte_src *src; /* Route source that created the route */
struct rt_import_hook *sender; /* Import hook used to send the route to the routing table */
btime lastmod; /* Last modified (set by table) */
u32 id; /* Table specific route id */
byte flags; /* Table-specific flags */
byte pflags; /* Protocol-specific flags */
u8 generation; /* If this route import is based on other previously exported route,
this value should be 1 + MAX(generation of the parent routes).
Otherwise the route is independent and this value is zero. */
u8 stale_cycle; /* Auxiliary value for route refresh */
} rte;
#define REF_FILTERED 2 /* Route is rejected by import filter */
@@ -73,7 +77,7 @@ struct rte_owner_class {
struct rte_owner {
struct rte_owner_class *class;
int (*rte_recalculate)(struct rtable_private *, struct network *, struct rte *, struct rte *, struct rte *);
int (*rte_recalculate)(struct rtable_private *, struct network *, struct rte_storage *new, struct rte_storage *, struct rte_storage *);
HASH(struct rte_src) hash;
const char *name;
u32 hash_key;
@@ -461,7 +465,7 @@ static inline const char *flowspec_valid_name(enum flowspec_valid v)
{ return (v < FLOWSPEC__MAX) ? flowspec_valid_names[v] : "???"; }
extern struct ea_class ea_gen_flowspec_valid;
static inline enum flowspec_valid rt_get_flowspec_valid(rte *rt)
static inline enum flowspec_valid rt_get_flowspec_valid(const rte *rt)
{ return ea_get_int(rt->attrs, &ea_gen_flowspec_valid, FLOWSPEC_UNKNOWN); }
/* Next hop: For now, stored as adata */


@@ -154,7 +154,7 @@ static void rt_delete(void *);
static void rt_export_used(struct rt_table_exporter *, const char *, const char *);
static void rt_export_cleanup(struct rtable_private *tab);
static int rte_same(rte *x, rte *y);
static int rte_same(const rte *x, const rte *y);
const char *rt_import_state_name_array[TIS_MAX] = {
[TIS_DOWN] = "DOWN",
@@ -639,19 +639,20 @@ rte_find(net *net, struct rte_src *src)
struct rte_storage *
rte_store(const rte *r, net *net, struct rtable_private *tab)
{
struct rte_storage *e = sl_alloc(tab->rte_slab);
struct rte_storage *s = sl_alloc(tab->rte_slab);
struct rte *e = RTES_WRITE(s);
e->rte = *r;
e->rte.net = net->n.addr;
*e = *r;
e->net = net->n.addr;
rt_lock_source(e->rte.src);
rt_lock_source(e->src);
if (ea_is_cached(e->rte.attrs))
e->rte.attrs = rta_clone(e->rte.attrs);
if (ea_is_cached(e->attrs))
e->attrs = rta_clone(e->attrs);
else
e->rte.attrs = rta_lookup(e->rte.attrs, 1);
e->attrs = rta_lookup(e->attrs, 1);
return e;
return s;
}
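
The caller side of the new rte_store() shows up further down in rte_recalculate() and rt_next_hop_update_net(): the storage comes back as a const-viewed object, and the table takes a writable view only to fill the bookkeeping it owns. Condensed from those hunks (the variable is renamed new_w here for clarity, otherwise the calls are the ones below):

struct rte_storage *new_stored = rte_store(new, net, table);  /* const copy in table storage */
rte *new_w = RTES_WRITE(new_stored);                          /* writable view, table code only */

new_w->lastmod = current_time();                              /* table-owned bookkeeping */
new_w->id = hmap_first_zero(&table->id_map);
hmap_set(&table->id_map, new_w->id);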
/**
@@ -886,7 +887,7 @@ do_rt_notify(struct channel *c, const net_addr *net, rte *new, const rte *old)
}
static void
rt_notify_basic(struct channel *c, const net_addr *net, rte *new, rte *old)
rt_notify_basic(struct channel *c, const net_addr *net, rte *new, const rte *old)
{
if (new && old && rte_same(new, old))
{
@@ -1104,7 +1105,7 @@ void
rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
{
struct channel *c = SKIP_BACK(struct channel, out_req, req);
rte *o = RTE_VALID_OR_NULL(first->old_best);
const rte *o = RTE_VALID_OR_NULL(first->old_best);
struct rte_storage *new_best = first->new_best;
RPE_WALK(first, rpe, NULL)
@@ -1123,8 +1124,8 @@ rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pend
{
struct channel *c = SKIP_BACK(struct channel, out_req, req);
rte *n = RTE_VALID_OR_NULL(first->new);
rte *o = RTE_VALID_OR_NULL(first->old);
const rte *n = RTE_VALID_OR_NULL(first->new);
const rte *o = RTE_VALID_OR_NULL(first->old);
if (!n && !o)
{
@@ -1613,7 +1614,7 @@ rte_validate(struct channel *ch, rte *e)
}
static int
rte_same(rte *x, rte *y)
rte_same(const rte *x, const rte *y)
{
/* rte.flags / rte.pflags are not checked, as they are internal to rtable */
return
@@ -1622,7 +1623,7 @@ rte_same(rte *x, rte *y)
rte_is_filtered(x) == rte_is_filtered(y);
}
static inline int rte_is_ok(rte *e) { return e && !rte_is_filtered(e); }
static inline int rte_is_ok(const rte *e) { return e && !rte_is_filtered(e); }
static int
rte_recalculate(struct rtable_private *table, struct rt_import_hook *c, net *net, rte *new, struct rte_src *src)
@@ -1630,8 +1631,8 @@ rte_recalculate(struct rtable_private *table, struct rt_import_hook *c, net *net
struct rt_import_request *req = c->req;
struct rt_import_stats *stats = &c->stats;
struct rte_storage *old_best_stored = net->routes, *old_stored = NULL;
rte *old_best = old_best_stored ? &old_best_stored->rte : NULL;
rte *old = NULL;
const rte *old_best = old_best_stored ? &old_best_stored->rte : NULL;
const rte *old = NULL;
/* If the new route is identical to the old one, we find the attributes in
* cache and clone these with no performance drop. OTOH, if we were to lookup
@@ -1639,7 +1640,10 @@ rte_recalculate(struct rtable_private *table, struct rt_import_hook *c, net *net
* therefore it's definitely worth the time. */
struct rte_storage *new_stored = NULL;
if (new)
new = &(new_stored = rte_store(new, net, table))->rte;
{
new_stored = rte_store(new, net, table);
new = RTES_WRITE(new_stored);
}
/* Find and remove original route from the same protocol */
struct rte_storage **before_old = rte_find(net, src);
@@ -1666,7 +1670,7 @@ rte_recalculate(struct rtable_private *table, struct rt_import_hook *c, net *net
if (new && rte_same(old, &new_stored->rte))
{
/* No changes, ignore the new route and refresh the old one */
old->stale_cycle = new->stale_cycle;
old_stored->stale_cycle = new->stale_cycle;
if (!rte_is_filtered(new))
{
@@ -1737,7 +1741,7 @@ rte_recalculate(struct rtable_private *table, struct rt_import_hook *c, net *net
the first position. There are several optimized cases. */
if (src->owner->rte_recalculate &&
src->owner->rte_recalculate(table, net, new_stored ? &new_stored->rte : NULL, old, old_best))
src->owner->rte_recalculate(table, net, new_stored, old_stored, old_best_stored))
goto do_recalculate;
if (new_stored && rte_better(&new_stored->rte, old_best))
@@ -1800,9 +1804,9 @@ rte_recalculate(struct rtable_private *table, struct rt_import_hook *c, net *net
if (new_stored)
{
new_stored->rte.lastmod = current_time();
new_stored->rte.id = hmap_first_zero(&table->id_map);
hmap_set(&table->id_map, new_stored->rte.id);
new->lastmod = current_time();
new->id = hmap_first_zero(&table->id_map);
hmap_set(&table->id_map, new->id);
}
/* Log the route change */
@@ -1829,7 +1833,7 @@ rte_recalculate(struct rtable_private *table, struct rt_import_hook *c, net *net
}
int
channel_preimport(struct rt_import_request *req, rte *new, rte *old)
channel_preimport(struct rt_import_request *req, rte *new, const rte *old)
{
struct channel *c = SKIP_BACK(struct channel, in_req, req);
@@ -2360,7 +2364,7 @@ rt_refresh_begin(struct rt_import_request *req)
{
for (struct rte_storage *e = n->routes; e; e = e->next)
if (e->rte.sender == req->hook)
e->rte.stale_cycle = 0;
e->stale_cycle = 0;
}
FIB_WALK_END;
@@ -2654,7 +2658,7 @@ rt_flowspec_export_one(struct rt_export_request *req, const net_addr *net, struc
}
/* This net may affect some flowspecs, check the actual change */
rte *o = RTE_VALID_OR_NULL(first->old_best);
const rte *o = RTE_VALID_OR_NULL(first->old_best);
struct rte_storage *new_best = first->new_best;
RPE_WALK(first, rpe, NULL)
@@ -3546,7 +3550,7 @@ rta_next_hop_outdated(ea_list *a)
}
static inline int
rt_next_hop_update_rte(rte *old, rte *new)
rt_next_hop_update_rte(const rte *old, rte *new)
{
struct hostentry_adata *head = rta_next_hop_outdated(old->attrs);
if (!head)
@@ -3704,7 +3708,7 @@ rt_flowspec_check(rtable *tab_ip, rtable *tab_flow, const net_addr *n, ea_list *
#endif /* CONFIG_BGP */
static int
rt_flowspec_update_rte(rtable *tab, rte *r, rte *new)
rt_flowspec_update_rte(rtable *tab, const rte *r, rte *new)
{
#ifdef CONFIG_BGP
if (r->generation || (rt_get_source_attr(r) != RTS_BGP))
@@ -3836,14 +3840,15 @@ rt_next_hop_update_net(struct rtable_private *tab, net *n)
if (updates[i].new_stored)
{
/* Get a new ID for the route */
updates[i].new_stored->rte.lastmod = current_time();
updates[i].new_stored->rte.id = hmap_first_zero(&tab->id_map);
hmap_set(&tab->id_map, updates[i].new_stored->rte.id);
rte *new = RTES_WRITE(updates[i].new_stored);
new->lastmod = current_time();
new->id = hmap_first_zero(&tab->id_map);
hmap_set(&tab->id_map, new->id);
/* Call a pre-comparison hook */
/* Not really an efficient way to compute this */
if (updates[i].old->rte.src->owner->rte_recalculate)
updates[i].old->rte.src->owner->rte_recalculate(tab, n, &updates[i].new_stored->rte, &updates[i].old->rte, &old_best->rte);
updates[i].old->rte.src->owner->rte_recalculate(tab, n, updates[i].new_stored, updates[i].old, old_best);
}
#if DEBUGGING
@@ -4611,7 +4616,7 @@ hc_notify_export_one(struct rt_export_request *req, const net_addr *net, struct
else
{
/* This net may affect some hostentries, check the actual change */
rte *o = RTE_VALID_OR_NULL(first->old_best);
const rte *o = RTE_VALID_OR_NULL(first->old_best);
struct rte_storage *new_best = first->new_best;
RPE_WALK(first, rpe, NULL)


@@ -229,7 +229,12 @@ typedef struct network {
struct rte_storage {
struct rte_storage *next; /* Next in chain */
struct rte rte; /* Route data */
union {
struct {
RTE_IN_TABLE_WRITABLE;
};
const struct rte rte; /* Route data */
};
};
#define RTE_COPY(r) ((r) ? (r)->rte : (rte) {})
@@ -237,6 +242,8 @@ struct rte_storage {
#define RTE_OR_NULL(r) ((r) ? &((r)->rte) : NULL)
#define RTE_VALID_OR_NULL(r) (((r) && (rte_is_valid(&(r)->rte))) ? &((r)->rte) : NULL)
#define RTES_WRITE(r) (((r) != ((struct rte_storage *) 0)) ? ((struct rte *) &(r)->rte) : NULL)
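
Two write paths follow from this union, and both already appear in the rt-table hunks above; a condensed recap (not verbatim code, the surrounding variables are the ones used there):

/* 1. The in-table-writable members alias the anonymous struct, so the table
 *    writes them directly on the storage object, no cast involved: */
for (struct rte_storage *e = n->routes; e; e = e->next)
  if (e->rte.sender == req->hook)
    e->stale_cycle = 0;                      /* as in rt_refresh_begin() */

/* 2. Everything else is written only on a route the table has just stored,
 *    through the explicit const-dropping RTES_WRITE view: */
rte *new = RTES_WRITE(new_stored);
new->id = hmap_first_zero(&tab->id_map);     /* as in rt_next_hop_update_net() */
hmap_set(&tab->id_map, new->id);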
/* Table-channel connections */
struct rt_import_request {
@@ -250,7 +257,7 @@ struct rt_import_request {
void (*log_state_change)(struct rt_import_request *req, u8 state);
/* Preimport is called when the @new route is just-to-be inserted, replacing @old.
* Return a route (may be different or modified in-place) to continue or NULL to withdraw. */
int (*preimport)(struct rt_import_request *req, struct rte *new, struct rte *old);
int (*preimport)(struct rt_import_request *req, struct rte *new, const struct rte *old);
};
struct rt_import_hook {
@@ -449,7 +456,7 @@ void rt_exporter_init(struct rt_exporter *re);
* Channel export hooks. To be refactored out.
*/
int channel_preimport(struct rt_import_request *req, rte *new, rte *old);
int channel_preimport(struct rt_import_request *req, rte *new, const rte *old);
void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);


@@ -2524,7 +2524,7 @@ bgp_rte_mergable(const rte *pri, const rte *sec)
static inline int
same_group(rte *r, u32 lpref, u32 lasn)
same_group(const rte *r, u32 lpref, u32 lasn)
{
return (rt_get_preference(r) == lpref) && (bgp_get_neighbor(r) == lasn);
}
@@ -2537,9 +2537,15 @@ use_deterministic_med(struct rte_storage *r)
}
int
bgp_rte_recalculate(struct rtable_private *table, net *net, rte *new, rte *old, rte *old_best)
bgp_rte_recalculate(struct rtable_private *table, net *net,
struct rte_storage *new_stored, struct rte_storage *old_stored, struct rte_storage *old_best_stored)
{
rte *key = new ? new : old;
struct rte_storage *key_stored = new_stored ? new_stored : old_stored;
const struct rte *new = RTE_OR_NULL(new_stored),
*old = RTE_OR_NULL(old_stored),
*old_best = RTE_OR_NULL(old_best_stored),
*key = &key_stored->rte;
u32 lpref = rt_get_preference(key);
u32 lasn = bgp_get_neighbor(key);
int old_suppressed = old ? !!(old->pflags & BGP_REF_SUPPRESSED) : 0;
@@ -2580,8 +2586,8 @@ bgp_rte_recalculate(struct rtable_private *table, net *net, rte *new, rte *old,
if (new && old && !same_group(old, lpref, lasn))
{
int i1, i2;
i1 = bgp_rte_recalculate(table, net, NULL, old, old_best);
i2 = bgp_rte_recalculate(table, net, new, NULL, old_best);
i1 = bgp_rte_recalculate(table, net, NULL, old_stored, old_best_stored);
i2 = bgp_rte_recalculate(table, net, new_stored, NULL, old_best_stored);
return i1 || i2;
}
@@ -2594,11 +2600,11 @@ bgp_rte_recalculate(struct rtable_private *table, net *net, rte *new, rte *old,
*/
if (new)
new->pflags |= BGP_REF_SUPPRESSED;
new_stored->pflags |= BGP_REF_SUPPRESSED;
if (old)
{
old->pflags |= BGP_REF_SUPPRESSED;
old_stored->pflags |= BGP_REF_SUPPRESSED;
/* The fast case - replace not best with worse (or remove not best) */
if (old_suppressed && !(new && bgp_rte_better(new, old)))
@@ -2606,13 +2612,13 @@ bgp_rte_recalculate(struct rtable_private *table, net *net, rte *new, rte *old,
}
/* The default case - find a new best-in-group route */
rte *r = new; /* new may not be in the list */
struct rte_storage *r = new_stored; /* new may not be in the list */
for (struct rte_storage *s = net->routes; rte_is_valid(RTE_OR_NULL(s)); s = s->next)
if (use_deterministic_med(s) && same_group(&s->rte, lpref, lasn))
{
s->rte.pflags |= BGP_REF_SUPPRESSED;
if (!r || bgp_rte_better(&s->rte, r))
r = &s->rte;
s->pflags |= BGP_REF_SUPPRESSED;
if (!r || bgp_rte_better(&s->rte, &r->rte))
r = s;
}
/* Simple case - the last route in group disappears */
@@ -2620,14 +2626,14 @@ bgp_rte_recalculate(struct rtable_private *table, net *net, rte *new, rte *old,
return 0;
/* Found if new is mergable with best-in-group */
if (new && (new != r) && bgp_rte_mergable(r, new))
new->pflags &= ~BGP_REF_SUPPRESSED;
if (new && (new_stored != r) && bgp_rte_mergable(&r->rte, new))
new_stored->pflags &= ~BGP_REF_SUPPRESSED;
/* Found all existing routes mergable with best-in-group */
for (struct rte_storage *s = net->routes; rte_is_valid(RTE_OR_NULL(s)); s = s->next)
if (use_deterministic_med(s) && same_group(&s->rte, lpref, lasn))
if ((&s->rte != r) && bgp_rte_mergable(r, &s->rte))
s->rte.pflags &= ~BGP_REF_SUPPRESSED;
if ((s != r) && bgp_rte_mergable(&r->rte, &s->rte))
s->pflags &= ~BGP_REF_SUPPRESSED;
/* Found best-in-group */
r->pflags &= ~BGP_REF_SUPPRESSED;
@@ -2656,7 +2662,7 @@ bgp_rte_recalculate(struct rtable_private *table, net *net, rte *new, rte *old,
* the first reason does not apply, return 0
*/
if (r == new)
if (r == new_stored)
return old_best && same_group(old_best, lpref, lasn);
else
return !old_suppressed;
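
For a protocol implementing the rte_owner hook, the upshot of this signature change is that rte_recalculate() now hands over the storage objects: route data is read through the const rte view, while the in-table-writable pflags are set on the storage itself, exactly as bgp_rte_recalculate does above. A schematic hook for a hypothetical protocol follows; MY_PFLAG_PREFERRED and my_rte_better are made up for the sketch, and a nonzero return value makes the caller jump to its full best-route recalculation:

static int
my_rte_recalculate(struct rtable_private *tab, struct network *net,
                   struct rte_storage *new_stored, struct rte_storage *old_stored,
                   struct rte_storage *old_best_stored)
{
  /* tab, net and old_stored are unused in this sketch; kept for the signature */
  const rte *old_best = RTE_OR_NULL(old_best_stored);    /* read-only route data */

  if (new_stored && old_best && my_rte_better(&new_stored->rte, old_best))
    new_stored->pflags |= MY_PFLAG_PREFERRED;             /* writable in-table flag */

  return 0;   /* nothing forces a full recalculation in this sketch */
}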


@@ -618,7 +618,7 @@ void bgp_done_prefix(struct bgp_channel *c, struct bgp_prefix *px, struct bgp_bu
int bgp_rte_better(const rte *, const rte *);
int bgp_rte_mergable(const rte *pri, const rte *sec);
int bgp_rte_recalculate(struct rtable_private *table, net *net, rte *new, rte *old, rte *old_best);
int bgp_rte_recalculate(struct rtable_private *table, net *net, struct rte_storage *new, struct rte_storage *old, struct rte_storage *old_best);
void bgp_rte_modify_stale(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
u32 bgp_rte_igp_metric(const rte *);
void bgp_rt_notify(struct proto *P, struct channel *C, const net_addr *n, rte *new, const rte *old);