
/*
* BIRD -- Routing Tables
*
* (c) 1998--2000 Martin Mares <mj@ucw.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
/**
* DOC: Routing tables
*
* Routing tables are probably the most important structures BIRD uses. They
* hold all the information about known networks, the associated routes and
* their attributes.
*
* There are multiple routing tables (a primary one together with any
* number of secondary ones if requested by the configuration). Each table
* is basically a FIB containing entries describing the individual
* destination networks. For each network (represented by structure &net),
* there is a one-way linked list of route entries (&rte), the first entry
* on the list being the best one (i.e., the one we currently use
* for routing); the order of the other ones is undetermined.
*
* The &rte contains information about the route. There are net and src, which
* together form a key identifying the route in a routing table. There is a
* pointer to a &rta structure (see the route attribute module for a precise
* explanation) holding the route attributes, which are the primary data about
* the route. There are several technical fields used by routing table code
* (route id, REF_* flags). There is also the pflags field, holding
* protocol-specific flags. They are not used by routing table code, but by
* protocol-specific hooks. In contrast to route attributes, they are not
* primary data and their validity is also limited to the routing table.
*
* There are several mechanisms that allow automatic update of routes in one
* routing table (dst) as a result of changes in another routing table (src).
* They handle issues of recursive next hop resolving, flowspec validation and
* RPKI validation.
*
* The first such mechanism is handling of recursive next hops. A route in the
* dst table has an indirect next hop address, which is resolved through a route
* in the src table (which may also be the same table) to get an immediate next
* hop. This is implemented using structure &hostcache attached to the src
* table, which contains &hostentry structures for each tracked next hop
* address. These structures are linked from recursive routes in dst tables,
* possibly multiple routes sharing one hostentry (as many routes may have the
* same indirect next hop). There is also a trie in the hostcache, which matches
* all prefixes that may influence resolving of tracked next hops.
*
* When a best route changes in the src table, the hostcache is notified using
* an auxiliary export request, which checks using the trie whether the
* change is relevant and if it is, then it schedules asynchronous hostcache
* recomputation. The recomputation is done by rt_update_hostcache() (called
* as an event of the src table), which walks through all hostentries and
* resolves them (by rt_update_hostentry()). It also updates the trie. If a
* change in hostentry resolution was found, then it schedules asynchronous
* nexthop recomputation of the associated dst table. That is done by
* rt_next_hop_update() (called from rt_event() of the dst table), which
* iterates over all routes in the dst table and re-examines their hostentries
* for changes. Note that in contrast to the hostcache update, the next hop
* update can be interrupted by the main loop. These two full-table walks (over
* the hostcache and the dst table) are necessary due to the absence of direct
* lookups (route -> affected nexthop, nexthop -> its route).
*
* The second mechanism is for flowspec validation, where validity of flowspec
* routes depends on resolving their network prefixes in IP routing tables. This
* is similar to the recursive next hop mechanism, but simpler as there are no
* intermediate hostcache and hostentries (because flows are less likely to
* share a common net prefix than routes are to share a common next hop). Every
* dst table has its own export request in every src table. Each dst table has
* its own trie of prefixes that may influence validation of flowspec routes in
* it (flowspec_trie).
*
* When a best route changes in the src table, the notification mechanism is
* invoked by the export request which checks its dst table's trie to see
* whether the change is relevant, and if so, an asynchronous re-validation of
* flowspec routes in the dst table is scheduled. That is also done by function
* rt_next_hop_update(), like nexthop recomputation above. It iterates over all
* flowspec routes and re-validates them. It also recalculates the trie.
*
* Note that in contrast to the hostcache update, here the trie is recalculated
* during rt_next_hop_update(), which may be interleaved with IP route
* updates. The trie is flushed at the beginning of the recalculation, which
* means that such updates may use a partial trie to see if they are relevant.
* But it works anyway! Either the affected flowspec was already re-validated
* and added to the trie, in which case the IP route change would match the
* trie and trigger a next round of re-validation, or it was not yet
* re-validated and added to the trie, but then it will be re-validated later
* in this round anyway.
*
* The third mechanism is used for RPKI re-validation of IP routes and it is the
* simplest. It is also an auxiliary export request belonging to the
* appropriate channel, triggering its reload/refeed timer after a settle time.
*/
#undef LOCAL_DEBUG
#include "nest/bird.h"
#include "nest/route.h"
#include "nest/protocol.h"
#include "nest/iface.h"
#include "nest/mpls.h"
#include "lib/resource.h"
#include "lib/event.h"
#include "lib/timer.h"
#include "lib/string.h"
#include "conf/conf.h"
#include "filter/filter.h"
#include "filter/data.h"
#include "lib/hash.h"
#include "lib/string.h"
#include "lib/alloca.h"
#include "lib/flowspec.h"
#include "lib/idm.h"
#include "lib/netindex_private.h"
#ifdef CONFIG_BGP
#include "proto/bgp/bgp.h"
#endif
#include <stdatomic.h>
pool *rt_table_pool;
list routing_tables;
list deleted_routing_tables;
netindex_hash *rt_global_netindex_hash;
struct rt_cork rt_cork;
struct rt_export_block {
struct lfjour_block lb;
struct rt_pending_export export[];
};
#define RT_INITIAL_ROUTES_BLOCK_SIZE 128
/* Data structures for export journal */
static void rt_free_hostcache(struct rtable_private *tab);
static void rt_update_hostcache(void *tab);
static void rt_next_hop_update(void *_tab);
static void rt_nhu_uncork(void *_tab);
static inline void rt_next_hop_resolve_rte(rte *r);
static inline void rt_flowspec_resolve_rte(rte *r, struct channel *c);
static void rt_refresh_trace(struct rtable_private *tab, struct rt_import_hook *ih, const char *msg);
static void rt_kick_prune_timer(struct rtable_private *tab);
static void rt_prune_table(void *_tab);
static void rt_feed_by_fib(void *);
static void rt_feed_equal(void *);
static void rt_feed_for(void *);
static void rt_check_cork_low(struct rtable_private *tab);
static void rt_check_cork_high(struct rtable_private *tab);
static void rt_cork_release_hook(void *);
static void rt_shutdown(void *);
static void rt_delete(void *);
int rte_same(const rte *x, const rte *y);
const char *rt_import_state_name_array[TIS_MAX] = {
[TIS_DOWN] = "DOWN",
[TIS_UP] = "UP",
[TIS_STOP] = "STOP",
[TIS_FLUSHING] = "FLUSHING",
[TIS_WAITING] = "WAITING",
[TIS_CLEARED] = "CLEARED",
};
const char *rt_export_state_name_array[TES_MAX] = {
[TES_DOWN] = "DOWN",
[TES_HUNGRY] = "HUNGRY",
[TES_FEEDING] = "FEEDING",
[TES_READY] = "READY",
[TES_STOP] = "STOP"
};
const char *rt_import_state_name(u8 state)
{
if (state >= TIS_MAX)
return "!! INVALID !!";
else
return rt_import_state_name_array[state];
}
const char *rt_export_state_name(u8 state)
{
if (state >= TES_MAX)
return "!! INVALID !!";
else
return rt_export_state_name_array[state];
}
static struct hostentry *rt_get_hostentry(struct rtable_private *tab, ip_addr a, ip_addr ll, rtable *dep);
static inline rtable *rt_priv_to_pub(struct rtable_private *tab) { return RT_PUB(tab); }
static inline rtable *rt_pub_to_pub(rtable *tab) { return tab; }
#define RT_ANY_TO_PUB(tab) _Generic((tab),rtable*:rt_pub_to_pub,struct rtable_private*:rt_priv_to_pub)((tab))
#define rt_trace(tab, level, fmt, args...) do {\
rtable *t = RT_ANY_TO_PUB((tab)); \
if (t->config->debug & (level)) \
log(L_TRACE "%s: " fmt, t->name, ##args); \
} while (0)
#define req_trace(r, level, fmt, args...) do { \
if (r->trace_routes & (level)) \
log(L_TRACE "%s: " fmt, r->name, ##args); \
} while (0)
#define channel_trace(c, level, fmt, args...) do {\
if ((c->debug & (level)) || (c->proto->debug & (level))) \
log(L_TRACE "%s.%s: " fmt, c->proto->name, c->name, ##args);\
} while (0)
/*
* Lockless table feeding helpers
*/
struct rtable_reading {
rtable *t;
struct lock_order locking_stack;
u64 retry;
jmp_buf buf;
};
static inline void _rt_rcu_unlock_(struct rtable_reading *o)
{
if (o->t)
rcu_read_unlock();
}
#define RT_READ(_o, _i) \
CLEANUP(_rt_rcu_unlock_) struct rtable_reading _s##_i = { .t = _o }; \
struct rtable_reading *_i = &_s##_i; \
if (setjmp(_i->buf)) { \
rcu_read_unlock(); \
locking_unwind(&_i->locking_stack); \
birdloop_yield(); \
if (!(++_i->retry % NET_GET_BLOCK_WARN)) \
log(L_WARN "Suspiciously many RT_READs retried (%lu) in table %s" \
" at %s:%d", _i->retry, _i->t->name, __FILE__, __LINE__); \
} \
_i->locking_stack = locking_stack; \
rcu_read_lock(); \
#define RT_READ_RETRY(tr) do { if (RT_IS_LOCKED(tr->t)) bug("No obsolete route allowed here"); else longjmp(tr->buf, 1); } while (0)
#define RT_READ_LOCKED(_o, _i) \
struct rtable_reading _s##_i = { .t = RT_PUB(_o) }, \
*_i = ({ ASSERT_DIE(RT_IS_LOCKED(_o)); &_s##_i; }); \
#define RTE_IS_OBSOLETE(s) ((s)->rte.flags & REF_OBSOLETE)
#define RTE_OBSOLETE_CHECK(tr, _s) ({ \
struct rte_storage *s = _s; \
if (s && RTE_IS_OBSOLETE(s)) \
RT_READ_RETRY(tr); \
s; })
#define NET_GET_BLOCK_WARN 16384
#define NET_READ_WALK_ROUTES(tr, n, ptr, r) \
for (struct rte_storage *r, * _Atomic *ptr = &(n)->routes; \
r = RTE_OBSOLETE_CHECK(tr, atomic_load_explicit(ptr, memory_order_acquire)); \
ptr = &r->next)
#define NET_READ_BEST_ROUTE(tr, n) RTE_OBSOLETE_CHECK(tr, atomic_load_explicit(&n->routes, memory_order_acquire))
#define NET_WALK_ROUTES(priv, n, ptr, r) \
for (struct rte_storage *r = ({ ASSERT_DIE(RT_IS_LOCKED(priv)); NULL; }), \
* _Atomic *ptr = &(n)->routes; \
r = atomic_load_explicit(ptr, memory_order_acquire); \
ptr = &r->next)
#define NET_BEST_ROUTE(priv, n) ({ ASSERT_DIE(RT_IS_LOCKED(priv)); atomic_load_explicit(&n->routes, memory_order_acquire); })
static inline net *
net_find(struct rtable_reading *tr, const struct netindex *i)
{
u32 rbs = atomic_load_explicit(&tr->t->routes_block_size, memory_order_acquire);
if (i->index >= rbs)
return NULL;
net *routes = atomic_load_explicit(&tr->t->routes, memory_order_acquire);
return &(routes[i->index]);
}
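/*
 * Usage sketch (illustrative only, kept out of the build): a typical
 * lockless reader built from the helpers above. RT_READ() opens an RCU
 * read section and plants the longjmp() retry point; whenever a walk
 * spots an obsolete route, RT_READ_RETRY() restarts the whole block.
 * The function name is hypothetical, the called helpers are the real
 * ones defined in this file.
 */
#if 0
static uint
example_count_valid_routes(rtable *t, const struct netindex *i)
{
  RT_READ(t, tr);			/* RCU-protected reading context */

  net *n = net_find(tr, i);		/* index-based lookup, may be NULL */
  if (!n)
    return 0;

  uint count = 0;
  NET_READ_WALK_ROUTES(tr, n, ep, e)	/* retries via longjmp on REF_OBSOLETE */
    if (rte_is_valid(&e->rte))
      count++;

  return count;
}
#endif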
static inline net *
net_find_valid(struct rtable_reading *tr, struct netindex_hash_private *nh, const net_addr *addr)
{
struct netindex *i = net_find_index_fragile(nh, addr);
if (!i)
return NULL;
net *n = net_find(tr, i);
if (!n)
return NULL;
struct rte_storage *s = NET_READ_BEST_ROUTE(tr, n);
if (!s || !rte_is_valid(&s->rte))
return NULL;
return n;
}
static inline void *
net_route_ip6_sadr_trie(struct rtable_reading *tr, struct netindex_hash_private *nh, const net_addr_ip6_sadr *n0)
{
u32 bs = atomic_load_explicit(&tr->t->routes_block_size, memory_order_acquire);
const struct f_trie *trie = atomic_load_explicit(&tr->t->trie, memory_order_acquire);
TRIE_WALK_TO_ROOT_IP6(trie, (const net_addr_ip6 *) n0, px)
{
net_addr_ip6_sadr n = NET_ADDR_IP6_SADR(px.prefix, px.pxlen, n0->src_prefix, n0->src_pxlen);
net *best = NULL;
int best_pxlen = 0;
/* We need to do dst first matching. Since sadr addresses are hashed on dst
prefix only, find the hash table chain and go through it to find the
match with the longest matching src prefix. */
for (struct netindex *i = net_find_index_fragile_chain(nh, (net_addr *) &n); i; i = i->next)
{
net_addr_ip6_sadr *a = (void *) i->addr;
if ((i->index < bs) &&
net_equal_dst_ip6_sadr(&n, a) &&
net_in_net_src_ip6_sadr(&n, a) &&
(a->src_pxlen >= best_pxlen))
{
net *cur = &(atomic_load_explicit(&tr->t->routes, memory_order_acquire)[i->index]);
struct rte_storage *s = NET_READ_BEST_ROUTE(tr, cur);
if (s && rte_is_valid(&s->rte))
{
best = cur;
best_pxlen = a->src_pxlen;
}
}
}
if (best)
return best;
}
TRIE_WALK_TO_ROOT_END;
return NULL;
}
static inline void *
net_route_ip6_sadr_fib(struct rtable_reading *tr, struct netindex_hash_private *nh, const net_addr_ip6_sadr *n0)
{
u32 bs = atomic_load_explicit(&tr->t->routes_block_size, memory_order_acquire);
net_addr_ip6_sadr n;
net_copy_ip6_sadr(&n, n0);
while (1)
{
net *best = NULL;
int best_pxlen = 0;
/* We need to do dst first matching. Since sadr addresses are hashed on dst
prefix only, find the hash table chain and go through it to find the
match with the longest matching src prefix. */
for (struct netindex *i = net_find_index_fragile_chain(nh, (net_addr *) &n); i; i = i->next)
{
net_addr_ip6_sadr *a = (void *) i->addr;
if ((i->index < bs) &&
net_equal_dst_ip6_sadr(&n, a) &&
net_in_net_src_ip6_sadr(&n, a) &&
(a->src_pxlen >= best_pxlen))
{
net *cur = &(atomic_load_explicit(&tr->t->routes, memory_order_acquire)[i->index]);
struct rte_storage *s = NET_READ_BEST_ROUTE(tr, cur);
if (s && RTE_IS_OBSOLETE(s))
RT_READ_RETRY(tr);
if (s && rte_is_valid(&s->rte))
{
best = cur;
best_pxlen = a->src_pxlen;
}
}
}
if (best)
return best;
if (!n.dst_pxlen)
break;
n.dst_pxlen--;
ip6_clrbit(&n.dst_prefix, n.dst_pxlen);
}
return NULL;
}
static net *
net_route(struct rtable_reading *tr, const net_addr *n)
{
ASSERT(tr->t->addr_type == n->type);
SKIP_BACK_DECLARE(net_addr_union, nu, n, n);
const struct f_trie *trie = atomic_load_explicit(&tr->t->trie, memory_order_acquire);
NH_LOCK(tr->t->netindex, nh);
#define TW(ipv, what) \
TRIE_WALK_TO_ROOT_IP##ipv(trie, &(nu->ip##ipv), var) \
{ what(ipv, var); } \
TRIE_WALK_TO_ROOT_END; return NULL;
#define FW(ipv, what) do { \
net_addr_union nuc; net_copy(&nuc.n, n); \
while (1) { \
what(ipv, nuc.ip##ipv); if (!nuc.n.pxlen) return NULL; \
nuc.n.pxlen--; ip##ipv##_clrbit(&nuc.ip##ipv.prefix, nuc.ip##ipv.pxlen); \
} \
} while(0); return NULL;
#define FVR_IP(ipv, var) \
net *r; if (r = net_find_valid(tr, nh, (net_addr *) &var)) return r;
#define FVR_VPN(ipv, var) \
net_addr_vpn##ipv _var0 = NET_ADDR_VPN##ipv(var.prefix, var.pxlen, nu->vpn##ipv.rd); FVR_IP(ipv, _var0);
if (trie)
switch (n->type) {
case NET_IP4: TW(4, FVR_IP);
case NET_VPN4: TW(4, FVR_VPN);
case NET_IP6: TW(6, FVR_IP);
case NET_VPN6: TW(6, FVR_VPN);
case NET_IP6_SADR:
return net_route_ip6_sadr_trie(tr, nh, (net_addr_ip6_sadr *) n);
default:
return NULL;
}
else
switch (n->type) {
case NET_IP4: FW(4, FVR_IP);
case NET_VPN4: FW(4, FVR_VPN);
case NET_IP6: FW(6, FVR_IP);
case NET_VPN6: FW(6, FVR_VPN);
case NET_IP6_SADR:
return net_route_ip6_sadr_fib (tr, nh, (net_addr_ip6_sadr *) n);
default:
return NULL;
}
#undef TW
#undef FW
#undef FVR_IP
#undef FVR_VPN
}
/**
* roa_check - check validity of route origination in a ROA table
* @tp: ROA table
* @n: network prefix to check
* @asn: AS number of network prefix
*
* Implements RFC 6483 route validation for the given network prefix. The
* procedure is to find all candidate ROAs - ROAs whose prefixes cover the given
* network prefix. If there is no candidate ROA, return ROA_UNKNOWN. If there is
* a candidate ROA with matching ASN and maxlen field greater than or equal to
* the given prefix length, return ROA_VALID. Otherwise, return ROA_INVALID. If
* the caller cannot determine the origin AS, 0 could be used (in that case
* ROA_VALID cannot happen). Table @tp must have type NET_ROA4 or NET_ROA6,
* network @n must have type NET_IP4 or NET_IP6, respectively.
*/
int
net_roa_check(rtable *tp, const net_addr *n, u32 asn)
{
SKIP_BACK_DECLARE(net_addr_union, nu, n, n);
int anything = 0;
#define TW(ipv) do { \
TRIE_WALK_TO_ROOT_IP##ipv(trie, &(nu->ip##ipv), var) { \
net_addr_roa##ipv roa0 = NET_ADDR_ROA##ipv(var.prefix, var.pxlen, 0, 0); \
ROA_PARTIAL_CHECK(ipv); \
} TRIE_WALK_TO_ROOT_END; \
return anything ? ROA_INVALID : ROA_UNKNOWN; \
} while (0)
#define FW(ipv) do { \
net_addr_roa##ipv roa0 = NET_ADDR_ROA##ipv(nu->ip##ipv.prefix, nu->ip##ipv.pxlen, 0, 0);\
while (1) { \
ROA_PARTIAL_CHECK(ipv); \
if (roa0.pxlen == 0) break; \
roa0.pxlen--; ip##ipv##_clrbit(&roa0.prefix, roa0.pxlen); \
} \
} while (0)
#define ROA_PARTIAL_CHECK(ipv) do { \
for (struct netindex *i = net_find_index_fragile_chain(nh, (net_addr *) &roa0); i; i = i->next)\
{ \
if (i->index >= bs) continue; \
net_addr_roa##ipv *roa = (void *) i->addr; \
if (!net_equal_prefix_roa##ipv(roa, &roa0)) continue; \
net *r = &(atomic_load_explicit(&tr->t->routes, memory_order_acquire)[i->index]); \
struct rte_storage *s = NET_READ_BEST_ROUTE(tr, r); \
if (s && rte_is_valid(&s->rte)) \
{ \
anything = 1; \
if (asn && (roa->asn == asn) && (roa->max_pxlen >= nu->ip##ipv.pxlen)) \
return ROA_VALID; \
} \
} \
} while (0)
RT_READ(tp, tr);
{
u32 bs = atomic_load_explicit(&tr->t->routes_block_size, memory_order_acquire);
const struct f_trie *trie = atomic_load_explicit(&tr->t->trie, memory_order_acquire);
NH_LOCK(tr->t->netindex, nh);
if ((tr->t->addr_type == NET_ROA4) && (n->type == NET_IP4))
{
if (trie) TW(4);
else FW(4);
}
else if ((tr->t->addr_type == NET_ROA6) && (n->type == NET_IP6))
{
if (trie) TW(6);
else FW(6);
}
}
return anything ? ROA_INVALID : ROA_UNKNOWN;
#undef ROA_PARTIAL_CHECK
#undef TW
#undef FW
}
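/*
 * Usage sketch (illustrative only, excluded from the build): how a caller
 * typically consumes the net_roa_check() result. The table pointer, the
 * prefix and the origin ASN are placeholders; the return values are the
 * real ones.
 */
#if 0
static const char *
example_roa_classify(rtable *roa_tab, const net_addr *n, u32 origin_asn)
{
  switch (net_roa_check(roa_tab, n, origin_asn))
  {
  case ROA_VALID:	/* a covering ROA matches both ASN and maxlen */
    return "valid";
  case ROA_INVALID:	/* covered by some ROA, but none matches */
    return "invalid";
  default:		/* ROA_UNKNOWN: no covering ROA at all */
    return "unknown";
  }
}
#endif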
struct rte_storage *
rte_store(const rte *r, struct netindex *i, struct rtable_private *tab)
{
struct rte_storage *s = sl_alloc(tab->rte_slab);
struct rte *e = RTES_WRITE(s);
*e = *r;
e->net = i->addr;
net_lock_index(tab->netindex, i);
rt_lock_source(e->src);
e->attrs = ea_lookup(e->attrs, BIT32_ALL(EALS_PREIMPORT, EALS_FILTERED), EALS_IN_TABLE);
#if 0
debug("(store) %N ", i->addr);
ea_dump(e->attrs);
debug("\n");
#endif
return s;
}
/**
* rte_free - free a &rte
* @e: &struct rte_storage to be freed
* @tab: the table which the rte belongs to
*
* rte_free() releases the references held by the stored route (its network
* index, route source and attributes) and returns the storage to the slab.
*/
static void
rte_free(struct rte_storage *e, struct rtable_private *tab)
{
struct netindex *i = RTE_GET_NETINDEX(&e->rte);
net_unlock_index(tab->netindex, i);
rt_unlock_source(e->rte.src);
ea_free(e->rte.attrs);
sl_free(e);
}
static int /* Actually better or at least as good as */
rte_better(const rte *new, const rte *old)
{
int (*better)(const rte *, const rte *);
if (!rte_is_valid(old))
return 1;
if (!rte_is_valid(new))
return 0;
u32 np = rt_get_preference(new);
u32 op = rt_get_preference(old);
if (np > op)
return 1;
if (np < op)
return 0;
if (new->src->owner->class != old->src->owner->class)
{
/*
* If the user has configured protocol preferences, so that two different protocols
* have the same preference, try to break the tie by comparing addresses. Not too
* useful, but keeps the ordering of routes unambiguous.
*/
return new->src->owner->class > old->src->owner->class;
}
if (better = new->src->owner->class->rte_better)
return better(new, old);
return 0;
}
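/*
 * Sketch (illustrative only, excluded from the build): when preferences
 * and owner classes are equal, rte_better() above defers to the class
 * rte_better hook. A protocol would supply something along these lines;
 * the metric attribute and both names are hypothetical, and the
 * rte_owner_class layout is assumed from its use above.
 */
#if 0
static int
example_rte_better(const rte *new, const rte *old)
{
  /* Lower hypothetical metric wins the tie-break */
  return ea_get_int(new->attrs, &ea_example_metric, ~0U)
       < ea_get_int(old->attrs, &ea_example_metric, ~0U);
}

static struct rte_owner_class example_owner_class = {
  .rte_better = example_rte_better,
};
#endif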
static int
rte_mergable(const rte *pri, const rte *sec)
{
int (*mergable)(const rte *, const rte *);
if (!rte_is_valid(pri) || !rte_is_valid(sec))
return 0;
if (rt_get_preference(pri) != rt_get_preference(sec))
return 0;
if (pri->src->owner->class != sec->src->owner->class)
return 0;
if (mergable = pri->src->owner->class->rte_mergable)
return mergable(pri, sec);
return 0;
}
static void
rte_trace(const char *name, const rte *e, int dir, const char *msg)
{
log(L_TRACE "%s %c %s %N (%u) src %luL %uG %uS id %u %s",
name, dir, msg, e->net, NET_TO_INDEX(e->net)->index,
e->src->private_id, e->src->global_id, e->stale_cycle, e->id,
rta_dest_name(rte_dest(e)));
}
static inline void
channel_rte_trace_in(uint flag, struct channel *c, const rte *e, const char *msg)
{
if ((c->debug & flag) || (c->proto->debug & flag))
log(L_TRACE "%s > %s %N (-) src %luL %uG %uS id %u %s",
c->in_req.name, msg, e->net,
e->src->private_id, e->src->global_id, e->stale_cycle, e->id,
rta_dest_name(rte_dest(e)));
}
static inline void
channel_rte_trace_out(uint flag, struct channel *c, const rte *e, const char *msg)
{
if ((c->debug & flag) || (c->proto->debug & flag))
rte_trace(c->out_req.name, e, '<', msg);
}
static inline void
rt_rte_trace_in(uint flag, struct rt_import_request *req, const rte *e, const char *msg)
{
if (req->trace_routes & flag)
rte_trace(req->name, e, '>', msg);
}
#if 0
// seems to be unused at all
static inline void
rt_rte_trace_out(uint flag, struct rt_export_request *req, const rte *e, const char *msg)
{
if (req->trace_routes & flag)
rte_trace(req->name, e, '<', msg);
}
#endif
static uint
rte_feed_count(struct rtable_reading *tr, net *n)
{
uint count = 0;
NET_READ_WALK_ROUTES(tr, n, ep, e)
count++;
return count;
}
static void
rte_feed_obtain(struct rtable_reading *tr, net *n, const rte **feed, uint count)
{
uint i = 0;
NET_READ_WALK_ROUTES(tr, n, ep, e)
{
if (i >= count)
RT_READ_RETRY(tr);
feed[i++] = &e->rte;
}
if (i != count)
RT_READ_RETRY(tr);
}
static void
rte_feed_obtain_copy(struct rtable_reading *tr, net *n, rte *feed, uint count)
{
uint i = 0;
NET_READ_WALK_ROUTES(tr, n, ep, e)
{
if (i >= count)
RT_READ_RETRY(tr);
feed[i++] = e->rte;
ea_free_later(ea_ref(e->rte.attrs));
}
if (i != count)
RT_READ_RETRY(tr);
}
static rte *
export_filter(struct channel *c, rte *rt, int silent)
{
struct proto *p = c->proto;
const struct filter *filter = c->out_filter;
struct channel_export_stats *stats = &c->export_stats;
/* Do nothing if we have already rejected the route */
if (silent && bmap_test(&c->export_reject_map, rt->id))
goto reject_noset;
int v = p->preexport ? p->preexport(c, rt) : 0;
if (v < 0)
{
if (silent)
goto reject_noset;
stats->updates_rejected++;
if (v == RIC_REJECT)
channel_rte_trace_out(D_FILTERS, c, rt, "rejected by protocol");
goto reject;
}
if (v > 0)
{
if (!silent)
channel_rte_trace_out(D_FILTERS, c, rt, "forced accept by protocol");
goto accept;
}
v = filter && ((filter == FILTER_REJECT) ||
(f_run(filter, rt,
(silent ? FF_SILENT : 0)) > F_ACCEPT));
if (v)
{
if (silent)
goto reject;
stats->updates_filtered++;
channel_rte_trace_out(D_FILTERS, c, rt, "filtered out");
goto reject;
}
accept:
/* We have accepted the route */
bmap_clear(&c->export_reject_map, rt->id);
return rt;
reject:
/* We have rejected the route by filter */
bmap_set(&c->export_reject_map, rt->id);
reject_noset:
/* Discard temporary rte */
return NULL;
}
static void
do_rt_notify(struct channel *c, const net_addr *net, rte *new, const rte *old)
{
struct proto *p = c->proto;
struct channel_export_stats *stats = &c->export_stats;
if (!old && new)
if (CHANNEL_LIMIT_PUSH(c, OUT))
{
stats->updates_rejected++;
channel_rte_trace_out(D_FILTERS, c, new, "rejected [limit]");
return;
}
if (!new && old)
CHANNEL_LIMIT_POP(c, OUT);
if (new)
stats->updates_accepted++;
else
stats->withdraws_accepted++;
if (old)
bmap_clear(&c->export_map, old->id);
if (new)
bmap_set(&c->export_map, new->id);
if (new && old)
channel_rte_trace_out(D_ROUTES, c, new, "replaced");
else if (new)
channel_rte_trace_out(D_ROUTES, c, new, "added");
else if (old)
channel_rte_trace_out(D_ROUTES, c, old, "removed");
p->rt_notify(p, c, net, new, old);
}
static void
rt_notify_basic(struct channel *c, const net_addr *net, rte *new, const rte *old, int force)
{
if (new && old && rte_same(new, old) && !force)
{
channel_rte_trace_out(D_ROUTES, c, new, "already exported");
if ((new->id != old->id) && bmap_test(&c->export_map, old->id))
{
bmap_set(&c->export_map, new->id);
bmap_clear(&c->export_map, old->id);
}
return;
}
/* Refeeding and old is new */
if (force && !old && bmap_test(&c->export_map, new->id))
old = new;
if (new)
new = export_filter(c, new, 0);
if (old && !bmap_test(&c->export_map, old->id))
old = NULL;
if (!new && !old)
return;
do_rt_notify(c, net, new, old);
}
void
channel_rpe_mark_seen(struct channel *c, struct rt_pending_export *rpe)
{
channel_trace(c, D_ROUTES, "Marking seen %p (%lu)", rpe, rpe->seq);
ASSERT_DIE(c->out_req.hook);
rpe_mark_seen(c->out_req.hook, rpe);
if (c->refeed_req.hook && (atomic_load_explicit(&c->refeed_req.hook->export_state, memory_order_acquire) == TES_FEEDING))
rpe_mark_seen(c->refeed_req.hook, rpe);
if (rpe->old)
bmap_clear(&c->export_reject_map, rpe->old->id);
}
void
rt_notify_accepted(struct rt_export_request *req, const net_addr *n,
struct rt_pending_export *first, struct rt_pending_export *last,
const rte **feed, uint count)
{
struct channel *c = channel_from_export_request(req);
int refeeding = channel_net_is_refeeding(c, n);
rte nb0, *new_best = NULL;
const rte *old_best = NULL;
for (uint i = 0; i < count; i++)
{
if (!rte_is_valid(feed[i]))
continue;
/* Has already been rejected, won't bother with it */
if (!refeeding && bmap_test(&c->export_reject_map, feed[i]->id))
continue;
/* Previously exported */
if (!old_best && bmap_test(&c->export_map, feed[i]->id))
{
if (new_best)
{
/* is superseded */
old_best = feed[i];
break;
}
else if (refeeding)
/* is superseded, but maybe by a new version of itself */
old_best = feed[i];
else
{
/* is still best */
DBG("rt_notify_accepted: idempotent\n");
goto done;
}
}
/* Have no new best route yet */
if (!new_best)
{
/* Try this route not seen before */
nb0 = *feed[i];
new_best = export_filter(c, &nb0, 0);
DBG("rt_notify_accepted: checking route id %u: %s\n", feed[i]->id, new_best ? "ok" : "no");
}
}
done:
/* Check obsolete routes for previously exported */
RPE_WALK(first, rpe, NULL)
{
channel_rpe_mark_seen(c, rpe);
if (rpe->old)
{
if (bmap_test(&c->export_map, rpe->old->id))
{
ASSERT_DIE(old_best == NULL);
old_best = rpe->old;
}
}
if (rpe == last)
break;
}
/* Nothing to export */
if (new_best || old_best)
do_rt_notify(c, n, new_best, old_best);
else
DBG("rt_notify_accepted: nothing to export\n");
if (refeeding)
channel_net_mark_refed(c, n);
}
rte *
rt_export_merged(struct channel *c, const net_addr *n, const rte **feed, uint count, linpool *pool, int silent)
{
_Thread_local static rte rloc;
int refeeding = !silent && channel_net_is_refeeding(c, n);
if (refeeding)
channel_net_mark_refed(c, n);
// struct proto *p = c->proto;
struct nexthop_adata *nhs = NULL;
const rte *best0 = feed[0];
rte *best = NULL;
if (!rte_is_valid(best0))
return NULL;
/* Already rejected, no need to re-run the filter */
if (!refeeding && bmap_test(&c->export_reject_map, best0->id))
return NULL;
rloc = *best0;
best = export_filter(c, &rloc, silent);
if (!best)
/* Best route doesn't pass the filter */
return NULL;
if (!rte_is_reachable(best))
/* Unreachable routes can't be merged */
return best;
for (uint i = 1; i < count; i++)
{
if (!rte_mergable(best0, feed[i]))
continue;
rte tmp0 = *feed[i];
rte *tmp = export_filter(c, &tmp0, !refeeding);
if (!tmp || !rte_is_reachable(tmp))
continue;
eattr *nhea = ea_find(tmp->attrs, &ea_gen_nexthop);
ASSERT_DIE(nhea);
if (nhs)
nhs = nexthop_merge(nhs, (struct nexthop_adata *) nhea->u.ptr, c->merge_limit, pool);
else
nhs = (struct nexthop_adata *) nhea->u.ptr;
}
if (nhs)
{
eattr *nhea = ea_find(best->attrs, &ea_gen_nexthop);
ASSERT_DIE(nhea);
nhs = nexthop_merge(nhs, (struct nexthop_adata *) nhea->u.ptr, c->merge_limit, pool);
ea_set_attr(&best->attrs,
EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, &nhs->ad));
}
return best;
}
void
rt_notify_merged(struct rt_export_request *req, const net_addr *n,
struct rt_pending_export *first, struct rt_pending_export *last,
const rte **feed, uint count)
{
struct channel *c = channel_from_export_request(req);
// struct proto *p = c->proto;
#if 0 /* TODO: Find whether this check is possible when processing multiple changes at once. */
/* Check whether the change is relevant to the merged route */
if ((new_best == old_best) &&
(new_changed != old_changed) &&
!rte_mergable(new_best, new_changed) &&
!rte_mergable(old_best, old_changed))
return;
#endif
const rte *old_best = NULL;
/* Find old best route */
for (uint i = 0; i < count; i++)
if (bmap_test(&c->export_map, feed[i]->id))
{
old_best = feed[i];
break;
}
/* Check obsolete routes for previously exported */
RPE_WALK(first, rpe, NULL)
{
channel_rpe_mark_seen(c, rpe);
if (rpe->old)
{
if (bmap_test(&c->export_map, rpe->old->id))
{
ASSERT_DIE(old_best == NULL);
old_best = rpe->old;
}
}
if (rpe == last)
break;
}
/* Prepare new merged route */
rte *new_merged = count ? rt_export_merged(c, n, feed, count, tmp_linpool, 0) : NULL;
if (new_merged || old_best)
do_rt_notify(c, n, new_merged, old_best);
}
void
rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
{
struct channel *c = channel_from_export_request(req);
const rte *o = RTE_VALID_OR_NULL(first->old_best);
const rte *new_best = first->new_best;
int refeeding = channel_net_is_refeeding(c, net);
RPE_WALK(first, rpe, NULL)
{
channel_rpe_mark_seen(c, rpe);
new_best = rpe->new_best;
}
rte n0 = RTE_COPY_VALID(new_best);
if (n0.src || o)
rt_notify_basic(c, net, n0.src ? &n0 : NULL, o, refeeding);
if (refeeding)
channel_net_mark_refed(c, net);
}
void
rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
{
struct channel *c = channel_from_export_request(req);
const rte *n = RTE_VALID_OR_NULL(first->new);
const rte *o = RTE_VALID_OR_NULL(first->old);
channel_trace(c, D_ROUTES,
"Notifying any, net %N, first %p (%lu), new %p, old %p",
net, first, first->seq, n, o);
if (!n && !o || channel_net_is_refeeding(c, net))
{
/* We want to skip this notification because:
* - there is nothing to notify, or
* - this net is going to get a full refeed soon
*/
channel_rpe_mark_seen(c, first);
return;
}
struct rte_src *src = n ? n->src : o->src;
const rte *new_latest = first->new;
RPE_WALK(first, rpe, src)
{
channel_rpe_mark_seen(c, rpe);
new_latest = rpe->new;
}
rte n0 = RTE_COPY_VALID(new_latest);
if (n0.src || o)
rt_notify_basic(c, net, n0.src ? &n0 : NULL, o, 0);
channel_trace(c, D_ROUTES, "Notified net %N", net);
}
void
rt_feed_any(struct rt_export_request *req, const net_addr *net,
struct rt_pending_export *first, struct rt_pending_export *last,
const rte **feed, uint count)
{
struct channel *c = channel_from_export_request(req);
int refeeding = channel_net_is_refeeding(c, net);
channel_trace(c, D_ROUTES, "Feeding any, net %N, first %p (%lu), %p (%lu), count %u",
net, first, first ? first->seq : 0, last ? last->seq : 0, count);
for (uint i=0; i<count; i++)
if (rte_is_valid(feed[i]))
{
rte n0 = *feed[i];
rt_notify_basic(c, net, &n0, NULL, refeeding);
}
RPE_WALK(first, rpe, NULL)
{
channel_rpe_mark_seen(c, rpe);
if (rpe == last)
break;
}
channel_trace(c, D_ROUTES, "Fed %N", net);
if (refeeding)
channel_net_mark_refed(c, net);
}
void
rpe_mark_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe)
{
bmap_set(&hook->seq_map, rpe->seq);
}
struct rt_pending_export *
rpe_next(struct rt_pending_export *rpe, struct rte_src *src)
{
struct rt_pending_export *next = atomic_load_explicit(&rpe->next, memory_order_acquire);
if (!next)
return NULL;
if (!src)
return next;
while (rpe = next)
if (src == (rpe->new ? rpe->new->src : rpe->old->src))
return rpe;
else
next = atomic_load_explicit(&rpe->next, memory_order_acquire);
return NULL;
}
static void
rte_export(struct rt_export_hook *hook, struct rt_pending_export *rpe)
{
/* Seen already? */
if (bmap_test(&hook->seq_map, rpe->seq))
return;
const net_addr *n = rpe->new_best ? rpe->new_best->net : rpe->old_best->net;
/* Check export eligibility of this net */
if (!rt_prefilter_net(&hook->req->prefilter, n))
return;
if (hook->req->prefilter.mode == TE_ADDR_FOR)
bug("Continuos export of best prefix match not implemented yet.");
if (rpe->new)
hook->stats.updates_received++;
else
hook->stats.withdraws_received++;
if (rpe->old)
ASSERT_DIE(rpe->old->flags & REF_OBSOLETE);
if (hook->req->export_one)
hook->req->export_one(hook->req, n, rpe);
else if (hook->req->export_bulk)
{
uint count = 0;
const rte **feed = NULL;
const SKIP_BACK_DECLARE(struct netindex, i, addr, (net_addr (*)[0]) n);
ASSERT_DIE(i->index < atomic_load_explicit(&hook->tab->routes_block_size, memory_order_relaxed));
struct rt_pending_export *last;
{
RT_READ(hook->tab, tr);
/* Get the route block. */
net *routes = atomic_load_explicit(&tr->t->routes, memory_order_acquire);
net *net = &routes[i->index];
/* Get the feed itself. It may change under our hands tho. */
last = atomic_load_explicit(&net->last, memory_order_acquire);
count = rte_feed_count(tr, net);
if (count)
{
feed = alloca(count * sizeof(rte *));
rte_feed_obtain(tr, net, feed, count);
}
/* Check that it indeed didn't change and the last export is still the same. */
if (last != atomic_load_explicit(&net->last, memory_order_acquire))
RT_READ_RETRY(tr);
}
hook->req->export_bulk(hook->req, n, rpe, last, feed, count);
}
else
bug("Export request must always provide an export method");
}
/**
* rte_announce - announce a routing table change
* @tab: table the route has been added to
* @net: network in question
* @new: the new or changed route
* @old: the previous route replaced by the new one
* @new_best: the new best route for the same network
* @old_best: the previous best route for the same network
*
* This function gets a routing table update and announces it to all protocols
* that are connected to the same table by their channels.
*
* The announced change concerns just one route in @net (which may have caused
* a change of the best route of the network). In this case @new and @old
* describe the changed route and @new_best and @old_best describe the best
* routes. Other routes are not affected, but in a sorted table the order of
* other routes might change.
*
* The function announces the change to all associated channels. For each
* channel, an appropriate preprocessing is done according to channel &ra_mode.
* For example, %RA_OPTIMAL channels receive just changes of best routes.
*
* In general, we first call preexport() hook of a protocol, which performs
* basic checks on the route (each protocol has a right to veto or force accept
* of the route before any filter is asked). Then we consult an export filter
* of the channel and verify the old route in an export map of the channel.
* Finally, the rt_notify() hook of the protocol gets called.
*
* Note that there are also calls of rt_notify() hooks due to feeds, but those
* are done outside the scope of rte_announce().
*/
static void
rte_announce(struct rtable_private *tab, const struct netindex *i, net *net, const rte *new, const rte *old,
const rte *new_best, const rte *old_best)
{
/* Update network count */
tab->net_count += (!!new_best - !!old_best);
int new_best_valid = rte_is_valid(new_best);
int old_best_valid = rte_is_valid(old_best);
if ((new == old) && (new_best == old_best))
return;
if (new_best_valid)
new_best->sender->stats.pref++;
if (old_best_valid)
old_best->sender->stats.pref--;
SKIP_BACK_DECLARE(struct rt_pending_export, rpe, li, lfjour_push_prepare(&tab->journal));
if (!rpe)
{
rt_trace(tab, D_ROUTES, "Not announcing %N, "
"new=%p id %u from %s, "
"old=%p id %u from %s, "
"new_best=%p id %u, "
"old_best=%p id %u (no exporter present)",
i->addr,
new, new ? new->id : 0, new ? new->sender->req->name : NULL,
old, old ? old->id : 0, old ? old->sender->req->name : NULL,
new_best, new_best ? new_best->id : 0,
old_best, old_best ? old_best->id : 0);
/* Not announcing, can free old route immediately */
if (old)
{
hmap_clear(&tab->id_map, old->id);
rte_free(SKIP_BACK(struct rte_storage, rte, old), tab);
}
return;
}
rt_trace(tab, D_ROUTES, "Announcing %N, "
"new=%p id %u from %s, "
"old=%p id %u from %s, "
"new_best=%p id %u, "
"old_best=%p id %u seq=%lu",
i->addr,
new, new ? new->id : 0, new ? new->sender->req->name : NULL,
old, old ? old->id : 0, old ? old->sender->req->name : NULL,
new_best, new_best ? new_best->id : 0,
old_best, old_best ? old_best->id : 0,
rpe->li.seq);
*rpe = (struct rt_pending_export) {
.li = rpe->li, /* Keep the item's internal state */
.new = new,
.new_best = new_best,
.old = old,
.old_best = old_best,
};
lfjour_push_commit(&tab->journal);
/* Append to the same-network squasher list */
struct rt_pending_export *last = atomic_load_explicit(&net->last, memory_order_relaxed);
if (last)
{
struct rt_pending_export *rpenull = NULL;
ASSERT_DIE(atomic_compare_exchange_strong_explicit(
&last->next, &rpenull, rpe,
memory_order_release,
memory_order_relaxed));
}
ASSERT_DIE(atomic_compare_exchange_strong_explicit(
&net->last, &last, rpe,
memory_order_release,
memory_order_relaxed));
struct rt_pending_export *rpenull = NULL;
atomic_compare_exchange_strong_explicit(
&net->first, &rpenull, rpe,
memory_order_release,
memory_order_relaxed);
rt_check_cork_high(tab);
}
static inline void
rt_send_export_event(struct rt_export_hook *hook)
{
ev_send(hook->req->list, hook->event);
}
static void
rt_cleanup_export(struct lfjour *j, struct lfjour_item *i)
{
SKIP_BACK_DECLARE(struct rtable_private, tab, journal, j);
SKIP_BACK_DECLARE(struct rt_pending_export, rpe, li, i);
/* Unlink this export from struct network */
ASSERT_DIE(rpe->new || rpe->old);
const net_addr *n = rpe->new ?
rpe->new->net :
rpe->old->net;
struct netindex *ni = NET_TO_INDEX(n);
ASSERT_DIE(ni->index < atomic_load_explicit(&tab->routes_block_size, memory_order_relaxed));
net *routes = atomic_load_explicit(&tab->routes, memory_order_relaxed);
net *net = &routes[ni->index];
ASSERT_DIE(rpe == atomic_load_explicit(&net->first, memory_order_relaxed));
/* Update the first and last pointers */
struct rt_pending_export *last = rpe,
*next = atomic_load_explicit(&rpe->next, memory_order_relaxed);
if (atomic_compare_exchange_strong_explicit(
&net->last, &last, NULL,
memory_order_acq_rel, memory_order_acquire))
ASSERT_DIE(next == NULL);
ASSERT_DIE(atomic_compare_exchange_strong_explicit(
&net->first, &rpe, next,
memory_order_acq_rel, memory_order_relaxed));
/* Wait for very slow table readers */
synchronize_rcu();
if (rpe->old)
{
ASSERT_DIE(rpe->old->flags & REF_OBSOLETE);
hmap_clear(&tab->id_map, rpe->old->id);
rte_free(SKIP_BACK(struct rte_storage, rte, rpe->old), tab);
}
if (!routes && !next)
tab->gc_counter++;
}
static void
rt_import_cleared(void *_ih)
{
struct rt_import_hook *hook = _ih;
ASSERT_DIE(hook->import_state == TIS_CLEARED);
/* Local copy of the otherwise freed callback data */
void (*stopped)(struct rt_import_request *) = hook->stopped;
struct rt_import_request *req = hook->req;
/* Finally uncouple from the table */
RT_LOCKED(hook->table, tab)
{
req->hook = NULL;
rt_trace(tab, D_EVENTS, "Hook %s stopped", req->name);
rem_node(&hook->n);
mb_free(hook);
rt_unlock_table(tab);
}
/* And call the callback */
stopped(req);
}
static void
rt_cleanup_done(struct lfjour *j, u64 begin_seq, u64 end_seq)
{
SKIP_BACK_DECLARE(struct rtable_private, tab, journal, j);
ASSERT_DIE(DG_IS_LOCKED(tab->lock.rtable));
if (~end_seq)
rt_trace(tab, D_STATES, "Export cleanup done on seq %lu to %lu", begin_seq, end_seq);
else
rt_trace(tab, D_STATES, "Export cleanup complete (begin seq %lu)", begin_seq);
rt_check_cork_low(tab);
struct rt_import_hook *ih; node *x, *n;
uint cleared_counter = 0;
if (tab->wait_counter)
WALK_LIST2_DELSAFE(ih, n, x, tab->imports, n)
if (ih->import_state == TIS_WAITING)
if (end_seq >= ih->flush_seq)
{
ih->import_state = TIS_CLEARED;
tab->wait_counter--;
cleared_counter++;
ih->cleanup_event = (event) {
.hook = rt_import_cleared,
.data = ih,
};
ev_send_loop(ih->req->loop, &ih->cleanup_event);
}
if (!EMPTY_LIST(tab->imports) &&
(tab->gc_counter >= tab->config->gc_threshold))
rt_kick_prune_timer(tab);
}
#define RT_EXPORT_BULK 1024
static void
rt_export_hook(void *_data)
{
struct rt_export_hook *c = _data;
struct lfjour_recipient *r = &c->recipient;
ASSERT_DIE(atomic_load_explicit(&c->export_state, memory_order_relaxed) == TES_READY);
/* Process the export */
for (uint i=0; i<RT_EXPORT_BULK; i++)
{
/* Get the next export if exists */
struct lfjour_item *li = lfjour_get(r);
/* Stop exporting if no export is available */
if (!li)
return;
/* Process sequence number reset event */
if (lfjour_reset_seqno(r))
bmap_reset(&c->seq_map, 16);
/* Process the export */
rte_export(c, SKIP_BACK(struct rt_pending_export, li, li));
/* And release the export */
lfjour_release(r);
}
/*
* is this actually needed?
if (used)
RT_LOCKED(tab, t)
if (no_next || t->cork_active)
rt_export_used(c->table, c->req->name, no_next ? "finished export bulk" : "cork active");
*/
/* Request continuation */
rt_send_export_event(c);
}
static inline int
rte_validate(struct channel *ch, rte *e)
{
int c;
const net_addr *n = e->net;
#define IGNORING(pre, post) do { \
log(L_WARN "%s.%s: Ignoring " pre " %N " post, ch->proto->name, ch->name, n); \
return 0; \
} while (0)
if (!net_validate(n))
IGNORING("bogus prefix", "");
/* FIXME: better handling of different nettypes */
c = !net_is_flow(n) ?
net_classify(n): (IADDR_HOST | SCOPE_UNIVERSE);
if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
IGNORING("bogus route", "");
if (net_type_match(n, NB_DEST))
{
eattr *nhea = ea_find(e->attrs, &ea_gen_nexthop);
int dest = nhea_dest(nhea);
if (dest == RTD_NONE)
IGNORING("route", "with no destination");
if ((dest == RTD_UNICAST) &&
!nexthop_is_sorted((struct nexthop_adata *) nhea->u.ptr))
IGNORING("unsorted multipath route", "");
}
else if (ea_find(e->attrs, &ea_gen_nexthop))
IGNORING("route", "having a superfluous nexthop attribute");
return 1;
}
int
rte_same(const rte *x, const rte *y)
{
/* rte.flags / rte.pflags are not checked, as they are internal to rtable */
return
(x == y) || (
(x->attrs == y->attrs) ||
((!x->attrs->stored || !y->attrs->stored) && ea_same(x->attrs, y->attrs))
) &&
x->src == y->src &&
rte_is_filtered(x) == rte_is_filtered(y);
}
static inline int rte_is_ok(const rte *e) { return e && !rte_is_filtered(e); }
static void
rte_recalculate(struct rtable_private *table, struct rt_import_hook *c, struct netindex *i, net *net, rte *new, struct rte_src *src)
{
struct rt_import_request *req = c->req;
struct rt_import_stats *stats = &c->stats;
struct rte_storage *old_best_stored = NET_BEST_ROUTE(table, net);
const rte *old_best = old_best_stored ? &old_best_stored->rte : NULL;
/* If the new route is identical to the old one, the attribute lookup just
* finds the stored attributes in the cache and clones them with no performance
* drop. OTOH, if the attributes are not cached yet, such a route definitely
* hasn't been stored anywhere yet, so the lookup is worth the time. */
struct rte_storage *new_stored = NULL;
if (new)
{
new_stored = rte_store(new, i, table);
new = RTES_WRITE(new_stored);
}
struct rte_storage * _Atomic *last_ptr = NULL;
struct rte_storage *old_stored = NULL;
const rte *old = NULL;
/* Find the original route from the same protocol */
NET_WALK_ROUTES(table, net, ep, e)
{
last_ptr = &e->next;
if (e->rte.src == src)
if (old_stored)
bug("multiple routes in table with the same src");
else
old_stored = e;
}
if (old_stored)
{
old = &old_stored->rte;
/* If there is the same route in the routing table but from
* a different sender, then there are two paths from the
* source protocol to this routing table through transparent
* pipes, which is not allowed.
* We log that and ignore the route. */
if (old->sender != c)
{
if (!old->generation && !new->generation)
bug("Two protocols claim to author a route with the same rte_src in table %s: %N %s/%u:%u",
c->table->name, i->addr, old->src->owner->name, old->src->private_id, old->src->global_id);
log_rl(&table->rl_pipe, L_ERR "Route source collision in table %s: %N %s/%u:%u",
c->table->name, i->addr, old->src->owner->name, old->src->private_id, old->src->global_id);
}
if (new && rte_same(old, &new_stored->rte))
{
/* No changes, ignore the new route and refresh the old one */
old_stored->stale_cycle = new->stale_cycle;
if (!rte_is_filtered(new))
{
stats->updates_ignored++;
rt_rte_trace_in(D_ROUTES, req, new, "ignored");
}
/* We need to free the already stored route here before returning */
rte_free(new_stored, table);
return;
}
}
if (!old && !new)
{
stats->withdraws_ignored++;
return;
}
/* If rejected by import limit, we need to pretend there is no route */
if (req->preimport && (req->preimport(req, new, old) == 0))
{
rte_free(new_stored, table);
new_stored = NULL;
new = NULL;
}
if (!new && !old)
{
stats->withdraws_ignored++;
return;
}
int new_ok = rte_is_ok(new);
int old_ok = rte_is_ok(old);
if (new_ok)
stats->updates_accepted++;
else if (old_ok)
stats->withdraws_accepted++;
else
stats->withdraws_ignored++;
if (old_ok || new_ok)
table->last_rt_change = current_time();
/* Finalize the new stored route */
if (new_stored)
{
new->lastmod = current_time();
new->id = hmap_first_zero(&table->id_map);
hmap_set(&table->id_map, new->id);
}
/* We need to add a spinlock sentinel to the beginning */
struct rte_storage local_sentinel = {
.flags = REF_OBSOLETE,
.next = old_best_stored,
};
atomic_store_explicit(&net->routes, &local_sentinel, memory_order_release);
/* Mark also the old route as obsolete. */
if (old_stored)
old_stored->flags |= REF_OBSOLETE;
if (table->config->sorted)
{
/* If routes are sorted, just insert the new route at the appropriate position */
if (new_stored)
{
struct rte_storage * _Atomic *k = &local_sentinel.next, *kk;
for (; kk = atomic_load_explicit(k, memory_order_relaxed); k = &kk->next)
if ((kk != old_stored) && rte_better(new, &kk->rte))
break;
/* Do not flip the operation order, the list must stay consistent */
atomic_store_explicit(&new_stored->next, kk, memory_order_release);
atomic_store_explicit(k, new_stored, memory_order_release);
table->rt_count++;
}
}
else
{
/* If routes are not sorted, find the best route and move it to
the first position. There are several optimized cases. */
if (src->owner->rte_recalculate &&
src->owner->rte_recalculate(table, net, new_stored, old_stored, old_best_stored))
goto do_recalculate;
if (new_stored && rte_better(&new_stored->rte, old_best))
{
/* The first case - the new route is clearly optimal,
we link it at the first position */
/* First link to the chain */
atomic_store_explicit(&new_stored->next,
atomic_load_explicit(&local_sentinel.next, memory_order_acquire),
memory_order_release);
/* And then link to the added route */
atomic_store_explicit(&local_sentinel.next, new_stored, memory_order_release);
table->rt_count++;
}
else if (old == old_best)
{
/* The second case - the old best route will disappear, we add the
new route (if we have any) to the list (we don't care about
position) and then we elect the new optimal route and relink
that route at the first position and announce it. New optimal
route might be NULL if there are no more routes */
do_recalculate:
/* Add the new route to the list right behind the old one */
if (new_stored)
{
atomic_store_explicit(&new_stored->next, atomic_load_explicit(&old_stored->next, memory_order_relaxed), memory_order_release);
atomic_store_explicit(&old_stored->next, new_stored, memory_order_release);
table->rt_count++;
}
/* Find a new optimal route (if there is any) */
struct rte_storage * _Atomic *bp = &local_sentinel.next;
struct rte_storage *best = atomic_load_explicit(bp, memory_order_relaxed);
/* Best can't be the old one */
if (best == old_stored)
{
bp = &best->next;
best = atomic_load_explicit(bp, memory_order_relaxed);
}
if (best)
{
for (struct rte_storage *kk, * _Atomic *k = &best->next;
kk = atomic_load_explicit(k, memory_order_relaxed);
k = &kk->next)
if (rte_better(&kk->rte, &best->rte))
best = atomic_load_explicit(bp = k, memory_order_relaxed);
/* Now we know which route is the best one, we have to relink it
* to the front place. */
/* First we wait until all readers finish */
synchronize_rcu();
/* Now all readers must have seen the local spinlock sentinel
* and will wait until we re-arrange the structure */
/* The best route gets removed from its original place */
atomic_store_explicit(bp,
atomic_load_explicit(&best->next, memory_order_relaxed),
memory_order_release);
/* After the best route, the original chain shall be linked */
atomic_store_explicit(&best->next,
atomic_load_explicit(&local_sentinel.next, memory_order_relaxed),
memory_order_release);
/* And now we finally link the best route first */
atomic_store_explicit(&local_sentinel.next, best, memory_order_release);
}
}
else if (new_stored)
{
/* The third case - the new route is not better than the old
best route (therefore old_best != NULL) and the old best
route was not removed (therefore old_best == net->routes).
We just link the new route to the old/last position. */
if (old_stored)
{
atomic_store_explicit(&new_stored->next,
atomic_load_explicit(&old_stored->next, memory_order_relaxed),
memory_order_release);
atomic_store_explicit(&old_stored->next, new_stored, memory_order_release);
}
else
{
atomic_store_explicit(&new_stored->next, NULL, memory_order_relaxed);
atomic_store_explicit(last_ptr, new_stored, memory_order_release);
}
}
/* The fourth (empty) case - a suboptimal route is being removed, nothing to do */
}
/* Finally drop the old route */
if (old_stored)
{
uint seen = 0;
NET_WALK_ROUTES(table, net, ep, e)
if (e == old_stored)
{
ASSERT_DIE(e->rte.src == src);
atomic_store_explicit(ep,
atomic_load_explicit(&e->next, memory_order_relaxed),
memory_order_release);
ASSERT_DIE(!seen++);
}
ASSERT_DIE(seen == 1);
}
struct rte_storage *new_best = atomic_load_explicit(&local_sentinel.next, memory_order_relaxed);
/* Log the route change */
if (new_ok)
rt_rte_trace_in(D_ROUTES, req, &new_stored->rte, new_stored == new_best ? "added [best]" : "added");
else if (old_ok)
{
if (old != old_best)
rt_rte_trace_in(D_ROUTES, req, old, "removed");
else if (new_best && rte_is_ok(&new_best->rte))
rt_rte_trace_in(D_ROUTES, req, old, "removed [replaced]");
else
rt_rte_trace_in(D_ROUTES, req, old, "removed [sole]");
}
else
if (req->trace_routes & D_ROUTES)
log(L_TRACE "%s > ignored %N %s->%s", req->name, i->addr, old ? "filtered" : "none", new ? "filtered" : "none");
/* Propagate the route change */
rte_announce(table, i, net,
RTE_OR_NULL(new_stored), RTE_OR_NULL(old_stored),
RTE_OR_NULL(new_best), RTE_OR_NULL(old_best_stored));
/* Now we can finally release the changes back for reading */
atomic_store_explicit(&net->routes, new_best, memory_order_release);
return;
}
int
channel_preimport(struct rt_import_request *req, rte *new, const rte *old)
{
SKIP_BACK_DECLARE(struct channel, c, in_req, req);
if (new && !old)
if (CHANNEL_LIMIT_PUSH(c, RX))
return 0;
if (!new && old)
CHANNEL_LIMIT_POP(c, RX);
int new_in = new && !rte_is_filtered(new);
int old_in = old && !rte_is_filtered(old);
int verdict = 1;
if (new_in && !old_in)
if (CHANNEL_LIMIT_PUSH(c, IN))
if (c->in_keep & RIK_REJECTED)
new->flags |= REF_FILTERED;
else
verdict = 0;
if (!new_in && old_in)
CHANNEL_LIMIT_POP(c, IN);
mpls_rte_preimport(new_in ? new : NULL, old_in ? old : NULL);
return verdict;
}
void
rte_update(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
{
if (!c->in_req.hook)
{
log(L_WARN "%s.%s: Called rte_update without import hook", c->proto->name, c->name);
return;
}
ASSERT(c->channel_state == CS_UP);
/* Storing prefilter routes as an explicit layer */
if (new && (c->in_keep & RIK_PREFILTER))
new->attrs = ea_lookup_tmp(new->attrs, 0, EALS_PREIMPORT);
#if 0
debug("%s.%s -(prefilter)-> %s: %N ", c->proto->name, c->name, c->table->name, n);
if (new) ea_dump(new->attrs);
else debug("withdraw");
debug("\n");
#endif
const struct filter *filter = c->in_filter;
struct channel_import_stats *stats = &c->import_stats;
struct mpls_fec *fec = NULL;
if (new)
{
new->net = n;
new->sender = c->in_req.hook;
int fr;
stats->updates_received++;
if ((filter == FILTER_REJECT) ||
((fr = f_run(filter, new, 0)) > F_ACCEPT))
{
stats->updates_filtered++;
channel_rte_trace_in(D_FILTERS, c, new, "filtered out");
if (c->in_keep & RIK_REJECTED)
new->flags |= REF_FILTERED;
else
new = NULL;
}
if (new && c->proto->mpls_channel)
if (mpls_handle_rte(c->proto->mpls_channel, n, new, &fec) < 0)
{
channel_rte_trace_in(D_FILTERS, c, new, "invalid");
stats->updates_invalid++;
new = NULL;
}
if (new)
{
new->attrs = ea_lookup_tmp(new->attrs,
(c->in_keep & RIK_PREFILTER) ? BIT32_ALL(EALS_PREIMPORT) : 0, EALS_FILTERED);
if (net_is_flow(n))
rt_flowspec_resolve_rte(new, c);
else
rt_next_hop_resolve_rte(new);
}
if (new && !rte_validate(c, new))
{
channel_rte_trace_in(D_FILTERS, c, new, "invalid");
stats->updates_invalid++;
new = NULL;
}
}
else
stats->withdraws_received++;
rte_import(&c->in_req, n, new, src);
if (fec)
{
mpls_unlock_fec(fec);
DBGL( "Unlock FEC %p (rte_update %N)", fec, n);
}
}
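/*
 * Usage sketch (illustrative only, excluded from the build): the minimal
 * protocol-side import sequence through rte_update(). The attribute list
 * and the route source are placeholders prepared by the caller; real
 * protocols do this from their route processing hooks.
 */
#if 0
static void
example_import(struct channel *c, const net_addr *n, ea_list *attrs, struct rte_src *src)
{
  rte e0 = {
    .attrs = attrs,	/* temporary ea_list; interned by the table on import */
    .src = src,		/* identifies who announces the route */
  };
  rte_update(c, n, &e0, src);	/* announce (or replace) the route */

  rte_update(c, n, NULL, src);	/* NULL route withdraws it again */
}
#endif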
void
rte_import(struct rt_import_request *req, const net_addr *n, rte *new, struct rte_src *src)
{
struct rt_import_hook *hook = req->hook;
if (!hook)
{
log(L_WARN "%s: Called rte_import without import hook", req->name);
return;
}
RT_LOCKED(hook->table, tab)
{
u32 bs = atomic_load_explicit(&tab->routes_block_size, memory_order_acquire);
struct netindex *i;
net *routes = atomic_load_explicit(&tab->routes, memory_order_acquire);
net *nn;
if (new)
{
/* An update */
/* Set auxiliary values */
new->stale_cycle = hook->stale_set;
new->sender = hook;
/* Allocate the key structure */
i = net_get_index(tab->netindex, n);
new->net = i->addr;
/* Block size update */
u32 nbs = bs;
while (i->index >= nbs)
nbs *= 2;
if (nbs > bs)
{
net *nb = mb_alloc(tab->rp, nbs * sizeof *nb);
memcpy(&nb[0], routes, bs * sizeof *nb);
memset(&nb[bs], 0, (nbs - bs) * sizeof *nb);
ASSERT_DIE(atomic_compare_exchange_strong_explicit(
&tab->routes, &routes, nb,
memory_order_acq_rel, memory_order_relaxed));
ASSERT_DIE(atomic_compare_exchange_strong_explicit(
&tab->routes_block_size, &bs, nbs,
memory_order_acq_rel, memory_order_relaxed));
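/* Readers may still be walking the old block under RCU; wait for all
* read-side critical sections to finish before freeing it. */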
synchronize_rcu();
mb_free(routes);
routes = nb;
}
/* Update table tries */
struct f_trie *trie = atomic_load_explicit(&tab->trie, memory_order_relaxed);
if (trie)
trie_add_prefix(trie, i->addr, i->addr->pxlen, i->addr->pxlen);
if (tab->trie_new)
trie_add_prefix(tab->trie_new, i->addr, i->addr->pxlen, i->addr->pxlen);
}
else if ((i = net_find_index(tab->netindex, n)) && (i->index < bs))
/* Found a block we can withdraw from */
;
else
{
/* No route for this net is present at all. Ignore right now. */
req->hook->stats.withdraws_ignored++;
if (req->trace_routes & D_ROUTES)
log(L_TRACE "%s > ignored %N withdraw", req->name, n);
return;
}
/* Resolve the net structure */
nn = &routes[i->index];
/* Recalculate the best route. */
rte_recalculate(tab, hook, i, nn, new, src);
}
}
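/*
* Usage sketch (hedged, not a definitive API contract): protocols normally
* reach rte_import() through rte_update() on their channel, but a direct
* caller holding a prepared import request `req`, a network `n` and a route
* source `src` would announce and withdraw roughly like this (`attrs` is a
* hypothetical attribute list):
*
*   rte e = { .attrs = attrs, .src = src };
*   rte_import(&req, &n, &e, src);	// announce or replace
*   rte_import(&req, &n, NULL, src);	// withdraw (new == NULL)
*/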
struct rt_export_feed *
rt_net_feed(rtable *t, net_addr *a)
{
RT_READ(t, tr);
const struct netindex *i = net_find_index(t->netindex, a);
net *n = i ? net_find(tr, i) : NULL;
if (!n)
return NULL;
/* Get the feed itself. It may change under our hands, though. */
struct rt_pending_export *first = atomic_load_explicit(&n->first, memory_order_acquire);
struct rt_pending_export *last = atomic_load_explicit(&n->last, memory_order_acquire);
/* Count the elements */
uint rcnt = rte_feed_count(tr, n);
uint ecnt = 0;
uint ocnt = 0;
for (struct rt_pending_export *rpe = first; rpe;
rpe = atomic_load_explicit(&rpe->next, memory_order_acquire))
{
ecnt++;
if (rpe->old)
ocnt++;
}
struct rt_export_feed *feed = NULL;
if (rcnt || ocnt || ecnt)
{
uint size = sizeof *feed
+ (rcnt+ocnt) * sizeof *feed->block + _Alignof(typeof(*feed->block))
+ ecnt * sizeof *feed->exports + _Alignof(typeof(*feed->exports));
feed = tmp_alloc(size);
feed->ni = i;
feed->count_routes = rcnt+ocnt;
feed->count_exports = ecnt;
BIRD_SET_ALIGNED_POINTER(feed->block, feed->data);
BIRD_SET_ALIGNED_POINTER(feed->exports, &feed->block[rcnt+ocnt]);
/* Consistency check */
ASSERT_DIE(((void *) &feed->exports[ecnt]) <= ((void *) feed) + size);
if (rcnt)
rte_feed_obtain_copy(tr, n, feed->block, rcnt);
if (ecnt)
{
uint e = 0;
uint rpos = rcnt;
for (struct rt_pending_export *rpe = first; rpe;
rpe = atomic_load_explicit(&rpe->next, memory_order_acquire))
if (e >= ecnt)
RT_READ_RETRY(tr);
else
{
feed->exports[e++] = rpe->seq;
/* Copy also obsolete routes */
if (rpe->old)
{
ASSERT_DIE(rpos < rcnt + ocnt);
feed->block[rpos++] = *rpe->old;
ea_free_later(ea_ref(rpe->old->attrs));
}
}
ASSERT_DIE(e == ecnt);
}
}
/* Check that it indeed didn't change and the last export is still the same. */
if (last != atomic_load_explicit(&n->last, memory_order_acquire) ||
first != atomic_load_explicit(&n->first, memory_order_acquire))
RT_READ_RETRY(tr);
return feed;
}
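/*
* Usage sketch (hedged): rt_net_feed() opens its own RT_READ transaction and
* transparently restarts via RT_READ_RETRY() when the export journal moves
* under its hands. The feed comes from the temporary allocator, so the
* caller just consumes it; process_route() below is hypothetical:
*
*   struct rt_export_feed *feed = rt_net_feed(tab, &addr);
*   if (feed)
*     for (uint i = 0; i < feed->count_routes; i++)
*       process_route(&feed->block[i]);
*/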
rte
rt_net_best(rtable *t, net_addr *a)
{
rte rt = {};
RT_READ(t, tr);
const struct netindex *i = net_find_index(t->netindex, a);
net *n = i ? net_find(tr, i) : NULL;
if (!n)
return rt;
struct rte_storage *e = NET_READ_BEST_ROUTE(tr, n);
if (!e || !rte_is_valid(&e->rte))
return rt;
ea_free_later(ea_ref(e->rte.attrs));
return RTE_COPY(e);
}
/* Check the rtable for the best route to a given net, and whether it would be exported via channel c */
int
rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter)
{
rte rt = rt_net_best(t, a);
int v = c->proto->preexport ? c->proto->preexport(c, &rt) : 0;
if (v == RIC_PROCESS)
v = (f_run(filter, &rt, FF_SILENT) <= F_ACCEPT);
return v > 0;
}
static void
rt_table_export_done(void *hh)
{
struct rt_export_hook *hook = hh;
struct rt_export_request *req = hook->req;
void (*stopped)(struct rt_export_request *) = hook->stopped;
rtable *t = hook->tab;
/* Drop the hook */
RT_LOCKED(t, tab)
{
/* Unlink from the table */
if (lfjour_of_recipient(&hook->recipient))
lfjour_unregister(&hook->recipient);
DBG("Export hook %p in table %s finished uc=%u\n", hook, tab->name, tab->use_count);
/* Free the hook */
rp_free(hook->pool);
}
/* Inform the stopper */
CALL(stopped, req);
/* Unlock the table */
rt_unlock_table(t);
}
static inline void
rt_set_import_state(struct rt_import_hook *hook, u8 state)
{
hook->last_state_change = current_time();
hook->import_state = state;
CALL(hook->req->log_state_change, hook->req, state);
}
u8
rt_set_export_state(struct rt_export_hook *hook, u32 expected_mask, u8 state)
{
hook->last_state_change = current_time();
u8 old = atomic_exchange_explicit(&hook->export_state, state, memory_order_release);
if (!((1 << old) & expected_mask))
bug("Unexpected export state change from %s to %s, expected mask %02x",
rt_export_state_name(old),
rt_export_state_name(state),
expected_mask
);
if (old != state)
CALL(hook->req->log_state_change, hook->req, state);
return old;
}
void
rt_request_import(rtable *t, struct rt_import_request *req)
{
RT_LOCKED(t, tab)
{
rt_lock_table(tab);
struct rt_import_hook *hook = req->hook = mb_allocz(tab->rp, sizeof(struct rt_import_hook));
DBG("Lock table %s for import %p req=%p uc=%u\n", tab->name, hook, req, tab->use_count);
hook->req = req;
hook->table = t;
rt_set_import_state(hook, TIS_UP);
add_tail(&tab->imports, &hook->n);
}
}
void
rt_stop_import(struct rt_import_request *req, void (*stopped)(struct rt_import_request *))
{
ASSERT_DIE(req->hook);
struct rt_import_hook *hook = req->hook;
RT_LOCKED(hook->table, tab)
{
rt_set_import_state(hook, TIS_STOP);
hook->stopped = stopped;
rt_refresh_trace(tab, hook, "stop import");
/* Cancel table rr_counter */
if (hook->stale_set != hook->stale_pruned)
tab->rr_counter -= ((int) hook->stale_set - (int) hook->stale_pruned);
tab->rr_counter++;
hook->stale_set = hook->stale_pruned = hook->stale_pruning = hook->stale_valid = 0;
rt_schedule_prune(tab);
}
}
static void rt_table_export_start_feed(struct rtable_private *tab, struct rt_export_hook *hook);
static void
rt_table_export_uncork(void *_hook)
{
ASSERT_DIE(birdloop_inside(&main_birdloop));
struct rt_export_hook *hook = _hook;
struct birdloop *loop = hook->req->list->loop;
if (loop != &main_birdloop)
birdloop_enter(loop);
u8 state;
RT_LOCKED(hook->tab, tab)
switch (state = atomic_load_explicit(&hook->export_state, memory_order_relaxed))
{
case TES_HUNGRY:
rt_table_export_start_feed(tab, hook);
break;
case TES_STOP:
hook->event->hook = rt_table_export_done;
rt_send_export_event(hook);
break;
default:
bug("Uncorking a table export in a strange state: %u", state);
}
if (loop != &main_birdloop)
birdloop_leave(loop);
}
static void
rt_table_export_start_locked(struct rtable_private *tab, struct rt_export_request *req)
{
rt_lock_table(tab);
pool *p = rp_new(req->pool, req->pool->domain, "Export hook");
struct rt_export_hook *hook = req->hook = mb_allocz(p, sizeof(struct rt_export_hook));
hook->req = req;
hook->tab = RT_PUB(tab);
hook->pool = p;
atomic_store_explicit(&hook->export_state, TES_DOWN, memory_order_release);
hook->event = ev_new_init(p, rt_table_export_uncork, hook);
if (rt_cork_check(hook->event))
rt_set_export_state(hook, BIT32_ALL(TES_DOWN), TES_HUNGRY);
else
rt_table_export_start_feed(tab, hook);
}
static void
rt_table_export_start_feed(struct rtable_private *tab, struct rt_export_hook *hook)
{
struct rt_export_request *req = hook->req;
/* stats zeroed by mb_allocz */
switch (req->prefilter.mode)
{
case TE_ADDR_IN:
case TE_ADDR_NONE:
case TE_ADDR_TRIE:
case TE_ADDR_HOOK:
hook->feed_index = 0;
hook->event->hook = rt_feed_by_fib;
break;
case TE_ADDR_EQUAL:
hook->event->hook = rt_feed_equal;
break;
case TE_ADDR_FOR:
hook->event->hook = rt_feed_for;
break;
default:
bug("Requested an unknown export address mode");
}
DBG("New export hook %p req %p in table %s uc=%u\n", hook, req, tab->name, tab->use_count);
hook->recipient = (struct lfjour_recipient) {
.event = hook->event,
.target = req->list,
};
lfjour_register(&tab->journal, &hook->recipient);
SKIP_BACK_DECLARE(struct rt_pending_export, rpe, li, atomic_load_explicit(&hook->recipient.last, memory_order_relaxed));
req_trace(req, D_STATES, "Export initialized, last export %p (%lu)", rpe, rpe ? rpe->seq : 0);
bmap_init(&hook->seq_map, hook->pool, 16);
/* Regular export */
rt_set_export_state(hook, BIT32_ALL(TES_DOWN, TES_HUNGRY), TES_FEEDING);
rt_send_export_event(hook);
}
#if 0
static void
rt_table_export_start(struct rt_exporter *re, struct rt_export_request *req)
{
RT_LOCKED(SKIP_BACK(rtable, priv.exporter, re), tab)
rt_table_export_start_locked(tab, req);
}
#endif
void rt_request_export(rtable *t, struct rt_export_request *req)
{
RT_LOCKED(t, tab)
rt_table_export_start_locked(tab, req); /* Is locked inside */
}
static void
rt_stop_export_locked(struct rtable_private *tab, struct rt_export_hook *hook)
{
struct rt_export_request *req = hook->req;
/* Update export state, get old */
switch (rt_set_export_state(hook, BIT32_ALL(TES_HUNGRY, TES_FEEDING, TES_READY), TES_STOP))
{
case TES_STOP:
rt_trace(tab, D_EVENTS, "Stopping export hook %s already requested", req->name);
return;
case TES_HUNGRY:
rt_trace(tab, D_EVENTS, "Stopping export hook %s must wait for uncorking", req->name);
return;
case TES_FEEDING:
break;
}
rt_trace(tab, D_EVENTS, "Stopping export hook %s right now", req->name);
/* Reset the event as the stopped event */
ASSERT_DIE(birdloop_inside(req->list->loop));
hook->event->hook = rt_table_export_done;
/* Run the stopped event */
rt_send_export_event(hook);
}
void
rt_stop_export(struct rt_export_request *req, void (*stopped)(struct rt_export_request *))
{
ASSERT_DIE(birdloop_inside(req->list->loop));
struct rt_export_hook *hook = req->hook;
ASSERT_DIE(hook);
RT_LOCKED(hook->tab, t)
{
/* Set the stopped callback */
hook->stopped = stopped;
/* Do the rest */
rt_stop_export_locked(t, hook);
}
}
/**
* rt_refresh_begin - start a refresh cycle
* @req: related import request
*
* This function starts a refresh cycle for the given import request. The
* refresh cycle is a sequence where the protocol sends all its valid routes
* to the routing table (by rte_update()). After that, all protocol routes
* (more precisely routes with the request's hook as @sender) not sent during
* the refresh cycle but still in the table from the past are pruned.
*
* This is implemented by a stale counter: rt_refresh_begin() increments the
* hook's @stale_set and every route inserted afterwards is stamped with that
* value. Then rt_refresh_end() bumps @stale_valid and schedules the prune
* loop, which drops all routes whose @stale_cycle lies outside the valid
* range:
*
* + ----------- + --------- + ----------- + ------------- + ------------ +
* |             | stale_set | stale_valid | stale_pruning | stale_pruned |
* | Base        |     x     |      x      |       x       |      x       |
* | Begin       |    x+1    |      x      |       x       |      x       |
* | End         |    x+1    |     x+1     |       x       |      x       |
* | Prune begin |    x+1    |     x+1     |      x+1      |      x       |
* | Prune end   |    x+1    |     x+1     |      x+1      |     x+1      |
* + ----------- + --------- + ----------- + ------------- + ------------ +
*/
void
rt_refresh_begin(struct rt_import_request *req)
{
struct rt_import_hook *hook = req->hook;
ASSERT_DIE(hook);
RT_LOCKED(hook->table, tab)
{
/* If the pruning routine is too slow */
if (((hook->stale_set - hook->stale_pruned) & 0xff) >= 240)
{
log(L_WARN "Route refresh flood in table %s (stale_set=%u, stale_pruned=%u)", hook->table->name, hook->stale_set, hook->stale_pruned);
/* Forcibly set all old routes' stale cycle to zero. */
u32 bs = atomic_load_explicit(&tab->routes_block_size, memory_order_relaxed);
net *routes = atomic_load_explicit(&tab->routes, memory_order_relaxed);
for (u32 i = 0; i < bs; i++)
NET_WALK_ROUTES(tab, &routes[i], ep, e)
if (e->rte.sender == req->hook)
e->stale_cycle = 0;
/* Smash the route refresh counter and zero everything. */
tab->rr_counter -= ((int) hook->stale_set - (int) hook->stale_pruned);
hook->stale_set = hook->stale_valid = hook->stale_pruning = hook->stale_pruned = 0;
}
/* Now we can safely increase the stale_set modifier */
hook->stale_set++;
/* The table must know that we're route-refreshing */
tab->rr_counter++;
rt_refresh_trace(tab, hook, "route refresh begin");
}
}
/**
* rt_refresh_end - end a refresh cycle
* @req: related import request
*
* This function ends a refresh cycle for the given import request. See
* rt_refresh_begin() for a description of refresh cycles.
*/
void
rt_refresh_end(struct rt_import_request *req)
{
struct rt_import_hook *hook = req->hook;
ASSERT_DIE(hook);
RT_LOCKED(hook->table, tab)
{
/* Now valid routes are only those one with the latest stale_set value */
UNUSED uint cnt = hook->stale_set - hook->stale_valid;
hook->stale_valid = hook->stale_set;
/* Here we can't kick the timer as we aren't in the table service loop */
rt_schedule_prune(tab);
rt_refresh_trace(tab, hook, "route refresh end");
}
}
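/*
* Refresh cycle sketch (hedged): a protocol restarting its session brackets
* the re-announcement of its routes like this; anything not re-announced in
* between is pruned asynchronously afterwards.
*
*   rt_refresh_begin(&c->in_req);
*   ...					// rte_update() all valid routes
*   rt_refresh_end(&c->in_req);
*/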
/**
* rt_refresh_trace - log information about route refresh
* @tab: table
* @ih: import hook doing the route refresh
* @msg: what is happening
*
* This function consistently logs route refresh messages.
*/
static void
rt_refresh_trace(struct rtable_private *tab, struct rt_import_hook *ih, const char *msg)
{
if (ih->req->trace_routes & D_STATES)
log(L_TRACE "%s: %s: rr %u set %u valid %u pruning %u pruned %u", ih->req->name, msg,
tab->rr_counter, ih->stale_set, ih->stale_valid, ih->stale_pruning, ih->stale_pruned);
}
/**
* rte_dump - dump a route
* @e: &rte to be dumped
*
* This function dumps the contents of a &rte to the debug output.
*/
void
rte_dump(struct rte_storage *e)
{
debug("(%u) %-1N", NET_TO_INDEX(e->rte.net)->index, e->rte.net);
debug("ID=%d ", e->rte.id);
debug("SENDER=%s ", e->rte.sender->req->name);
debug("PF=%02x ", e->rte.pflags);
debug("SRC=%uG ", e->rte.src->global_id);
ea_dump(e->rte.attrs);
debug("\n");
}
/**
* rt_dump - dump a routing table
* @t: routing table to be dumped
*
* This function dumps the contents of a given routing table to the debug output.
*/
void
rt_dump(rtable *tab)
{
RT_READ(tab, tp);
/* Looking at priv.deleted is technically unsafe but we don't care */
debug("Dump of routing table <%s>%s\n", tab->name, tab->priv.deleted ? " (deleted)" : "");
u32 bs = atomic_load_explicit(&tp->t->routes_block_size, memory_order_relaxed);
net *routes = atomic_load_explicit(&tp->t->routes, memory_order_relaxed);
for (u32 i = 0; i < bs; i++)
NET_READ_WALK_ROUTES(tp, &routes[i], ep, e)
rte_dump(e);
debug("\n");
}
/**
* rt_dump_all - dump all routing tables
*
* This function dumps the contents of all routing tables to the debug output.
*/
void
rt_dump_all(void)
{
rtable *t;
node *n;
WALK_LIST2(t, n, routing_tables, n)
rt_dump(t);
WALK_LIST2(t, n, deleted_routing_tables, n)
rt_dump(t);
}
void
rt_dump_hooks(rtable *tp)
{
RT_LOCKED(tp, tab)
{
debug("Dump of hooks in routing table <%s>%s\n", tab->name, tab->deleted ? " (deleted)" : "");
debug(" nhu_state=%u use_count=%d rt_count=%u\n",
tab->nhu_state, tab->use_count, tab->rt_count);
debug(" last_rt_change=%t gc_time=%t gc_counter=%d prune_state=%u\n",
tab->last_rt_change, tab->gc_time, tab->gc_counter, tab->prune_state);
struct rt_import_hook *ih;
WALK_LIST(ih, tab->imports)
{
ih->req->dump_req(ih->req);
debug(" Import hook %p requested by %p: pref=%u"
" last_state_change=%t import_state=%u stopped=%p\n",
ih, ih->req, ih->stats.pref,
ih->last_state_change, ih->import_state, ih->stopped);
}
WALK_TLIST(lfjour_recipient, r, &tab->journal.recipients)
{
SKIP_BACK_DECLARE(struct rt_export_hook, eh, recipient, r);
eh->req->dump_req(eh->req);
debug(" Export hook %p requested by %p:"
" refeed_pending=%u last_state_change=%t export_state=%u\n",
eh, eh->req, eh->refeed_pending, eh->last_state_change,
atomic_load_explicit(&eh->export_state, memory_order_relaxed));
}
debug("\n");
}
}
void
rt_dump_hooks_all(void)
{
rtable *t;
node *n;
debug("Dump of all table hooks\n");
WALK_LIST2(t, n, routing_tables, n)
rt_dump_hooks(t);
WALK_LIST2(t, n, deleted_routing_tables, n)
rt_dump_hooks(t);
}
static inline void
rt_schedule_nhu(struct rtable_private *tab)
{
if (tab->nhu_corked)
{
if (!(tab->nhu_corked & NHU_SCHEDULED))
tab->nhu_corked |= NHU_SCHEDULED;
}
else if (!(tab->nhu_state & NHU_SCHEDULED))
{
rt_trace(tab, D_EVENTS, "Scheduling NHU");
/* state change:
* NHU_CLEAN -> NHU_SCHEDULED
* NHU_RUNNING -> NHU_DIRTY
*/
if ((tab->nhu_state |= NHU_SCHEDULED) == NHU_SCHEDULED)
ev_send_loop(tab->loop, tab->nhu_event);
}
}
void
rt_schedule_prune(struct rtable_private *tab)
{
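/* In prune_state, bit 0 means "prune scheduled" and bit 1 means "prune
* running", so setting bit 0 below works both when idle and mid-run. */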
/* state change 0->1, 2->3 */
tab->prune_state |= 1;
ev_send_loop(tab->loop, tab->prune_event);
}
static void
rt_prune_timer(timer *t)
{
RT_LOCKED((rtable *) t->data, tab)
if (tab->gc_counter >= tab->config->gc_threshold)
rt_schedule_prune(tab);
}
static void
rt_kick_prune_timer(struct rtable_private *tab)
{
/* Return if prune is already scheduled */
if (tm_active(tab->prune_timer) || (tab->prune_state & 1))
return;
/* Randomize GC period to +/- 50% */
btime gc_period = tab->config->gc_period;
gc_period = (gc_period / 2) + (random_u32() % (uint) gc_period);
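/* E.g. a configured gc period of 10 s starts the timer in [5 s, 15 s) */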
tm_start_in(tab->prune_timer, gc_period, tab->loop);
}
#define TLIST_PREFIX rt_flowspec_link
#define TLIST_TYPE struct rt_flowspec_link
#define TLIST_ITEM n
#define TLIST_WANT_WALK
#define TLIST_WANT_ADD_TAIL
#define TLIST_DEFINED_BEFORE
struct rt_flowspec_link {
TLIST_DEFAULT_NODE;
rtable *src;
rtable *dst;
u32 uc;
struct rt_export_request req;
};
#include "lib/tlists.h"
static void
rt_flowspec_export_one(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
{
SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
rtable *dst_pub = ln->dst;
ASSUME(rt_is_flow(dst_pub));
RT_LOCKED(dst_pub, dst)
{
/* No need to inspect it further if recalculation is already scheduled */
if ((dst->nhu_state == NHU_SCHEDULED) || (dst->nhu_state == NHU_DIRTY)
|| !trie_match_net(dst->flowspec_trie, net))
{
rpe_mark_seen_all(req->hook, first, NULL, NULL);
return;
}
/* This net may affect some flowspecs, check the actual change */
const rte *o = RTE_VALID_OR_NULL(first->old_best);
const rte *new_best = first->new_best;
RPE_WALK(first, rpe, NULL)
{
rpe_mark_seen(req->hook, rpe);
new_best = rpe->new_best;
}
/* Yes, something has actually changed. Schedule the update. */
if (o != RTE_VALID_OR_NULL(new_best))
rt_schedule_nhu(dst);
}
}
static void
rt_flowspec_dump_req(struct rt_export_request *req)
{
SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
debug(" Flowspec link for table %s (%p)\n", ln->dst->name, req);
}
static void
rt_flowspec_log_state_change(struct rt_export_request *req, u8 state)
{
SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
rt_trace(ln->dst, D_STATES, "Flowspec link from %s export state changed to %s",
ln->src->name, rt_export_state_name(state));
}
static struct rt_flowspec_link *
rt_flowspec_find_link(struct rtable_private *src, rtable *dst)
{
WALK_TLIST(rt_flowspec_link, ln, &src->flowspec_links)
if (ln->dst == dst && ln->req.hook)
switch (atomic_load_explicit(&ln->req.hook->export_state, memory_order_acquire))
{
case TES_HUNGRY:
case TES_FEEDING:
case TES_READY:
return ln;
}
return NULL;
}
void
rt_flowspec_link(rtable *src_pub, rtable *dst_pub)
{
ASSERT(rt_is_ip(src_pub));
ASSERT(rt_is_flow(dst_pub));
int lock_dst = 0;
birdloop_enter(dst_pub->loop);
RT_LOCKED(src_pub, src)
{
struct rt_flowspec_link *ln = rt_flowspec_find_link(src, dst_pub);
if (!ln)
{
pool *p = birdloop_pool(dst_pub->loop);
ln = mb_allocz(p, sizeof(struct rt_flowspec_link));
ln->src = src_pub;
ln->dst = dst_pub;
ln->req = (struct rt_export_request) {
.name = mb_sprintf(p, "%s.flowspec.notifier", dst_pub->name),
.list = birdloop_event_list(dst_pub->loop),
.pool = p,
.trace_routes = src->config->debug,
.dump_req = rt_flowspec_dump_req,
.log_state_change = rt_flowspec_log_state_change,
.export_one = rt_flowspec_export_one,
};
rt_flowspec_link_add_tail(&src->flowspec_links, ln);
rt_table_export_start_locked(src, &ln->req);
lock_dst = 1;
}
ln->uc++;
}
if (lock_dst)
rt_lock_table(dst_pub);
birdloop_leave(dst_pub->loop);
}
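/*
* Sketch (hedged): flowspec channels that validate against an IP table link
* the two tables together; the link is reference-counted, so setup and
* teardown pair up:
*
*   rt_flowspec_link(ip_table, flow_table);	// on channel start
*   ...
*   rt_flowspec_unlink(ip_table, flow_table);	// on channel shutdown
*/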
static void
rt_flowspec_link_stopped(struct rt_export_request *req)
{
SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
rtable *dst = ln->dst;
mb_free(ln);
rt_unlock_table(dst);
}
void
rt_flowspec_unlink(rtable *src, rtable *dst)
{
birdloop_enter(dst->loop);
struct rt_flowspec_link *ln;
RT_LOCKED(src, t)
{
ln = rt_flowspec_find_link(t, dst);
ASSERT(ln && (ln->uc > 0));
if (!--ln->uc)
{
rt_flowspec_link_rem_node(&t->flowspec_links, ln);
ln->req.hook->stopped = rt_flowspec_link_stopped;
rt_stop_export_locked(t, ln->req.hook);
}
}
birdloop_leave(dst->loop);
}
static void
rt_flowspec_reset_trie(struct rtable_private *tab)
{
linpool *lp = tab->flowspec_trie->lp;
int ipv4 = tab->flowspec_trie->ipv4;
lp_flush(lp);
tab->flowspec_trie = f_new_trie(lp, 0);
tab->flowspec_trie->ipv4 = ipv4;
}
static void
rt_free(resource *_r)
{
SKIP_BACK_DECLARE(struct rtable_private, r, r, _r);
DBG("Deleting routing table %s\n", r->name);
ASSERT_DIE(r->use_count == 0);
r->config->table = NULL;
rem_node(&r->n);
if (r->hostcache)
rt_free_hostcache(r);
/* Freed automagically by the resource pool
fib_free(&r->fib);
hmap_free(&r->id_map);
rfree(r->rt_event);
mb_free(r);
*/
}
static void
rt_res_dump(resource *_r, unsigned indent)
{
SKIP_BACK_DECLARE(struct rtable_private, r, r, _r);
debug("name \"%s\", addr_type=%s, rt_count=%u, use_count=%d\n",
r->name, net_label[r->addr_type], r->rt_count, r->use_count);
/* TODO: move this to lfjour */
char x[32];
bsprintf(x, "%%%dspending export %%p\n", indent + 2);
WALK_TLIST(lfjour_block, n, &r->journal.pending)
debug(x, "", n);
}
static struct resclass rt_class = {
.name = "Routing table",
.size = sizeof(rtable),
.free = rt_free,
.dump = rt_res_dump,
.lookup = NULL,
.memsize = NULL,
};
static struct idm rtable_idm;
uint rtable_max_id = 0;
rtable *
rt_setup(pool *pp, struct rtable_config *cf)
{
ASSERT_DIE(birdloop_inside(&main_birdloop));
/* Start the service thread */
struct birdloop *loop = birdloop_new(pp, DOMAIN_ORDER(service), 0, "Routing table service %s", cf->name);
birdloop_enter(loop);
pool *sp = birdloop_pool(loop);
/* Create the table domain and pool */
DOMAIN(rtable) dom = DOMAIN_NEW(rtable);
LOCK_DOMAIN(rtable, dom);
pool *p = rp_newf(sp, dom.rtable, "Routing table data %s", cf->name);
/* Create the actual table */
struct rtable_private *t = ralloc(p, &rt_class);
t->rp = p;
t->loop = loop;
t->lock = dom;
t->rte_slab = sl_new(p, sizeof(struct rte_storage));
t->name = cf->name;
t->config = cf;
t->addr_type = cf->addr_type;
t->debug = cf->debug;
t->id = idm_alloc(&rtable_idm);
if (t->id >= rtable_max_id)
rtable_max_id = t->id + 1;
t->netindex = rt_global_netindex_hash;
atomic_store_explicit(&t->routes, mb_allocz(p, RT_INITIAL_ROUTES_BLOCK_SIZE * sizeof(net)), memory_order_relaxed);
atomic_store_explicit(&t->routes_block_size, RT_INITIAL_ROUTES_BLOCK_SIZE, memory_order_relaxed);
if (cf->trie_used)
{
struct f_trie *trie = f_new_trie(lp_new_default(p), 0);
trie->ipv4 = net_val_match(t->addr_type, NB_IP4 | NB_VPN4 | NB_ROA4);
atomic_store_explicit(&t->trie, trie, memory_order_relaxed);
}
init_list(&t->imports);
hmap_init(&t->id_map, p, 1024);
hmap_set(&t->id_map, 0);
t->nhu_event = ev_new_init(p, rt_next_hop_update, t);
t->nhu_uncork_event = ev_new_init(p, rt_nhu_uncork, t);
t->prune_timer = tm_new_init(p, rt_prune_timer, t, 0, 0);
t->prune_event = ev_new_init(p, rt_prune_table, t);
t->last_rt_change = t->gc_time = current_time();
t->journal.loop = t->loop;
t->journal.domain = t->lock.rtable;
t->journal.item_size = sizeof(struct rt_pending_export);
t->journal.item_done = rt_cleanup_export;
t->journal.cleanup_done = rt_cleanup_done;
lfjour_init(&t->journal, &cf->export_settle);
t->cork_threshold = cf->cork_threshold;
t->rl_pipe = (struct tbf) TBF_DEFAULT_LOG_LIMITS;
if (rt_is_flow(RT_PUB(t)))
{
t->flowspec_trie = f_new_trie(lp_new_default(p), 0);
t->flowspec_trie->ipv4 = (t->addr_type == NET_FLOW4);
}
UNLOCK_DOMAIN(rtable, dom);
birdloop_leave(t->loop);
return RT_PUB(t);
}
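/*
* Sketch (hedged): tables are instantiated from parsed configuration,
* roughly as follows; how the new table then gets linked into the global
* table lists is up to the configuration code, not rt_setup() itself.
*
*   struct rtable_config *cf = ...;	// a parsed `table` definition
*   rtable *t = rt_setup(rt_table_pool, cf);
*/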
/**
* rt_init - initialize routing tables
*
* This function is called during BIRD startup. It initializes the
* routing table module.
*/
void
rt_init(void)
{
rta_init();
rt_table_pool = rp_new(&root_pool, the_bird_domain.the_bird, "Routing tables");
init_list(&routing_tables);
init_list(&deleted_routing_tables);
ev_init_list(&rt_cork.queue, &main_birdloop, "Route cork release");
rt_cork.run = (event) { .hook = rt_cork_release_hook };
idm_init(&rtable_idm, rt_table_pool, 256);
rt_global_netindex_hash = netindex_hash_new(rt_table_pool);
}
static _Bool
rt_prune_net(struct rtable_private *tab, struct network *n)
{
NET_WALK_ROUTES(tab, n, ep, e)
{
ASSERT_DIE(!(e->flags & REF_OBSOLETE));
struct rt_import_hook *s = e->rte.sender;
_Bool stale = (s->import_state == TIS_FLUSHING);
if (!stale)
{
/*
* The range of 0..256 is split by s->stale_* like this:
*
* pruned pruning valid set
* | | | |
* 0 v v v v 256
* |...........................+++++++++++........|
*
* We want to drop everything outside the marked range, thus
* (e->rte.stale_cycle < s->stale_valid) ||
* (e->rte.stale_cycle > s->stale_set))
* looks right.
*
* But the pointers may wrap around, and in the following situation, all the routes get pruned:
*
* set pruned pruning valid
* | | | |
* 0 v v v v 256
* |++++++..................................++++++|
*
* In that case, we want
* (e->rte.stale_cycle > s->stale_valid) ||
* (e->rte.stale_cycle < s->stale_set))
*
* Full logic table:
*
* permutation | result | (S < V) + (S < SC) + (SC < V)
* -----------------+----------+---------------------------------
* SC < V <= S | prune | 0 + 0 + 1 = 1
* S < SC < V | prune | 1 + 1 + 1 = 3
* V <= S < SC | prune | 0 + 1 + 0 = 1
* SC <= S < V | keep | 1 + 0 + 1 = 2
* V <= SC <= S | keep | 0 + 0 + 0 = 0
* S < V <= SC | keep | 1 + 1 + 0 = 2
*
* Now the following code hopefully makes sense.
*/
int sv = (s->stale_set < s->stale_valid);
int ssc = (s->stale_set < e->rte.stale_cycle);
int scv = (e->rte.stale_cycle < s->stale_valid);
stale = (sv + ssc + scv) & 1;
}
/* By the C standard, either the importer is flushing and stale is 1,
* or by the table above, the sum (sv + ssc + scv) is between 0 and 3,
* where even values say "keep" and odd values say "prune". */
if (stale)
{
/* Announce withdrawal */
struct netindex *i = RTE_GET_NETINDEX(&e->rte);
rte_recalculate(tab, e->rte.sender, i, n, NULL, e->rte.src);
return 1;
}
}
return 0;
}
/**
* rt_prune_table - prune a routing table
*
* The prune loop scans routing tables and removes routes belonging to flushing
* protocols, discarded routes and also stale network entries. It is run as
* the table's prune event and reschedules itself if the current iteration
* does not finish the table. The pruning is directed by the prune state
* (@prune_state), specifying whether the prune cycle is scheduled or running,
* and there is also a persistent pruning iterator (@prune_index).
*
* The prune loop is used also for channel flushing. For this purpose, the
* channels to flush are marked before the iteration and notified after the
* iteration.
*/
static void
rt_prune_table(void *_tab)
{
RT_LOCK((rtable *) _tab, tab);
int limit = 2000;
struct rt_import_hook *ih;
node *n, *x;
rt_trace(tab, D_STATES, "Pruning");
if (tab->prune_state == 0)
return;
if (tab->prune_state == 1)
{
/* Mark channels to flush */
WALK_LIST2(ih, n, tab->imports, n)
if (ih->import_state == TIS_STOP)
rt_set_import_state(ih, TIS_FLUSHING);
else if ((ih->stale_valid != ih->stale_pruning) && (ih->stale_pruning == ih->stale_pruned))
{
ih->stale_pruning = ih->stale_valid;
rt_refresh_trace(tab, ih, "table prune after refresh begin");
}
tab->prune_index = 0;
tab->prune_state = 2;
tab->gc_counter = 0;
tab->gc_time = current_time();
if (tab->prune_trie)
{
/* Init prefix trie pruning */
tab->trie_new = f_new_trie(lp_new_default(tab->rp), 0);
tab->trie_new->ipv4 = atomic_load_explicit(&tab->trie, memory_order_relaxed)->ipv4;
}
}
u32 bs = atomic_load_explicit(&tab->routes_block_size, memory_order_relaxed);
net *routes = atomic_load_explicit(&tab->routes, memory_order_relaxed);
for (; tab->prune_index < bs; tab->prune_index++)
{
net *n = &routes[tab->prune_index];
while ((limit > 0) && rt_prune_net(tab, n))
limit--;
if (limit <= 0)
{
ev_send_loop(tab->loop, tab->prune_event);
return;
}
struct rte_storage *e = NET_BEST_ROUTE(tab, n);
if (tab->trie_new && e)
{
const net_addr *a = e->rte.net;
trie_add_prefix(tab->trie_new, a, a->pxlen, a->pxlen);
limit--;
}
}
rt_trace(tab, D_EVENTS, "Prune done");
lfjour_announce_now(&tab->journal);
/* state change 2->0, 3->1 */
if (tab->prune_state &= 1)
ev_send_loop(tab->loop, tab->prune_event);
struct f_trie *trie = atomic_load_explicit(&tab->trie, memory_order_relaxed);
if (tab->trie_new)
{
/* Finish prefix trie pruning */
atomic_store_explicit(&tab->trie, tab->trie_new, memory_order_release);
/* Trace before clearing trie_new, otherwise the new pointer would be logged as NULL */
rt_trace(tab, D_EVENTS, "Trie prune done, new %p, old %p (%s)",
tab->trie_new, trie, tab->trie_lock_count ? "still used" : "freeing");
tab->trie_new = NULL;
tab->prune_trie = 0;
if (!tab->trie_lock_count)
{
synchronize_rcu();
rfree(trie->lp);
}
else
{
ASSERT(!tab->trie_old);
tab->trie_old = trie;
tab->trie_old_lock_count = tab->trie_lock_count;
tab->trie_lock_count = 0;
}
}
else
{
/* Schedule prefix trie pruning */
if (trie && !tab->trie_old && (trie->prefix_count > (2 * tab->net_count)))
{
/* state change 0->1, 2->3 */
tab->prune_state |= 1;
tab->prune_trie = 1;
}
}
/* Close flushed channels */
WALK_LIST2_DELSAFE(ih, n, x, tab->imports, n)
if (ih->import_state == TIS_FLUSHING)
{
DBG("flushing %s %s rr %u", ih->req->name, tab->name, tab->rr_counter);
ih->flush_seq = tab->journal.next_seq;
rt_set_import_state(ih, TIS_WAITING);
tab->rr_counter--;
tab->wait_counter++;
lfjour_schedule_cleanup(&tab->journal);
}
else if (ih->stale_pruning != ih->stale_pruned)
{
tab->rr_counter -= ((int) ih->stale_pruning - (int) ih->stale_pruned);
ih->stale_pruned = ih->stale_pruning;
rt_refresh_trace(tab, ih, "table prune after refresh end");
}
}
static void
rt_cork_release_hook(void *data UNUSED)
{
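/* Flush the queue of corked events: wait for the current RCU readers
 * first, then run one batch, and stop as soon as either the cork got
 * re-acquired or the queue is drained. */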
do synchronize_rcu();
while (
!atomic_load_explicit(&rt_cork.active, memory_order_acquire) &&
ev_run_list(&rt_cork.queue)
);
}
/**
* rt_lock_trie - lock a prefix trie of a routing table
* @tab: routing table with prefix trie to be locked
*
* The prune loop may rebuild the prefix trie and invalidate f_trie_walk_state
* structures. Therefore, asynchronous walks should lock the prefix trie using
* this function. That allows the prune loop to rebuild the trie, but postpones
* its freeing until all walks are done (unlocked by rt_unlock_trie()).
*
* Returns the current trie, which gets locked; the value must be passed back
* to rt_unlock_trie() for unlocking.
*
*/
const struct f_trie *
rt_lock_trie(struct rtable_private *tab)
{
const struct f_trie *trie = atomic_load_explicit(&tab->trie, memory_order_relaxed);
ASSERT(trie);
tab->trie_lock_count++;
return trie;
}
/**
* rt_unlock_trie - unlock a prefix trie of a routing table
* @tab: routing table with prefix trie to be unlocked
* @trie: value returned by matching rt_lock_trie()
*
* Called for a trie locked by rt_lock_trie() once the walk over the trie is done.
* It may free the trie and schedule next trie pruning.
*/
void
rt_unlock_trie(struct rtable_private *tab, const struct f_trie *trie)
{
ASSERT(trie);
const struct f_trie *tab_trie = atomic_load_explicit(&tab->trie, memory_order_relaxed);
if (trie == tab_trie)
{
/* Unlock the current prefix trie */
ASSERT(tab->trie_lock_count);
tab->trie_lock_count--;
}
else if (trie == tab->trie_old)
{
/* Unlock the old prefix trie */
ASSERT(tab->trie_old_lock_count);
tab->trie_old_lock_count--;
/* Free old prefix trie that is no longer needed */
if (!tab->trie_old_lock_count)
{
rfree(tab->trie_old->lp);
tab->trie_old = NULL;
/* Kick prefix trie pruning that was postponed */
if (tab_trie && (tab_trie->prefix_count > (2 * tab->net_count)))
{
tab->prune_trie = 1;
rt_kick_prune_timer(tab);
}
}
}
else
log(L_BUG "Invalid arg to rt_unlock_trie()");
}
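/*
 * A minimal usage sketch for the locking pair above (illustrative only, not
 * part of the original source; @pub_tab stands for the public rtable handle).
 * An asynchronous walk keeps the returned trie over its whole lifetime and
 * hands the very same pointer back:
 *
 *	const struct f_trie *trie;
 *	RT_LOCKED(pub_tab, tab)
 *	  trie = rt_lock_trie(tab);
 *
 *	... walk trie via f_trie_walk_state, possibly across several
 *	... scheduled runs; the prune loop may rebuild tab->trie meanwhile
 *
 *	RT_LOCKED(pub_tab, tab)
 *	  rt_unlock_trie(tab, trie);
 */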
void
rt_preconfig(struct config *c)
{
init_list(&c->tables);
c->def_tables[NET_IP4] = cf_define_symbol(c, cf_get_symbol(c, "master4"), SYM_TABLE, table, NULL);
c->def_tables[NET_IP6] = cf_define_symbol(c, cf_get_symbol(c, "master6"), SYM_TABLE, table, NULL);
}
void
rt_postconfig(struct config *c)
{
uint num_tables = list_length(&c->tables);
btime def_gc_period = 400 MS * num_tables;
def_gc_period = MAX(def_gc_period, 10 S);
def_gc_period = MIN(def_gc_period, 600 S);
struct rtable_config *rc;
WALK_LIST(rc, c->tables)
if (rc->gc_period == (uint) -1)
rc->gc_period = (uint) def_gc_period;
for (uint net_type = 0; net_type < NET_MAX; net_type++)
if (c->def_tables[net_type] && !c->def_tables[net_type]->table)
{
c->def_tables[net_type]->class = SYM_VOID;
c->def_tables[net_type] = NULL;
}
}
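/*
 * Worked example of the default GC period above: with 3 tables,
 * 400 MS * 3 = 1.2 s falls below the 10 s floor, so 10 s is used; with
 * 100 tables, 400 MS * 100 = 40 s fits into the [10 s, 600 s] range and is
 * used as is; only configurations with more than 1500 tables hit the
 * 600 s ceiling.
 */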
/*
* Some functions for handling internal next hop updates
* triggered by rt_schedule_nhu().
*/
void
ea_set_hostentry(ea_list **to, rtable *dep, rtable *src, ip_addr gw, ip_addr ll, u32 lnum, u32 labels[lnum])
{
struct {
struct hostentry_adata head;
u32 label_space[];
} *h;
u32 sz = sizeof *h + lnum * sizeof(u32);
h = alloca(sz);
memset(h, 0, sz);
RT_LOCKED(src, tab)
h->head.he = rt_get_hostentry(tab, gw, ll, dep);
memcpy(h->head.labels, labels, lnum * sizeof(u32));
2022-05-15 13:53:35 +00:00
2024-02-22 10:38:13 +00:00
ea_set_attr_data(to, &ea_gen_hostentry, 0, h->head.ad.data, (byte *) &h->head.labels[lnum] - h->head.ad.data);
}
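/*
 * A caller sketch for ea_set_hostentry() (illustrative only; real callers
 * live in protocol code resolving recursive next hops, e.g. BGP). All the
 * variable names here are placeholders:
 *
 *	u32 labels[1] = { 100 };	// MPLS label stack to append
 *	ea_set_hostentry(&e->attrs, dep_table, base_table,
 *			 recursive_gw, link_local, 1, labels);
 *
 * The attached hostentry is later translated to an immediate next hop by
 * rta_apply_hostentry() below, and re-examined whenever the best route
 * covering recursive_gw in base_table changes.
 */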
static void
rta_apply_hostentry(ea_list **to, struct hostentry_adata *head)
{
u32 *labels = head->labels;
u32 lnum = (u32 *) (head->ad.data + head->ad.length) - labels;
struct hostentry *he = head->he;
rcu_read_lock();
u32 version = atomic_load_explicit(&he->version, memory_order_acquire);
while (1)
{
if (version & 1)
{
rcu_read_unlock();
birdloop_yield();
rcu_read_lock();
version = atomic_load_explicit(&he->version, memory_order_acquire);
continue;
}
ea_set_attr_u32(to, &ea_gen_igp_metric, 0, he->igp_metric);
if (!he->src)
{
ea_set_dest(to, 0, RTD_UNREACHABLE);
break;
}
eattr *he_nh_ea = ea_find(he->src, &ea_gen_nexthop);
ASSERT_DIE(he_nh_ea);
struct nexthop_adata *nhad = (struct nexthop_adata *) he_nh_ea->u.ptr;
int idest = nhea_dest(he_nh_ea);
if ((idest != RTD_UNICAST) ||
(!lnum && he->nexthop_linkable))
{
/* Just link the nexthop chain, no label append happens. */
ea_copy_attr(to, he->src, &ea_gen_nexthop);
break;
}
uint total_size = OFFSETOF(struct nexthop_adata, nh);
NEXTHOP_WALK(nh, nhad)
{
if (nh->labels + lnum > MPLS_MAX_LABEL_STACK)
{
log(L_WARN "Sum of label stack sizes %d + %d = %d exceedes allowed maximum (%d)",
nh->labels, lnum, nh->labels + lnum, MPLS_MAX_LABEL_STACK);
continue;
}
total_size += NEXTHOP_SIZE_CNT(nh->labels + lnum);
}
if (total_size == OFFSETOF(struct nexthop_adata, nh))
{
log(L_WARN "No valid nexthop remaining, setting route unreachable");
struct nexthop_adata nha = {
.ad.length = NEXTHOP_DEST_SIZE,
.dest = RTD_UNREACHABLE,
};
ea_set_attr_data(to, &ea_gen_nexthop, 0, &nha.ad.data, nha.ad.length);
break;
}
struct nexthop_adata *new = (struct nexthop_adata *) tmp_alloc_adata(total_size);
struct nexthop *dest = &new->nh;
NEXTHOP_WALK(nh, nhad)
{
if (nh->labels + lnum > MPLS_MAX_LABEL_STACK)
continue;
memcpy(dest, nh, NEXTHOP_SIZE(nh));
if (lnum)
{
memcpy(&(dest->label[dest->labels]), labels, lnum * sizeof labels[0]);
dest->labels += lnum;
}
if (ipa_nonzero(nh->gw))
/* Router nexthop */
dest->flags = (dest->flags & RNF_ONLINK);
else if (!(nh->iface->flags & IF_MULTIACCESS) || (nh->iface->flags & IF_LOOPBACK))
dest->gw = IPA_NONE; /* PtP link - no need for nexthop */
else if (ipa_nonzero(he->link))
dest->gw = he->link; /* Device nexthop with link-local address known */
else
dest->gw = he->addr; /* Device nexthop with link-local address unknown */
dest = NEXTHOP_NEXT(dest);
}
/* Fix final length */
new->ad.length = (void *) dest - (void *) new->ad.data;
ea_set_attr(to, EA_LITERAL_DIRECT_ADATA(
&ea_gen_nexthop, 0, &new->ad));
/* Has the HE version changed? */
u32 end_version = atomic_load_explicit(&he->version, memory_order_acquire);
/* Stayed stable, we can finalize the route */
if (end_version == version)
break;
/* No, retry once again */
version = end_version;
}
rcu_read_unlock();
ea_set_attr_u32(to, &ea_gen_hostentry_version, 0, version);
}
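/*
 * The version loop above is a seqlock-style read: rt_update_hostentry()
 * keeps he->version odd while rewriting the hostentry, and readers accept
 * a result only if they saw the same even version before and after. The
 * bare pattern looks like this (illustrative sketch):
 *
 *	u32 v;
 *	do {
 *	  while ((v = atomic_load_explicit(&he->version, memory_order_acquire)) & 1)
 *	    birdloop_yield();	// writer in progress, let it finish
 *	  ... read the hostentry fields ...
 *	} while (atomic_load_explicit(&he->version, memory_order_acquire) != v);
 */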
static inline int
rt_next_hop_update_rte(const rte *old, rte *new)
{
eattr *hev = ea_find(old->attrs, &ea_gen_hostentry_version);
if (!hev)
return 0;
u32 last_version = hev->u.data;
eattr *heea = ea_find(old->attrs, &ea_gen_hostentry);
ASSERT_DIE(heea);
struct hostentry_adata *head = (struct hostentry_adata *) heea->u.ptr;
u32 current_version = atomic_load_explicit(&head->he->version, memory_order_acquire);
if (current_version == last_version)
return 0;
*new = *old;
new->attrs = ea_strip_to(new->attrs, BIT32_ALL(EALS_PREIMPORT, EALS_FILTERED));
rta_apply_hostentry(&new->attrs, head);
return 1;
}
static inline void
rt_next_hop_resolve_rte(rte *r)
{
eattr *heea = ea_find(r->attrs, &ea_gen_hostentry);
if (!heea)
return;
rta_apply_hostentry(&r->attrs, (struct hostentry_adata *) heea->u.ptr);
}
#ifdef CONFIG_BGP
static inline int
net_flow_has_dst_prefix(const net_addr *n)
{
ASSUME(net_is_flow(n));
if (n->pxlen)
return 1;
if (n->type == NET_FLOW4)
{
const net_addr_flow4 *n4 = (void *) n;
return (n4->length > sizeof(net_addr_flow4)) && (n4->data[0] == FLOW_TYPE_DST_PREFIX);
}
else
{
const net_addr_flow6 *n6 = (void *) n;
return (n6->length > sizeof(net_addr_flow6)) && (n6->data[0] == FLOW_TYPE_DST_PREFIX);
}
}
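/*
 * Note: checking data[0] alone is enough because RFC 8955/8956 require the
 * flowspec components to be encoded in ascending type order, and the
 * destination prefix component has type 1, so when present it is always
 * the first component of the NLRI.
 */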
static inline int
rta_as_path_is_empty(ea_list *a)
{
eattr *e = ea_find(a, "bgp_path");
return !e || (as_path_getlen(e->u.ptr) == 0);
}
static inline u32
rta_get_first_asn(ea_list *a)
{
eattr *e = ea_find(a, "bgp_path");
u32 asn;
return (e && as_path_get_first_regular(e->u.ptr, &asn)) ? asn : 0;
}
static inline enum flowspec_valid
rt_flowspec_check(rtable *tab_ip, struct rtable_private *tab_flow, const net_addr *n, ea_list *a, int interior)
{
ASSERT(rt_is_ip(tab_ip));
ASSERT(rt_is_flow(RT_PUB(tab_flow)));
/* RFC 8955 6. a) Flowspec has defined dst prefix */
if (!net_flow_has_dst_prefix(n))
return FLOWSPEC_INVALID;
/* RFC 9117 4.1. Accept if AS_PATH is empty (flowspec originated in the local AS) */
if (interior && rta_as_path_is_empty(a))
return FLOWSPEC_VALID;
/* RFC 8955 6. b) Flowspec and its best-match route have the same originator */
/* Find flowspec dst prefix */
net_addr dst;
if (n->type == NET_FLOW4)
net_fill_ip4(&dst, net4_prefix(n), net4_pxlen(n));
else
net_fill_ip6(&dst, net6_prefix(n), net6_pxlen(n));
rte rb = {};
RT_READ(tab_ip, tip);
const struct f_trie *ip_trie = atomic_load_explicit(&tip->t->trie, memory_order_relaxed);
ASSERT_DIE(ip_trie);
/* Find best-match BGP unicast route for flowspec dst prefix */
net *nb = net_route(tip, &dst);
if (nb)
rb = RTE_COPY_VALID(RTE_OR_NULL(NET_READ_BEST_ROUTE(tip, nb)));
/* Register prefix to trie for tracking further changes */
int max_pxlen = (n->type == NET_FLOW4) ? IP4_MAX_PREFIX_LENGTH : IP6_MAX_PREFIX_LENGTH;
trie_add_prefix(tab_flow->flowspec_trie, &dst, (rb.net ? rb.net->pxlen : 0), max_pxlen);
/* No best-match BGP route -> no flowspec */
if (!rb.attrs || (rt_get_source_attr(&rb) != RTS_BGP))
return FLOWSPEC_INVALID;
/* Find ORIGINATOR_ID values */
u32 orig_a = ea_get_int(a, "bgp_originator_id", 0);
u32 orig_b = ea_get_int(rb.attrs, "bgp_originator_id", 0);
/* Originator is either ORIGINATOR_ID (if present), or BGP neighbor address (if not) */
if ((orig_a != orig_b) || (!orig_a && !orig_b && !ipa_equal(
ea_get_ip(a, &ea_gen_from, IPA_NONE),
ea_get_ip(rb.attrs, &ea_gen_from, IPA_NONE)
)))
return FLOWSPEC_INVALID;
/* Find ASN of the best-match route, for use in next checks */
u32 asn_b = rta_get_first_asn(rb.attrs);
if (!asn_b)
return FLOWSPEC_INVALID;
/* RFC 9117 4.2. For EBGP, flowspec and its best-match route are from the same AS */
if (!interior && (rta_get_first_asn(a) != asn_b))
return FLOWSPEC_INVALID;
/* RFC 8955 6. c) More-specific routes are from the same AS as the best-match route */
NH_LOCK(tip->t->netindex, nh);
TRIE_WALK(ip_trie, subnet, &dst)
{
net *nc = net_find_valid(tip, nh, &subnet);
if (!nc)
continue;
struct rte_storage *rs = NET_READ_BEST_ROUTE(tip, nc);
const rte *rc = &rs->rte;
if (rt_get_source_attr(rc) != RTS_BGP)
return FLOWSPEC_INVALID;
if (rta_get_first_asn(rc->attrs) != asn_b)
return FLOWSPEC_INVALID;
}
TRIE_WALK_END;
return FLOWSPEC_VALID;
}
#endif /* CONFIG_BGP */
static int
rt_flowspec_update_rte(struct rtable_private *tab, const rte *r, rte *new)
{
#ifdef CONFIG_BGP
if (r->generation || (rt_get_source_attr(r) != RTS_BGP))
return 0;
struct bgp_channel *bc = (struct bgp_channel *) SKIP_BACK(struct channel, in_req, r->sender->req);
if (!bc->base_table)
return 0;
SKIP_BACK_DECLARE(struct bgp_proto, p, p, bc->c.proto);
enum flowspec_valid old = rt_get_flowspec_valid(r),
valid = rt_flowspec_check(bc->base_table, tab, r->net, r->attrs, p->is_interior);
if (old == valid)
return 0;
*new = *r;
new->attrs = ea_strip_to(new->attrs, BIT32_ALL(EALS_PREIMPORT, EALS_FILTERED));
ea_set_attr_u32(&new->attrs, &ea_gen_flowspec_valid, 0, valid);
return 1;
#else
return 0;
#endif
}
static inline void
rt_flowspec_resolve_rte(rte *r, struct channel *c)
{
#ifdef CONFIG_BGP
enum flowspec_valid valid, old = rt_get_flowspec_valid(r);
struct bgp_channel *bc = (struct bgp_channel *) c;
if ( (rt_get_source_attr(r) == RTS_BGP)
&& (c->class == &channel_bgp)
&& (bc->base_table))
{
SKIP_BACK_DECLARE(struct bgp_proto, p, p, bc->c.proto);
RT_LOCKED(c->in_req.hook->table, tab)
valid = rt_flowspec_check(
bc->base_table, tab,
r->net, r->attrs, p->is_interior);
}
else
valid = FLOWSPEC_UNKNOWN;
if (valid == old)
return;
if (valid == FLOWSPEC_UNKNOWN)
ea_unset_attr(&r->attrs, 0, &ea_gen_flowspec_valid);
else
ea_set_attr_u32(&r->attrs, &ea_gen_flowspec_valid, 0, valid);
#endif
}
static inline int
rt_next_hop_update_net(struct rtable_private *tab, struct netindex *ni, net *n)
{
uint count = 0;
int is_flow = net_val_match(tab->addr_type, NB_FLOW);
struct rte_storage *old_best = NET_BEST_ROUTE(tab, n);
if (!old_best)
return 0;
NET_WALK_ROUTES(tab, n, ep, e)
count++;
if (!count)
return 0;
struct rte_multiupdate {
struct rte_storage *old, *new_stored;
rte new;
} *updates = tmp_allocz(sizeof(struct rte_multiupdate) * (count+1));
uint pos = 0;
NET_WALK_ROUTES(tab, n, ep, e)
updates[pos++].old = e;
uint mod = 0;
if (is_flow)
for (uint i = 0; i < pos; i++)
mod += rt_flowspec_update_rte(tab, &updates[i].old->rte, &updates[i].new);
else
for (uint i = 0; i < pos; i++)
mod += rt_next_hop_update_rte(&updates[i].old->rte, &updates[i].new);
if (!mod)
return 0;
/* We add a spinlock sentinel to the beginning */
struct rte_storage local_sentinel = {
.flags = REF_OBSOLETE,
.next = old_best,
};
atomic_store_explicit(&n->routes, &local_sentinel, memory_order_release);
/* Now we mark all the old routes obsolete */
for (uint i = 0; i < pos; i++)
if (updates[i].new.attrs)
updates[i].old->flags |= REF_OBSOLETE;
/* Wait for readers */
synchronize_rcu();
/* And now we go backwards to keep the list properly linked */
struct rte_storage *next = NULL;
for (int i = pos - 1; i >= 0; i--)
{
struct rte_storage *this;
if (updates[i].new.attrs)
{
rte *new = &updates[i].new;
new->lastmod = current_time();
new->id = hmap_first_zero(&tab->id_map);
hmap_set(&tab->id_map, new->id);
this = updates[i].new_stored = rte_store(new, ni, tab);
}
else
this = updates[i].old;
atomic_store_explicit(&this->next, next, memory_order_release);
next = this;
}
/* Add behind the sentinel */
atomic_store_explicit(&local_sentinel.next, next, memory_order_release);
/* Call the pre-comparison hooks */
for (uint i = 0; i < pos; i++)
if (updates[i].new_stored)
{
/* Not really an efficient way to compute this */
if (updates[i].old->rte.src->owner->rte_recalculate)
updates[i].old->rte.src->owner->rte_recalculate(tab, n, updates[i].new_stored, updates[i].old, old_best);
}
/* Find the new best route */
uint best_pos = 0;
struct rte_storage *new_best = updates[0].new_stored ?: updates[0].old;
for (uint i = 1; i < pos; i++)
{
struct rte_storage *s = updates[i].new_stored ?: updates[i].old;
if (rte_better(&s->rte, &new_best->rte))
{
best_pos = i;
new_best = s;
}
}
/* Relink the new best route to the first position */
struct rte_storage * _Atomic *best_prev;
if (best_pos)
best_prev = &(updates[best_pos-1].new_stored ?: updates[best_pos-1].old)->next;
else
best_prev = &local_sentinel.next;
/* Unlink from the original place */
atomic_store_explicit(best_prev,
atomic_load_explicit(&new_best->next, memory_order_relaxed),
memory_order_release);
/* Link out */
atomic_store_explicit(&new_best->next,
atomic_load_explicit(&local_sentinel.next, memory_order_relaxed),
memory_order_release);
/* Now we have to announce the routes the right way, to not cause any
* strange problems with consistency. */
ASSERT_DIE(updates[0].old == old_best);
/* Find new best route original position */
uint nbpos = ~0;
for (uint i=0; i<count; i++)
if ((updates[i].new_stored == new_best) || (updates[i].old == new_best))
{
nbpos = i;
break;
}
ASSERT_DIE(~nbpos);
const char *best_indicator[2][2] = {
{ "autoupdated", "autoupdated [-best]" },
{ "autoupdated [+best]", "autoupdated [best]" }
};
/* Best both updated and promoted: announce it first */
if (nbpos && updates[nbpos].new_stored)
{
rt_rte_trace_in(D_ROUTES, updates[nbpos].new.sender->req, &updates[nbpos].new,
best_indicator[1][0]);
rte_announce(tab, ni, n,
&updates[nbpos].new_stored->rte, &updates[nbpos].old->rte,
&new_best->rte, &old_best->rte);
}
else
nbpos = 0;
uint total = 0;
/* Announce the changes */
for (uint i=0; i<count; i++)
{
/* Not changed at all */
if (!updates[i].new_stored)
continue;
/* Already announced */
if (nbpos && (i == nbpos))
continue;
_Bool nb = (new_best->rte.src == updates[i].new.src), ob = (i == 0);
rt_rte_trace_in(D_ROUTES, updates[i].new.sender->req, &updates[i].new, best_indicator[nb][ob]);
rte_announce(tab, ni, n,
&updates[i].new_stored->rte, &updates[i].old->rte,
&new_best->rte, (!nbpos && !i) ? &old_best->rte : &new_best->rte);
total++;
}
/* Now we can finally release the changes back into the table */
atomic_store_explicit(&n->routes, new_best, memory_order_release);
return total;
}
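/*
 * Summary of the lockless update protocol used above (for orientation):
 *
 *	1. point n->routes at a local obsolete-flagged sentinel,
 *	2. flag all the replaced routes REF_OBSOLETE,
 *	3. synchronize_rcu() so that no reader still walks the old list,
 *	4. rebuild the route list backwards from the updated routes,
 *	5. hook the rebuilt list behind the sentinel and promote the new
 *	   best route to its head,
 *	6. publish the result by storing the new best into n->routes.
 *
 * A reader inside RT_READ() thus sees either the complete old list or the
 * complete new one, and restarts if it trips over an obsolete route.
 */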
static void
rt_nhu_uncork(void *_tab)
{
RT_LOCKED((rtable *) _tab, tab)
{
ASSERT_DIE(tab->nhu_corked);
ASSERT_DIE(tab->nhu_state == 0);
/* Reset the state */
tab->nhu_state = tab->nhu_corked;
tab->nhu_corked = 0;
rt_trace(tab, D_STATES, "Next hop updater uncorked");
ev_send_loop(tab->loop, tab->nhu_event);
}
}
static void
rt_next_hop_update(void *_tab)
{
RT_LOCK((rtable *) _tab, tab);
ASSERT_DIE(birdloop_inside(tab->loop));
if (tab->nhu_corked)
return;
if (!tab->nhu_state)
return;
/* Check corkedness */
if (rt_cork_check(tab->nhu_uncork_event))
{
rt_trace(tab, D_STATES, "Next hop updater corked");
if (tab->nhu_state & NHU_RUNNING)
lfjour_announce_now(&tab->journal);
tab->nhu_corked = tab->nhu_state;
tab->nhu_state = 0;
return;
}
int max_feed = 32;
/* Initialize a new run */
if (tab->nhu_state == NHU_SCHEDULED)
{
tab->nhu_index = 0;
tab->nhu_state = NHU_RUNNING;
if (tab->flowspec_trie)
rt_flowspec_reset_trie(tab);
}
/* Walk the fib one net after another */
u32 bs = atomic_load_explicit(&tab->routes_block_size, memory_order_relaxed);
net *routes = atomic_load_explicit(&tab->routes, memory_order_relaxed);
for (; tab->nhu_index < bs; tab->nhu_index++)
{
net *n = &routes[tab->nhu_index];
struct rte_storage *s = NET_BEST_ROUTE(tab, n);
if (!s)
continue;
if (max_feed <= 0)
{
ev_send_loop(tab->loop, tab->nhu_event);
return;
}
TMP_SAVED
max_feed -= rt_next_hop_update_net(tab, RTE_GET_NETINDEX(&s->rte), n);
}
/* Finished NHU, cleanup */
rt_trace(tab, D_EVENTS, "NHU done, scheduling export timer");
/* State change:
* NHU_DIRTY -> NHU_SCHEDULED
* NHU_RUNNING -> NHU_CLEAN
*/
if ((tab->nhu_state &= NHU_SCHEDULED) == NHU_SCHEDULED)
ev_send_loop(tab->loop, tab->nhu_event);
}
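/*
 * Worked example of the final state change above, assuming the usual
 * encoding NHU_CLEAN = 0, NHU_SCHEDULED = 1, NHU_RUNNING = 2 and
 * NHU_DIRTY = 3: NHU_DIRTY & NHU_SCHEDULED = 1, so a run dirtied in
 * flight gets rescheduled, while NHU_RUNNING & NHU_SCHEDULED = 0
 * (NHU_CLEAN), so an undisturbed run simply finishes.
 */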
void
rt_new_default_table(struct symbol *s)
{
for (uint addr_type = 0; addr_type < NET_MAX; addr_type++)
if (s == new_config->def_tables[addr_type])
{
ASSERT_DIE(!s->table);
s->table = rt_new_table(s, addr_type);
return;
}
bug("Requested an unknown new default table: %s", s->name);
}
struct rtable_config *
rt_get_default_table(struct config *cf, uint addr_type)
{
struct symbol *ts = cf->def_tables[addr_type];
if (!ts)
return NULL;
if (!ts->table)
rt_new_default_table(ts);
return ts->table;
}
struct rtable_config *
rt_new_table(struct symbol *s, uint addr_type)
{
if (s->table)
cf_error("Duplicate configuration of table %s", s->name);
struct rtable_config *c = cfg_allocz(sizeof(struct rtable_config));
if (s == new_config->def_tables[addr_type])
s->table = c;
else
cf_define_symbol(new_config, s, SYM_TABLE, table, c);
c->name = s->name;
c->addr_type = addr_type;
c->gc_threshold = 1000;
c->gc_period = (uint) -1; /* set in rt_postconfig() */
c->cork_threshold.low = 1024;
c->cork_threshold.high = 8192;
c->export_settle = (struct settle_config) {
.min = 1 MS,
.max = 100 MS,
};
c->export_rr_settle = (struct settle_config) {
.min = 100 MS,
.max = 3 S,
};
c->debug = new_config->table_default_debug;
add_tail(&new_config->tables, &c->n);
/* First table of each type is kept as default */
if (! new_config->def_tables[addr_type])
new_config->def_tables[addr_type] = s;
return c;
}
/**
* rt_lock_table - lock a routing table
* @r: routing table to be locked
*
* Lock a routing table, because it's in use by a protocol,
* preventing it from being freed when it gets undefined in a new
* configuration.
*/
void
rt_lock_table_priv(struct rtable_private *r, const char *file, uint line)
{
rt_trace(r, D_STATES, "Locked at %s:%d", file, line);
r->use_count++;
}
/**
* rt_unlock_table - unlock a routing table
* @r: routing table to be unlocked
*
* Unlock a routing table formerly locked by rt_lock_table(),
* that is decrease its use count and delete it if it's scheduled
* for deletion by configuration changes.
*/
void
rt_unlock_table_priv(struct rtable_private *r, const char *file, uint line)
{
rt_trace(r, D_STATES, "Unlocked at %s:%d", file, line);
if (!--r->use_count && r->deleted)
/* Stop the service thread to finish this up */
ev_send(&global_event_list, ev_new_init(r->rp, rt_shutdown, r));
}
static void
rt_shutdown(void *tab_)
{
struct rtable_private *r = tab_;
birdloop_stop(r->loop, rt_delete, r);
}
static void
rt_delete(void *tab_)
{
ASSERT_DIE(birdloop_inside(&main_birdloop));
/* We assume that nobody holds the table reference now as use_count is zero.
* Anyway the last holder may still hold the lock. Therefore we lock and
* unlock it the last time to be sure that nobody is there. */
struct rtable_private *tab = RT_LOCK_SIMPLE((rtable *) tab_);
struct config *conf = tab->deleted;
DOMAIN(rtable) dom = tab->lock;
RT_UNLOCK_SIMPLE(RT_PUB(tab));
/* Everything is freed by freeing the loop */
birdloop_free(tab->loop);
config_del_obstacle(conf);
/* Also drop the domain */
DOMAIN_FREE(rtable, dom);
}
static void
rt_check_cork_low(struct rtable_private *tab)
{
if (!tab->cork_active)
return;
if (tab->deleted || (lfjour_pending_items(&tab->journal) < tab->cork_threshold.low))
{
tab->cork_active = 0;
rt_cork_release();
rt_trace(tab, D_STATES, "Uncorked");
}
}
static void
rt_check_cork_high(struct rtable_private *tab)
{
if (!tab->deleted && !tab->cork_active && (lfjour_pending_items(&tab->journal) >= tab->cork_threshold.high))
{
tab->cork_active = 1;
rt_cork_acquire();
lfjour_schedule_cleanup(&tab->journal);
// rt_export_used(&tab->exporter, tab->name, "corked");
rt_trace(tab, D_STATES, "Corked");
}
}
static int
rt_reconfigure(struct rtable_private *tab, struct rtable_config *new, struct rtable_config *old)
{
if ((new->addr_type != old->addr_type) ||
(new->sorted != old->sorted) ||
(new->trie_used != old->trie_used))
return 0;
DBG("\t%s: same\n", new->name);
new->table = RT_PUB(tab);
tab->name = new->name;
tab->config = new;
tab->debug = new->debug;
if (tab->hostcache)
tab->hostcache->req.trace_routes = new->debug;
WALK_TLIST(rt_flowspec_link, ln, &tab->flowspec_links)
ln->req.trace_routes = new->debug;
tab->cork_threshold = new->cork_threshold;
if (new->cork_threshold.high != old->cork_threshold.high)
rt_check_cork_high(tab);
if (new->cork_threshold.low != old->cork_threshold.low)
rt_check_cork_low(tab);
return 1;
}
static struct rtable_config *
rt_find_table_config(struct config *cf, char *name)
{
struct symbol *sym = cf_find_symbol(cf, name);
return (sym && (sym->class == SYM_TABLE)) ? sym->table : NULL;
}
/**
* rt_commit - commit new routing table configuration
* @new: new configuration
* @old: original configuration or %NULL if it's boot time config
*
* Scan differences between @old and @new configuration and modify
* the routing tables according to these changes. If @new defines a
* previously unknown table, create it, if it omits a table existing
* in @old, schedule it for deletion (it gets deleted when all protocols
* disconnect from it by calling rt_unlock_table()), if it exists
* in both configurations, leave it unchanged.
*/
void
rt_commit(struct config *new, struct config *old)
{
struct rtable_config *o, *r;
DBG("rt_commit:\n");
if (old)
{
WALK_LIST(o, old->tables)
{
_Bool ok;
RT_LOCKED(o->table, tab)
{
r = tab->deleted ? NULL : rt_find_table_config(new, o->name);
ok = r && !new->shutdown && rt_reconfigure(tab, r, o);
}
if (ok)
continue;
birdloop_enter(o->table->loop);
RT_LOCKED(o->table, tab)
{
DBG("\t%s: deleted\n", o->name);
tab->deleted = old;
config_add_obstacle(old);
rt_lock_table(tab);
rt_check_cork_low(tab);
if (tab->hcu_event)
{
if (ev_get_list(tab->hcu_event) == &rt_cork.queue)
ev_postpone(tab->hcu_event);
rt_stop_export_locked(tab, tab->hostcache->req.hook);
}
rt_unlock_table(tab);
}
birdloop_leave(o->table->loop);
}
}
WALK_LIST(r, new->tables)
if (!r->table)
{
r->table = rt_setup(rt_table_pool, r);
DBG("\t%s: created\n", r->name);
add_tail(&routing_tables, &r->table->n);
}
DBG("\tdone\n");
}
static void
rt_feed_done(struct rt_export_hook *c)
{
c->event->hook = rt_export_hook;
rt_set_export_state(c, BIT32_ALL(TES_FEEDING), TES_READY);
rt_send_export_event(c);
}
static enum {
RT_FEED_OK = 0,
RT_FEED_EMPTY = 1,
RT_FEED_OVERFLOW = 2,
RT_FEED_REJECTED = 3,
}
rt_feed_index(struct rt_export_hook *h, uint index)
{
struct rt_export_request *req = h->req;
const net_addr *a;
uint cnt;
const rte **feed;
struct rt_pending_export *first, *last;
{
RT_READ(h->tab, tr);
/* Get the route block from the table */
net *routes = atomic_load_explicit(&tr->t->routes, memory_order_acquire);
u32 bs = atomic_load_explicit(&tr->t->routes_block_size, memory_order_acquire);
/* Nothing to actually feed */
if (index >= bs)
return RT_FEED_OVERFLOW;
/* Validate the network structure */
net *n = &routes[index];
struct rte_storage *s = NET_READ_BEST_ROUTE(tr, n);
last = atomic_load_explicit(&n->last, memory_order_acquire);
if (s)
a = s->rte.net;
else if (!last)
return RT_FEED_EMPTY;
else if (last->old)
a = last->old->net;
else
RT_READ_RETRY(tr);
/* Prefilter based on net_addr */
if (!rt_prefilter_net(&req->prefilter, a))
{
req_trace(req, D_ROUTES, "Feeding %N rejected by prefilter", a);
return RT_FEED_REJECTED;
}
/* Obtain the actual feed */
cnt = rte_feed_count(tr, n);
if (cnt)
{
feed = alloca(cnt * sizeof *feed);
rte_feed_obtain(tr, n, feed, cnt);
}
/* Check journal pointers; retry if changed */
first = atomic_load_explicit(&n->first, memory_order_acquire);
if (last != atomic_load_explicit(&n->last, memory_order_acquire))
RT_READ_RETRY(tr);
}
if (cnt)
{
if (req->export_bulk)
{
/* Call export_bulk preferably */
req->export_bulk(req, a, first, last, feed, cnt);
return RT_FEED_OK;
}
else
{
/* This by definition exports best only, yes, it's stupid, I know */
struct rt_pending_export rpe = { .new = feed[0], .new_best = feed[0], };
req->export_one(req, a, &rpe);
}
}
/* Unless export_bulk was called, the exporters didn't have enough
* information about seen journal items */
if (req->mark_seen)
RPE_WALK(first, rpe, NULL)
{
req->mark_seen(req, rpe);
if (rpe == last) break;
}
else
rpe_mark_seen_all(h, first, NULL, NULL);
return RT_FEED_OK;
}
/**
* rt_feed_by_fib - advertise all routes to a channel by walking a fib
* @c: channel to be fed
*
* This function performs one pass of advertisement of routes to a channel that
* is in the ES_FEEDING state. It is called by the protocol code as long as it
* has something to do. (We avoid transferring all the routes in single pass in
* order not to monopolize CPU time.)
*/
static void
rt_feed_by_fib(void *data)
{
struct rt_export_hook *c = data;
ASSERT(atomic_load_explicit(&c->export_state, memory_order_relaxed) == TES_FEEDING);
uint count = 0;
for (; (&main_birdloop == this_birdloop) ?
(count < 4096) :
task_still_in_limit();
c->feed_index++)
{
switch (rt_feed_index(c, c->feed_index))
{
case RT_FEED_REJECTED:
case RT_FEED_EMPTY:
break;
case RT_FEED_OK:
count++;
break;
case RT_FEED_OVERFLOW:
rt_feed_done(c);
return;
}
}
rt_send_export_event(c);
}
static void
rt_feed_equal(void *data)
{
struct rt_export_hook *c = data;
ASSERT_DIE(atomic_load_explicit(&c->export_state, memory_order_relaxed) == TES_FEEDING);
ASSERT_DIE(c->req->prefilter.mode == TE_ADDR_EQUAL);
struct netindex *ni = net_find_index(c->tab->netindex, c->req->prefilter.addr);
if (ni)
rt_feed_index(c, ni->index);
rt_feed_done(c);
}
static void
rt_feed_for(void *data)
{
struct rt_export_hook *c = data;
ASSERT_DIE(atomic_load_explicit(&c->export_state, memory_order_relaxed) == TES_FEEDING);
ASSERT_DIE(c->req->prefilter.mode == TE_ADDR_FOR);
u32 index;
{
RT_READ(c->tab, tr);
net *routes = atomic_load_explicit(&tr->t->routes, memory_order_acquire);
net *n = net_route(tr, c->req->prefilter.addr);
if (!n)
{
rt_feed_done(c);
return;
}
index = (n - routes);
}
rt_feed_index(c, index);
rt_feed_done(c);
}
/*
* Import table
*/
void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *net,
struct rt_pending_export *first, struct rt_pending_export *last,
const rte **feed, uint count)
{
SKIP_BACK_DECLARE(struct channel, c, reload_req, req);
for (uint i=0; i<count; i++)
if (feed[i]->sender == c->in_req.hook)
{
/* Strip the table-specific information */
rte new = rte_init_from(feed[i]);
/* Strip the later attribute layers */
new.attrs = ea_strip_to(new.attrs, BIT32_ALL(EALS_PREIMPORT));
/* And reload the route */
rte_update(c, net, &new, new.src);
}
rpe_mark_seen_all(req->hook, first, last, NULL);
}
/*
* Hostcache
*/
static inline u32
hc_hash(ip_addr a, rtable *dep)
{
return ipa_hash(a) ^ ptr_hash(dep);
}
static inline void
hc_insert(struct hostcache *hc, struct hostentry *he)
{
uint k = he->hash_key >> hc->hash_shift;
he->next = hc->hash_table[k];
hc->hash_table[k] = he;
}
static inline void
hc_remove(struct hostcache *hc, struct hostentry *he)
{
struct hostentry **hep;
uint k = he->hash_key >> hc->hash_shift;
for (hep = &hc->hash_table[k]; *hep != he; hep = &(*hep)->next);
*hep = he->next;
}
#define HC_DEF_ORDER 10
#define HC_HI_MARK *4
#define HC_HI_STEP 2
#define HC_HI_ORDER 16 /* Must be at most 16 */
#define HC_LO_MARK /5
#define HC_LO_STEP 2
#define HC_LO_ORDER 10
static void
hc_alloc_table(struct hostcache *hc, pool *p, unsigned order)
{
uint hsize = 1 << order;
hc->hash_order = order;
hc->hash_shift = 32 - order;
hc->hash_max = (order >= HC_HI_ORDER) ? ~0U : (hsize HC_HI_MARK);
hc->hash_min = (order <= HC_LO_ORDER) ? 0U : (hsize HC_LO_MARK);
hc->hash_table = mb_allocz(p, hsize * sizeof(struct hostentry *));
}
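/*
 * Worked example of the resize marks: HC_DEF_ORDER 10 gives 1024 buckets,
 * hash_max = 1024 * 4 = 4096 and hash_min = 0 (order 10 being the lower
 * bound), so the table grows by HC_HI_STEP = 2 orders once it holds more
 * than 4096 hostentries; at order 12, hash_min = 4096 / 5 = 819 triggers
 * shrinking back.
 */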
static void
hc_resize(struct hostcache *hc, pool *p, unsigned new_order)
{
struct hostentry **old_table = hc->hash_table;
struct hostentry *he, *hen;
uint old_size = 1 << hc->hash_order;
uint i;
hc_alloc_table(hc, p, new_order);
for (i = 0; i < old_size; i++)
for (he = old_table[i]; he != NULL; he=hen)
{
hen = he->next;
hc_insert(hc, he);
}
mb_free(old_table);
}
static struct hostentry *
hc_new_hostentry(struct hostcache *hc, pool *p, ip_addr a, ip_addr ll, rtable *dep, unsigned k)
{
struct hostentry *he = sl_alloc(hc->slab);
*he = (struct hostentry) {
.addr = a,
.link = ll,
.tab = dep,
.hash_key = k,
};
add_tail(&hc->hostentries, &he->ln);
hc_insert(hc, he);
hc->hash_items++;
if (hc->hash_items > hc->hash_max)
hc_resize(hc, p, hc->hash_order + HC_HI_STEP);
return he;
}
static void
hc_delete_hostentry(struct hostcache *hc, pool *p, struct hostentry *he)
{
ea_free(he->src);
rem_node(&he->ln);
hc_remove(hc, he);
sl_free(he);
hc->hash_items--;
if (hc->hash_items < hc->hash_min)
hc_resize(hc, p, hc->hash_order - HC_LO_STEP);
}
static void
hc_notify_dump_req(struct rt_export_request *req)
{
debug(" Table %s (%p)\n", req->name, req);
}
static void
hc_notify_log_state_change(struct rt_export_request *req, u8 state)
{
SKIP_BACK_DECLARE(struct hostcache, hc, req, req);
rt_trace(hc->tab, D_STATES, "HCU Export state changed to %s", rt_export_state_name(state));
}
static void
hc_notify_export_one(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
{
SKIP_BACK_DECLARE(struct hostcache, hc, req, req);
RT_LOCKED(hc->tab, tab)
if (ev_active(tab->hcu_event) || !trie_match_net(hc->trie, net))
{
if (req->trace_routes & D_ROUTES)
log(L_TRACE "%s < boring %N (%u)",
req->name, net, NET_TO_INDEX(net)->index);
/* No interest in this update, mark seen only */
rpe_mark_seen_all(req->hook, first, NULL, NULL);
}
else
{
/* This net may affect some hostentries, check the actual change */
const rte *o = RTE_VALID_OR_NULL(first->old_best);
const rte *new_best = first->new_best;
RPE_WALK(first, rpe, NULL)
{
rpe_mark_seen(req->hook, rpe);
new_best = rpe->new_best;
}
if (req->trace_routes & D_ROUTES)
log(L_TRACE "%s < checking %N (%u)",
req->name, net, NET_TO_INDEX(net)->index);
/* Yes, something has actually changed. Do the hostcache update. */
if ((o != RTE_VALID_OR_NULL(new_best))
&& (atomic_load_explicit(&req->hook->export_state, memory_order_acquire) == TES_READY)
&& !ev_active(tab->hcu_event))
{
if (req->trace_routes & D_EVENTS)
log(L_TRACE "%s requesting HCU");
ev_send_loop(tab->loop, tab->hcu_event);
}
}
}
static void
rt_init_hostcache(struct rtable_private *tab)
{
struct hostcache *hc = mb_allocz(tab->rp, sizeof(struct hostcache));
init_list(&hc->hostentries);
hc->hash_items = 0;
hc_alloc_table(hc, tab->rp, HC_DEF_ORDER);
hc->slab = sl_new(tab->rp, sizeof(struct hostentry));
hc->lp = lp_new(tab->rp);
hc->trie = f_new_trie(hc->lp, 0);
hc->tab = RT_PUB(tab);
tab->hcu_event = ev_new_init(tab->rp, rt_update_hostcache, tab);
tab->hcu_uncork_event = ev_new_init(tab->rp, rt_update_hostcache, tab);
tab->hostcache = hc;
ev_send_loop(tab->loop, tab->hcu_event);
}
static void
rt_free_hostcache(struct rtable_private *tab)
{
struct hostcache *hc = tab->hostcache;
node *n;
WALK_LIST(n, hc->hostentries)
{
SKIP_BACK_DECLARE(struct hostentry, he, ln, n);
ea_free(he->src);
if (!lfuc_finished(&he->uc))
log(L_ERR "Hostcache is not empty in table %s", tab->name);
}
/* Freed automagically by the resource pool
rfree(hc->slab);
rfree(hc->lp);
mb_free(hc->hash_table);
mb_free(hc);
*/
}
static int
if_local_addr(ip_addr a, struct iface *i)
{
struct ifa *b;
WALK_LIST(b, i->addrs)
if (ipa_equal(a, b->ip))
return 1;
return 0;
}
u32
rt_get_igp_metric(const rte *rt)
{
eattr *ea = ea_find(rt->attrs, "igp_metric");
if (ea)
return ea->u.data;
if (rt_get_source_attr(rt) == RTS_DEVICE)
return 0;
if (rt->src->owner->class->rte_igp_metric)
return rt->src->owner->class->rte_igp_metric(rt);
return IGP_METRIC_UNKNOWN;
}
static int
rt_update_hostentry(struct rtable_private *tab, struct hostentry *he)
{
ea_list *old_src = he->src;
int direct = 0;
int pxlen = 0;
/* Signal work in progress */
ASSERT_DIE((atomic_fetch_add_explicit(&he->version, 1, memory_order_acq_rel) & 1) == 0);
/* Reset the hostentry */
he->src = NULL;
he->nexthop_linkable = 0;
he->igp_metric = 0;
RT_READ_LOCKED(tab, tr);
net_addr he_addr;
net_fill_ip_host(&he_addr, he->addr);
net *n = net_route(tr, &he_addr);
/*
log(L_DEBUG "rt_update_hostentry(%s %p) got net_route(%N) = %p",
tab->name, he, &he_addr, n);
*/
if (n)
{
struct rte_storage *e = NET_BEST_ROUTE(tab, n);
ea_list *a = e->rte.attrs;
u32 pref = rt_get_preference(&e->rte);
NET_WALK_ROUTES(tab, n, ep, ee)
if (rte_is_valid(&ee->rte) &&
(rt_get_preference(&ee->rte) >= pref) &&
ea_find(ee->rte.attrs, &ea_gen_hostentry))
{
/* Recursive route should not depend on another recursive route */
log(L_WARN "Next hop address %I resolvable through recursive route for %N",
he->addr, ee->rte.net);
goto done;
}
pxlen = e->rte.net->pxlen;
eattr *nhea = ea_find(a, &ea_gen_nexthop);
ASSERT_DIE(nhea);
struct nexthop_adata *nhad = (void *) nhea->u.ptr;
if (NEXTHOP_IS_REACHABLE(nhad))
NEXTHOP_WALK(nh, nhad)
if (ipa_zero(nh->gw))
{
if (if_local_addr(he->addr, nh->iface))
{
/* The host address is a local address, this is not valid */
log(L_WARN "Next hop address %I is a local address of iface %s",
he->addr, nh->iface->name);
goto done;
}
direct++;
}
he->src = ea_ref(a);
he->nexthop_linkable = !direct;
he->igp_metric = rt_get_igp_metric(&e->rte);
if ((old_src != he->src) && (tab->debug & D_ROUTES))
if (ipa_zero(he->link))
log(L_TRACE "%s: Hostentry %p for %I in %s resolved via %N (%uG)",
tab->name, he, he->addr, he->tab->name, e->rte.net, e->rte.src->global_id);
else
log(L_TRACE "%s: Hostentry %p for %I %I in %s resolved via %N (%uG)",
tab->name, he, he->addr, he->link, he->tab->name, e->rte.net, e->rte.src->global_id);
}
else if (old_src && (tab->debug & D_ROUTES))
if (ipa_zero(he->link))
log(L_TRACE "%s: Hostentry %p for %I in %s not resolved",
tab->name, he, he->addr, he->tab->name);
else
log(L_TRACE "%s: Hostentry %p for %I %I in %s not resolved",
tab->name, he, he->addr, he->link, he->tab->name);
done:
/* Signal work done and wait for readers */
ASSERT_DIE((atomic_fetch_add_explicit(&he->version, 1, memory_order_acq_rel) & 1) == 1);
synchronize_rcu();
/* Add a prefix range to the trie */
trie_add_prefix(tab->hostcache->trie, &he_addr, pxlen, he_addr.pxlen);
ea_free(old_src);
return old_src != he->src;
}
static void
rt_update_hostcache(void *data)
{
rtable **nhu_pending;
RT_LOCKED((rtable *) data, tab)
{
struct hostcache *hc = tab->hostcache;
/* Finish initialization */
if (!hc->req.name)
{
hc->req = (struct rt_export_request) {
.name = mb_sprintf(tab->rp, "%s.hcu.notifier", tab->name),
.list = birdloop_event_list(tab->loop),
.pool = birdloop_pool(tab->loop),
.trace_routes = tab->config->debug,
.dump_req = hc_notify_dump_req,
.log_state_change = hc_notify_log_state_change,
.export_one = hc_notify_export_one,
};
rt_table_export_start_locked(tab, &hc->req);
}
/* Shutdown shortcut */
if (!hc->req.hook)
return;
if (rt_cork_check(tab->hcu_uncork_event))
{
rt_trace(tab, D_STATES, "Hostcache update corked");
return;
}
/* Destination schedule map */
nhu_pending = tmp_allocz(sizeof(rtable *) * rtable_max_id);
struct hostentry *he;
node *n, *x;
/* Reset the trie */
lp_flush(hc->lp);
hc->trie = f_new_trie(hc->lp, 0);
WALK_LIST_DELSAFE(n, x, hc->hostentries)
{
he = SKIP_BACK(struct hostentry, ln, n);
if (lfuc_finished(&he->uc))
{
hc_delete_hostentry(hc, tab->rp, he);
continue;
}
if (rt_update_hostentry(tab, he))
nhu_pending[he->tab->id] = he->tab;
}
}
for (uint i=0; i<rtable_max_id; i++)
if (nhu_pending[i])
RT_LOCKED(nhu_pending[i], dst)
rt_schedule_nhu(dst);
}
static struct hostentry *
rt_get_hostentry(struct rtable_private *tab, ip_addr a, ip_addr ll, rtable *dep)
{
ip_addr link = ipa_zero(ll) ? a : ll;
struct hostentry *he;
if (!tab->hostcache)
rt_init_hostcache(tab);
u32 k = hc_hash(a, dep);
struct hostcache *hc = tab->hostcache;
for (he = hc->hash_table[k >> hc->hash_shift]; he != NULL; he = he->next)
if (ipa_equal(he->addr, a) && ipa_equal(he->link, link) && (he->tab == dep))
break;
if (he)
{
if (tab->debug & D_ROUTES)
if (ipa_zero(ll))
log(L_TRACE "%s: Found existing hostentry %p for %I in %s",
tab->name, he, a, he->tab->name);
else
log(L_TRACE "%s: Found existing hostentry %p for %I %I in %s",
tab->name, he, a, ll, he->tab->name);
}
else
{
he = hc_new_hostentry(hc, tab->rp, a, link, dep, k);
he->owner = RT_PUB(tab);
rt_update_hostentry(tab, he);
}
/* Keep the hostentry alive until this task ends */
lfuc_lock_revive(&he->uc);
lfuc_unlock(&he->uc, birdloop_event_list(tab->loop), tab->hcu_event);
return he;
}
rte *
krt_export_net(struct channel *c, const net_addr *a, linpool *lp)
{
uint count;
const rte **feed;
struct rte_storage *best;
{
RT_READ(c->table, tr);
struct netindex *i = net_find_index(tr->t->netindex, a);
if (!i) return NULL;
net *net = net_find(tr, i);
if (!net) return NULL;
best = NET_READ_BEST_ROUTE(tr, net);
if (!best) return NULL;
if (!bmap_test(&c->export_map, best->rte.id)) return NULL;
if (c->ra_mode == RA_MERGED)
{
count = rte_feed_count(tr, net);
if (!count)
return NULL;
feed = alloca(count * sizeof(rte *));
rte_feed_obtain(tr, net, feed, count);
}
}
if (c->ra_mode == RA_MERGED)
return rt_export_merged(c, a, feed, count, lp, 1);
const struct filter *filter = c->out_filter;
static _Thread_local rte rt;
rt = best->rte;
if (!rte_is_valid(&rt))
return NULL;
if (filter == FILTER_REJECT)
return NULL;
/* We could run krt_preexport() here, but it is already handled by krt_is_installed() */
if (filter == FILTER_ACCEPT)
return &rt;
if (f_run(filter, &rt, FF_SILENT) > F_ACCEPT)
return NULL;
return &rt;
}
/*
* Documentation for functions declared inline in route.h
*/
#if 0
/**
* net_find - find a network entry
* @tab: a routing table
* @addr: address of the network
*
* net_find() looks up the given network in routing table @tab and
* returns a pointer to its &net entry or %NULL if no such network
* exists.
*/
static inline net *net_find(rtable *tab, net_addr *addr)
{ DUMMY; }
/**
* rte_cow - copy a route for writing
* @r: a route entry to be copied
*
* rte_cow() takes a &rte and prepares it for modification. The exact action
* taken depends on the flags of the &rte -- if it's a temporary entry, it's
* just returned unchanged, else a new temporary entry with the same contents
* is created.
*
* The primary use of this function is inside the filter machinery -- when
* a filter wants to modify &rte contents (to change the preference or to
* attach another set of attributes), it must ensure that the &rte is not
* shared with anyone else (and especially that it isn't stored in any routing
* table).
*
* Result: a pointer to the new writable &rte.
*/
static inline rte * rte_cow(rte *r)
{ DUMMY; }
#endif