/*
 *	BIRD Internet Routing Daemon -- Routing Table
 *
 *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
 *	(c) 2019--2024 Maria Matejka <mq@jmq.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */

#ifndef _BIRD_ROUTE_H_
#define _BIRD_ROUTE_H_

#include "lib/lists.h"
#include "lib/tlists.h"
#include "lib/lockfree.h"
#include "lib/bitmap.h"
#include "lib/resource.h"
#include "lib/net.h"
#include "lib/netindex.h"
#include "lib/obstacle.h"
#include "lib/type.h"
#include "lib/fib.h"
#include "lib/route.h"
#include "lib/event.h"
#include "lib/rcu.h"
#include "lib/io-loop.h"
#include "lib/settle.h"

#include "filter/data.h"

#include "conf/conf.h"

#include <stdatomic.h>

struct ea_list;
struct adata;
struct protocol;
struct proto;
struct channel;
struct rte_src;
struct hostcache;
struct symbol;
struct timer;
struct filter;
struct f_trie;
struct f_trie_walk_state;
struct cli;

/*
 * Master Routing Tables. Generally speaking, each of them contains a FIB
 * with each entry pointing to a list of route entries representing routes
 * to given network (with the selected one at the head).
 *
 * Each of the RTE's contains variable data (the preference and protocol-dependent
 * metrics) and a pointer to a route attribute block common for many routes.
 *
 * It's guaranteed that there is at most one RTE for every (prefix,proto) pair.
 */

struct rtable_config {
  node n;
  char *name;
  union rtable *table;
  struct proto_config *krt_attached;	/* Kernel syncer attached to this table */
  uint addr_type;			/* Type of address data stored in table (NET_*) */
  uint gc_threshold;			/* Maximum number of operations before GC is run */
  uint gc_period;			/* Approximate time between two consecutive GC runs */
  u32 debug;				/* Debugging flags (D_*) */
  byte sorted;				/* Routes of network are sorted according to rte_better() */
  byte trie_used;			/* Rtable has attached trie */
  struct rt_cork_threshold {
    u64 low, high;
  } cork_threshold;			/* Cork threshold values */
  struct settle_config export_settle;	/* Export announcement settler */
  struct settle_config export_rr_settle;/* Export announcement settler config valid when any
					   route refresh is running */
  struct settle_config digest_settle;	/* Settle times for digests */
  struct rtable_config *roa_aux_table;	/* Auxiliary table config for ROA connections */
  struct rt_stream_config {
    struct rtable_config *src;
    void (*setup)(union rtable *);
    void (*stop)(union rtable *);
  } master;				/* Data source (this table is aux) */
};

/*
 * Route export journal
 *
 * The journal itself is held in struct rt_exporter.
 * Workflow:
 * (1) Initialize by rt_exporter_init()
 * (2) Push data by rt_exporter_push() (the export item is copied)
 * (3) Shutdown by rt_exporter_shutdown(), event is called after cleanup
 *
 * Subscribers:
 * (1) Initialize by rt_export_subscribe()
 * (2a) Get data by rt_export_get();
 * (2b) Release data after processing by rt_export_release()
 * (3) Request refeed by rt_export_refeed()
 * (4) Unsubscribe by rt_export_unsubscribe()
 */
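
/*
 * Illustrative only: a rough subscriber loop sketched from the workflow above,
 * using the macros and declarations found later in this file. The function and
 * request names are hypothetical, not part of the API.
 *
 *   static void my_export_walk(struct rt_export_request *req)
 *   {
 *     RT_EXPORT_WALK(req, u)
 *       switch (u->kind)
 *       {
 *	 case RT_EXPORT_UPDATE:	// u->update->new / u->update->old hold the route change
 *	   break;
 *	 case RT_EXPORT_FEED:	// u->feed->block[0 .. count_routes-1] holds a full net feed
 *	   break;
 *	 case RT_EXPORT_STOP:	// exporter is shutting down; unsubscribe
 *	   break;
 *       }
 *   }
 */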

struct rt_export_request {
  /* Formal name */
  char *name;

  /* Memory */
  pool *pool;

  /* State information */
  enum rt_export_state {
#define RT_EXPORT_STATES \
    DOWN, \
    FEEDING, \
    PARTIAL, \
    READY, \
    STOP, \

#define RT_EXPORT_STATES_ENUM_HELPER(p)	TES_##p,
    MACRO_FOREACH(RT_EXPORT_STATES_ENUM_HELPER, RT_EXPORT_STATES)
    TES_MAX
#undef RT_EXPORT_STATES_ENUM_HELPER
  } _Atomic export_state;
  btime last_state_change;

  /* Table feeding contraption */
  struct rt_export_feeder {
    /* Formal name */
    const char *name;

    /* Enlisting */
    struct rt_exporter * _Atomic exporter;
    DOMAIN(rtable) domain;		/* Lock this instead of RCU */

    /* Prefiltering, useful for more scenarios */
    struct rt_prefilter {
      /* Network prefilter mode (TE_ADDR_*) */
      enum {
	TE_ADDR_NONE = 0,	/* No address matching */
	TE_ADDR_EQUAL,		/* Exact query - show route <addr> */
	TE_ADDR_FOR,		/* Longest prefix match - show route for <addr> */
	TE_ADDR_IN,		/* Interval query - show route in <addr> */
	TE_ADDR_TRIE,		/* Query defined by trie */
	TE_ADDR_HOOK,		/* Query processed by supplied custom hook */
      } mode;

      union {
	const struct f_trie *trie;
	const net_addr *addr;
	int (*hook)(const struct rt_prefilter *, const net_addr *);
      };
    } prefilter;

#define TLIST_PREFIX rt_export_feeder
#define TLIST_TYPE struct rt_export_feeder
#define TLIST_ITEM n
#define TLIST_WANT_WALK
#define TLIST_WANT_ADD_TAIL

    /* Feeding itself */
    u32 feed_index;			/* Index of the feed in progress */
    u32 (*next_feed_index)(struct rt_export_feeder *, u32 try_this);
    struct rt_feeding_request {
      struct rt_feeding_request *next;	/* Next in request chain */
      void (*done)(struct rt_feeding_request *);/* Called when this refeed finishes */
      struct rt_prefilter prefilter;	/* Reload only matching nets */
      PACKED enum {
	RFRS_INACTIVE = 0,	/* Inactive request */
	RFRS_PENDING,		/* Request enqueued, do not touch */
	RFRS_RUNNING,		/* Request active, do not touch */
      } state;
    } *feeding, *feed_pending;
    TLIST_DEFAULT_NODE;
    u8 trace_routes;
  } feeder;

  /* Regular updates */
  struct bmap seq_map;			/* Which lfjour items are already processed */
  struct bmap feed_map;			/* Which nets were already fed (for initial feeding) */
  struct lfjour_recipient r;
  struct rt_export_union *cur;

  /* Statistics */
  struct rt_export_stats {
    u32 updates_received;		/* Number of route updates received */
    u32 withdraws_received;		/* Number of route withdraws received */
  } stats;

  /* Tracing */
  u8 trace_routes;
  void (*dump)(struct rt_export_request *req);
  void (*fed)(struct rt_export_request *req);
};

#include "lib/tlists.h"

struct rt_export_union {
  enum rt_export_kind {
    RT_EXPORT_STOP = 1,
    RT_EXPORT_FEED,
    RT_EXPORT_UPDATE,
  } kind;
  const struct rt_export_item {
    LFJOUR_ITEM_INHERIT(li);		/* Member of lockfree journal */
    char data[0];			/* Memcpy helper */
    const rte *new, *old;		/* Route update */
  } *update;
  const struct rt_export_feed {
    uint count_routes, count_exports;
    struct netindex *ni;
    rte *block;
    u64 *exports;
    char data[0];
  } *feed;
  struct rt_export_request *req;
};

struct rt_feed_retry {
  struct rcu_unwinder *u;
  void *feed_block;
  u32 feed_size;
  u32 feed_request;
};

struct rt_exporter {
  struct lfjour journal;		/* Journal for update keeping */
  TLIST_LIST(rt_export_feeder) feeders;	/* List of active feeder structures */
  bool _Atomic feeders_lock;		/* Spinlock for the above list */
  u8 trace_routes;			/* Debugging flags (D_*) */
  u8 net_type;				/* Which net this exporter provides */
  DOMAIN(rtable) domain;		/* Lock this instead of RCU */
  u32 _Atomic max_feed_index;		/* Stop feeding at this index */
  const char *name;			/* Name for logging */
  netindex_hash *netindex;		/* Table for net <-> id conversion */
  void (*stopped)(struct rt_exporter *);	/* Callback when exporter can stop */
  void (*cleanup_done)(struct rt_exporter *, u64 end);	/* Callback when cleanup has been done */
  struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rt_feed_retry *, u32, bool (*)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *, const struct rt_export_item *first);
  void (*feed_cleanup)(struct rt_exporter *, struct rt_export_feeder *);
};

extern struct rt_export_feed rt_feed_index_out_of_range;

/* Exporter API */
void rt_exporter_init(struct rt_exporter *, struct settle_config *);
struct rt_export_item *rt_exporter_push(struct rt_exporter *, const struct rt_export_item *);
struct rt_export_feed *rt_alloc_feed(struct rt_feed_retry *ur, uint routes, uint exports);
void rt_exporter_shutdown(struct rt_exporter *, void (*stopped)(struct rt_exporter *));

/* Standalone feeds */
void rt_feeder_subscribe(struct rt_exporter *, struct rt_export_feeder *);
void rt_feeder_unsubscribe(struct rt_export_feeder *);
void rt_export_refeed_feeder(struct rt_export_feeder *, struct rt_feeding_request *);

struct rt_export_feed *rt_export_next_feed(struct rt_export_feeder *);
#define RT_FEED_WALK(_feeder, _f) \
  for (const struct rt_export_feed *_f; _f = rt_export_next_feed(_feeder); ) \

static inline bool rt_export_feed_active(struct rt_export_feeder *f)
{ return !!atomic_load_explicit(&f->exporter, memory_order_acquire); }
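
/*
 * Illustrative only: how a standalone feeder might be driven, assuming a
 * hypothetical feeder already attached by rt_feeder_subscribe().
 *
 *   RT_FEED_WALK(&my_feeder, f)
 *     for (uint i = 0; i < f->count_routes; i++)
 *       { }	// inspect f->block[i]
 */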

/* Full blown exports */
void rtex_export_subscribe(struct rt_exporter *, struct rt_export_request *);
void rtex_export_unsubscribe(struct rt_export_request *);

const struct rt_export_union * rt_export_get(struct rt_export_request *);
void rt_export_release(const struct rt_export_union *);
void rt_export_retry_later(const struct rt_export_union *);
void rt_export_processed(struct rt_export_request *, u64);
void rt_export_refeed_request(struct rt_export_request *rer, struct rt_feeding_request *rfr);

static inline enum rt_export_state rt_export_get_state(struct rt_export_request *r)
{ return atomic_load_explicit(&r->export_state, memory_order_acquire); }
const char *rt_export_state_name(enum rt_export_state state);

static inline void rt_export_walk_cleanup(const struct rt_export_union **up)
{
  if (*up)
    rt_export_release(*up);
}

#define RT_EXPORT_WALK(_reader, _u)	\
  for (CLEANUP(rt_export_walk_cleanup) const struct rt_export_union *_u;\
      _u = rt_export_get(_reader);	\
      rt_export_release(_u))		\

/* Convenience common call to request refeed */
#define rt_export_refeed(h, r)	_Generic((h), \
    struct rt_export_feeder *: rt_export_refeed_feeder, \
    struct rt_export_request *: rt_export_refeed_request, \
    void *: bug)(h, r)

/* Subscription to regular table exports needs locking */
#define rt_export_subscribe(_t, _kind, f) do { \
  RT_LOCKED(_t, tp) { \
    rt_lock_table(tp); \
    rtex_export_subscribe(&tp->export_##_kind, f); \
  }} while (0) \

#define rt_export_unsubscribe(_kind, _fx) do { \
  struct rt_export_request *_f = _fx; \
  struct rt_exporter *e = atomic_load_explicit(&_f->feeder.exporter, memory_order_acquire); \
  RT_LOCKED(SKIP_BACK(rtable, export_##_kind, e), _tp) { \
    rtex_export_unsubscribe(_f); \
    rt_unlock_table(_tp); \
  }} while (0) \

#define RT_EXPORT_RETRY_ANCHOR(ur, u) \
  struct rt_feed_retry ur = { \
    .feed_block = tmp_alloc(512), \
    .feed_size = 512, \
  }; \
  RCU_ANCHOR(u); \
  ur.u = u; \
  if (ur.feed_request > ur.feed_size) \
  { \
    rcu_read_unlock(); \
    ur.feed_size = ur.feed_request; \
    /* allocate a little bit more just for good measure */ \
    ur.feed_block = tmp_alloc((ur.feed_request * 3) / 2); \
    rcu_read_lock(); \
  } \

static inline int rt_prefilter_net(const struct rt_prefilter *p, const net_addr *n)
{
  switch (p->mode)
  {
    case TE_ADDR_NONE:	return 1;
    case TE_ADDR_IN:	return net_in_netX(n, p->addr);
    case TE_ADDR_EQUAL:	return net_equal(n, p->addr);
    case TE_ADDR_FOR:	return net_in_netX(p->addr, n);
    case TE_ADDR_TRIE:	return trie_match_net(p->trie, n);
    case TE_ADDR_HOOK:	return p->hook(p, n);
  }

  bug("Crazy prefilter application attempt failed wildly.");
}
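
/*
 * Illustrative only: a prefilter restricting a feeder to a longest-prefix-match
 * query ("show route for <addr>"); the net_addr variable is hypothetical.
 *
 *   struct rt_prefilter pf = {
 *     .mode = TE_ADDR_FOR,
 *     .addr = &some_net_addr,
 *   };
 */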

static inline bool
rt_net_is_feeding_feeder(struct rt_export_feeder *ref, const net_addr *n)
{
  if (!rt_prefilter_net(&ref->prefilter, n))
    return 0;

  if (!ref->feeding)
    return 1;

  for (struct rt_feeding_request *rfr = ref->feeding; rfr; rfr = rfr->next)
    if (rt_prefilter_net(&rfr->prefilter, n))
      return 1;

  return 0;
}

static inline bool
rt_net_is_feeding_request(struct rt_export_request *req, const net_addr *n)
{
  struct netindex *ni = NET_TO_INDEX(n);
  switch (rt_export_get_state(req))
  {
    case TES_PARTIAL:
    case TES_FEEDING:
      break;

    default:
      return 0;
  }

  /* Already fed */
  if (bmap_test(&req->feed_map, ni->index))
    return 0;

  return rt_net_is_feeding_feeder(&req->feeder, n);
}

#define rt_net_is_feeding(h, n)	_Generic((h), \
    struct rt_export_feeder *: rt_net_is_feeding_feeder, \
    struct rt_export_request *: rt_net_is_feeding_request, \
    void *: bug)(h, n)

/*
 * The original rtable
 *
 * To be kept as is for now until we refactor the new structures out of BGP Attrs.
 */

struct rt_uncork_callback {
  event ev;
  callback cb;
};

struct rt_export_hook;

extern uint rtable_max_id;

/* The public part of rtable structure */
#define RTABLE_PUBLIC \
  resource r;							\
  node n;				/* Node in list of all tables */	\
  char *name;				/* Name of this table */		\
  uint addr_type;			/* Type of address data stored in table (NET_*) */	\
  uint id;				/* Integer table ID for fast lookup */	\
  DOMAIN(rtable) lock;			/* Lock to take to access the private parts */	\
  struct rtable_config *config;		/* Configuration of this table */	\
  struct birdloop *loop;		/* Service thread */			\
  netindex_hash *netindex;		/* Prefix index for this table */	\
  struct network * _Atomic routes;	/* Actual route objects in the table */	\
  _Atomic u32 routes_block_size;	/* Size of the route object pointer block */	\
  struct f_trie * _Atomic trie;		/* Trie of prefixes defined in fib */	\
  event *hcu_event;			/* Hostcache updater */			\
  struct rt_exporter export_all;	/* Route export journal for all routes */	\
  struct rt_exporter export_best;	/* Route export journal for best routes */	\

/* The complete rtable structure */
struct rtable_private {
  /* Once more the public part */
  struct { RTABLE_PUBLIC; };
  struct rtable_private **locked_at;

  /* Here the private items not to be accessed without locking */
  pool *rp;				/* Resource pool to allocate everything from, including itself */
  struct slab *rte_slab;		/* Slab to allocate route objects */
  int use_count;			/* Number of protocols using this table */
  u32 rt_count;				/* Number of routes in the table */
  u32 net_count;			/* Number of nets in the table */
  u32 debug;				/* Debugging flags (D_*) */

  list imports;				/* Registered route importers */

  TLIST_STRUCT_DEF(rt_flowspec_link, struct rt_flowspec_link) flowspec_links;	/* Links serving flowspec reload */

  struct hmap id_map;
  struct hostcache *hostcache;
  config_ref deleted;			/* Table doesn't exist in current configuration,
					 * delete as soon as use_count becomes 0 and remove
					 * obstacle from this routing table.
					 */
  struct rt_export_request best_req;	/* Internal request from best route announcement cleanup */
  struct rt_uncork_callback nhu_uncork;	/* Helper event to schedule NHU on uncork */
  struct rt_uncork_callback hcu_uncork;	/* Helper event to schedule HCU on uncork */
  struct timer *prune_timer;		/* Timer for periodic pruning / GC */
  struct event *prune_event;		/* Event for prune execution */
  btime last_rt_change;			/* Last time when route changed */
  btime gc_time;			/* Time of last GC */
  uint gc_counter;			/* Number of operations since last GC */
  uint rr_counter;			/* Number of currently running route refreshes,
					   in fact sum of (stale_set - stale_pruned) over all importers
					   + one for each TIS_FLUSHING importer */
  uint wait_counter;			/* Number of imports in TIS_WAITING state */
  byte prune_state;			/* Table prune state, 1 -> scheduled, 2 -> running */
  byte prune_trie;			/* Prune prefix trie during next table prune */
  byte imports_flushing;		/* Some imports are being flushed right now */
  byte nhu_state;			/* Next Hop Update state */
  byte nhu_corked;			/* Next Hop Update is corked with this state */
  byte export_used;			/* Pending Export pruning is scheduled */
  byte cork_active;			/* Cork has been activated */
  struct rt_cork_threshold cork_threshold;	/* Threshold for table cork */
  u32 prune_index;			/* Rtable prune FIB iterator */
  u32 nhu_index;			/* Next Hop Update FIB iterator */
  event *nhu_event;			/* Nexthop updater */
  struct f_trie *trie_new;		/* New prefix trie defined during pruning */
  const struct f_trie *trie_old;	/* Old prefix trie waiting to be freed */
  u32 trie_lock_count;			/* Prefix trie locked by walks */
  u32 trie_old_lock_count;		/* Old prefix trie locked by walks */
  struct tbf rl_pipe;			/* Rate limiting token buffer for pipe collisions */

  struct f_trie *flowspec_trie;		/* Trie for evaluation of flowspec notifications */
  // struct mpls_domain *mpls_domain;	/* Label allocator for MPLS */
  u32 rte_free_deferred;		/* Counter of deferred rte_free calls */

  struct rt_digestor *export_digest;	/* Route export journal for digest tries */
  struct rt_stream *master;		/* Data source (this table is aux) */
};

/* The final union private-public rtable structure */
typedef union rtable {
  struct {
    RTABLE_PUBLIC;
  };
  struct rtable_private priv;
} rtable;

/* Define the lock cleanup function */
LOBJ_UNLOCK_CLEANUP(rtable, rtable);

#define RT_IS_LOCKED(tab)	LOBJ_IS_LOCKED((tab), rtable)
#define RT_LOCKED(tab, tp)	LOBJ_LOCKED((tab), tp, rtable, rtable)
#define RT_LOCK(tab, tp)	LOBJ_LOCK((tab), tp, rtable, rtable)

#define RT_LOCK_SIMPLE(tab)	LOBJ_LOCK_SIMPLE((tab), rtable)
#define RT_UNLOCK_SIMPLE(tab)	LOBJ_UNLOCK_SIMPLE((tab), rtable)

#define RT_UNLOCKED_TEMPORARILY(tab, tp)	LOBJ_UNLOCKED_TEMPORARILY((tab), tp, rtable, rtable)

#define RT_PUB(tab)	SKIP_BACK(rtable, priv, tab)
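
/*
 * Illustrative only: the locking macros are typically used as a scoped block,
 * as in rt_lock_table_pub() and rt_export_subscribe() below; tab and its
 * private-view variable tp are hypothetical names.
 *
 *   RT_LOCKED(tab, tp)
 *   {
 *     // tp is a struct rtable_private * valid only inside this block
 *   }
 */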

#define RT_UNCORKING (1ULL << 44)

extern struct rt_cork {
  _Atomic u64 active;
  DOMAIN(resource) dom;
  event_list queue;
} rt_cork;
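
/*
 * Apparent layout of rt_cork.active, as read from rt_cork_release() below
 * (not an authoritative description): the low 44 bits count acquired corks,
 * the bits at and above RT_UNCORKING count releases currently in progress;
 * the release that makes both halves equal runs the uncork queue.
 */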

static inline void rt_cork_acquire(void)
{
  atomic_fetch_add_explicit(&rt_cork.active, 1, memory_order_acq_rel);
}

static inline void rt_cork_release(void)
{
  u64 upd = atomic_fetch_add_explicit(&rt_cork.active, RT_UNCORKING, memory_order_acq_rel) + RT_UNCORKING;

  /* Actually released? */
  if ((upd >> 44) == (upd & (RT_UNCORKING - 1)))
  {
    LOCK_DOMAIN(resource, rt_cork.dom);
    synchronize_rcu();
    ev_run_list(&rt_cork.queue);
    UNLOCK_DOMAIN(resource, rt_cork.dom);
  }

  atomic_fetch_sub_explicit(&rt_cork.active, RT_UNCORKING + 1, memory_order_acq_rel);
}

void rt_cork_send_callback(void *_data);

static inline bool rt_cork_check(struct rt_uncork_callback *rcc)
{
  /* Wait until all uncorks have finished */
  while (1)
  {
    rcu_read_lock();

    /* Not corked */
    u64 corked = atomic_load_explicit(&rt_cork.active, memory_order_acquire);
    if (!corked)
    {
      rcu_read_unlock();
      return 0;
    }

    /* Yes, corked */
    if (corked < RT_UNCORKING)
    {
      if (!rcc->ev.hook)
      {
	rcc->ev.hook = rt_cork_send_callback;
	rcc->ev.data = rcc;
      }

      ev_send(&rt_cork.queue, &rcc->ev);
      rcu_read_unlock();
      return 1;
    }

    /* In progress, retry */
    rcu_read_unlock();
    birdloop_yield();
  }
}
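
/*
 * Illustrative only: a typical caller bails out while corked and relies on the
 * enqueued uncork callback to get re-run later; the table variable is
 * hypothetical.
 *
 *   if (rt_cork_check(&tab->nhu_uncork))
 *     return;	// corked; rescheduled via the uncork queue
 */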

struct rt_pending_export {
  struct rt_export_item it;
  struct rt_pending_export *_Atomic next;	/* Next export for the same net */
  u64 seq_all;				/* Interlink from BEST to ALL */
};

struct rt_net_pending_export {
  struct rt_pending_export * _Atomic first, * _Atomic last;
};

typedef struct network {
  struct rte_storage * _Atomic routes;	/* Available routes for this network */

  /* Uncleaned pending exports */
  struct rt_net_pending_export all;
  struct rt_net_pending_export best;
} net;

struct rte_storage {
  struct rte_storage * _Atomic next;	/* Next in chain */
  union {
    struct {
      RTE_IN_TABLE_WRITABLE;
    };
    const struct rte rte;		/* Route data */
  };
};

#define RTE_COPY(r)		((r) ? (r)->rte : (rte) {})
#define RTE_COPY_VALID(r)	(((r) && (rte_is_valid((r)))) ? *(r) : (rte) {})
#define RTE_OR_NULL(r)		((r) ? &((r)->rte) : NULL)
#define RTE_VALID_OR_NULL(r)	(((r) && (rte_is_valid((r)))) ? (r) : NULL)

#define RTES_WRITE(r)		(((r) != ((struct rte_storage *) 0)) ? ((struct rte *) &(r)->rte) : NULL)

#define RTE_GET_NETINDEX(e)	NET_TO_INDEX((e)->net)

/* Table import */

struct rt_import_request {
  struct rt_import_hook *hook;		/* The table part of importer */
  char *name;
  u8 trace_routes;

  struct birdloop *loop;		/* Where to schedule cleanup event */

  void (*dump_req)(struct rt_import_request *req);
  void (*log_state_change)(struct rt_import_request *req, u8 state);
  /* Preimport is called when the @new route is just-to-be inserted, replacing @old.
   * Return a route (may be different or modified in-place) to continue or NULL to withdraw. */
  int (*preimport)(struct rt_import_request *req, struct rte *new, const struct rte *old);
};

struct rt_import_hook {
  node n;
  rtable *table;			/* The connected table */
  struct rt_import_request *req;	/* The requestor */

  struct rt_import_stats {
    /* Import - from protocol to core */
    u32 pref;				/* Number of routes selected as best in the (adjacent) routing table */
    u32 updates_ignored;		/* Number of route updates rejected as already in route table */
    u32 updates_accepted;		/* Number of route updates accepted and imported */
    u32 withdraws_ignored;		/* Number of route withdraws rejected as already not in route table */
    u32 withdraws_accepted;		/* Number of route withdraws accepted and processed */
  } stats;

  u64 flush_seq;			/* Table export seq when the channel announced flushing */
  btime last_state_change;		/* Time of last state transition */

  u8 import_state;			/* IS_* */
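
  /* Route refresh bookkeeping: imported routes carry a stale_cycle tag;
   * rt_refresh_begin() bumps stale_set, rt_refresh_end() bumps stale_valid,
   * and the asynchronous table prune then drops routes whose stale_cycle
   * does not fall between stale_valid and stale_set. */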
  u8 stale_set;				/* Set this stale_cycle to imported routes */
  u8 stale_valid;			/* Routes with this stale_cycle and bigger are considered valid */
  u8 stale_pruned;			/* Last prune finished when this value was set at stale_valid */
  u8 stale_pruning;			/* Last prune started when this value was set at stale_valid */

  void (*stopped)(struct rt_import_request *);	/* Stored callback when import is stopped */
  event cleanup_event;			/* Used to finally unhook the import from the table */
};

#define TIS_DOWN	0
#define TIS_UP		1
#define TIS_STOP	2
#define TIS_FLUSHING	3
#define TIS_WAITING	4
#define TIS_CLEARED	5
#define TIS_MAX		6

void rt_request_import(rtable *tab, struct rt_import_request *req);
void rt_stop_import(struct rt_import_request *, void (*stopped)(struct rt_import_request *));
const char *rt_import_state_name(u8 state);
static inline u8 rt_import_get_state(struct rt_import_hook *ih) { return ih ? ih->import_state : TIS_DOWN; }

void rte_import(struct rt_import_request *req, const net_addr *net, rte *new, struct rte_src *src);
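
/*
 * Illustrative only: a minimal import request as an importer might fill it in
 * before calling rt_request_import(); the callback names and the loop variable
 * are hypothetical.
 *
 *   struct rt_import_request ir = {
 *     .name = "my-proto.ipv4",
 *     .loop = my_birdloop,
 *     .dump_req = my_dump,
 *     .log_state_change = my_log_state,
 *     .preimport = my_preimport,
 *   };
 *   rt_request_import(tab, &ir);
 */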

/* When rtable is just a view / aggregate, this is the basis for its source */
struct rt_stream {
  struct rt_import_request dst;
  rtable *dst_tab;
};

#if 0
/*
 * For table export processing
 */

/* Get next rpe. If src is given, it must match. */
struct rt_pending_export *rpe_next(struct rt_pending_export *rpe, struct rte_src *src);

/* Walk all rpe's */
#define RPE_WALK(first, it, src) \
  for (struct rt_pending_export *it = (first); it; it = rpe_next(it, (src)))

/* Mark the pending export processed */
void rpe_mark_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);

#define rpe_mark_seen_all(hook, first, last, src) do { \
  RPE_WALK((first), _rpe, (src)) { \
    rpe_mark_seen((hook), _rpe); \
    if (_rpe == last) break; \
  }} while (0)

/* Get pending export seen status */
int rpe_get_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);

#endif

/*
 * Channel export hooks. To be refactored out.
 */

int channel_preimport(struct rt_import_request *req, rte *new, const rte *old);


/* Types of route announcement, also used as flags */
#define RA_UNDEF	0		/* Undefined RA type */
#define RA_OPTIMAL	1		/* Announcement of optimal route change */
#define RA_ACCEPTED	2		/* Announcement of first accepted route */
#define RA_ANY		3		/* Announcement of any route change */
#define RA_MERGED	4		/* Announcement of optimal route merged with next ones */

/* Return value of preexport() callback */
#define RIC_ACCEPT	1		/* Accepted by protocol */
#define RIC_PROCESS	0		/* Process it through import filter */
#define RIC_REJECT	-1		/* Rejected by protocol */
#define RIC_DROP	-2		/* Silently dropped by protocol */

/*
 * Next hop update data structures
 */

#define NHU_CLEAN	0
#define NHU_SCHEDULED	1
#define NHU_RUNNING	2
#define NHU_DIRTY	3

struct hostentry {
  node ln;
  ip_addr addr;				/* IP address of host, part of key */
  ip_addr link;				/* (link-local) IP address of host, used as gw
					   if host is directly attached */
  rtable *tab;				/* Dependent table, part of key */
  rtable *owner;			/* Nexthop owner table */
  struct hostentry *next;		/* Next in hash chain */
  unsigned hash_key;			/* Hash key */
  u32 igp_metric;			/* Chosen route IGP metric */
  _Atomic u32 version;			/* Bumped on update */
  byte nexthop_linkable;		/* Nexthop list is completely non-device */
  ea_list * _Atomic src;		/* Source attributes */
  struct lfuc uc;			/* Use count */
};

struct hostcache {
  slab *slab;				/* Slab holding all hostentries */
  rtable *tab;				/* Parent routing table */
  struct hostentry **hash_table;	/* Hash table for hostentries */
  unsigned hash_order, hash_shift;
  unsigned hash_max, hash_min;
  unsigned hash_items;
  linpool *lp;				/* Linpool for trie */
  struct f_trie *trie;			/* Trie of prefixes that might affect hostentries */
  list hostentries;			/* List of all hostentries */
  struct rt_export_request req;		/* Notifier */
  event source_event;
};

struct rt_digestor {
  struct rt_export_request req;		/* Notifier from the table */
  struct lfjour digest;			/* Digest journal of struct rt_digest */
  struct settle settle;			/* Settle timer before announcing digests */
  struct f_trie *trie;			/* Trie to be announced */
  rtable *tab;				/* Table this belongs to */
  event event;
};

struct rt_digest {
  LFJOUR_ITEM_INHERIT(li);
  struct f_trie *trie;			/* Trie marking all prefixes where ROA have changed */
};

#define rte_update  channel_rte_import
/**
 * rte_update - enter a new update to a routing table
 * @c: channel doing the update
 * @net: network address
 * @rte: a &rte representing the new route
 * @src: old route source identifier
 *
 * This function imports a new route to the appropriate table (via the channel).
 * Table keys are @net (obligatory) and @rte->attrs->src.
 * Both the @net and @rte pointers can be local.
 *
 * The route attributes (@rte->attrs) are obligatory. They can also be allocated
 * locally. Anyway, if you use an already-cached attribute object, you shall
 * call rta_clone() on that object yourself. (This semantics may change in future.)
 *
 * If the route attributes are local, you may set @rte->attrs->src to NULL, then
 * the protocol's default route source will be supplied.
 *
 * When rte_update() gets a route, it automatically validates it. This includes
 * checking for validity of the given network and next hop addresses and also
 * checking for host-scope or link-scope routes. Then the import filters are
 * processed and if accepted, the route is passed to route table recalculation.
 *
 * The accepted routes are then inserted into the table, replacing the old route
 * for the same @net identified by @src. Then the route is announced
 * to all the channels connected to the table using the standard export mechanism.
 * Setting @rte to NULL makes this a withdraw, otherwise @rte->src must be the same
 * as @src.
 *
 * All memory used for temporary allocations is taken from a special linpool
 * @rte_update_pool and freed when rte_update() finishes.
 */
void rte_update(struct channel *c, const net_addr *net, struct rte *rte, struct rte_src *src);
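
/*
 * Illustrative only: how a protocol might announce and later withdraw a route
 * through its channel, following the description above; the channel, network,
 * attribute and source variables are hypothetical.
 *
 *   rte e = { .attrs = my_attrs };
 *   rte_update(ch, &my_net, &e, my_src);		// announce / replace
 *   rte_update(ch, &my_net, NULL, my_src);	// withdraw
 */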

extern list routing_tables;
struct config;

void rt_init(void);
void rt_preconfig(struct config *);
void rt_postconfig(struct config *);
void rt_commit(struct config *new, struct config *old);
void rt_lock_table_priv(struct rtable_private *, const char *file, uint line);
void rt_unlock_table_priv(struct rtable_private *, const char *file, uint line);
static inline void rt_lock_table_pub(rtable *t, const char *file, uint line)
{ RT_LOCKED(t, tt) rt_lock_table_priv(tt, file, line); }
static inline void rt_unlock_table_pub(rtable *t, const char *file, uint line)
{ RT_LOCKED(t, tt) rt_unlock_table_priv(tt, file, line); }

#define rt_lock_table(t)  _Generic((t),  rtable *: rt_lock_table_pub, \
    struct rtable_private *: rt_lock_table_priv)((t), __FILE__, __LINE__)
#define rt_unlock_table(t)  _Generic((t),  rtable *: rt_unlock_table_pub, \
    struct rtable_private *: rt_unlock_table_priv)((t), __FILE__, __LINE__)

const struct f_trie * rt_lock_trie(struct rtable_private *tab);
void rt_unlock_trie(struct rtable_private *tab, const struct f_trie *trie);
void rt_flowspec_link(rtable *src, rtable *dst);
void rt_flowspec_unlink(rtable *src, rtable *dst);
rtable *rt_setup(pool *, struct rtable_config *);
void rt_setup_digestor(struct rtable_private *tab);

struct rt_export_feed *rt_net_feed(rtable *t, const net_addr *a, const struct rt_pending_export *first);
rte rt_net_best(rtable *t, const net_addr *a);
int rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter);
rte *rt_export_merged(struct channel *c, const struct rt_export_feed *feed, linpool *pool, int silent);

void rt_refresh_begin(struct rt_import_request *);
void rt_refresh_end(struct rt_import_request *);
void rt_schedule_prune(struct rtable_private *t);
void rte_dump(struct rte_storage *);
void rt_dump(rtable *);
void rt_dump_all(void);
void rt_dump_hooks(rtable *);
void rt_dump_hooks_all(void);
int rt_reload_channel(struct channel *c);
void rt_reload_channel_abort(struct channel *c);
void rt_prune_sync(rtable *t, int all);
struct rtable_config *rt_new_table(struct symbol *s, uint addr_type);
void rt_new_default_table(struct symbol *s);
struct rtable_config *rt_get_default_table(struct config *cf, uint addr_type);

static inline int rt_is_ip(rtable *tab)
{ return (tab->addr_type == NET_IP4) || (tab->addr_type == NET_IP6); }

static inline int rt_is_vpn(rtable *tab)
{ return (tab->addr_type == NET_VPN4) || (tab->addr_type == NET_VPN6); }

static inline int rt_is_roa(rtable *tab)
{ return (tab->addr_type == NET_ROA4) || (tab->addr_type == NET_ROA6); }

static inline int rt_is_flow(rtable *tab)
{ return (tab->addr_type == NET_FLOW4) || (tab->addr_type == NET_FLOW6); }

/* Default limit for ECMP next hops, defined in sysdep code */
extern const int rt_default_ecmp;

struct rt_show_data_rtable {
  node n;
  const char *name;
  struct rt_exporter *exporter;
  struct channel *export_channel;
  struct channel *prefilter;
  struct krt_proto *kernel;
  struct rt_export_feeder req;		/* Export feeder in use */
};

struct rt_show_data {
  struct cli *cli;			/* Pointer back to the CLI */
  net_addr *addr;
  list tables;
  struct rt_show_data_rtable *tab;	/* Iterator over table list */
  struct rt_show_data_rtable *last_table; /* Last table in output */
  int verbose, tables_defined_by;
  struct timeformat tf_route;
  const struct filter *filter;
  struct proto *show_protocol;
  struct proto *export_protocol;
  struct channel *export_channel;
  OBSREF(struct config) running_on_config;
  // struct rt_export_hook *kernel_export_hook;
  int export_mode, addr_mode, primary_only, filtered, stats;

  int net_counter, rt_counter, show_counter, table_counter;
  int net_counter_last, rt_counter_last, show_counter_last;
  int show_counter_last_flush;
};

void rt_show(struct rt_show_data *);
struct rt_show_data_rtable * rt_show_add_table(struct rt_show_data *d, rtable *t);
struct rt_show_data_rtable * rt_show_add_exporter(struct rt_show_data *d, struct rt_exporter *e);

/* Value of table definition mode in struct rt_show_data */
#define RSD_TDB_DEFAULT	  0		/* no table specified */
#define RSD_TDB_INDIRECT  0		/* show route ... protocol P ... */
#define RSD_TDB_ALL	  RSD_TDB_SET			/* show route ... table all ... */
#define RSD_TDB_DIRECT	  RSD_TDB_SET | RSD_TDB_NMN	/* show route ... table X table Y ... */

#define RSD_TDB_SET	  0x1		/* internal: show empty tables */
#define RSD_TDB_NMN	  0x2		/* internal: need matching net */

/* Value of export_mode in struct rt_show_data */
#define RSEM_NONE	0		/* Export mode not used */
#define RSEM_PREEXPORT	1		/* Routes ready for export, before filtering */
#define RSEM_EXPORT	2		/* Routes accepted by export filter */
#define RSEM_NOEXPORT	3		/* Routes rejected by export filter */
#define RSEM_EXPORTED	4		/* Routes marked in export map */

/* Host entry: Resolve hook for recursive nexthops */
extern struct ea_class ea_gen_hostentry;
extern struct ea_class ea_gen_hostentry_version;
struct hostentry_adata {
  adata ad;
  struct hostentry *he;
  u32 labels[0];
};

#define HOSTENTRY_LABEL_COUNT(head)	(head->ad.length + sizeof(struct adata) - sizeof(struct hostentry_adata)) / sizeof(u32)

void
ea_set_hostentry(ea_list **to, rtable *dep, rtable *tab, ip_addr gw, ip_addr ll, u32 lnum, u32 labels[lnum]);

void ea_show_hostentry(const struct adata *ad, byte *buf, uint size);
void ea_show_nexthop_list(struct cli *c, struct nexthop_adata *nhad);

/*
 * Default protocol preferences
 */

#define DEF_PREF_DIRECT		240	/* Directly connected */
#define DEF_PREF_STATIC		200	/* Static route */
#define DEF_PREF_OSPF		150	/* OSPF intra-area, inter-area and type 1 external routes */
#define DEF_PREF_BABEL		130	/* Babel */
#define DEF_PREF_RIP		120	/* RIP */
#define DEF_PREF_BGP		100	/* BGP */
#define DEF_PREF_RPKI		100	/* RPKI */
#define DEF_PREF_L3VPN_IMPORT	 80	/* L3VPN import -> lower than BGP */
#define DEF_PREF_L3VPN_EXPORT	120	/* L3VPN export -> higher than BGP */
#define DEF_PREF_INHERITED	10	/* Routes inherited from other routing daemons */
#define DEF_PREF_UNKNOWN	0	/* Routes with no preference set */

/*
 * Route Origin Authorization
 */

#define ROA_UNKNOWN	0
#define ROA_VALID	1
#define ROA_INVALID	2

enum aspa_result {
  ASPA_UNKNOWN = 0,
  ASPA_VALID,
  ASPA_INVALID,
};

int net_roa_check(rtable *tab, const net_addr *n, u32 asn);
enum aspa_result aspa_check(rtable *tab, const struct adata *path, bool force_upstream);

#endif