mirror of
https://gitlab.nic.cz/labs/bird.git
synced 2024-12-22 09:41:54 +00:00
Real almost-lockless feeds and more pull-like exports
Introducing a new omnipotent internal API to just pass route updates from whatever point wherever we want. From now on, all the exports should be processed by RT_WALK_EXPORTS macro, and you can also issue a separate feed-only request to just get a feed and finish. The exporters can now also stop and the readers must expect that to happen and recover. Main tables don't stop, though.
This commit is contained in:
parent
1d0e47e74b
commit
5cbebfc940
@ -36,6 +36,7 @@ struct config {
|
||||
u32 proto_default_mrtdump; /* Default protocol mrtdump mask */
|
||||
u32 channel_default_debug; /* Default channel debug mask */
|
||||
u32 table_default_debug; /* Default table debug mask */
|
||||
u32 show_route_debug; /* Exports to CLI debug mask */
|
||||
u16 filter_vstk, filter_estk; /* Filter stack depth */
|
||||
struct timeformat tf_route; /* Time format for 'show route' */
|
||||
struct timeformat tf_proto; /* Time format for 'show protocol' */
|
||||
|
@ -227,7 +227,8 @@ lfjour_announce_now(struct lfjour *j)
|
||||
return lfjour_schedule_cleanup(j);
|
||||
|
||||
WALK_TLIST(lfjour_recipient, r, &j->recipients)
|
||||
ev_send(r->target, r->event);
|
||||
if (r->event)
|
||||
ev_send(r->target, r->event);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -260,8 +261,7 @@ void
|
||||
lfjour_register(struct lfjour *j, struct lfjour_recipient *r)
|
||||
{
|
||||
ASSERT_DIE(!j->domain || DG_IS_LOCKED(j->domain));
|
||||
ASSERT_DIE(r->event);
|
||||
ASSERT_DIE(r->target);
|
||||
ASSERT_DIE(!r->event == !r->target);
|
||||
|
||||
atomic_store_explicit(&r->last, NULL, memory_order_relaxed);
|
||||
ASSERT_DIE(!r->cur);
|
||||
@ -275,6 +275,9 @@ lfjour_unregister(struct lfjour_recipient *r)
|
||||
struct lfjour *j = lfjour_of_recipient(r);
|
||||
ASSERT_DIE(!j->domain || DG_IS_LOCKED(j->domain));
|
||||
|
||||
if (r->cur)
|
||||
lfjour_release(r);
|
||||
|
||||
lfjour_recipient_rem_node(&j->recipients, r);
|
||||
lfjour_schedule_cleanup(j);
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
src := cli.c cmds.c iface.c locks.c mpls.c neighbor.c password.c proto.c proto-build.c rt-attr.c rt-dev.c rt-fib.c rt-show.c rt-table.c
|
||||
src := cli.c cmds.c iface.c locks.c mpls.c neighbor.c password.c proto.c proto-build.c rt-attr.c rt-dev.c rt-export.c rt-fib.c rt-show.c rt-table.c
|
||||
obj := $(src-o-files)
|
||||
$(all-daemon)
|
||||
$(cf-local)
|
||||
|
13
nest/cli.c
13
nest/cli.c
@ -300,19 +300,12 @@ extern cli *cmd_reconfig_stored_cli;
|
||||
void
|
||||
cli_free(cli *c)
|
||||
{
|
||||
int defer = 0;
|
||||
if (c->cleanup)
|
||||
defer = c->cleanup(c);
|
||||
CALL(c->cleanup, c);
|
||||
|
||||
if (c == cmd_reconfig_stored_cli)
|
||||
cmd_reconfig_stored_cli = NULL;
|
||||
|
||||
if (defer)
|
||||
{
|
||||
sk_close(c->sock);
|
||||
c->sock = NULL;
|
||||
}
|
||||
else
|
||||
rp_free(c->pool);
|
||||
rp_free(c->pool);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -34,8 +34,7 @@ typedef struct cli {
|
||||
struct cli_out *tx_buf, *tx_pos, *tx_write;
|
||||
event *event;
|
||||
void (*cont)(struct cli *c);
|
||||
int (*cleanup)(struct cli *c); /* Return 0 if finished and cli may be freed immediately.
|
||||
Otherwise return 1 and call rfree(c->pool) when appropriate. */
|
||||
void (*cleanup)(struct cli *c); /* The CLI has closed prematurely */
|
||||
void *rover; /* Private to continuation routine */
|
||||
int last_reply;
|
||||
int restricted; /* CLI is restricted to read-only commands */
|
||||
|
@ -367,7 +367,6 @@ channel_item_:
|
||||
this_channel->out_filter = $4;
|
||||
}
|
||||
| EXPORT imexport { this_channel->out_filter = $2; }
|
||||
| EXPORT BLOCK expr { this_channel->feed_block_size = $3; }
|
||||
| RECEIVE LIMIT limit_spec { this_channel->rx_limit = $3; }
|
||||
| IMPORT LIMIT limit_spec { this_channel->in_limit = $3; }
|
||||
| EXPORT LIMIT limit_spec { this_channel->out_limit = $3; }
|
||||
@ -445,6 +444,7 @@ debug_default:
|
||||
| DEBUG CHANNELS debug_mask { new_config->channel_default_debug = $3; }
|
||||
| DEBUG TABLES debug_mask { new_config->table_default_debug = $3; }
|
||||
| DEBUG COMMANDS expr { new_config->cli_debug = $3; }
|
||||
| DEBUG SHOW ROUTE debug_mask { new_config->show_route_debug = $4; }
|
||||
;
|
||||
|
||||
/* MRTDUMP PROTOCOLS is in sysdep/unix/config.Y */
|
||||
|
807
nest/proto.c
807
nest/proto.c
File diff suppressed because it is too large
Load Diff
@ -122,12 +122,6 @@ struct proto_config {
|
||||
/* Protocol-specific data follow... */
|
||||
};
|
||||
|
||||
struct channel_import_request {
|
||||
struct channel_import_request *next; /* Next in request chain */
|
||||
void (*done)(struct channel_import_request *); /* Called when import finishes */
|
||||
const struct f_trie *trie; /* Reload only matching nets */
|
||||
};
|
||||
|
||||
#define TLIST_PREFIX proto
|
||||
#define TLIST_TYPE struct proto
|
||||
#define TLIST_ITEM n
|
||||
@ -194,15 +188,12 @@ struct proto {
|
||||
* reload_routes Request channel to reload all its routes to the core
|
||||
* (using rte_update()). Returns: 0=reload cannot be done,
|
||||
* 1= reload is scheduled and will happen (asynchronously).
|
||||
* feed_begin Notify channel about beginning of route feeding.
|
||||
* feed_end Notify channel about finish of route feeding.
|
||||
*/
|
||||
|
||||
void (*rt_notify)(struct proto *, struct channel *, const net_addr *net, struct rte *new, const struct rte *old);
|
||||
int (*preexport)(struct channel *, struct rte *rt);
|
||||
int (*reload_routes)(struct channel *, struct channel_import_request *cir);
|
||||
void (*feed_begin)(struct channel *);
|
||||
void (*feed_end)(struct channel *);
|
||||
void (*export_fed)(struct channel *);
|
||||
int (*reload_routes)(struct channel *, struct rt_feeding_request *cir);
|
||||
|
||||
/*
|
||||
* Routing entry hooks (called only for routes belonging to this protocol):
|
||||
@ -522,8 +513,6 @@ struct channel_config {
|
||||
|
||||
struct settle_config roa_settle; /* Settle times for ROA-induced reload */
|
||||
|
||||
uint feed_block_size; /* How many routes to feed at once */
|
||||
|
||||
u8 net_type; /* Routing table network type (NET_*), 0 for undefined */
|
||||
u8 ra_mode; /* Mode of received route advertisements (RA_*) */
|
||||
u16 preference; /* Default route preference */
|
||||
@ -545,8 +534,8 @@ struct channel {
|
||||
const struct filter *in_filter; /* Input filter */
|
||||
const struct filter *out_filter; /* Output filter */
|
||||
const net_addr *out_subprefix; /* Export only subprefixes of this net */
|
||||
struct bmap export_map; /* Keeps track which routes were really exported */
|
||||
struct bmap export_reject_map; /* Keeps track which routes were rejected by export filter */
|
||||
struct bmap export_accepted_map; /* Keeps track which routes were really exported */
|
||||
struct bmap export_rejected_map; /* Keeps track which routes were rejected by export filter */
|
||||
|
||||
struct limit rx_limit; /* Receive limit (for in_keep & RIK_REJECTED) */
|
||||
struct limit in_limit; /* Input limit */
|
||||
@ -579,15 +568,7 @@ struct channel {
|
||||
|
||||
struct rt_import_request in_req; /* Table import connection */
|
||||
struct rt_export_request out_req; /* Table export connection */
|
||||
|
||||
struct rt_export_request refeed_req; /* Auxiliary refeed request */
|
||||
struct bmap refeed_map; /* Auxiliary refeed netindex bitmap */
|
||||
struct channel_feeding_request *refeeding; /* Refeeding the channel */
|
||||
struct channel_feeding_request *refeed_pending; /* Scheduled refeeds */
|
||||
struct channel_import_request *importing; /* Importing the channel */
|
||||
struct channel_import_request *import_pending; /* Scheduled imports */
|
||||
|
||||
uint feed_block_size; /* How many routes to feed at once */
|
||||
event out_event; /* Table export event */
|
||||
|
||||
u8 net_type; /* Routing table network type (NET_*), 0 for undefined */
|
||||
u8 ra_mode; /* Mode of received route advertisements (RA_*) */
|
||||
@ -607,9 +588,9 @@ struct channel {
|
||||
|
||||
btime last_state_change; /* Time of last state transition */
|
||||
|
||||
struct rt_export_request reload_req; /* Feeder for import reload */
|
||||
struct rt_export_feeder reimporter; /* Feeder for import reload */
|
||||
event reimport_event; /* Event doing that import reload */
|
||||
|
||||
u8 reload_pending; /* Reloading and another reload is scheduled */
|
||||
u8 rpki_reload; /* RPKI changes trigger channel reload */
|
||||
|
||||
struct rt_exporter *out_table; /* Internal table for exported routes */
|
||||
@ -687,8 +668,6 @@ void proto_remove_channel(struct proto *p, struct channel *c);
|
||||
int proto_configure_channel(struct proto *p, struct channel **c, struct channel_config *cf);
|
||||
|
||||
void channel_set_state(struct channel *c, uint state);
|
||||
void channel_schedule_reload(struct channel *c, struct channel_import_request *cir);
|
||||
int channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n);
|
||||
|
||||
void channel_add_obstacle(struct channel *c);
|
||||
void channel_del_obstacle(struct channel *c);
|
||||
@ -697,53 +676,8 @@ static inline void channel_init(struct channel *c) { channel_set_state(c, CS_STA
|
||||
static inline void channel_open(struct channel *c) { channel_set_state(c, CS_UP); }
|
||||
static inline void channel_close(struct channel *c) { channel_set_state(c, CS_STOP); }
|
||||
|
||||
struct channel_feeding_request {
|
||||
struct channel_feeding_request *next; /* Next in request chain */
|
||||
void (*done)(struct channel_feeding_request *); /* Called when refeed finishes */
|
||||
const struct f_trie *trie; /* Reload only matching nets */
|
||||
PACKED enum channel_feeding_request_type {
|
||||
CFRT_DIRECT = 1, /* Refeed by export restart */
|
||||
CFRT_AUXILIARY, /* Refeed by auxiliary request */
|
||||
} type;
|
||||
PACKED enum {
|
||||
CFRS_INACTIVE = 0, /* Inactive request */
|
||||
CFRS_PENDING, /* Request enqueued, do not touch */
|
||||
CFRS_RUNNING, /* Request active, do not touch */
|
||||
} state;
|
||||
};
|
||||
|
||||
struct channel *channel_from_export_request(struct rt_export_request *req);
|
||||
void channel_request_feeding(struct channel *c, struct channel_feeding_request *);
|
||||
void channel_request_feeding_dynamic(struct channel *c, enum channel_feeding_request_type);
|
||||
|
||||
static inline int channel_net_is_refeeding(struct channel *c, const net_addr *n)
|
||||
{
|
||||
/* Not refeeding if not refeeding at all */
|
||||
if (!c->refeeding)
|
||||
return 0;
|
||||
|
||||
/* Not refeeding if already refed */
|
||||
struct netindex *ni = NET_TO_INDEX(n);
|
||||
if (bmap_test(&c->refeed_map, ni->index))
|
||||
return 0;
|
||||
|
||||
/* Refeeding if matching any request */
|
||||
for (struct channel_feeding_request *cfr = c->refeeding; cfr; cfr = cfr->next)
|
||||
if (!cfr->trie || trie_match_net(cfr->trie, n))
|
||||
return 1;
|
||||
|
||||
/* Not matching any request */
|
||||
return 0;
|
||||
}
|
||||
static inline void channel_net_mark_refed(struct channel *c, const net_addr *n)
|
||||
{
|
||||
ASSERT_DIE(c->refeeding);
|
||||
|
||||
struct netindex *ni = NET_TO_INDEX(n);
|
||||
bmap_set(&c->refeed_map, ni->index);
|
||||
}
|
||||
|
||||
void channel_request_reload(struct channel *c);
|
||||
void channel_request_reload(struct channel *c, struct rt_feeding_request *cir);
|
||||
void channel_request_full_refeed(struct channel *c);
|
||||
|
||||
void *channel_config_new(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto);
|
||||
void *channel_config_get(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto);
|
||||
|
450
nest/route.h
450
nest/route.h
@ -2,7 +2,7 @@
|
||||
* BIRD Internet Routing Daemon -- Routing Table
|
||||
*
|
||||
* (c) 1998--2000 Martin Mares <mj@ucw.cz>
|
||||
* (c) 2019--2021 Maria Matejka <mq@jmq.cz>
|
||||
* (c) 2019--2024 Maria Matejka <mq@jmq.cz>
|
||||
*
|
||||
* Can be freely distributed and used under the terms of the GNU GPL.
|
||||
*/
|
||||
@ -42,10 +42,6 @@ struct f_trie;
|
||||
struct f_trie_walk_state;
|
||||
struct cli;
|
||||
|
||||
struct rt_cork_threshold {
|
||||
u64 low, high;
|
||||
};
|
||||
|
||||
/*
|
||||
* Master Routing Tables. Generally speaking, each of them contains a FIB
|
||||
* with each entry pointing to a list of route entries representing routes
|
||||
@ -68,15 +64,272 @@ struct rtable_config {
|
||||
u32 debug; /* Debugging flags (D_*) */
|
||||
byte sorted; /* Routes of network are sorted according to rte_better() */
|
||||
byte trie_used; /* Rtable has attached trie */
|
||||
struct rt_cork_threshold cork_threshold; /* Cork threshold values */
|
||||
struct rt_cork_threshold {
|
||||
u64 low, high;
|
||||
} cork_threshold; /* Cork threshold values */
|
||||
struct settle_config export_settle; /* Export announcement settler */
|
||||
struct settle_config export_rr_settle;/* Export announcement settler config valid when any
|
||||
route refresh is running */
|
||||
};
|
||||
|
||||
/*
|
||||
* Route export journal
|
||||
*
|
||||
* The journal itself is held in struct rt_exporter.
|
||||
* Workflow:
|
||||
* (1) Initialize by rt_exporter_init()
|
||||
* (2) Push data by rt_exporter_push() (the export item is copied)
|
||||
* (3) Shutdown by rt_exporter_shutdown(), event is called after cleanup
|
||||
*
|
||||
* Subscribers:
|
||||
* (1) Initialize by rt_export_subscribe()
|
||||
* (2a) Get data by rt_export_get();
|
||||
* (2b) Release data after processing by rt_export_release()
|
||||
* (3) Request refeed by rt_export_refeed()
|
||||
* (4) Unsubscribe by rt_export_unsubscribe()
|
||||
*/
|
||||
|
||||
struct rt_export_request {
|
||||
/* Formal name */
|
||||
char *name;
|
||||
|
||||
/* Memory */
|
||||
pool *pool;
|
||||
|
||||
/* State information */
|
||||
enum rt_export_state {
|
||||
#define RT_EXPORT_STATES \
|
||||
DOWN, \
|
||||
FEEDING, \
|
||||
PARTIAL, \
|
||||
READY, \
|
||||
STOP, \
|
||||
|
||||
#define RT_EXPORT_STATES_ENUM_HELPER(p) TES_##p,
|
||||
MACRO_FOREACH(RT_EXPORT_STATES_ENUM_HELPER, RT_EXPORT_STATES)
|
||||
TES_MAX
|
||||
#undef RT_EXPORT_STATES_ENUM_HELPER
|
||||
} _Atomic export_state;
|
||||
btime last_state_change;
|
||||
|
||||
/* Table feeding contraption */
|
||||
struct rt_export_feeder {
|
||||
/* Formal name */
|
||||
char *name;
|
||||
|
||||
/* Enlisting */
|
||||
struct rt_exporter * _Atomic exporter;
|
||||
struct rt_export_feeder * _Atomic next;
|
||||
|
||||
/* Prefiltering, useful for more scenarios */
|
||||
struct rt_prefilter {
|
||||
/* Network prefilter mode (TE_ADDR_*) */
|
||||
enum {
|
||||
TE_ADDR_NONE = 0, /* No address matching */
|
||||
TE_ADDR_EQUAL, /* Exact query - show route <addr> */
|
||||
TE_ADDR_FOR, /* Longest prefix match - show route for <addr> */
|
||||
TE_ADDR_IN, /* Interval query - show route in <addr> */
|
||||
TE_ADDR_TRIE, /* Query defined by trie */
|
||||
TE_ADDR_HOOK, /* Query processed by supplied custom hook */
|
||||
} mode;
|
||||
|
||||
union {
|
||||
const struct f_trie *trie;
|
||||
const net_addr *addr;
|
||||
int (*hook)(const struct rt_prefilter *, const net_addr *);
|
||||
};
|
||||
} prefilter;
|
||||
|
||||
#define TLIST_PREFIX rt_export_feeder
|
||||
#define TLIST_TYPE struct rt_export_feeder
|
||||
#define TLIST_ITEM n
|
||||
#define TLIST_WANT_WALK
|
||||
#define TLIST_WANT_ADD_TAIL
|
||||
|
||||
/* Feeding itself */
|
||||
union {
|
||||
u64 feed_index; /* Index of the feed in progress */
|
||||
struct rt_feeding_index *feed_index_ptr; /* Use this when u64 is not enough */
|
||||
};
|
||||
struct rt_feeding_request {
|
||||
struct rt_feeding_request *next; /* Next in request chain */
|
||||
void (*done)(struct rt_feeding_request *);/* Called when this refeed finishes */
|
||||
struct rt_prefilter prefilter; /* Reload only matching nets */
|
||||
PACKED enum {
|
||||
RFRS_INACTIVE = 0, /* Inactive request */
|
||||
RFRS_PENDING, /* Request enqueued, do not touch */
|
||||
RFRS_RUNNING, /* Request active, do not touch */
|
||||
} state;
|
||||
} *feeding, *feed_pending;
|
||||
TLIST_DEFAULT_NODE;
|
||||
u8 trace_routes;
|
||||
} feeder;
|
||||
|
||||
/* Regular updates */
|
||||
struct bmap seq_map; /* Which lfjour items are already processed */
|
||||
struct bmap feed_map; /* Which nets were already fed (for initial feeding) */
|
||||
struct lfjour_recipient r;
|
||||
struct rt_export_union *cur;
|
||||
|
||||
/* Statistics */
|
||||
struct rt_export_stats {
|
||||
u32 updates_received; /* Number of route updates received */
|
||||
u32 withdraws_received; /* Number of route withdraws received */
|
||||
} stats;
|
||||
|
||||
/* Tracing */
|
||||
u8 trace_routes;
|
||||
void (*dump)(struct rt_export_request *req);
|
||||
void (*fed)(struct rt_export_request *req);
|
||||
};
|
||||
|
||||
#include "lib/tlists.h"
|
||||
|
||||
struct rt_export_union {
|
||||
enum rt_export_kind {
|
||||
RT_EXPORT_STOP = 1,
|
||||
RT_EXPORT_FEED,
|
||||
RT_EXPORT_UPDATE,
|
||||
} kind;
|
||||
const struct rt_export_item {
|
||||
LFJOUR_ITEM_INHERIT(li); /* Member of lockfree journal */
|
||||
char data[0]; /* Memcpy helper */
|
||||
const rte *new, *old; /* Route update */
|
||||
} *update;
|
||||
const struct rt_export_feed {
|
||||
uint count_routes, count_exports;
|
||||
const struct netindex *ni;
|
||||
rte *block;
|
||||
u64 *exports;
|
||||
char data[0];
|
||||
} *feed;
|
||||
struct rt_export_request *req;
|
||||
};
|
||||
|
||||
struct rt_exporter {
|
||||
struct lfjour journal; /* Journal for update keeping */
|
||||
TLIST_LIST(rt_export_feeder) feeders; /* List of active feeder structures */
|
||||
_Bool _Atomic feeders_lock; /* Spinlock for the above list */
|
||||
u8 trace_routes; /* Debugging flags (D_*) */
|
||||
const char *name; /* Name for logging */
|
||||
void (*stopped)(struct rt_exporter *); /* Callback when exporter can stop */
|
||||
void (*cleanup_done)(struct rt_exporter *, u64 end); /* Callback when cleanup has been done */
|
||||
struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, const net_addr *, const struct rt_export_item *first);
|
||||
const net_addr *(*feed_next)(struct rt_exporter *, struct rcu_unwinder *, struct rt_export_feeder *);
|
||||
void (*feed_cleanup)(struct rt_exporter *, struct rt_export_feeder *);
|
||||
};
|
||||
|
||||
/* Exporter API */
|
||||
void rt_exporter_init(struct rt_exporter *, struct settle_config *);
|
||||
struct rt_export_item *rt_exporter_push(struct rt_exporter *, const struct rt_export_item *);
|
||||
void rt_exporter_shutdown(struct rt_exporter *, void (*stopped)(struct rt_exporter *));
|
||||
|
||||
/* Standalone feeds */
|
||||
void rt_feeder_subscribe(struct rt_exporter *, struct rt_export_feeder *);
|
||||
void rt_feeder_unsubscribe(struct rt_export_feeder *);
|
||||
void rt_export_refeed_feeder(struct rt_export_feeder *, struct rt_feeding_request *);
|
||||
|
||||
struct rt_export_feed *rt_export_next_feed(struct rt_export_feeder *);
|
||||
#define RT_FEED_WALK(_feeder, _f) \
|
||||
for (const struct rt_export_feed *_f; _f = rt_export_next_feed(_feeder); ) \
|
||||
|
||||
static inline _Bool rt_export_feed_active(struct rt_export_feeder *f)
|
||||
{ return !!atomic_load_explicit(&f->exporter, memory_order_acquire); }
|
||||
|
||||
/* Full blown exports */
|
||||
void rtex_export_subscribe(struct rt_exporter *, struct rt_export_request *);
|
||||
void rtex_export_unsubscribe(struct rt_export_request *);
|
||||
|
||||
const struct rt_export_union * rt_export_get(struct rt_export_request *);
|
||||
void rt_export_release(const struct rt_export_union *);
|
||||
void rt_export_retry_later(const struct rt_export_union *);
|
||||
void rt_export_processed(struct rt_export_request *, u64);
|
||||
void rt_export_refeed_request(struct rt_export_request *rer, struct rt_feeding_request *rfr);
|
||||
|
||||
static inline enum rt_export_state rt_export_get_state(struct rt_export_request *r)
|
||||
{ return atomic_load_explicit(&r->export_state, memory_order_acquire); }
|
||||
const char *rt_export_state_name(enum rt_export_state state);
|
||||
|
||||
static inline void rt_export_walk_cleanup(const struct rt_export_union **up)
|
||||
{
|
||||
if (*up)
|
||||
rt_export_release(*up);
|
||||
}
|
||||
|
||||
#define RT_EXPORT_WALK(_reader, _u) \
|
||||
for (CLEANUP(rt_export_walk_cleanup) const struct rt_export_union *_u;\
|
||||
_u = rt_export_get(_reader); \
|
||||
rt_export_release(_u)) \
|
||||
|
||||
/* Convenience common call to request refeed */
|
||||
#define rt_export_refeed(h, r) _Generic((h), \
|
||||
struct rt_export_feeder *: rt_export_refeed_feeder, \
|
||||
struct rt_export_request *: rt_export_refeed_request, \
|
||||
void *: bug)(h, r)
|
||||
|
||||
/* Subscription to regular table exports needs locking */
|
||||
#define rt_export_subscribe(_t, _kind, f) do { \
|
||||
RT_LOCKED(_t, tp) { \
|
||||
rt_lock_table(tp); \
|
||||
rtex_export_subscribe(&tp->export_##_kind, f); \
|
||||
}} while (0) \
|
||||
|
||||
#define rt_export_unsubscribe(_kind, _fx) do { \
|
||||
struct rt_export_request *_f = _fx; \
|
||||
struct rt_exporter *e = atomic_load_explicit(&_f->feeder.exporter, memory_order_acquire); \
|
||||
RT_LOCKED(SKIP_BACK(rtable, export_##_kind, e), _tp) { \
|
||||
rtex_export_unsubscribe(_f); \
|
||||
rt_unlock_table(_tp); \
|
||||
}} while (0) \
|
||||
|
||||
static inline int rt_prefilter_net(const struct rt_prefilter *p, const net_addr *n)
|
||||
{
|
||||
switch (p->mode)
|
||||
{
|
||||
case TE_ADDR_NONE: return 1;
|
||||
case TE_ADDR_IN: return net_in_netX(n, p->addr);
|
||||
case TE_ADDR_EQUAL: return net_equal(n, p->addr);
|
||||
case TE_ADDR_FOR: return net_in_netX(p->addr, n);
|
||||
case TE_ADDR_TRIE: return trie_match_net(p->trie, n);
|
||||
case TE_ADDR_HOOK: return p->hook(p, n);
|
||||
}
|
||||
|
||||
bug("Crazy prefilter application attempt failed wildly.");
|
||||
}
|
||||
|
||||
static inline _Bool
|
||||
rt_net_is_feeding_feeder(struct rt_export_feeder *ref, const net_addr *n)
|
||||
{
|
||||
for (struct rt_feeding_request *rfr = ref->feeding; rfr; rfr = rfr->next)
|
||||
if (rt_prefilter_net(&rfr->prefilter, n))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline _Bool
|
||||
rt_net_is_feeding_request(struct rt_export_request *req, const net_addr *n)
|
||||
{
|
||||
struct netindex *ni = NET_TO_INDEX(n);
|
||||
return
|
||||
!bmap_test(&req->feed_map, ni->index)
|
||||
&& rt_net_is_feeding_feeder(&req->feeder, n);
|
||||
}
|
||||
|
||||
#define rt_net_is_feeding(h, n) _Generic((h), \
|
||||
struct rt_export_feeder *: rt_net_is_feeding_feeder, \
|
||||
struct rt_export_request *: rt_net_is_feeding_request, \
|
||||
void *: bug)(h, n)
|
||||
|
||||
|
||||
/*
|
||||
* The original rtable
|
||||
*
|
||||
* To be kept as is for now until we refactor the new structures out of BGP Attrs.
|
||||
*/
|
||||
|
||||
|
||||
struct rt_export_hook;
|
||||
struct rt_export_request;
|
||||
struct rt_exporter;
|
||||
|
||||
extern uint rtable_max_id;
|
||||
|
||||
@ -96,6 +349,8 @@ extern uint rtable_max_id;
|
||||
struct f_trie * _Atomic trie; /* Trie of prefixes defined in fib */ \
|
||||
event *nhu_event; /* Nexthop updater */ \
|
||||
event *hcu_event; /* Hostcache updater */ \
|
||||
struct rt_exporter export_all; /* Route export journal for all routes */ \
|
||||
struct rt_exporter export_best; /* Route export journal for best routes */ \
|
||||
|
||||
/* The complete rtable structure */
|
||||
struct rtable_private {
|
||||
@ -112,7 +367,7 @@ struct rtable_private {
|
||||
u32 debug; /* Debugging flags (D_*) */
|
||||
|
||||
list imports; /* Registered route importers */
|
||||
struct lfjour journal; /* Exporter API structure */
|
||||
|
||||
TLIST_STRUCT_DEF(rt_flowspec_link, struct rt_flowspec_link) flowspec_links; /* Links serving flowspec reload */
|
||||
|
||||
struct hmap id_map;
|
||||
@ -121,6 +376,7 @@ struct rtable_private {
|
||||
* delete as soon as use_count becomes 0 and remove
|
||||
* obstacle from this routing table.
|
||||
*/
|
||||
struct rt_export_request best_req; /* Internal request from best route announcement cleanup */
|
||||
struct event *nhu_uncork_event; /* Helper event to schedule NHU on uncork */
|
||||
struct event *hcu_uncork_event; /* Helper event to schedule HCU on uncork */
|
||||
struct timer *prune_timer; /* Timer for periodic pruning / GC */
|
||||
@ -203,10 +459,22 @@ static inline _Bool rt_cork_check(event *e)
|
||||
return corked;
|
||||
}
|
||||
|
||||
struct rt_pending_export {
|
||||
struct rt_export_item it;
|
||||
struct rt_pending_export *_Atomic next; /* Next export for the same net */
|
||||
u64 seq_all; /* Interlink from BEST to ALL */
|
||||
};
|
||||
|
||||
struct rt_net_pending_export {
|
||||
struct rt_pending_export * _Atomic first, * _Atomic last;
|
||||
};
|
||||
|
||||
typedef struct network {
|
||||
struct rte_storage * _Atomic routes; /* Available routes for this network */
|
||||
struct rt_pending_export * _Atomic first, * _Atomic last; /* Uncleaned pending exports */
|
||||
struct rte_storage * _Atomic routes; /* Available routes for this network */
|
||||
|
||||
/* Uncleaned pending exports */
|
||||
struct rt_net_pending_export all;
|
||||
struct rt_net_pending_export best;
|
||||
} net;
|
||||
|
||||
struct rte_storage {
|
||||
@ -228,24 +496,7 @@ struct rte_storage {
|
||||
|
||||
#define RTE_GET_NETINDEX(e) NET_TO_INDEX((e)->net)
|
||||
|
||||
/* Table-channel connections */
|
||||
|
||||
struct rt_prefilter {
|
||||
union {
|
||||
const struct f_trie *trie;
|
||||
const net_addr *addr; /* Network prefilter address */
|
||||
int (*hook)(const struct rt_prefilter *, const net_addr *);
|
||||
};
|
||||
/* Network prefilter mode (TE_ADDR_*) */
|
||||
enum {
|
||||
TE_ADDR_NONE = 0, /* No address matching */
|
||||
TE_ADDR_EQUAL, /* Exact query - show route <addr> */
|
||||
TE_ADDR_FOR, /* Longest prefix match - show route for <addr> */
|
||||
TE_ADDR_IN, /* Interval query - show route in <addr> */
|
||||
TE_ADDR_TRIE, /* Query defined by trie */
|
||||
TE_ADDR_HOOK, /* Query processed by supplied custom hook */
|
||||
} mode;
|
||||
} PACKED;
|
||||
/* Table import */
|
||||
|
||||
struct rt_import_request {
|
||||
struct rt_import_hook *hook; /* The table part of importer */
|
||||
@ -288,94 +539,6 @@ struct rt_import_hook {
|
||||
event cleanup_event; /* Used to finally unhook the import from the table */
|
||||
};
|
||||
|
||||
struct rt_pending_export {
|
||||
LFJOUR_ITEM_INHERIT(li);
|
||||
struct rt_pending_export * _Atomic next; /* Next export for the same destination */
|
||||
const rte *new, *new_best, *old, *old_best;
|
||||
};
|
||||
|
||||
struct rt_export_feed {
|
||||
uint count_routes, count_exports;
|
||||
struct netindex *ni;
|
||||
rte *block;
|
||||
u64 *exports;
|
||||
char data[0];
|
||||
};
|
||||
|
||||
struct rt_export_request {
|
||||
struct rt_export_hook *hook; /* Table part of the export */
|
||||
char *name;
|
||||
u8 trace_routes;
|
||||
uint feed_block_size; /* How many routes to feed at once */
|
||||
struct rt_prefilter prefilter;
|
||||
|
||||
event_list *list; /* Where to schedule export events */
|
||||
pool *pool; /* Pool to use for allocations */
|
||||
|
||||
/* There are two methods of export. You can either request feeding every single change
|
||||
* or feeding the whole route feed. In case of regular export, &export_one is preferred.
|
||||
* Anyway, when feeding, &export_bulk is preferred, falling back to &export_one.
|
||||
* Thus, for RA_OPTIMAL, &export_one is only set,
|
||||
* for RA_MERGED and RA_ACCEPTED, &export_bulk is only set
|
||||
* and for RA_ANY, both are set to accommodate for feeding all routes but receiving single changes
|
||||
*/
|
||||
void (*export_one)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
|
||||
void (*export_bulk)(struct rt_export_request *req, const net_addr *net,
|
||||
struct rt_pending_export *rpe, struct rt_pending_export *last,
|
||||
const rte **feed, uint count);
|
||||
|
||||
void (*mark_seen)(struct rt_export_request *req, struct rt_pending_export *rpe);
|
||||
|
||||
void (*dump_req)(struct rt_export_request *req);
|
||||
void (*log_state_change)(struct rt_export_request *req, u8);
|
||||
};
|
||||
|
||||
static inline int rt_prefilter_net(const struct rt_prefilter *p, const net_addr *n)
|
||||
{
|
||||
switch (p->mode)
|
||||
{
|
||||
case TE_ADDR_NONE: return 1;
|
||||
case TE_ADDR_IN: return net_in_netX(n, p->addr);
|
||||
case TE_ADDR_EQUAL: return net_equal(n, p->addr);
|
||||
case TE_ADDR_FOR: return net_in_netX(p->addr, n);
|
||||
case TE_ADDR_TRIE: return trie_match_net(p->trie, n);
|
||||
case TE_ADDR_HOOK: return p->hook(p, n);
|
||||
}
|
||||
|
||||
bug("Crazy prefilter application attempt failed wildly.");
|
||||
}
|
||||
|
||||
struct rt_export_hook {
|
||||
struct lfjour_recipient recipient; /* Journal recipient structure */
|
||||
|
||||
pool *pool;
|
||||
|
||||
struct rt_export_request *req; /* The requestor */
|
||||
|
||||
struct rt_export_stats {
|
||||
/* Export - from core to protocol */
|
||||
u32 updates_received; /* Number of route updates received */
|
||||
u32 withdraws_received; /* Number of route withdraws received */
|
||||
} stats;
|
||||
|
||||
btime last_state_change; /* Time of last state transition */
|
||||
|
||||
_Atomic u8 export_state; /* Route export state (TES_*, see below) */
|
||||
struct event *event; /* Event running all the export operations */
|
||||
|
||||
struct bmap seq_map; /* Keep track which exports were already processed */
|
||||
|
||||
void (*stopped)(struct rt_export_request *); /* Stored callback when export is stopped */
|
||||
|
||||
/* Table-specific items */
|
||||
|
||||
rtable *tab; /* The table pointer to use in corner cases */
|
||||
u32 feed_index; /* Routing table iterator used during feeding */
|
||||
|
||||
u8 refeed_pending; /* Refeeding and another refeed is scheduled */
|
||||
u8 feed_type; /* Which feeding method is used (TFT_*, see below) */
|
||||
};
|
||||
|
||||
|
||||
#define TIS_DOWN 0
|
||||
#define TIS_UP 1
|
||||
@ -385,35 +548,15 @@ struct rt_export_hook {
|
||||
#define TIS_CLEARED 5
|
||||
#define TIS_MAX 6
|
||||
|
||||
#define TES_DOWN 0
|
||||
#define TES_HUNGRY 1
|
||||
#define TES_FEEDING 2
|
||||
#define TES_READY 3
|
||||
#define TES_STOP 4
|
||||
#define TES_MAX 5
|
||||
|
||||
|
||||
#define TFT_FIB 1
|
||||
#define TFT_TRIE 2
|
||||
#define TFT_HASH 3
|
||||
|
||||
void rt_request_import(rtable *tab, struct rt_import_request *req);
|
||||
void rt_request_export(rtable *tab, struct rt_export_request *req);
|
||||
void rt_request_export_other(struct rt_exporter *tab, struct rt_export_request *req);
|
||||
|
||||
void rt_stop_import(struct rt_import_request *, void (*stopped)(struct rt_import_request *));
|
||||
void rt_stop_export(struct rt_export_request *, void (*stopped)(struct rt_export_request *));
|
||||
|
||||
const char *rt_import_state_name(u8 state);
|
||||
const char *rt_export_state_name(u8 state);
|
||||
|
||||
static inline u8 rt_import_get_state(struct rt_import_hook *ih) { return ih ? ih->import_state : TIS_DOWN; }
|
||||
static inline u8 rt_export_get_state(struct rt_export_hook *eh) { return eh ? atomic_load_explicit(&eh->export_state, memory_order_acquire) : TES_DOWN; }
|
||||
|
||||
u8 rt_set_export_state(struct rt_export_hook *hook, u32 expected_mask, u8 state);
|
||||
|
||||
void rte_import(struct rt_import_request *req, const net_addr *net, rte *new, struct rte_src *src);
|
||||
|
||||
#if 0
|
||||
/*
|
||||
* For table export processing
|
||||
*/
|
||||
@ -437,15 +580,7 @@ void rpe_mark_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);
|
||||
/* Get pending export seen status */
|
||||
int rpe_get_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);
|
||||
|
||||
/*
|
||||
* For rt_export_hook and rt_exporter inheritance
|
||||
*/
|
||||
|
||||
void rt_init_export(struct rt_exporter *re, struct rt_export_hook *hook);
|
||||
struct rt_export_hook *rt_alloc_export(struct rt_exporter *re, pool *pool, uint size);
|
||||
void rt_stop_export_common(struct rt_export_hook *hook);
|
||||
void rt_export_stopped(struct rt_export_hook *hook);
|
||||
void rt_exporter_init(struct rt_exporter *re);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Channel export hooks. To be refactored out.
|
||||
@ -453,15 +588,6 @@ void rt_exporter_init(struct rt_exporter *re);
|
||||
|
||||
int channel_preimport(struct rt_import_request *req, rte *new, const rte *old);
|
||||
|
||||
void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
|
||||
|
||||
void rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
|
||||
void rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
|
||||
void rt_feed_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
|
||||
void rt_notify_accepted(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
|
||||
void rt_notify_merged(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
|
||||
|
||||
void channel_rpe_mark_seen(struct channel *c, struct rt_pending_export *rpe);
|
||||
|
||||
/* Types of route announcement, also used as flags */
|
||||
#define RA_UNDEF 0 /* Undefined RA type */
|
||||
@ -512,6 +638,7 @@ struct hostcache {
|
||||
struct f_trie *trie; /* Trie of prefixes that might affect hostentries */
|
||||
list hostentries; /* List of all hostentries */
|
||||
struct rt_export_request req; /* Notifier */
|
||||
event source_event;
|
||||
};
|
||||
|
||||
#define rte_update channel_rte_import
|
||||
@ -574,10 +701,10 @@ void rt_flowspec_link(rtable *src, rtable *dst);
|
||||
void rt_flowspec_unlink(rtable *src, rtable *dst);
|
||||
rtable *rt_setup(pool *, struct rtable_config *);
|
||||
|
||||
struct rt_export_feed *rt_net_feed(rtable *t, net_addr *a);
|
||||
rte rt_net_best(rtable *t, net_addr *a);
|
||||
struct rt_export_feed *rt_net_feed(rtable *t, const net_addr *a, const struct rt_pending_export *first);
|
||||
rte rt_net_best(rtable *t, const net_addr *a);
|
||||
int rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter);
|
||||
rte *rt_export_merged(struct channel *c, const net_addr *n, const rte ** feed, uint count, linpool *pool, int silent);
|
||||
rte *rt_export_merged(struct channel *c, const struct rt_export_feed *feed, linpool *pool, int silent);
|
||||
void rt_refresh_begin(struct rt_import_request *);
|
||||
void rt_refresh_end(struct rt_import_request *);
|
||||
void rt_schedule_prune(struct rtable_private *t);
|
||||
@ -588,7 +715,6 @@ void rt_dump_hooks(rtable *);
|
||||
void rt_dump_hooks_all(void);
|
||||
int rt_reload_channel(struct channel *c);
|
||||
void rt_reload_channel_abort(struct channel *c);
|
||||
void rt_refeed_channel(struct channel *c);
|
||||
void rt_prune_sync(rtable *t, int all);
|
||||
struct rtable_config *rt_new_table(struct symbol *s, uint addr_type);
|
||||
void rt_new_default_table(struct symbol *s);
|
||||
@ -617,6 +743,7 @@ struct rt_show_data_rtable {
|
||||
struct channel *export_channel;
|
||||
struct channel *prefilter;
|
||||
struct krt_proto *kernel;
|
||||
struct rt_export_feeder req; /* Export feeder in use */
|
||||
};
|
||||
|
||||
struct rt_show_data {
|
||||
@ -625,14 +752,13 @@ struct rt_show_data {
|
||||
list tables;
|
||||
struct rt_show_data_rtable *tab; /* Iterator over table list */
|
||||
struct rt_show_data_rtable *last_table; /* Last table in output */
|
||||
struct rt_export_request req; /* Export request in use */
|
||||
int verbose, tables_defined_by;
|
||||
const struct filter *filter;
|
||||
struct proto *show_protocol;
|
||||
struct proto *export_protocol;
|
||||
struct channel *export_channel;
|
||||
struct config *running_on_config;
|
||||
struct rt_export_hook *kernel_export_hook;
|
||||
// struct rt_export_hook *kernel_export_hook;
|
||||
int export_mode, addr_mode, primary_only, filtered, stats;
|
||||
|
||||
int net_counter, rt_counter, show_counter, table_counter;
|
||||
|
@ -250,6 +250,9 @@ rt_get_source_o(struct rte_owner *p, u32 id)
|
||||
|
||||
if (src)
|
||||
{
|
||||
#ifdef RT_SOURCE_DEBUG
|
||||
log(L_INFO "Found source %uG", src->global_id);
|
||||
#endif
|
||||
lfuc_lock_revive(&src->uc);
|
||||
return src;
|
||||
}
|
||||
|
488
nest/rt-export.c
Normal file
488
nest/rt-export.c
Normal file
@ -0,0 +1,488 @@
|
||||
/*
|
||||
* BIRD -- Route Export Mechanisms
|
||||
*
|
||||
* (c) 2024 Maria Matejka <mq@jmq.cz>
|
||||
*
|
||||
* Can be freely distributed and used under the terms of the GNU GPL.
|
||||
*/
|
||||
|
||||
#include "nest/bird.h"
|
||||
#include "nest/route.h"
|
||||
#include "nest/protocol.h"
|
||||
|
||||
#define rtex_trace(_req, _cat, msg, args...) do { \
|
||||
if ((_req)->trace_routes & _cat) \
|
||||
log(L_TRACE "%s: " msg, (_req)->name, ##args); \
|
||||
} while (0)
|
||||
|
||||
static inline enum rt_export_state
|
||||
rt_export_change_state(struct rt_export_request *r, u32 expected_mask, enum rt_export_state state)
|
||||
{
|
||||
r->last_state_change = current_time();
|
||||
enum rt_export_state old = atomic_exchange_explicit(&r->export_state, state, memory_order_acq_rel);
|
||||
if (!((1 << old) & expected_mask))
|
||||
bug("Unexpected export state change from %s to %s, expected mask %02x",
|
||||
rt_export_state_name(old),
|
||||
rt_export_state_name(state),
|
||||
expected_mask
|
||||
);
|
||||
|
||||
rtex_trace(r, D_STATES, "Export state changed from %s to %s",
|
||||
rt_export_state_name(old), rt_export_state_name(state));
|
||||
|
||||
return old;
|
||||
}
|
||||
|
||||
const struct rt_export_union *
|
||||
rt_export_get(struct rt_export_request *r)
|
||||
{
|
||||
ASSERT_DIE(!r->cur);
|
||||
|
||||
#define EXPORT_FOUND(_kind) do { \
|
||||
struct rt_export_union *reu = tmp_alloc(sizeof *reu); \
|
||||
*reu = (struct rt_export_union) { \
|
||||
.kind = _kind, \
|
||||
.req = r, \
|
||||
.update = update, \
|
||||
.feed = feed, \
|
||||
}; \
|
||||
return (r->cur = reu); \
|
||||
} while (0) \
|
||||
|
||||
#define NOT_THIS_UPDATE do { \
|
||||
lfjour_release(&r->r); \
|
||||
return rt_export_get(r); \
|
||||
} while (0) \
|
||||
|
||||
enum rt_export_state es = rt_export_get_state(r);
|
||||
switch (es)
|
||||
{
|
||||
case TES_DOWN:
|
||||
rtex_trace(r, (D_ROUTES|D_STATES), "Export is down");
|
||||
return NULL;
|
||||
|
||||
case TES_STOP:
|
||||
rtex_trace(r, (D_ROUTES|D_STATES), "Received stop event");
|
||||
struct rt_export_union *reu = tmp_alloc(sizeof *reu);
|
||||
*reu = (struct rt_export_union) {
|
||||
.kind = RT_EXPORT_STOP,
|
||||
.req = r,
|
||||
};
|
||||
return (r->cur = reu);
|
||||
|
||||
case TES_PARTIAL:
|
||||
case TES_FEEDING:
|
||||
case TES_READY:
|
||||
break;
|
||||
|
||||
case TES_MAX:
|
||||
bug("invalid export state");
|
||||
}
|
||||
|
||||
/* Process sequence number reset event */
|
||||
if (lfjour_reset_seqno(&r->r))
|
||||
bmap_reset(&r->seq_map, 4);
|
||||
|
||||
/* Get a new update */
|
||||
SKIP_BACK_DECLARE(struct rt_export_item, update, li, lfjour_get(&r->r));
|
||||
SKIP_BACK_DECLARE(struct rt_exporter, e, journal, lfjour_of_recipient(&r->r));
|
||||
struct rt_export_feed *feed = NULL;
|
||||
|
||||
/* No update, try feed */
|
||||
if (!update)
|
||||
if (es == TES_READY)
|
||||
{
|
||||
/* Fed up of feeding */
|
||||
rtex_trace(r, D_ROUTES, "Export drained");
|
||||
return NULL;
|
||||
}
|
||||
else if (feed = rt_export_next_feed(&r->feeder))
|
||||
{
|
||||
/* Feeding more */
|
||||
bmap_set(&r->feed_map, feed->ni->index);
|
||||
rtex_trace(r, D_ROUTES, "Feeding %N", feed->ni->addr);
|
||||
|
||||
EXPORT_FOUND(RT_EXPORT_FEED);
|
||||
}
|
||||
else if (rt_export_get_state(r) == TES_DOWN)
|
||||
{
|
||||
/* Torn down inbetween */
|
||||
rtex_trace(r, D_STATES, "Export ended itself");
|
||||
return NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* No more food */
|
||||
rt_export_change_state(r, BIT32_ALL(TES_FEEDING, TES_PARTIAL), TES_READY);
|
||||
rtex_trace(r, D_STATES, "Fed up");
|
||||
CALL(r->fed, r);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* There actually is an update */
|
||||
if (bmap_test(&r->seq_map, update->seq))
|
||||
{
|
||||
/* But this update has been already processed, let's try another one */
|
||||
rtex_trace(r, D_ROUTES, "Skipping an already processed update %lu", update->seq);
|
||||
NOT_THIS_UPDATE;
|
||||
}
|
||||
|
||||
/* Is this update allowed by prefilter? */
|
||||
const net_addr *n = (update->new ?: update->old)->net;
|
||||
if (!rt_prefilter_net(&r->feeder.prefilter, n))
|
||||
{
|
||||
rtex_trace(r, D_ROUTES, "Not exporting %N due to prefilter", n);
|
||||
NOT_THIS_UPDATE;
|
||||
}
|
||||
|
||||
if ((es != TES_READY) && rt_net_is_feeding(r, n))
|
||||
{
|
||||
/* But this net shall get a feed first! */
|
||||
rtex_trace(r, D_ROUTES, "Expediting %N feed due to pending update %lu", n, update->seq);
|
||||
RCU_ANCHOR(u);
|
||||
feed = e->feed_net(e, u, n, update);
|
||||
|
||||
bmap_set(&r->feed_map, NET_TO_INDEX(n)->index);
|
||||
ASSERT_DIE(feed);
|
||||
|
||||
EXPORT_FOUND(RT_EXPORT_FEED);
|
||||
}
|
||||
|
||||
/* OK, now this actually is an update, thank you for your patience */
|
||||
rtex_trace(r, D_ROUTES, "Updating %N, seq %lu", n, update->seq);
|
||||
|
||||
EXPORT_FOUND(RT_EXPORT_UPDATE);
|
||||
|
||||
#undef NOT_THIS_UPDATE
|
||||
#undef EXPORT_FOUND
|
||||
}
|
||||
|
||||
void
|
||||
rt_export_release(const struct rt_export_union *u)
|
||||
{
|
||||
/* May be already released */
|
||||
if (!u->req)
|
||||
return;
|
||||
|
||||
struct rt_export_request *r = u->req;
|
||||
|
||||
/* Must be crosslinked */
|
||||
ASSERT_DIE(r->cur == u);
|
||||
r->cur = NULL;
|
||||
|
||||
switch (u->kind)
|
||||
{
|
||||
case RT_EXPORT_FEED:
|
||||
for (uint i = 0; i < u->feed->count_exports; i++)
|
||||
bmap_set(&r->seq_map, u->feed->exports[i]);
|
||||
|
||||
if (!u->update)
|
||||
break;
|
||||
|
||||
/* fall through */
|
||||
|
||||
case RT_EXPORT_UPDATE:
|
||||
rtex_trace(r, D_ROUTES, "Export %lu released", u->update->seq);
|
||||
lfjour_release(&r->r);
|
||||
|
||||
break;
|
||||
|
||||
case RT_EXPORT_STOP:
|
||||
/* Checking that we have indeed stopped the exporter */
|
||||
ASSERT_DIE(rt_export_get_state(r) == TES_DOWN);
|
||||
rtex_trace(r, D_ROUTES, "Export stopped");
|
||||
break;
|
||||
|
||||
default:
|
||||
bug("strange export kind");
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
rt_export_processed(struct rt_export_request *r, u64 seq)
|
||||
{
|
||||
rtex_trace(r, D_ROUTES, "Marking export %lu as processed", seq);
|
||||
|
||||
/* Check sequence number reset event */
|
||||
if (lfjour_reset_seqno(&r->r))
|
||||
bmap_reset(&r->seq_map, 4);
|
||||
|
||||
ASSERT_DIE(!bmap_test(&r->seq_map, seq));
|
||||
bmap_set(&r->seq_map, seq);
|
||||
}
|
||||
|
||||
struct rt_export_feed *
|
||||
rt_export_next_feed(struct rt_export_feeder *f)
|
||||
{
|
||||
ASSERT_DIE(f);
|
||||
while (1)
|
||||
{
|
||||
RCU_ANCHOR(u);
|
||||
|
||||
struct rt_exporter *e = atomic_load_explicit(&f->exporter, memory_order_acquire);
|
||||
if (!e)
|
||||
{
|
||||
rtex_trace(f, (D_ROUTES|D_STATES), "Exporter kicked us away");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
const net_addr *a = e->feed_next(e, u, f);
|
||||
if (!a)
|
||||
break;
|
||||
|
||||
if (!rt_prefilter_net(&f->prefilter, a))
|
||||
{
|
||||
rtex_trace(f, D_ROUTES, "Not feeding %N due to prefilter", a);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (f->feeding && !rt_net_is_feeding_feeder(f, a))
|
||||
{
|
||||
rtex_trace(f, D_ROUTES, "Not feeding %N, not requested", a);
|
||||
continue;
|
||||
}
|
||||
|
||||
struct rt_export_feed *feed = e->feed_net(e, u, a, NULL);
|
||||
if (feed)
|
||||
{
|
||||
rtex_trace(f, D_ROUTES, "Feeding %u routes for %N", feed->count_routes, a);
|
||||
return feed;
|
||||
}
|
||||
}
|
||||
|
||||
/* Feeding done */
|
||||
while (f->feeding)
|
||||
{
|
||||
struct rt_feeding_request *rfr = f->feeding;
|
||||
f->feeding = rfr->next;
|
||||
CALL(rfr->done, rfr);
|
||||
}
|
||||
|
||||
f->feed_index = 0;
|
||||
|
||||
if (f->feed_pending)
|
||||
{
|
||||
rtex_trace(f, D_STATES, "Feeding done, refeed request pending");
|
||||
f->feeding = f->feed_pending;
|
||||
f->feed_pending = NULL;
|
||||
return rt_export_next_feed(f);
|
||||
}
|
||||
else
|
||||
{
|
||||
rtex_trace(f, D_STATES, "Feeding done (%u)", f->feed_index);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
rt_feeding_request_default_done(struct rt_feeding_request *rfr)
|
||||
{
|
||||
mb_free(rfr);
|
||||
}
|
||||
|
||||
void
|
||||
rt_export_refeed_feeder(struct rt_export_feeder *f, struct rt_feeding_request *rfr)
|
||||
{
|
||||
if (!rfr)
|
||||
return;
|
||||
|
||||
rfr->next = f->feed_pending;
|
||||
f->feed_pending = rfr;
|
||||
}
|
||||
|
||||
void rt_export_refeed_request(struct rt_export_request *rer, struct rt_feeding_request *rfr)
|
||||
{
|
||||
if (!rfr)
|
||||
{
|
||||
rfr = mb_allocz(rer->pool, sizeof *rfr);
|
||||
rfr->done = rt_feeding_request_default_done;
|
||||
}
|
||||
|
||||
bmap_reset(&rer->feed_map, 4);
|
||||
rt_export_refeed_feeder(&rer->feeder, rfr);
|
||||
rt_export_change_state(rer, BIT32_ALL(TES_FEEDING, TES_PARTIAL, TES_READY), TES_PARTIAL);
|
||||
if (rer->r.event)
|
||||
ev_send(rer->r.target, rer->r.event);
|
||||
}
|
||||
|
||||
void
|
||||
rtex_export_subscribe(struct rt_exporter *e, struct rt_export_request *r)
|
||||
{
|
||||
rt_export_change_state(r, BIT32_ALL(TES_DOWN), TES_FEEDING);
|
||||
|
||||
ASSERT_DIE(r->pool);
|
||||
|
||||
rt_feeder_subscribe(e, &r->feeder);
|
||||
|
||||
lfjour_register(&e->journal, &r->r);
|
||||
|
||||
r->stats = (struct rt_export_stats) {};
|
||||
r->last_state_change = current_time();
|
||||
bmap_init(&r->seq_map, r->pool, 4);
|
||||
bmap_init(&r->feed_map, r->pool, 4);
|
||||
|
||||
rt_export_refeed_request(r, NULL);
|
||||
}
|
||||
|
||||
void
|
||||
rtex_export_unsubscribe(struct rt_export_request *r)
|
||||
{
|
||||
rt_feeder_unsubscribe(&r->feeder);
|
||||
|
||||
if (r->cur)
|
||||
rt_export_release(r->cur);
|
||||
|
||||
switch (rt_export_change_state(r, BIT32_ALL(TES_FEEDING, TES_PARTIAL, TES_READY, TES_STOP), TES_DOWN))
|
||||
{
|
||||
case TES_FEEDING:
|
||||
case TES_PARTIAL:
|
||||
case TES_READY:
|
||||
case TES_STOP:
|
||||
lfjour_unregister(&r->r);
|
||||
break;
|
||||
default:
|
||||
bug("not implemented");
|
||||
}
|
||||
|
||||
bmap_free(&r->feed_map);
|
||||
bmap_free(&r->seq_map);
|
||||
}
|
||||
|
||||
static void
|
||||
rt_exporter_cleanup_done(struct lfjour *j, u64 begin_seq UNUSED, u64 end_seq)
|
||||
{
|
||||
SKIP_BACK_DECLARE(struct rt_exporter, e, journal, j);
|
||||
|
||||
/* TODO: log the begin_seq / end_seq values */
|
||||
|
||||
CALL(e->cleanup_done, e, end_seq);
|
||||
if (e->stopped && (lfjour_count_recipients(j) == 0))
|
||||
{
|
||||
settle_cancel(&j->announce_timer);
|
||||
ev_postpone(&j->cleanup_event);
|
||||
e->stopped(e);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
rt_exporter_init(struct rt_exporter *e, struct settle_config *scf)
|
||||
{
|
||||
rtex_trace(e, D_STATES, "Exporter init");
|
||||
e->journal.cleanup_done = rt_exporter_cleanup_done;
|
||||
lfjour_init(&e->journal, scf);
|
||||
ASSERT_DIE(e->feed_net);
|
||||
ASSERT_DIE(e->feed_next);
|
||||
}
|
||||
|
||||
struct rt_export_item *
|
||||
rt_exporter_push(struct rt_exporter *e, const struct rt_export_item *uit)
|
||||
{
|
||||
/* Get the object */
|
||||
struct lfjour_item *lit = lfjour_push_prepare(&e->journal);
|
||||
if (!lit)
|
||||
return NULL;
|
||||
|
||||
SKIP_BACK_DECLARE(struct rt_export_item, it, li, lit);
|
||||
|
||||
/* Copy the data, keeping the header */
|
||||
memcpy(&it->data, &uit->data, e->journal.item_size - OFFSETOF(struct rt_export_item, data));
|
||||
|
||||
/* Commit the update */
|
||||
rtex_trace(e, D_ROUTES, "Announcing change %lu at %N: %p (%u) -> %p (%u)",
|
||||
lit->seq, (uit->new ?: uit->old)->net,
|
||||
uit->old, uit->old ? uit->old->id : 0,
|
||||
uit->new, uit->new ? uit->new->id : 0);
|
||||
|
||||
lfjour_push_commit(&e->journal);
|
||||
|
||||
/* Return the update pointer */
|
||||
return it;
|
||||
}
|
||||
|
||||
#define RTEX_FEEDERS_LOCK(e) \
|
||||
while (atomic_exchange_explicit(&e->feeders_lock, 1, memory_order_acq_rel)) \
|
||||
birdloop_yield(); \
|
||||
CLEANUP(_rtex_feeders_unlock_) UNUSED struct rt_exporter *_rtex_feeders_locked_ = e;
|
||||
|
||||
static inline void _rtex_feeders_unlock_(struct rt_exporter **e)
|
||||
{
|
||||
ASSERT_DIE(atomic_exchange_explicit(&(*e)->feeders_lock, 0, memory_order_acq_rel));
|
||||
}
|
||||
|
||||
void
|
||||
rt_feeder_subscribe(struct rt_exporter *e, struct rt_export_feeder *f)
|
||||
{
|
||||
f->feed_index = 0;
|
||||
|
||||
atomic_store_explicit(&f->exporter, e, memory_order_relaxed);
|
||||
|
||||
RTEX_FEEDERS_LOCK(e);
|
||||
rt_export_feeder_add_tail(&e->feeders, f);
|
||||
|
||||
rtex_trace(f, D_STATES, "Subscribed to exporter %s", e->name);
|
||||
}
|
||||
|
||||
void
|
||||
rt_feeder_unsubscribe(struct rt_export_feeder *f)
|
||||
{
|
||||
RCU_ANCHOR(a);
|
||||
struct rt_exporter *e = atomic_exchange_explicit(&f->exporter, NULL, memory_order_acquire);
|
||||
if (e)
|
||||
{
|
||||
RTEX_FEEDERS_LOCK(e);
|
||||
rt_export_feeder_rem_node(&e->feeders, f);
|
||||
|
||||
rtex_trace(f, D_STATES, "Unsubscribed from exporter %s", e->name);
|
||||
}
|
||||
else
|
||||
rtex_trace(f, D_STATES, "Already unsubscribed");
|
||||
}
|
||||
|
||||
void
|
||||
rt_exporter_shutdown(struct rt_exporter *e, void (*stopped)(struct rt_exporter *))
|
||||
{
|
||||
rtex_trace(e, D_STATES, "Exporter shutdown");
|
||||
|
||||
/* Last lock check before dropping the domain reference */
|
||||
if (e->journal.domain)
|
||||
ASSERT_DIE(DG_IS_LOCKED(e->journal.domain));
|
||||
|
||||
e->journal.domain = NULL;
|
||||
|
||||
/* We have to tell every receiver to stop */
|
||||
_Bool done = 1;
|
||||
WALK_TLIST(lfjour_recipient, r, &e->journal.recipients)
|
||||
{
|
||||
done = 0;
|
||||
rt_export_change_state(
|
||||
SKIP_BACK(struct rt_export_request, r, r),
|
||||
BIT32_ALL(TES_FEEDING, TES_PARTIAL, TES_READY, TES_STOP),
|
||||
TES_STOP);
|
||||
}
|
||||
|
||||
/* We can drop feeders synchronously */
|
||||
{
|
||||
RTEX_FEEDERS_LOCK(e);
|
||||
WALK_TLIST_DELSAFE(rt_export_feeder, f, &e->feeders)
|
||||
{
|
||||
ASSERT_DIE(atomic_exchange_explicit(&f->exporter, NULL, memory_order_acq_rel) == e);
|
||||
rt_export_feeder_rem_node(&e->feeders, f);
|
||||
}
|
||||
}
|
||||
|
||||
/* Wait for feeders to finish */
|
||||
synchronize_rcu();
|
||||
|
||||
/* The rest is done via the cleanup routine */
|
||||
lfjour_do_cleanup_now(&e->journal);
|
||||
|
||||
if (done)
|
||||
{
|
||||
ev_postpone(&e->journal.cleanup_event);
|
||||
settle_cancel(&e->journal.announce_timer);
|
||||
CALL(stopped, e);
|
||||
}
|
||||
else
|
||||
// e->stopped = stopped;
|
||||
bug("not implemented yet");
|
||||
}
|
168
nest/rt-show.c
168
nest/rt-show.c
@ -18,6 +18,9 @@
|
||||
#include "filter/data.h"
|
||||
#include "sysdep/unix/krt.h"
|
||||
|
||||
static void rt_show_cont(struct cli *c);
|
||||
static void rt_show_done(struct rt_show_data *d);
|
||||
|
||||
static void
|
||||
rt_show_table(struct rt_show_data *d)
|
||||
{
|
||||
@ -93,7 +96,7 @@ rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, int primary
|
||||
}
|
||||
|
||||
static void
|
||||
rt_show_net(struct rt_show_data *d, const net_addr *n, const rte **feed, uint count)
|
||||
rt_show_net(struct rt_show_data *d, const struct rt_export_feed *feed)
|
||||
{
|
||||
struct cli *c = d->cli;
|
||||
byte ia[NET_MAX_TEXT_LENGTH+16+1];
|
||||
@ -108,9 +111,13 @@ rt_show_net(struct rt_show_data *d, const net_addr *n, const rte **feed, uint co
|
||||
uint last_label = 0;
|
||||
int pass = 0;
|
||||
|
||||
for (uint i = 0; i < count; i++)
|
||||
for (uint i = 0; i < feed->count_routes; i++)
|
||||
{
|
||||
if (!d->tab->prefilter && (rte_is_filtered(feed[i]) != d->filtered))
|
||||
rte *e = &feed->block[i];
|
||||
if (e->flags & REF_OBSOLETE)
|
||||
break;
|
||||
|
||||
if (!d->tab->prefilter && (rte_is_filtered(e) != d->filtered))
|
||||
continue;
|
||||
|
||||
d->rt_counter++;
|
||||
@ -120,20 +127,19 @@ rt_show_net(struct rt_show_data *d, const net_addr *n, const rte **feed, uint co
|
||||
if (pass)
|
||||
continue;
|
||||
|
||||
struct rte e = *feed[i];
|
||||
if (d->tab->prefilter)
|
||||
if (e.sender != d->tab->prefilter->in_req.hook)
|
||||
if (e->sender != d->tab->prefilter->in_req.hook)
|
||||
continue;
|
||||
else while (e.attrs->next)
|
||||
e.attrs = e.attrs->next;
|
||||
else
|
||||
e->attrs = ea_strip_to(e->attrs, BIT32_ALL(EALS_PREIMPORT));
|
||||
|
||||
/* Export channel is down, do not try to export routes to it */
|
||||
if (ec && !ec->out_req.hook)
|
||||
if (ec && (rt_export_get_state(&ec->out_req) == TES_DOWN))
|
||||
goto skip;
|
||||
|
||||
if (d->export_mode == RSEM_EXPORTED)
|
||||
{
|
||||
if (!bmap_test(&ec->export_map, e.id))
|
||||
if (!bmap_test(&ec->export_accepted_map, e->id))
|
||||
goto skip;
|
||||
|
||||
// if (ec->ra_mode != RA_ANY)
|
||||
@ -143,17 +149,17 @@ rt_show_net(struct rt_show_data *d, const net_addr *n, const rte **feed, uint co
|
||||
{
|
||||
/* Special case for merged export */
|
||||
pass = 1;
|
||||
rte *em = rt_export_merged(ec, n, feed, count, tmp_linpool, 1);
|
||||
rte *em = rt_export_merged(ec, feed, tmp_linpool, 1);
|
||||
|
||||
if (em)
|
||||
e = *em;
|
||||
e = em;
|
||||
else
|
||||
goto skip;
|
||||
}
|
||||
else if (d->export_mode)
|
||||
{
|
||||
struct proto *ep = ec->proto;
|
||||
int ic = ep->preexport ? ep->preexport(ec, &e) : 0;
|
||||
int ic = ep->preexport ? ep->preexport(ec, e) : 0;
|
||||
|
||||
if (ec->ra_mode == RA_OPTIMAL || ec->ra_mode == RA_MERGED)
|
||||
pass = 1;
|
||||
@ -169,7 +175,7 @@ rt_show_net(struct rt_show_data *d, const net_addr *n, const rte **feed, uint co
|
||||
* command may change the export filter and do not update routes.
|
||||
*/
|
||||
int do_export = (ic > 0) ||
|
||||
(f_run(ec->out_filter, &e, FF_SILENT) <= F_ACCEPT);
|
||||
(f_run(ec->out_filter, e, FF_SILENT) <= F_ACCEPT);
|
||||
|
||||
if (do_export != (d->export_mode == RSEM_EXPORT))
|
||||
goto skip;
|
||||
@ -179,27 +185,27 @@ rt_show_net(struct rt_show_data *d, const net_addr *n, const rte **feed, uint co
|
||||
}
|
||||
}
|
||||
|
||||
if (d->show_protocol && (&d->show_protocol->sources != e.src->owner))
|
||||
if (d->show_protocol && (&d->show_protocol->sources != e->src->owner))
|
||||
goto skip;
|
||||
|
||||
if (f_run(d->filter, &e, 0) > F_ACCEPT)
|
||||
if (f_run(d->filter, e, 0) > F_ACCEPT)
|
||||
goto skip;
|
||||
|
||||
if (d->stats < 2)
|
||||
{
|
||||
uint label = ea_get_int(e.attrs, &ea_gen_mpls_label, ~0U);
|
||||
uint label = ea_get_int(e->attrs, &ea_gen_mpls_label, ~0U);
|
||||
|
||||
if (first_show || (last_label != label))
|
||||
{
|
||||
if (!~label)
|
||||
net_format(n, ia, sizeof(ia));
|
||||
net_format(feed->ni->addr, ia, sizeof(ia));
|
||||
else
|
||||
bsnprintf(ia, sizeof(ia), "%N mpls %d", n, label);
|
||||
bsnprintf(ia, sizeof(ia), "%N mpls %d", feed->ni->addr, label);
|
||||
}
|
||||
else
|
||||
ia[0] = 0;
|
||||
|
||||
rt_show_rte(c, ia, &e, d, !d->tab->prefilter && !i);
|
||||
rt_show_rte(c, ia, e, d, !d->tab->prefilter && !i);
|
||||
first_show = 0;
|
||||
last_label = label;
|
||||
}
|
||||
@ -209,6 +215,7 @@ rt_show_net(struct rt_show_data *d, const net_addr *n, const rte **feed, uint co
|
||||
skip:
|
||||
if (d->primary_only)
|
||||
break;
|
||||
#undef e
|
||||
}
|
||||
|
||||
if ((d->show_counter - d->show_counter_last_flush) > 64)
|
||||
@ -219,73 +226,39 @@ rt_show_net(struct rt_show_data *d, const net_addr *n, const rte **feed, uint co
|
||||
}
|
||||
|
||||
static void
|
||||
rt_show_net_export_bulk(struct rt_export_request *req, const net_addr *n,
|
||||
struct rt_pending_export *first UNUSED, struct rt_pending_export *last UNUSED,
|
||||
const rte **feed, uint count)
|
||||
{
|
||||
SKIP_BACK_DECLARE(struct rt_show_data, d, req, req);
|
||||
return rt_show_net(d, n, feed, count);
|
||||
}
|
||||
|
||||
static void
|
||||
rt_show_export_stopped_cleanup(struct rt_export_request *req)
|
||||
{
|
||||
SKIP_BACK_DECLARE(struct rt_show_data, d, req, req);
|
||||
|
||||
/* The hook is now invalid */
|
||||
req->hook = NULL;
|
||||
|
||||
/* And free the CLI (deferred) */
|
||||
rp_free(d->cli->pool);
|
||||
}
|
||||
|
||||
static int
|
||||
rt_show_cleanup(struct cli *c)
|
||||
{
|
||||
struct rt_show_data *d = c->rover;
|
||||
struct rt_show_data_rtable *tab, *tabx;
|
||||
c->cleanup = NULL;
|
||||
|
||||
/* Cancel the feed */
|
||||
if (d->req.hook)
|
||||
/* Cancel the feeds */
|
||||
WALK_LIST_DELSAFE(tab, tabx, d->tables)
|
||||
{
|
||||
rt_stop_export(&d->req, rt_show_export_stopped_cleanup);
|
||||
return 1;
|
||||
if (rt_export_feed_active(&tab->req))
|
||||
rt_feeder_unsubscribe(&tab->req);
|
||||
}
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void rt_show_export_stopped(struct rt_export_request *req);
|
||||
|
||||
static void
|
||||
rt_show_log_state_change(struct rt_export_request *req, u8 state)
|
||||
{
|
||||
if (state == TES_READY)
|
||||
rt_stop_export(req, rt_show_export_stopped);
|
||||
}
|
||||
|
||||
static void
|
||||
rt_show_dump_req(struct rt_export_request *req)
|
||||
{
|
||||
debug(" CLI Show Route Feed %p\n", req);
|
||||
}
|
||||
|
||||
static void
|
||||
rt_show_done(struct rt_show_data *d)
|
||||
{
|
||||
/* Force the cleanup */
|
||||
rt_show_cleanup(d->cli);
|
||||
|
||||
/* Write pending messages */
|
||||
cli_write_trigger(d->cli);
|
||||
|
||||
/* No more action */
|
||||
d->cli->cleanup = NULL;
|
||||
d->cli->cont = NULL;
|
||||
d->cli->rover = NULL;
|
||||
|
||||
/* Write pending messages */
|
||||
cli_write_trigger(d->cli);
|
||||
}
|
||||
|
||||
static void
|
||||
rt_show_cont(struct rt_show_data *d)
|
||||
rt_show_cont(struct cli *c)
|
||||
{
|
||||
struct cli *c = d->cli;
|
||||
struct rt_show_data *d = c->rover;
|
||||
|
||||
if (d->running_on_config && (d->running_on_config != config))
|
||||
{
|
||||
@ -293,17 +266,6 @@ rt_show_cont(struct rt_show_data *d)
|
||||
return rt_show_done(d);
|
||||
}
|
||||
|
||||
d->req = (struct rt_export_request) {
|
||||
.prefilter.addr = d->addr,
|
||||
.name = "CLI Show Route",
|
||||
.list = &global_work_list,
|
||||
.pool = c->pool,
|
||||
.export_bulk = rt_show_net_export_bulk,
|
||||
.dump_req = rt_show_dump_req,
|
||||
.log_state_change = rt_show_log_state_change,
|
||||
.prefilter.mode = d->addr_mode,
|
||||
};
|
||||
|
||||
d->table_counter++;
|
||||
|
||||
d->show_counter_last = d->show_counter;
|
||||
@ -313,16 +275,17 @@ rt_show_cont(struct rt_show_data *d)
|
||||
if (d->tables_defined_by & RSD_TDB_SET)
|
||||
rt_show_table(d);
|
||||
|
||||
rt_request_export(d->tab->table, &d->req);
|
||||
}
|
||||
RT_FEED_WALK(&d->tab->req, f)
|
||||
if (f->count_routes)
|
||||
rt_show_net(d, f);
|
||||
|
||||
static void
|
||||
rt_show_export_stopped(struct rt_export_request *req)
|
||||
{
|
||||
SKIP_BACK_DECLARE(struct rt_show_data, d, req, req);
|
||||
|
||||
/* The hook is now invalid */
|
||||
req->hook = NULL;
|
||||
if (rt_export_feed_active(&d->tab->req))
|
||||
rt_feeder_unsubscribe(&d->tab->req);
|
||||
else
|
||||
{
|
||||
cli_printf(c, 8004, "Table is shutting down");
|
||||
return rt_show_done(d);
|
||||
}
|
||||
|
||||
if (d->stats)
|
||||
{
|
||||
@ -330,21 +293,22 @@ rt_show_export_stopped(struct rt_export_request *req)
|
||||
rt_show_table(d);
|
||||
|
||||
cli_printf(d->cli, -1007, "%d of %d routes for %d networks in table %s",
|
||||
d->show_counter - d->show_counter_last, d->rt_counter - d->rt_counter_last,
|
||||
d->net_counter - d->net_counter_last, d->tab->name);
|
||||
d->show_counter - d->show_counter_last, d->rt_counter - d->rt_counter_last,
|
||||
d->net_counter - d->net_counter_last, d->tab->name);
|
||||
}
|
||||
|
||||
d->tab = NODE_NEXT(d->tab);
|
||||
|
||||
if (NODE_VALID(d->tab))
|
||||
return rt_show_cont(d);
|
||||
/* Gonna be called later by this_cli->cont() */
|
||||
return;
|
||||
|
||||
/* Printout total stats */
|
||||
if (d->stats && (d->table_counter > 1))
|
||||
{
|
||||
if (d->last_table) cli_printf(d->cli, -1007, "");
|
||||
cli_printf(d->cli, 14, "Total: %d of %d routes for %d networks in %d tables",
|
||||
d->show_counter, d->rt_counter, d->net_counter, d->table_counter);
|
||||
d->show_counter, d->rt_counter, d->net_counter, d->table_counter);
|
||||
}
|
||||
else if (!d->rt_counter && ((d->addr_mode == TE_ADDR_EQUAL) || (d->addr_mode == TE_ADDR_FOR)))
|
||||
cli_printf(d->cli, 8001, "Network not found");
|
||||
@ -388,7 +352,7 @@ rt_show_get_default_tables(struct rt_show_data *d)
|
||||
{
|
||||
WALK_LIST(c, d->export_protocol->channels)
|
||||
{
|
||||
if (!c->out_req.hook)
|
||||
if (rt_export_get_state(&c->out_req) == TES_DOWN)
|
||||
continue;
|
||||
|
||||
tab = rt_show_add_table(d, c->table);
|
||||
@ -450,6 +414,18 @@ rt_show_prepare_tables(struct rt_show_data *d)
|
||||
rem_node(&(tab->n));
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Open the export request */
|
||||
tab->req = (struct rt_export_feeder) {
|
||||
.name = "cli.feeder",
|
||||
.prefilter = {
|
||||
.addr = d->addr,
|
||||
.mode = d->addr_mode,
|
||||
},
|
||||
.trace_routes = config->show_route_debug,
|
||||
};
|
||||
|
||||
rt_feeder_subscribe(&tab->table->export_all, &tab->req);
|
||||
}
|
||||
|
||||
/* Ensure there is at least one table */
|
||||
@ -457,12 +433,6 @@ rt_show_prepare_tables(struct rt_show_data *d)
|
||||
cf_error("No valid tables");
|
||||
}
|
||||
|
||||
static void
|
||||
rt_show_dummy_cont(struct cli *c UNUSED)
|
||||
{
|
||||
/* Explicitly do nothing to prevent CLI from trying to parse another command. */
|
||||
}
|
||||
|
||||
void
|
||||
rt_show(struct rt_show_data *d)
|
||||
{
|
||||
@ -479,7 +449,7 @@ rt_show(struct rt_show_data *d)
|
||||
|
||||
this_cli->cleanup = rt_show_cleanup;
|
||||
this_cli->rover = d;
|
||||
this_cli->cont = rt_show_dummy_cont;
|
||||
this_cli->cont = rt_show_cont;
|
||||
|
||||
rt_show_cont(d);
|
||||
cli_write_trigger(this_cli);
|
||||
}
|
||||
|
2214
nest/rt-table.c
2214
nest/rt-table.c
File diff suppressed because it is too large
Load Diff
@ -2510,44 +2510,6 @@ babel_rt_notify(struct proto *P, struct channel *c UNUSED, const net_addr *net,
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
babel_feed_begin(struct channel *C)
|
||||
{
|
||||
if (!C->refeeding || C->refeed_req.hook)
|
||||
return;
|
||||
|
||||
struct babel_proto *p = (struct babel_proto *) C->proto;
|
||||
struct fib *rtable = (C->net_type == NET_IP4) ? &p->ip4_rtable : &p->ip6_rtable;
|
||||
|
||||
FIB_WALK(rtable, struct babel_entry, e)
|
||||
if (e->valid == BABEL_ENTRY_VALID)
|
||||
e->valid = BABEL_ENTRY_REFEEDING;
|
||||
FIB_WALK_END;
|
||||
}
|
||||
|
||||
static void
|
||||
babel_feed_end(struct channel *C)
|
||||
{
|
||||
if (!C->refeeding || C->refeed_req.hook)
|
||||
return;
|
||||
|
||||
struct babel_proto *p = (struct babel_proto *) C->proto;
|
||||
struct fib *rtable = (C->net_type == NET_IP4) ? &p->ip4_rtable : &p->ip6_rtable;
|
||||
int changed = 0;
|
||||
|
||||
FIB_WALK(rtable, struct babel_entry, e)
|
||||
if (e->valid == BABEL_ENTRY_REFEEDING)
|
||||
{
|
||||
babel_entry_invalidate(e);
|
||||
changed++;
|
||||
}
|
||||
FIB_WALK_END;
|
||||
|
||||
if (changed)
|
||||
babel_trigger_update(p);
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
babel_rte_better(const rte *new, const rte *old)
|
||||
{
|
||||
@ -2600,8 +2562,6 @@ babel_init(struct proto_config *CF)
|
||||
P->iface_sub.if_notify = babel_if_notify;
|
||||
P->rt_notify = babel_rt_notify;
|
||||
P->preexport = babel_preexport;
|
||||
P->feed_begin = babel_feed_begin;
|
||||
P->feed_end = babel_feed_end;
|
||||
|
||||
P->sources.class = &babel_rte_owner_class;
|
||||
|
||||
|
@ -318,7 +318,6 @@ struct babel_entry {
|
||||
#define BABEL_ENTRY_DUMMY 0 /* No outgoing route */
|
||||
#define BABEL_ENTRY_VALID 1 /* Valid outgoing route */
|
||||
#define BABEL_ENTRY_STALE 2 /* Stale outgoing route, waiting for GC */
|
||||
#define BABEL_ENTRY_REFEEDING 3 /* Route valid until feed ends */
|
||||
|
||||
|
||||
/*
|
||||
|
@ -2716,57 +2716,57 @@ bgp_rte_recalculate(struct rtable_private *table, net *net,
|
||||
}
|
||||
|
||||
void
|
||||
bgp_rte_modify_stale(struct rt_export_request *req, const net_addr *n,
|
||||
struct rt_pending_export *first, struct rt_pending_export *last,
|
||||
const rte **feed, uint count)
|
||||
bgp_rte_modify_stale(void *_bc)
|
||||
{
|
||||
SKIP_BACK_DECLARE(struct bgp_channel, c, stale_feed, req);
|
||||
struct bgp_channel *c = _bc;
|
||||
struct rt_import_hook *irh = c->c.in_req.hook;
|
||||
|
||||
/* Find our routes among others */
|
||||
for (uint i=0; i<count; i++)
|
||||
{
|
||||
const rte *r = feed[i];
|
||||
|
||||
if (
|
||||
!rte_is_valid(r) || /* Not a valid route */
|
||||
(r->sender != irh) || /* Not our route */
|
||||
(r->stale_cycle == irh->stale_set)) /* A new route, do not mark as stale */
|
||||
continue;
|
||||
|
||||
eattr *ea = ea_find(r->attrs, BGP_EA_ID(BA_COMMUNITY));
|
||||
const struct adata *ad = ea ? ea->u.ptr : NULL;
|
||||
uint flags = ea ? ea->flags : BAF_PARTIAL;
|
||||
|
||||
/* LLGR not allowed, withdraw the route */
|
||||
if (ad && int_set_contains(ad, BGP_COMM_NO_LLGR))
|
||||
RT_FEED_WALK(&c->stale_feed, f) TMP_SAVED
|
||||
if (task_still_in_limit())
|
||||
{
|
||||
rte_import(&c->c.in_req, n, NULL, r->src);
|
||||
continue;
|
||||
for (uint i = 0; i < f->count_routes; i++)
|
||||
{
|
||||
rte *r = &f->block[i];
|
||||
if ((r->sender != irh) || /* Not our route */
|
||||
(r->stale_cycle == irh->stale_set)) /* A new route, do not mark as stale */
|
||||
continue;
|
||||
|
||||
/* Don't reinstate obsolete routes */
|
||||
if (r->flags & REF_OBSOLETE)
|
||||
break;
|
||||
|
||||
eattr *ea = ea_find(r->attrs, BGP_EA_ID(BA_COMMUNITY));
|
||||
const struct adata *ad = ea ? ea->u.ptr : NULL;
|
||||
uint flags = ea ? ea->flags : BAF_PARTIAL;
|
||||
|
||||
/* LLGR not allowed, withdraw the route */
|
||||
if (ad && int_set_contains(ad, BGP_COMM_NO_LLGR))
|
||||
{
|
||||
rte_import(&c->c.in_req, r->net, NULL, r->src);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Route already marked as LLGR, do nothing */
|
||||
if (ad && int_set_contains(ad, BGP_COMM_LLGR_STALE))
|
||||
continue;
|
||||
|
||||
/* Mark the route as LLGR */
|
||||
bgp_set_attr_ptr(&r->attrs, BA_COMMUNITY, flags, int_set_add(tmp_linpool, ad, BGP_COMM_LLGR_STALE));
|
||||
|
||||
/* We need to update the route but keep it stale. */
|
||||
ASSERT_DIE(irh->stale_set == irh->stale_valid + 1);
|
||||
irh->stale_set--;
|
||||
rte_import(&c->c.in_req, r->net, r, r->src);
|
||||
irh->stale_set++;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
proto_send_event(c->c.proto, &c->stale_event);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Route already marked as LLGR, do nothing */
|
||||
if (ad && int_set_contains(ad, BGP_COMM_LLGR_STALE))
|
||||
continue;
|
||||
|
||||
/* Store the tmp_linpool state to aggresively save memory */
|
||||
struct lp_state *tmpp = lp_save(tmp_linpool);
|
||||
|
||||
/* Mark the route as LLGR */
|
||||
rte e0 = *r;
|
||||
bgp_set_attr_ptr(&e0.attrs, BA_COMMUNITY, flags, int_set_add(tmp_linpool, ad, BGP_COMM_LLGR_STALE));
|
||||
|
||||
/* We need to update the route but keep it stale. */
|
||||
ASSERT_DIE(irh->stale_set == irh->stale_valid + 1);
|
||||
irh->stale_set--;
|
||||
rte_import(&c->c.in_req, n, &e0, r->src);
|
||||
irh->stale_set++;
|
||||
|
||||
/* Restore the memory state */
|
||||
lp_restore(tmp_linpool, tmpp);
|
||||
}
|
||||
|
||||
rpe_mark_seen_all(req->hook, first, last, NULL);
|
||||
rt_feeder_unsubscribe(&c->stale_feed);
|
||||
}
|
||||
|
||||
|
||||
|
140
proto/bgp/bgp.c
140
proto/bgp/bgp.c
@ -749,6 +749,10 @@ bgp_conn_enter_established_state(struct bgp_conn *conn)
|
||||
if (peer->gr_aware)
|
||||
c->load_state = BFS_LOADING;
|
||||
|
||||
/* We'll also send End-of-RIB */
|
||||
if (p->cf->gr_mode)
|
||||
c->feed_state = BFS_LOADING;
|
||||
|
||||
c->ext_next_hop = c->cf->ext_next_hop && (bgp_channel_is_ipv6(c) || rem->ext_next_hop);
|
||||
c->add_path_rx = (loc->add_path & BGP_ADD_PATH_RX) && (rem->add_path & BGP_ADD_PATH_TX);
|
||||
c->add_path_tx = (loc->add_path & BGP_ADD_PATH_TX) && (rem->add_path & BGP_ADD_PATH_RX);
|
||||
@ -913,55 +917,25 @@ bgp_handle_graceful_restart(struct bgp_proto *p)
|
||||
tm_start_in(p->gr_timer, p->conn->remote_caps->gr_time S, p->p.loop);
|
||||
}
|
||||
|
||||
static void
|
||||
bgp_graceful_restart_feed_done(struct rt_export_request *req)
|
||||
{
|
||||
req->hook = NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
bgp_graceful_restart_feed_dump_req(struct rt_export_request *req)
|
||||
{
|
||||
SKIP_BACK_DECLARE(struct bgp_channel, c, stale_feed, req);
|
||||
debug(" BGP-GR %s.%s export request %p\n", c->c.proto->name, c->c.name, req);
|
||||
}
|
||||
|
||||
static void
|
||||
bgp_graceful_restart_feed_log_state_change(struct rt_export_request *req, u8 state)
|
||||
{
|
||||
SKIP_BACK_DECLARE(struct bgp_channel, c, stale_feed, req);
|
||||
struct bgp_proto *p = (void *) c->c.proto;
|
||||
BGP_TRACE(D_EVENTS, "Long-lived graceful restart export state changed to %s", rt_export_state_name(state));
|
||||
|
||||
if (state == TES_READY)
|
||||
rt_stop_export(req, bgp_graceful_restart_feed_done);
|
||||
}
|
||||
|
||||
static void
|
||||
bgp_graceful_restart_drop_export(struct rt_export_request *req UNUSED, const net_addr *n UNUSED, struct rt_pending_export *rpe UNUSED)
|
||||
{ /* Nothing to do */ }
|
||||
|
||||
static void
|
||||
bgp_graceful_restart_feed(struct bgp_channel *c)
|
||||
{
|
||||
c->stale_feed = (struct rt_export_request) {
|
||||
.name = "BGP-GR",
|
||||
.list = proto_event_list(c->c.proto),
|
||||
.pool = c->c.proto->pool,
|
||||
.feed_block_size = c->c.feed_block_size,
|
||||
.trace_routes = c->c.debug | c->c.proto->debug,
|
||||
.dump_req = bgp_graceful_restart_feed_dump_req,
|
||||
.log_state_change = bgp_graceful_restart_feed_log_state_change,
|
||||
.export_bulk = bgp_rte_modify_stale,
|
||||
.export_one = bgp_graceful_restart_drop_export,
|
||||
c->stale_feed = (struct rt_export_feeder) {
|
||||
.name = mb_sprintf(c->c.proto->pool, "%s.%s.llgr", c->c.proto->name, c->c.name),
|
||||
.trace_routes = c->c.debug,
|
||||
};
|
||||
c->stale_event = (event) {
|
||||
.hook = bgp_rte_modify_stale,
|
||||
.data = c,
|
||||
};
|
||||
|
||||
rt_request_export(c->c.table, &c->stale_feed);
|
||||
rt_feeder_subscribe(&c->c.table->export_all, &c->stale_feed);
|
||||
proto_send_event(c->c.proto, &c->stale_event);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* bgp_graceful_restart_done - finish active BGP graceful restart
|
||||
* @c: BGP channel
|
||||
@ -1624,86 +1598,41 @@ bgp_reload_out(struct proto *P, uintptr_t _ UNUSED, int __ UNUSED)
|
||||
cli_msg(-8006, "%s: bgp reload out not implemented yet", P->name);
|
||||
}
|
||||
|
||||
struct bgp_enhanced_refresh_request {
|
||||
struct rt_feeding_request rfr;
|
||||
struct bgp_channel *c;
|
||||
};
|
||||
|
||||
static void
|
||||
bgp_feed_begin(struct channel *C)
|
||||
void
|
||||
bgp_done_route_refresh(struct rt_feeding_request *rfr)
|
||||
{
|
||||
struct bgp_proto *p = (void *) C->proto;
|
||||
struct bgp_channel *c = (void *) C;
|
||||
SKIP_BACK_DECLARE(struct bgp_enhanced_refresh_request, berr, rfr, rfr);
|
||||
struct bgp_channel *c = berr->c;
|
||||
SKIP_BACK_DECLARE(struct bgp_proto, p, p, c->c.proto);
|
||||
|
||||
/* Ignore non-BGP channels */
|
||||
if (C->class != &channel_bgp)
|
||||
return;
|
||||
/* Schedule EoRR packet */
|
||||
ASSERT_DIE(c->feed_state == BFS_REFRESHING);
|
||||
|
||||
/* This should not happen */
|
||||
if (!p->conn)
|
||||
return;
|
||||
c->feed_state = BFS_REFRESHED;
|
||||
bgp_schedule_packet(p->conn, c, PKT_UPDATE);
|
||||
|
||||
if (!C->refeeding)
|
||||
{
|
||||
if (p->cf->gr_mode)
|
||||
c->feed_state = BFS_LOADING;
|
||||
return;
|
||||
}
|
||||
|
||||
if (!C->refeed_req.hook)
|
||||
{
|
||||
/* Direct refeed */
|
||||
if (C->out_table)
|
||||
{
|
||||
/* FIXME: THIS IS BROKEN, IT DOESN'T PRUNE THE OUT TABLE */
|
||||
c->feed_out_table = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
ASSERT_DIE(p->enhanced_refresh);
|
||||
|
||||
/* It is refeed and both sides support enhanced route refresh */
|
||||
/* BoRR must not be sent before End-of-RIB */
|
||||
ASSERT_DIE((c->feed_state != BFS_LOADING) && (c->feed_state != BFS_LOADED));
|
||||
|
||||
c->feed_state = BFS_REFRESHING;
|
||||
bgp_schedule_packet(p->conn, c, PKT_BEGIN_REFRESH);
|
||||
}
|
||||
mb_free(berr);
|
||||
}
|
||||
|
||||
static void
|
||||
bgp_feed_end(struct channel *C)
|
||||
bgp_export_fed(struct channel *C)
|
||||
{
|
||||
struct bgp_proto *p = (void *) C->proto;
|
||||
struct bgp_channel *c = (void *) C;
|
||||
|
||||
/* Ignore non-BGP channels */
|
||||
if (C->class != &channel_bgp)
|
||||
return;
|
||||
|
||||
if (c->feed_out_table)
|
||||
{
|
||||
c->feed_out_table = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
/* This should not happen */
|
||||
if (!p->conn)
|
||||
return;
|
||||
|
||||
/* Non-demarcated feed ended, nothing to do */
|
||||
if (c->feed_state == BFS_NONE)
|
||||
return;
|
||||
SKIP_BACK_DECLARE(struct bgp_channel, c, c, C);
|
||||
SKIP_BACK_DECLARE(struct bgp_proto, p, p, c->c.proto);
|
||||
|
||||
/* Schedule End-of-RIB packet */
|
||||
if (c->feed_state == BFS_LOADING)
|
||||
{
|
||||
c->feed_state = BFS_LOADED;
|
||||
|
||||
/* Schedule EoRR packet */
|
||||
if (c->feed_state == BFS_REFRESHING)
|
||||
c->feed_state = BFS_REFRESHED;
|
||||
|
||||
/* Kick TX hook */
|
||||
bgp_schedule_packet(p->conn, c, PKT_UPDATE);
|
||||
bgp_schedule_packet(p->conn, c, PKT_UPDATE);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
bgp_start_locked(void *_p)
|
||||
{
|
||||
@ -1936,8 +1865,7 @@ bgp_init(struct proto_config *CF)
|
||||
P->rt_notify = bgp_rt_notify;
|
||||
P->preexport = bgp_preexport;
|
||||
P->iface_sub.neigh_notify = bgp_neigh_notify;
|
||||
P->feed_begin = bgp_feed_begin;
|
||||
P->feed_end = bgp_feed_end;
|
||||
P->export_fed = bgp_export_fed;
|
||||
|
||||
P->sources.class = &bgp_rte_owner_class;
|
||||
P->sources.rte_recalculate = cf->deterministic_med ? bgp_rte_recalculate : NULL;
|
||||
|
@ -409,7 +409,8 @@ struct bgp_channel {
|
||||
|
||||
timer *stale_timer; /* Long-lived stale timer for LLGR */
|
||||
u32 stale_time; /* Stored LLGR stale time from last session */
|
||||
struct rt_export_request stale_feed; /* Feeder request for stale route modification */
|
||||
struct rt_export_feeder stale_feed; /* Feeder request for stale route modification */
|
||||
event stale_event; /* Feeder event for stale route modification */
|
||||
|
||||
u8 add_path_rx; /* Session expects receive of ADD-PATH extended NLRI */
|
||||
u8 add_path_tx; /* Session expects transmit of ADD-PATH extended NLRI */
|
||||
@ -647,7 +648,7 @@ void bgp_done_prefix(struct bgp_channel *c, struct bgp_prefix *px, struct bgp_bu
|
||||
int bgp_rte_better(const rte *, const rte *);
|
||||
int bgp_rte_mergable(const rte *pri, const rte *sec);
|
||||
int bgp_rte_recalculate(struct rtable_private *table, net *net, struct rte_storage *new, struct rte_storage *old, struct rte_storage *old_best);
|
||||
void bgp_rte_modify_stale(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
|
||||
void bgp_rte_modify_stale(void *bgp_channel);
|
||||
u32 bgp_rte_igp_metric(const rte *);
|
||||
void bgp_rt_notify(struct proto *P, struct channel *C, const net_addr *n, rte *new, const rte *old);
|
||||
int bgp_preexport(struct channel *, struct rte *);
|
||||
|
@ -2965,7 +2965,7 @@ bgp_rx_route_refresh(struct bgp_conn *conn, byte *pkt, uint len)
|
||||
/* FIXME: REQUEST REFRESH FROM OUT TABLE */
|
||||
}
|
||||
else
|
||||
channel_request_feeding_dynamic(&c->c, p->enhanced_refresh ? CFRT_DIRECT : CFRT_AUXILIARY);
|
||||
rt_export_refeed(&c->c.out_req, NULL);
|
||||
break;
|
||||
|
||||
case BGP_RR_BEGIN:
|
||||
|
@ -272,11 +272,8 @@ l3vpn_preexport(struct channel *C, rte *e)
|
||||
}
|
||||
}
|
||||
|
||||
/* TODO: unify the code between l3vpn and pipe */
|
||||
void pipe_import_by_refeed_free(struct channel_feeding_request *cfr);
|
||||
|
||||
static int
|
||||
l3vpn_reload_routes(struct channel *C, struct channel_import_request *cir)
|
||||
l3vpn_reload_routes(struct channel *C, struct rt_feeding_request *rfr)
|
||||
{
|
||||
struct l3vpn_proto *p = (void *) C->proto;
|
||||
struct channel *feed = NULL;
|
||||
@ -302,32 +299,13 @@ l3vpn_reload_routes(struct channel *C, struct channel_import_request *cir)
|
||||
|
||||
case NET_MPLS:
|
||||
/* MPLS doesn't support partial refeed, always do a full one. */
|
||||
channel_request_feeding_dynamic(p->ip4_channel, CFRT_DIRECT);
|
||||
channel_request_feeding_dynamic(p->ip6_channel, CFRT_DIRECT);
|
||||
cir->done(cir);
|
||||
channel_request_full_refeed(p->ip4_channel);
|
||||
channel_request_full_refeed(p->ip6_channel);
|
||||
rfr->done(rfr);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (cir->trie)
|
||||
{
|
||||
struct import_to_export_reload *reload = lp_alloc(cir->trie->lp, sizeof *reload);
|
||||
*reload = (struct import_to_export_reload) {
|
||||
.cir = cir,
|
||||
.cfr = {
|
||||
.type = CFRT_AUXILIARY,
|
||||
.done = pipe_import_by_refeed_free,
|
||||
.trie = cir->trie,
|
||||
},
|
||||
};
|
||||
channel_request_feeding(feed, &reload->cfr);
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Route reload on one channel is just refeed on the other */
|
||||
channel_request_feeding_dynamic(feed, CFRT_DIRECT);
|
||||
cir->done(cir);
|
||||
}
|
||||
|
||||
rt_export_refeed(&feed->out_req, rfr);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -474,10 +452,10 @@ l3vpn_reconfigure(struct proto *P, struct proto_config *CF)
|
||||
l3vpn_prepare_import_targets(p);
|
||||
|
||||
if (p->vpn4_channel && (p->vpn4_channel->channel_state == CS_UP))
|
||||
channel_request_feeding_dynamic(p->vpn4_channel, CFRT_AUXILIARY);
|
||||
channel_request_full_refeed(p->vpn4_channel);
|
||||
|
||||
if (p->vpn6_channel && (p->vpn6_channel->channel_state == CS_UP))
|
||||
channel_request_feeding_dynamic(p->vpn6_channel, CFRT_AUXILIARY);
|
||||
channel_request_full_refeed(p->vpn6_channel);
|
||||
}
|
||||
|
||||
if (export_changed)
|
||||
@ -487,10 +465,10 @@ l3vpn_reconfigure(struct proto *P, struct proto_config *CF)
|
||||
l3vpn_prepare_export_targets(p);
|
||||
|
||||
if (p->ip4_channel && (p->ip4_channel->channel_state == CS_UP))
|
||||
channel_request_feeding_dynamic(p->ip4_channel, CFRT_AUXILIARY);
|
||||
channel_request_full_refeed(p->ip4_channel);
|
||||
|
||||
if (p->ip6_channel && (p->ip6_channel->channel_state == CS_UP))
|
||||
channel_request_feeding_dynamic(p->ip6_channel, CFRT_AUXILIARY);
|
||||
channel_request_full_refeed(p->ip6_channel);
|
||||
}
|
||||
|
||||
return 1;
|
||||
|
@ -109,7 +109,7 @@
|
||||
#include "lib/macro.h"
|
||||
|
||||
static int ospf_preexport(struct channel *C, rte *new);
|
||||
static int ospf_reload_routes(struct channel *C, struct channel_import_request *cir);
|
||||
static int ospf_reload_routes(struct channel *C, struct rt_feeding_request *rfr);
|
||||
static int ospf_rte_better(const rte *new, const rte *old);
|
||||
static u32 ospf_rte_igp_metric(const rte *rt);
|
||||
static void ospf_disp(timer *timer);
|
||||
@ -375,8 +375,6 @@ ospf_init(struct proto_config *CF)
|
||||
P->iface_sub.ifa_notify = cf->ospf2 ? ospf_ifa_notify2 : ospf_ifa_notify3;
|
||||
P->preexport = ospf_preexport;
|
||||
P->reload_routes = ospf_reload_routes;
|
||||
P->feed_begin = ospf_feed_begin;
|
||||
P->feed_end = ospf_feed_end;
|
||||
|
||||
P->sources.class = &ospf_rte_owner_class;
|
||||
|
||||
@ -433,12 +431,12 @@ ospf_schedule_rtcalc(struct ospf_proto *p)
|
||||
}
|
||||
|
||||
static int
|
||||
ospf_reload_routes(struct channel *C, struct channel_import_request *cir)
|
||||
ospf_reload_routes(struct channel *C, struct rt_feeding_request *rfr)
|
||||
{
|
||||
struct ospf_proto *p = (struct ospf_proto *) C->proto;
|
||||
|
||||
if (cir)
|
||||
CALL(cir->done, cir);
|
||||
if (rfr)
|
||||
CALL(rfr->done, rfr);
|
||||
|
||||
if (p->calcrt == 2)
|
||||
return 1;
|
||||
|
@ -550,36 +550,6 @@ ospf_update_lsadb(struct ospf_proto *p)
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
ospf_feed_begin(struct channel *C)
|
||||
{
|
||||
if (!C->refeeding || C->refeed_req.hook)
|
||||
return;
|
||||
|
||||
struct ospf_proto *p = (struct ospf_proto *) C->proto;
|
||||
struct top_hash_entry *en;
|
||||
|
||||
/* Mark all external LSAs as stale */
|
||||
WALK_SLIST(en, p->lsal)
|
||||
if (en->mode == LSA_M_EXPORT)
|
||||
en->mode = LSA_M_EXPORT_STALE;
|
||||
}
|
||||
|
||||
void
|
||||
ospf_feed_end(struct channel *C)
|
||||
{
|
||||
if (!C->refeeding || C->refeed_req.hook)
|
||||
return;
|
||||
|
||||
struct ospf_proto *p = (struct ospf_proto *) C->proto;
|
||||
struct top_hash_entry *en;
|
||||
|
||||
/* Flush stale LSAs */
|
||||
WALK_SLIST(en, p->lsal)
|
||||
if (en->mode == LSA_M_EXPORT_STALE)
|
||||
ospf_flush_lsa(p, en);
|
||||
}
|
||||
|
||||
static u32
|
||||
ort_to_lsaid(struct ospf_proto *p, ort *nf)
|
||||
{
|
||||
|
@ -48,18 +48,10 @@ pipe_rt_notify(struct proto *P, struct channel *src_ch, const net_addr *n, rte *
|
||||
{
|
||||
struct pipe_proto *p = (void *) P;
|
||||
struct channel *dst = (src_ch == p->pri) ? p->sec : p->pri;
|
||||
uint *flags = (src_ch == p->pri) ? &p->sec_flags : &p->pri_flags;
|
||||
|
||||
if (!new && !old)
|
||||
return;
|
||||
|
||||
/* Start the route refresh if requested to */
|
||||
if (*flags & PIPE_FL_RR_BEGIN_PENDING)
|
||||
{
|
||||
*flags &= ~PIPE_FL_RR_BEGIN_PENDING;
|
||||
rt_refresh_begin(&dst->in_req);
|
||||
}
|
||||
|
||||
if (new)
|
||||
{
|
||||
rte e0 = rte_init_from(new);
|
||||
@ -96,72 +88,14 @@ pipe_preexport(struct channel *C, rte *e)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
pipe_import_by_refeed_free(struct channel_feeding_request *cfr)
|
||||
{
|
||||
SKIP_BACK_DECLARE(struct import_to_export_reload, reload, cfr, cfr);
|
||||
reload->cir->done(reload->cir);
|
||||
}
|
||||
|
||||
static int
|
||||
pipe_reload_routes(struct channel *C, struct channel_import_request *cir)
|
||||
pipe_reload_routes(struct channel *C, struct rt_feeding_request *rfr)
|
||||
{
|
||||
struct pipe_proto *p = (void *) C->proto;
|
||||
if (cir->trie)
|
||||
{
|
||||
struct import_to_export_reload *reload = lp_alloc(cir->trie->lp, sizeof *reload);
|
||||
*reload = (struct import_to_export_reload) {
|
||||
.cir = cir,
|
||||
.cfr = {
|
||||
.type = CFRT_AUXILIARY,
|
||||
.done = pipe_import_by_refeed_free,
|
||||
.trie = cir->trie,
|
||||
},
|
||||
};
|
||||
channel_request_feeding((C == p->pri) ? p->sec : p->pri, &reload->cfr);
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Route reload on one channel is just refeed on the other */
|
||||
channel_request_feeding_dynamic((C == p->pri) ? p->sec : p->pri, CFRT_DIRECT);
|
||||
cir->done(cir);
|
||||
}
|
||||
SKIP_BACK_DECLARE(struct pipe_proto, p, p, C->proto);
|
||||
rt_export_refeed(&((C == p->pri) ? p->sec : p->pri)->out_req, rfr);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void
|
||||
pipe_feed_begin(struct channel *C)
|
||||
{
|
||||
if (!C->refeeding || C->refeed_req.hook)
|
||||
return;
|
||||
|
||||
struct pipe_proto *p = (void *) C->proto;
|
||||
uint *flags = (C == p->pri) ? &p->sec_flags : &p->pri_flags;
|
||||
|
||||
*flags |= PIPE_FL_RR_BEGIN_PENDING;
|
||||
}
|
||||
|
||||
static void
|
||||
pipe_feed_end(struct channel *C)
|
||||
{
|
||||
if (!C->refeeding || C->refeed_req.hook)
|
||||
return;
|
||||
|
||||
struct pipe_proto *p = (void *) C->proto;
|
||||
struct channel *dst = (C == p->pri) ? p->sec : p->pri;
|
||||
uint *flags = (C == p->pri) ? &p->sec_flags : &p->pri_flags;
|
||||
|
||||
/* If not even started, start the RR now */
|
||||
if (*flags & PIPE_FL_RR_BEGIN_PENDING)
|
||||
{
|
||||
*flags &= ~PIPE_FL_RR_BEGIN_PENDING;
|
||||
rt_refresh_begin(&dst->in_req);
|
||||
}
|
||||
|
||||
/* Finish RR always */
|
||||
rt_refresh_end(&dst->in_req);
|
||||
}
|
||||
|
||||
static void
|
||||
pipe_postconfig(struct proto_config *CF)
|
||||
{
|
||||
@ -239,8 +173,6 @@ pipe_init(struct proto_config *CF)
|
||||
P->rt_notify = pipe_rt_notify;
|
||||
P->preexport = pipe_preexport;
|
||||
P->reload_routes = pipe_reload_routes;
|
||||
P->feed_begin = pipe_feed_begin;
|
||||
P->feed_end = pipe_feed_end;
|
||||
|
||||
p->rl_gen = (struct tbf) TBF_DEFAULT_LOG_LIMITS;
|
||||
|
||||
@ -281,9 +213,9 @@ pipe_show_stats(struct pipe_proto *p)
|
||||
struct channel_export_stats *s2e = &p->sec->export_stats;
|
||||
|
||||
struct rt_import_stats *rs1i = p->pri->in_req.hook ? &p->pri->in_req.hook->stats : NULL;
|
||||
struct rt_export_stats *rs1e = p->pri->out_req.hook ? &p->pri->out_req.hook->stats : NULL;
|
||||
struct rt_export_stats *rs1e = &p->pri->out_req.stats;
|
||||
struct rt_import_stats *rs2i = p->sec->in_req.hook ? &p->sec->in_req.hook->stats : NULL;
|
||||
struct rt_export_stats *rs2e = p->sec->out_req.hook ? &p->sec->out_req.hook->stats : NULL;
|
||||
struct rt_export_stats *rs2e = &p->sec->out_req.stats;
|
||||
|
||||
u32 pri_routes = p->pri->in_limit.count;
|
||||
u32 sec_routes = p->sec->in_limit.count;
|
||||
@ -334,8 +266,8 @@ pipe_show_proto_info(struct proto *P)
|
||||
cli_msg(-1006, " Channel %s", "main");
|
||||
cli_msg(-1006, " Table: %s", p->pri->table->name);
|
||||
cli_msg(-1006, " Peer table: %s", p->sec->table->name);
|
||||
cli_msg(-1006, " Import state: %s", rt_export_state_name(rt_export_get_state(p->sec->out_req.hook)));
|
||||
cli_msg(-1006, " Export state: %s", rt_export_state_name(rt_export_get_state(p->pri->out_req.hook)));
|
||||
cli_msg(-1006, " Import state: %s", rt_export_state_name(rt_export_get_state(&p->sec->out_req)));
|
||||
cli_msg(-1006, " Export state: %s", rt_export_state_name(rt_export_get_state(&p->pri->out_req)));
|
||||
cli_msg(-1006, " Import filter: %s", filter_name(p->sec->out_filter));
|
||||
cli_msg(-1006, " Export filter: %s", filter_name(p->pri->out_filter));
|
||||
|
||||
|
@ -20,16 +20,8 @@ struct pipe_proto {
|
||||
struct proto p;
|
||||
struct channel *pri;
|
||||
struct channel *sec;
|
||||
uint pri_flags;
|
||||
uint sec_flags;
|
||||
struct tbf rl_gen;
|
||||
};
|
||||
|
||||
#define PIPE_FL_RR_BEGIN_PENDING 1 /* Route refresh should start with the first route notified */
|
||||
|
||||
#endif
|
||||
|
||||
struct import_to_export_reload {
|
||||
struct channel_import_request *cir; /* We can not free this struct before reload finishes. */
|
||||
struct channel_feeding_request cfr; /* New request we actually need - import was changed to feed the other side. */
|
||||
};
|
||||
|
@ -664,7 +664,7 @@ radv_reconfigure(struct proto *P, struct proto_config *CF)
|
||||
|
||||
/* We started to accept routes so we need to refeed them */
|
||||
if (!old->propagate_routes && new->propagate_routes)
|
||||
channel_request_feeding_dynamic(p->p.main_channel, CFRT_DIRECT);
|
||||
channel_request_full_refeed(p->p.main_channel);
|
||||
|
||||
IFACE_WALK(iface)
|
||||
{
|
||||
|
@ -419,46 +419,6 @@ rip_rt_notify(struct proto *P, struct channel *ch UNUSED, const net_addr *net, s
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
rip_feed_begin(struct channel *C)
|
||||
{
|
||||
if (!C->refeeding || C->refeed_req.hook)
|
||||
return;
|
||||
|
||||
struct rip_proto *p = (struct rip_proto *) C->proto;
|
||||
|
||||
FIB_WALK(&p->rtable, struct rip_entry, en)
|
||||
{
|
||||
if (en->valid == RIP_ENTRY_VALID)
|
||||
en->valid = RIP_ENTRY_REFEEDING;
|
||||
}
|
||||
FIB_WALK_END;
|
||||
}
|
||||
|
||||
void
|
||||
rip_feed_end(struct channel *C)
|
||||
{
|
||||
if (!C->refeeding || C->refeed_req.hook)
|
||||
return;
|
||||
|
||||
struct rip_proto *p = (struct rip_proto *) C->proto;
|
||||
int changed = 0;
|
||||
|
||||
FIB_WALK(&p->rtable, struct rip_entry, en)
|
||||
{
|
||||
if (en->valid == RIP_ENTRY_REFEEDING)
|
||||
{
|
||||
rip_withdraw_entry(p, en);
|
||||
changed++;
|
||||
}
|
||||
}
|
||||
FIB_WALK_END;
|
||||
|
||||
if (changed)
|
||||
rip_trigger_update(p);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
rip_flush_table(struct rip_proto *p, struct rip_neighbor *n)
|
||||
{
|
||||
@ -1149,13 +1109,13 @@ rip_trigger_update(struct rip_proto *p)
|
||||
*/
|
||||
|
||||
static int
|
||||
rip_reload_routes(struct channel *C, struct channel_import_request *cir)
|
||||
rip_reload_routes(struct channel *C, struct rt_feeding_request *rfr)
|
||||
{
|
||||
struct rip_proto *p = (struct rip_proto *) C->proto;
|
||||
|
||||
/* Always reload full */
|
||||
if (cir)
|
||||
CALL(cir->done, cir);
|
||||
if (rfr)
|
||||
CALL(rfr->done, rfr);
|
||||
|
||||
if (p->rt_reload)
|
||||
return 1;
|
||||
@ -1209,8 +1169,6 @@ rip_init(struct proto_config *CF)
|
||||
P->iface_sub.neigh_notify = rip_neigh_notify;
|
||||
P->reload_routes = rip_reload_routes;
|
||||
P->sources.class = &rip_rte_owner_class;
|
||||
P->feed_begin = rip_feed_begin;
|
||||
P->feed_end = rip_feed_end;
|
||||
|
||||
return P;
|
||||
}
|
||||
|
@ -194,7 +194,6 @@ struct rip_rte
|
||||
#define RIP_ENTRY_DUMMY 0 /* Only used to store list of incoming routes */
|
||||
#define RIP_ENTRY_VALID 1 /* Valid outgoing route */
|
||||
#define RIP_ENTRY_STALE 2 /* Stale outgoing route, waiting for GC */
|
||||
#define RIP_ENTRY_REFEEDING 3 /* Route valid until feed ends */
|
||||
|
||||
static inline int rip_is_v2(struct rip_proto *p)
|
||||
{ return p->rip2; }
|
||||
|
@ -188,13 +188,13 @@ static_mark_all(struct static_proto *p)
|
||||
}
|
||||
|
||||
static void
|
||||
static_mark_partial(struct static_proto *p, struct channel_import_request *cir)
|
||||
static_mark_partial(struct static_proto *p, struct rt_feeding_request *rfr)
|
||||
{
|
||||
struct static_config *cf = (void *) p->p.cf;
|
||||
struct static_route *r;
|
||||
|
||||
WALK_LIST(r, cf->routes)
|
||||
if (r->state == SRS_CLEAN && trie_match_net(cir->trie, r->net))
|
||||
if (r->state == SRS_CLEAN && (!rfr || rt_prefilter_net(&rfr->prefilter, r->net)))
|
||||
{
|
||||
r->state = SRS_DIRTY;
|
||||
BUFFER_PUSH(p->marked) = r;
|
||||
@ -203,7 +203,7 @@ static_mark_partial(struct static_proto *p, struct channel_import_request *cir)
|
||||
if (!ev_active(p->event))
|
||||
ev_schedule(p->event);
|
||||
|
||||
cir->done(cir);
|
||||
rfr->done(rfr);
|
||||
}
|
||||
|
||||
|
||||
@ -430,14 +430,14 @@ static_bfd_notify(struct bfd_request *req)
|
||||
}
|
||||
|
||||
static int
|
||||
static_reload_routes(struct channel *C, struct channel_import_request *cir)
|
||||
static_reload_routes(struct channel *C, struct rt_feeding_request *rfr)
|
||||
{
|
||||
struct static_proto *p = (void *) C->proto;
|
||||
|
||||
TRACE(D_EVENTS, "Scheduling route reload");
|
||||
|
||||
if (cir->trie)
|
||||
static_mark_partial(p, cir);
|
||||
if (rfr)
|
||||
static_mark_partial(p, rfr);
|
||||
else
|
||||
static_mark_all(p);
|
||||
|
||||
|
@ -383,6 +383,7 @@ nl_error(struct nlmsghdr *h, int ignore_esrch)
|
||||
ec = netlink_error_to_os(e->error);
|
||||
if (ec && !(ignore_esrch && (ec == ESRCH)))
|
||||
log_rl(&rl_netlink_err, L_WARN "Netlink: %s", strerror(ec));
|
||||
|
||||
return ec;
|
||||
}
|
||||
|
||||
|
@ -466,6 +466,8 @@ krt_init_scan(struct krt_proto *p)
|
||||
case KPS_FLUSHING:
|
||||
bug("Can't scan, flushing");
|
||||
}
|
||||
|
||||
bug("Bad kernel sync state");
|
||||
}
|
||||
|
||||
static void
|
||||
@ -480,8 +482,8 @@ krt_prune(struct krt_proto *p)
|
||||
p->sync_state = KPS_PRUNING;
|
||||
KRT_TRACE(p, D_EVENTS, "Pruning table %s", p->p.main_channel->table->name);
|
||||
rt_refresh_end(&p->p.main_channel->in_req);
|
||||
channel_request_feeding_dynamic(p->p.main_channel, CFRT_DIRECT);
|
||||
return;
|
||||
channel_request_full_refeed(p->p.main_channel);
|
||||
break;
|
||||
|
||||
case KPS_PRUNING:
|
||||
bug("Kernel scan double-prune");
|
||||
@ -657,11 +659,21 @@ krt_preexport(struct channel *C, rte *e)
|
||||
#endif
|
||||
|
||||
if (!krt_capable(e))
|
||||
{
|
||||
if (C->debug & D_ROUTES)
|
||||
log(L_TRACE "%s.%s: refusing incapable route for %N",
|
||||
C->proto->name, C->name, e->net);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Before first scan we don't touch the routes */
|
||||
if (!SKIP_BACK(struct krt_proto, p, C->proto)->ready)
|
||||
{
|
||||
if (C->debug & D_ROUTES)
|
||||
log(L_TRACE "%s.%s not ready yet to accept route for %N",
|
||||
C->proto->name, C->name, e->net);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -716,53 +728,45 @@ krt_if_notify(struct proto *P, uint flags, struct iface *iface UNUSED)
|
||||
}
|
||||
|
||||
static int
|
||||
krt_reload_routes(struct channel *C, struct channel_import_request *cir)
|
||||
krt_reload_routes(struct channel *C, struct rt_feeding_request *rfr)
|
||||
{
|
||||
struct krt_proto *p = (void *) C->proto;
|
||||
|
||||
|
||||
if (cir->trie)
|
||||
{
|
||||
cir->done(cir);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Although we keep learned routes in krt_table, we rather schedule a scan */
|
||||
|
||||
if (KRT_CF->learn)
|
||||
{
|
||||
p->reload = 1;
|
||||
krt_scan_timer_kick(p);
|
||||
}
|
||||
|
||||
cir->done(cir);
|
||||
if (rfr)
|
||||
CALL(rfr->done, rfr);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void krt_cleanup(struct krt_proto *p);
|
||||
|
||||
static void
|
||||
krt_feed_end(struct channel *C)
|
||||
krt_export_fed(struct channel *C)
|
||||
{
|
||||
struct krt_proto *p = (void *) C->proto;
|
||||
|
||||
if (C->refeeding && C->refeed_req.hook)
|
||||
return;
|
||||
|
||||
p->ready = 1;
|
||||
p->initialized = 1;
|
||||
|
||||
switch (p->sync_state)
|
||||
{
|
||||
case KPS_PRUNING:
|
||||
KRT_TRACE(p, D_EVENTS, "Table %s pruned", C->table->name);
|
||||
p->sync_state = KPS_IDLE;
|
||||
return;
|
||||
|
||||
case KPS_IDLE:
|
||||
case KPS_SCANNING:
|
||||
krt_scan_timer_kick(p);
|
||||
return;
|
||||
break;
|
||||
|
||||
case KPS_SCANNING:
|
||||
break;
|
||||
|
||||
case KPS_PRUNING:
|
||||
KRT_TRACE(p, D_EVENTS, "Table %s pruned", p->p.main_channel->table->name);
|
||||
p->sync_state = KPS_IDLE;
|
||||
break;
|
||||
|
||||
case KPS_FLUSHING:
|
||||
krt_do_scan(p);
|
||||
@ -837,7 +841,7 @@ krt_init(struct proto_config *CF)
|
||||
p->p.rt_notify = krt_rt_notify;
|
||||
p->p.iface_sub.if_notify = krt_if_notify;
|
||||
p->p.reload_routes = krt_reload_routes;
|
||||
p->p.feed_end = krt_feed_end;
|
||||
p->p.export_fed = krt_export_fed;
|
||||
|
||||
p->p.sources.class = &krt_rte_owner_class;
|
||||
|
||||
@ -893,7 +897,7 @@ krt_shutdown(struct proto *P)
|
||||
if (p->initialized && !KRT_CF->persist && (P->down_code != PDC_CMD_GR_DOWN))
|
||||
{
|
||||
p->sync_state = KPS_FLUSHING;
|
||||
channel_request_feeding_dynamic(p->p.main_channel, CFRT_AUXILIARY);
|
||||
channel_request_full_refeed(p->p.main_channel);
|
||||
|
||||
/* Keeping the protocol UP until the feed-to-flush is done */
|
||||
return PS_UP;
|
||||
|
@ -89,7 +89,7 @@ static inline int
|
||||
krt_get_sync_error(struct krt_proto *p, struct rte *e)
|
||||
{
|
||||
return (p->p.proto_state == PS_UP) &&
|
||||
bmap_test(&p->p.main_channel->export_map, e->id) &&
|
||||
bmap_test(&p->p.main_channel->export_accepted_map, e->id) &&
|
||||
!bmap_test(&p->sync_map, e->id);
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user