0
0
mirror of https://gitlab.nic.cz/labs/bird.git synced 2024-09-19 11:55:21 +00:00

BGP: export table stores routes, reloads and shows in CLI.

In the future, this and rtable's data structures should probably be merged,
but it isn't a good idea to do so now. The used data structure is similar
to rtable -- an array of pointers to linked lists.

Feed is lockless, as with all tables.

Full export (receiving updates) is not supported yet, but we don't have
any way to use it anyway. It will be implemented later.
This commit is contained in:
Maria Matejka 2024-05-29 09:33:42 +02:00 committed by Katerina Kubecova
parent bbb9532f1e
commit 36aa64fe1c
12 changed files with 256 additions and 216 deletions

View File

@ -3445,6 +3445,13 @@ be used in explicit configuration.
<cf/show route/, and can be used to eliminate unnecessary updates or
withdraws. Default: off.
<tag><label id="rtable-export-settle-time">export settle time <m/time/ <m/time/</tag>
Minimum and maximum settle times, respectively, for announcements from
export table to external readers. These values don't apply for regular TX,
just for side channel exports. You will probably never need to change
these values.
Default values: <cf/10 ms 100 ms/. You must always provide both values.
<tag><label id="bgp-secondary">secondary <m/switch/</tag>
Usually, if an export filter rejects a selected route, no other route is
propagated for that network. This option allows to try the next route in

View File

@ -580,6 +580,12 @@ static inline ea_list *ea_lookup_tmp(ea_list *r, u32 squash_upto, enum ea_stored
return ea_free_later(ea_lookup(r, squash_upto, oid));
}
/* Take another reference to an already-stored ea_list and schedule its
 * release together with the temporary allocations (via ea_free_later). */
static inline ea_list *ea_ref_tmp(ea_list *r)
{
/* Only stored (interned) lists can be re-referenced this way */
ASSERT_DIE(r->stored);
return ea_free_later(ea_ref(r));
}
static inline ea_list *ea_strip_to(ea_list *r, u32 strip_to)
{
ASSERT_DIE(strip_to);

View File

@ -777,6 +777,12 @@ r_args:
rsdr->prefilter = $4;
$$->tables_defined_by = RSD_TDB_DIRECT;
}
| r_args EXPORT TABLE channel_arg {
if (!($4->out_table)) cf_error("No export table in channel %s.%s", $4->proto->name, $4->name);
struct rt_show_data_rtable *rsdr = rt_show_add_exporter($$, $4->out_table);
rsdr->name = "export";
$$->tables_defined_by = RSD_TDB_DIRECT;
}
| r_args FILTER filter {
$$ = $1;
if ($$->filter != FILTER_ACCEPT) cf_error("Filter specified twice");
@ -879,7 +885,6 @@ export_mode:
| EXPORT { $$ = RSEM_EXPORT; }
| NOEXPORT { $$ = RSEM_NOEXPORT; }
| EXPORTED { $$ = RSEM_EXPORTED; }
| EXPORT TABLE { cf_error("Temporarily switched off export table display."); $$ = RSEM_EXPORT_TABLE; }
;
/* This is ugly hack */

View File

@ -119,7 +119,6 @@ struct rt_export_request {
/* Enlisting */
struct rt_exporter * _Atomic exporter;
struct rt_export_feeder * _Atomic next;
/* Prefiltering, useful for more scenarios */
struct rt_prefilter {
@ -221,6 +220,7 @@ struct rt_exporter {
/* Exporter API */
void rt_exporter_init(struct rt_exporter *, struct settle_config *);
struct rt_export_item *rt_exporter_push(struct rt_exporter *, const struct rt_export_item *);
struct rt_export_feed *rt_alloc_feed(uint routes, uint exports);
void rt_exporter_shutdown(struct rt_exporter *, void (*stopped)(struct rt_exporter *));
/* Standalone feeds */
@ -738,7 +738,7 @@ extern const int rt_default_ecmp;
struct rt_show_data_rtable {
node n;
const char *name;
rtable *table;
struct rt_exporter *exporter;
struct channel *export_channel;
struct channel *prefilter;
struct krt_proto *kernel;
@ -767,6 +767,7 @@ struct rt_show_data {
void rt_show(struct rt_show_data *);
struct rt_show_data_rtable * rt_show_add_table(struct rt_show_data *d, rtable *t);
struct rt_show_data_rtable * rt_show_add_exporter(struct rt_show_data *d, struct rt_exporter *e);
/* Value of table definition mode in struct rt_show_data */
#define RSD_TDB_DEFAULT 0 /* no table specified */
@ -783,7 +784,6 @@ struct rt_show_data_rtable * rt_show_add_table(struct rt_show_data *d, rtable *t
#define RSEM_EXPORT 2 /* Routes accepted by export filter */
#define RSEM_NOEXPORT 3 /* Routes rejected by export filter */
#define RSEM_EXPORTED 4 /* Routes marked in export map */
#define RSEM_EXPORT_TABLE 5 /* Export from export table */
/* Host entry: Resolve hook for recursive nexthops */
extern struct ea_class ea_gen_hostentry;

View File

@ -213,6 +213,27 @@ rt_export_processed(struct rt_export_request *r, u64 seq)
bmap_set(&r->seq_map, seq);
}
/*
 * rt_alloc_feed - allocate an export feed for @routes route slots and
 * @exports export-item slots in one contiguous temporary block.
 *
 * The two trailing arrays live right after the header; one extra
 * _Alignof() worth of slack per array leaves room to align the pointers.
 * Allocated with tmp_alloc(), so the feed lives only as long as the
 * current temporary scope -- TODO confirm intended lifetime with callers.
 */
struct rt_export_feed *
rt_alloc_feed(uint routes, uint exports)
{
struct rt_export_feed *feed;
uint size = sizeof *feed
+ routes * sizeof *feed->block + _Alignof(typeof(*feed->block))
+ exports * sizeof *feed->exports + _Alignof(typeof(*feed->exports));
feed = tmp_alloc(size);
feed->count_routes = routes;
feed->count_exports = exports;
/* Route array directly after the header, aligned inside the block */
BIRD_SET_ALIGNED_POINTER(feed->block, feed->data);
/* Export array after the route array */
BIRD_SET_ALIGNED_POINTER(feed->exports, &feed->block[routes]);
/* Consistency check */
ASSERT_DIE(((void *) &feed->exports[exports]) <= ((void *) feed) + size);
return feed;
}
struct rt_export_feed *
rt_export_next_feed(struct rt_export_feeder *f)
{

View File

@ -320,12 +320,20 @@ rt_show_cont(struct cli *c)
}
struct rt_show_data_rtable *
rt_show_add_table(struct rt_show_data *d, rtable *t)
rt_show_add_exporter(struct rt_show_data *d, struct rt_exporter *e)
{
struct rt_show_data_rtable *tab = cfg_allocz(sizeof(struct rt_show_data_rtable));
tab->table = t;
tab->name = t->name;
tab->name = e->name;
tab->exporter = e;
add_tail(&(d->tables), &(tab->n));
return tab;
}
struct rt_show_data_rtable *
rt_show_add_table(struct rt_show_data *d, rtable *t)
{
struct rt_show_data_rtable *tab = rt_show_add_exporter(d, &t->export_all);
tab->name = t->name;
struct proto_config *krt = t->config->krt_attached;
if (krt)
@ -384,16 +392,17 @@ rt_show_prepare_tables(struct rt_show_data *d)
WALK_LIST_DELSAFE(tab, tabx, d->tables)
{
struct rt_exporter *ex = tab->exporter;
/* Ensure there is defined export_channel for each table */
if (d->export_mode)
{
rtable *rt = tab->table;
if (!tab->export_channel && d->export_channel &&
(rt == d->export_channel->table))
(ex == &d->export_channel->table->export_all))
tab->export_channel = d->export_channel;
if (!tab->export_channel && d->export_protocol)
tab->export_channel = proto_find_channel_by_table(d->export_protocol, rt);
tab->export_channel = proto_find_channel_by_table(d->export_protocol, SKIP_BACK(rtable, export_all, ex));
if (!tab->export_channel)
{
@ -406,7 +415,7 @@ rt_show_prepare_tables(struct rt_show_data *d)
}
/* Ensure specified network is compatible with each table */
if (d->addr && (tab->table->addr_type != d->addr->type))
if (d->addr && (ex->net_type != d->addr->type))
{
if (d->tables_defined_by & RSD_TDB_NMN)
cf_error("Incompatible type of prefix/ip for table %s", tab->name);
@ -425,7 +434,7 @@ rt_show_prepare_tables(struct rt_show_data *d)
.trace_routes = config->show_route_debug,
};
rt_feeder_subscribe(&tab->table->export_all, &tab->req);
rt_feeder_subscribe(ex, &tab->req);
}
/* Ensure there is at least one table */

View File

@ -1948,27 +1948,6 @@ rte_import(struct rt_import_request *req, const net_addr *n, rte *new, struct rt
* Feeding
*/
/*
 * rt_alloc_feed - allocate an export feed for @routes route slots and
 * @exports export-item slots in one contiguous temporary block.
 * (File-local variant; the arrays follow the header with alignment slack.)
 */
static struct rt_export_feed *
rt_alloc_feed(uint routes, uint exports)
{
struct rt_export_feed *feed;
uint size = sizeof *feed
+ routes * sizeof *feed->block + _Alignof(typeof(*feed->block))
+ exports * sizeof *feed->exports + _Alignof(typeof(*feed->exports));
feed = tmp_alloc(size);
feed->count_routes = routes;
feed->count_exports = exports;
/* Align the trailing arrays inside the allocated block */
BIRD_SET_ALIGNED_POINTER(feed->block, feed->data);
BIRD_SET_ALIGNED_POINTER(feed->exports, &feed->block[routes]);
/* Consistency check */
ASSERT_DIE(((void *) &feed->exports[exports]) <= ((void *) feed) + size);
return feed;
}
static net *
rt_net_feed_get_net(struct rtable_reading *tr, uint index)
{

View File

@ -1648,7 +1648,7 @@ bgp_done_bucket(struct bgp_channel *c, struct bgp_bucket *b)
if (b == c->withdraw_bucket)
return 0;
if (EMPTY_LIST(b->prefixes))
if (enlisted(&b->send_node) && EMPTY_LIST(b->prefixes))
rem_node(&b->send_node);
if (b->px_uc || !EMPTY_LIST(b->prefixes))
@ -1723,6 +1723,8 @@ bgp_find_prefix(struct bgp_channel *c, struct netindex *ni, struct rte_src *src,
return px;
return NULL;
c->tx_lock = DOMAIN_NEW(rtable);
}
static struct bgp_prefix *
@ -1751,6 +1753,7 @@ bgp_get_prefix(struct bgp_channel *c, struct netindex *ni, struct rte_src *src,
atomic_store_explicit(&c->prefixes, nb, memory_order_release);
atomic_store_explicit(&c->prefixes_len, nlen, memory_order_release);
atomic_store_explicit(&c->prefix_exporter.max_feed_index, nlen, memory_order_release);
synchronize_rcu();
@ -1803,7 +1806,7 @@ bgp_update_prefix(struct bgp_channel *c, struct bgp_prefix *px, struct bgp_bucke
}
/* The new bucket is the same as we sent before */
if ((px->last == b) || c->c.out_table && !px->last && IS_WITHDRAW_BUCKET(b))
if ((px->last == b) || c->tx_keep && !px->last && IS_WITHDRAW_BUCKET(b))
{
if (px->cur)
BPX_TRACE("reverted");
@ -1870,14 +1873,15 @@ bgp_done_prefix(struct bgp_channel *c, struct bgp_prefix *px, struct bgp_bucket
rem_node(&px->buck_node);
/* We may want to store the updates */
if (c->c.out_table)
if (c->tx_keep)
{
/* Nothing to be sent right now */
px->cur = NULL;
/* Unref the previous sent version */
if (px->last)
px->last->px_uc--;
if (!--px->last->px_uc)
bgp_done_bucket(c, px->last);
/* Ref the current sent version */
if (!IS_WITHDRAW_BUCKET(buck))
@ -1893,11 +1897,142 @@ bgp_done_prefix(struct bgp_channel *c, struct bgp_prefix *px, struct bgp_bucket
bgp_free_prefix(c, px);
}
/*
 * bgp_tx_resend - queue all already-sent routes of a channel for resending
 *
 * Requires a stored Adj-RIB-Out (c->tx_keep). For every prefix with no
 * pending update (->cur is NULL) but a previously sent bucket (->last),
 * drop the "sent" reference and push the route through bgp_update_prefix()
 * again. Schedules an UPDATE packet if anything got queued.
 * Runs under the channel's tx_lock.
 */
void
bgp_tx_resend(struct bgp_proto *p, struct bgp_channel *c)
{
LOCK_DOMAIN(rtable, c->tx_lock);
ASSERT_DIE(c->tx_keep);
uint seen = 0;
/* NOTE(review): relaxed loads here look safe because tx_lock is held --
 * confirm tx_lock serializes all writers of the prefix block */
u32 len = atomic_load_explicit(&c->prefixes_len, memory_order_relaxed);
struct bgp_prefix * _Atomic * block =
atomic_load_explicit(&c->prefixes, memory_order_relaxed);
/* Walk every chain of the prefix block */
for (u32 i = 0; i < len; i++)
for (struct bgp_prefix * _Atomic *ppx = &block[i], *px;
px = atomic_load_explicit(ppx, memory_order_relaxed);
ppx = &px->next)
if (!px->cur)
{
ASSERT_DIE(px->last);
struct bgp_bucket *last = px->last;
/* Remove the last reference, we wanna resend the route */
px->last->px_uc--;
px->last = NULL;
/* And send it once again */
seen += bgp_update_prefix(c, px, last);
}
if (c->c.debug & D_EVENTS)
log(L_TRACE "%s.%s: TX resending %u routes",
c->c.proto->name, c->c.name, seen);
UNLOCK_DOMAIN(rtable, c->tx_lock);
if (seen)
bgp_schedule_packet(p->conn, c, PKT_UPDATE);
}
/*
* Prefix hash table exporter
*/
/* Journal callback for finished export items -- nothing to release here */
static void
bgp_out_item_done(struct lfjour *j, struct lfjour_item *i)
{}
/*
 * bgp_out_feed_net - feed_net hook of the Adj-RIB-Out exporter
 *
 * Builds an rt_export_feed with the current and/or last-sent route of
 * every prefix stored under net index @ni in the channel's prefix block.
 * The walk is lockless: if the chain changes between the counting pass
 * and the filling pass, the mismatch is detected and the whole read
 * section is restarted via RCU_RETRY().
 */
static struct rt_export_feed *
bgp_out_feed_net(struct rt_exporter *e, struct rcu_unwinder *u, const struct netindex *ni, const struct rt_export_item *_first)
{
struct rt_export_feed *feed = NULL;
SKIP_BACK_DECLARE(struct bgp_channel, c, prefix_exporter, e);
u32 len = atomic_load_explicit(&c->prefixes_len, memory_order_relaxed);
/* Index out of the current block size: nothing stored for this net */
if (ni->index >= len)
return NULL;
struct bgp_prefix * _Atomic * block =
atomic_load_explicit(&c->prefixes, memory_order_relaxed);
/* First pass: count chain entries to size the feed */
uint count = 0;
for (struct bgp_prefix * _Atomic *ppx = &block[ni->index], *cpx;
cpx = atomic_load_explicit(ppx, memory_order_relaxed);
ppx = &cpx->next)
count++;
if (count)
{
feed = rt_alloc_feed(count, 0);
feed->ni = ni;
uint pos = 0;
/* Second pass: fill the feed; any overflow means the chain moved
 * under us, so retry the RCU read section */
/* NOTE(review): a prefix with both ->cur and ->last set would emit two
 * rtes while the counting pass reserved one slot -- confirm the two are
 * never set simultaneously on this path, else this always retries */
for (struct bgp_prefix * _Atomic *ppx = &block[ni->index], *cpx;
cpx = atomic_load_explicit(ppx, memory_order_relaxed);
ppx = &cpx->next)
if (cpx->ni == ni)
{
if (cpx->cur)
if (pos >= count)
RCU_RETRY(u);
else
feed->block[pos++] = (rte) {
.attrs = cpx->cur->attrs ? ea_ref_tmp(cpx->cur->attrs) : NULL,
.net = ni->addr,
.src = cpx->src,
.lastmod = cpx->lastmod,
.flags = REF_PENDING,
};
if (cpx->last)
if (pos >= count)
RCU_RETRY(u);
else
feed->block[pos++] = (rte) {
.attrs = cpx->last->attrs ? ea_ref_tmp(cpx->last->attrs) : NULL,
.net = ni->addr,
.src = cpx->src,
.lastmod = cpx->lastmod,
.flags = REF_PENDING,
};
}
/* Underfill also means concurrent modification: retry */
if (pos != count)
RCU_RETRY(u);
}
return feed;
}
/* TX structures Init and Free */
/*
 * bgp_init_pending_tx - set up the pending-TX structures of a channel
 *
 * Initializes the bucket and prefix tables, configures the table-like
 * exporter over the prefix structures (fed by bgp_out_feed_net) and
 * registers it as the channel's out_table.
 */
void
bgp_init_pending_tx(struct bgp_channel *c)
{
/* Must not be initialized twice */
ASSERT_DIE(c->c.out_table == NULL);
bgp_init_bucket_table(c);
bgp_init_prefix_table(c);
c->prefix_exporter = (struct rt_exporter) {
.journal = {
.loop = c->c.proto->loop,
.item_size = sizeof(struct rt_export_item),
.item_done = bgp_out_item_done,
},
.name = mb_sprintf(c->c.proto->pool, "%s.%s.export", c->c.proto->name, c->c.name),
.net_type = c->c.net_type,
.max_feed_index = atomic_load_explicit(&c->prefixes_len, memory_order_relaxed),
.netindex = c->tx_netindex,
.trace_routes = c->c.debug,
.feed_net = bgp_out_feed_net,
};
/* Settle timer config comes from the channel's configuration */
rt_exporter_init(&c->prefix_exporter, &c->cf->ptx_exporter_settle);
c->c.out_table = &c->prefix_exporter;
}
struct bgp_pending_tx_finisher {
@ -1923,11 +2058,31 @@ bgp_free_pending_tx(struct bgp_channel *c)
LOCK_DOMAIN(rtable, c->tx_lock);
c->c.out_table = NULL;
rt_exporter_shutdown(&c->prefix_exporter, NULL);
struct bgp_prefix *px;
u32 len = atomic_load_explicit(&c->prefixes_len, memory_order_relaxed);
struct bgp_prefix * _Atomic * block =
atomic_load_explicit(&c->prefixes, memory_order_relaxed);
if (c->tx_keep)
{
/* Move all kept prefixes to the withdraw bucket */
struct bgp_bucket *b = bgp_get_withdraw_bucket(c);
for (u32 i = 0; i < len; i++)
for (struct bgp_prefix * _Atomic *ppx = &block[i], *cpx;
cpx = atomic_load_explicit(ppx, memory_order_relaxed);
ppx = &cpx->next)
bgp_update_prefix(c, cpx, b);
}
/* Flush withdrawals */
if (c->withdraw_bucket)
WALK_LIST_FIRST(px, c->withdraw_bucket->prefixes)
bgp_done_prefix(c, px, c->withdraw_bucket);
/* Flush pending TX */
struct bgp_bucket *b;
WALK_LIST_FIRST(b, c->bucket_queue)
{
@ -1936,16 +2091,14 @@ bgp_free_pending_tx(struct bgp_channel *c)
bgp_done_bucket(c, b);
}
u32 len = atomic_load_explicit(&c->prefixes_len, memory_order_relaxed);
struct bgp_prefix * _Atomic * block =
atomic_load_explicit(&c->prefixes, memory_order_relaxed);
/* Consistence and resource leak checks */
for (u32 i = 0; i < len; i++)
if (atomic_load_explicit(&block[i], memory_order_relaxed))
bug("Stray prefix after cleanup");
atomic_store_explicit(&c->prefixes, NULL, memory_order_release);
atomic_store_explicit(&c->prefixes_len, 0, memory_order_release);
atomic_store_explicit(&c->prefix_exporter.max_feed_index, 0, memory_order_release);
synchronize_rcu();
mb_free(block);
@ -1972,175 +2125,12 @@ bgp_free_pending_tx(struct bgp_channel *c)
channel_add_obstacle(&c->c);
netindex_hash_delete(c->tx_netindex, &bptf->e, proto_event_list(c->c.proto));
c->tx_netindex = NULL;
c->prefix_exporter.netindex = NULL;
UNLOCK_DOMAIN(rtable, c->tx_lock);
DOMAIN_FREE(rtable, c->tx_lock);
}
#if 0
/*
* Prefix hash table exporter
*/
struct bgp_out_export_hook {
struct rt_export_hook h;
u32 hash_iter; /* Iterator over hash */
};
/*
 * bgp_out_table_feed - feed routes from the stored Adj-RIB-Out to a reader
 * (old-style exporter; this whole section is disabled under #if 0)
 *
 * Event hook: walks the channel's prefix hash in batches of up to 512
 * entries, synthesizes an rte from each matching prefix's current (or
 * last sent) bucket and pushes it through the reader's export callbacks.
 * Reschedules itself while the hash iterator has not finished; once done,
 * flips the export hook state to TES_READY.
 */
static void
bgp_out_table_feed(void *data)
{
struct bgp_out_export_hook *hook = data;
SKIP_BACK_DECLARE(struct bgp_channel, c, prefix_exporter, hook->h.table);
/* Batch limit per event run, to keep the loop from hogging the thread */
int max = 512;
/* For TE_ADDR_EQUAL exports, the exact net to match right away;
 * for TE_ADDR_FOR it is filled in after the candidate pass below */
const net_addr *neq = (hook->h.req->prefilter.mode == TE_ADDR_EQUAL) ? hook->h.req->prefilter.addr : NULL;
const net_addr *cand = NULL;
do {
HASH_WALK_ITER(c->prefix_hash, PXH, n, hook->hash_iter)
{
switch (hook->h.req->prefilter.mode)
{
case TE_ADDR_HOOK:
case TE_ADDR_TRIE:
case TE_ADDR_IN:
if (!rt_prefilter_net(&hook->h.req->prefilter, n->net))
continue;
/* fall through */
case TE_ADDR_NONE:
/* Splitting only for multi-net exports */
if (--max <= 0)
HASH_WALK_ITER_PUT;
break;
case TE_ADDR_FOR:
if (!neq)
{
/* First pass: find the longest prefix covering the asked address;
 * the second pass (with neq set) then exports exactly that one */
if (net_in_netX(hook->h.req->prefilter.addr, n->net) && (!cand || (n->net->length > cand->length)))
cand = n->net;
continue;
}
/* fall through */
case TE_ADDR_EQUAL:
if (!net_equal(n->net, neq))
continue;
break;
}
/* Prefer the pending bucket, fall back to the last sent one */
struct bgp_bucket *buck = n->cur ?: n->last;
ea_list *ea = NULL;
if (buck == c->withdraw_bucket)
/* Withdrawn prefix: synthesize an unreachable route */
ea_set_dest(&ea, 0, RTD_UNREACHABLE);
else
{
ea = buck->eattrs;
/* Rebuild the generic nexthop attribute from BGP's BA_NEXT_HOP */
eattr *eanh = bgp_find_attr(ea, BA_NEXT_HOP);
ASSERT_DIE(eanh);
const ip_addr *nh = (const void *) eanh->u.ptr->data;
struct nexthop_adata nhad = {
.ad = { .length = sizeof (struct nexthop_adata) - sizeof (struct adata), },
.nh = { .gw = nh[0], },
};
ea_set_attr(&ea, EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, tmp_copy_adata(&nhad.ad)));
}
/* Synthetic route as seen by the peer; REF_PENDING marks not-yet-sent */
struct rte es = {
.attrs = ea,
.net = n->net,
.src = rt_find_source_global(n->path_id),
.sender = NULL,
.lastmod = n->lastmod,
.flags = n->cur ? REF_PENDING : 0,
};
struct rt_pending_export rpe = {
.new = &es, .new_best = &es,
};
/* Deliver via whichever callback the request provides */
if (hook->h.req->export_bulk)
{
const rte *feed = &es;
hook->h.req->export_bulk(hook->h.req, n->net, &rpe, &rpe, &feed, 1);
}
else if (hook->h.req->export_one)
hook->h.req->export_one(hook->h.req, n->net, &rpe);
else
bug("No export method in export request");
}
HASH_WALK_ITER_END;
/* Second pass for TE_ADDR_FOR, with the chosen candidate */
neq = cand;
cand = NULL;
} while (neq);
if (hook->hash_iter)
/* Not finished yet: continue in a next event run */
ev_schedule_work(&hook->h.event);
else
rt_set_export_state(&hook->h, BIT32_ALL(TES_FEEDING), TES_READY);
}
/* Exporter-class start hook: allocate the export hook, wire the feed
 * event to bgp_out_table_feed and begin the export. */
static void
bgp_out_table_export_start(struct rt_exporter *re, struct rt_export_request *req)
{
req->hook = rt_alloc_export(re, req->pool, sizeof(struct bgp_out_export_hook));
req->hook->req = req;
SKIP_BACK_DECLARE(struct bgp_out_export_hook, hook, h, req->hook);
hook->h.event.hook = bgp_out_table_feed;
rt_init_export(re, req->hook);
}
/* Exporter-class stop hook: force the hook into TES_STOP from any
 * active state and run the common teardown. */
static void
bgp_out_table_export_stop(struct rt_export_hook *hook)
{
rt_set_export_state(hook, BIT32_ALL(TES_HUNGRY, TES_FEEDING, TES_READY), TES_STOP);
rt_stop_export_common(hook);
}
/* Exporter-class done hook: release the export hook, then notify the
 * requestor through its stopped callback (if set). */
static void
bgp_out_table_export_done(void *data)
{
struct bgp_out_export_hook *hook = data;
struct rt_export_request *req = hook->h.req;
/* Save the callback before the hook is torn down */
void (*stopped)(struct rt_export_request *) = hook->h.stopped;
rt_export_stopped(&hook->h);
CALL(stopped, req);
}
/* Callback table of the (old-style) Adj-RIB-Out exporter */
static const struct rt_exporter_class bgp_out_table_export_class = {
.start = bgp_out_table_export_start,
.stop = bgp_out_table_export_stop,
.done = bgp_out_table_export_done,
};
/* Attach the (old-style) prefix exporter as the channel's out_table. */
void
bgp_setup_out_table(struct bgp_channel *c)
{
/* Must not be attached twice */
ASSERT_DIE(c->c.out_table == NULL);
c->prefix_exporter = (struct rt_exporter) {
.class = &bgp_out_table_export_class,
.addr_type = c->c.table->addr_type,
.rp = c->c.proto->pool,
};
rt_exporter_init(&c->prefix_exporter);
c->c.out_table = &c->prefix_exporter;
}
#else
/* Out-table exporter currently disabled: no-op stub */
void
bgp_setup_out_table(struct bgp_channel *c UNUSED)
{}
#endif
/*
* BGP protocol glue
*/

View File

@ -1594,7 +1594,26 @@ bgp_reload_in(struct proto *P, uintptr_t _ UNUSED, int __ UNUSED)
/*
 * bgp_reload_out - CLI hook to re-send all exported routes of a protocol
 *
 * For each non-MPLS channel of a running protocol: if the channel keeps
 * its Adj-RIB-Out (tx_keep), resend directly from the stored prefixes;
 * otherwise ask the source table to refeed the export request.
 * Reports progress (or refusal, when the protocol is not up) via cli_msg.
 */
void
bgp_reload_out(struct proto *P, uintptr_t _ UNUSED, int __ UNUSED)
{
  /* Fixed: a leftover "not implemented yet" stub message was emitted
   * unconditionally before the actual reload logic below. */
  SKIP_BACK_DECLARE(struct bgp_proto, p, p, P);
  if (P->proto_state == PS_UP)
  {
    struct bgp_channel *c;
    BGP_WALK_CHANNELS(p, c)
      if (&c->c != P->mpls_channel)
	if (c->tx_keep)
	{
	  /* Stored Adj-RIB-Out available: resend from kept prefixes */
	  bgp_tx_resend(p, c);
	  cli_msg(-15, "%s.%s: reloading", P->name, c->c.name);
	}
	else
	{
	  /* No stored RIB: request a refeed from the table */
	  rt_export_refeed(&c->c.out_req, NULL);
	  cli_msg(-15, "%s.%s: reloading by table refeed", P->name, c->c.name);
	}
  }
  else
    cli_msg(-8006, "%s: not reloading, not up", P->name);
}
struct bgp_enhanced_refresh_request {
@ -1938,10 +1957,8 @@ bgp_channel_start(struct channel *C)
c->pool = p->p.pool; // XXXX
if (c->cf->export_table)
bgp_setup_out_table(c);
bgp_init_pending_tx(c);
c->tx_keep = c->cf->export_table;
c->stale_timer = tm_new_init(c->pool, bgp_long_lived_stale_timeout, c, 0, 0);
@ -2416,6 +2433,9 @@ bgp_channel_reconfigure(struct channel *C, struct channel_config *CC, int *impor
(new->aigp_originate != old->aigp_originate))
*export_changed = 1;
/* Update prefix exporter settle timer */
c->prefix_exporter.journal.announce_timer.cf = c->cf->ptx_exporter_settle;
c->cf = new;
return 1;
}

View File

@ -176,6 +176,8 @@ struct bgp_channel_config {
u8 import_table; /* Use c.in_table as Adj-RIB-In */
u8 export_table; /* Keep Adj-RIB-Out and export it */
struct settle_config ptx_exporter_settle; /* Settle timer for export dumps */
struct rtable_config *igp_table_ip4; /* Table for recursive IPv4 next hop lookups */
struct rtable_config *igp_table_ip6; /* Table for recursive IPv6 next hop lookups */
struct rtable_config *base_table; /* Base table for Flowspec validation */
@ -407,13 +409,15 @@ struct bgp_channel {
slab *prefix_slab; /* Slab holding prefix nodes */
slab *bucket_slab; /* Slab holding buckets to send */
// struct rt_exporter prefix_exporter; /* Table-like exporter for ptx */
struct rt_exporter prefix_exporter; /* Table-like exporter for ptx */
ip_addr next_hop_addr; /* Local address for NEXT_HOP attribute */
ip_addr link_addr; /* Link-local version of next_hop_addr */
u32 packets_to_send; /* Bitmap of packet types to be sent */
u8 tx_keep; /* Keep prefixes to be sent */
u8 ext_next_hop; /* Session allows both IPv4 and IPv6 next hops */
u8 gr_ready; /* Neighbor could do GR on this AF */
@ -429,8 +433,6 @@ struct bgp_channel {
u8 feed_state; /* Feed state (TX) for EoR, RR packets, see BFS_* */
u8 load_state; /* Load state (RX) for EoR, RR packets, see BFS_* */
u8 feed_out_table; /* Refeed into out_table */
};
struct bgp_prefix {
@ -637,6 +639,7 @@ void bgp_setup_out_table(struct bgp_channel *c);
void bgp_init_pending_tx(struct bgp_channel *c);
void bgp_free_pending_tx(struct bgp_channel *c);
void bgp_tx_resend(struct bgp_proto *p, struct bgp_channel *c);
void bgp_withdraw_bucket(struct bgp_channel *c, struct bgp_bucket *b);
int bgp_done_bucket(struct bgp_channel *c, struct bgp_bucket *b);

View File

@ -264,6 +264,7 @@ bgp_channel_start: bgp_afi
BGP_CC->llgr_able = 0xff; /* undefined */
BGP_CC->llgr_time = ~0U; /* undefined */
BGP_CC->aigp = 0xff; /* undefined */
BGP_CC->ptx_exporter_settle = (struct settle_config) { 10 MS_, 100 MS_ };
}
};
@ -301,6 +302,7 @@ bgp_channel_item:
| REQUIRE ADD PATHS bool { BGP_CC->require_add_path = $4; }
| IMPORT TABLE bool { BGP_CC->import_table = $3; }
| EXPORT TABLE bool { BGP_CC->export_table = $3; }
| EXPORT SETTLE TIME settle { BGP_CC->ptx_exporter_settle = $4; }
| AIGP bool { BGP_CC->aigp = $2; BGP_CC->aigp_originate = 0; }
| AIGP ORIGINATE { BGP_CC->aigp = 1; BGP_CC->aigp_originate = 1; }
| COST expr { BGP_CC->cost = $2; if ($2 < 1) cf_error("Cost must be positive"); }

View File

@ -2964,10 +2964,8 @@ bgp_rx_route_refresh(struct bgp_conn *conn, byte *pkt, uint len)
{
case BGP_RR_REQUEST:
BGP_TRACE(D_PACKETS, "Got ROUTE-REFRESH");
if (c->c.out_table)
{
/* FIXME: REQUEST REFRESH FROM OUT TABLE */
}
if (c->tx_keep)
bgp_tx_resend(p, c);
else
rt_export_refeed(&c->c.out_req, NULL);
break;