/*
 *	BIRD -- Protocols
 *
 *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */

#undef LOCAL_DEBUG

#include "nest/bird.h"
#include "nest/protocol.h"
#include "lib/resource.h"
#include "lib/lists.h"
#include "lib/event.h"
#include "lib/timer.h"
#include "lib/string.h"
#include "conf/conf.h"
#include "nest/route.h"
#include "nest/iface.h"
#include "nest/mpls.h"
#include "nest/cli.h"
#include "filter/filter.h"
#include "filter/f-inst.h"

pool *proto_pool;
static TLIST_LIST(proto) global_proto_list;

static list STATIC_LIST_INIT(protocol_list);

#define CD(c, msg, args...) ({ if (c->debug & D_STATES) log(L_TRACE "%s.%s: " msg, c->proto->name, c->name ?: "?", ## args); })
#define PD(p, msg, args...) ({ if (p->debug & D_STATES) log(L_TRACE "%s: " msg, p->name, ## args); })

static timer *gr_wait_timer;

#define GRS_NONE	0
#define GRS_INIT	1
#define GRS_ACTIVE	2
#define GRS_DONE	3

static int graceful_restart_state;
static u32 graceful_restart_locks;

static char *p_states[] = { "DOWN", "START", "UP", "STOP" };
static char *c_states[] = { "DOWN", "START", "UP", "STOP", "RESTART" };

extern struct protocol proto_unix_iface;

static void proto_rethink_goal(struct proto *p);
static char *proto_state_name(struct proto *p);
static void channel_init_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf);
static void channel_update_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf);
static void channel_reset_limit(struct channel *c, struct limit *l, int dir);
static int channel_refeed_prefilter(const struct rt_prefilter *p, const net_addr *n);
static int channel_import_prefilter(const struct rt_prefilter *p, const net_addr *n);
static void channel_feed_end(struct channel *c);
static void channel_stop_export(struct channel *c);
static void channel_export_stopped(struct rt_export_request *req);
static void channel_refeed_stopped(struct rt_export_request *req);
static void channel_check_stopped(struct channel *c);
static void channel_reload_in_done(struct channel_import_request *cir);
static void channel_request_partial_reload(struct channel *c, struct channel_import_request *cir);

static inline int proto_is_done(struct proto *p)
{ return (p->proto_state == PS_DOWN) && proto_is_inactive(p); }

static inline int channel_is_active(struct channel *c)
{ return (c->channel_state != CS_DOWN); }

static inline int channel_reloadable(struct channel *c)
{ return c->proto->reload_routes && c->reloadable; }

static inline void
channel_log_state_change(struct channel *c)
{
  CD(c, "State changed to %s", c_states[c->channel_state]);
}

void
channel_import_log_state_change(struct rt_import_request *req, u8 state)
{
  struct channel *c = SKIP_BACK(struct channel, in_req, req);
  CD(c, "Channel import state changed to %s", rt_import_state_name(state));
}

void
channel_export_log_state_change(struct rt_export_request *req, u8 state)
{
  struct channel *c = SKIP_BACK(struct channel, out_req, req);
  CD(c, "Channel export state changed to %s", rt_export_state_name(state));

  switch (state)
  {
    case TES_FEEDING:
      if (c->proto->feed_begin)
        c->proto->feed_begin(c);
      break;
    case TES_READY:
      channel_feed_end(c);
      break;
  }
}

void
channel_refeed_log_state_change(struct rt_export_request *req, u8 state)
{
  struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
  CD(c, "Channel export state changed to %s", rt_export_state_name(state));

  switch (state)
  {
    case TES_FEEDING:
      if (c->proto->feed_begin)
        c->proto->feed_begin(c);
      break;
    case TES_READY:
      rt_stop_export(req, channel_refeed_stopped);
      break;
  }
}


static void
channel_dump_import_req(struct rt_import_request *req)
{
  struct channel *c = SKIP_BACK(struct channel, in_req, req);
  debug(" Channel %s.%s import request %p\n", c->proto->name, c->name, req);
}

static void
channel_dump_export_req(struct rt_export_request *req)
{
  struct channel *c = SKIP_BACK(struct channel, out_req, req);
  debug(" Channel %s.%s export request %p\n", c->proto->name, c->name, req);
}

static void
channel_dump_refeed_req(struct rt_export_request *req)
{
  struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
  debug(" Channel %s.%s refeed request %p\n", c->proto->name, c->name, req);
}


static void
channel_rpe_mark_seen_export(struct rt_export_request *req, struct rt_pending_export *rpe)
{
  channel_rpe_mark_seen(SKIP_BACK(struct channel, out_req, req), rpe);
}

static void
channel_rpe_mark_seen_refeed(struct rt_export_request *req, struct rt_pending_export *rpe)
{
  channel_rpe_mark_seen(SKIP_BACK(struct channel, refeed_req, req), rpe);
}


struct channel *
channel_from_export_request(struct rt_export_request *req)
{
  if (req->dump_req == channel_dump_export_req)
    return SKIP_BACK(struct channel, out_req, req);

  if (req->dump_req == channel_dump_refeed_req)
    return SKIP_BACK(struct channel, refeed_req, req);

  bug("Garbled channel export request");
}


static void
proto_log_state_change(struct proto *p)
{
  if (p->debug & D_STATES)
  {
    char *name = proto_state_name(p);
    if (name != p->last_state_name_announced)
    {
      p->last_state_name_announced = name;
      PD(p, "State changed to %s", proto_state_name(p));
    }
  }
  else
    p->last_state_name_announced = NULL;
}

struct channel_config *
proto_cf_find_channel(struct proto_config *pc, uint net_type)
{
  struct channel_config *cc;

  WALK_LIST(cc, pc->channels)
    if (cc->net_type == net_type)
      return cc;

  return NULL;
}

/**
 * proto_find_channel_by_table - find channel connected to a routing table
 * @p: protocol instance
 * @t: routing table
 *
 * Returns pointer to channel or NULL
 */
struct channel *
proto_find_channel_by_table(struct proto *p, rtable *t)
{
  struct channel *c;

  WALK_LIST(c, p->channels)
    if (c->table == t)
      return c;

  return NULL;
}

/**
 * proto_find_channel_by_name - find channel by its name
 * @p: protocol instance
 * @n: channel name
 *
 * Returns pointer to channel or NULL
 */
struct channel *
proto_find_channel_by_name(struct proto *p, const char *n)
{
  struct channel *c;

  WALK_LIST(c, p->channels)
    if (!strcmp(c->name, n))
      return c;

  return NULL;
}

/**
 * proto_add_channel - connect protocol to a routing table
 * @p: protocol instance
 * @cf: channel configuration
 *
 * This function creates a channel between the protocol instance @p and the
 * routing table specified in the configuration @cf, making the protocol hear
 * all changes in the table and allowing the protocol to update routes in the
 * table.
 *
 * The channel is linked in the protocol channel list and when active also in
 * the table channel list. Channels are allocated from the global resource pool
 * (@proto_pool) and they are automatically freed when the protocol is removed.
 */

struct channel *
proto_add_channel(struct proto *p, struct channel_config *cf)
{
  struct channel *c = mb_allocz(proto_pool, cf->class->channel_size);

  c->name = cf->name;
  c->class = cf->class;
  c->proto = p;
  c->table = cf->table->table;
  rt_lock_table(c->table);

  c->in_filter = cf->in_filter;
  c->out_filter = cf->out_filter;
  c->out_subprefix = cf->out_subprefix;

  c->feed_block_size = cf->feed_block_size;

  channel_init_limit(c, &c->rx_limit, PLD_RX, &cf->rx_limit);
  channel_init_limit(c, &c->in_limit, PLD_IN, &cf->in_limit);
  channel_init_limit(c, &c->out_limit, PLD_OUT, &cf->out_limit);

  c->net_type = cf->net_type;
  c->ra_mode = cf->ra_mode;
  c->preference = cf->preference;
  c->debug = cf->debug;
  c->merge_limit = cf->merge_limit;
  c->in_keep = cf->in_keep;
  c->rpki_reload = cf->rpki_reload;

  c->channel_state = CS_DOWN;
  c->last_state_change = current_time();
  c->reloadable = 1;

  init_list(&c->roa_subscriptions);

  CALL(c->class->init, c, cf);

  add_tail(&p->channels, &c->n);

  CD(c, "Connected to table %s", c->table->name);

  return c;
}
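
/*
 * A hypothetical usage sketch: a protocol's init hook would typically pick
 * the channel configuration matching its network type and attach it here,
 * e.g.
 *
 *   struct channel_config *cc = proto_cf_find_channel(CF, p->net_type);
 *   p->main_channel = proto_add_channel(p, cc);
 *
 * where CF, p and p->main_channel are assumed to come from the protocol's
 * own code; only proto_cf_find_channel() and proto_add_channel() are
 * defined in this file.
 */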

void
proto_remove_channel(struct proto *p UNUSED, struct channel *c)
{
  ASSERT(c->channel_state == CS_DOWN);

  CD(c, "Removed", c->name);

  rt_unlock_table(c->table);
  rem_node(&c->n);
  mb_free(c);
}


static void
proto_start_channels(struct proto *p)
{
  struct channel *c;
  WALK_LIST(c, p->channels)
    if (!c->disabled)
      channel_set_state(c, CS_UP);
}

static void
proto_pause_channels(struct proto *p)
{
  struct channel *c;
  WALK_LIST(c, p->channels)
    if (!c->disabled && channel_is_active(c))
      channel_set_state(c, CS_PAUSE);
}

static void
proto_stop_channels(struct proto *p)
{
  struct channel *c;
  WALK_LIST(c, p->channels)
    if (!c->disabled && channel_is_active(c))
      channel_set_state(c, CS_STOP);
}

static void
proto_remove_channels(struct proto *p)
{
  struct channel *c;
  WALK_LIST_FIRST(c, p->channels)
    proto_remove_channel(p, c);
}

/**
 * # Automatic ROA reloads
 *
 * Route origin authorizations may (and do) change over time by updates via
 * our RPKI protocols. This then manifests in ROA tables. As roa_check() is
 * always evaluated against the contents of the ROA table at a specific
 * moment in time, its result may change after updates of the ROA table and
 * therefore must be re-evaluated any time the result may have changed.
 *
 * To enable this mechanism, there are auxiliary tools integrated in BIRD
 * to automatically re-evaluate all filters that may get a different outcome
 * after a ROA change.
 *
 * The ROA subscription data structure (struct roa_subscription) is the
 * connector between the channel and the ROA table, keeping track of
 * unprocessed changes and initiating the reloads. The modus operandi is as
 * follows:
 *
 * Init 1. Check whether the filter uses ROA at all.
 * Init 2. Request exports from the ROA table.
 * Init 3. Allocate a trie.
 *
 * Export from ROA: This may affect all routes for prefixes matching the ROA
 * prefix, disregarding its maxlen. Thus we mark these routes in the request's
 * auxiliary trie. Then we ping the settle timer to wait a reasonable amount of
 * time before actually requesting a channel reload.
 *
 * The settle timer fires when nothing has pinged it for the 'min' time, or
 * the 'max' time has elapsed since the first ping. It then:
 *
 * - requests a partial channel import / export reload based on the trie
 * - allocates a new trie
 *
 * As the import/export reload uses the auxiliary trie to prefilter prefixes,
 * the trie must be freed after the reload is done, which is ensured in the
 * .done() hook of the reimport/reexport request.
 *
 * # Channel export refeed
 *
 * The request, either by ROA or from CLI, is enqueued to the channel and an
 * auxiliary export hook is requested from the table. This way, the ordinary
 * updates can flow uninterrupted while the refeed gets prefiltered by the
 * given trie (if any). When the auxiliary export hook finishes, the .done()
 * hook is then called for the requestor to do their cleanup.
 *
 * While refeeding, special care must be taken about route changes inside the
 * table. For this, an auxiliary trie is allocated to keep track of already
 * refed nets, to avoid unnecessary multiple re-evaluation of filters.
 *
 * # Channel import reload from import table
 *
 * When the import table is on, the channel keeps the original version of the
 * route in the table together with the actual version after filters, in the
 * form of an additional layer of route attributes underneath the actual
 * version. This makes it exceptionally simple to get the original version of
 * the route directly from the table by an ordinary export which strips all
 * the newer layers.
 *
 * Then, by processing all these auxiliary exports, the channel basically
 * re-imports all the routes into the table back again, re-evaluating the
 * filters and ROA checks.
 *
 * # Channel import reload from protocols
 *
 * When the import table is off, the protocol gets the reimport request
 * directly via the .reload_routes() hook and must do its internal route
 * reload instead. The protocol may not support it and in such a case this
 * function returns 0, indicating that no partial reload is going to happen.
 * It's then on the developer's or user's discretion to run a full reload
 * instead.
 *
 * # Caveats, FIXME's, TODO's and other kinds of hell
 *
 * The partial reexport uses a trie to track state for single prefixes. This
 * may do crazy things if a partial reload was to be performed on any other
 * table than plain IPv6 or IPv4. Network types like VPNv6 or Flowspec may
 * cause some crashes. This is currently not checked anywhere.
 *
 * Anyway, we decided to split the table FIB structure to carry only a mapping
 * between a prefix and a locally-unique ID, and after this update is done
 * (probably also in v2), the tracking tries may be easily replaced by
 * bitfields, therefore fixing this bug.
 *
 * We also probably didn't do a proper analysis of the implemented algorithm
 * for reexports, so if there is somebody willing to formally prove that we
 * both won't miss any update and won't reexport more than needed, you're
 * welcome to submit such a proof.
 *
 * We wish you a pleasant reading, analyzing and bugfixing experience.
 *
 * Kata, Maria and the BIRD Team
 */
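
/*
 * A hypothetical sketch of the refeed flow described above: a caller that
 * wants routes re-exported would build a feeding request and hand it to the
 * channel, much like channel_roa_out_changed() below does for ROA-triggered
 * refeeds:
 *
 *   struct channel_feeding_request *cfr = mb_alloc(c->proto->pool, sizeof *cfr);
 *   *cfr = (struct channel_feeding_request) {
 *     .type = CFRT_AUXILIARY,	// or CFRT_DIRECT to restart the export by force
 *     .done = my_refeed_done,	// hypothetical cleanup hook freeing cfr
 *   };
 *   channel_request_feeding(c, cfr);
 *
 * Leaving .trie unset means the whole table is refed. Here 'c' and
 * my_refeed_done() are assumed to come from the caller; the request types
 * and channel_request_feeding() are defined in this file.
 */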

struct roa_subscription {
  node roa_node;
  struct settle settle;
  struct channel *c;
  rtable *tab;
  struct rt_export_request req;
  struct f_trie *trie;
};

static void
channel_roa_in_reload_done(struct channel_import_request *req)
{
  rfree(req->trie->lp);
}

static void
channel_roa_in_changed(struct settle *se)
{
  struct roa_subscription *s = SKIP_BACK(struct roa_subscription, settle, se);
  struct channel *c = s->c;

  CD(c, "Reload triggered by RPKI change");
  struct channel_import_request *cir = lp_alloc(s->trie->lp, sizeof *cir);
  *cir = (struct channel_import_request) {
    .trie = s->trie,
    .done = channel_roa_in_reload_done,
  };

  s->trie = f_new_trie(lp_new(c->proto->pool), 0);

  channel_request_partial_reload(c, cir);
}

static void
channel_roa_out_reload_done(struct channel_feeding_request *req)
{
  rfree(req->trie->lp);
}

static void
channel_roa_out_changed(struct settle *se)
{
  struct roa_subscription *s = SKIP_BACK(struct roa_subscription, settle, se);
  struct channel *c = s->c;

  CD(c, "Feeding triggered by RPKI change");

  /* Setup feeding request */
  struct channel_feeding_request *cfr = lp_alloc(s->trie->lp, sizeof *cfr);
  *cfr = (struct channel_feeding_request) {
    .type = CFRT_AUXILIARY,
    .trie = s->trie,
    .done = channel_roa_out_reload_done,
  };

  /* Prepare new trie */
  s->trie = f_new_trie(lp_new(c->proto->pool), 0);

  /* Actually request the feed */
  channel_request_feeding(c, cfr);
}

static void
channel_export_one_roa(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
{
  struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);

  switch (net->type)
  {
    case NET_ROA4:
      trie_add_prefix(s->trie, net, net_pxlen(net), 32);
      break;
    case NET_ROA6:
      trie_add_prefix(s->trie, net, net_pxlen(net), 128);
      break;
    default:
      bug("ROA table sent us a non-roa export");
  }

  settle_kick(&s->settle, s->c->proto->loop);

  rpe_mark_seen_all(req->hook, first, NULL, NULL);
}

static void
channel_dump_roa_req(struct rt_export_request *req)
{
  struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
  struct channel *c = s->c;

  debug(" Channel %s.%s ROA %s change notifier request %p\n",
      c->proto->name, c->name,
      (s->settle.hook == channel_roa_in_changed) ? "import" : "export",
      req);
}

static int
channel_roa_is_subscribed(struct channel *c, rtable *tab, int dir)
{
  void (*hook)(struct settle *) =
    dir ? channel_roa_in_changed : channel_roa_out_changed;

  struct roa_subscription *s;
  node *n;

  WALK_LIST2(s, n, c->roa_subscriptions, roa_node)
    if ((tab == s->tab) && (s->settle.hook == hook))
      return 1;

  return 0;
}

static void
channel_roa_subscribe(struct channel *c, rtable *tab, int dir)
{
  if (channel_roa_is_subscribed(c, tab, dir))
    return;

  struct roa_subscription *s = mb_allocz(c->proto->pool, sizeof(struct roa_subscription));

  *s = (struct roa_subscription) {
    .settle = SETTLE_INIT(&c->roa_settle, dir ? channel_roa_in_changed : channel_roa_out_changed, NULL),
    .c = c,
    .trie = f_new_trie(lp_new(c->proto->pool), 0),
    .tab = tab,
    .req = {
      .name = mb_sprintf(c->proto->pool, "%s.%s.roa-%s.%s",
	  c->proto->name, c->name, dir ? "in" : "out", tab->name),
      .list = proto_work_list(c->proto),
      .pool = c->proto->pool,
      .trace_routes = c->debug | c->proto->debug,
      .dump_req = channel_dump_roa_req,
      .export_one = channel_export_one_roa,
    },
  };

  add_tail(&c->roa_subscriptions, &s->roa_node);
  rt_request_export(tab, &s->req);
}

static void
channel_roa_unsubscribed(struct rt_export_request *req)
{
  struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
  struct channel *c = s->c;

  rem_node(&s->roa_node);
  mb_free(s);

  channel_check_stopped(c);
}

static void
channel_roa_unsubscribe(struct roa_subscription *s)
{
  rfree(s->trie->lp);
  rt_stop_export(&s->req, channel_roa_unsubscribed);
  settle_cancel(&s->settle);
  s->settle.hook = NULL;
}

static void
channel_roa_subscribe_filter(struct channel *c, int dir)
{
  const struct filter *f = dir ? c->in_filter : c->out_filter;
  rtable *tab;
  int valid = 1, found = 0;

  if ((f == FILTER_ACCEPT) || (f == FILTER_REJECT))
    return;

  /* No automatic reload for non-reloadable channels */
  if (dir && !channel_reloadable(c))
    valid = 0;

#ifdef CONFIG_BGP
  /* No automatic reload for BGP channels without in_table / out_table */
  if (c->class == &channel_bgp)
    valid = dir ? ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER) : !!c->out_table;
#endif

  struct filter_iterator fit;
  FILTER_ITERATE_INIT(&fit, f->root, c->proto->pool);

  FILTER_ITERATE(&fit, fi)
  {
    switch (fi->fi_code)
    {
    case FI_ROA_CHECK:
      tab = fi->i_FI_ROA_CHECK.rtc->table;
      if (valid) channel_roa_subscribe(c, tab, dir);
      found = 1;
      break;

    default:
      break;
    }
  }
  FILTER_ITERATE_END;

  FILTER_ITERATE_CLEANUP(&fit);

  if (!valid && found)
    log(L_WARN "%s.%s: Automatic RPKI reload not active for %s",
	c->proto->name, c->name ?: "?", dir ? "import" : "export");
}

static void
channel_roa_unsubscribe_all(struct channel *c)
{
  struct roa_subscription *s;
  node *n, *x;

  WALK_LIST2_DELSAFE(s, n, x, c->roa_subscriptions, roa_node)
    channel_roa_unsubscribe(s);
}

static void
channel_start_import(struct channel *c)
{
  if (c->in_req.hook)
  {
    log(L_WARN "%s.%s: Attempted to start channel's already started import", c->proto->name, c->name);
    return;
  }

  c->in_req = (struct rt_import_request) {
    .name = mb_sprintf(c->proto->pool, "%s.%s", c->proto->name, c->name),
    .trace_routes = c->debug | c->proto->debug,
    .list = proto_work_list(c->proto),
    .dump_req = channel_dump_import_req,
    .log_state_change = channel_import_log_state_change,
    .preimport = channel_preimport,
  };

  ASSERT(c->channel_state == CS_UP);

  channel_reset_limit(c, &c->rx_limit, PLD_RX);
  channel_reset_limit(c, &c->in_limit, PLD_IN);

  memset(&c->import_stats, 0, sizeof(struct channel_import_stats));

  DBG("%s.%s: Channel start import req=%p\n", c->proto->name, c->name, &c->in_req);
  rt_request_import(c->table, &c->in_req);
}

static void
channel_start_export(struct channel *c)
{
  if (c->out_req.hook)
  {
    log(L_WARN "%s.%s: Attempted to start channel's already started export", c->proto->name, c->name);
    return;
  }

  ASSERT(c->channel_state == CS_UP);

  pool *p = rp_newf(c->proto->pool, c->proto->pool->domain, "Channel %s.%s export", c->proto->name, c->name);

  c->out_req = (struct rt_export_request) {
    .name = mb_sprintf(p, "%s.%s", c->proto->name, c->name),
    .list = proto_work_list(c->proto),
    .pool = p,
    .feed_block_size = c->feed_block_size,
    .prefilter = {
      .mode = c->out_subprefix ? TE_ADDR_IN : TE_ADDR_NONE,
      .addr = c->out_subprefix,
    },
    .trace_routes = c->debug | c->proto->debug,
    .dump_req = channel_dump_export_req,
    .log_state_change = channel_export_log_state_change,
    .mark_seen = channel_rpe_mark_seen_export,
  };

  bmap_init(&c->export_map, p, 16);
  bmap_init(&c->export_reject_map, p, 16);
  bmap_init(&c->refeed_map, p, 16);

  channel_reset_limit(c, &c->out_limit, PLD_OUT);

  memset(&c->export_stats, 0, sizeof(struct channel_export_stats));

  switch (c->ra_mode) {
  case RA_OPTIMAL:
    c->out_req.export_one = rt_notify_optimal;
    break;
  case RA_ANY:
    c->out_req.export_one = rt_notify_any;
    c->out_req.export_bulk = rt_feed_any;
    break;
  case RA_ACCEPTED:
    c->out_req.export_bulk = rt_notify_accepted;
    break;
  case RA_MERGED:
    c->out_req.export_bulk = rt_notify_merged;
    break;
  default:
    bug("Unknown route announcement mode");
  }

  c->refeed_req = c->out_req;
  c->refeed_req.name = mb_sprintf(p, "%s.%s.refeed", c->proto->name, c->name);
  c->refeed_req.dump_req = channel_dump_refeed_req;
  c->refeed_req.log_state_change = channel_refeed_log_state_change;
  c->refeed_req.mark_seen = channel_rpe_mark_seen_refeed;

  DBG("%s.%s: Channel start export req=%p\n", c->proto->name, c->name, &c->out_req);
  rt_request_export(c->table, &c->out_req);
}

static void
channel_check_stopped(struct channel *c)
{
  switch (c->channel_state)
  {
    case CS_STOP:
      if (c->obstacles || !EMPTY_LIST(c->roa_subscriptions) || c->out_req.hook || c->refeed_req.hook || c->in_req.hook || c->reload_req.hook)
	return;

      channel_set_state(c, CS_DOWN);
      proto_send_event(c->proto, c->proto->event);

      break;
    case CS_PAUSE:
      if (c->obstacles || !EMPTY_LIST(c->roa_subscriptions) || c->out_req.hook || c->refeed_req.hook || c->reload_req.hook)
	return;

      channel_set_state(c, CS_START);
      break;
  }

  DBG("%s.%s: Channel requests/hooks stopped (in state %s)\n", c->proto->name, c->name, c_states[c->channel_state]);
}

void
channel_add_obstacle(struct channel *c)
{
  c->obstacles++;
}

void
channel_del_obstacle(struct channel *c)
{
  if (!--c->obstacles)
    channel_check_stopped(c);
}

void
channel_import_stopped(struct rt_import_request *req)
{
  struct channel *c = SKIP_BACK(struct channel, in_req, req);

  mb_free(c->in_req.name);
  c->in_req.name = NULL;

  channel_check_stopped(c);
}

static void
channel_export_stopped(struct rt_export_request *req)
{
  struct channel *c = SKIP_BACK(struct channel, out_req, req);

  /* The hook has already stopped */
  req->hook = NULL;

  if (c->refeed_pending)
  {
    ASSERT_DIE(!c->refeeding);
    c->refeeding = c->refeed_pending;
    c->refeed_pending = NULL;

    channel_reset_limit(c, &c->out_limit, PLD_OUT);

    bmap_reset(&c->export_map, 16);
    bmap_reset(&c->export_reject_map, 16);
    bmap_reset(&c->refeed_map, 16);

    rt_request_export(c->table, req);
    return;
  }

  bmap_free(&c->export_map);
  bmap_free(&c->export_reject_map);

  c->out_req.name = NULL;
  rfree(c->out_req.pool);

  channel_check_stopped(c);
}

static void
channel_refeed_stopped(struct rt_export_request *req)
{
  struct channel *c = SKIP_BACK(struct channel, refeed_req, req);

  req->hook = NULL;

  channel_feed_end(c);
}

static void
channel_init_feeding(struct channel *c)
{
  int no_trie = 0;

  for (struct channel_feeding_request *cfrp = c->refeed_pending; cfrp; cfrp = cfrp->next)
    if (cfrp->type == CFRT_DIRECT)
    {
      /* Direct feeding requested? Restart the export by force. */
      channel_stop_export(c);
      return;
    }
    else if (!cfrp->trie)
      no_trie = 1;

  /* No direct feeding, running auxiliary refeed. */
  c->refeeding = c->refeed_pending;
  c->refeed_pending = NULL;
  bmap_reset(&c->refeed_map, 16);

  if (no_trie)
  {
    c->refeed_req.prefilter.mode = TE_ADDR_NONE;
    c->refeed_req.prefilter.hook = NULL;
  }
  else
  {
    c->refeed_req.prefilter.mode = TE_ADDR_HOOK;
    c->refeed_req.prefilter.hook = channel_refeed_prefilter;
  }

  rt_request_export(c->table, &c->refeed_req);
}

static int
channel_refeed_prefilter(const struct rt_prefilter *p, const net_addr *n)
{
  const struct channel *c =
    SKIP_BACK(struct channel, refeed_req,
	SKIP_BACK(struct rt_export_request, prefilter, p)
	);

  ASSERT_DIE(c->refeeding);
  for (struct channel_feeding_request *cfr = c->refeeding; cfr; cfr = cfr->next)
    if (!cfr->trie || trie_match_net(cfr->trie, n))
      return 1;
  return 0;
}

int
channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n)
{
  for (struct channel_import_request *cir = cir_head; cir; cir = cir->next)
  {
    if (!cir->trie || trie_match_net(cir->trie, n))
      return 1;
  }
  return 0;
}

static int
channel_import_prefilter(const struct rt_prefilter *p, const net_addr *n)
{
  const struct channel *c =
    SKIP_BACK(struct channel, reload_req,
	SKIP_BACK(struct rt_export_request, prefilter, p)
	);
  ASSERT_DIE(c->importing);

  return channel_import_request_prefilter(c->importing, n);
}

static void
channel_feed_end(struct channel *c)
{
  /* Reset export limit if the feed ended with acceptable number of exported routes */
  struct limit *l = &c->out_limit;
  if (c->refeeding &&
      (c->limit_active & (1 << PLD_OUT)) &&
      (l->count <= l->max))
  {
    log(L_INFO "Protocol %s resets route export limit (%u)", c->proto->name, l->max);
    c->limit_active &= ~(1 << PLD_OUT);

    /* Queue the same refeed batch back into pending */
    struct channel_feeding_request **ptr = &c->refeed_pending;
    while (*ptr)
      ptr = &((*ptr)->next);

    *ptr = c->refeeding;

    /* Mark the requests to be redone */
    for (struct channel_feeding_request *cfr = c->refeeding; cfr; cfr = cfr->next)
      cfr->state = CFRS_PENDING;

    c->refeeding = NULL;
  }

  /* Inform the protocol about the feed ending */
  CALL(c->proto->feed_end, c);

  /* Free the dynamic feeding requests */
  for (struct channel_feeding_request *cfr = c->refeeding, *next = cfr ? cfr->next : NULL;
      cfr;
      (cfr = next), (next = next ? next->next : NULL))
    CALL(cfr->done, cfr);

  /* Drop the refeed batch */
  c->refeeding = NULL;

  /* Run the pending batch */
  if (c->refeed_pending)
    channel_init_feeding(c);
}

/* Called by protocol for reload from in_table */
void
channel_schedule_reload(struct channel *c, struct channel_import_request *cir)
{
  ASSERT(c->in_req.hook);
  int no_trie = 0;
  if (cir)
  {
    cir->next = c->import_pending;
    c->import_pending = cir;
  }

  if (c->reload_req.hook)
  {
    CD(c, "Reload triggered before the previous one has finished");
    c->reload_pending = 1;
    return;
  }

  /* If there is any full-reload request, we can disregard all partials */
  for (struct channel_import_request *last = cir; last && no_trie == 0;)
  {
    if (!last->trie)
      no_trie = 1;
    last = last->next;
  }

  /* activating pending imports */
  c->importing = c->import_pending;
  c->import_pending = NULL;

  if (no_trie)
  {
    c->reload_req.prefilter.mode = TE_ADDR_NONE;
    c->reload_req.prefilter.hook = NULL;
  }
  else
  {
    c->reload_req.prefilter.mode = TE_ADDR_HOOK;
    c->reload_req.prefilter.hook = channel_import_prefilter;
  }

  rt_request_export(c->table, &c->reload_req);
}

static void
channel_reload_stopped(struct rt_export_request *req)
{
  struct channel *c = SKIP_BACK(struct channel, reload_req, req);

  /* Restart reload */
  if (c->reload_pending)
  {
    c->reload_pending = 0;
    channel_request_reload(c);
  }

  if (c->channel_state != CS_UP)
    channel_check_stopped(c);
}

static void
channel_reload_log_state_change(struct rt_export_request *req, u8 state)
{
  struct channel *c = SKIP_BACK(struct channel, reload_req, req);

  if (state == TES_READY)
  {
    if (c->channel_state == CS_UP)
      rt_refresh_end(&c->in_req);

    rt_stop_export(req, channel_reload_stopped);
  }
}

static void
channel_reload_dump_req(struct rt_export_request *req)
{
  struct channel *c = SKIP_BACK(struct channel, reload_req, req);
  debug(" Channel %s.%s import reload request %p\n", c->proto->name, c->name, req);
}

/* Called by protocol to activate in_table */
static void
channel_setup_in_table(struct channel *c)
{
  c->reload_req = (struct rt_export_request) {
    .name = mb_sprintf(c->proto->pool, "%s.%s.import", c->proto->name, c->name),
    .list = proto_work_list(c->proto),
    .pool = c->proto->pool,
    .feed_block_size = c->feed_block_size,
    .trace_routes = c->debug | c->proto->debug,
    .export_bulk = channel_reload_export_bulk,
    .dump_req = channel_reload_dump_req,
    .log_state_change = channel_reload_log_state_change,
  };
}


static void
channel_do_start(struct channel *c)
{
  c->proto->active_channels++;

  if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
    channel_setup_in_table(c);

  CALL(c->class->start, c);

  channel_start_import(c);
}

static void
channel_do_up(struct channel *c)
{
  /* Register RPKI/ROA subscriptions */
  if (c->rpki_reload)
  {
    channel_roa_subscribe_filter(c, 1);
    channel_roa_subscribe_filter(c, 0);
  }
}

static void
channel_do_pause(struct channel *c)
{
  /* Drop ROA subscriptions */
  channel_roa_unsubscribe_all(c);

  /* Need to abort feeding */
  c->reload_pending = 0;

  if (c->reload_req.hook && c->reload_req.hook->export_state != TES_STOP)
    rt_stop_export(&c->reload_req, channel_reload_stopped);

  /* Stop export */
  c->refeed_pending = 0;
  channel_stop_export(c);
}

static void
channel_do_stop(struct channel *c)
{
  /* Stop import */
  if (c->in_req.hook)
    rt_stop_import(&c->in_req, channel_import_stopped);

  c->gr_wait = 0;
  if (c->gr_lock)
    channel_graceful_restart_unlock(c);

  CALL(c->class->shutdown, c);

}

static void
channel_do_down(struct channel *c)
{
  ASSERT(!c->reload_req.hook);

  c->proto->active_channels--;

  memset(&c->import_stats, 0, sizeof(struct channel_import_stats));
  memset(&c->export_stats, 0, sizeof(struct channel_export_stats));

  c->out_table = NULL;

  /* The in_table and out_table are going to be freed by freeing their resource pools. */

  CALL(c->class->cleanup, c);

  /* Schedule protocol shutdown */
  if (proto_is_done(c->proto))
    proto_send_event(c->proto, c->proto->event);
}

void
channel_set_state(struct channel *c, uint state)
{
  uint cs = c->channel_state;

  DBG("%s reporting channel %s state transition %s -> %s\n", c->proto->name, c->name, c_states[cs], c_states[state]);
  if (state == cs)
    return;

  c->channel_state = state;
  c->last_state_change = current_time();

  switch (state)
  {
  case CS_START:
    ASSERT(cs == CS_DOWN || cs == CS_PAUSE);

    if (cs == CS_DOWN)
      channel_do_start(c);

    break;

  case CS_UP:
    ASSERT(cs == CS_DOWN || cs == CS_START);

    if (cs == CS_DOWN)
      channel_do_start(c);

    if (!c->gr_wait && c->proto->rt_notify)
      channel_start_export(c);

    channel_do_up(c);
    break;

  case CS_PAUSE:
    ASSERT(cs == CS_UP);

    if (cs == CS_UP)
      channel_do_pause(c);
    break;

  case CS_STOP:
    ASSERT(cs == CS_UP || cs == CS_START || cs == CS_PAUSE);

    if (cs == CS_UP)
      channel_do_pause(c);

    channel_do_stop(c);
    break;

  case CS_DOWN:
    ASSERT(cs == CS_STOP);

    channel_do_down(c);
    break;

  default:
    ASSERT(0);
  }

  channel_log_state_change(c);
}

/**
 * channel_request_feeding - request feeding routes to the channel
 * @c: given channel
 *
 * Sometimes it is needed to send all routes to the channel again. This is
 * called feeding and can be requested by this function. It causes the
 * channel export state to transition to ES_FEEDING (while feeding) and, when
 * completed, to switch back to ES_READY. This function can be called even
 * when feeding is already running; in that case it is restarted.
 */
void
channel_request_feeding(struct channel *c, struct channel_feeding_request *cfr)
{
  ASSERT_DIE(c->out_req.hook);

  CD(c, "Feeding requested (%s)",
      cfr->type == CFRT_DIRECT ? "direct" :
      (cfr->trie ? "partial" : "auxiliary"));

  /* Enqueue the request */
  cfr->next = c->refeed_pending;
  c->refeed_pending = cfr;

  /* Initialize refeeds unless already refeeding */
  if (!c->refeeding)
    channel_init_feeding(c);
}

static void
channel_feeding_request_done_dynamic(struct channel_feeding_request *req)
{
  mb_free(req);
}

void
channel_request_feeding_dynamic(struct channel *c, enum channel_feeding_request_type type)
{
  struct channel_feeding_request *req = mb_alloc(c->proto->pool, sizeof *req);
  *req = (struct channel_feeding_request) {
    .type = type,
    .done = channel_feeding_request_done_dynamic,
  };

  channel_request_feeding(c, req);
}
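
/*
 * A hypothetical usage sketch: a protocol that needs all its routes
 * re-exported (for example after its own session restarted) could ask for a
 * dynamically allocated refeed of an export channel:
 *
 *   channel_request_feeding_dynamic(c, CFRT_DIRECT);	// restart the export from scratch
 *
 * or pass CFRT_AUXILIARY to re-send routes through the auxiliary refeed hook
 * while the regular export keeps running. The channel 'c' is assumed to be
 * supplied by the caller.
 */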

static void
channel_stop_export(struct channel *c)
{
  if (c->refeed_req.hook && (c->refeed_req.hook->export_state != TES_STOP))
    rt_stop_export(&c->refeed_req, channel_refeed_stopped);

  if (c->out_req.hook && (c->out_req.hook->export_state != TES_STOP))
    rt_stop_export(&c->out_req, channel_export_stopped);
}

static void
channel_import_request_done_dynamic(struct channel_import_request *req)
{
  mb_free(req);
}

void
channel_request_reload(struct channel *c)
{
  ASSERT(c->in_req.hook);
  ASSERT(channel_reloadable(c));

  CD(c, "Reload requested");
  struct channel_import_request *cir = mb_alloc(c->proto->pool, sizeof *cir);
  cir->trie = NULL;
  cir->done = channel_import_request_done_dynamic;

  if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
    channel_schedule_reload(c, cir);
  else if (!c->proto->reload_routes(c, cir))
    bug("Channel %s.%s refused full import reload.", c->proto->name, c->name);
}

static void
channel_request_partial_reload(struct channel *c, struct channel_import_request *cir)
{
  ASSERT(c->in_req.hook);
  ASSERT(channel_reloadable(c));

  CD(c, "Partial import reload requested");

  if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
    channel_schedule_reload(c, cir);
  else if (!c->proto->reload_routes(c, cir))
    cli_msg(-15, "%s.%s: partial reload refused, please run full reload instead", c->proto->name, c->name);
}

const struct channel_class channel_basic = {
  .channel_size = sizeof(struct channel),
  .config_size = sizeof(struct channel_config)
};

void *
channel_config_new(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto)
{
  struct channel_config *cf = NULL;
  struct rtable_config *tab = NULL;

  if (net_type)
  {
    if (!net_val_match(net_type, proto->protocol->channel_mask))
      cf_error("Unsupported channel type");

    if (proto->net_type && (net_type != proto->net_type) && (net_type != NET_MPLS))
      cf_error("Different channel type");

    tab = rt_get_default_table(new_config, net_type);
  }

  if (!cc)
    cc = &channel_basic;

  cf = cfg_allocz(cc->config_size);
  cf->name = name;
  cf->class = cc;
  cf->parent = proto;
  cf->table = tab;
  cf->out_filter = FILTER_REJECT;

  cf->feed_block_size = 16384;

  cf->net_type = net_type;
  cf->ra_mode = RA_OPTIMAL;
  cf->preference = proto->protocol->preference;
  cf->debug = new_config->channel_default_debug;
  cf->rpki_reload = 1;

  cf->roa_settle = (struct settle_config) {
    .min = 1 S,
    .max = 20 S,
  };

  add_tail(&proto->channels, &cf->n);

  return cf;
}

void *
channel_config_get(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto)
{
  struct channel_config *cf;

  /* We are using name as token, so no strcmp() */
  WALK_LIST(cf, proto->channels)
    if (cf->name == name)
    {
      /* Allow to redefine channel only if inherited from template */
      if (cf->parent == proto)
	cf_error("Multiple %s channels", name);

      cf->parent = proto;
      cf->copy = 1;
      return cf;
    }

  return channel_config_new(cc, name, net_type, proto);
}

struct channel_config *
channel_copy_config(struct channel_config *src, struct proto_config *proto)
{
  struct channel_config *dst = cfg_alloc(src->class->config_size);

  memcpy(dst, src, src->class->config_size);
  memset(&dst->n, 0, sizeof(node));
  add_tail(&proto->channels, &dst->n);
  CALL(src->class->copy_config, dst, src);

  return dst;
}
|
|
|
|
|
|
|
|
|
|
|
|
static int reconfigure_type; /* Hack to propagate type info to channel_reconfigure() */
|
|
|
|
|
|
|
|
int
|
|
|
|
channel_reconfigure(struct channel *c, struct channel_config *cf)
|
|
|
|
{
|
2022-09-08 17:41:02 +00:00
|
|
|
/* Touched by reconfiguration */
|
|
|
|
c->stale = 0;
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
/* FIXME: better handle these changes, also handle in_keep_filtered */
|
2022-06-16 21:24:56 +00:00
|
|
|
if ((c->table != cf->table->table) ||
|
|
|
|
(cf->ra_mode && (c->ra_mode != cf->ra_mode)) ||
|
2022-06-22 10:45:42 +00:00
|
|
|
(cf->in_keep != c->in_keep) ||
|
|
|
|
cf->out_subprefix && c->out_subprefix &&
|
|
|
|
!net_equal(cf->out_subprefix, c->out_subprefix) ||
|
|
|
|
(!cf->out_subprefix != !c->out_subprefix))
|
2016-01-26 10:48:58 +00:00
|
|
|
return 0;
|
|
|
|
|
2018-01-09 13:36:11 +00:00
|
|
|
/* Note that filter_same() requires arguments in (new, old) order */
|
|
|
|
int import_changed = !filter_same(cf->in_filter, c->in_filter);
|
|
|
|
int export_changed = !filter_same(cf->out_filter, c->out_filter);
|
2021-02-12 04:05:18 +00:00
|
|
|
int rpki_reload_changed = (cf->rpki_reload != c->rpki_reload);
|
2016-01-26 10:48:58 +00:00
|
|
|
|
|
|
|
if (c->preference != cf->preference)
|
|
|
|
import_changed = 1;
|
|
|
|
|
|
|
|
if (c->merge_limit != cf->merge_limit)
|
|
|
|
export_changed = 1;
|
|
|
|
|
|
|
|
/* Reconfigure channel fields */
|
|
|
|
c->in_filter = cf->in_filter;
|
|
|
|
c->out_filter = cf->out_filter;
|
2021-11-06 19:34:16 +00:00
|
|
|
|
|
|
|
channel_update_limit(c, &c->rx_limit, PLD_RX, &cf->rx_limit);
|
|
|
|
channel_update_limit(c, &c->in_limit, PLD_IN, &cf->in_limit);
|
|
|
|
channel_update_limit(c, &c->out_limit, PLD_OUT, &cf->out_limit);
|
2016-01-26 10:48:58 +00:00
|
|
|
|
|
|
|
// c->ra_mode = cf->ra_mode;
|
|
|
|
c->merge_limit = cf->merge_limit;
|
|
|
|
c->preference = cf->preference;
|
2023-10-03 09:08:28 +00:00
|
|
|
c->out_req.prefilter.addr = c->out_subprefix = cf->out_subprefix;
|
2020-12-07 21:19:40 +00:00
|
|
|
c->debug = cf->debug;
|
2021-06-21 15:07:31 +00:00
|
|
|
c->in_req.trace_routes = c->out_req.trace_routes = c->debug | c->proto->debug;
|
2021-02-12 04:05:18 +00:00
|
|
|
c->rpki_reload = cf->rpki_reload;
|
2016-01-26 10:48:58 +00:00
|
|
|
|
2022-09-09 13:04:36 +00:00
|
|
|
  if ((c->roa_settle.min != cf->roa_settle.min)
|
|
|
|
|| (c->roa_settle.max != cf->roa_settle.max))
|
2022-09-01 08:39:56 +00:00
|
|
|
{
|
2022-09-09 13:04:36 +00:00
|
|
|
c->roa_settle = cf->roa_settle;
|
2022-09-01 08:39:56 +00:00
|
|
|
|
|
|
|
struct roa_subscription *s;
|
|
|
|
node *n;
|
|
|
|
|
|
|
|
WALK_LIST2(s, n, c->roa_subscriptions, roa_node)
|
2022-09-09 13:04:36 +00:00
|
|
|
{
|
|
|
|
s->settle.cf = cf->roa_settle;
|
|
|
|
if (settle_active(&s->settle))
|
|
|
|
settle_kick(&s->settle, &main_birdloop);
|
|
|
|
}
|
2022-09-01 08:39:56 +00:00
|
|
|
}
|
|
|
|
|
2016-12-07 13:11:28 +00:00
|
|
|
/* Execute channel-specific reconfigure hook */
|
2023-09-14 13:21:53 +00:00
|
|
|
if (c->class->reconfigure && !c->class->reconfigure(c, cf, &import_changed, &export_changed))
|
2016-12-07 13:11:28 +00:00
|
|
|
return 0;
|
2016-01-26 10:48:58 +00:00
|
|
|
|
|
|
|
/* If the channel is not open, it has no routes and we cannot reload it anyway */
|
|
|
|
if (c->channel_state != CS_UP)
|
2020-12-07 21:19:40 +00:00
|
|
|
goto done;
|
2016-01-26 10:48:58 +00:00
|
|
|
|
2021-02-10 02:09:57 +00:00
|
|
|
/* Update RPKI/ROA subscriptions */
|
2021-02-12 04:05:18 +00:00
|
|
|
if (import_changed || export_changed || rpki_reload_changed)
|
2021-02-10 02:09:57 +00:00
|
|
|
{
|
|
|
|
channel_roa_unsubscribe_all(c);
|
2021-02-12 04:05:18 +00:00
|
|
|
|
|
|
|
if (c->rpki_reload)
|
|
|
|
{
|
|
|
|
channel_roa_subscribe_filter(c, 1);
|
|
|
|
channel_roa_subscribe_filter(c, 0);
|
|
|
|
}
|
2021-02-10 02:09:57 +00:00
|
|
|
}
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
if (reconfigure_type == RECONFIG_SOFT)
|
|
|
|
{
|
|
|
|
if (import_changed)
|
|
|
|
log(L_INFO "Channel %s.%s changed import", c->proto->name, c->name);
|
|
|
|
|
|
|
|
if (export_changed)
|
|
|
|
log(L_INFO "Channel %s.%s changed export", c->proto->name, c->name);
|
|
|
|
|
2020-12-07 21:19:40 +00:00
|
|
|
goto done;
|
2016-01-26 10:48:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Route reload may not be supported */
|
|
|
|
if (import_changed && !channel_reloadable(c))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (import_changed || export_changed)
|
|
|
|
log(L_INFO "Reloading channel %s.%s", c->proto->name, c->name);
|
|
|
|
|
|
|
|
if (import_changed)
|
|
|
|
channel_request_reload(c);
|
|
|
|
|
|
|
|
if (export_changed)
|
2023-09-29 14:24:50 +00:00
|
|
|
channel_request_feeding_dynamic(c, CFRT_AUXILIARY);
|
2016-01-26 10:48:58 +00:00
|
|
|
|
2020-12-07 21:19:40 +00:00
|
|
|
done:
|
|
|
|
CD(c, "Reconfigured");
|
2016-01-26 10:48:58 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
proto_configure_channel(struct proto *p, struct channel **pc, struct channel_config *cf)
|
1999-05-17 20:14:52 +00:00
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
struct channel *c = *pc;
|
1999-05-17 20:14:52 +00:00
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
if (!c && cf)
|
|
|
|
{
|
2018-12-11 16:57:14 +00:00
|
|
|
/* We could add the channel, but currently it would just stay in down state
|
|
|
|
       until the protocol is restarted, so it is better to force a restart anyway. */
|
2018-12-16 21:48:13 +00:00
|
|
|
if (p->proto_state != PS_DOWN)
|
|
|
|
{
|
|
|
|
log(L_INFO "Cannot add channel %s.%s", p->name, cf->name);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
*pc = proto_add_channel(p, cf);
|
2016-01-26 10:48:58 +00:00
|
|
|
}
|
|
|
|
else if (c && !cf)
|
|
|
|
{
|
|
|
|
if (c->channel_state != CS_DOWN)
|
|
|
|
{
|
|
|
|
log(L_INFO "Cannot remove channel %s.%s", c->proto->name, c->name);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
proto_remove_channel(p, c);
|
|
|
|
*pc = NULL;
|
|
|
|
}
|
|
|
|
else if (c && cf)
|
|
|
|
{
|
|
|
|
if (!channel_reconfigure(c, cf))
|
|
|
|
{
|
|
|
|
log(L_INFO "Cannot reconfigure channel %s.%s", c->proto->name, c->name);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
2012-03-15 10:58:08 +00:00
|
|
|
}
|
|
|
|
|
2023-11-08 20:51:46 +00:00
|
|
|
|
2021-06-19 18:50:18 +00:00
|
|
|
static void
|
|
|
|
proto_cleanup(struct proto *p)
|
|
|
|
{
|
2023-02-07 13:27:23 +00:00
|
|
|
CALL(p->proto->cleanup, p);
|
|
|
|
|
2023-04-22 19:20:19 +00:00
|
|
|
if (p->pool)
|
|
|
|
{
|
|
|
|
rp_free(p->pool);
|
|
|
|
p->pool = NULL;
|
|
|
|
}
|
2021-06-19 18:50:18 +00:00
|
|
|
|
|
|
|
p->active = 0;
|
|
|
|
proto_log_state_change(p);
|
2023-09-24 21:22:43 +00:00
|
|
|
|
2021-06-19 18:50:18 +00:00
|
|
|
proto_rethink_goal(p);
|
|
|
|
}
|
2016-01-26 10:48:58 +00:00
|
|
|
|
2012-03-15 10:58:08 +00:00
|
|
|
static void
|
2021-06-19 18:50:18 +00:00
|
|
|
proto_loop_stopped(void *ptr)
|
2012-03-15 10:58:08 +00:00
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
struct proto *p = ptr;
|
|
|
|
|
2023-04-21 13:26:06 +00:00
|
|
|
ASSERT_DIE(birdloop_inside(&main_birdloop));
|
|
|
|
ASSERT_DIE(p->loop != &main_birdloop);
|
2021-06-19 18:50:18 +00:00
|
|
|
|
2023-04-22 19:20:19 +00:00
|
|
|
p->pool = NULL; /* is freed by birdloop_free() */
|
2023-02-24 08:13:35 +00:00
|
|
|
birdloop_free(p->loop);
|
2021-06-19 18:50:18 +00:00
|
|
|
p->loop = &main_birdloop;
|
2023-04-22 19:20:19 +00:00
|
|
|
|
2021-06-19 18:50:18 +00:00
|
|
|
proto_cleanup(p);
|
|
|
|
}
|
|
|
|
|
2023-11-08 20:51:46 +00:00
|
|
|
|
2021-06-19 18:50:18 +00:00
|
|
|
static void
|
|
|
|
proto_event(void *ptr)
|
|
|
|
{
|
|
|
|
struct proto *p = ptr;
|
2012-03-15 10:58:08 +00:00
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
if (p->do_stop)
|
2012-03-15 10:58:08 +00:00
|
|
|
{
|
2023-01-31 12:07:46 +00:00
|
|
|
iface_unsubscribe(&p->iface_sub);
|
2021-10-18 19:22:58 +00:00
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
p->do_stop = 0;
|
2012-03-15 10:58:08 +00:00
|
|
|
}
|
|
|
|
|
2023-10-13 08:22:09 +00:00
|
|
|
if (proto_is_done(p) && p->pool_inloop) /* perusing pool_inloop to do this once only */
|
2023-05-07 15:30:33 +00:00
|
|
|
{
|
2023-10-13 08:22:09 +00:00
|
|
|
rp_free(p->pool_inloop);
|
|
|
|
p->pool_inloop = NULL;
|
2021-06-19 18:50:18 +00:00
|
|
|
if (p->loop != &main_birdloop)
|
|
|
|
birdloop_stop_self(p->loop, proto_loop_stopped, p);
|
|
|
|
else
|
|
|
|
proto_cleanup(p);
|
2023-05-07 15:30:33 +00:00
|
|
|
}
|
2016-01-26 10:48:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* proto_new - create a new protocol instance
|
|
|
|
 * @cf: protocol configuration
|
|
|
|
*
|
|
|
|
* When a new configuration has been read in, the core code starts
|
|
|
|
* initializing all the protocol instances configured by calling their
|
|
|
|
* init() hooks with the corresponding instance configuration. The initialization
|
|
|
|
* code of the protocol is expected to create a new instance according to the
|
|
|
|
* configuration by calling this function and then modifying the default settings
|
|
|
|
* to values wanted by the protocol.
|
|
|
|
*/
|
|
|
|
void *
|
|
|
|
proto_new(struct proto_config *cf)
|
|
|
|
{
|
|
|
|
struct proto *p = mb_allocz(proto_pool, cf->protocol->proto_size);
|
|
|
|
|
|
|
|
p->cf = cf;
|
|
|
|
p->debug = cf->debug;
|
|
|
|
p->mrtdump = cf->mrtdump;
|
|
|
|
p->name = cf->name;
|
|
|
|
p->proto = cf->protocol;
|
|
|
|
p->net_type = cf->net_type;
|
|
|
|
p->disabled = cf->disabled;
|
|
|
|
p->hash_key = random_u32();
|
|
|
|
cf->proto = p;
|
|
|
|
|
|
|
|
init_list(&p->channels);
|
|
|
|
|
|
|
|
return p;
|
|
|
|
}
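/*
 * Illustrative sketch (not part of BIRD): a minimal init() hook as described
 * above. The my_proto wrapper structure, the counter field and the
 * my_rt_notify hook are hypothetical names used only for this example.
 *
 *   static struct proto *
 *   my_init(struct proto_config *CF)
 *   {
 *     struct proto *P = proto_new(CF);
 *     struct my_proto *p = (struct my_proto *) P;
 *
 *     P->rt_notify = my_rt_notify;	-- hypothetical route-notify hook
 *     p->counter = 0;			-- protocol-private default
 *
 *     return P;
 *   }
 */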
|
|
|
|
|
|
|
|
static struct proto *
|
2023-04-19 19:18:12 +00:00
|
|
|
proto_init(struct proto_config *c, struct proto *after)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
struct protocol *pr = c->protocol;
|
|
|
|
struct proto *p = pr->init(c);
|
|
|
|
|
2021-06-19 18:50:18 +00:00
|
|
|
p->loop = &main_birdloop;
|
2016-01-26 10:48:58 +00:00
|
|
|
p->proto_state = PS_DOWN;
|
2017-06-06 14:47:30 +00:00
|
|
|
p->last_state_change = current_time();
|
2017-12-07 17:35:46 +00:00
|
|
|
p->vrf = c->vrf;
|
2023-04-19 19:18:12 +00:00
|
|
|
proto_add_after(&global_proto_list, p, after);
|
2016-01-26 10:48:58 +00:00
|
|
|
|
2018-10-01 13:55:23 +00:00
|
|
|
p->event = ev_new_init(proto_pool, proto_event, p);
|
2016-01-26 10:48:58 +00:00
|
|
|
|
|
|
|
PD(p, "Initializing%s", p->disabled ? " [disabled]" : "");
|
|
|
|
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
proto_start(struct proto *p)
|
|
|
|
{
|
2021-06-19 18:50:18 +00:00
|
|
|
DBG("Kicking %s up\n", p->name);
|
|
|
|
PD(p, "Starting");
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
if (graceful_restart_state == GRS_INIT)
|
|
|
|
p->gr_recovery = 1;
|
2021-06-19 18:50:18 +00:00
|
|
|
|
|
|
|
if (p->cf->loop_order != DOMAIN_ORDER(the_bird))
|
2023-04-22 19:20:19 +00:00
|
|
|
{
|
|
|
|
p->loop = birdloop_new(proto_pool, p->cf->loop_order, p->cf->loop_max_latency, "Protocol %s", p->cf->name);
|
|
|
|
p->pool = birdloop_pool(p->loop);
|
|
|
|
}
|
|
|
|
else
|
2023-04-21 13:26:06 +00:00
|
|
|
p->pool = rp_newf(proto_pool, the_bird_domain.the_bird, "Protocol %s", p->cf->name);
|
2021-06-19 18:50:18 +00:00
|
|
|
|
2023-04-04 14:41:55 +00:00
|
|
|
p->iface_sub.target = proto_event_list(p);
|
2023-09-24 20:20:01 +00:00
|
|
|
p->iface_sub.name = p->name;
|
|
|
|
p->iface_sub.debug = !!(p->debug & D_IFACES);
|
2023-04-04 14:41:55 +00:00
|
|
|
|
2021-06-19 18:50:18 +00:00
|
|
|
PROTO_LOCKED_FROM_MAIN(p)
|
2023-05-07 15:30:33 +00:00
|
|
|
{
|
2023-10-13 08:22:09 +00:00
|
|
|
p->pool_inloop = rp_newf(p->pool, birdloop_domain(p->loop), "Protocol %s early cleanup objects", p->cf->name);
|
|
|
|
p->pool_up = rp_newf(p->pool, birdloop_domain(p->loop), "Protocol %s stop-free objects", p->cf->name);
|
2021-06-19 18:50:18 +00:00
|
|
|
proto_notify_state(p, (p->proto->start ? p->proto->start(p) : PS_UP));
|
2023-05-07 15:30:33 +00:00
|
|
|
}
|
1999-05-17 20:14:52 +00:00
|
|
|
}
|
|
|
|
|
2012-08-14 14:25:22 +00:00
|
|
|
|
2000-06-02 13:42:36 +00:00
|
|
|
/**
|
|
|
|
* proto_config_new - create a new protocol configuration
|
|
|
|
* @pr: protocol the configuration will belong to
|
2011-11-06 23:31:23 +00:00
|
|
|
* @class: SYM_PROTO or SYM_TEMPLATE
|
2000-06-02 13:42:36 +00:00
|
|
|
*
|
|
|
|
* Whenever the configuration file says that a new instance
|
|
|
|
* of a routing protocol should be created, the parser calls
|
|
|
|
* proto_config_new() to create a configuration entry for this
|
|
|
|
 * instance (a structure starting with the &proto_config header
|
|
|
|
* containing all the generic items followed by protocol-specific
|
|
|
|
* ones). Also, the configuration entry gets added to the list
|
|
|
|
* of protocol instances kept in the configuration.
|
2011-11-06 23:31:23 +00:00
|
|
|
*
|
|
|
|
* The function is also used to create protocol templates (when class
|
|
|
|
 * SYM_TEMPLATE is specified); the only difference is that templates
|
|
|
|
* are not added to the list of protocol instances and therefore not
|
|
|
|
 * initialized during protos_commit().
|
2000-06-02 13:42:36 +00:00
|
|
|
*/
|
1999-02-05 21:37:34 +00:00
|
|
|
void *
|
2015-02-21 20:08:23 +00:00
|
|
|
proto_config_new(struct protocol *pr, int class)
|
1999-02-05 21:37:34 +00:00
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
struct proto_config *cf = cfg_allocz(pr->config_size);
|
1999-02-05 21:37:34 +00:00
|
|
|
|
2011-11-06 23:31:23 +00:00
|
|
|
if (class == SYM_PROTO)
|
2016-01-26 10:48:58 +00:00
|
|
|
add_tail(&new_config->protos, &cf->n);
|
|
|
|
|
|
|
|
cf->global = new_config;
|
|
|
|
cf->protocol = pr;
|
|
|
|
cf->name = pr->name;
|
|
|
|
cf->class = class;
|
|
|
|
cf->debug = new_config->proto_default_debug;
|
|
|
|
cf->mrtdump = new_config->proto_default_mrtdump;
|
2021-06-19 18:50:18 +00:00
|
|
|
cf->loop_order = DOMAIN_ORDER(the_bird);
|
2016-01-26 10:48:58 +00:00
|
|
|
|
|
|
|
init_list(&cf->channels);
|
|
|
|
|
|
|
|
return cf;
|
1999-02-05 21:37:34 +00:00
|
|
|
}
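/*
 * Illustrative sketch (not compiled here): a protocol-specific configuration
 * structure starts with the generic &proto_config header and is obtained from
 * proto_config_new() in the protocol's grammar action. The my_config name,
 * its fields and the proto_my protocol descriptor are hypothetical.
 *
 *   struct my_config {
 *     struct proto_config c;		-- generic header must come first
 *     u32 hello_interval;		-- protocol-specific items follow
 *   };
 *
 *   -- somewhere in the protocol grammar:
 *   struct my_config *mc = proto_config_new(&proto_my, SYM_PROTO);
 *   mc->hello_interval = 10;
 */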
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
|
2011-11-06 23:31:23 +00:00
|
|
|
/**
|
|
|
|
* proto_copy_config - copy a protocol configuration
|
|
|
|
* @dest: destination protocol configuration
|
|
|
|
* @src: source protocol configuration
|
|
|
|
*
|
|
|
|
* Whenever a new instance of a routing protocol is created from the
|
|
|
|
* template, proto_copy_config() is called to copy a content of
|
|
|
|
* the source protocol configuration to the new protocol configuration.
|
|
|
|
* Name, class and a node in protos list of @dest are kept intact.
|
|
|
|
* copy_config() protocol hook is used to copy protocol-specific data.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
proto_copy_config(struct proto_config *dest, struct proto_config *src)
|
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
struct channel_config *cc;
|
2011-11-06 23:31:23 +00:00
|
|
|
node old_node;
|
|
|
|
int old_class;
|
2020-04-08 20:25:15 +00:00
|
|
|
const char *old_name;
|
2011-11-06 23:31:23 +00:00
|
|
|
|
|
|
|
if (dest->protocol != src->protocol)
|
|
|
|
cf_error("Can't copy configuration from a different protocol type");
|
|
|
|
|
|
|
|
if (dest->protocol->copy_config == NULL)
|
|
|
|
cf_error("Inheriting configuration for %s is not supported", src->protocol->name);
|
|
|
|
|
|
|
|
DBG("Copying configuration from %s to %s\n", src->name, dest->name);
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
/*
|
2011-11-06 23:31:23 +00:00
|
|
|
* Copy struct proto_config here. Keep original node, class and name.
|
|
|
|
* protocol-specific config copy is handled by protocol copy_config() hook
|
|
|
|
*/
|
|
|
|
|
|
|
|
old_node = dest->n;
|
|
|
|
old_class = dest->class;
|
|
|
|
old_name = dest->name;
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
memcpy(dest, src, src->protocol->config_size);
|
2011-11-06 23:31:23 +00:00
|
|
|
|
|
|
|
dest->n = old_node;
|
|
|
|
dest->class = old_class;
|
|
|
|
dest->name = old_name;
|
2016-01-26 10:48:58 +00:00
|
|
|
init_list(&dest->channels);
|
2011-11-06 23:31:23 +00:00
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
WALK_LIST(cc, src->channels)
|
|
|
|
channel_copy_config(cc, dest);
|
|
|
|
|
|
|
|
/* FIXME: allow for undefined copy_config */
|
2011-11-06 23:31:23 +00:00
|
|
|
dest->protocol->copy_config(dest, src);
|
|
|
|
}
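/*
 * Illustrative sketch (hypothetical names): a protocol's copy_config() hook
 * only has to deep-copy the protocol-specific parts; the generic part has
 * already been copied by the memcpy() in proto_copy_config() above.
 *
 *   static void
 *   my_copy_config(struct proto_config *dest, struct proto_config *src UNUSED)
 *   {
 *     struct my_config *d = (struct my_config *) dest;
 *
 *     -- reinitialize lists that must not stay shared with the template
 *     init_list(&d->patterns);
 *   }
 */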
|
|
|
|
|
2019-04-08 15:05:07 +00:00
|
|
|
void
|
|
|
|
proto_clone_config(struct symbol *sym, struct proto_config *parent)
|
|
|
|
{
|
|
|
|
struct proto_config *cf = proto_config_new(parent->protocol, SYM_PROTO);
|
|
|
|
proto_copy_config(cf, parent);
|
|
|
|
cf->name = sym->name;
|
|
|
|
cf->proto = NULL;
|
|
|
|
cf->parent = parent;
|
|
|
|
|
|
|
|
sym->class = cf->class;
|
2019-07-03 09:09:52 +00:00
|
|
|
sym->proto = cf;
|
2019-04-08 15:05:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
proto_undef_clone(struct symbol *sym, struct proto_config *cf)
|
|
|
|
{
|
|
|
|
rem_node(&cf->n);
|
|
|
|
|
|
|
|
sym->class = SYM_VOID;
|
2019-07-03 09:09:52 +00:00
|
|
|
sym->proto = NULL;
|
2019-04-08 15:05:07 +00:00
|
|
|
}
|
|
|
|
|
2000-06-02 13:42:36 +00:00
|
|
|
/**
|
|
|
|
* protos_preconfig - pre-configuration processing
|
|
|
|
* @c: new configuration
|
|
|
|
*
|
|
|
|
* This function calls the preconfig() hooks of all routing
|
|
|
|
* protocols available to prepare them for reading of the new
|
|
|
|
* configuration.
|
|
|
|
*/
|
1998-05-20 11:54:33 +00:00
|
|
|
void
|
1999-02-05 21:37:34 +00:00
|
|
|
protos_preconfig(struct config *c)
|
1998-05-20 11:54:33 +00:00
|
|
|
{
|
1998-06-03 08:38:53 +00:00
|
|
|
struct protocol *p;
|
|
|
|
|
1999-12-06 13:44:45 +00:00
|
|
|
init_list(&c->protos);
|
2000-03-12 21:01:38 +00:00
|
|
|
DBG("Protocol preconfig:");
|
1998-06-03 08:38:53 +00:00
|
|
|
WALK_LIST(p, protocol_list)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
DBG(" %s", p->name);
|
|
|
|
p->name_counter = 0;
|
|
|
|
if (p->preconfig)
|
|
|
|
p->preconfig(p, c);
|
|
|
|
}
|
2000-03-12 21:01:38 +00:00
|
|
|
DBG("\n");
|
1999-02-05 21:37:34 +00:00
|
|
|
}
|
|
|
|
|
2010-02-06 18:19:09 +00:00
|
|
|
static int
|
|
|
|
proto_reconfigure(struct proto *p, struct proto_config *oc, struct proto_config *nc, int type)
|
|
|
|
{
|
|
|
|
/* If the protocol is DOWN, we just restart it */
|
|
|
|
if (p->proto_state == PS_DOWN)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
  /* If there is too big a change in core attributes, ... */
|
|
|
|
if ((nc->protocol != oc->protocol) ||
|
2016-01-26 10:48:58 +00:00
|
|
|
(nc->net_type != oc->net_type) ||
|
2011-06-20 18:35:59 +00:00
|
|
|
(nc->disabled != p->disabled) ||
|
2021-11-15 09:53:58 +00:00
|
|
|
(nc->vrf != oc->vrf))
|
2010-02-06 18:19:09 +00:00
|
|
|
return 0;
|
|
|
|
|
2023-03-08 12:47:42 +00:00
|
|
|
p->sources.name = p->name = nc->name;
|
2023-12-08 10:33:43 +00:00
|
|
|
p->sources.debug = p->debug = nc->debug;
|
2010-02-06 18:19:09 +00:00
|
|
|
p->mrtdump = nc->mrtdump;
|
2016-01-26 10:48:58 +00:00
|
|
|
reconfigure_type = type;
|
2010-02-06 18:19:09 +00:00
|
|
|
|
|
|
|
/* Execute protocol specific reconfigure hook */
|
2016-01-26 10:48:58 +00:00
|
|
|
if (!p->proto->reconfigure || !p->proto->reconfigure(p, nc))
|
2010-02-06 18:19:09 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
DBG("\t%s: same\n", oc->name);
|
|
|
|
PD(p, "Reconfigured");
|
|
|
|
p->cf = nc;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2023-09-24 21:22:43 +00:00
|
|
|
static struct protos_commit_request {
|
|
|
|
struct config *new;
|
|
|
|
struct config *old;
|
|
|
|
enum protocol_startup phase;
|
|
|
|
int force_reconfig;
|
|
|
|
int type;
|
|
|
|
} protos_commit_request;
|
|
|
|
|
|
|
|
static int proto_rethink_goal_pending = 0;
|
|
|
|
|
|
|
|
static void protos_do_commit(struct config *new, struct config *old, int force_reconfig, int type);
|
|
|
|
|
2000-06-02 13:42:36 +00:00
|
|
|
/**
|
|
|
|
* protos_commit - commit new protocol configuration
|
|
|
|
* @new: new configuration
|
|
|
|
* @old: old configuration or %NULL if it's boot time config
|
|
|
|
* @force_reconfig: force restart of all protocols (used for example
|
|
|
|
* when the router ID changes)
|
2009-06-19 21:49:34 +00:00
|
|
|
* @type: type of reconfiguration (RECONFIG_SOFT or RECONFIG_HARD)
|
2000-06-02 13:42:36 +00:00
|
|
|
*
|
|
|
|
* Scan differences between @old and @new configuration and adjust all
|
|
|
|
* protocol instances to conform to the new configuration.
|
|
|
|
*
|
|
|
|
* When a protocol exists in the new configuration, but it doesn't in the
|
|
|
|
 * original one, it's immediately started. When a collision with another
|
|
|
|
* running protocol would arise, the new protocol will be temporarily stopped
|
|
|
|
* by the locking mechanism.
|
|
|
|
*
|
|
|
|
* When a protocol exists in the old configuration, but it doesn't in the
|
|
|
|
* new one, it's shut down and deleted after the shutdown completes.
|
|
|
|
*
|
2009-06-19 21:49:34 +00:00
|
|
|
* When a protocol exists in both configurations, the core decides
|
|
|
|
* whether it's possible to reconfigure it dynamically - it checks all
|
|
|
|
* the core properties of the protocol (changes in filters are ignored
|
|
|
|
* if type is RECONFIG_SOFT) and if they match, it asks the
|
|
|
|
* reconfigure() hook of the protocol to see if the protocol is able
|
|
|
|
* to switch to the new configuration. If it isn't possible, the
|
|
|
|
* protocol is shut down and a new instance is started with the new
|
|
|
|
* configuration after the shutdown is completed.
|
2000-06-02 13:42:36 +00:00
|
|
|
*/
|
1999-02-05 21:37:34 +00:00
|
|
|
void
|
2009-06-19 21:49:34 +00:00
|
|
|
protos_commit(struct config *new, struct config *old, int force_reconfig, int type)
|
1999-02-05 21:37:34 +00:00
|
|
|
{
|
2023-09-24 21:22:43 +00:00
|
|
|
protos_commit_request = (struct protos_commit_request) {
|
|
|
|
.new = new,
|
|
|
|
.old = old,
|
|
|
|
.phase = (new->shutdown && !new->gr_down) ? PROTOCOL_STARTUP_REGULAR : PROTOCOL_STARTUP_NECESSARY,
|
|
|
|
.force_reconfig = force_reconfig,
|
|
|
|
.type = type,
|
|
|
|
};
|
|
|
|
|
|
|
|
protos_do_commit(new, old, force_reconfig, type);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
protos_do_commit(struct config *new, struct config *old, int force_reconfig, int type)
|
|
|
|
{
|
|
|
|
enum protocol_startup phase = protos_commit_request.phase;
|
2000-01-16 16:44:50 +00:00
|
|
|
struct proto_config *oc, *nc;
|
2011-11-06 23:31:23 +00:00
|
|
|
struct symbol *sym;
|
2016-01-26 10:48:58 +00:00
|
|
|
struct proto *p;
|
1999-02-05 21:37:34 +00:00
|
|
|
|
2023-09-24 21:22:43 +00:00
|
|
|
if ((phase < PROTOCOL_STARTUP_REGULAR) || (phase > PROTOCOL_STARTUP_NECESSARY))
|
|
|
|
{
|
|
|
|
protos_commit_request = (struct protos_commit_request) {};
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2000-01-16 16:44:50 +00:00
|
|
|
DBG("protos_commit:\n");
|
|
|
|
if (old)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
WALK_LIST(oc, old->protos)
|
1999-02-05 21:37:34 +00:00
|
|
|
{
|
2023-09-24 21:22:43 +00:00
|
|
|
if (oc->protocol->startup != phase)
|
|
|
|
continue;
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
p = oc->proto;
|
|
|
|
sym = cf_find_symbol(new, oc->name);
|
2019-04-08 15:05:07 +00:00
|
|
|
|
2023-03-07 22:22:03 +00:00
|
|
|
struct birdloop *proto_loop = PROTO_ENTER_FROM_MAIN(p);
|
|
|
|
|
2019-04-08 15:05:07 +00:00
|
|
|
/* Handle dynamic protocols */
|
|
|
|
if (!sym && oc->parent && !new->shutdown)
|
|
|
|
{
|
|
|
|
struct symbol *parsym = cf_find_symbol(new, oc->parent->name);
|
|
|
|
if (parsym && parsym->class == SYM_PROTO)
|
|
|
|
{
|
|
|
|
	  /* This is a hack; we would like to share the config, but we need to copy it now */
|
|
|
|
new_config = new;
|
|
|
|
cfg_mem = new->mem;
|
2023-06-13 08:51:03 +00:00
|
|
|
new->current_scope = new->root_scope;
|
|
|
|
sym = cf_get_symbol(new, oc->name);
|
2019-07-03 09:09:52 +00:00
|
|
|
proto_clone_config(sym, parsym->proto);
|
2019-04-08 15:05:07 +00:00
|
|
|
new_config = NULL;
|
|
|
|
cfg_mem = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
if (sym && sym->class == SYM_PROTO && !new->shutdown)
|
|
|
|
{
|
|
|
|
/* Found match, let's check if we can smoothly switch to new configuration */
|
|
|
|
/* No need to check description */
|
2019-02-15 12:53:17 +00:00
|
|
|
nc = sym->proto;
|
2016-01-26 10:48:58 +00:00
|
|
|
nc->proto = p;
|
|
|
|
|
|
|
|
/* We will try to reconfigure protocol p */
|
2023-03-07 22:22:03 +00:00
|
|
|
if (!force_reconfig && proto_reconfigure(p, oc, nc, type))
|
|
|
|
{
|
|
|
|
PROTO_LEAVE_FROM_MAIN(proto_loop);
|
2016-01-26 10:48:58 +00:00
|
|
|
continue;
|
2023-03-07 22:22:03 +00:00
|
|
|
}
|
2016-01-26 10:48:58 +00:00
|
|
|
|
2019-04-08 15:05:07 +00:00
|
|
|
if (nc->parent)
|
|
|
|
{
|
|
|
|
proto_undef_clone(sym, nc);
|
|
|
|
goto remove;
|
|
|
|
}
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
/* Unsuccessful, we will restart it */
|
|
|
|
if (!p->disabled && !nc->disabled)
|
|
|
|
log(L_INFO "Restarting protocol %s", p->name);
|
|
|
|
else if (p->disabled && !nc->disabled)
|
|
|
|
log(L_INFO "Enabling protocol %s", p->name);
|
|
|
|
else if (!p->disabled && nc->disabled)
|
|
|
|
log(L_INFO "Disabling protocol %s", p->name);
|
|
|
|
|
|
|
|
p->down_code = nc->disabled ? PDC_CF_DISABLE : PDC_CF_RESTART;
|
|
|
|
p->cf_new = nc;
|
|
|
|
}
|
|
|
|
else if (!new->shutdown)
|
|
|
|
{
|
2019-04-08 15:05:07 +00:00
|
|
|
remove:
|
2016-01-26 10:48:58 +00:00
|
|
|
log(L_INFO "Removing protocol %s", p->name);
|
|
|
|
p->down_code = PDC_CF_REMOVE;
|
|
|
|
p->cf_new = NULL;
|
|
|
|
}
|
2019-06-18 14:27:21 +00:00
|
|
|
else if (new->gr_down)
|
|
|
|
{
|
|
|
|
p->down_code = PDC_CMD_GR_DOWN;
|
|
|
|
p->cf_new = NULL;
|
|
|
|
}
|
2016-01-26 10:48:58 +00:00
|
|
|
else /* global shutdown */
|
|
|
|
{
|
|
|
|
p->down_code = PDC_CMD_SHUTDOWN;
|
|
|
|
p->cf_new = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
p->reconfiguring = 1;
|
2023-03-07 22:22:03 +00:00
|
|
|
PROTO_LEAVE_FROM_MAIN(proto_loop);
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
config_add_obstacle(old);
|
|
|
|
proto_rethink_goal(p);
|
1998-06-03 08:38:53 +00:00
|
|
|
}
|
2016-01-26 10:48:58 +00:00
|
|
|
}
|
|
|
|
|
2023-04-19 19:18:12 +00:00
|
|
|
struct proto *after = NULL;
|
2000-01-16 16:44:50 +00:00
|
|
|
|
|
|
|
WALK_LIST(nc, new->protos)
|
2023-09-24 21:22:43 +00:00
|
|
|
if ((nc->protocol->startup == phase) && !nc->proto)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
/* Not a first-time configuration */
|
|
|
|
if (old)
|
|
|
|
log(L_INFO "Adding protocol %s", nc->name);
|
|
|
|
|
2023-04-19 19:18:12 +00:00
|
|
|
p = proto_init(nc, after);
|
|
|
|
after = p;
|
2016-01-26 10:48:58 +00:00
|
|
|
|
2023-09-24 21:22:43 +00:00
|
|
|
proto_rethink_goal(p);
|
2016-01-26 10:48:58 +00:00
|
|
|
}
|
|
|
|
else
|
2023-04-19 19:18:12 +00:00
|
|
|
after = nc->proto;
|
2000-01-16 16:44:50 +00:00
|
|
|
|
|
|
|
DBG("Protocol start\n");
|
2011-04-01 11:54:39 +00:00
|
|
|
|
2012-12-27 11:56:23 +00:00
|
|
|
/* Determine router ID for the first time - it has to be here and not in
|
|
|
|
     global_commit() because it is postponed until after the start of the device protocol */
|
|
|
|
if (!config->router_id)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
config->router_id = if_choose_router_id(config->router_id_from, 0);
|
|
|
|
if (!config->router_id)
|
|
|
|
die("Cannot determine router ID, please configure it manually");
|
|
|
|
}
|
2012-12-27 11:56:23 +00:00
|
|
|
|
2023-09-24 21:22:43 +00:00
|
|
|
/* Commit next round of protocols */
|
|
|
|
if (new->shutdown && !new->gr_down)
|
|
|
|
protos_commit_request.phase++;
|
|
|
|
else
|
|
|
|
protos_commit_request.phase--;
|
|
|
|
|
|
|
|
/* If something is pending, the next round will be called asynchronously from proto_rethink_goal(). */
|
|
|
|
if (!proto_rethink_goal_pending)
|
|
|
|
protos_do_commit(new, old, force_reconfig, type);
|
1998-06-03 08:38:53 +00:00
|
|
|
}
|
|
|
|
|
1998-10-17 11:05:18 +00:00
|
|
|
static void
|
2021-06-19 18:50:18 +00:00
|
|
|
proto_shutdown(struct proto *p)
|
1998-10-17 11:05:18 +00:00
|
|
|
{
|
2021-06-19 18:50:18 +00:00
|
|
|
if (p->proto_state == PS_START || p->proto_state == PS_UP)
|
|
|
|
{
|
|
|
|
/* Going down */
|
|
|
|
DBG("Kicking %s down\n", p->name);
|
|
|
|
PD(p, "Shutting down");
|
|
|
|
proto_notify_state(p, (p->proto->shutdown ? p->proto->shutdown(p) : PS_DOWN));
|
2023-09-24 21:22:43 +00:00
|
|
|
if (p->reconfiguring)
|
|
|
|
{
|
|
|
|
proto_rethink_goal_pending++;
|
|
|
|
p->reconfiguring = 2;
|
|
|
|
}
|
2021-06-19 18:50:18 +00:00
|
|
|
}
|
|
|
|
}
|
2000-01-16 16:44:50 +00:00
|
|
|
|
2021-06-19 18:50:18 +00:00
|
|
|
static void
|
|
|
|
proto_rethink_goal(struct proto *p)
|
|
|
|
{
|
2023-09-24 21:22:43 +00:00
|
|
|
int goal_pending = (p->reconfiguring == 2);
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
if (p->reconfiguring && !p->active)
|
|
|
|
{
|
|
|
|
struct proto_config *nc = p->cf_new;
|
2023-04-19 19:18:12 +00:00
|
|
|
struct proto *after = p->n.prev;
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
DBG("%s has shut down for reconfiguration\n", p->name);
|
|
|
|
p->cf->proto = NULL;
|
|
|
|
config_del_obstacle(p->cf->global);
|
|
|
|
proto_remove_channels(p);
|
2023-04-19 19:18:12 +00:00
|
|
|
proto_rem_node(&global_proto_list, p);
|
2016-01-26 10:48:58 +00:00
|
|
|
rfree(p->event);
|
2017-12-07 20:54:47 +00:00
|
|
|
mb_free(p->message);
|
2016-01-26 10:48:58 +00:00
|
|
|
mb_free(p);
|
|
|
|
if (!nc)
|
2023-09-24 21:22:43 +00:00
|
|
|
goto done;
|
|
|
|
|
2023-04-19 19:18:12 +00:00
|
|
|
p = proto_init(nc, after);
|
2016-01-26 10:48:58 +00:00
|
|
|
}
|
2000-01-16 16:44:50 +00:00
|
|
|
|
|
|
|
/* Determine what state we want to reach */
|
2000-01-16 17:40:26 +00:00
|
|
|
if (p->disabled || p->reconfiguring)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
2021-06-19 18:50:18 +00:00
|
|
|
PROTO_LOCKED_FROM_MAIN(p)
|
|
|
|
proto_shutdown(p);
|
2016-01-26 10:48:58 +00:00
|
|
|
}
|
2021-06-19 18:50:18 +00:00
|
|
|
else if (!p->active)
|
|
|
|
proto_start(p);
|
2023-09-24 21:22:43 +00:00
|
|
|
|
|
|
|
done:
|
|
|
|
if (goal_pending && !--proto_rethink_goal_pending)
|
|
|
|
protos_do_commit(
|
|
|
|
protos_commit_request.new,
|
|
|
|
protos_commit_request.old,
|
|
|
|
protos_commit_request.force_reconfig,
|
|
|
|
protos_commit_request.type
|
|
|
|
);
|
1999-02-11 22:59:06 +00:00
|
|
|
}
|
|
|
|
|
2019-04-08 15:05:07 +00:00
|
|
|
struct proto *
|
|
|
|
proto_spawn(struct proto_config *cf, uint disabled)
|
|
|
|
{
|
2023-04-19 19:18:12 +00:00
|
|
|
struct proto *p = proto_init(cf, global_proto_list.last);
|
2019-04-08 15:05:07 +00:00
|
|
|
p->disabled = disabled;
|
|
|
|
proto_rethink_goal(p);
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
2014-03-20 13:07:12 +00:00
|
|
|
|
2014-03-23 00:35:33 +00:00
|
|
|
/**
|
|
|
|
* DOC: Graceful restart recovery
|
|
|
|
*
|
|
|
|
 * Graceful restart of a router is a process in which the routing plane (e.g. BIRD)
|
|
|
|
 * restarts while both the forwarding plane (e.g. the kernel routing table) and routing
|
|
|
|
* neighbors keep proper routes, and therefore uninterrupted packet forwarding
|
|
|
|
* is maintained.
|
|
|
|
*
|
|
|
|
* BIRD implements graceful restart recovery by deferring export of routes to
|
|
|
|
* protocols until routing tables are refilled with the expected content. After
|
|
|
|
* start, protocols generate routes as usual, but routes are not propagated to
|
|
|
|
* them, until protocols report that they generated all routes. After that,
|
|
|
|
* graceful restart recovery is finished and the export (and the initial feed)
|
|
|
|
* to protocols is enabled.
|
|
|
|
*
|
|
|
|
 * When the need for graceful restart recovery is detected during initialization,
|
|
|
|
* enabled protocols are marked with @gr_recovery flag before start. Such
|
|
|
|
 * protocols then decide how to proceed with graceful restart; participation is
|
2016-01-26 10:48:58 +00:00
|
|
|
* voluntary. Protocols could lock the recovery for each channel by function
|
2016-05-12 14:04:47 +00:00
|
|
|
* channel_graceful_restart_lock() (state stored in @gr_lock flag), which means
|
2016-01-26 10:48:58 +00:00
|
|
|
* that they want to postpone the end of the recovery until they converge and
|
|
|
|
* then unlock it. They also could set @gr_wait before advancing to %PS_UP,
|
|
|
|
* which means that the core should defer route export to that channel until
|
|
|
|
* the end of the recovery. This should be done by protocols that expect their
|
|
|
|
 * neighbors to keep the proper routes (kernel table, BGP sessions with BGP
|
|
|
|
* graceful restart capability).
|
2014-03-23 00:35:33 +00:00
|
|
|
*
|
|
|
|
* The graceful restart recovery is finished when either all graceful restart
|
|
|
|
* locks are unlocked or when graceful restart wait timer fires.
|
|
|
|
*
|
|
|
|
*/
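/*
 * Illustrative sketch (hypothetical protocol code): how a protocol may take
 * part in graceful restart recovery from its start() hook, as described
 * above. Whether and when to lock is entirely up to the protocol.
 *
 *   static int
 *   my_start(struct proto *P)
 *   {
 *     struct channel *c = ...;		-- the protocol's channel
 *
 *     if (P->gr_recovery)
 *     {
 *       channel_graceful_restart_lock(c);	-- hold recovery until we converge
 *       c->gr_wait = 1;			-- defer route export to this channel
 *     }
 *
 *     return PS_START;
 *   }
 *
 *   -- later, once the protocol has converged:
 *   channel_graceful_restart_unlock(c);
 */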
|
2014-03-20 13:07:12 +00:00
|
|
|
|
2017-06-01 10:33:20 +00:00
|
|
|
static void graceful_restart_done(timer *t);
|
2014-03-20 13:07:12 +00:00
|
|
|
|
2014-03-23 00:35:33 +00:00
|
|
|
/**
|
|
|
|
* graceful_restart_recovery - request initial graceful restart recovery
|
|
|
|
*
|
|
|
|
* Called by the platform initialization code if the need for recovery
|
|
|
|
 * after graceful restart is detected during boot. It has to be called
|
|
|
|
* before protos_commit().
|
|
|
|
*/
|
2014-03-20 13:07:12 +00:00
|
|
|
void
|
|
|
|
graceful_restart_recovery(void)
|
|
|
|
{
|
|
|
|
graceful_restart_state = GRS_INIT;
|
|
|
|
}
|
|
|
|
|
2014-03-23 00:35:33 +00:00
|
|
|
/**
|
|
|
|
* graceful_restart_init - initialize graceful restart
|
|
|
|
*
|
|
|
|
* When graceful restart recovery was requested, the function starts an active
|
|
|
|
* phase of the recovery and initializes graceful restart wait timer. The
|
|
|
|
 * function has to be called after protos_commit().
|
|
|
|
*/
|
2014-03-20 13:07:12 +00:00
|
|
|
void
|
|
|
|
graceful_restart_init(void)
|
|
|
|
{
|
|
|
|
if (!graceful_restart_state)
|
|
|
|
return;
|
|
|
|
|
|
|
|
log(L_INFO "Graceful restart started");
|
|
|
|
|
|
|
|
if (!graceful_restart_locks)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
graceful_restart_done(NULL);
|
|
|
|
return;
|
|
|
|
}
|
2014-03-20 13:07:12 +00:00
|
|
|
|
|
|
|
graceful_restart_state = GRS_ACTIVE;
|
2017-11-28 16:43:20 +00:00
|
|
|
gr_wait_timer = tm_new_init(proto_pool, graceful_restart_done, NULL, 0, 0);
|
|
|
|
tm_start(gr_wait_timer, config->gr_wait S);
|
2014-03-20 13:07:12 +00:00
|
|
|
}
|
|
|
|
|
2014-03-23 00:35:33 +00:00
|
|
|
/**
|
|
|
|
* graceful_restart_done - finalize graceful restart
|
2016-05-12 13:49:44 +00:00
|
|
|
* @t: unused
|
2014-03-23 00:35:33 +00:00
|
|
|
*
|
|
|
|
 * When there are no locks on graceful restart, the function finalizes the
|
|
|
|
* graceful restart recovery. Protocols postponing route export until the end of
|
|
|
|
* the recovery are awakened and the export to them is enabled. All other
|
|
|
|
* related state is cleared. The function is also called when the graceful
|
|
|
|
* restart wait timer fires (but there are still some locks).
|
|
|
|
*/
|
2014-03-20 13:07:12 +00:00
|
|
|
static void
|
2017-06-01 10:33:20 +00:00
|
|
|
graceful_restart_done(timer *t UNUSED)
|
2014-03-20 13:07:12 +00:00
|
|
|
{
|
|
|
|
log(L_INFO "Graceful restart done");
|
|
|
|
graceful_restart_state = GRS_DONE;
|
|
|
|
|
2023-04-19 19:18:12 +00:00
|
|
|
WALK_TLIST(proto, p, &global_proto_list)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
if (!p->gr_recovery)
|
|
|
|
continue;
|
2014-03-20 13:07:12 +00:00
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
struct channel *c;
|
|
|
|
WALK_LIST(c, p->channels)
|
|
|
|
{
|
2014-03-20 13:07:12 +00:00
|
|
|
/* Resume postponed export of routes */
|
2021-06-21 15:07:31 +00:00
|
|
|
if ((c->channel_state == CS_UP) && c->gr_wait && p->rt_notify)
|
2016-01-26 10:48:58 +00:00
|
|
|
channel_start_export(c);
|
2014-03-20 13:07:12 +00:00
|
|
|
|
|
|
|
/* Cleanup */
|
2016-01-26 10:48:58 +00:00
|
|
|
c->gr_wait = 0;
|
|
|
|
c->gr_lock = 0;
|
2014-03-20 13:07:12 +00:00
|
|
|
}
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
p->gr_recovery = 0;
|
|
|
|
}
|
|
|
|
|
2014-03-20 13:07:12 +00:00
|
|
|
graceful_restart_locks = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
graceful_restart_show_status(void)
|
|
|
|
{
|
|
|
|
if (graceful_restart_state != GRS_ACTIVE)
|
|
|
|
return;
|
|
|
|
|
|
|
|
cli_msg(-24, "Graceful restart recovery in progress");
|
2016-01-26 10:48:58 +00:00
|
|
|
cli_msg(-24, " Waiting for %d channels to recover", graceful_restart_locks);
|
2017-11-28 16:43:20 +00:00
|
|
|
cli_msg(-24, " Wait timer is %t/%u", tm_remains(gr_wait_timer), config->gr_wait);
|
2014-03-20 13:07:12 +00:00
|
|
|
}
|
|
|
|
|
2014-03-23 00:35:33 +00:00
|
|
|
/**
|
2016-01-26 10:48:58 +00:00
|
|
|
* channel_graceful_restart_lock - lock graceful restart by channel
|
|
|
|
 * @c: channel instance
|
2014-03-23 00:35:33 +00:00
|
|
|
*
|
|
|
|
* This function allows a protocol to postpone the end of graceful restart
|
|
|
|
* recovery until it converges. The lock is removed when the protocol calls
|
2016-01-26 10:48:58 +00:00
|
|
|
* channel_graceful_restart_unlock() or when the channel is closed.
|
2014-03-23 00:35:33 +00:00
|
|
|
*
|
|
|
|
 * The function has to be called during the initial phase of graceful restart
|
|
|
|
* recovery and only for protocols that are part of graceful restart (i.e. their
|
|
|
|
* @gr_recovery is set), which means it should be called from protocol start
|
|
|
|
* hooks.
|
|
|
|
*/
|
2014-03-20 13:07:12 +00:00
|
|
|
void
|
2016-01-26 10:48:58 +00:00
|
|
|
channel_graceful_restart_lock(struct channel *c)
|
2014-03-20 13:07:12 +00:00
|
|
|
{
|
|
|
|
ASSERT(graceful_restart_state == GRS_INIT);
|
2016-01-26 10:48:58 +00:00
|
|
|
ASSERT(c->proto->gr_recovery);
|
2014-03-20 13:07:12 +00:00
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
if (c->gr_lock)
|
2014-03-20 13:07:12 +00:00
|
|
|
return;
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
c->gr_lock = 1;
|
2014-03-20 13:07:12 +00:00
|
|
|
graceful_restart_locks++;
|
|
|
|
}
|
|
|
|
|
2014-03-23 00:35:33 +00:00
|
|
|
/**
|
2016-01-26 10:48:58 +00:00
|
|
|
* channel_graceful_restart_unlock - unlock graceful restart by channel
|
|
|
|
 * @c: channel instance
|
2014-03-23 00:35:33 +00:00
|
|
|
*
|
2016-01-26 10:48:58 +00:00
|
|
|
* This function unlocks a lock from channel_graceful_restart_lock(). It is also
|
2014-03-23 00:35:33 +00:00
|
|
|
 * automatically called when the lock-holding protocol goes down.
|
|
|
|
*/
|
2014-03-20 13:07:12 +00:00
|
|
|
void
|
2016-01-26 10:48:58 +00:00
|
|
|
channel_graceful_restart_unlock(struct channel *c)
|
2014-03-20 13:07:12 +00:00
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
if (!c->gr_lock)
|
2014-03-20 13:07:12 +00:00
|
|
|
return;
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
c->gr_lock = 0;
|
2014-03-20 13:07:12 +00:00
|
|
|
graceful_restart_locks--;
|
|
|
|
|
|
|
|
if ((graceful_restart_state == GRS_ACTIVE) && !graceful_restart_locks)
|
2017-11-28 16:43:20 +00:00
|
|
|
tm_start(gr_wait_timer, 0);
|
2014-03-20 13:07:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2000-06-02 13:42:36 +00:00
|
|
|
/**
|
|
|
|
* protos_dump_all - dump status of all protocols
|
|
|
|
*
|
|
|
|
* This function dumps status of all existing protocol instances to the
|
|
|
|
* debug output. It involves printing of general status information
|
|
|
|
 * such as protocol states, their position on the protocol lists
|
|
|
|
* and also calling of a dump() hook of the protocol to print
|
|
|
|
* the internals.
|
|
|
|
*/
|
1998-07-09 19:36:52 +00:00
|
|
|
void
|
|
|
|
protos_dump_all(void)
|
|
|
|
{
|
|
|
|
debug("Protocols:\n");
|
|
|
|
|
2023-04-19 19:18:12 +00:00
|
|
|
WALK_TLIST(proto, p, &global_proto_list) PROTO_LOCKED_FROM_MAIN(p)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
2021-06-21 15:07:31 +00:00
|
|
|
#define DPF(x) (p->x ? " " #x : "")
|
2023-03-06 18:28:08 +00:00
|
|
|
debug(" protocol %s (%p) state %s with %d active channels flags: %s%s%s%s\n",
|
2021-06-21 15:07:31 +00:00
|
|
|
p->name, p, p_states[p->proto_state], p->active_channels,
|
2021-06-19 18:50:18 +00:00
|
|
|
DPF(disabled), DPF(active), DPF(do_stop), DPF(reconfiguring));
|
2021-06-21 15:07:31 +00:00
|
|
|
#undef DPF
|
2016-01-26 10:48:58 +00:00
|
|
|
|
|
|
|
struct channel *c;
|
|
|
|
WALK_LIST(c, p->channels)
|
1998-07-09 19:36:52 +00:00
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
debug("\tTABLE %s\n", c->table->name);
|
|
|
|
if (c->in_filter)
|
|
|
|
debug("\tInput filter: %s\n", filter_name(c->in_filter));
|
|
|
|
if (c->out_filter)
|
|
|
|
debug("\tOutput filter: %s\n", filter_name(c->out_filter));
|
2021-06-21 15:07:31 +00:00
|
|
|
debug("\tChannel state: %s/%s/%s\n", c_states[c->channel_state],
|
|
|
|
c->in_req.hook ? rt_import_state_name(rt_import_get_state(c->in_req.hook)) : "-",
|
|
|
|
c->out_req.hook ? rt_export_state_name(rt_export_get_state(c->out_req.hook)) : "-");
|
1998-07-09 19:36:52 +00:00
|
|
|
}
|
2016-01-26 10:48:58 +00:00
|
|
|
|
2023-11-01 17:25:40 +00:00
|
|
|
debug("\tSOURCES\n");
|
|
|
|
rt_dump_sources(&p->sources);
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
if (p->proto->dump && (p->proto_state != PS_DOWN))
|
|
|
|
p->proto->dump(p);
|
|
|
|
}
|
1998-07-09 19:36:52 +00:00
|
|
|
}
|
|
|
|
|
2000-06-02 13:42:36 +00:00
|
|
|
/**
|
|
|
|
* proto_build - make a single protocol available
|
|
|
|
* @p: the protocol
|
|
|
|
*
|
|
|
|
* After the platform specific initialization code uses protos_build()
|
|
|
|
* to add all the standard protocols, it should call proto_build() for
|
2000-06-07 12:29:08 +00:00
|
|
|
* all platform specific protocols to inform the core that they exist.
|
2000-06-02 13:42:36 +00:00
|
|
|
*/
|
2000-04-01 10:19:47 +00:00
|
|
|
void
|
|
|
|
proto_build(struct protocol *p)
|
|
|
|
{
|
|
|
|
add_tail(&protocol_list, &p->n);
|
|
|
|
}
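/*
 * For instance (illustrative only), a platform's startup code could register
 * its interface-scanning protocol this way, assuming it is not already
 * registered by the generated protos_build_gen():
 *
 *   proto_build(&proto_unix_iface);
 */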
|
|
|
|
|
2013-11-19 21:33:48 +00:00
|
|
|
/* FIXME: convert this call to some protocol hook */
|
|
|
|
extern void bfd_init_all(void);
|
|
|
|
|
2022-03-18 21:05:50 +00:00
|
|
|
void protos_build_gen(void);
|
|
|
|
|
2000-06-02 13:42:36 +00:00
|
|
|
/**
|
|
|
|
* protos_build - build a protocol list
|
|
|
|
*
|
|
|
|
* This function is called during BIRD startup to insert
|
|
|
|
* all standard protocols to the global protocol list. Insertion
|
|
|
|
* of platform specific protocols (such as the kernel syncer)
|
|
|
|
* is in the domain of competence of the platform dependent
|
|
|
|
* startup code.
|
|
|
|
*/
|
1998-10-18 11:53:21 +00:00
|
|
|
void
|
|
|
|
protos_build(void)
|
|
|
|
{
|
2023-04-21 13:26:06 +00:00
|
|
|
proto_pool = rp_new(&root_pool, the_bird_domain.the_bird, "Protocols");
|
2023-04-24 14:10:59 +00:00
|
|
|
|
|
|
|
protos_build_gen();
|
1999-02-11 22:59:06 +00:00
|
|
|
}
|
|
|
|
|
2012-03-28 16:40:04 +00:00
|
|
|
|
2012-04-24 21:39:57 +00:00
|
|
|
/* Temporary hack to propagate restart to BGP */
|
|
|
|
int proto_restart;
|
2012-03-28 16:40:04 +00:00
|
|
|
|
2012-04-15 13:28:29 +00:00
|
|
|
static void
|
2023-02-06 14:06:12 +00:00
|
|
|
proto_restart_event_hook(void *_p)
|
2012-04-15 13:28:29 +00:00
|
|
|
{
|
2023-02-06 14:06:12 +00:00
|
|
|
struct proto *p = _p;
|
|
|
|
if (!p->down_sched)
|
|
|
|
return;
|
2012-04-15 13:28:29 +00:00
|
|
|
|
2023-02-06 14:06:12 +00:00
|
|
|
proto_restart = (p->down_sched == PDS_RESTART);
|
|
|
|
p->disabled = 1;
|
|
|
|
proto_rethink_goal(p);
|
2012-04-15 13:28:29 +00:00
|
|
|
|
2023-02-06 14:06:12 +00:00
|
|
|
p->restart_event = NULL;
|
|
|
|
p->restart_timer = NULL;
|
|
|
|
|
|
|
|
if (proto_restart)
|
|
|
|
/* No need to call proto_rethink_goal() here again as the proto_cleanup() routine will
|
|
|
|
* call it after the protocol stops ... and both these routines are fixed to main_birdloop.
|
|
|
|
*/
|
|
|
|
p->disabled = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
proto_send_restart_event(struct proto *p)
|
|
|
|
{
|
|
|
|
if (!p->restart_event)
|
|
|
|
p->restart_event = ev_new_init(p->pool, proto_restart_event_hook, p);
|
|
|
|
|
|
|
|
ev_send(&global_event_list, p->restart_event);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
proto_send_restart_event_from_timer(struct timer *t)
|
|
|
|
{
|
|
|
|
proto_send_restart_event((struct proto *) t->data);
|
2012-04-15 13:28:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
proto_schedule_down(struct proto *p, byte restart, byte code)
|
|
|
|
{
|
|
|
|
/* Does not work for other states (even PS_START) */
|
|
|
|
ASSERT(p->proto_state == PS_UP);
|
|
|
|
|
|
|
|
/* Scheduled restart may change to shutdown, but not otherwise */
|
|
|
|
if (p->down_sched == PDS_DISABLE)
|
|
|
|
return;
|
|
|
|
|
|
|
|
p->down_sched = restart ? PDS_RESTART : PDS_DISABLE;
|
|
|
|
p->down_code = code;
|
2023-02-06 14:06:12 +00:00
|
|
|
|
|
|
|
if (!restart)
|
|
|
|
{
|
|
|
|
if (p->restart_timer && tm_active(p->restart_timer))
|
|
|
|
tm_stop(p->restart_timer);
|
|
|
|
|
|
|
|
proto_send_restart_event(p);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (!p->restart_timer)
|
|
|
|
p->restart_timer = tm_new_init(p->pool, proto_send_restart_event_from_timer, p, 0, 0);
|
|
|
|
|
|
|
|
tm_start_max_in(p->restart_timer, 250 MS, p->loop);
|
|
|
|
}
|
2012-04-15 13:28:29 +00:00
|
|
|
}
|
|
|
|
|
2017-09-19 17:55:37 +00:00
|
|
|
/**
|
|
|
|
* proto_set_message - set administrative message to protocol
|
|
|
|
* @p: protocol
|
|
|
|
* @msg: message
|
|
|
|
* @len: message length (-1 for NULL-terminated string)
|
|
|
|
*
|
|
|
|
* The function sets administrative message (string) related to protocol state
|
|
|
|
* change. It is called by the nest code for manual enable/disable/restart
|
|
|
|
 * commands, and by protocol-specific code when the
|
|
|
|
* protocol state change is initiated by the protocol. Using NULL message clears
|
|
|
|
* the last message. The message string may be either NULL-terminated or with an
|
|
|
|
* explicit length.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
proto_set_message(struct proto *p, char *msg, int len)
|
|
|
|
{
|
|
|
|
mb_free(p->message);
|
|
|
|
p->message = NULL;
|
|
|
|
|
|
|
|
if (!msg || !len)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (len < 0)
|
|
|
|
len = strlen(msg);
|
|
|
|
|
|
|
|
if (!len)
|
|
|
|
return;
|
|
|
|
|
|
|
|
p->message = mb_alloc(proto_pool, len + 1);
|
|
|
|
memcpy(p->message, msg, len);
|
|
|
|
p->message[len] = 0;
|
|
|
|
}
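/*
 * Usage sketch (illustrative only): the two calls below are equivalent for a
 * NUL-terminated string, and passing NULL clears the stored message.
 *
 *   proto_set_message(p, "disabled by operator", -1);
 *   proto_set_message(p, "disabled by operator", 20);
 *   proto_set_message(p, NULL, 0);
 */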
|
|
|
|
|
2012-04-15 13:28:29 +00:00
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
static const char * channel_limit_name[] = {
|
|
|
|
[PLA_WARN] = "warn",
|
|
|
|
[PLA_BLOCK] = "block",
|
|
|
|
[PLA_RESTART] = "restart",
|
|
|
|
[PLA_DISABLE] = "disable",
|
|
|
|
};
|
2012-04-15 13:28:29 +00:00
|
|
|
|
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
static void
|
|
|
|
channel_log_limit(struct channel *c, struct limit *l, int dir)
|
2012-04-15 13:28:29 +00:00
|
|
|
{
|
2013-01-10 12:07:33 +00:00
|
|
|
const char *dir_name[PLD_MAX] = { "receive", "import" , "export" };
|
2021-11-06 19:34:16 +00:00
|
|
|
log(L_WARN "Channel %s.%s hits route %s limit (%d), action: %s",
|
|
|
|
c->proto->name, c->name, dir_name[dir], l->max, channel_limit_name[c->limit_actions[dir]]);
|
|
|
|
}
|
2012-04-15 13:28:29 +00:00
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
static void
|
|
|
|
channel_activate_limit(struct channel *c, struct limit *l, int dir)
|
|
|
|
{
|
|
|
|
if (c->limit_active & (1 << dir))
|
2012-04-21 19:05:36 +00:00
|
|
|
return;
|
2012-04-15 13:28:29 +00:00
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
c->limit_active |= (1 << dir);
|
|
|
|
channel_log_limit(c, l, dir);
|
|
|
|
}
|
2012-04-15 13:28:29 +00:00
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
static int
|
|
|
|
channel_limit_warn(struct limit *l, void *data)
|
|
|
|
{
|
|
|
|
struct channel_limit_data *cld = data;
|
|
|
|
struct channel *c = cld->c;
|
|
|
|
int dir = cld->dir;
|
2016-01-26 10:48:58 +00:00
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
channel_log_limit(c, l, dir);
|
2016-01-26 10:48:58 +00:00
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
return 0;
|
2012-04-15 13:28:29 +00:00
|
|
|
}
|
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
static int
|
|
|
|
channel_limit_block(struct limit *l, void *data)
|
2014-04-26 22:46:32 +00:00
|
|
|
{
|
2021-11-06 19:34:16 +00:00
|
|
|
struct channel_limit_data *cld = data;
|
|
|
|
struct channel *c = cld->c;
|
|
|
|
int dir = cld->dir;
|
2014-04-26 22:46:32 +00:00
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
channel_activate_limit(c, l, dir);
|
2014-04-26 22:46:32 +00:00
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
return 1;
|
|
|
|
}
|
2014-04-26 22:46:32 +00:00
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
static const byte chl_dir_down[PLD_MAX] = { PDC_RX_LIMIT_HIT, PDC_IN_LIMIT_HIT, PDC_OUT_LIMIT_HIT };
|
|
|
|
|
|
|
|
static int
|
|
|
|
channel_limit_down(struct limit *l, void *data)
|
|
|
|
{
|
|
|
|
struct channel_limit_data *cld = data;
|
|
|
|
struct channel *c = cld->c;
|
|
|
|
struct proto *p = c->proto;
|
|
|
|
int dir = cld->dir;
|
|
|
|
|
|
|
|
channel_activate_limit(c, l, dir);
|
|
|
|
|
|
|
|
if (p->proto_state == PS_UP)
|
|
|
|
proto_schedule_down(p, c->limit_actions[dir] == PLA_RESTART, chl_dir_down[dir]);
|
|
|
|
|
|
|
|
return 1;
|
2014-04-26 22:46:32 +00:00
|
|
|
}
|
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
static int (*channel_limit_action[])(struct limit *, void *) = {
|
|
|
|
[PLA_NONE] = NULL,
|
|
|
|
[PLA_WARN] = channel_limit_warn,
|
|
|
|
[PLA_BLOCK] = channel_limit_block,
|
|
|
|
[PLA_RESTART] = channel_limit_down,
|
|
|
|
[PLA_DISABLE] = channel_limit_down,
|
|
|
|
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
channel_update_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf)
|
2014-03-20 13:07:12 +00:00
|
|
|
{
|
2021-11-06 19:34:16 +00:00
|
|
|
l->action = channel_limit_action[cf->action];
|
|
|
|
c->limit_actions[dir] = cf->action;
|
|
|
|
|
|
|
|
struct channel_limit_data cld = { .c = c, .dir = dir };
|
|
|
|
limit_update(l, &cld, cf->action ? cf->limit : ~((u32) 0));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
channel_init_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf)
|
|
|
|
{
|
|
|
|
channel_reset_limit(c, l, dir);
|
|
|
|
channel_update_limit(c, l, dir, cf);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
channel_reset_limit(struct channel *c, struct limit *l, int dir)
|
|
|
|
{
|
|
|
|
limit_reset(l);
|
|
|
|
c->limit_active &= ~(1 << dir);
|
2014-03-20 13:07:12 +00:00
|
|
|
}
|
|
|
|
|
2021-09-27 14:40:28 +00:00
|
|
|
static struct rte_owner_class default_rte_owner_class;
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
static inline void
|
|
|
|
proto_do_start(struct proto *p)
|
2014-03-20 13:07:12 +00:00
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
p->active = 1;
|
2021-09-27 14:40:28 +00:00
|
|
|
|
|
|
|
rt_init_sources(&p->sources, p->name, proto_event_list(p));
|
|
|
|
if (!p->sources.class)
|
|
|
|
p->sources.class = &default_rte_owner_class;
|
|
|
|
|
2023-12-08 10:33:43 +00:00
|
|
|
p->sources.debug = p->debug;
|
|
|
|
|
2021-06-19 18:50:18 +00:00
|
|
|
if (!p->cf->late_if_feed)
|
2023-02-07 13:27:23 +00:00
|
|
|
iface_subscribe(&p->iface_sub);
|
2014-03-20 13:07:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2016-01-26 10:48:58 +00:00
|
|
|
proto_do_up(struct proto *p)
|
2014-03-20 13:07:12 +00:00
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
if (!p->main_source)
|
|
|
|
p->main_source = rt_get_source(p, 0);
|
2021-09-27 14:40:28 +00:00
|
|
|
  // Locked automatically
|
2014-03-20 13:07:12 +00:00
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
proto_start_channels(p);
|
2021-06-19 18:50:18 +00:00
|
|
|
|
|
|
|
if (p->cf->late_if_feed)
|
2023-02-07 13:27:23 +00:00
|
|
|
iface_subscribe(&p->iface_sub);
|
2014-03-20 13:07:12 +00:00
|
|
|
}
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
static inline void
|
|
|
|
proto_do_pause(struct proto *p)
|
2014-03-20 13:07:12 +00:00
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
proto_pause_channels(p);
|
2014-03-20 13:07:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2016-01-26 10:48:58 +00:00
|
|
|
proto_do_stop(struct proto *p)
|
2014-03-20 13:07:12 +00:00
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
p->down_sched = 0;
|
2014-03-20 13:07:12 +00:00
|
|
|
p->gr_recovery = 0;
|
2014-03-23 00:35:33 +00:00
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
if (p->main_source)
|
|
|
|
{
|
|
|
|
rt_unlock_source(p->main_source);
|
|
|
|
p->main_source = NULL;
|
|
|
|
}
|
2014-03-23 00:35:33 +00:00
|
|
|
|
2023-10-13 08:22:09 +00:00
|
|
|
rp_free(p->pool_up);
|
|
|
|
p->pool_up = NULL;
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
proto_stop_channels(p);
|
2021-09-27 14:40:28 +00:00
|
|
|
rt_destroy_sources(&p->sources, p->event);
|
2021-06-19 18:50:18 +00:00
|
|
|
|
|
|
|
p->do_stop = 1;
|
2023-04-02 17:34:22 +00:00
|
|
|
proto_send_event(p, p->event);
|
2016-01-26 10:48:58 +00:00
|
|
|
}
|
2014-03-23 00:35:33 +00:00
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
static void
|
|
|
|
proto_do_down(struct proto *p)
|
|
|
|
{
|
|
|
|
p->down_code = 0;
|
|
|
|
|
|
|
|
/* Shutdown is finished in the protocol event */
|
|
|
|
if (proto_is_done(p))
|
2023-04-02 17:34:22 +00:00
|
|
|
proto_send_event(p, p->event);
|
2014-03-23 00:35:33 +00:00
|
|
|
}
|
|
|
|
|
2014-03-20 13:07:12 +00:00
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
|
2000-06-02 13:42:36 +00:00
|
|
|
/**
|
|
|
|
* proto_notify_state - notify core about protocol state change
|
|
|
|
* @p: protocol the state of which has changed
|
|
|
|
* @ps: the new status
|
|
|
|
*
|
|
|
|
* Whenever a state of a protocol changes due to some event internal
|
|
|
|
* to the protocol (i.e., not inside a start() or shutdown() hook),
|
|
|
|
* it should immediately notify the core about the change by calling
|
|
|
|
* proto_notify_state() which will write the new state to the &proto
|
|
|
|
* structure and take all the actions necessary to adapt to the new
|
2008-12-08 11:24:55 +00:00
|
|
|
 * state. A state change to PS_DOWN immediately frees the protocol's resources
|
|
|
|
 * and might execute the start callback of the protocol; therefore,
|
|
|
|
* it should be used at tail positions of protocol callbacks.
|
2000-06-02 13:42:36 +00:00
|
|
|
*/
|
1999-02-11 22:59:06 +00:00
|
|
|
void
|
2016-01-26 10:48:58 +00:00
|
|
|
proto_notify_state(struct proto *p, uint state)
|
1999-02-11 22:59:06 +00:00
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
uint ps = p->proto_state;
|
1999-02-11 22:59:06 +00:00
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
DBG("%s reporting state transition %s -> %s\n", p->name, p_states[ps], p_states[state]);
|
|
|
|
if (state == ps)
|
1999-02-11 22:59:06 +00:00
|
|
|
return;
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
p->proto_state = state;
|
2017-06-06 14:47:30 +00:00
|
|
|
p->last_state_change = current_time();
|
2008-12-08 11:24:55 +00:00
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
switch (state)
|
|
|
|
{
|
|
|
|
case PS_START:
|
|
|
|
ASSERT(ps == PS_DOWN || ps == PS_UP);
|
|
|
|
|
|
|
|
if (ps == PS_DOWN)
|
|
|
|
proto_do_start(p);
|
|
|
|
else
|
|
|
|
proto_do_pause(p);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PS_UP:
|
|
|
|
ASSERT(ps == PS_DOWN || ps == PS_START);
|
|
|
|
|
|
|
|
if (ps == PS_DOWN)
|
|
|
|
proto_do_start(p);
|
|
|
|
|
|
|
|
proto_do_up(p);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PS_STOP:
|
|
|
|
ASSERT(ps == PS_START || ps == PS_UP);
|
|
|
|
|
|
|
|
proto_do_stop(p);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PS_DOWN:
|
|
|
|
if (ps != PS_STOP)
|
|
|
|
proto_do_stop(p);
|
|
|
|
|
|
|
|
proto_do_down(p);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
bug("%s: Invalid state %d", p->name, ps);
|
|
|
|
}
|
2014-03-24 11:32:12 +00:00
|
|
|
|
|
|
|
proto_log_state_change(p);
|
1998-10-18 11:53:21 +00:00
|
|
|
}
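/*
 * Illustrative sketch (hypothetical protocol code): reporting a state change
 * triggered by a protocol-internal event, e.g. a session coming up while the
 * protocol is in PS_START. As noted above, a notification of PS_DOWN should
 * be the tail call of the handler.
 *
 *   static void
 *   my_session_up(struct proto *P)
 *   {
 *     ...				-- protocol-specific bookkeeping
 *     proto_notify_state(P, PS_UP);
 *   }
 */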
|
1999-02-13 19:15:28 +00:00
|
|
|
|
1999-11-30 12:57:14 +00:00
|
|
|
/*
|
|
|
|
* CLI Commands
|
|
|
|
*/
|
|
|
|
|
|
|
|
static char *
|
|
|
|
proto_state_name(struct proto *p)
|
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
switch (p->proto_state)
|
|
|
|
{
|
|
|
|
case PS_DOWN: return p->active ? "flush" : "down";
|
|
|
|
case PS_START: return "start";
|
|
|
|
case PS_UP: return "up";
|
|
|
|
case PS_STOP: return "stop";
|
|
|
|
default: return "???";
|
|
|
|
}
|
1999-11-30 12:57:14 +00:00
|
|
|
}
|
|
|
|
|
2010-02-13 09:44:46 +00:00
|
|
|
static void
|
2016-01-26 10:48:58 +00:00
|
|
|
channel_show_stats(struct channel *c)
|
2010-02-13 09:44:46 +00:00
|
|
|
{
|
2021-06-21 15:07:31 +00:00
|
|
|
struct channel_import_stats *ch_is = &c->import_stats;
|
|
|
|
struct channel_export_stats *ch_es = &c->export_stats;
|
|
|
|
struct rt_import_stats *rt_is = c->in_req.hook ? &c->in_req.hook->stats : NULL;
|
|
|
|
struct rt_export_stats *rt_es = c->out_req.hook ? &c->out_req.hook->stats : NULL;
|
|
|
|
|
|
|
|
#define SON(ie, item) ((ie) ? (ie)->item : 0)
|
|
|
|
#define SCI(item) SON(ch_is, item)
|
|
|
|
#define SCE(item) SON(ch_es, item)
|
|
|
|
#define SRI(item) SON(rt_is, item)
|
|
|
|
#define SRE(item) SON(rt_es, item)
|
2016-01-26 10:48:58 +00:00
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
u32 rx_routes = c->rx_limit.count;
|
|
|
|
u32 in_routes = c->in_limit.count;
|
|
|
|
u32 out_routes = c->out_limit.count;
|
|
|
|
|
2022-06-16 21:24:56 +00:00
|
|
|
if (c->in_keep)
|
2019-02-05 14:59:26 +00:00
|
|
|
cli_msg(-1006, " Routes: %u imported, %u filtered, %u exported, %u preferred",
|
2021-06-21 15:07:31 +00:00
|
|
|
in_routes, (rx_routes - in_routes), out_routes, SRI(pref));
|
2012-11-10 13:26:13 +00:00
|
|
|
else
|
2019-02-02 12:28:16 +00:00
|
|
|
cli_msg(-1006, " Routes: %u imported, %u exported, %u preferred",
|
2021-06-21 15:07:31 +00:00
|
|
|
in_routes, out_routes, SRI(pref));
|
|
|
|
|
|
|
|
cli_msg(-1006, " Route change stats: received rejected filtered ignored RX limit IN limit accepted");
|
|
|
|
cli_msg(-1006, " Import updates: %10u %10u %10u %10u %10u %10u %10u",
|
|
|
|
SCI(updates_received), SCI(updates_invalid),
|
|
|
|
SCI(updates_filtered), SRI(updates_ignored),
|
|
|
|
SCI(updates_limited_rx), SCI(updates_limited_in),
|
|
|
|
SRI(updates_accepted));
|
|
|
|
cli_msg(-1006, " Import withdraws: %10u %10u --- %10u --- %10u",
|
|
|
|
SCI(withdraws_received), SCI(withdraws_invalid),
|
|
|
|
SRI(withdraws_ignored), SRI(withdraws_accepted));
|
|
|
|
cli_msg(-1006, " Export updates: %10u %10u %10u --- %10u %10u",
|
|
|
|
SRE(updates_received), SCE(updates_rejected),
|
|
|
|
SCE(updates_filtered), SCE(updates_limited), SCE(updates_accepted));
|
|
|
|
cli_msg(-1006, " Export withdraws: %10u --- --- --- ---%10u",
|
|
|
|
SRE(withdraws_received), SCE(withdraws_accepted));
|
|
|
|
|
|
|
|
#undef SRI
|
|
|
|
#undef SRE
|
|
|
|
#undef SCI
|
|
|
|
#undef SCE
|
|
|
|
#undef SON
|
2010-02-13 09:44:46 +00:00
|
|
|
}
|
|
|
|
|
2012-04-15 13:28:29 +00:00
|
|
|
void
|
2021-11-06 19:34:16 +00:00
|
|
|
channel_show_limit(struct limit *l, const char *dsc, int active, int action)
|
2012-04-15 13:28:29 +00:00
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
if (!l->action)
|
2012-04-21 19:05:36 +00:00
|
|
|
return;
|
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
cli_msg(-1006, " %-16s%d%s", dsc, l->max, active ? " [HIT]" : "");
|
|
|
|
cli_msg(-1006, " Action: %s", channel_limit_name[action]);
|
2012-04-15 13:28:29 +00:00
|
|
|
}
|
|
|
|
|
2012-03-15 10:58:08 +00:00
|
|
|
void
|
2016-01-26 10:48:58 +00:00
|
|
|
channel_show_info(struct channel *c)
|
2010-02-13 09:44:46 +00:00
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
cli_msg(-1006, " Channel %s", c->name);
|
2016-12-07 13:11:28 +00:00
|
|
|
cli_msg(-1006, " State: %s", c_states[c->channel_state]);
|
2021-06-21 15:07:31 +00:00
|
|
|
cli_msg(-1006, " Import state: %s", rt_import_state_name(rt_import_get_state(c->in_req.hook)));
|
|
|
|
cli_msg(-1006, " Export state: %s", rt_export_state_name(rt_export_get_state(c->out_req.hook)));
|
2016-01-26 10:48:58 +00:00
|
|
|
cli_msg(-1006, " Table: %s", c->table->name);
|
|
|
|
cli_msg(-1006, " Preference: %d", c->preference);
|
|
|
|
cli_msg(-1006, " Input filter: %s", filter_name(c->in_filter));
|
|
|
|
cli_msg(-1006, " Output filter: %s", filter_name(c->out_filter));
|
2010-02-13 09:44:46 +00:00
|
|
|
|
2014-03-20 13:07:12 +00:00
|
|
|
if (graceful_restart_state == GRS_ACTIVE)
|
2016-01-26 10:48:58 +00:00
|
|
|
cli_msg(-1006, " GR recovery: %s%s",
|
|
|
|
c->gr_lock ? " pending" : "",
|
|
|
|
c->gr_wait ? " waiting" : "");
|
2014-03-20 13:07:12 +00:00
|
|
|
|
2021-11-06 19:34:16 +00:00
|
|
|
channel_show_limit(&c->rx_limit, "Receive limit:", c->limit_active & (1 << PLD_RX), c->limit_actions[PLD_RX]);
|
|
|
|
channel_show_limit(&c->in_limit, "Import limit:", c->limit_active & (1 << PLD_IN), c->limit_actions[PLD_IN]);
|
|
|
|
channel_show_limit(&c->out_limit, "Export limit:", c->limit_active & (1 << PLD_OUT), c->limit_actions[PLD_OUT]);
|
2012-04-15 13:28:29 +00:00
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
if (c->channel_state != CS_DOWN)
|
|
|
|
channel_show_stats(c);
|
2010-02-13 09:44:46 +00:00
|
|
|
}
|
|
|
|
|
2020-12-07 21:19:40 +00:00
|
|
|
void
|
|
|
|
channel_cmd_debug(struct channel *c, uint mask)
|
|
|
|
{
|
|
|
|
if (cli_access_restricted())
|
|
|
|
return;
|
|
|
|
|
|
|
|
c->debug = mask;
|
|
|
|
cli_msg(0, "");
|
|
|
|
}
|
|
|
|
|
2010-02-19 23:03:31 +00:00
|
|
|
void
|
2017-09-19 17:55:37 +00:00
|
|
|
proto_cmd_show(struct proto *p, uintptr_t verbose, int cnt)
|
1999-11-30 14:04:09 +00:00
|
|
|
{
|
2010-02-02 23:19:24 +00:00
|
|
|
byte buf[256], tbuf[TM_DATETIME_BUFFER_SIZE];
|
1999-12-01 12:00:15 +00:00
|
|
|
|
2010-02-19 23:03:31 +00:00
|
|
|
/* First protocol - show header */
|
|
|
|
if (!cnt)
|
2017-12-08 14:16:47 +00:00
|
|
|
cli_msg(-2002, "%-10s %-10s %-10s %-6s %-12s %s",
|
|
|
|
"Name", "Proto", "Table", "State", "Since", "Info");
|
2010-02-19 23:03:31 +00:00
|
|
|
|
1999-12-01 12:00:15 +00:00
|
|
|
buf[0] = 0;
|
|
|
|
if (p->proto->get_status)
|
|
|
|
p->proto->get_status(p, buf);
|
2017-06-06 14:47:30 +00:00
|
|
|
tm_format_time(tbuf, &config->tf_proto, p->last_state_change);
|
2017-12-08 14:16:47 +00:00
|
|
|
cli_msg(-1002, "%-10s %-10s %-10s %-6s %-12s %s",
|
1999-11-30 14:04:09 +00:00
|
|
|
p->name,
|
|
|
|
p->proto->name,
|
2016-01-26 10:48:58 +00:00
|
|
|
p->main_channel ? p->main_channel->table->name : "---",
|
1999-11-30 14:04:09 +00:00
|
|
|
proto_state_name(p),
|
2010-02-02 23:19:24 +00:00
|
|
|
tbuf,
|
1999-12-01 12:00:15 +00:00
|
|
|
buf);
|
2016-01-26 10:48:58 +00:00
|
|
|
|
1999-11-30 14:04:09 +00:00
|
|
|
if (verbose)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
if (p->cf->dsc)
|
|
|
|
cli_msg(-1006, " Description: %s", p->cf->dsc);
|
2017-12-07 20:54:47 +00:00
|
|
|
if (p->message)
|
|
|
|
cli_msg(-1006, " Message: %s", p->message);
|
2016-01-26 10:48:58 +00:00
|
|
|
if (p->cf->router_id)
|
|
|
|
cli_msg(-1006, " Router ID: %R", p->cf->router_id);
|
2021-11-15 09:53:58 +00:00
|
|
|
if (p->vrf)
|
|
|
|
cli_msg(-1006, " VRF: %s", p->vrf->name);
|
2016-01-26 10:48:58 +00:00
|
|
|
|
|
|
|
if (p->proto->show_proto_info)
|
|
|
|
p->proto->show_proto_info(p);
|
|
|
|
else
|
1999-11-30 14:04:09 +00:00
|
|
|
{
|
2016-01-26 10:48:58 +00:00
|
|
|
struct channel *c;
|
|
|
|
WALK_LIST(c, p->channels)
|
|
|
|
channel_show_info(c);
|
1999-11-30 14:04:09 +00:00
|
|
|
}
|
2016-01-26 10:48:58 +00:00
|
|
|
|
|
|
|
cli_msg(-1006, "");
|
|
|
|
}
|
1999-11-30 14:04:09 +00:00
|
|
|
}
|
|
|
|
|
1999-11-25 15:35:30 +00:00
|
|
|
void
|
2017-09-19 17:55:37 +00:00
|
|
|
proto_cmd_disable(struct proto *p, uintptr_t arg, int cnt UNUSED)
|
1999-11-25 15:35:30 +00:00
|
|
|
{
|
2010-02-19 23:03:31 +00:00
|
|
|
if (p->disabled)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
cli_msg(-8, "%s: already disabled", p->name);
|
|
|
|
return;
|
|
|
|
}
|
2010-02-19 23:03:31 +00:00
|
|
|
|
|
|
|
log(L_INFO "Disabling protocol %s", p->name);
|
|
|
|
p->disabled = 1;
|
2012-04-15 13:28:29 +00:00
|
|
|
p->down_code = PDC_CMD_DISABLE;
|
2017-09-19 17:55:37 +00:00
|
|
|
proto_set_message(p, (char *) arg, -1);
|
2021-06-19 18:50:18 +00:00
|
|
|
proto_shutdown(p);
|
2010-02-19 23:03:31 +00:00
|
|
|
cli_msg(-9, "%s: disabled", p->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-09-19 17:55:37 +00:00
|
|
|
proto_cmd_enable(struct proto *p, uintptr_t arg, int cnt UNUSED)
|
2010-02-19 23:03:31 +00:00
|
|
|
{
|
|
|
|
if (!p->disabled)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
cli_msg(-10, "%s: already enabled", p->name);
|
|
|
|
return;
|
|
|
|
}
|
2010-02-19 23:03:31 +00:00
|
|
|
|
|
|
|
log(L_INFO "Enabling protocol %s", p->name);
|
|
|
|
p->disabled = 0;
|
2017-09-19 17:55:37 +00:00
|
|
|
proto_set_message(p, (char *) arg, -1);
|
2010-02-19 23:03:31 +00:00
|
|
|
proto_rethink_goal(p);
|
|
|
|
cli_msg(-11, "%s: enabled", p->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-09-19 17:55:37 +00:00
|
|
|
proto_cmd_restart(struct proto *p, uintptr_t arg, int cnt UNUSED)
|
2010-02-19 23:03:31 +00:00
|
|
|
{
|
|
|
|
if (p->disabled)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
cli_msg(-8, "%s: already disabled", p->name);
|
|
|
|
return;
|
|
|
|
}
|
2010-02-19 23:03:31 +00:00
|
|
|
|
|
|
|
log(L_INFO "Restarting protocol %s", p->name);
|
|
|
|
p->disabled = 1;
|
2012-04-15 13:28:29 +00:00
|
|
|
p->down_code = PDC_CMD_RESTART;
|
2017-09-19 17:55:37 +00:00
|
|
|
proto_set_message(p, (char *) arg, -1);
|
2021-06-19 18:50:18 +00:00
|
|
|
proto_shutdown(p);
|
2010-02-19 23:03:31 +00:00
|
|
|
p->disabled = 0;
|
2021-06-19 18:50:18 +00:00
|
|
|
/* After the protocol shuts down, proto_rethink_goal() is run from proto_event. */
|
2010-02-19 23:03:31 +00:00
|
|
|
cli_msg(-12, "%s: restarted", p->name);
|
|
|
|
}
|
|
|
|
|
2023-11-02 13:33:00 +00:00
|
|
|
struct channel_cmd_reload_feeding_request {
|
|
|
|
struct channel_feeding_request cfr;
|
|
|
|
struct proto_reload_request *prr;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct channel_cmd_reload_import_request {
|
|
|
|
struct channel_import_request cir;
|
|
|
|
struct proto_reload_request *prr;
|
|
|
|
};
|
|
|
|
|
|
|
|
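/*
 * Completion callbacks for partial reload requests. Every channel request
 * submitted from proto_cmd_reload() holds one reference counted in
 * prr->counter; dropping the last reference sends the requester's event
 * (prr->ev) to the main loop.
 */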
static void
|
|
|
|
channel_reload_out_done(struct channel_feeding_request *cfr)
|
|
|
|
{
|
|
|
|
struct channel_cmd_reload_feeding_request *ccrfr = SKIP_BACK(struct channel_cmd_reload_feeding_request, cfr, cfr);
|
|
|
|
if (atomic_fetch_sub_explicit(&ccrfr->prr->counter, 1, memory_order_acq_rel) == 1)
|
|
|
|
ev_send_loop(&main_birdloop, &ccrfr->prr->ev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
channel_reload_in_done(struct channel_import_request *cir)
|
|
|
|
{
|
|
|
|
struct channel_cmd_reload_import_request *ccrir = SKIP_BACK(struct channel_cmd_reload_import_request, cir, cir);
|
|
|
|
if (atomic_fetch_sub_explicit(&ccrir->prr->counter, 1, memory_order_acq_rel) == 1)
|
|
|
|
ev_send_loop(&main_birdloop, &ccrir->prr->ev);
|
|
|
|
}
|
|
|
|
|
2010-02-19 23:03:31 +00:00
|
|
|
void
|
2023-11-02 13:33:00 +00:00
|
|
|
proto_cmd_reload(struct proto *p, uintptr_t _prr, int cnt UNUSED)
|
2010-02-19 23:03:31 +00:00
|
|
|
{
|
2023-11-02 13:33:00 +00:00
|
|
|
struct proto_reload_request *prr = (void *) _prr;
|
2016-01-26 10:48:58 +00:00
|
|
|
struct channel *c;
|
2010-02-19 23:03:31 +00:00
|
|
|
if (p->disabled)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
cli_msg(-8, "%s: already disabled", p->name);
|
|
|
|
return;
|
|
|
|
}
|
2010-02-19 23:03:31 +00:00
|
|
|
|
|
|
|
  /* If the protocol is not UP, it has no routes */
|
|
|
|
if (p->proto_state != PS_UP)
|
|
|
|
return;
|
|
|
|
|
2016-01-26 10:48:58 +00:00
|
|
|
/* All channels must support reload */
|
2023-11-02 13:33:00 +00:00
|
|
|
if (prr->dir != CMD_RELOAD_OUT)
|
2016-01-26 10:48:58 +00:00
|
|
|
WALK_LIST(c, p->channels)
|
2019-08-13 16:57:40 +00:00
|
|
|
if ((c->channel_state == CS_UP) && !channel_reloadable(c))
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
cli_msg(-8006, "%s: reload failed", p->name);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-02-19 23:03:31 +00:00
|
|
|
log(L_INFO "Reloading protocol %s", p->name);
|
|
|
|
|
|
|
|
/* re-importing routes */
|
2023-11-02 13:33:00 +00:00
|
|
|
if (prr->dir != CMD_RELOAD_OUT)
|
2016-01-26 10:48:58 +00:00
|
|
|
WALK_LIST(c, p->channels)
|
2019-08-13 16:57:40 +00:00
|
|
|
if (c->channel_state == CS_UP)
|
2023-11-02 13:33:00 +00:00
|
|
|
{
|
|
|
|
if (prr->trie)
|
|
|
|
{
|
|
|
|
          /* Increase the pending reload request counter */
|
|
|
|
atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed);
|
|
|
|
ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
|
|
|
|
|
|
|
|
struct channel_cmd_reload_import_request *req = lp_alloc(prr->trie->lp, sizeof *req);
|
|
|
|
*req = (struct channel_cmd_reload_import_request) {
|
|
|
|
.cir = {
|
|
|
|
.done = channel_reload_in_done,
|
|
|
|
.trie = prr->trie,
|
|
|
|
},
|
|
|
|
.prr = prr,
|
|
|
|
};
|
|
|
|
channel_request_partial_reload(c, &req->cir);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
channel_request_reload(c);
|
|
|
|
}
|
2012-04-15 13:28:29 +00:00
|
|
|
|
2010-02-19 23:03:31 +00:00
|
|
|
/* re-exporting routes */
|
2023-11-02 13:33:00 +00:00
|
|
|
if (prr->dir != CMD_RELOAD_IN)
|
2016-01-26 10:48:58 +00:00
|
|
|
WALK_LIST(c, p->channels)
|
2023-11-02 13:33:00 +00:00
|
|
|
if ((c->channel_state == CS_UP) && (c->out_req.hook))
|
|
|
|
if (prr->trie)
|
|
|
|
{
|
|
|
|
/* Increase the refeed counter */
|
|
|
|
atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed);
|
|
|
|
ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
|
|
|
|
|
|
|
|
          /* Actually request the feeding */
|
|
|
|
|
|
|
|
struct channel_cmd_reload_feeding_request *req = lp_alloc(prr->trie->lp, sizeof *req);
|
|
|
|
*req = (struct channel_cmd_reload_feeding_request) {
|
|
|
|
.cfr = {
|
|
|
|
.type = CFRT_AUXILIARY,
|
|
|
|
.done = channel_reload_out_done,
|
|
|
|
.trie = prr->trie,
|
|
|
|
},
|
|
|
|
.prr = prr,
|
|
|
|
};
|
|
|
|
|
|
|
|
channel_request_feeding(c, &req->cfr);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
channel_request_feeding_dynamic(c, CFRT_AUXILIARY);
|
2010-02-19 23:03:31 +00:00
|
|
|
|
|
|
|
cli_msg(-15, "%s: reloading", p->name);
|
|
|
|
}
|
|
|
|
|
2021-03-16 19:10:00 +00:00
|
|
|
extern void pipe_update_debug(struct proto *P);
|
|
|
|
|
2010-02-19 23:03:31 +00:00
|
|
|
void
|
2017-09-19 17:55:37 +00:00
|
|
|
proto_cmd_debug(struct proto *p, uintptr_t mask, int cnt UNUSED)
|
2010-02-19 23:03:31 +00:00
|
|
|
{
|
|
|
|
p->debug = mask;
|
2021-03-16 19:10:00 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_PIPE
|
|
|
|
if (p->proto == &proto_pipe)
|
|
|
|
pipe_update_debug(p);
|
|
|
|
#endif
|
2010-02-19 23:03:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-09-19 17:55:37 +00:00
|
|
|
proto_cmd_mrtdump(struct proto *p, uintptr_t mask, int cnt UNUSED)
|
2010-02-19 23:03:31 +00:00
|
|
|
{
|
|
|
|
p->mrtdump = mask;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2020-04-08 20:25:15 +00:00
|
|
|
proto_apply_cmd_symbol(const struct symbol *s, void (* cmd)(struct proto *, uintptr_t, int), uintptr_t arg)
|
2010-02-19 23:03:31 +00:00
|
|
|
{
|
|
|
|
if (s->class != SYM_PROTO)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
cli_msg(9002, "%s is not a protocol", s->name);
|
|
|
|
return;
|
|
|
|
}
|
2010-02-19 23:03:31 +00:00
|
|
|
|
2021-09-08 15:30:09 +00:00
|
|
|
if (s->proto->proto)
|
|
|
|
{
|
2021-06-19 18:50:18 +00:00
|
|
|
struct proto *p = s->proto->proto;
|
|
|
|
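    /* Called from the main loop; run the command with the protocol locked */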
PROTO_LOCKED_FROM_MAIN(p)
|
|
|
|
cmd(p, arg, 0);
|
2021-09-08 15:30:09 +00:00
|
|
|
cli_msg(0, "");
|
|
|
|
}
|
|
|
|
else
|
|
|
|
cli_msg(9002, "%s does not exist", s->name);
|
1999-11-25 15:35:30 +00:00
|
|
|
}
|
1999-12-03 11:40:45 +00:00
|
|
|
|
2010-02-19 23:03:31 +00:00
|
|
|
static void
|
2020-04-08 20:25:15 +00:00
|
|
|
proto_apply_cmd_patt(const char *patt, void (* cmd)(struct proto *, uintptr_t, int), uintptr_t arg)
|
2010-02-19 23:03:31 +00:00
|
|
|
{
|
|
|
|
int cnt = 0;
|
|
|
|
|
2023-04-19 19:18:12 +00:00
|
|
|
WALK_TLIST(proto, p, &global_proto_list)
|
2016-01-26 10:48:58 +00:00
|
|
|
if (!patt || patmatch(patt, p->name))
|
2021-06-19 18:50:18 +00:00
|
|
|
PROTO_LOCKED_FROM_MAIN(p)
|
|
|
|
cmd(p, arg, cnt++);
|
2010-02-19 23:03:31 +00:00
|
|
|
|
|
|
|
if (!cnt)
|
|
|
|
cli_msg(8003, "No protocols match");
|
|
|
|
else
|
|
|
|
cli_msg(0, "");
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-09-19 17:55:37 +00:00
|
|
|
proto_apply_cmd(struct proto_spec ps, void (* cmd)(struct proto *, uintptr_t, int),
|
|
|
|
int restricted, uintptr_t arg)
|
2010-02-19 23:03:31 +00:00
|
|
|
{
|
2010-02-21 08:57:26 +00:00
|
|
|
if (restricted && cli_access_restricted())
|
|
|
|
return;
|
|
|
|
|
2010-02-19 23:03:31 +00:00
|
|
|
if (ps.patt)
|
|
|
|
proto_apply_cmd_patt(ps.ptr, cmd, arg);
|
|
|
|
else
|
|
|
|
proto_apply_cmd_symbol(ps.ptr, cmd, arg);
|
|
|
|
}
|
|
|
|
|
1999-12-03 11:40:45 +00:00
|
|
|
struct proto *
|
|
|
|
proto_get_named(struct symbol *sym, struct protocol *pr)
|
|
|
|
{
|
2023-04-19 19:18:12 +00:00
|
|
|
struct proto *p;
|
1999-12-03 11:40:45 +00:00
|
|
|
|
|
|
|
if (sym)
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
if (sym->class != SYM_PROTO)
|
|
|
|
cf_error("%s: Not a protocol", sym->name);
|
|
|
|
|
2019-02-15 12:53:17 +00:00
|
|
|
p = sym->proto->proto;
|
2016-01-26 10:48:58 +00:00
|
|
|
if (!p || p->proto != pr)
|
|
|
|
cf_error("%s: Not a %s protocol", sym->name, pr->name);
|
|
|
|
}
|
1999-12-03 11:40:45 +00:00
|
|
|
else
|
2016-01-26 10:48:58 +00:00
|
|
|
{
|
|
|
|
p = NULL;
|
2023-04-19 19:18:12 +00:00
|
|
|
WALK_TLIST(proto, q, &global_proto_list)
|
2016-01-26 10:48:58 +00:00
|
|
|
if ((q->proto == pr) && (q->proto_state != PS_DOWN))
|
|
|
|
{
|
|
|
|
if (p)
|
|
|
|
cf_error("There are multiple %s protocols running", pr->name);
|
|
|
|
p = q;
|
|
|
|
}
|
|
|
|
if (!p)
|
|
|
|
cf_error("There is no %s protocol running", pr->name);
|
|
|
|
}
|
|
|
|
|
1999-12-03 11:40:45 +00:00
|
|
|
return p;
|
|
|
|
}
|
2020-05-14 01:48:17 +00:00
|
|
|
|
|
|
|
struct proto *
|
|
|
|
proto_iterate_named(struct symbol *sym, struct protocol *proto, struct proto *old)
|
|
|
|
{
|
|
|
|
if (sym)
|
|
|
|
{
|
|
|
|
/* Just the first pass */
|
|
|
|
if (old)
|
|
|
|
{
|
|
|
|
cli_msg(0, "");
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sym->class != SYM_PROTO)
|
|
|
|
cf_error("%s: Not a protocol", sym->name);
|
|
|
|
|
|
|
|
struct proto *p = sym->proto->proto;
|
|
|
|
if (!p || (p->proto != proto))
|
|
|
|
cf_error("%s: Not a %s protocol", sym->name, proto->name);
|
|
|
|
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2023-04-19 19:18:12 +00:00
|
|
|
for (struct proto *p = old ? old->n.next : global_proto_list.first;
|
|
|
|
p;
|
|
|
|
p = p->n.next)
|
2020-05-14 01:48:17 +00:00
|
|
|
{
|
|
|
|
if ((p->proto == proto) && (p->proto_state != PS_DOWN))
|
|
|
|
{
|
|
|
|
cli_separator(this_cli);
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Not found anything during first pass */
|
|
|
|
if (!old)
|
|
|
|
cf_error("There is no %s protocol running", proto->name);
|
|
|
|
|
|
|
|
/* No more items */
|
|
|
|
cli_msg(0, "");
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|