1998-05-20 11:54:33 +00:00
/*
 *	BIRD -- Protocols
 *
 *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */
2000-03-12 21:01:38 +00:00
# undef LOCAL_DEBUG
1998-06-03 08:38:53 +00:00
1998-05-20 11:54:33 +00:00
# include "nest/bird.h"
# include "nest/protocol.h"
# include "lib/resource.h"
# include "lib/lists.h"
1999-02-11 22:59:06 +00:00
# include "lib/event.h"
2017-12-07 17:35:46 +00:00
# include "lib/timer.h"
2000-01-16 23:30:06 +00:00
# include "lib/string.h"
1998-11-27 19:35:10 +00:00
# include "conf/conf.h"
2023-10-29 15:25:01 +00:00
# include "nest/route.h"
1998-10-17 11:05:18 +00:00
# include "nest/iface.h"
2022-09-14 23:38:18 +00:00
# include "nest/mpls.h"
1999-11-25 15:35:30 +00:00
# include "nest/cli.h"
1999-03-17 14:31:26 +00:00
# include "filter/filter.h"
2021-02-07 18:21:42 +00:00
# include "filter/f-inst.h"
2024-06-18 13:33:07 +00:00
# include "proto/bgp/bgp.h"
1998-05-20 11:54:33 +00:00
2010-06-02 20:20:40 +00:00
pool * proto_pool ;
2023-04-19 19:18:12 +00:00
static TLIST_LIST ( proto ) global_proto_list ;
1999-02-11 22:59:06 +00:00
2022-03-18 21:05:50 +00:00
static list STATIC_LIST_INIT ( protocol_list ) ;
1999-02-11 22:59:06 +00:00
2024-05-31 10:27:59 +00:00
struct lfjour * proto_journal ;
2024-07-04 11:13:38 +00:00
struct lfjour * channel_journal ;
2024-05-31 10:27:59 +00:00
DOMAIN ( rtable ) proto_journal_domain ;
2020-12-07 21:19:40 +00:00
# define CD(c, msg, args...) ({ if (c->debug & D_STATES) log(L_TRACE "%s.%s: " msg, c->proto->name, c->name ?: "?", ## args); })
# define PD(p, msg, args...) ({ if (p->debug & D_STATES) log(L_TRACE "%s: " msg, p->name, ## args); })
2000-03-07 21:50:21 +00:00
2014-03-20 13:07:12 +00:00
static timer * gr_wait_timer ;
# define GRS_NONE 0
# define GRS_INIT 1
# define GRS_ACTIVE 2
# define GRS_DONE 3
static int graceful_restart_state ;
static u32 graceful_restart_locks ;
1999-02-11 22:59:06 +00:00
static char * p_states [ ] = { " DOWN " , " START " , " UP " , " STOP " } ;
2021-06-21 15:07:31 +00:00
static char * c_states [ ] = { " DOWN " , " START " , " UP " , " STOP " , " RESTART " } ;
1999-02-11 22:59:06 +00:00
2016-01-26 10:48:58 +00:00
extern struct protocol proto_unix_iface ;
2024-07-04 11:13:38 +00:00
struct proto_attrs * proto_state_table ;
2024-08-30 10:30:13 +00:00
//struct proto_attrs *channel_state_table;
2016-01-26 10:48:58 +00:00
2000-01-16 16:44:50 +00:00
static void proto_rethink_goal ( struct proto * p ) ;
2000-03-07 21:50:21 +00:00
static char * proto_state_name ( struct proto * p ) ;
2021-11-06 19:34:16 +00:00
static void channel_init_limit ( struct channel * c , struct limit * l , int dir , struct channel_limit * cf ) ;
static void channel_update_limit ( struct channel * c , struct limit * l , int dir , struct channel_limit * cf ) ;
static void channel_reset_limit ( struct channel * c , struct limit * l , int dir ) ;
2023-03-07 22:22:03 +00:00
static void channel_stop_export ( struct channel * c ) ;
2022-09-01 08:39:56 +00:00
static void channel_check_stopped ( struct channel * c ) ;
2024-07-04 11:13:38 +00:00
void init_journals ( void ) ;
2024-07-26 15:24:15 +00:00
void add_journal_channel ( struct channel * ch ) ;
2024-05-02 09:39:34 +00:00
/*
 * channel_reimport - request a (possibly partial) re-import of routes kept
 * in the table for this channel, then poke the protocol's event list to
 * actually run the reimport event.
 */
static inline void channel_reimport(struct channel *c, struct rt_feeding_request *rfr)
{
  rt_export_refeed(&c->reimporter, rfr);
  ev_send(proto_event_list(c->proto), &c->reimport_event);
}
/*
 * channel_refeed - request a (possibly partial) re-export of routes to this
 * channel; the export machinery picks the request up asynchronously.
 */
static inline void channel_refeed(struct channel *c, struct rt_feeding_request *rfr)
{
  rt_export_refeed(&c->out_req, rfr);
}
1999-02-13 19:15:28 +00:00
2024-05-31 10:27:59 +00:00
void init_proto_journal ( void ) ;
2016-01-26 10:48:58 +00:00
static inline int proto_is_done ( struct proto * p )
2021-06-19 18:50:18 +00:00
{ return ( p - > proto_state = = PS_DOWN ) & & proto_is_inactive ( p ) ; }
2016-01-26 10:48:58 +00:00
static inline int channel_is_active ( struct channel * c )
2021-06-21 15:07:31 +00:00
{ return ( c - > channel_state ! = CS_DOWN ) ; }
2014-03-24 11:32:12 +00:00
2021-03-08 19:45:22 +00:00
static inline int channel_reloadable ( struct channel * c )
2024-03-27 10:34:19 +00:00
{
return c - > reloadable & & c - > proto - > reload_routes
| | ( ( c - > in_keep & RIK_PREFILTER ) = = RIK_PREFILTER ) ;
}
2021-03-08 19:45:22 +00:00
2020-12-07 21:19:40 +00:00
/* Trace the channel's new state (only when D_STATES debugging is enabled) */
static inline void
channel_log_state_change(struct channel *c)
{
  CD(c, "State changed to %s", c_states[c->channel_state]);
}
2024-05-02 09:39:34 +00:00
/* Import-request callback: trace import state transitions of the owning channel */
static void
channel_import_log_state_change(struct rt_import_request *req, u8 state)
{
  /* Recover the channel owning this import request */
  SKIP_BACK_DECLARE(struct channel, c, in_req, req);

  CD(c, "Channel import state changed to %s", rt_import_state_name(state));
}
2024-05-02 09:39:34 +00:00
/*
 * channel_export_fed - export-request callback invoked when the initial feed
 * (or a refeed) has finished. If the export limit was tripped earlier but the
 * route count has meanwhile dropped back under the limit, re-enable export and
 * request a full refeed; otherwise notify the protocol (if it has a hook).
 */
static void
channel_export_fed(struct rt_export_request *req)
{
  /* Recover the channel owning this export request */
  SKIP_BACK_DECLARE(struct channel, c, out_req, req);

  struct limit *l = &c->out_limit;
  if ((c->limit_active & (1 << PLD_OUT)) && (l->count <= l->max))
  {
    /* Limit no longer exceeded; clear the flag and feed everything again */
    c->limit_active &= ~(1 << PLD_OUT);
    channel_request_full_refeed(c);
  }
  else
    CALL(c->proto->export_fed, c);
}
2023-09-29 14:24:50 +00:00
/* Request a complete re-export of all routes to this channel (no prefilter) */
void
channel_request_full_refeed(struct channel *c)
{
  rt_export_refeed(&c->out_req, NULL);
}
2021-06-21 15:07:31 +00:00
/* Debug dump hook for the channel's import request */
static void
channel_dump_import_req(struct rt_import_request *req)
{
  SKIP_BACK_DECLARE(struct channel, c, in_req, req);
  debug("  Channel %s.%s import request %p\n", c->proto->name, c->name, req);
}
/* Debug dump hook for the channel's export request */
static void
channel_dump_export_req(struct rt_export_request *req)
{
  SKIP_BACK_DECLARE(struct channel, c, out_req, req);
  debug("  Channel %s.%s export request %p\n", c->proto->name, c->name, req);
}
2023-09-29 14:24:50 +00:00
2014-03-24 11:32:12 +00:00
static void
proto_log_state_change ( struct proto * p )
{
if ( p - > debug & D_STATES )
2016-01-26 10:48:58 +00:00
{
char * name = proto_state_name ( p ) ;
if ( name ! = p - > last_state_name_announced )
2014-03-24 11:32:12 +00:00
{
2016-01-26 10:48:58 +00:00
p - > last_state_name_announced = name ;
PD ( p , " State changed to %s " , proto_state_name ( p ) ) ;
2014-03-24 11:32:12 +00:00
}
2016-01-26 10:48:58 +00:00
}
2014-03-24 11:32:12 +00:00
else
p - > last_state_name_announced = NULL ;
1999-02-11 22:59:06 +00:00
}
1998-05-20 11:54:33 +00:00
2016-01-26 10:48:58 +00:00
/*
 * proto_cf_find_channel - find a channel config of the given network type
 * in a protocol config; returns NULL when no such channel is configured.
 */
struct channel_config *
proto_cf_find_channel(struct proto_config *pc, uint net_type)
{
  struct channel_config *cc;

  WALK_LIST(cc, pc->channels)
    if (cc->net_type == net_type)
      return cc;

  return NULL;
}
2016-01-26 10:48:58 +00:00
/**
 * proto_find_channel_by_table - find channel connected to a routing table
 * @p: protocol instance
 * @t: routing table
 *
 * Returns pointer to channel or NULL
 */
struct channel *
proto_find_channel_by_table(struct proto *p, rtable *t)
{
  struct channel *c;

  /* Linear scan; protocols have few channels */
  WALK_LIST(c, p->channels)
    if (c->table == t)
      return c;

  return NULL;
}
2017-04-25 17:02:31 +00:00
/**
 * proto_find_channel_by_name - find channel by its name
 * @p: protocol instance
 * @n: channel name
 *
 * Returns pointer to channel or NULL
 */
struct channel *
proto_find_channel_by_name(struct proto *p, const char *n)
{
  struct channel *c;

  WALK_LIST(c, p->channels)
    if (!strcmp(c->name, n))
      return c;

  return NULL;
}
2000-06-02 13:42:36 +00:00
/**
2016-01-26 10:48:58 +00:00
* proto_add_channel - connect protocol to a routing table
2000-06-02 13:42:36 +00:00
* @ p : protocol instance
2016-01-26 10:48:58 +00:00
* @ cf : channel configuration
2000-06-02 13:42:36 +00:00
*
2016-01-26 10:48:58 +00:00
* This function creates a channel between the protocol instance @ p and the
* routing table specified in the configuration @ cf , making the protocol hear
* all changes in the table and allowing the protocol to update routes in the
* table .
2012-03-15 10:58:08 +00:00
*
2016-01-26 10:48:58 +00:00
* The channel is linked in the protocol channel list and when active also in
* the table channel list . Channels are allocated from the global resource pool
* ( @ proto_pool ) and they are automatically freed when the protocol is removed .
2000-06-02 13:42:36 +00:00
*/
2016-01-26 10:48:58 +00:00
struct channel *
proto_add_channel ( struct proto * p , struct channel_config * cf )
{
2023-09-14 13:21:53 +00:00
struct channel * c = mb_allocz ( proto_pool , cf - > class - > channel_size ) ;
2024-08-27 13:02:38 +00:00
log ( " add channel to protocol %s id %i channel %x " , p - > name , p - > id , c ) ;
2016-01-26 10:48:58 +00:00
c - > name = cf - > name ;
2023-09-14 13:21:53 +00:00
c - > class = cf - > class ;
2016-01-26 10:48:58 +00:00
c - > proto = p ;
c - > table = cf - > table - > table ;
2021-06-21 15:07:31 +00:00
rt_lock_table ( c - > table ) ;
2016-01-26 10:48:58 +00:00
c - > in_filter = cf - > in_filter ;
c - > out_filter = cf - > out_filter ;
2022-06-22 10:45:42 +00:00
c - > out_subprefix = cf - > out_subprefix ;
2021-11-06 19:34:16 +00:00
channel_init_limit ( c , & c - > rx_limit , PLD_RX , & cf - > rx_limit ) ;
channel_init_limit ( c , & c - > in_limit , PLD_IN , & cf - > in_limit ) ;
channel_init_limit ( c , & c - > out_limit , PLD_OUT , & cf - > out_limit ) ;
2016-01-26 10:48:58 +00:00
c - > net_type = cf - > net_type ;
c - > ra_mode = cf - > ra_mode ;
c - > preference = cf - > preference ;
2020-12-07 21:19:40 +00:00
c - > debug = cf - > debug ;
2016-01-26 10:48:58 +00:00
c - > merge_limit = cf - > merge_limit ;
2022-06-16 21:24:56 +00:00
c - > in_keep = cf - > in_keep ;
2021-02-12 04:05:18 +00:00
c - > rpki_reload = cf - > rpki_reload ;
2016-01-26 10:48:58 +00:00
c - > channel_state = CS_DOWN ;
2017-06-06 14:47:30 +00:00
c - > last_state_change = current_time ( ) ;
2016-01-26 10:48:58 +00:00
c - > reloadable = 1 ;
2024-07-26 15:24:15 +00:00
c - > id = hmap_first_zero ( proto_state_table - > channel_id_maker ) ;
hmap_set ( proto_state_table - > channel_id_maker , c - > id ) ;
2016-01-26 10:48:58 +00:00
2021-02-10 02:09:57 +00:00
init_list ( & c - > roa_subscriptions ) ;
2023-09-14 13:21:53 +00:00
CALL ( c - > class - > init , c , cf ) ;
2016-01-26 10:48:58 +00:00
add_tail ( & p - > channels , & c - > n ) ;
2020-12-07 21:19:40 +00:00
CD ( c , " Connected to table %s " , c - > table - > name ) ;
2024-07-26 15:24:15 +00:00
add_journal_channel ( c ) ;
2016-01-26 10:48:58 +00:00
return c ;
}
2024-07-04 11:13:38 +00:00
struct channel *
proto_add_main_channel ( struct proto * p , struct channel_config * cf )
{
2024-08-27 13:02:38 +00:00
//log("before proto add chann proto_add_main_channel");
2024-07-04 11:13:38 +00:00
p - > main_channel = proto_add_channel ( p , cf ) ;
2024-08-27 13:02:38 +00:00
log ( " after proto add chann proto_add_main_channel %x proto id %i, chann id %i " , p - > main_channel , p - > id , p - > main_channel - > id ) ;
2024-07-04 11:13:38 +00:00
ea_list * eal = proto_state_table - > attrs [ p - > id ] ;
ea_set_attr ( & eal , EA_LITERAL_STORE_STRING ( & ea_table , 0 , p - > main_channel - > table - > name ) ) ;
2024-08-27 13:02:38 +00:00
//log("adding main table ea_rtable added eattrs %i", eal);
2024-07-26 15:24:15 +00:00
proto_journal_state_push ( eal , p , 1 ) ;
2024-07-04 11:13:38 +00:00
return p - > main_channel ;
}
2016-01-26 10:48:58 +00:00
void
2020-12-07 21:19:40 +00:00
proto_remove_channel ( struct proto * p UNUSED , struct channel * c )
2016-01-26 10:48:58 +00:00
{
ASSERT ( c - > channel_state = = CS_DOWN ) ;
2020-12-07 21:19:40 +00:00
CD ( c , " Removed " , c - > name ) ;
2016-01-26 10:48:58 +00:00
2024-08-27 13:02:38 +00:00
//log("proto_remove_channel get channel %i proto %i got %x", c->id, c->proto->id, get_channel_ea(c));
2024-07-26 15:24:15 +00:00
ea_list * eal = get_channel_ea ( c ) - > attrs ;
2024-07-04 11:13:38 +00:00
ea_set_attr ( & eal , EA_LITERAL_EMBEDDED ( & ea_deleted , 0 , 1 ) ) ;
channel_journal_state_push ( eal , c ) ;
2021-06-21 15:07:31 +00:00
rt_unlock_table ( c - > table ) ;
2016-01-26 10:48:58 +00:00
rem_node ( & c - > n ) ;
mb_free ( c ) ;
}
static void
proto_start_channels ( struct proto * p )
{
struct channel * c ;
WALK_LIST ( c , p - > channels )
if ( ! c - > disabled )
channel_set_state ( c , CS_UP ) ;
}
static void
proto_pause_channels ( struct proto * p )
1999-05-17 20:14:52 +00:00
{
2016-01-26 10:48:58 +00:00
struct channel * c ;
WALK_LIST ( c , p - > channels )
if ( ! c - > disabled & & channel_is_active ( c ) )
2021-06-21 15:07:31 +00:00
channel_set_state ( c , CS_PAUSE ) ;
2016-01-26 10:48:58 +00:00
}
1999-05-17 20:14:52 +00:00
2016-01-26 10:48:58 +00:00
static void
proto_stop_channels ( struct proto * p )
{
struct channel * c ;
WALK_LIST ( c , p - > channels )
if ( ! c - > disabled & & channel_is_active ( c ) )
2021-06-21 15:07:31 +00:00
channel_set_state ( c , CS_STOP ) ;
2016-01-26 10:48:58 +00:00
}
2012-03-15 10:58:08 +00:00
2016-01-26 10:48:58 +00:00
/* Remove and free all channels of the protocol (each must be CS_DOWN) */
static void
proto_remove_channels(struct proto *p)
{
  struct channel *c;

  /* WALK_LIST_FIRST is needed since proto_remove_channel() unlinks the node */
  WALK_LIST_FIRST(c, p->channels)
    proto_remove_channel(p, c);
}
2023-11-02 13:33:00 +00:00
/**
* # Automatic ROA reloads
*
* Route origin authorizations may ( and do ) change over time by updates via
* our RPKI protocols . This then manifests in ROA tables . As the roa_check ( )
* is always executed on a specific contents of ROA table in a specific moment
* of time , its value may switch after updates in the ROA table and therefore
* must be re - evaluated any time the result may have changed .
*
* To enable this mechanism , there are auxiliary tools integrated in BIRD
* to automatically re - evaluate all filters that may get a different outcome
* after ROA change .
*
* ROA Subscription Data Structure ( struct roa_subscription ) is the connector
* between the channel and the ROA table , keeping track about unprocessed
* changes and initiating the reloads . The modus operandi is as follows :
*
* Init 1. Check whether the filter uses ROA at all .
* Init 2. Request exports from the ROA table
* Init 3. Allocate a trie
*
* Export from ROA : This may affect all routes for prefixes matching the ROA
* prefix , disregarding its maxlen . Thus we mark these routes in the request ' s
* auxiliary trie . Then we ping the settle timer to wait a reasonable amount of
* time before actually requesting channel reload .
*
* Settle timer fires when nothing has pinged it for the ' min ' time , or ' max '
* time has elapsed since the first ping . It then :
*
* - requests partial channel import / export reload based on the trie
* - allocates a new trie
*
* As the import / export reload uses the auxiliary trie to prefilter prefixes ,
* the trie must be freed after the reload is done , which is ensured in the
* . done ( ) hook of the reimport / reexport request .
*
* # Channel export refeed
*
* The request , either by ROA or from CLI , is enqueued to the channel and an
* auxiliary export hook is requested from the table . This way , the ordinary
* updates can flow uninterrupted while refeed gets prefiltered by the given
* trie ( if given ) . When the auxiliary export hook finishes , the . done ( ) hook
* is then called for the requestor to do their cleanup .
*
* While refeeding , special care must be taken about route changes inside the
* table . For this , an auxiliary trie is allocated to keep track about already
* refed net , to avoid unnecessary multiple re - evaluation of filters .
*
* # Channel import reload from import table
*
* When the import table is on , the channel keeps the original version of the route
* in the table together with the actual version after filters , in a form of
* an additional layer of route attributes underneath the actual version . This makes
* it exceptionally simple to get the original version of the route directly
* from the table by an ordinary export which strips all the newer layers .
*
* Then , by processing all these auxiliary exports , the channel basically re - imports
* all the routes into the table back again , re - evaluating the filters and ROA checks .
*
* # Channel import reload from protocols
*
* When the import table is off , the protocol gets the reimport request directly
* via the . reload_routes ( ) hook and must do its internal route reload instead .
* The protocol may not support it and in such case , this function returns 0
* indicating that no partial reload is going to happen . It ' s then on the
* developer ' s or user ' s discretion to run a full reload instead .
*
* # Caveats , FIXME ' s , TODO ' s and other kinds of hell
*
* The partial reexport uses a trie to track state for single prefixes . This
* may do crazy things if a partial reload was to be performed on any other
* table than plain IPv6 or IPv4 . Network types like VPNv6 or Flowspec may
* cause some crashes . This is currently not checked anywhere .
*
* Anyway , we decided to split the table FIB structure to carry only a mapping
* between a prefix and a locally - unique ID , and after this update is done
* ( probably also in v2 ) , the tracking tries may be easily replaced by
* bitfields , therefore fixing this bug .
*
* We also probably didn ' t do a proper analysis of the implemented algorithm
* for reexports , so if there is somebody willing to formally prove that we
* both won ' t miss any update and won ' t reexport more than needed , you ' re welcome
* to submit such a proof .
*
* We wish you a pleasant reading , analyzing and bugfixing experience .
*
* Kata , Maria and the BIRD Team
*/
2022-09-01 08:39:56 +00:00
/* Connector between a channel and a ROA table; tracks pending ROA digests
 * and initiates the automatic channel reloads (see block comment above). */
struct roa_subscription {
  node roa_node;			/* Node in channel->roa_subscriptions */
  struct channel *c;			/* Owning channel */
  rtable *tab;				/* Auxiliary ROA digest table (locked while subscribed) */
  void (*refeed_hook)(struct channel *, struct rt_feeding_request *);
					/* channel_reimport (import dir) or channel_refeed (export dir) */
  struct lfjour_recipient digest_recipient;	/* Receives digest items from the table's journal */
  event update_event;			/* Runs channel_roa_changed() in the protocol's work list */
};

/* One in-flight partial reload created from a single ROA digest item */
struct roa_reload_request {
  struct rt_feeding_request req;	/* Trie-prefiltered feeding request */
  struct roa_subscription *s;		/* Back-pointer to the subscription */
  struct lfjour_item *item;		/* Journal item to release when the reload is done */
};
2023-11-02 13:33:00 +00:00
/*
 * channel_roa_reload_done - .done() hook of a ROA-triggered reload request.
 * Releases the journal item that caused the reload, re-arms the update event
 * (there may be more digests queued meanwhile) and frees the request.
 */
static void
channel_roa_reload_done(struct rt_feeding_request *req)
{
  SKIP_BACK_DECLARE(struct roa_reload_request, rrr, req, req);
  ASSERT_DIE(rrr->s->c->channel_state == CS_UP);

  lfjour_release(&rrr->s->digest_recipient, rrr->item);
  ev_send(proto_work_list(rrr->s->c->proto), &rrr->s->update_event);
  mb_free(rrr);
  /* FIXME: this should reset import/export filters if ACTION BLOCK */
}
2021-02-10 02:09:57 +00:00
/*
 * channel_roa_changed - update_event hook of a ROA subscription.
 * Drains all pending digest items from the journal; for each one, builds a
 * trie-prefiltered feeding request and hands it to the subscription's
 * reimport/refeed hook. The journal item itself is released later, in
 * channel_roa_reload_done().
 */
static void
channel_roa_changed(void *_s)
{
  struct roa_subscription *s = _s;

  /* Sequence-number range and count, for the trace message only */
  u64 first_seq = 0, last_seq = 0;
  uint count = 0;

  for (struct lfjour_item *it; it = lfjour_get(&s->digest_recipient); )
  {
    /* The digest (prefix trie of changed ROAs) lives inside the journal item */
    SKIP_BACK_DECLARE(struct rt_digest, rd, li, s->digest_recipient.cur);
    struct roa_reload_request *rrr = mb_alloc(s->c->proto->pool, sizeof *rrr);
    *rrr = (struct roa_reload_request) {
      .req = {
	.prefilter = {
	  .mode = TE_ADDR_TRIE,
	  .trie = rd->trie,
	},
	.done = channel_roa_reload_done,
      },
      .s = s,
      .item = it,
    };

    if (!first_seq) first_seq = it->seq;
    last_seq = it->seq;
    count++;

    /* Either channel_reimport or channel_refeed, by subscription direction */
    s->refeed_hook(s->c, &rrr->req);
  }

  if (s->c->debug & D_EVENTS)
    if (count)
      log(L_INFO "%s.%s: Requested %u automatic roa reloads, seq %lu to %lu",
	  s->c->proto->name, s->c->name, count, first_seq, last_seq);
    else
      log(L_INFO "%s.%s: No roa reload requested",
	  s->c->proto->name, s->c->name);
}
/* Map subscription direction to the reload hook: import (dir != 0) uses
 * channel_reimport, export uses channel_refeed. Returns a function pointer. */
static inline void (*channel_roa_reload_hook(int dir))(struct channel *, struct rt_feeding_request *)
{
  return dir ? channel_reimport : channel_refeed;
}
2021-02-10 02:09:57 +00:00
/* Check whether the channel already has a subscription for this ROA table
 * and direction (identified by the reload hook). Returns 1 if found. */
static int
channel_roa_is_subscribed(struct channel *c, rtable *tab, int dir)
{
  struct roa_subscription *s;
  node *n;

  WALK_LIST2(s, n, c->roa_subscriptions, roa_node)
    if ((tab == s->tab) && (s->refeed_hook == channel_roa_reload_hook(dir)))
      return 1;

  return 0;
}
/*
 * channel_roa_subscribe - subscribe the channel to ROA change digests of @tab
 * for the given direction (import/export), unless already subscribed.
 * Registers a digest recipient on the table's auxiliary digest table, which
 * stays locked until channel_roa_unsubscribe().
 */
static void
channel_roa_subscribe(struct channel *c, rtable *tab, int dir)
{
  if (channel_roa_is_subscribed(c, tab, dir))
    return;

  /* Digests are produced on the auxiliary ROA table, not on @tab itself */
  rtable *aux = tab->config->roa_aux_table->table;

  struct roa_subscription *s = mb_allocz(c->proto->pool, sizeof(struct roa_subscription));

  *s = (struct roa_subscription) {
    .c = c,
    .tab = aux,
    .refeed_hook = channel_roa_reload_hook(dir),
    .digest_recipient = {
      .target = proto_work_list(c->proto),
      .event = &s->update_event,
    },
    .update_event = {
      .hook = channel_roa_changed,
      .data = s,
    },
  };

  /* Fixed: removed leftover debug log("roa %x", s) which also printed
   * a pointer with %x (truncating on 64-bit platforms) */

  add_tail(&c->roa_subscriptions, &s->roa_node);

  RT_LOCK(aux, t);
  rt_lock_table(t);		/* Held until unsubscribe */
  rt_setup_digestor(t);		/* Make sure the table produces digests */
  lfjour_register(&t->export_digest->digest, &s->digest_recipient);
}
/*
 * channel_roa_unsubscribe - tear down one ROA subscription: unregister the
 * digest recipient, drop the table lock taken at subscribe time, cancel any
 * pending update event and free the subscription. Finally re-checks whether
 * the channel may now finish stopping.
 */
static void
channel_roa_unsubscribe(struct roa_subscription *s)
{
  /* Keep the channel pointer; s is freed below */
  struct channel *c = s->c;

  RT_LOCKED(s->tab, t)
  {
    lfjour_unregister(&s->digest_recipient);
    rt_unlock_table(t);
  }

  ev_postpone(&s->update_event);

  rem_node(&s->roa_node);
  mb_free(s);

  /* The subscription may have been the last thing blocking CS_STOP/CS_PAUSE */
  channel_check_stopped(c);
}
2021-02-10 02:09:57 +00:00
/*
 * channel_roa_subscribe_filter - walk the channel's filter (import filter for
 * dir != 0, export filter otherwise) and subscribe to every ROA table the
 * filter consults via roa_check(), so that ROA changes trigger automatic
 * reloads. Warns when the filter uses ROA but the channel cannot be reloaded.
 */
static void
channel_roa_subscribe_filter(struct channel *c, int dir)
{
  const struct filter *f = dir ? c->in_filter : c->out_filter;
  rtable *tab;
  int valid = 1, found = 0;

  /* Trivial filters cannot contain roa_check() */
  if ((f == FILTER_ACCEPT) || (f == FILTER_REJECT))
    return;

  /* No automatic reload for non-reloadable channels */
  if (dir && !channel_reloadable(c))
    valid = 0;

  struct filter_iterator fit;
  FILTER_ITERATE_INIT(&fit, f->root, c->proto->pool);

  /* Scan every instruction of the filter for ROA checks */
  FILTER_ITERATE(&fit, fi)
  {
    switch (fi->fi_code)
    {
    case FI_ROA_CHECK:
      tab = fi->i_FI_ROA_CHECK.rtc->table;
      if (valid) channel_roa_subscribe(c, tab, dir);
      found = 1;
      break;

    default:
      break;
    }
  }
  FILTER_ITERATE_END;
  FILTER_ITERATE_CLEANUP(&fit);

  if (!valid && found)
    log(L_WARN "%s.%s: Automatic RPKI reload not active for %s",
	c->proto->name, c->name ?: "?", dir ? "import" : "export");
}
/* Drop all ROA subscriptions of the channel (DELSAFE: unsubscribe unlinks) */
static void
channel_roa_unsubscribe_all(struct channel *c)
{
  struct roa_subscription *s;
  node *n, *x;

  WALK_LIST2_DELSAFE(s, n, x, c->roa_subscriptions, roa_node)
    channel_roa_unsubscribe(s);
}
2016-01-26 10:48:58 +00:00
/*
 * channel_start_import - set up and register the channel's import request
 * with its table. Resets import limits and statistics. A no-op (with a
 * warning) when the import is already running.
 */
static void
channel_start_import(struct channel *c)
{
  if (c->in_req.hook)
  {
    log(L_WARN "%s.%s: Attempted to start channel's already started import", c->proto->name, c->name);
    return;
  }

  c->in_req = (struct rt_import_request) {
    .name = mb_sprintf(c->proto->pool, "%s.%s", c->proto->name, c->name),
    /* Bitwise OR merges channel and protocol debug flag sets */
    .trace_routes = c->debug | c->proto->debug,
    .loop = c->proto->loop,
    .dump_req = channel_dump_import_req,
    .log_state_change = channel_import_log_state_change,
    .preimport = channel_preimport,
  };

  ASSERT(c->channel_state == CS_UP);

  channel_reset_limit(c, &c->rx_limit, PLD_RX);
  channel_reset_limit(c, &c->in_limit, PLD_IN);

  /* Tracks which net indices this channel has imported (used by reimport) */
  bmap_init(&c->imported_map, c->proto->pool, 16);

  memset(&c->import_stats, 0, sizeof(struct channel_import_stats));

  DBG("%s.%s: Channel start import req=%p\n", c->proto->name, c->name, &c->in_req);
  rt_request_import(c->table, &c->in_req);
}
2024-05-02 09:39:34 +00:00
void channel_notify_basic ( void * ) ;
void channel_notify_accepted ( void * ) ;
void channel_notify_merged ( void * ) ;
2016-01-26 10:48:58 +00:00
/*
 * channel_start_export - set up the channel's export request and subscribe it
 * to the table's export journal. The notification hook and the journal flavor
 * (best-only vs. all routes) depend on the route announcement mode:
 *   RA_OPTIMAL  - best routes, basic notification
 *   RA_ANY      - all routes, basic notification
 *   RA_ACCEPTED - all routes, first-accepted selection
 *   RA_MERGED   - all routes, merged-path selection
 * Export state (maps, limits, stats) is allocated from a dedicated pool.
 */
static void
channel_start_export(struct channel *c)
{
  if (rt_export_get_state(&c->out_req) != TES_DOWN)
    bug("%s.%s: Attempted to start channel's already started export", c->proto->name, c->name);

  ASSERT(c->channel_state == CS_UP);

  /* Dedicated pool so everything export-related is freed together */
  pool *p = rp_newf(c->proto->pool, c->proto->pool->domain, "Channel %s.%s export", c->proto->name, c->name);

  c->out_req = (struct rt_export_request) {
    .name = mb_sprintf(p, "%s.%s", c->proto->name, c->name),
    .r = {
      .target = proto_work_list(c->proto),
      .event = &c->out_event,
    },
    .pool = p,
    /* Optionally restrict export to a configured subprefix */
    .feeder.prefilter = {
      .mode = c->out_subprefix ? TE_ADDR_IN : TE_ADDR_NONE,
      .addr = c->out_subprefix,
    },
    /* Bitwise OR merges channel and protocol debug flag sets */
    .trace_routes = c->debug | c->proto->debug,
    .dump = channel_dump_export_req,
    .fed = channel_export_fed,
  };
  c->out_event = (event) {
    .data = c,
    /* .hook is filled below according to ra_mode */
  };

  bmap_init(&c->export_accepted_map, p, 16);
  bmap_init(&c->export_rejected_map, p, 16);

  channel_reset_limit(c, &c->out_limit, PLD_OUT);

  memset(&c->export_stats, 0, sizeof(struct channel_export_stats));

  DBG("%s.%s: Channel start export req=%p\n", c->proto->name, c->name, &c->out_req);

  switch (c->ra_mode) {
    case RA_OPTIMAL:
      c->out_event.hook = channel_notify_basic;
      rt_export_subscribe(c->table, best, &c->out_req);
      break;
    case RA_ANY:
      c->out_event.hook = channel_notify_basic;
      rt_export_subscribe(c->table, all, &c->out_req);
      break;
    case RA_ACCEPTED:
      c->out_event.hook = channel_notify_accepted;
      rt_export_subscribe(c->table, all, &c->out_req);
      break;
    case RA_MERGED:
      c->out_event.hook = channel_notify_merged;
      rt_export_subscribe(c->table, all, &c->out_req);
      break;
    default:
      bug("Unknown route announcement mode");
  }
}
2021-06-21 15:07:31 +00:00
/*
 * channel_check_stopped - advance the channel state machine once all
 * asynchronous shutdown obstacles are gone. Called whenever something that
 * blocks the transition (obstacle, ROA subscription, import hook) finishes.
 *
 * CS_STOP  -> CS_DOWN  (and wakes the protocol) when nothing remains;
 * CS_PAUSE -> CS_START when only the import may stay up.
 * Other states: nothing to do.
 */
static void
channel_check_stopped(struct channel *c)
{
  switch (c->channel_state)
  {
    case CS_STOP:
      /* Still blocked by obstacles, ROA subscriptions or a live import */
      if (c->obstacles || !EMPTY_LIST(c->roa_subscriptions) || c->in_req.hook)
	return;

      /* Export must already be fully torn down at this point */
      ASSERT_DIE(rt_export_get_state(&c->out_req) == TES_DOWN);
      ASSERT_DIE(!rt_export_feed_active(&c->reimporter));

      channel_set_state(c, CS_DOWN);
      proto_send_event(c->proto, c->proto->event);

      break;
    case CS_PAUSE:
      /* Import may stay up while paused; only obstacles and ROA matter */
      if (c->obstacles || !EMPTY_LIST(c->roa_subscriptions))
	return;

      ASSERT_DIE(rt_export_get_state(&c->out_req) == TES_DOWN);
      ASSERT_DIE(!rt_export_feed_active(&c->reimporter));

      channel_set_state(c, CS_START);
      break;
  }

  DBG("%s.%s: Channel requests/hooks stopped (in state %s)\n", c->proto->name, c->name, c_states[c->channel_state]);
}
2023-11-24 11:19:44 +00:00
/* Register one more reason the channel cannot finish stopping yet */
void
channel_add_obstacle(struct channel *c)
{
  c->obstacles++;
}
void
channel_del_obstacle ( struct channel * c )
{
if ( ! - - c - > obstacles )
channel_check_stopped ( c ) ;
}
2021-06-21 15:07:31 +00:00
/*
 * channel_import_stopped - import-request teardown callback. Frees the
 * request name and the imported-nets bitmap, then re-checks whether the
 * channel may now finish stopping.
 */
void
channel_import_stopped(struct rt_import_request *req)
{
  SKIP_BACK_DECLARE(struct channel, c, in_req, req);

  mb_free(c->in_req.name);
  c->in_req.name = NULL;

  bmap_free(&c->imported_map);

  channel_check_stopped(c);
}
2024-06-20 09:58:23 +00:00
/*
 * channel_reimport_next_feed_index - feeder hook skipping net indices this
 * channel never imported (not set in imported_map). Powers of two are always
 * returned, even if not imported, so the feeder can check the index against
 * the table maximum in reasonable intervals.
 */
static u32
channel_reimport_next_feed_index(struct rt_export_feeder *f, u32 try_this)
{
  SKIP_BACK_DECLARE(struct channel, c, reimporter, f);
  while (!bmap_test(&c->imported_map, try_this))
    if (!(try_this & (try_this - 1))) /* return every power of two to check for maximum */
      return try_this;
    else
      try_this++;

  return try_this;
}
2021-06-21 15:07:31 +00:00
/*
 * channel_do_reload - reimport event hook. Walks the reimport feed and, for
 * every route this channel originally imported, strips it back to its
 * pre-import attribute state and pushes it through the import path again
 * (re-running filters and ROA checks). Nets where no own route is seen any
 * more are cleared from imported_map so future reimports skip them.
 */
static void
channel_do_reload(void *_c)
{
  struct channel *c = _c;

  RT_FEED_WALK(&c->reimporter, f)
  {
    /* Whether any route of ours was found on this net */
    bool seen = 0;
    for (uint i = 0; i < f->count_routes; i++)
    {
      rte *r = &f->block[i];

      /* Obsolete routes terminate the block */
      if (r->flags & REF_OBSOLETE)
	break;

      /* Only reload routes this channel itself imported */
      if (r->sender == c->in_req.hook)
      {
	/* Strip the table-specific information */
	rte new = rte_init_from(r);

	/* Strip the later attribute layers */
	new.attrs = ea_strip_to(new.attrs, BIT32_ALL(EALS_PREIMPORT));

	/* And reload the route */
	rte_update(c, r->net, &new, new.src);

	seen = 1;
      }
    }

    if (!seen)
      bmap_clear(&c->imported_map, f->ni->index);

    /* Local data needed no more */
    tmp_flush();

    /* Yield and reschedule if we have been running for too long */
    MAYBE_DEFER_TASK(proto_work_list(c->proto), &c->reimport_event,
	"%s.%s reimport", c->proto->name, c->name);
  }
}
2018-09-27 20:57:55 +00:00
/* Called by protocol to activate in_table:
 * set up the reimport feeder + event and subscribe to the channel's table
 * so that import-filter changes can be replayed without a protocol-side
 * route refresh. */
static void
channel_setup_in_table(struct channel *c)
{
  /* Compound literals: all fields not named here are zero-initialized */
  c->reimporter = (struct rt_export_feeder) {
    .name = mb_sprintf(c->proto->pool, "%s.%s.reimport", c->proto->name, c->name),
    .trace_routes = c->debug,
    .next_feed_index = channel_reimport_next_feed_index,
  };
  c->reimport_event = (event) {
    .hook = channel_do_reload,
    .data = c,
  };

  /* Receive everything the table exports, not just best routes */
  rt_feeder_subscribe(&c->table->export_all, &c->reimporter);
}
2016-01-26 10:48:58 +00:00
/*
 * Transition helper for CS_DOWN -> CS_START/CS_UP: account the channel,
 * optionally set up the prefilter stash, run the class start hook and
 * begin importing.
 */
static void
channel_do_start(struct channel *c)
{
  c->proto->active_channels++;

  /* Keeping prefiltered routes enables cheap reloads via reimport */
  if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
    channel_setup_in_table(c);

  /* Channel-class specific startup hook, if any */
  CALL(c->class->start, c);

  channel_start_import(c);
}
2021-02-10 02:09:57 +00:00
static void
channel_do_up ( struct channel * c )
{
/* Register RPKI/ROA subscriptions */
2021-02-12 04:05:18 +00:00
if ( c - > rpki_reload )
{
channel_roa_subscribe_filter ( c , 1 ) ;
channel_roa_subscribe_filter ( c , 0 ) ;
}
2021-02-10 02:09:57 +00:00
}
2016-01-26 10:48:58 +00:00
/*
 * Transition helper for CS_UP -> CS_PAUSE: take down the export side
 * while keeping the import running.
 */
static void
channel_do_pause(struct channel *c)
{
  /* Drop ROA subscriptions */
  channel_roa_unsubscribe_all(c);

  /* Stop export */
  channel_stop_export(c);
}
/*
 * Transition helper for entering CS_STOP: tear down the import side,
 * cancel any in-flight reimport, release graceful-restart state and run
 * the class shutdown hook.  Completion is asynchronous; see
 * channel_import_stopped().
 */
static void
channel_do_stop(struct channel *c)
{
  /* Stop import */
  if (c->in_req.hook)
    rt_stop_import(&c->in_req, channel_import_stopped);

  /* Need to abort reimports as well */
  rt_feeder_unsubscribe(&c->reimporter);
  ev_postpone(&c->reimport_event);

  c->gr_wait = 0;
  if (c->gr_lock)
    channel_graceful_restart_unlock(c);

  /* Channel-class specific shutdown hook, if any */
  CALL(c->class->shutdown, c);
}
/*
 * Transition helper for CS_STOP -> CS_DOWN: final accounting and cleanup
 * once all asynchronous stop work has finished.
 */
static void
channel_do_down(struct channel *c)
{
  /* No reimport may be active at this point */
  ASSERT_DIE(!rt_export_feed_active(&c->reimporter));

  c->proto->active_channels--;

  /* Reset statistics for the next run of the channel */
  memset(&c->import_stats, 0, sizeof(struct channel_import_stats));
  memset(&c->export_stats, 0, sizeof(struct channel_export_stats));

  c->out_table = NULL;

  /* The in_table and out_table are going to be freed by freeing their resource pools. */

  CALL(c->class->cleanup, c);

  /* Schedule protocol shutdown */
  if (proto_is_done(c->proto))
    proto_send_event(c->proto, c->proto->event);
}
/*
 * Drive the channel state machine.  Allowed transitions are checked by
 * the ASSERTs below; each target state runs its channel_do_*() helper.
 * No-op if the channel is already in the requested state.
 */
void
channel_set_state(struct channel *c, uint state)
{
  uint cs = c->channel_state;

  DBG("%s reporting channel %s state transition %s -> %s\n", c->proto->name, c->name, c_states[cs], c_states[state]);
  if (state == cs)
    return;

  c->channel_state = state;
  c->last_state_change = current_time();

  switch (state)
  {
  case CS_START:
    ASSERT(cs == CS_DOWN || cs == CS_PAUSE);

    if (cs == CS_DOWN)
      channel_do_start(c);

    break;

  case CS_UP:
    ASSERT(cs == CS_DOWN || cs == CS_START);

    /* Direct DOWN -> UP skips CS_START, so do the start work here */
    if (cs == CS_DOWN)
      channel_do_start(c);

    /* Export only starts when not held back by graceful restart */
    if (!c->gr_wait && c->proto->rt_notify)
      channel_start_export(c);

    channel_do_up(c);
    break;

  case CS_PAUSE:
    ASSERT(cs == CS_UP);

    if (cs == CS_UP)
      channel_do_pause(c);

    break;

  case CS_STOP:
    ASSERT(cs == CS_UP || cs == CS_START || cs == CS_PAUSE);

    /* Coming from UP, pause (stop export) first, then stop import */
    if (cs == CS_UP)
      channel_do_pause(c);

    channel_do_stop(c);

    break;

  case CS_DOWN:
    ASSERT(cs == CS_STOP);

    channel_do_down(c);
    break;

  default:
    ASSERT(0);
  }

  channel_log_state_change(c);
}
2023-03-07 22:22:03 +00:00
/*
 * Stop the export side of a channel.  If an export is actually running,
 * unsubscribe from the table (best-only or all routes, depending on the
 * route-announcement mode), drop export bookkeeping and re-check whether
 * the channel can finish stopping.  Idempotent when already down.
 */
static void
channel_stop_export(struct channel *c)
{
  switch (rt_export_get_state(&c->out_req))
  {
    case TES_FEEDING:
    case TES_PARTIAL:
    case TES_READY:
      /* RA_OPTIMAL channels subscribe to best routes only */
      if (c->ra_mode == RA_OPTIMAL)
        rt_export_unsubscribe(best, &c->out_req);
      else
        rt_export_unsubscribe(all, &c->out_req);

      ev_postpone(&c->out_event);

      bmap_free(&c->export_accepted_map);
      bmap_free(&c->export_rejected_map);

      /* Name is allocated from the request pool, freed just below */
      c->out_req.name = NULL;
      rfree(c->out_req.pool);

      channel_check_stopped(c);
      break;

    case TES_DOWN:
      /* Nothing to stop */
      break;

    case TES_STOP:
    case TES_MAX:
      bug("Impossible export state");
  }
}
2024-05-02 09:39:34 +00:00
void
channel_request_reload ( struct channel * c , struct rt_feeding_request * cir )
2023-11-02 13:33:00 +00:00
{
ASSERT ( c - > in_req . hook ) ;
ASSERT ( channel_reloadable ( c ) ) ;
2024-05-02 09:39:34 +00:00
if ( cir )
CD ( c , " Partial import reload requested " ) ;
else
CD ( c , " Full import reload requested " ) ;
2023-11-02 13:33:00 +00:00
if ( ( c - > in_keep & RIK_PREFILTER ) = = RIK_PREFILTER )
2024-05-02 09:39:34 +00:00
channel_reimport ( c , cir ) ;
2023-11-02 13:33:00 +00:00
else if ( ! c - > proto - > reload_routes ( c , cir ) )
cli_msg ( - 15 , " %s.%s: partial reload refused, please run full reload instead " , c - > proto - > name , c - > name ) ;
2014-03-20 13:07:12 +00:00
}
2016-01-26 10:48:58 +00:00
/* The most basic channel class: plain channel structures, no hooks */
const struct channel_class channel_basic = {
  .channel_size = sizeof(struct channel),
  .config_size = sizeof(struct channel_config)
};
/*
 * Allocate a new channel configuration of class @cc (or the basic class)
 * named @name for protocol config @proto.  If @net_type is nonzero, it is
 * validated against the protocol's channel mask and a default table is
 * selected.  The new config is appended to @proto's channel list.
 */
void *
channel_config_new(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto)
{
  struct channel_config *cf = NULL;
  struct rtable_config *tab = NULL;

  if (net_type)
  {
    if (!net_val_match(net_type, proto->protocol->channel_mask))
      cf_error("Unsupported channel type");

    /* MPLS channels are allowed alongside any primary net type */
    if (proto->net_type && (net_type != proto->net_type) && (net_type != NET_MPLS))
      cf_error("Different channel type");

    tab = rt_get_default_table(new_config, net_type);
  }

  if (!cc)
    cc = &channel_basic;

  cf = cfg_allocz(cc->config_size);
  cf->name = name;
  cf->class = cc;
  cf->parent = proto;
  cf->table = tab;
  cf->out_filter = FILTER_REJECT;	/* export nothing by default */
  cf->net_type = net_type;
  cf->ra_mode = RA_OPTIMAL;
  cf->preference = proto->protocol->preference;
  cf->debug = new_config->channel_default_debug;
  cf->rpki_reload = 1;

  add_tail(&proto->channels, &cf->n);

  return cf;
}
2018-01-09 17:42:22 +00:00
/*
 * Look up a channel config by name in @proto; if one inherited from a
 * template exists, take it over, otherwise create a fresh one.
 */
void *
channel_config_get(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto)
{
  struct channel_config *cf;

  /* We are using name as token, so no strcmp() */
  WALK_LIST(cf, proto->channels)
  {
    if (cf->name != name)
      continue;

    /* Allow to redefine channel only if inherited from template */
    if (cf->parent == proto)
      cf_error("Multiple %s channels", name);

    cf->parent = proto;
    cf->copy = 1;
    return cf;
  }

  return channel_config_new(cc, name, net_type, proto);
}
2016-01-26 10:48:58 +00:00
/*
 * Duplicate a channel configuration (used when instantiating protocol
 * templates).  The copy is appended to @proto's channel list; class-
 * specific data is copied via the class copy_config hook.
 */
struct channel_config *
channel_copy_config(struct channel_config *src, struct proto_config *proto)
{
  struct channel_config *dst = cfg_alloc(src->class->config_size);

  memcpy(dst, src, src->class->config_size);
  /* The list node must not be shared with the source */
  memset(&dst->n, 0, sizeof(node));
  add_tail(&proto->channels, &dst->n);

  CALL(src->class->copy_config, dst, src);

  return dst;
}
static int reconfigure_type ; /* Hack to propagate type info to channel_reconfigure() */
/*
 * Try to reconfigure a channel in place.  Returns 1 on success; returns 0
 * when the change is too fundamental (different table, mode, keep flags
 * or subprefix) or refused by the class hook, in which case the caller
 * restarts the protocol instead.
 */
int
channel_reconfigure(struct channel *c, struct channel_config *cf)
{
  /* Touched by reconfiguration */
  c->stale = 0;

  /* FIXME: better handle these changes, also handle in_keep_filtered */
  /* NOTE(review): the && / || mix below relies on C precedence (&& binds
   * tighter); parenthesizing would silence -Wparentheses */
  if ((c->table != cf->table->table) ||
      (cf->ra_mode && (c->ra_mode != cf->ra_mode)) ||
      (cf->in_keep != c->in_keep) ||
      cf->out_subprefix && c->out_subprefix &&
      !net_equal(cf->out_subprefix, c->out_subprefix) ||
      (!cf->out_subprefix != !c->out_subprefix))
    return 0;

  /* Note that filter_same() requires arguments in (new, old) order */
  int import_changed = !filter_same(cf->in_filter, c->in_filter);
  int export_changed = !filter_same(cf->out_filter, c->out_filter);
  int rpki_reload_changed = (cf->rpki_reload != c->rpki_reload);

  /* Preference affects the result of the import, merge limit the export */
  if (c->preference != cf->preference)
    import_changed = 1;

  if (c->merge_limit != cf->merge_limit)
    export_changed = 1;

  /* Reconfigure channel fields */
  c->in_filter = cf->in_filter;
  c->out_filter = cf->out_filter;

  channel_update_limit(c, &c->rx_limit, PLD_RX, &cf->rx_limit);
  channel_update_limit(c, &c->in_limit, PLD_IN, &cf->in_limit);
  channel_update_limit(c, &c->out_limit, PLD_OUT, &cf->out_limit);

  // c->ra_mode = cf->ra_mode;
  c->merge_limit = cf->merge_limit;
  c->preference = cf->preference;
  c->out_req.feeder.prefilter.addr = c->out_subprefix = cf->out_subprefix;
  c->debug = cf->debug;
  c->in_req.trace_routes = c->out_req.trace_routes = c->debug | c->proto->debug;
  c->rpki_reload = cf->rpki_reload;

  /* Execute channel-specific reconfigure hook */
  if (c->class->reconfigure && !c->class->reconfigure(c, cf, &import_changed, &export_changed))
    return 0;

  /* If the channel is not open, it has no routes and we cannot reload it anyways */
  if (c->channel_state != CS_UP)
    goto done;

  /* Update RPKI/ROA subscriptions */
  if (import_changed || export_changed || rpki_reload_changed)
  {
    channel_roa_unsubscribe_all(c);

    if (c->rpki_reload)
    {
      channel_roa_subscribe_filter(c, 1);
      channel_roa_subscribe_filter(c, 0);
    }
  }

  /* Soft reconfiguration only logs the filter changes, no reload/refeed */
  if (reconfigure_type == RECONFIG_SOFT)
  {
    if (import_changed)
      log(L_INFO "Channel %s.%s changed import", c->proto->name, c->name);

    if (export_changed)
      log(L_INFO "Channel %s.%s changed export", c->proto->name, c->name);

    goto done;
  }

  /* Route reload may be not supported */
  if (import_changed && !channel_reloadable(c))
    return 0;

  if (import_changed || export_changed)
    log(L_INFO "Reloading channel %s.%s", c->proto->name, c->name);

  if (import_changed)
    channel_request_reload(c, NULL);

  if (export_changed)
    channel_request_full_refeed(c);

done:
  CD(c, "Reconfigured");
  return 1;
}
int
proto_configure_channel ( struct proto * p , struct channel * * pc , struct channel_config * cf )
1999-05-17 20:14:52 +00:00
{
2016-01-26 10:48:58 +00:00
struct channel * c = * pc ;
1999-05-17 20:14:52 +00:00
2016-01-26 10:48:58 +00:00
if ( ! c & & cf )
{
2018-12-11 16:57:14 +00:00
/* We could add the channel, but currently it would just stay in down state
until protocol is restarted , so it is better to force restart anyways . */
2018-12-16 21:48:13 +00:00
if ( p - > proto_state ! = PS_DOWN )
{
log ( L_INFO " Cannot add channel %s.%s " , p - > name , cf - > name ) ;
return 0 ;
}
* pc = proto_add_channel ( p , cf ) ;
2016-01-26 10:48:58 +00:00
}
else if ( c & & ! cf )
{
if ( c - > channel_state ! = CS_DOWN )
{
log ( L_INFO " Cannot remove channel %s.%s " , c - > proto - > name , c - > name ) ;
return 0 ;
}
proto_remove_channel ( p , c ) ;
* pc = NULL ;
}
else if ( c & & cf )
{
if ( ! channel_reconfigure ( c , cf ) )
{
log ( L_INFO " Cannot reconfigure channel %s.%s " , c - > proto - > name , c - > name ) ;
return 0 ;
}
}
return 1 ;
2012-03-15 10:58:08 +00:00
}
2023-11-08 20:51:46 +00:00
2021-06-19 18:50:18 +00:00
/*
 * Final cleanup of a protocol after it has fully stopped: run the
 * protocol cleanup hook, release the resource pool, mark inactive and
 * re-evaluate whether the protocol should be (re)started.
 */
static void
proto_cleanup(struct proto *p)
{
  CALL(p->proto->cleanup, p);

  /* The pool may already be gone (freed with the birdloop) */
  if (p->pool)
  {
    rp_free(p->pool);
    p->pool = NULL;
  }

  p->active = 0;
  proto_log_state_change(p);

  proto_rethink_goal(p);
}
2016-01-26 10:48:58 +00:00
2012-03-15 10:58:08 +00:00
/*
 * Callback run in the main birdloop after a protocol's private loop has
 * stopped; frees the loop (which owns the protocol pool) and finishes
 * the cleanup.
 */
static void
proto_loop_stopped(void *ptr)
{
  struct proto *p = ptr;

  ASSERT_DIE(birdloop_inside(&main_birdloop));
  ASSERT_DIE(p->loop != &main_birdloop);

  p->pool = NULL; /* is freed by birdloop_free() */
  birdloop_free(p->loop);
  p->loop = &main_birdloop;

  proto_cleanup(p);
}
2023-11-08 20:51:46 +00:00
2021-06-19 18:50:18 +00:00
/*
 * Main protocol event: processes a pending stop request and, once the
 * protocol is completely done, tears down its loop-local resources and
 * schedules/performs the final cleanup.
 */
static void
proto_event(void *ptr)
{
  struct proto *p = ptr;

  if (p->do_stop)
  {
    iface_unsubscribe(&p->iface_sub);

    p->do_stop = 0;
  }

  if (proto_is_done(p) && p->pool_inloop)  /* perusing pool_inloop to do this once only */
  {
    rp_free(p->pool_inloop);
    p->pool_inloop = NULL;

    /* Protocols with a private loop must stop it first; cleanup then
     * happens from proto_loop_stopped() in the main loop */
    if (p->loop != &main_birdloop)
      birdloop_stop_self(p->loop, proto_loop_stopped, p);
    else
      proto_cleanup(p);
  }
}
/**
* proto_new - create a new protocol instance
* @ c : protocol configuration
*
* When a new configuration has been read in , the core code starts
* initializing all the protocol instances configured by calling their
* init ( ) hooks with the corresponding instance configuration . The initialization
* code of the protocol is expected to create a new instance according to the
* configuration by calling this function and then modifying the default settings
* to values wanted by the protocol .
*/
void *
proto_new ( struct proto_config * cf )
{
struct proto * p = mb_allocz ( proto_pool , cf - > protocol - > proto_size ) ;
2024-06-13 10:15:38 +00:00
OBSREF_SET ( p - > global_config , cf - > global ) ;
2016-01-26 10:48:58 +00:00
p - > cf = cf ;
p - > debug = cf - > debug ;
p - > mrtdump = cf - > mrtdump ;
p - > name = cf - > name ;
p - > proto = cf - > protocol ;
p - > net_type = cf - > net_type ;
p - > disabled = cf - > disabled ;
p - > hash_key = random_u32 ( ) ;
cf - > proto = p ;
2024-07-26 15:24:15 +00:00
p - > id = hmap_first_zero ( proto_state_table - > proto_id_maker ) ;
hmap_set ( proto_state_table - > proto_id_maker , p - > id ) ;
2024-08-27 13:02:38 +00:00
log ( " protocol %s %x has id %i " , p - > name , p , p - > id ) ;
if ( p - > id > = proto_state_table - > length ) //TODO check
2024-07-26 15:24:15 +00:00
protos_attr_field_grow ( ) ;
2024-08-30 10:30:13 +00:00
//init_list(&proto_state_table->channels_attrs[p->id]);
2024-08-27 13:02:38 +00:00
//log("init chann list %x (pid %i)", &proto_state_table->channels_attrs[p->id], p->id);
2024-07-26 15:24:15 +00:00
2016-01-26 10:48:58 +00:00
init_list ( & p - > channels ) ;
2024-08-27 13:02:38 +00:00
//log("philgrim init %x", p->channels);
//log("init chann list %x (pid %i)", &proto_state_table->channels_attrs[p->id], p->id);
2016-01-26 10:48:58 +00:00
return p ;
}
/*
 * Instantiate a protocol from its config via the protocol init() hook,
 * put it into PS_DOWN, insert it after @after in the global protocol
 * list and publish the initial state to the protocol journal.
 */
static struct proto *
proto_init(struct proto_config *c, struct proto *after)
{
  struct protocol *pr = c->protocol;
  struct proto *p = pr->init(c);

  p->loop = &main_birdloop;
  /* Remember the state set by init() so the journal records the change */
  int old_state = p->proto_state;
  p->proto_state = PS_DOWN;
  p->last_state_change = current_time();
  p->vrf = c->vrf;
  proto_add_after(&global_proto_list, p, after);

  p->event = ev_new_init(proto_pool, proto_event, p);

  PD(p, "Initializing%s", p->disabled ? " [disabled]" : "");

  /* Announce the new protocol's state as eattrs via the journal */
  ea_list *eal = proto_state_to_eattr(p, old_state, 0);
  proto_journal_state_push(eal, p, 1);

  return p;
}
/*
 * Bring a protocol up: create its birdloop (or main-loop pool), set up
 * interface subscription metadata, allocate the in-loop pools under the
 * protocol's own lock and finally run the protocol start() hook.
 */
static void
proto_start(struct proto *p)
{
  DBG("Kicking %s up\n", p->name);
  PD(p, "Starting");

  /* Protocols started during initial GR phase take part in recovery */
  if (graceful_restart_state == GRS_INIT)
    p->gr_recovery = 1;

  if (p->cf->loop_order != DOMAIN_ORDER(the_bird))
  {
    /* Protocol runs in its own loop; the loop owns the pool */
    p->loop = birdloop_new(proto_pool, p->cf->loop_order, p->cf->loop_max_latency, "Protocol %s", p->cf->name);
    p->pool = birdloop_pool(p->loop);
  }
  else
    p->pool = rp_newf(proto_pool, the_bird_domain.the_bird, "Protocol %s", p->cf->name);

  p->iface_sub.target = proto_event_list(p);
  p->iface_sub.name = p->name;
  p->iface_sub.debug = !!(p->debug & D_IFACES);

  PROTO_LOCKED_FROM_MAIN(p)
  {
    /* pool_inloop doubles as the "not yet finally cleaned up" flag,
     * see proto_event() */
    p->pool_inloop = rp_newf(p->pool, birdloop_domain(p->loop), "Protocol %s early cleanup objects", p->cf->name);
    p->pool_up = rp_newf(p->pool, birdloop_domain(p->loop), "Protocol %s stop-free objects", p->cf->name);
    proto_notify_state(p, (p->proto->start ? p->proto->start(p) : PS_UP));
  }
}
2012-08-14 14:25:22 +00:00
2000-06-02 13:42:36 +00:00
/**
 * proto_config_new - create a new protocol configuration
 * @pr: protocol the configuration will belong to
 * @class: SYM_PROTO or SYM_TEMPLATE
 *
 * Whenever the configuration file says that a new instance
 * of a routing protocol should be created, the parser calls
 * proto_config_new() to create a configuration entry for this
 * instance (a structure starting with the &proto_config header
 * containing all the generic items followed by protocol-specific
 * ones). Also, the configuration entry gets added to the list
 * of protocol instances kept in the configuration.
 *
 * The function is also used to create protocol templates (when class
 * SYM_TEMPLATE is specified), the only difference is that templates
 * are not added to the list of protocol instances and therefore not
 * initialized during protos_commit()).
 */
void *
proto_config_new(struct protocol *pr, int class)
{
  struct proto_config *cf = cfg_allocz(pr->config_size);

  /* Templates are not instantiated, so keep them off the protos list */
  if (class == SYM_PROTO)
    add_tail(&new_config->protos, &cf->n);

  cf->global = new_config;
  cf->protocol = pr;
  cf->name = pr->name;
  cf->class = class;
  cf->debug = new_config->proto_default_debug;
  cf->mrtdump = new_config->proto_default_mrtdump;
  cf->loop_order = DOMAIN_ORDER(the_bird);

  init_list(&cf->channels);

  return cf;
}
2016-01-26 10:48:58 +00:00
2011-11-06 23:31:23 +00:00
/**
 * proto_copy_config - copy a protocol configuration
 * @dest: destination protocol configuration
 * @src: source protocol configuration
 *
 * Whenever a new instance of a routing protocol is created from the
 * template, proto_copy_config() is called to copy a content of
 * the source protocol configuration to the new protocol configuration.
 * Name, class and a node in protos list of @dest are kept intact.
 * copy_config() protocol hook is used to copy protocol-specific data.
 */
void
proto_copy_config(struct proto_config *dest, struct proto_config *src)
{
  struct channel_config *cc;
  node old_node;
  int old_class;
  const char *old_name;

  if (dest->protocol != src->protocol)
    cf_error("Can't copy configuration from a different protocol type");

  if (dest->protocol->copy_config == NULL)
    cf_error("Inheriting configuration for %s is not supported", src->protocol->name);

  DBG("Copying configuration from %s to %s\n", src->name, dest->name);

  /*
   * Copy struct proto_config here. Keep original node, class and name.
   * protocol-specific config copy is handled by protocol copy_config() hook
   */
  old_node = dest->n;
  old_class = dest->class;
  old_name = dest->name;
  memcpy(dest, src, src->protocol->config_size);
  dest->n = old_node;
  dest->class = old_class;
  dest->name = old_name;

  /* Channels must be duplicated, not shared with the template */
  init_list(&dest->channels);

  WALK_LIST(cc, src->channels)
    channel_copy_config(cc, dest);

  /* FIXME: allow for undefined copy_config */
  dest->protocol->copy_config(dest, src);
}
2019-04-08 15:05:07 +00:00
void
proto_clone_config ( struct symbol * sym , struct proto_config * parent )
{
struct proto_config * cf = proto_config_new ( parent - > protocol , SYM_PROTO ) ;
proto_copy_config ( cf , parent ) ;
cf - > name = sym - > name ;
cf - > proto = NULL ;
cf - > parent = parent ;
sym - > class = cf - > class ;
2019-07-03 09:09:52 +00:00
sym - > proto = cf ;
2019-04-08 15:05:07 +00:00
}
static void
proto_undef_clone ( struct symbol * sym , struct proto_config * cf )
{
rem_node ( & cf - > n ) ;
sym - > class = SYM_VOID ;
2019-07-03 09:09:52 +00:00
sym - > proto = NULL ;
2019-04-08 15:05:07 +00:00
}
2000-06-02 13:42:36 +00:00
/**
* protos_preconfig - pre - configuration processing
* @ c : new configuration
*
* This function calls the preconfig ( ) hooks of all routing
* protocols available to prepare them for reading of the new
* configuration .
*/
1998-05-20 11:54:33 +00:00
void
1999-02-05 21:37:34 +00:00
protos_preconfig ( struct config * c )
1998-05-20 11:54:33 +00:00
{
1998-06-03 08:38:53 +00:00
struct protocol * p ;
1999-12-06 13:44:45 +00:00
init_list ( & c - > protos ) ;
2000-03-12 21:01:38 +00:00
DBG ( " Protocol preconfig: " ) ;
1998-06-03 08:38:53 +00:00
WALK_LIST ( p , protocol_list )
2016-01-26 10:48:58 +00:00
{
DBG ( " %s " , p - > name ) ;
p - > name_counter = 0 ;
if ( p - > preconfig )
p - > preconfig ( p , c ) ;
}
2000-03-12 21:01:38 +00:00
DBG ( " \n " ) ;
1999-02-05 21:37:34 +00:00
}
2010-02-06 18:19:09 +00:00
/*
 * Try to reconfigure a running protocol in place.  Returns 1 when the
 * switch to @nc succeeded, 0 when the caller must restart the protocol
 * (protocol down, core attributes changed, or the protocol hook refused).
 */
static int
proto_reconfigure(struct proto *p, struct proto_config *oc, struct proto_config *nc, int type)
{
  /* If the protocol is DOWN, we just restart it */
  if (p->proto_state == PS_DOWN)
    return 0;

  /* If there is a too big change in core attributes, ... */
  if ((nc->protocol != oc->protocol) ||
      (nc->net_type != oc->net_type) ||
      (nc->disabled != p->disabled) ||
      (nc->vrf != oc->vrf))
    return 0;

  /* Propagate name and debug settings to the route sources as well */
  p->sources.name = p->name = nc->name;
  p->sources.debug = p->debug = nc->debug;
  p->mrtdump = nc->mrtdump;
  /* Pass the reconfiguration type down to channel_reconfigure() */
  reconfigure_type = type;

  /* Execute protocol specific reconfigure hook */
  if (!p->proto->reconfigure || !p->proto->reconfigure(p, nc))
    return 0;

  DBG("\t%s: same\n", oc->name);
  PD(p, "Reconfigured");
  p->cf = nc;

  return 1;
}
2023-09-24 21:22:43 +00:00
/* Pending commit state: which configs to diff, the current protocol
 * startup phase being processed, and the reconfiguration type */
static struct protos_commit_request {
  struct config *new;
  struct config *old;
  enum protocol_startup phase;
  int type;
} protos_commit_request;

/* Nonzero when a proto_rethink_goal() pass is still pending */
static int proto_rethink_goal_pending = 0;
2024-06-13 13:36:18 +00:00
static void protos_do_commit ( struct config * new , struct config * old , int type ) ;
2023-09-24 21:22:43 +00:00
2000-06-02 13:42:36 +00:00
/**
 * protos_commit - commit new protocol configuration
 * @new: new configuration
 * @old: old configuration or %NULL if it's boot time config
 * @type: type of reconfiguration (RECONFIG_SOFT or RECONFIG_HARD)
 *
 * Scan differences between @old and @new configuration and adjust all
 * protocol instances to conform to the new configuration.
 *
 * When a protocol exists in the new configuration, but it doesn't in the
 * original one, it's immediately started. When a collision with the other
 * running protocol would arise, the new protocol will be temporarily stopped
 * by the locking mechanism.
 *
 * When a protocol exists in the old configuration, but it doesn't in the
 * new one, it's shut down and deleted after the shutdown completes.
 *
 * When a protocol exists in both configurations, the core decides
 * whether it's possible to reconfigure it dynamically - it checks all
 * the core properties of the protocol (changes in filters are ignored
 * if type is RECONFIG_SOFT) and if they match, it asks the
 * reconfigure() hook of the protocol to see if the protocol is able
 * to switch to the new configuration. If it isn't possible, the
 * protocol is shut down and a new instance is started with the new
 * configuration after the shutdown is completed.
 */
void
protos_commit(struct config *new, struct config *old, int type)
{
  /* Record the request; phase selection: regular shutdown starts with
   * ordinary protocols, otherwise begin with the necessary ones */
  protos_commit_request = (struct protos_commit_request) {
    .new = new,
    .old = old,
    .phase = (new->shutdown && !new->gr_down) ? PROTOCOL_STARTUP_REGULAR : PROTOCOL_STARTUP_NECESSARY,
    .type = type,
  };

  protos_do_commit(new, old, type);
}
static void
2024-06-13 13:36:18 +00:00
protos_do_commit ( struct config * new , struct config * old , int type )
2023-09-24 21:22:43 +00:00
{
enum protocol_startup phase = protos_commit_request . phase ;
2000-01-16 16:44:50 +00:00
struct proto_config * oc , * nc ;
2011-11-06 23:31:23 +00:00
struct symbol * sym ;
2016-01-26 10:48:58 +00:00
struct proto * p ;
1999-02-05 21:37:34 +00:00
2023-09-24 21:22:43 +00:00
if ( ( phase < PROTOCOL_STARTUP_REGULAR ) | | ( phase > PROTOCOL_STARTUP_NECESSARY ) )
{
protos_commit_request = ( struct protos_commit_request ) { } ;
return ;
}
2000-01-16 16:44:50 +00:00
DBG ( " protos_commit: \n " ) ;
if ( old )
2016-01-26 10:48:58 +00:00
{
WALK_LIST ( oc , old - > protos )
1999-02-05 21:37:34 +00:00
{
2023-09-24 21:22:43 +00:00
if ( oc - > protocol - > startup ! = phase )
continue ;
2016-01-26 10:48:58 +00:00
p = oc - > proto ;
sym = cf_find_symbol ( new , oc - > name ) ;
2019-04-08 15:05:07 +00:00
2023-03-07 22:22:03 +00:00
struct birdloop * proto_loop = PROTO_ENTER_FROM_MAIN ( p ) ;
2019-04-08 15:05:07 +00:00
/* Handle dynamic protocols */
if ( ! sym & & oc - > parent & & ! new - > shutdown )
{
struct symbol * parsym = cf_find_symbol ( new , oc - > parent - > name ) ;
if ( parsym & & parsym - > class = = SYM_PROTO )
{
/* This is hack, we would like to share config, but we need to copy it now */
new_config = new ;
cfg_mem = new - > mem ;
2023-06-13 08:51:03 +00:00
new - > current_scope = new - > root_scope ;
sym = cf_get_symbol ( new , oc - > name ) ;
2019-07-03 09:09:52 +00:00
proto_clone_config ( sym , parsym - > proto ) ;
2019-04-08 15:05:07 +00:00
new_config = NULL ;
cfg_mem = NULL ;
}
}
2016-01-26 10:48:58 +00:00
if ( sym & & sym - > class = = SYM_PROTO & & ! new - > shutdown )
{
/* Found match, let's check if we can smoothly switch to new configuration */
/* No need to check description */
2019-02-15 12:53:17 +00:00
nc = sym - > proto ;
2016-01-26 10:48:58 +00:00
nc - > proto = p ;
/* We will try to reconfigure protocol p */
2024-06-13 13:36:18 +00:00
if ( proto_reconfigure ( p , oc , nc , type ) )
2023-03-07 22:22:03 +00:00
{
2024-06-13 10:15:38 +00:00
OBSREF_CLEAR ( p - > global_config ) ;
OBSREF_SET ( p - > global_config , new ) ;
2023-03-07 22:22:03 +00:00
PROTO_LEAVE_FROM_MAIN ( proto_loop ) ;
2016-01-26 10:48:58 +00:00
continue ;
2023-03-07 22:22:03 +00:00
}
2016-01-26 10:48:58 +00:00
2019-04-08 15:05:07 +00:00
if ( nc - > parent )
{
proto_undef_clone ( sym , nc ) ;
goto remove ;
}
2016-01-26 10:48:58 +00:00
/* Unsuccessful, we will restart it */
if ( ! p - > disabled & & ! nc - > disabled )
log ( L_INFO " Restarting protocol %s " , p - > name ) ;
else if ( p - > disabled & & ! nc - > disabled )
log ( L_INFO " Enabling protocol %s " , p - > name ) ;
else if ( ! p - > disabled & & nc - > disabled )
log ( L_INFO " Disabling protocol %s " , p - > name ) ;
p - > down_code = nc - > disabled ? PDC_CF_DISABLE : PDC_CF_RESTART ;
p - > cf_new = nc ;
}
else if ( ! new - > shutdown )
{
2019-04-08 15:05:07 +00:00
remove :
2016-01-26 10:48:58 +00:00
log ( L_INFO " Removing protocol %s " , p - > name ) ;
p - > down_code = PDC_CF_REMOVE ;
p - > cf_new = NULL ;
}
2019-06-18 14:27:21 +00:00
else if ( new - > gr_down )
{
p - > down_code = PDC_CMD_GR_DOWN ;
p - > cf_new = NULL ;
}
2016-01-26 10:48:58 +00:00
else /* global shutdown */
{
p - > down_code = PDC_CMD_SHUTDOWN ;
p - > cf_new = NULL ;
}
p - > reconfiguring = 1 ;
2023-03-07 22:22:03 +00:00
PROTO_LEAVE_FROM_MAIN ( proto_loop ) ;
2016-01-26 10:48:58 +00:00
proto_rethink_goal ( p ) ;
1998-06-03 08:38:53 +00:00
}
2016-01-26 10:48:58 +00:00
}
2023-04-19 19:18:12 +00:00
struct proto * after = NULL ;
2000-01-16 16:44:50 +00:00
WALK_LIST ( nc , new - > protos )
2023-09-24 21:22:43 +00:00
if ( ( nc - > protocol - > startup = = phase ) & & ! nc - > proto )
2016-01-26 10:48:58 +00:00
{
/* Not a first-time configuration */
if ( old )
log ( L_INFO " Adding protocol %s " , nc - > name ) ;
2023-04-19 19:18:12 +00:00
p = proto_init ( nc , after ) ;
after = p ;
2016-01-26 10:48:58 +00:00
2023-09-24 21:22:43 +00:00
proto_rethink_goal ( p ) ;
2016-01-26 10:48:58 +00:00
}
else
2023-04-19 19:18:12 +00:00
after = nc - > proto ;
2000-01-16 16:44:50 +00:00
DBG ( " Protocol start \n " ) ;
2011-04-01 11:54:39 +00:00
2012-12-27 11:56:23 +00:00
/* Determine router ID for the first time - it has to be here and not in
global_commit ( ) because it is postponed after start of device protocol */
2024-06-13 14:30:51 +00:00
if ( ( phase = = PROTOCOL_STARTUP_NECESSARY ) & & ! old )
2016-01-26 10:48:58 +00:00
{
2024-06-13 14:30:51 +00:00
struct global_runtime * gr = atomic_load_explicit ( & global_runtime , memory_order_relaxed ) ;
if ( ! gr - > router_id )
{
gr - > router_id = if_choose_router_id ( new - > router_id_from , 0 ) ;
if ( ! gr - > router_id )
die ( " Cannot determine router ID, please configure it manually " ) ;
}
2016-01-26 10:48:58 +00:00
}
2012-12-27 11:56:23 +00:00
2023-09-24 21:22:43 +00:00
/* Commit next round of protocols */
if ( new - > shutdown & & ! new - > gr_down )
protos_commit_request . phase + + ;
else
protos_commit_request . phase - - ;
/* If something is pending, the next round will be called asynchronously from proto_rethink_goal(). */
if ( ! proto_rethink_goal_pending )
2024-06-13 13:36:18 +00:00
protos_do_commit ( new , old , type ) ;
1998-06-03 08:38:53 +00:00
}
1998-10-17 11:05:18 +00:00
static void
2021-06-19 18:50:18 +00:00
proto_shutdown ( struct proto * p )
1998-10-17 11:05:18 +00:00
{
2021-06-19 18:50:18 +00:00
if ( p - > proto_state = = PS_START | | p - > proto_state = = PS_UP )
{
/* Going down */
DBG ( " Kicking %s down \n " , p - > name ) ;
PD ( p , " Shutting down " ) ;
2024-08-27 13:02:38 +00:00
log ( " shutdown " ) ;
2021-06-19 18:50:18 +00:00
proto_notify_state ( p , ( p - > proto - > shutdown ? p - > proto - > shutdown ( p ) : PS_DOWN ) ) ;
2023-09-24 21:22:43 +00:00
if ( p - > reconfiguring )
{
proto_rethink_goal_pending + + ;
p - > reconfiguring = 2 ;
}
2021-06-19 18:50:18 +00:00
}
}
2000-01-16 16:44:50 +00:00
2021-06-19 18:50:18 +00:00
static void
proto_rethink_goal ( struct proto * p )
{
2023-09-24 21:22:43 +00:00
int goal_pending = ( p - > reconfiguring = = 2 ) ;
2016-01-26 10:48:58 +00:00
if ( p - > reconfiguring & & ! p - > active )
{
struct proto_config * nc = p - > cf_new ;
2023-04-19 19:18:12 +00:00
struct proto * after = p - > n . prev ;
2024-07-04 11:13:38 +00:00
ea_list * eal = proto_state_table - > attrs [ p - > id ] ;
ea_set_attr ( & eal , EA_LITERAL_EMBEDDED ( & ea_deleted , 0 , 1 ) ) ;
2024-07-26 15:24:15 +00:00
proto_journal_state_push ( eal , p , 1 ) ;
2024-07-04 11:13:38 +00:00
log ( " deleting %i " , p - > id ) ;
hmap_clear ( proto_state_table - > proto_id_maker , p - > id ) ;
atomic_store ( & proto_state_table - > attrs [ p - > id ] , NULL ) ;
2024-05-31 10:27:59 +00:00
2016-01-26 10:48:58 +00:00
DBG ( " %s has shut down for reconfiguration \n " , p - > name ) ;
p - > cf - > proto = NULL ;
2024-06-13 10:15:38 +00:00
OBSREF_CLEAR ( p - > global_config ) ;
2016-01-26 10:48:58 +00:00
proto_remove_channels ( p ) ;
2023-04-19 19:18:12 +00:00
proto_rem_node ( & global_proto_list , p ) ;
2016-01-26 10:48:58 +00:00
rfree ( p - > event ) ;
2017-12-07 20:54:47 +00:00
mb_free ( p - > message ) ;
2016-01-26 10:48:58 +00:00
mb_free ( p ) ;
if ( ! nc )
2023-09-24 21:22:43 +00:00
goto done ;
2023-04-19 19:18:12 +00:00
p = proto_init ( nc , after ) ;
2016-01-26 10:48:58 +00:00
}
2000-01-16 16:44:50 +00:00
/* Determine what state we want to reach */
2000-01-16 17:40:26 +00:00
if ( p - > disabled | | p - > reconfiguring )
2016-01-26 10:48:58 +00:00
{
2021-06-19 18:50:18 +00:00
PROTO_LOCKED_FROM_MAIN ( p )
proto_shutdown ( p ) ;
2016-01-26 10:48:58 +00:00
}
2021-06-19 18:50:18 +00:00
else if ( ! p - > active )
proto_start ( p ) ;
2023-09-24 21:22:43 +00:00
done :
if ( goal_pending & & ! - - proto_rethink_goal_pending )
protos_do_commit (
protos_commit_request . new ,
protos_commit_request . old ,
protos_commit_request . type
) ;
1999-02-11 22:59:06 +00:00
}
2019-04-08 15:05:07 +00:00
/* Instantiate a protocol from @cf at the tail of the global protocol list,
 * optionally keeping it disabled, and kick its state machine. Returns the
 * new instance. */
struct proto *
proto_spawn(struct proto_config *cf, uint disabled)
{
  struct proto *p;

  p = proto_init(cf, global_proto_list.last);
  p->disabled = disabled;
  proto_rethink_goal(p);

  return p;
}
2014-03-20 13:07:12 +00:00
2014-03-23 00:35:33 +00:00
/**
* DOC : Graceful restart recovery
*
* Graceful restart of a router is a process when the routing plane ( e . g . BIRD )
* restarts but both the forwarding plane (e.g. kernel routing table) and routing
* neighbors keep proper routes , and therefore uninterrupted packet forwarding
* is maintained .
*
* BIRD implements graceful restart recovery by deferring export of routes to
* protocols until routing tables are refilled with the expected content . After
* start , protocols generate routes as usual , but routes are not propagated to
* them , until protocols report that they generated all routes . After that ,
* graceful restart recovery is finished and the export ( and the initial feed )
* to protocols is enabled .
*
* When graceful restart recovery need is detected during initialization , then
* enabled protocols are marked with @ gr_recovery flag before start . Such
* protocols then decide how to proceed with graceful restart , participation is
2016-01-26 10:48:58 +00:00
* voluntary . Protocols could lock the recovery for each channel by function
2016-05-12 14:04:47 +00:00
* channel_graceful_restart_lock ( ) ( state stored in @ gr_lock flag ) , which means
2016-01-26 10:48:58 +00:00
* that they want to postpone the end of the recovery until they converge and
* then unlock it . They also could set @ gr_wait before advancing to % PS_UP ,
* which means that the core should defer route export to that channel until
* the end of the recovery . This should be done by protocols that expect their
* neighbors to keep the proper routes (kernel table, BGP sessions with BGP
* graceful restart capability ) .
2014-03-23 00:35:33 +00:00
*
* The graceful restart recovery is finished when either all graceful restart
* locks are unlocked or when graceful restart wait timer fires .
*
*/
2014-03-20 13:07:12 +00:00
2017-06-01 10:33:20 +00:00
static void graceful_restart_done ( timer * t ) ;
2014-03-20 13:07:12 +00:00
2014-03-23 00:35:33 +00:00
/**
* graceful_restart_recovery - request initial graceful restart recovery
*
* Called by the platform initialization code if the need for recovery
* after graceful restart is detected during boot. It has to be called
* before protos_commit ( ) .
*/
2014-03-20 13:07:12 +00:00
void
graceful_restart_recovery ( void )
{
  /* Only flag the request here; the active phase and its wait timer
     are set up later by graceful_restart_init(). */
graceful_restart_state = GRS_INIT ;
}
2014-03-23 00:35:33 +00:00
/**
* graceful_restart_init - initialize graceful restart
*
* When graceful restart recovery was requested , the function starts an active
* phase of the recovery and initializes graceful restart wait timer . The
* function has to be called after protos_commit().
*/
2014-03-20 13:07:12 +00:00
void
graceful_restart_init ( void )
{
if ( ! graceful_restart_state )
return ;
log ( L_INFO " Graceful restart started " ) ;
if ( ! graceful_restart_locks )
2016-01-26 10:48:58 +00:00
{
graceful_restart_done ( NULL ) ;
return ;
}
2014-03-20 13:07:12 +00:00
graceful_restart_state = GRS_ACTIVE ;
2017-11-28 16:43:20 +00:00
gr_wait_timer = tm_new_init ( proto_pool , graceful_restart_done , NULL , 0 , 0 ) ;
2024-06-13 14:30:51 +00:00
u32 gr_wait = atomic_load_explicit ( & global_runtime , memory_order_relaxed ) - > gr_wait ;
tm_start ( gr_wait_timer , gr_wait S ) ;
2014-03-20 13:07:12 +00:00
}
2014-03-23 00:35:33 +00:00
/**
* graceful_restart_done - finalize graceful restart
2016-05-12 13:49:44 +00:00
* @ t : unused
2014-03-23 00:35:33 +00:00
*
* When there are no locks on graceful restart, the function finalizes the
* graceful restart recovery . Protocols postponing route export until the end of
* the recovery are awakened and the export to them is enabled . All other
* related state is cleared . The function is also called when the graceful
* restart wait timer fires ( but there are still some locks ) .
*/
2014-03-20 13:07:12 +00:00
static void
2024-06-13 14:30:51 +00:00
graceful_restart_done ( timer * t )
2014-03-20 13:07:12 +00:00
{
log ( L_INFO " Graceful restart done " ) ;
graceful_restart_state = GRS_DONE ;
2023-04-19 19:18:12 +00:00
WALK_TLIST ( proto , p , & global_proto_list )
2016-01-26 10:48:58 +00:00
{
if ( ! p - > gr_recovery )
continue ;
2014-03-20 13:07:12 +00:00
2016-01-26 10:48:58 +00:00
struct channel * c ;
WALK_LIST ( c , p - > channels )
{
2014-03-20 13:07:12 +00:00
/* Resume postponed export of routes */
2021-06-21 15:07:31 +00:00
if ( ( c - > channel_state = = CS_UP ) & & c - > gr_wait & & p - > rt_notify )
2016-01-26 10:48:58 +00:00
channel_start_export ( c ) ;
2014-03-20 13:07:12 +00:00
/* Cleanup */
2016-01-26 10:48:58 +00:00
c - > gr_wait = 0 ;
c - > gr_lock = 0 ;
2014-03-20 13:07:12 +00:00
}
2016-01-26 10:48:58 +00:00
p - > gr_recovery = 0 ;
}
2014-03-20 13:07:12 +00:00
graceful_restart_locks = 0 ;
2024-06-13 14:30:51 +00:00
rfree ( t ) ;
2014-03-20 13:07:12 +00:00
}
void
graceful_restart_show_status ( void )
{
if ( graceful_restart_state ! = GRS_ACTIVE )
return ;
cli_msg ( - 24 , " Graceful restart recovery in progress " ) ;
2016-01-26 10:48:58 +00:00
cli_msg ( - 24 , " Waiting for %d channels to recover " , graceful_restart_locks ) ;
2024-06-13 14:30:51 +00:00
cli_msg ( - 24 , " Wait timer is %t/%u " , tm_remains ( gr_wait_timer ) ,
atomic_load_explicit ( & global_runtime , memory_order_relaxed ) - > gr_wait ) ;
2014-03-20 13:07:12 +00:00
}
2014-03-23 00:35:33 +00:00
/**
2016-01-26 10:48:58 +00:00
* channel_graceful_restart_lock - lock graceful restart by channel
* @c: channel instance
2014-03-23 00:35:33 +00:00
*
* This function allows a protocol to postpone the end of graceful restart
* recovery until it converges . The lock is removed when the protocol calls
2016-01-26 10:48:58 +00:00
* channel_graceful_restart_unlock ( ) or when the channel is closed .
2014-03-23 00:35:33 +00:00
*
* The function has to be called during the initial phase of graceful restart
* recovery and only for protocols that are part of graceful restart ( i . e . their
* @ gr_recovery is set ) , which means it should be called from protocol start
* hooks .
*/
2014-03-20 13:07:12 +00:00
void
2016-01-26 10:48:58 +00:00
channel_graceful_restart_lock ( struct channel * c )
2014-03-20 13:07:12 +00:00
{
ASSERT ( graceful_restart_state = = GRS_INIT ) ;
2016-01-26 10:48:58 +00:00
ASSERT ( c - > proto - > gr_recovery ) ;
2014-03-20 13:07:12 +00:00
2016-01-26 10:48:58 +00:00
if ( c - > gr_lock )
2014-03-20 13:07:12 +00:00
return ;
2016-01-26 10:48:58 +00:00
c - > gr_lock = 1 ;
2014-03-20 13:07:12 +00:00
graceful_restart_locks + + ;
}
2014-03-23 00:35:33 +00:00
/**
2016-01-26 10:48:58 +00:00
* channel_graceful_restart_unlock - unlock graceful restart by channel
* @c: channel instance
2014-03-23 00:35:33 +00:00
*
2016-01-26 10:48:58 +00:00
* This function unlocks a lock from channel_graceful_restart_lock ( ) . It is also
2014-03-23 00:35:33 +00:00
* automatically called when the lock holding protocol went down .
*/
2014-03-20 13:07:12 +00:00
void
2016-01-26 10:48:58 +00:00
channel_graceful_restart_unlock ( struct channel * c )
2014-03-20 13:07:12 +00:00
{
2016-01-26 10:48:58 +00:00
if ( ! c - > gr_lock )
2014-03-20 13:07:12 +00:00
return ;
2016-01-26 10:48:58 +00:00
c - > gr_lock = 0 ;
2014-03-20 13:07:12 +00:00
graceful_restart_locks - - ;
if ( ( graceful_restart_state = = GRS_ACTIVE ) & & ! graceful_restart_locks )
2017-11-28 16:43:20 +00:00
tm_start ( gr_wait_timer , 0 ) ;
2014-03-20 13:07:12 +00:00
}
2000-06-02 13:42:36 +00:00
/**
* protos_dump_all - dump status of all protocols
*
* This function dumps status of all existing protocol instances to the
* debug output . It involves printing of general status information
* such as protocol states , its position on the protocol lists
* and also calling of a dump ( ) hook of the protocol to print
* the internals .
*/
1998-07-09 19:36:52 +00:00
void
protos_dump_all ( void )
{
debug ( " Protocols: \n " ) ;
2023-04-19 19:18:12 +00:00
WALK_TLIST ( proto , p , & global_proto_list ) PROTO_LOCKED_FROM_MAIN ( p )
2016-01-26 10:48:58 +00:00
{
2021-06-21 15:07:31 +00:00
# define DPF(x) (p->x ? " " #x : "")
2023-03-06 18:28:08 +00:00
debug ( " protocol %s (%p) state %s with %d active channels flags: %s%s%s%s \n " ,
2021-06-21 15:07:31 +00:00
p - > name , p , p_states [ p - > proto_state ] , p - > active_channels ,
2021-06-19 18:50:18 +00:00
DPF ( disabled ) , DPF ( active ) , DPF ( do_stop ) , DPF ( reconfiguring ) ) ;
2021-06-21 15:07:31 +00:00
# undef DPF
2016-01-26 10:48:58 +00:00
struct channel * c ;
WALK_LIST ( c , p - > channels )
1998-07-09 19:36:52 +00:00
{
2016-01-26 10:48:58 +00:00
debug ( " \t TABLE %s \n " , c - > table - > name ) ;
if ( c - > in_filter )
debug ( " \t Input filter: %s \n " , filter_name ( c - > in_filter ) ) ;
if ( c - > out_filter )
debug ( " \t Output filter: %s \n " , filter_name ( c - > out_filter ) ) ;
2021-06-21 15:07:31 +00:00
debug ( " \t Channel state: %s/%s/%s \n " , c_states [ c - > channel_state ] ,
c - > in_req . hook ? rt_import_state_name ( rt_import_get_state ( c - > in_req . hook ) ) : " - " ,
2024-05-02 09:39:34 +00:00
rt_export_state_name ( rt_export_get_state ( & c - > out_req ) ) ) ;
1998-07-09 19:36:52 +00:00
}
2016-01-26 10:48:58 +00:00
2023-11-01 17:25:40 +00:00
debug ( " \t SOURCES \n " ) ;
rt_dump_sources ( & p - > sources ) ;
2016-01-26 10:48:58 +00:00
if ( p - > proto - > dump & & ( p - > proto_state ! = PS_DOWN ) )
p - > proto - > dump ( p ) ;
}
1998-07-09 19:36:52 +00:00
}
2000-06-02 13:42:36 +00:00
/**
* proto_build - make a single protocol available
* @ p : the protocol
*
* After the platform specific initialization code uses protos_build ( )
* to add all the standard protocols , it should call proto_build ( ) for
2000-06-07 12:29:08 +00:00
* all platform specific protocols to inform the core that they exist .
2000-06-02 13:42:36 +00:00
*/
2000-04-01 10:19:47 +00:00
void
proto_build ( struct protocol * p )
{
  /* Register the protocol class on the global list of available protocols */
add_tail ( & protocol_list , & p - > n ) ;
}
2013-11-19 21:33:48 +00:00
/* FIXME: convert this call to some protocol hook */
extern void bfd_init_all ( void ) ;
2022-03-18 21:05:50 +00:00
void protos_build_gen ( void ) ;
2000-06-02 13:42:36 +00:00
/**
* protos_build - build a protocol list
*
* This function is called during BIRD startup to insert
* all standard protocols to the global protocol list . Insertion
* of platform specific protocols ( such as the kernel syncer )
* is in the domain of competence of the platform dependent
* startup code .
*/
1998-10-18 11:53:21 +00:00
void
protos_build ( void )
{
2023-04-21 13:26:06 +00:00
proto_pool = rp_new ( & root_pool , the_bird_domain . the_bird , " Protocols " ) ;
2023-04-24 14:10:59 +00:00
2024-07-04 11:13:38 +00:00
init_journals ( ) ;
2024-06-20 09:11:06 +00:00
//create_dummy_recipient();
2023-04-24 14:10:59 +00:00
protos_build_gen ( ) ;
1999-02-11 22:59:06 +00:00
}
2012-03-28 16:40:04 +00:00
2012-04-24 21:39:57 +00:00
/* Temporary hack to propagate restart to BGP */
int proto_restart ;
2012-03-28 16:40:04 +00:00
2012-04-15 13:28:29 +00:00
static void
2023-02-06 14:06:12 +00:00
proto_restart_event_hook ( void * _p )
2012-04-15 13:28:29 +00:00
{
2023-02-06 14:06:12 +00:00
struct proto * p = _p ;
if ( ! p - > down_sched )
return ;
2012-04-15 13:28:29 +00:00
2023-02-06 14:06:12 +00:00
proto_restart = ( p - > down_sched = = PDS_RESTART ) ;
p - > disabled = 1 ;
proto_rethink_goal ( p ) ;
2012-04-15 13:28:29 +00:00
2023-02-06 14:06:12 +00:00
p - > restart_event = NULL ;
p - > restart_timer = NULL ;
if ( proto_restart )
/* No need to call proto_rethink_goal() here again as the proto_cleanup() routine will
* call it after the protocol stops . . . and both these routines are fixed to main_birdloop .
*/
p - > disabled = 0 ;
}
static void
proto_send_restart_event ( struct proto * p )
{
if ( ! p - > restart_event )
p - > restart_event = ev_new_init ( p - > pool , proto_restart_event_hook , p ) ;
ev_send ( & global_event_list , p - > restart_event ) ;
}
static void
proto_send_restart_event_from_timer ( struct timer * t )
{
proto_send_restart_event ( ( struct proto * ) t - > data ) ;
2012-04-15 13:28:29 +00:00
}
/* Schedule an asynchronous disable (@restart == 0) or restart (@restart != 0)
 * of protocol @p with down code @code. Restarts are rate-limited through a
 * 250 ms timer; disables take effect via an immediate event. */
static inline void
proto_schedule_down(struct proto *p, byte restart, byte code)
{
  /* Does not work for other states (even PS_START) */
  ASSERT(p->proto_state == PS_UP);

  /* Scheduled restart may change to shutdown, but not otherwise */
  if (p->down_sched == PDS_DISABLE)
    return;

  p->down_sched = restart ? PDS_RESTART : PDS_DISABLE;
  p->down_code = code;

  if (!restart)
  {
    /* Cancel a pending rate-limited restart and disable right away */
    if (p->restart_timer && tm_active(p->restart_timer))
      tm_stop(p->restart_timer);

    proto_send_restart_event(p);
  }
  else
  {
    if (!p->restart_timer)
      p->restart_timer = tm_new_init(p->pool, proto_send_restart_event_from_timer, p, 0, 0);

    tm_start_max_in(p->restart_timer, 250 MS, p->loop);
  }
}
2017-09-19 17:55:37 +00:00
/**
* proto_set_message - set administrative message to protocol
* @ p : protocol
* @ msg : message
* @ len : message length ( - 1 for NULL - terminated string )
*
* The function sets administrative message ( string ) related to protocol state
* change . It is called by the nest code for manual enable / disable / restart
* commands all routes to the protocol , and by protocol - specific code when the
* protocol state change is initiated by the protocol . Using NULL message clears
* the last message . The message string may be either NULL - terminated or with an
* explicit length .
*/
void
proto_set_message ( struct proto * p , char * msg , int len )
{
mb_free ( p - > message ) ;
p - > message = NULL ;
if ( ! msg | | ! len )
return ;
if ( len < 0 )
len = strlen ( msg ) ;
if ( ! len )
return ;
p - > message = mb_alloc ( proto_pool , len + 1 ) ;
memcpy ( p - > message , msg , len ) ;
p - > message [ len ] = 0 ;
2024-08-27 13:02:38 +00:00
log ( " p mess %x " , p - > message ) ;
2017-09-19 17:55:37 +00:00
}
2012-04-15 13:28:29 +00:00
2021-11-06 19:34:16 +00:00
/* Human-readable names of the limit actions (indexed by PLA_*),
   used in limit-hit log messages and CLI output */
static const char * channel_limit_name [ ] = {
[ PLA_WARN ] = " warn " ,
[ PLA_BLOCK ] = " block " ,
[ PLA_RESTART ] = " restart " ,
[ PLA_DISABLE ] = " disable " ,
} ;
2012-04-15 13:28:29 +00:00
2021-11-06 19:34:16 +00:00
static void
channel_log_limit ( struct channel * c , struct limit * l , int dir )
2012-04-15 13:28:29 +00:00
{
2013-01-10 12:07:33 +00:00
const char * dir_name [ PLD_MAX ] = { " receive " , " import " , " export " } ;
2021-11-06 19:34:16 +00:00
log ( L_WARN " Channel %s.%s hits route %s limit (%d), action: %s " ,
c - > proto - > name , c - > name , dir_name [ dir ] , l - > max , channel_limit_name [ c - > limit_actions [ dir ] ] ) ;
}
2012-04-15 13:28:29 +00:00
2021-11-06 19:34:16 +00:00
static void
channel_activate_limit ( struct channel * c , struct limit * l , int dir )
{
if ( c - > limit_active & ( 1 < < dir ) )
2012-04-21 19:05:36 +00:00
return ;
2012-04-15 13:28:29 +00:00
2021-11-06 19:34:16 +00:00
c - > limit_active | = ( 1 < < dir ) ;
channel_log_limit ( c , l , dir ) ;
}
2012-04-15 13:28:29 +00:00
2021-11-06 19:34:16 +00:00
static int
channel_limit_warn ( struct limit * l , void * data )
{
struct channel_limit_data * cld = data ;
struct channel * c = cld - > c ;
int dir = cld - > dir ;
2016-01-26 10:48:58 +00:00
2021-11-06 19:34:16 +00:00
channel_log_limit ( c , l , dir ) ;
2016-01-26 10:48:58 +00:00
2021-11-06 19:34:16 +00:00
return 0 ;
2012-04-15 13:28:29 +00:00
}
2021-11-06 19:34:16 +00:00
static int
channel_limit_block ( struct limit * l , void * data )
2014-04-26 22:46:32 +00:00
{
2021-11-06 19:34:16 +00:00
struct channel_limit_data * cld = data ;
struct channel * c = cld - > c ;
int dir = cld - > dir ;
2014-04-26 22:46:32 +00:00
2021-11-06 19:34:16 +00:00
channel_activate_limit ( c , l , dir ) ;
2014-04-26 22:46:32 +00:00
2021-11-06 19:34:16 +00:00
return 1 ;
}
2014-04-26 22:46:32 +00:00
2021-11-06 19:34:16 +00:00
static const byte chl_dir_down [ PLD_MAX ] = { PDC_RX_LIMIT_HIT , PDC_IN_LIMIT_HIT , PDC_OUT_LIMIT_HIT } ;
static int
channel_limit_down ( struct limit * l , void * data )
{
struct channel_limit_data * cld = data ;
struct channel * c = cld - > c ;
struct proto * p = c - > proto ;
int dir = cld - > dir ;
channel_activate_limit ( c , l , dir ) ;
if ( p - > proto_state = = PS_UP )
proto_schedule_down ( p , c - > limit_actions [ dir ] = = PLA_RESTART , chl_dir_down [ dir ] ) ;
return 1 ;
2014-04-26 22:46:32 +00:00
}
2021-11-06 19:34:16 +00:00
/* Dispatch table mapping configured PLA_* actions to limit hooks.
   PLA_RESTART and PLA_DISABLE share channel_limit_down(); the distinction
   is made there from c->limit_actions[dir]. */
static int ( * channel_limit_action [ ] ) ( struct limit * , void * ) = {
[ PLA_NONE ] = NULL ,
[ PLA_WARN ] = channel_limit_warn ,
[ PLA_BLOCK ] = channel_limit_block ,
[ PLA_RESTART ] = channel_limit_down ,
[ PLA_DISABLE ] = channel_limit_down ,
} ;
static void
channel_update_limit ( struct channel * c , struct limit * l , int dir , struct channel_limit * cf )
2014-03-20 13:07:12 +00:00
{
2021-11-06 19:34:16 +00:00
l - > action = channel_limit_action [ cf - > action ] ;
c - > limit_actions [ dir ] = cf - > action ;
struct channel_limit_data cld = { . c = c , . dir = dir } ;
limit_update ( l , & cld , cf - > action ? cf - > limit : ~ ( ( u32 ) 0 ) ) ;
}
/* Initialize a channel limit: clear its counter and active flag,
   then apply the configured action and threshold from @cf. */
static void
channel_init_limit ( struct channel * c , struct limit * l , int dir , struct channel_limit * cf )
{
channel_reset_limit ( c , l , dir ) ;
channel_update_limit ( c , l , dir , cf ) ;
}
static void
channel_reset_limit ( struct channel * c , struct limit * l , int dir )
{
limit_reset ( l ) ;
c - > limit_active & = ~ ( 1 < < dir ) ;
2014-03-20 13:07:12 +00:00
}
2016-01-26 10:48:58 +00:00
static inline void
proto_do_start ( struct proto * p )
2014-03-20 13:07:12 +00:00
{
2016-01-26 10:48:58 +00:00
p - > active = 1 ;
2021-09-27 14:40:28 +00:00
2023-12-08 10:33:43 +00:00
p - > sources . debug = p - > debug ;
2024-06-25 09:19:14 +00:00
rt_init_sources ( & p - > sources , p - > name , proto_event_list ( p ) ) ;
2023-12-08 10:33:43 +00:00
2021-06-19 18:50:18 +00:00
if ( ! p - > cf - > late_if_feed )
2023-02-07 13:27:23 +00:00
iface_subscribe ( & p - > iface_sub ) ;
2014-03-20 13:07:12 +00:00
}
static void
2016-01-26 10:48:58 +00:00
proto_do_up ( struct proto * p )
2014-03-20 13:07:12 +00:00
{
2016-01-26 10:48:58 +00:00
if ( ! p - > main_source )
p - > main_source = rt_get_source ( p , 0 ) ;
2021-09-27 14:40:28 +00:00
// Locked automaticaly
2014-03-20 13:07:12 +00:00
2016-01-26 10:48:58 +00:00
proto_start_channels ( p ) ;
2021-06-19 18:50:18 +00:00
if ( p - > cf - > late_if_feed )
2023-02-07 13:27:23 +00:00
iface_subscribe ( & p - > iface_sub ) ;
2014-03-20 13:07:12 +00:00
}
2016-01-26 10:48:58 +00:00
/* PS_UP -> PS_START transition: put the protocol's channels into
 * the paused state. */
static inline void
proto_do_pause(struct proto *p)
{
  proto_pause_channels(p);
}
static void
2016-01-26 10:48:58 +00:00
proto_do_stop ( struct proto * p )
2014-03-20 13:07:12 +00:00
{
2016-01-26 10:48:58 +00:00
p - > down_sched = 0 ;
2014-03-20 13:07:12 +00:00
p - > gr_recovery = 0 ;
2014-03-23 00:35:33 +00:00
2016-01-26 10:48:58 +00:00
if ( p - > main_source )
{
rt_unlock_source ( p - > main_source ) ;
p - > main_source = NULL ;
}
2014-03-23 00:35:33 +00:00
2023-10-13 08:22:09 +00:00
rp_free ( p - > pool_up ) ;
p - > pool_up = NULL ;
2016-01-26 10:48:58 +00:00
proto_stop_channels ( p ) ;
2021-09-27 14:40:28 +00:00
rt_destroy_sources ( & p - > sources , p - > event ) ;
2021-06-19 18:50:18 +00:00
p - > do_stop = 1 ;
2023-04-02 17:34:22 +00:00
proto_send_event ( p , p - > event ) ;
2016-01-26 10:48:58 +00:00
}
2014-03-23 00:35:33 +00:00
2016-01-26 10:48:58 +00:00
static void
proto_do_down ( struct proto * p )
{
p - > down_code = 0 ;
/* Shutdown is finished in the protocol event */
if ( proto_is_done ( p ) )
2023-04-02 17:34:22 +00:00
proto_send_event ( p , p - > event ) ;
2014-03-23 00:35:33 +00:00
}
2014-03-20 13:07:12 +00:00
2016-01-26 10:48:58 +00:00
2000-06-02 13:42:36 +00:00
/**
* proto_notify_state - notify core about protocol state change
* @ p : protocol the state of which has changed
* @ ps : the new status
*
* Whenever a state of a protocol changes due to some event internal
* to the protocol ( i . e . , not inside a start ( ) or shutdown ( ) hook ) ,
* it should immediately notify the core about the change by calling
* proto_notify_state ( ) which will write the new state to the & proto
* structure and take all the actions necessary to adapt to the new
2008-12-08 11:24:55 +00:00
* state . State change to PS_DOWN immediately frees resources of protocol
* and might execute start callback of protocol ; therefore ,
* it should be used at tail positions of protocol callbacks .
2000-06-02 13:42:36 +00:00
*/
1999-02-11 22:59:06 +00:00
void
2016-01-26 10:48:58 +00:00
proto_notify_state ( struct proto * p , uint state )
1999-02-11 22:59:06 +00:00
{
2016-01-26 10:48:58 +00:00
uint ps = p - > proto_state ;
1999-02-11 22:59:06 +00:00
2016-01-26 10:48:58 +00:00
DBG ( " %s reporting state transition %s -> %s \n " , p - > name , p_states [ ps ] , p_states [ state ] ) ;
if ( state = = ps )
1999-02-11 22:59:06 +00:00
return ;
2024-05-31 10:27:59 +00:00
int old_state = p - > proto_state ;
2016-01-26 10:48:58 +00:00
p - > proto_state = state ;
2017-06-06 14:47:30 +00:00
p - > last_state_change = current_time ( ) ;
2024-07-04 11:13:38 +00:00
ea_list * eal = proto_state_table - > attrs [ p - > id ] ;
ea_set_attr ( & eal , EA_LITERAL_EMBEDDED ( & ea_state , 0 , p - > proto_state ) ) ;
ea_set_attr ( & eal , EA_LITERAL_EMBEDDED ( & ea_old_state , 0 , old_state ) ) ;
2024-07-26 15:24:15 +00:00
proto_journal_state_push ( eal , p , 1 ) ;
2008-12-08 11:24:55 +00:00
2016-01-26 10:48:58 +00:00
switch ( state )
{
case PS_START :
ASSERT ( ps = = PS_DOWN | | ps = = PS_UP ) ;
if ( ps = = PS_DOWN )
proto_do_start ( p ) ;
else
proto_do_pause ( p ) ;
break ;
case PS_UP :
ASSERT ( ps = = PS_DOWN | | ps = = PS_START ) ;
if ( ps = = PS_DOWN )
proto_do_start ( p ) ;
proto_do_up ( p ) ;
break ;
case PS_STOP :
ASSERT ( ps = = PS_START | | ps = = PS_UP ) ;
proto_do_stop ( p ) ;
break ;
case PS_DOWN :
if ( ps ! = PS_STOP )
proto_do_stop ( p ) ;
proto_do_down ( p ) ;
break ;
default :
bug ( " %s: Invalid state %d " , p - > name , ps ) ;
}
2014-03-24 11:32:12 +00:00
proto_log_state_change ( p ) ;
1998-10-18 11:53:21 +00:00
}
1999-02-13 19:15:28 +00:00
1999-11-30 12:57:14 +00:00
/*
* CLI Commands
*/
static char *
proto_state_name ( struct proto * p )
{
2016-01-26 10:48:58 +00:00
switch ( p - > proto_state )
{
case PS_DOWN : return p - > active ? " flush " : " down " ;
case PS_START : return " start " ;
case PS_UP : return " up " ;
case PS_STOP : return " stop " ;
default : return " ??? " ;
}
1999-11-30 12:57:14 +00:00
}
2024-05-31 10:27:59 +00:00
/* Name a raw PS_* state value; without a protocol instance we cannot
 * tell "flush" from "down", so PS_DOWN maps to a combined label. */
static char *
proto_state_name_from_int(int state)
{
  switch (state)
  {
  case PS_DOWN:		return "flush or down";
  case PS_START:	return "start";
  case PS_UP:		return "up";
  case PS_STOP:		return "stop";
  default:		return "???";
  }
}
2010-02-13 09:44:46 +00:00
/* Print per-channel route statistics to the CLI: route counts
   (imported/filtered/exported/preferred) followed by a table of
   import/export update and withdraw counters. Import stats come from the
   channel and its import hook (when attached), export stats from the
   channel and its export request; a missing stats structure reads as
   zeroes via the SON() helper macro. */
static void
2016-01-26 10:48:58 +00:00
channel_show_stats ( struct channel * c )
2010-02-13 09:44:46 +00:00
{
2021-06-21 15:07:31 +00:00
struct channel_import_stats * ch_is = & c - > import_stats ;
struct channel_export_stats * ch_es = & c - > export_stats ;
struct rt_import_stats * rt_is = c - > in_req . hook ? & c - > in_req . hook - > stats : NULL ;
2024-05-02 09:39:34 +00:00
struct rt_export_stats * rt_es = & c - > out_req . stats ;
2021-06-21 15:07:31 +00:00
# define SON(ie, item) ((ie) ? (ie)->item : 0)
# define SCI(item) SON(ch_is, item)
# define SCE(item) SON(ch_es, item)
# define SRI(item) SON(rt_is, item)
# define SRE(item) SON(rt_es, item)
2016-01-26 10:48:58 +00:00
2021-11-06 19:34:16 +00:00
u32 rx_routes = c - > rx_limit . count ;
u32 in_routes = c - > in_limit . count ;
u32 out_routes = c - > out_limit . count ;
2022-06-16 21:24:56 +00:00
/* With in_keep, the received (unfiltered) count is also available */
if ( c - > in_keep )
2019-02-05 14:59:26 +00:00
cli_msg ( - 1006 , " Routes: %u imported, %u filtered, %u exported, %u preferred " ,
2021-06-21 15:07:31 +00:00
in_routes , ( rx_routes - in_routes ) , out_routes , SRI ( pref ) ) ;
2012-11-10 13:26:13 +00:00
else
2019-02-02 12:28:16 +00:00
cli_msg ( - 1006 , " Routes: %u imported, %u exported, %u preferred " ,
2021-06-21 15:07:31 +00:00
in_routes , out_routes , SRI ( pref ) ) ;
cli_msg ( - 1006 , " Route change stats: received rejected filtered ignored RX limit IN limit accepted " ) ;
cli_msg ( - 1006 , " Import updates: %10u %10u %10u %10u %10u %10u %10u " ,
SCI ( updates_received ) , SCI ( updates_invalid ) ,
SCI ( updates_filtered ) , SRI ( updates_ignored ) ,
SCI ( updates_limited_rx ) , SCI ( updates_limited_in ) ,
SRI ( updates_accepted ) ) ;
cli_msg ( - 1006 , " Import withdraws: %10u %10u --- %10u --- %10u " ,
SCI ( withdraws_received ) , SCI ( withdraws_invalid ) ,
SRI ( withdraws_ignored ) , SRI ( withdraws_accepted ) ) ;
cli_msg ( - 1006 , " Export updates: %10u %10u %10u --- %10u %10u " ,
SRE ( updates_received ) , SCE ( updates_rejected ) ,
SCE ( updates_filtered ) , SCE ( updates_limited ) , SCE ( updates_accepted ) ) ;
cli_msg ( - 1006 , " Export withdraws: %10u --- --- --- ---%10u " ,
SRE ( withdraws_received ) , SCE ( withdraws_accepted ) ) ;
# undef SRI
# undef SRE
# undef SCI
# undef SCE
# undef SON
2010-02-13 09:44:46 +00:00
}
2012-04-15 13:28:29 +00:00
void
2021-11-06 19:34:16 +00:00
channel_show_limit ( struct limit * l , const char * dsc , int active , int action )
2012-04-15 13:28:29 +00:00
{
2016-01-26 10:48:58 +00:00
if ( ! l - > action )
2012-04-21 19:05:36 +00:00
return ;
2021-11-06 19:34:16 +00:00
cli_msg ( - 1006 , " %-16s%d%s " , dsc , l - > max , active ? " [HIT] " : " " ) ;
cli_msg ( - 1006 , " Action: %s " , channel_limit_name [ action ] ) ;
2012-04-15 13:28:29 +00:00
}
2012-03-15 10:58:08 +00:00
/*
 * channel_show_info - print the per-channel information block of
 * `show protocols all` CLI output.
 */
void
channel_show_info(struct channel *c)
{
  cli_msg(-1006, "  Channel %s", c->name);
  cli_msg(-1006, "    State:          %s", c_states[c->channel_state]);
  cli_msg(-1006, "    Import state:   %s", rt_import_state_name(rt_import_get_state(c->in_req.hook)));
  cli_msg(-1006, "    Export state:   %s", rt_export_state_name(rt_export_get_state(&c->out_req)));
  cli_msg(-1006, "    Table:          %s", c->table->name);
  cli_msg(-1006, "    Preference:     %d", c->preference);
  cli_msg(-1006, "    Input filter:   %s", filter_name(c->in_filter));
  cli_msg(-1006, "    Output filter:  %s", filter_name(c->out_filter));

  /* During graceful restart recovery, show whether this channel still
   * holds a GR lock (pending) or waits for recovery to finish (waiting) */
  if (graceful_restart_state == GRS_ACTIVE)
    cli_msg(-1006, "    GR recovery:   %s%s",
	    c->gr_lock ? " pending" : "",
	    c->gr_wait ? " waiting" : "");

  channel_show_limit(&c->rx_limit, "Receive limit:", c->limit_active & (1 << PLD_RX), c->limit_actions[PLD_RX]);
  channel_show_limit(&c->in_limit, "Import limit:", c->limit_active & (1 << PLD_IN), c->limit_actions[PLD_IN]);
  channel_show_limit(&c->out_limit, "Export limit:", c->limit_active & (1 << PLD_OUT), c->limit_actions[PLD_OUT]);

  /* Statistics are meaningful only for a channel that has been up */
  if (c->channel_state != CS_DOWN)
    channel_show_stats(c);
}
2020-12-07 21:19:40 +00:00
/*
 * channel_cmd_debug - CLI handler setting per-channel debug flags.
 * Refused (silently, after the access message) on restricted sessions.
 */
void
channel_cmd_debug(struct channel *c, uint mask)
{
  if (!cli_access_restricted())
  {
    c->debug = mask;
    cli_msg(0, "");
  }
}
2010-02-19 23:03:31 +00:00
/*
 * proto_cmd_show - CLI `show protocols [all]` handler for one protocol.
 *
 * Reads the published protocol state snapshot (an interned ea_list obtained
 * via proto_get_state_list()) instead of touching the protocol structure
 * directly; only the hooks that need the protocol's own data are run under
 * PROTO_LOCKED_FROM_MAIN. @cnt is the running count of printed protocols;
 * the table header is emitted before the first one.
 */
void
proto_cmd_show(struct proto *p, union cmd_arg verbose, int cnt)
{
  byte buf[256], tbuf[TM_DATETIME_BUFFER_SIZE];

  /* First protocol - show header */
  if (!cnt)
    cli_msg(-2002, "%-10s %-10s %-10s %-6s %-12s  %s",
	    "Name", "Proto", "Table", "State", "Since", "Info");

  /* Snapshot of the protocol's published state attributes */
  ea_list *eal = proto_get_state_list(p->id);

  const char *name = ea_get_adata(eal, &ea_name)->data;
  struct protocol *proto = (struct protocol *) ea_get_ptr(eal, &ea_protocol_type, 0);
  const int state = ea_get_int(eal, &ea_state, 0);
  /* NOTE(review): ea_table may be absent for protocols without a main
   * channel (see proto_state_to_eattr()) — confirm ea_get_adata() is
   * NULL-safe in that case */
  const char *table = ea_get_adata(eal, &ea_table)->data;

  buf[0] = 0;
  if (p->proto->get_status)
  {
    /* Status hook reads live protocol data — needs the protocol loop */
    PROTO_LOCKED_FROM_MAIN(p)
      p->proto->get_status(p, buf);
  }
  const btime *time = (btime *) ea_get_adata(eal, &ea_last_modified)->data;
  tm_format_time(tbuf, &atomic_load_explicit(&global_runtime, memory_order_acquire)->tf_proto, *time); //todo readlock????

  cli_msg(-1002, "%-10s %-10s %-10s %-6s %-12s  %s",
	  name,
	  proto->name,
	  table ? table : "---",
	  proto_state_name_from_int(state),
	  tbuf,
	  buf);

  /* `show protocols all` — verbose per-protocol details */
  if (verbose.verbose)
  {
    PROTO_LOCKED_FROM_MAIN(p)
    {
      if (p->cf->dsc)
	cli_msg(-1006, "  Description:    %s", p->cf->dsc);
      if (p->message)
	cli_msg(-1006, "  Message:        %s", p->message);
      if (p->cf->router_id)
	cli_msg(-1006, "  Router ID:      %R", p->cf->router_id);
      if (p->vrf)
	cli_msg(-1006, "  VRF:            %s", p->vrf->name);

      /* Protocol-specific info hook, or the generic per-channel dump */
      if (p->proto->show_proto_info)
	p->proto->show_proto_info(p);
      else
      {
	struct channel *c;
	WALK_LIST(c, p->channels)
	  channel_show_info(c);
      }

      cli_msg(-1006, "");
    }
  }
}
1999-11-25 15:35:30 +00:00
void
2017-09-19 17:55:37 +00:00
proto_cmd_disable ( struct proto * p , uintptr_t arg , int cnt UNUSED )
1999-11-25 15:35:30 +00:00
{
2010-02-19 23:03:31 +00:00
if ( p - > disabled )
2016-01-26 10:48:58 +00:00
{
cli_msg ( - 8 , " %s: already disabled " , p - > name ) ;
return ;
}
2010-02-19 23:03:31 +00:00
log ( L_INFO " Disabling protocol %s " , p - > name ) ;
p - > disabled = 1 ;
2012-04-15 13:28:29 +00:00
p - > down_code = PDC_CMD_DISABLE ;
2017-09-19 17:55:37 +00:00
proto_set_message ( p , ( char * ) arg , - 1 ) ;
2021-06-19 18:50:18 +00:00
proto_shutdown ( p ) ;
2010-02-19 23:03:31 +00:00
cli_msg ( - 9 , " %s: disabled " , p - > name ) ;
}
void
2017-09-19 17:55:37 +00:00
proto_cmd_enable ( struct proto * p , uintptr_t arg , int cnt UNUSED )
2010-02-19 23:03:31 +00:00
{
if ( ! p - > disabled )
2016-01-26 10:48:58 +00:00
{
cli_msg ( - 10 , " %s: already enabled " , p - > name ) ;
return ;
}
2010-02-19 23:03:31 +00:00
log ( L_INFO " Enabling protocol %s " , p - > name ) ;
p - > disabled = 0 ;
2017-09-19 17:55:37 +00:00
proto_set_message ( p , ( char * ) arg , - 1 ) ;
2010-02-19 23:03:31 +00:00
proto_rethink_goal ( p ) ;
cli_msg ( - 11 , " %s: enabled " , p - > name ) ;
}
void
2017-09-19 17:55:37 +00:00
proto_cmd_restart ( struct proto * p , uintptr_t arg , int cnt UNUSED )
2010-02-19 23:03:31 +00:00
{
if ( p - > disabled )
2016-01-26 10:48:58 +00:00
{
cli_msg ( - 8 , " %s: already disabled " , p - > name ) ;
return ;
}
2010-02-19 23:03:31 +00:00
log ( L_INFO " Restarting protocol %s " , p - > name ) ;
p - > disabled = 1 ;
2012-04-15 13:28:29 +00:00
p - > down_code = PDC_CMD_RESTART ;
2017-09-19 17:55:37 +00:00
proto_set_message ( p , ( char * ) arg , - 1 ) ;
2021-06-19 18:50:18 +00:00
proto_shutdown ( p ) ;
2010-02-19 23:03:31 +00:00
p - > disabled = 0 ;
2021-06-19 18:50:18 +00:00
/* After the protocol shuts down, proto_rethink_goal() is run from proto_event. */
2010-02-19 23:03:31 +00:00
cli_msg ( - 12 , " %s: restarted " , p - > name ) ;
}
2024-05-02 09:39:34 +00:00
/*
 * Per-channel wrapper of a table refeed request created for a CLI reload,
 * keeping a back-pointer to the protocol-wide reload request so the last
 * finished channel can wake up the CLI continuation.
 */
struct channel_cmd_reload_request {
  struct rt_feeding_request cfr;	/* Embedded refeed request handed to the table code */
  struct proto_reload_request *prr;	/* Owning reload request; counts outstanding channels */
};
static void
2024-05-02 09:39:34 +00:00
channel_reload_done ( struct rt_feeding_request * cfr )
2023-11-02 13:33:00 +00:00
{
2024-05-02 09:39:34 +00:00
SKIP_BACK_DECLARE ( struct channel_cmd_reload_request , ccrfr , cfr , cfr ) ;
2023-11-02 13:33:00 +00:00
if ( atomic_fetch_sub_explicit ( & ccrfr - > prr - > counter , 1 , memory_order_acq_rel ) = = 1 )
ev_send_loop ( & main_birdloop , & ccrfr - > prr - > ev ) ;
}
2024-05-02 09:39:34 +00:00
static struct rt_feeding_request *
channel_create_reload_request ( struct proto_reload_request * prr )
2023-11-02 13:33:00 +00:00
{
2024-05-02 09:39:34 +00:00
if ( ! prr - > trie )
return NULL ;
2024-06-03 12:23:41 +00:00
2024-05-02 09:39:34 +00:00
/* Increase the refeed counter */
atomic_fetch_add_explicit ( & prr - > counter , 1 , memory_order_relaxed ) ;
ASSERT_DIE ( this_cli - > parser_pool ! = prr - > trie - > lp ) ;
struct channel_cmd_reload_request * req = lp_alloc ( prr - > trie - > lp , sizeof * req ) ;
* req = ( struct channel_cmd_reload_request ) {
. cfr = {
. done = channel_reload_done ,
. prefilter = {
. mode = TE_ADDR_TRIE ,
. trie = prr - > trie ,
} ,
} ,
. prr = prr ,
} ;
return & req - > cfr ;
2023-11-02 13:33:00 +00:00
}
2010-02-19 23:03:31 +00:00
/*
 * proto_cmd_reload - CLI `reload` handler for one protocol.
 *
 * @_prr carries a struct proto_reload_request with the direction
 * (CMD_RELOAD_IN / CMD_RELOAD_OUT) and an optional prefilter trie.
 * Import reload re-requests routes from the protocol; export reload
 * refeeds the tables towards it.
 */
void
proto_cmd_reload(struct proto *p, uintptr_t _prr, int cnt UNUSED)
{
  struct proto_reload_request *prr = (void *) _prr;
  struct channel *c;

  if (p->disabled)
  {
    cli_msg(-8, "%s: already disabled", p->name);
    return;
  }

  /* If the protocol in not UP, it has no routes */
  if (p->proto_state != PS_UP)
    return;

  /* All channels must support reload */
  if (prr->dir & CMD_RELOAD_IN)
    WALK_LIST(c, p->channels)
      if ((c->channel_state == CS_UP) && !channel_reloadable(c))
      {
	cli_msg(-8006, "%s: reload failed", p->name);
	return;
      }

  log(L_INFO "Reloading protocol %s", p->name);

  /* re-importing routes */
  WALK_LIST(c, p->channels)
    if (c->channel_state == CS_UP)
    {
      if (prr->dir & CMD_RELOAD_IN)
	channel_request_reload(c, channel_create_reload_request(prr));

      if (prr->dir & CMD_RELOAD_OUT)
	/* Export request may not be active — guard on its name */
	if (c->out_req.name)
	  rt_export_refeed(&c->out_req, channel_create_reload_request(prr));
    }

  cli_msg(-15, "%s: reloading", p->name);
}
2021-03-16 19:10:00 +00:00
extern void pipe_update_debug ( struct proto * P ) ;
2010-02-19 23:03:31 +00:00
/*
 * proto_cmd_debug - CLI `debug` handler: set protocol debug flags.
 */
void
proto_cmd_debug(struct proto *p, uintptr_t mask, int cnt UNUSED)
{
  p->debug = mask;

#ifdef CONFIG_PIPE
  /* Pipe propagates its debug settings further — see pipe_update_debug() */
  if (p->proto == &proto_pipe)
    pipe_update_debug(p);
#endif
}
/*
 * proto_cmd_mrtdump - CLI `mrtdump` handler: set protocol MRT dump flags.
 */
void
proto_cmd_mrtdump(struct proto *p, uintptr_t mask, int cnt UNUSED)
{
  p->mrtdump = mask;
}
static void
2020-04-08 20:25:15 +00:00
proto_apply_cmd_symbol ( const struct symbol * s , void ( * cmd ) ( struct proto * , uintptr_t , int ) , uintptr_t arg )
2010-02-19 23:03:31 +00:00
{
if ( s - > class ! = SYM_PROTO )
2016-01-26 10:48:58 +00:00
{
cli_msg ( 9002 , " %s is not a protocol " , s - > name ) ;
return ;
}
2010-02-19 23:03:31 +00:00
2021-09-08 15:30:09 +00:00
if ( s - > proto - > proto )
{
2021-06-19 18:50:18 +00:00
struct proto * p = s - > proto - > proto ;
PROTO_LOCKED_FROM_MAIN ( p )
cmd ( p , arg , 0 ) ;
2021-09-08 15:30:09 +00:00
cli_msg ( 0 , " " ) ;
}
else
cli_msg ( 9002 , " %s does not exist " , s - > name ) ;
1999-11-25 15:35:30 +00:00
}
1999-12-03 11:40:45 +00:00
2010-02-19 23:03:31 +00:00
static void
2020-04-08 20:25:15 +00:00
proto_apply_cmd_patt ( const char * patt , void ( * cmd ) ( struct proto * , uintptr_t , int ) , uintptr_t arg )
2010-02-19 23:03:31 +00:00
{
int cnt = 0 ;
2023-04-19 19:18:12 +00:00
WALK_TLIST ( proto , p , & global_proto_list )
2016-01-26 10:48:58 +00:00
if ( ! patt | | patmatch ( patt , p - > name ) )
2021-06-19 18:50:18 +00:00
PROTO_LOCKED_FROM_MAIN ( p )
cmd ( p , arg , cnt + + ) ;
2010-02-19 23:03:31 +00:00
if ( ! cnt )
cli_msg ( 8003 , " No protocols match " ) ;
else
cli_msg ( 0 , " " ) ;
}
void
2017-09-19 17:55:37 +00:00
proto_apply_cmd ( struct proto_spec ps , void ( * cmd ) ( struct proto * , uintptr_t , int ) ,
int restricted , uintptr_t arg )
2010-02-19 23:03:31 +00:00
{
2010-02-21 08:57:26 +00:00
if ( restricted & & cli_access_restricted ( ) )
return ;
2010-02-19 23:03:31 +00:00
if ( ps . patt )
proto_apply_cmd_patt ( ps . ptr , cmd , arg ) ;
else
proto_apply_cmd_symbol ( ps . ptr , cmd , arg ) ;
}
2024-07-04 11:13:38 +00:00
void
proto_apply_cmd_no_lock ( struct proto_spec ps , void ( * cmd ) ( struct proto * , union cmd_arg , int ) ,
int restricted , union cmd_arg arg )
{
if ( restricted & & cli_access_restricted ( ) )
return ;
if ( ps . patt )
{
int cnt = 0 ;
const char * patt = ps . ptr ;
WALK_TLIST ( proto , p , & global_proto_list )
if ( ! patt | | patmatch ( patt , p - > name ) )
cmd ( p , arg , cnt + + ) ;
if ( ! cnt )
cli_msg ( 8003 , " No protocols match " ) ;
else
cli_msg ( 0 , " " ) ;
}
else
{
const struct symbol * s = ps . ptr ;
if ( s - > class ! = SYM_PROTO )
{
cli_msg ( 9002 , " %s is not a protocol " , s - > name ) ;
return ;
}
if ( s - > proto - > proto )
{
struct proto * p = s - > proto - > proto ;
cmd ( p , arg , 0 ) ;
cli_msg ( 0 , " " ) ;
}
else
cli_msg ( 9002 , " %s does not exist " , s - > name ) ;
}
}
1999-12-03 11:40:45 +00:00
struct proto *
proto_get_named ( struct symbol * sym , struct protocol * pr )
{
2023-04-19 19:18:12 +00:00
struct proto * p ;
1999-12-03 11:40:45 +00:00
if ( sym )
2016-01-26 10:48:58 +00:00
{
if ( sym - > class ! = SYM_PROTO )
cf_error ( " %s: Not a protocol " , sym - > name ) ;
2019-02-15 12:53:17 +00:00
p = sym - > proto - > proto ;
2016-01-26 10:48:58 +00:00
if ( ! p | | p - > proto ! = pr )
cf_error ( " %s: Not a %s protocol " , sym - > name , pr - > name ) ;
}
1999-12-03 11:40:45 +00:00
else
2016-01-26 10:48:58 +00:00
{
p = NULL ;
2023-04-19 19:18:12 +00:00
WALK_TLIST ( proto , q , & global_proto_list )
2016-01-26 10:48:58 +00:00
if ( ( q - > proto = = pr ) & & ( q - > proto_state ! = PS_DOWN ) )
{
if ( p )
cf_error ( " There are multiple %s protocols running " , pr - > name ) ;
p = q ;
}
if ( ! p )
cf_error ( " There is no %s protocol running " , pr - > name ) ;
}
1999-12-03 11:40:45 +00:00
return p ;
}
2020-05-14 01:48:17 +00:00
/*
 * proto_iterate_named - iterate protocols for multi-result CLI commands.
 *
 * Called repeatedly with the previously returned protocol in @old
 * (NULL on the first call). With a symbol, exactly one protocol is
 * yielded; without one, all running instances of @proto are yielded,
 * separated by cli_separator(). Returns NULL (after closing the CLI
 * reply) when the iteration is finished.
 */
struct proto *
proto_iterate_named(struct symbol *sym, struct protocol *proto, struct proto *old)
{
  if (sym)
  {
    /* Just the first pass */
    if (old)
    {
      cli_msg(0, "");
      return NULL;
    }

    if (sym->class != SYM_PROTO)
      cf_error("%s: Not a protocol", sym->name);

    struct proto *p = sym->proto->proto;
    if (!p || (p->proto != proto))
      cf_error("%s: Not a %s protocol", sym->name, proto->name);

    return p;
  }
  else
  {
    /* Resume after @old, or start from the list head */
    for (struct proto *p = old ? old->n.next : global_proto_list.first;
	 p;
	 p = p->n.next)
    {
      if ((p->proto == proto) && (p->proto_state != PS_DOWN))
      {
	cli_separator(this_cli);
	return p;
      }
    }

    /* Not found anything during first pass */
    if (!old)
      cf_error("There is no %s protocol running", proto->name);

    /* No more items */
    cli_msg(0, "");
    return NULL;
  }
}
2024-05-31 10:27:59 +00:00
void
protos_attr_field_init ( void )
{
2024-08-27 13:02:38 +00:00
log ( " start of protos_attr_field_init " ) ;
2024-07-26 15:24:15 +00:00
int init_length = 16 ;
2024-07-04 11:13:38 +00:00
proto_state_table = mb_allocz ( & root_pool , sizeof ( struct proto_attrs ) ) ;
2024-08-30 10:30:13 +00:00
proto_state_table - > attrs = mb_allocz ( & root_pool , sizeof ( ea_list * _Atomic ) * init_length ) ;
proto_state_table - > channels_attrs = mb_allocz ( & root_pool , sizeof ( struct channel_attrs_list ) * init_length ) ;
2024-07-26 15:24:15 +00:00
proto_state_table - > length = init_length ;
2024-07-04 11:13:38 +00:00
proto_state_table - > proto_id_maker = mb_allocz ( & root_pool , sizeof ( struct hmap ) ) ;
proto_state_table - > channel_id_maker = mb_allocz ( & root_pool , sizeof ( struct hmap ) ) ;
2024-07-26 15:24:15 +00:00
hmap_init ( proto_state_table - > proto_id_maker , & root_pool , init_length ) ;
hmap_init ( proto_state_table - > channel_id_maker , & root_pool , init_length * 2 ) ;
2024-05-31 10:27:59 +00:00
//TODO free? or have self pool?
2024-08-27 13:02:38 +00:00
log ( " end of protos_attr_field_init (%x %x %x %x ) " , proto_state_table , proto_state_table - > attrs , proto_state_table - > channels_attrs , proto_state_table - > proto_id_maker ) ;
2024-05-31 10:27:59 +00:00
}
void
protos_attr_field_grow ( void )
{
2024-07-04 11:13:38 +00:00
log ( " grow " ) ;
2024-08-27 13:02:38 +00:00
ea_list * _Atomic * new_field = mb_allocz ( & root_pool , proto_state_table - > length * sizeof ( ea_list * _Atomic ) * 2 ) ;
log ( " for realocated field allocated %x " , new_field ) ;
2024-08-30 10:30:13 +00:00
struct channel_attrs_list * new_chann = mb_allocz ( & root_pool , proto_state_table - > length * sizeof ( struct channel_attrs_list ) * 2 ) ;
2024-08-27 13:02:38 +00:00
log ( " for realocated chann field allocated %x " , new_chann ) ;
memcpy ( new_field , proto_state_table - > attrs , proto_state_table - > length * ( sizeof ( ea_list * _Atomic ) ) ) ;
memcpy ( new_chann , proto_state_table - > channels_attrs , proto_state_table - > length * ( sizeof ( list ) ) ) ;
2024-07-04 11:13:38 +00:00
atomic_store ( & proto_state_table - > attrs , new_field ) ;
2024-08-30 10:30:13 +00:00
atomic_store ( & proto_state_table - > channels_attrs , new_chann ) ;
2024-07-04 11:13:38 +00:00
atomic_store ( & proto_state_table - > length , ( proto_state_table - > length * 2 ) ) ;
2024-05-31 10:27:59 +00:00
}
void
2024-06-18 13:33:07 +00:00
cleanup_journal_item ( struct lfjour * journal UNUSED , struct lfjour_item * i )
2024-05-31 10:27:59 +00:00
{
2024-08-27 13:02:38 +00:00
//log("cleanup_journal_item");
2024-05-31 10:27:59 +00:00
struct proto_pending_update * pupdate = SKIP_BACK ( struct proto_pending_update , li , i ) ;
2024-08-27 13:02:38 +00:00
//log("cleaning %x", pupdate->proto_attr, ea_get_int(pupdate->proto_attr, &ea_deleted, 0));
2024-07-26 15:24:15 +00:00
//ea_free_later(pupdate->old_attr);
int deleting = ea_get_int ( pupdate - > proto_attr , & ea_deleted , 0 ) ;
//TODO temporal eatters
if ( deleting )
2024-07-04 11:13:38 +00:00
{
2024-08-27 13:02:38 +00:00
//log("try to delete");
2024-05-31 10:27:59 +00:00
ea_free_later ( pupdate - > proto_attr ) ;
2024-07-26 15:24:15 +00:00
}
2024-05-31 10:27:59 +00:00
}
/* Stop callback for the journal birdloops; nothing to clean up. */
void
after_journal_birdloop_stop(void *arg UNUSED) { }
2024-05-31 10:27:59 +00:00
/*
 * init_journal - allocate and configure one state-update journal with
 * its own birdloop named @loop_name and item size @item_size.
 *
 * NOTE(review): the new journal is always stored into the global
 * proto_journal, even when creating the channel journal — the caller is
 * responsible for moving the pointer to the right global afterwards;
 * confirm channel_journal actually gets assigned somewhere.
 */
void
init_journal(int item_size, char *loop_name)
{
  proto_journal = mb_allocz(&root_pool, sizeof(struct lfjour));
  log("for new jour allocated %x", proto_journal);
  struct settle_config cf = {.min = 0, .max = 0};  /* no settling delay */
  proto_journal->item_done = cleanup_journal_item;
  proto_journal->item_size = item_size;
  proto_journal->loop = birdloop_new(&root_pool, DOMAIN_ORDER(service), 1, loop_name);
  proto_journal->domain = proto_journal_domain.rtable;
  lfjour_init(proto_journal, &cf);
}
2024-07-04 11:13:38 +00:00
void
init_journals ( void )
{
protos_attr_field_init ( ) ;
2024-08-28 14:25:14 +00:00
proto_journal_domain = DOMAIN_NEW ( rtable ) ;
2024-07-04 11:13:38 +00:00
init_journal ( sizeof ( struct proto_pending_update ) , " proto journal loop " ) ;
init_journal ( sizeof ( struct channel_pending_update ) , " channel journal loop " ) ;
}
2024-05-31 10:27:59 +00:00
/*
 * proto_state_to_eattr - build an interned attribute list describing the
 * current state of protocol @p: name, protocol type, main table, state,
 * previous state (@old_state), last-change time, id and the deletion
 * flag (@proto_deleting). BGP adds its own attributes via
 * bgp_state_to_eattr().
 */
ea_list *
proto_state_to_eattr(struct proto *p, int old_state, int proto_deleting)
{
  //log("protocol %s to eattr", p->name);
  struct {
    ea_list l;
    /* 8 unconditional + 1 conditional generic attrs, up to 19 BGP attrs */
    eattr a[9 + 19];
  } eattrs;
  eattrs.l = (ea_list) {};

  eattrs.a[eattrs.l.count++] = EA_LITERAL_STORE_STRING(&ea_name, 0, p->name);
  //eattrs.a[eattrs.l.count++] = EA_LITERAL_STORE_STRING(&ea_protocol_name, 0, p->proto->name); this info is stored in ea_protocol_type
  eattrs.a[eattrs.l.count++] = EA_LITERAL_STORE_PTR(&ea_protocol_type, 0, p->proto);
  /* Table attribute only exists for protocols with a main channel */
  if (p->main_channel)
    eattrs.a[eattrs.l.count++] = EA_LITERAL_STORE_STRING(&ea_table, 0, p->main_channel->table->name);
  eattrs.a[eattrs.l.count++] = EA_LITERAL_EMBEDDED(&ea_state, 0, p->proto_state);
  eattrs.a[eattrs.l.count++] = EA_LITERAL_EMBEDDED(&ea_old_state, 0, old_state);
  eattrs.a[eattrs.l.count++] = EA_LITERAL_STORE_ADATA(&ea_last_modified, 0, &p->last_state_change, sizeof(btime));
  eattrs.a[eattrs.l.count++] = EA_LITERAL_EMBEDDED(&ea_proto_id, 0, p->id);
  eattrs.a[eattrs.l.count++] = EA_LITERAL_EMBEDDED(&ea_deleted, 0, proto_deleting);

  if (p->proto == &proto_bgp)
    bgp_state_to_eattr(p, &eattrs.l, eattrs.a);

  return ea_lookup_slow(&eattrs.l, 0, EALS_CUSTOM);
}
ea_list *
channel_state_to_eattr ( struct channel * ch , int proto_deleting )
{
struct {
ea_list l ;
2024-08-27 13:02:38 +00:00
eattr a [ 6 ] ;
2024-07-04 11:13:38 +00:00
} eattrs ;
2024-07-26 15:24:15 +00:00
eattrs . l = ( ea_list ) { } ;
2024-07-04 11:13:38 +00:00
eattrs . a [ eattrs . l . count + + ] = EA_LITERAL_STORE_STRING ( & ea_name , 0 , ch - > name ) ;
eattrs . a [ eattrs . l . count + + ] = EA_LITERAL_EMBEDDED ( & ea_proto_id , 0 , ch - > proto - > id ) ;
2024-08-27 13:02:38 +00:00
eattrs . a [ eattrs . l . count + + ] = EA_LITERAL_EMBEDDED ( & ea_channel_id , 0 , ch - > id ) ;
2024-07-04 11:13:38 +00:00
eattrs . a [ eattrs . l . count + + ] = EA_LITERAL_EMBEDDED ( & ea_deleted , 0 , proto_deleting ) ;
2024-08-19 13:04:13 +00:00
eattrs . a [ eattrs . l . count + + ] = EA_LITERAL_STORE_PTR ( & ea_rtable , 0 , ch - > table ) ;
2024-08-27 13:02:38 +00:00
log ( " ea_rtable added (%x), eattrs %x, str %x " , ch - > table , eattrs . l , eattrs . a [ 0 ] . u . ad - > data ) ;
2024-08-19 13:04:13 +00:00
2024-07-04 11:13:38 +00:00
if ( ch - > proto - > proto = = & proto_bgp & & ch ! = ch - > proto - > mpls_channel )
{
struct bgp_channel * bc = ( struct bgp_channel * ) ch ;
eattrs . a [ eattrs . l . count + + ] = EA_LITERAL_EMBEDDED ( & ea_bgp_afi , 0 , bc - > afi ) ;
}
2024-08-27 13:02:38 +00:00
//log("to lookup goes %i (%i)", eattrs.l, &eattrs.l);
2024-05-31 10:27:59 +00:00
return ea_lookup_slow ( & eattrs . l , 0 , EALS_CUSTOM ) ;
}
2024-07-04 11:13:38 +00:00
/* TODO(review): MPLS channels — apparently they must not be skipped in the
 * channel state tracking; clarify the intent and handle them explicitly. */
2024-05-31 10:27:59 +00:00
/*
 * proto_journal_state_push - publish a protocol state change.
 *
 * Interns @attr (with a refreshed last-modified timestamp), optionally
 * stores it as the protocol's current published state (@save_to_jour),
 * and pushes an update item into the protocol journal for recipients.
 */
void
proto_journal_state_push(ea_list *attr, struct proto *p, int save_to_jour)
{
  ea_set_attr(&attr, EA_LITERAL_STORE_ADATA(&ea_last_modified, 0, &p->last_state_change, sizeof(btime)));
  attr = ea_lookup(attr, 0, EALS_CUSTOM);
  /* NOTE(review): plain read of an _Atomic slot — should this be an
   * explicit atomic_load? confirm the intended memory ordering */
  ea_list *old_attr = proto_state_table->attrs[p->id];
  if (save_to_jour)
  {
    atomic_store(&proto_state_table->attrs[p->id], attr);
    /* NOTE(review): leftover debugging assert on a BGP-specific attribute?
     * confirm whether it is still needed */
    ASSERT(ea_get_int(attr, &ea_bgp_remote_open_msg_len, 0) == 0);
  }

  LOCK_DOMAIN(rtable, proto_journal_domain);
  struct proto_pending_update *pupdate = SKIP_BACK(struct proto_pending_update, li, lfjour_push_prepare(proto_journal));
  if (!pupdate)
  {
    /* Journal has no recipients (or cannot accept) — nothing to push */
    UNLOCK_DOMAIN(rtable, proto_journal_domain);
    log("why am i not creating pupdate?");
    return;
  }

  *pupdate = (struct proto_pending_update) {
    .li = pupdate->li, /* Keep the item's internal state */
    .proto_attr = save_to_jour ? proto_state_table->attrs[p->id] : attr,
    .old_attr = old_attr,
    .protocol = p
  };
  lfjour_push_commit(proto_journal);
  UNLOCK_DOMAIN(rtable, proto_journal_domain);
}
2024-07-04 11:13:38 +00:00
/*
 * proto_get_state_list - get the published state attributes of the
 * protocol with the given @id.
 *
 * Takes an extra reference released via ea_free_later(), so the caller
 * may use the returned list until the current deferred-free cycle ends.
 * NOTE(review): the slot is _Atomic but read without atomic_load —
 * confirm the intended ordering.
 */
ea_list *
proto_get_state_list(int id)
{
  rcu_read_lock();
  ea_list *eal = proto_state_table->attrs[id];
  if (eal)
    ea_free_later(ea_ref(eal));
  rcu_read_unlock();
  return eal;
}
struct channel_attrs *
get_channel_ea ( struct channel * ch )
{
struct channel_attrs * chan_att ;
2024-08-30 10:30:13 +00:00
WALK_TLIST ( channel_attrs , chan_att , & proto_state_table - > channels_attrs [ ch - > proto - > id ] )
2024-07-04 11:13:38 +00:00
{
2024-07-26 15:24:15 +00:00
const int id = ea_get_int ( chan_att - > attrs , & ea_channel_id , 0 ) ;
2024-08-30 10:30:13 +00:00
log ( " %x %i " , chan_att , id ) ;
2024-07-04 11:13:38 +00:00
if ( ch - > id = = id )
return chan_att ;
}
return NULL ;
}
/*
 * channel_journal_state_push - publish a channel state change into the
 * channel journal.
 *
 * NOTE(review): the unconditional `return` below disables everything past
 * the ea_lookup() — the journal push is dead code and the interned @attr
 * reference is never stored or released here. Confirm whether the channel
 * journal should be re-enabled or this reference dropped.
 */
void
channel_journal_state_push(ea_list *attr, struct channel *ch)
{
  attr = ea_lookup(attr, 0, EALS_CUSTOM);

  return; // do we need the journal?

  ea_list *old_attr = get_channel_ea(ch)->attrs;
  struct channel_attrs *ch_attr = get_channel_ea(ch); //TODO is this ok? What about add new tail and delete old chan attr?
  ch_attr->attrs = attr;

  LOCK_DOMAIN(rtable, proto_journal_domain);
  struct channel_pending_update *pupdate = SKIP_BACK(struct channel_pending_update, li, lfjour_push_prepare(channel_journal));
  if (!pupdate)
  {
    UNLOCK_DOMAIN(rtable, proto_journal_domain);
    return;
  }

  *pupdate = (struct channel_pending_update) {
    .li = pupdate->li, /* Keep the item's internal state */
    .channel_attr = attr,
    .old_attr = old_attr,
    .channel = ch
  };
  lfjour_push_commit(channel_journal);
  UNLOCK_DOMAIN(rtable, proto_journal_domain);
}
/*void
channel_journal_state_change ( struct channel * ch , int old_state , int new_state )
{
ea_list * eal = channel_state_to_eattr ( ch , new_state ) ;
struct channel_attrs * chan_att = get_channel_ea ( ch ) ;
atomic_store ( & chan_att - > attrs , eal ) ;
channel_journal_state_changed ( eal , old_eal , ch ) ;
} */
void
2024-07-26 15:24:15 +00:00
add_journal_channel ( struct channel * ch )
2024-07-04 11:13:38 +00:00
{
2024-08-30 10:30:13 +00:00
//if (!NODE_VALID(HEAD(proto_state_table->channels_attrs[ch->proto->id])))
// init_list(&proto_state_table->channels_attrs[ch->proto->id]); // if we realocated channels lists, earlier inicialization would be problematic. But it does not seem to be problem for nonempty lists
2024-08-27 13:02:38 +00:00
2024-07-26 15:24:15 +00:00
ea_list * eal = channel_state_to_eattr ( ch , 0 ) ;
2024-08-30 10:30:13 +00:00
struct channel_attrs * attr = mb_allocz ( & root_pool , sizeof ( struct channel_attrs ) ) ; //TODO free
2024-08-27 13:02:38 +00:00
log ( " for new channel allocated %x, eal %x " , attr , eal ) ;
2024-07-26 15:24:15 +00:00
attr - > attrs = eal ;
2024-08-30 10:30:13 +00:00
log ( " chann id %i %x " , ea_get_int ( eal , & ea_channel_id , 0 ) , eal ) ;
channel_attrs_add_tail ( & proto_state_table - > channels_attrs [ ch - > proto - > id ] , attr ) ;
log ( " added tail to lis %x ch id %i, p id %i " ,
& proto_state_table - > channels_attrs [ ch - > proto - > id ] , ch - > id , ch - > proto - > id ) ;
2024-08-27 13:02:38 +00:00
if ( get_channel_ea ( ch ) = = 0 )
bug ( " channel not stored ch id %i, p id %i " , ch - > id , ch - > proto - > id ) ;
2024-07-04 11:13:38 +00:00
}
2024-05-31 10:27:59 +00:00
void dummy_log_proto_attr_list ( void )
{
//debugging function
ea_list * eal ;
2024-07-26 15:24:15 +00:00
for ( u32 i = 0 ; i < proto_state_table - > length ; i + + )
2024-05-31 10:27:59 +00:00
{
2024-07-04 11:13:38 +00:00
eal = proto_get_state_list ( i ) ;
2024-05-31 10:27:59 +00:00
if ( eal )
{
2024-07-04 11:13:38 +00:00
const char * name = ea_find ( eal , & ea_name ) - > u . ad - > data ;
2024-08-27 13:02:38 +00:00
struct protocol * proto = ( struct protocol * ) ea_get_ptr ( eal , & ea_protocol_type , 0 ) ;
2024-07-04 11:13:38 +00:00
const int state = ea_get_int ( eal , & ea_state , 0 ) ;
const char * table = ea_get_adata ( eal , & ea_table ) - > data ;
const btime * time = ( btime * ) ea_get_adata ( eal , & ea_last_modified ) - > data ;
2024-08-27 13:02:38 +00:00
log ( " protocol %s of type %s is in state %i (table %s, last modified %t) " , name , proto - > name , state , table , time ) ;
2024-05-31 10:27:59 +00:00
}
}
}
void
fc_for_dummy_recipient ( void * rec )
{
struct lfjour_item * last_up ;
struct proto_pending_update * pupdate ;
while ( last_up = lfjour_get ( ( struct lfjour_recipient * ) rec ) )
{
pupdate = SKIP_BACK ( struct proto_pending_update , li , last_up ) ;
2024-07-04 11:13:38 +00:00
const char * name = ea_get_adata ( pupdate - > proto_attr , & ea_name ) - > data ;
int state = ea_get_int ( pupdate - > proto_attr , & ea_state , 0 ) ;
2024-05-31 10:27:59 +00:00
if ( name & & state )
2024-07-04 11:13:38 +00:00
log ( " protocol %s changed state to %i " , name , state ) ;
2024-05-31 10:27:59 +00:00
else
log ( " not found in %i " , pupdate - > proto_attr ) ;
2024-08-28 14:25:14 +00:00
lfjour_release ( rec , last_up ) ;
2024-05-31 10:27:59 +00:00
dummy_log_proto_attr_list ( ) ;
}
}
/*
 * create_dummy_recipient - register a debugging recipient on the protocol
 * journal, running fc_for_dummy_recipient() in its own birdloop whenever
 * new state updates are pushed.
 */
void
create_dummy_recipient(void)
{
  struct lfjour_recipient *r = mb_allocz(&root_pool, sizeof(struct lfjour_recipient));
  r->event = ev_new_init(&root_pool, fc_for_dummy_recipient, r);
  struct birdloop *loop = birdloop_new(&root_pool, DOMAIN_ORDER(service), 1, "dummy recipient loop");
  r->target = birdloop_event_list(loop);
  LOCK_DOMAIN(rtable, proto_journal_domain);
  lfjour_register(proto_journal, r);
  UNLOCK_DOMAIN(rtable, proto_journal_domain);
  /* Dump the initial state once at registration time */
  dummy_log_proto_attr_list();
}