/*
 *	BIRD -- Routing Tables
 *
 *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */
/**
 * DOC: Routing tables
 *
 * Routing tables are probably the most important structures BIRD uses. They
 * hold all the information about known networks, the associated routes and
 * their attributes.
 *
 * There are multiple routing tables (a primary one together with any
 * number of secondary ones if requested by the configuration). Each table
 * is basically a FIB containing entries describing the individual
 * destination networks. For each network (represented by structure &net),
 * there is a one-way linked list of route entries (&rte), the first entry
 * on the list being the best one (i.e., the one we currently use
 * for routing); the order of the other ones is undetermined.
 *
 * The &rte contains information specific to the route (preference, protocol
 * metrics, time of last modification etc.) and a pointer to a &rta structure
 * (see the route attribute module for a precise explanation) holding the
 * remaining route attributes which are expected to be shared by multiple
 * routes in order to conserve memory.
 */
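/*
 * A minimal illustrative sketch (not from the original sources) of the
 * structure described above: walking the route list of one network entry,
 * best route first. 'tab' and 'addr' are hypothetical; the net/rte fields
 * are the ones used throughout this file.
 *
 *   net *n = net_find(tab, addr);
 *   for (rte *e = n ? n->routes : NULL; e; e = e->next)
 *     debug("%N pref=%d proto=%s\n", n->n.addr, e->pref, e->attrs->src->proto->name);
 */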
#undef LOCAL_DEBUG

#include "nest/bird.h"
#include "nest/route.h"
#include "nest/protocol.h"
#include "nest/cli.h"
#include "nest/iface.h"
#include "lib/resource.h"
#include "lib/event.h"
#include "lib/string.h"
#include "conf/conf.h"
#include "filter/filter.h"
#include "lib/alloca.h"
pool *rt_table_pool;

static slab *rte_slab;
static linpool *rte_update_pool;

static list routing_tables;

static void rt_free_hostcache(rtable *tab);
static void rt_notify_hostcache(rtable *tab, net *net);
static void rt_update_hostcache(rtable *tab);
static void rt_next_hop_update(rtable *tab);
static inline void rt_prune_table(rtable *tab);


static inline struct ea_list *
make_tmp_attrs(struct rte *rt, struct linpool *pool)
{
  struct ea_list *(*mta)(struct rte *rt, struct linpool *pool);
  mta = rt->attrs->src->proto->make_tmp_attrs;
  return mta ? mta(rt, pool) : NULL;
}
/* Like fib_route(), but skips empty net entries */
static inline void *
net_route_ip4(struct fib *f, net_addr_ip4 *n)
{
  net *r;

  while (r = fib_find(f, (net_addr *) n),
	 !(r && rte_is_valid(r->routes)) && (n->pxlen > 0))
  {
    n->pxlen--;
    ip4_clrbit(&n->prefix, n->pxlen);
  }

  return r;
}

static inline void *
net_route_ip6(struct fib *f, net_addr_ip6 *n)
{
  net *r;

  while (r = fib_find(f, (net_addr *) n),
	 !(r && rte_is_valid(r->routes)) && (n->pxlen > 0))
  {
    n->pxlen--;
    ip6_clrbit(&n->prefix, n->pxlen);
  }

  return r;
}
void *
net_route(rtable *tab, const net_addr *n)
{
  ASSERT(tab->addr_type == n->type);

  net_addr *n0 = alloca(n->length);
  net_copy(n0, n);

  switch (n->type)
  {
  case NET_IP4:
  case NET_VPN4:
  case NET_ROA4:
    return net_route_ip4(&tab->fib, (net_addr_ip4 *) n0);

  case NET_IP6:
  case NET_VPN6:
  case NET_ROA6:
    return net_route_ip6(&tab->fib, (net_addr_ip6 *) n0);

  default:
    return NULL;
  }
}
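/*
 * A minimal usage sketch (not from the original sources): net_route() is a
 * longest-prefix lookup that keeps shortening the prefix until a net entry
 * with a valid route is found. 'tab' (an IPv4 table) and 'a' (an ip4_addr)
 * are hypothetical.
 *
 *   net_addr_ip4 key = NET_ADDR_IP4(a, 32);
 *   net *n = net_route(tab, (net_addr *) &key);
 *   if (n)
 *     debug("best matching prefix: %N\n", n->n.addr);
 */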
static int
net_roa_check_ip4(rtable *tab, const net_addr_ip4 *px, u32 asn)
{
  struct net_addr_roa4 n = NET_ADDR_ROA4(px->prefix, px->pxlen, 0, 0);
  struct fib_node *fn;
  int anything = 0;

  while (1)
  {
    for (fn = fib_get_chain(&tab->fib, (net_addr *) &n); fn; fn = fn->next)
    {
      net_addr_roa4 *roa = (void *) fn->addr;
      net *r = fib_node_to_user(&tab->fib, fn);

      if (net_equal_prefix_roa4(roa, &n) && rte_is_valid(r->routes))
      {
	anything = 1;
	if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
	  return ROA_VALID;
      }
    }

    if (n.pxlen == 0)
      break;

    n.pxlen--;
    ip4_clrbit(&n.prefix, n.pxlen);
  }

  return anything ? ROA_INVALID : ROA_UNKNOWN;
}

static int
net_roa_check_ip6(rtable *tab, const net_addr_ip6 *px, u32 asn)
{
  struct net_addr_roa6 n = NET_ADDR_ROA6(px->prefix, px->pxlen, 0, 0);
  struct fib_node *fn;
  int anything = 0;

  while (1)
  {
    for (fn = fib_get_chain(&tab->fib, (net_addr *) &n); fn; fn = fn->next)
    {
      net_addr_roa6 *roa = (void *) fn->addr;
      net *r = fib_node_to_user(&tab->fib, fn);

      if (net_equal_prefix_roa6(roa, &n) && rte_is_valid(r->routes))
      {
	anything = 1;
	if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
	  return ROA_VALID;
      }
    }

    if (n.pxlen == 0)
      break;

    n.pxlen--;
    ip6_clrbit(&n.prefix, n.pxlen);
  }

  return anything ? ROA_INVALID : ROA_UNKNOWN;
}
/**
 * roa_check - check validity of route origination in a ROA table
 * @tab: ROA table
 * @n: network prefix to check
 * @asn: AS number of network prefix
 *
 * Implements RFC 6483 route validation for the given network prefix. The
 * procedure is to find all candidate ROAs - ROAs whose prefixes cover the given
 * network prefix. If there is no candidate ROA, return ROA_UNKNOWN. If there is
 * a candidate ROA with matching ASN and maxlen field greater than or equal to
 * the given prefix length, return ROA_VALID. Otherwise, return ROA_INVALID. If
 * the caller cannot determine the origin AS, 0 could be used (in that case
 * ROA_VALID cannot happen). Table @tab must have type NET_ROA4 or NET_ROA6,
 * network @n must have type NET_IP4 or NET_IP6, respectively.
 */
int
net_roa_check(rtable *tab, const net_addr *n, u32 asn)
{
  if ((tab->addr_type == NET_ROA4) && (n->type == NET_IP4))
    return net_roa_check_ip4(tab, (const net_addr_ip4 *) n, asn);
  else if ((tab->addr_type == NET_ROA6) && (n->type == NET_IP6))
    return net_roa_check_ip6(tab, (const net_addr_ip6 *) n, asn);
  else
    return ROA_UNKNOWN;	/* Should not happen */
}
/**
 * rte_find - find a route
 * @net: network node
 * @src: route source
 *
 * The rte_find() function returns a route for destination @net
 * which is from route source @src.
 */
rte *
rte_find(net *net, struct rte_src *src)
{
  rte *e = net->routes;

  while (e && e->attrs->src != src)
    e = e->next;
  return e;
}
/**
 * rte_get_temp - get a temporary &rte
 * @a: attributes to assign to the new route (a &rta; in case it's
 * un-cached, rte_update() will create a cached copy automatically)
 *
 * Create a temporary &rte and bind it with the attributes @a.
 * The route preference is left at zero; the default preference of the
 * channel is assigned later during rte_update().
 */
rte *
rte_get_temp(rta *a)
{
  rte *e = sl_alloc(rte_slab);

  e->attrs = a;
  e->flags = 0;
  e->pref = 0;
  return e;
}
rte *
rte_do_cow(rte *r)
{
  rte *e = sl_alloc(rte_slab);

  memcpy(e, r, sizeof(rte));
  e->attrs = rta_clone(r->attrs);
  e->flags = 0;
  return e;
}
/**
 * rte_cow_rta - get a private writable copy of &rte with writable &rta
 * @r: a route entry to be copied
 * @lp: a linpool from which to allocate &rta
 *
 * rte_cow_rta() takes a &rte and prepares it and the associated &rta for
 * modification. There are three possibilities: First, both &rte and &rta are
 * private copies, in that case they are returned unchanged. Second, &rte is a
 * private copy, but &rta is cached, in that case &rta is duplicated using
 * rta_do_cow(). Third, &rte is shared and &rta is cached, in that case
 * both structures are duplicated by rte_do_cow() and rta_do_cow().
 *
 * Note that in the second case, the cached &rta loses one reference, while the
 * private copy created by rta_do_cow() is a shallow copy sharing indirect data
 * (eattrs, nexthops, ...) with it. To work properly, the original shared &rta
 * should have another reference during the life of the created private copy.
 *
 * Result: a pointer to the new writable &rte with writable &rta.
 */
rte *
rte_cow_rta(rte *r, linpool *lp)
{
  if (!rta_is_cached(r->attrs))
    return r;

  rte *e = rte_cow(r);
  rta *a = rta_do_cow(r->attrs, lp);
  rta_free(e->attrs);
  e->attrs = a;
  return e;
}
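/*
 * A minimal usage sketch (not from the original sources) of the usual pattern
 * before modifying attributes of a possibly shared route, e.g. in an export
 * path. 'r' and 'pool' are hypothetical (pool would typically be
 * rte_update_pool); igp_metric stands in for any &rta field to be changed.
 *
 *   r = rte_cow_rta(r, pool);    // both r and r->attrs are now private
 *   r->attrs->igp_metric = 10;   // safe to modify the writable &rta
 *
 * As noted above, keep the original cached &rta referenced while the shallow
 * private copy is alive.
 */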
static int				/* Actually better or at least as good as */
rte_better(rte *new, rte *old)
{
  int (*better)(rte *, rte *);

  if (!rte_is_valid(old))
    return 1;
  if (!rte_is_valid(new))
    return 0;

  if (new->pref > old->pref)
    return 1;
  if (new->pref < old->pref)
    return 0;
  if (new->attrs->src->proto->proto != old->attrs->src->proto->proto)
  {
    /*
     *  If the user has configured protocol preferences, so that two different protocols
     *  have the same preference, try to break the tie by comparing addresses. Not too
     *  useful, but keeps the ordering of routes unambiguous.
     */
    return new->attrs->src->proto->proto > old->attrs->src->proto->proto;
  }
  if (better = new->attrs->src->proto->rte_better)
    return better(new, old);
  return 0;
}
static int
rte_mergable(rte *pri, rte *sec)
{
  int (*mergable)(rte *, rte *);

  if (!rte_is_valid(pri) || !rte_is_valid(sec))
    return 0;

  if (pri->pref != sec->pref)
    return 0;

  if (pri->attrs->src->proto->proto != sec->attrs->src->proto->proto)
    return 0;

  if (mergable = pri->attrs->src->proto->rte_mergable)
    return mergable(pri, sec);

  return 0;
}
static void
rte_trace(struct proto *p, rte *e, int dir, char *msg)
{
  log(L_TRACE "%s %c %s %N %s", p->name, dir, msg, e->net->n.addr, rta_dest_name(e->attrs->dest));
}

static inline void
rte_trace_in(uint flag, struct proto *p, rte *e, char *msg)
{
  if (p->debug & flag)
    rte_trace(p, e, '>', msg);
}

static inline void
rte_trace_out(uint flag, struct proto *p, rte *e, char *msg)
{
  if (p->debug & flag)
    rte_trace(p, e, '<', msg);
}
static rte *
export_filter_(struct channel *c, rte *rt0, rte **rt_free, ea_list **tmpa, linpool *pool, int silent)
{
  struct proto *p = c->proto;
  struct filter *filter = c->out_filter;
  struct proto_stats *stats = &c->stats;
  ea_list *tmpb = NULL;
  rte *rt;
  int v;

  rt = rt0;
  *rt_free = NULL;

  if (!tmpa)
    tmpa = &tmpb;

  *tmpa = make_tmp_attrs(rt, pool);

  v = p->import_control ? p->import_control(p, &rt, tmpa, pool) : 0;
  if (v < 0)
  {
    if (silent)
      goto reject;

    stats->exp_updates_rejected++;
    if (v == RIC_REJECT)
      rte_trace_out(D_FILTERS, p, rt, "rejected by protocol");
    goto reject;
  }
  if (v > 0)
  {
    if (!silent)
      rte_trace_out(D_FILTERS, p, rt, "forced accept by protocol");
    goto accept;
  }

  v = filter && ((filter == FILTER_REJECT) ||
		 (f_run(filter, &rt, tmpa, pool, FF_FORCE_TMPATTR) > F_ACCEPT));
  if (v)
  {
    if (silent)
      goto reject;

    stats->exp_updates_filtered++;
    rte_trace_out(D_FILTERS, p, rt, "filtered out");
    goto reject;
  }

 accept:
  if (rt != rt0)
    *rt_free = rt;
  return rt;

 reject:
  /* Discard temporary rte */
  if (rt != rt0)
    rte_free(rt);
  return NULL;
}

static inline rte *
export_filter(struct channel *c, rte *rt0, rte **rt_free, ea_list **tmpa, int silent)
{
  return export_filter_(c, rt0, rt_free, tmpa, rte_update_pool, silent);
}
static void
do_rt_notify(struct channel *c, net *net, rte *new, rte *old, ea_list *tmpa, int refeed)
{
  struct proto *p = c->proto;
  struct proto_stats *stats = &c->stats;

  /*
   * First, apply export limit.
   *
   * Export route limits have several problems. Because the exp_routes
   * counter is reset before refeed, we don't really know whether the
   * limit is breached and whether the update is new or not. Therefore
   * the number of really exported routes may exceed the limit
   * temporarily (routes exported before and new routes in refeed).
   *
   * A minor advantage is that if the limit is decreased and refeed is
   * requested, the number of exported routes really decreases.
   *
   * The second problem is that with export limits, we don't know whether
   * old was really exported (it might be blocked by limit). When a
   * withdraw is exported, we announce it even when the previous
   * update was blocked. This is not a big issue, but the same problem
   * is in updating the exp_routes counter. Therefore, to be consistent in
   * increases and decreases of exp_routes, we count exported routes
   * regardless of blocking by limits.
   *
   * A similar problem is in handling updates - when a new route is
   * received and blocking is active, the route would be blocked, but
   * when an update for the route is received later, the update
   * would be propagated (as old != NULL). Therefore, we have to block
   * also non-new updates (contrary to import blocking).
   */

  struct channel_limit *l = &c->out_limit;
  if (l->action && new)
  {
    if ((!old || refeed) && (stats->exp_routes >= l->limit))
      channel_notify_limit(c, l, PLD_OUT, stats->exp_routes);

    if (l->state == PLS_BLOCKED)
    {
      stats->exp_routes++;	/* see note above */
      stats->exp_updates_rejected++;
      rte_trace_out(D_FILTERS, p, new, "rejected [limit]");
      new = NULL;

      if (!old)
	return;
    }
  }

  if (new)
    stats->exp_updates_accepted++;
  else
    stats->exp_withdraws_accepted++;

  /* Hack: We do not decrease exp_routes during refeed, we instead
     reset exp_routes at the start of refeed. */
  if (new)
    stats->exp_routes++;
  if (old && !refeed)
    stats->exp_routes--;

  if (p->debug & D_ROUTES)
  {
    if (new && old)
      rte_trace_out(D_ROUTES, p, new, "replaced");
    else if (new)
      rte_trace_out(D_ROUTES, p, new, "added");
    else if (old)
      rte_trace_out(D_ROUTES, p, old, "removed");
  }
  if (!new)
    p->rt_notify(p, c, net, NULL, old, NULL);
  else if (tmpa)
  {
    ea_list *t = tmpa;
    while (t->next)
      t = t->next;
    t->next = new->attrs->eattrs;
    p->rt_notify(p, c, net, new, old, tmpa);
    t->next = NULL;
  }
  else
    p->rt_notify(p, c, net, new, old, new->attrs->eattrs);
}
static void
rt_notify_basic(struct channel *c, net *net, rte *new0, rte *old0, int refeed)
{
  struct proto *p = c->proto;

  rte *new = new0;
  rte *old = old0;
  rte *new_free = NULL;
  rte *old_free = NULL;
  ea_list *tmpa = NULL;

  if (new)
    c->stats.exp_updates_received++;
  else
    c->stats.exp_withdraws_received++;

  /*
   * This is a tricky part - we don't know whether route 'old' was
   * exported to protocol 'p' or was filtered by the export filter.
   * We try to run the export filter to know this to have a correct
   * value in the 'old' argument of rte_update (and a proper filter value).
   *
   * FIXME - this is broken because 'configure soft' may change
   * filters but keep routes. Refeed is expected to be called after
   * change of the filters and with old == new, therefore we do not
   * even try to run the filter on an old route. This may lead to
   * 'spurious withdraws' but ensures that there are no 'missing
   * withdraws'.
   *
   * This is not completely safe as there is a window between
   * reconfiguration and the end of refeed - if a newly filtered
   * route disappears during this period, a proper withdraw is not
   * sent (because old would be also filtered) and the route is
   * not refeeded (because it disappeared before that).
   */

  if (new)
    new = export_filter(c, new, &new_free, &tmpa, 0);

  if (old && !refeed)
    old = export_filter(c, old, &old_free, NULL, 1);

  if (!new && !old)
  {
    /*
     * As mentioned above, the 'old' value may be incorrect in some race conditions.
     * We generally ignore it with the exception of a withdraw to the pipe protocol.
     * In that case we rather propagate unfiltered withdraws regardless of
     * export filters to ensure that when a protocol is flushed, its routes are
     * removed from all tables. Possible spurious unfiltered withdraws are not
     * a problem here as they are ignored if there is no corresponding route at
     * the other end of the pipe. We directly call the rt_notify() hook instead of
     * do_rt_notify() to avoid logging and stat counters.
     */

#ifdef CONFIG_PIPE
    if ((p->proto == &proto_pipe) && !new0 && (p != old0->sender->proto))
      p->rt_notify(p, c, net, NULL, old0, NULL);
#endif

    return;
  }

  do_rt_notify(c, net, new, old, tmpa, refeed);

  /* Discard temporary rte's */
  if (new_free)
    rte_free(new_free);
  if (old_free)
    rte_free(old_free);
}
static void
rt_notify_accepted(struct channel *c, net *net, rte *new_changed, rte *old_changed, rte *before_old, int feed)
{
  // struct proto *p = c->proto;

  rte *r;
  rte *new_best = NULL;
  rte *old_best = NULL;
  rte *new_free = NULL;
  rte *old_free = NULL;
  ea_list *tmpa = NULL;

  /* Used to track whether we met the old_changed position. If before_old is NULL,
     old_changed was the first and we met it implicitly before the current best route. */
  int old_meet = old_changed && !before_old;

  /* Note that before_old is either NULL or a valid (not rejected) route.
     If old_changed is valid, before_old has to be too. If the old changed route
     was not valid, the caller must use NULL for both old_changed and before_old. */

  if (new_changed)
    c->stats.exp_updates_received++;
  else
    c->stats.exp_withdraws_received++;

  /* First, find the new_best route - first accepted by filters */
  for (r = net->routes; rte_is_valid(r); r = r->next)
  {
    if (new_best = export_filter(c, r, &new_free, &tmpa, 0))
      break;

    /* Note if we walked around the position of old_changed route */
    if (r == before_old)
      old_meet = 1;
  }

  /*
   * Second, handle the feed case. That means we do not care for
   * old_best. It is NULL for feed, and the new_best for refeed.
   * For refeed, there is a hack similar to one in rt_notify_basic()
   * to ensure withdraws in case of changed filters
   */
  if (feed)
  {
    if (feed == 2)	/* refeed */
      old_best = new_best ? new_best :
	(rte_is_valid(net->routes) ? net->routes : NULL);
    else
      old_best = NULL;

    if (!new_best && !old_best)
      return;

    goto found;
  }

  /*
   * Now, we find the old_best route. Generally, it is the same as the
   * new_best, unless new_best is the same as new_changed or
   * old_changed is accepted before new_best.
   *
   * There are four cases:
   *
   * - We would find and accept old_changed before new_best, therefore
   *   old_changed is old_best. In remaining cases we suppose this
   *   is not true.
   *
   * - We found no new_best, therefore there is also no old_best and
   *   we ignore this withdraw.
   *
   * - We found new_best different than new_changed, therefore
   *   old_best is the same as new_best and we ignore this update.
   *
   * - We found new_best the same as new_changed, therefore it cannot
   *   be old_best and we have to continue the search for old_best.
   */

  /* First case */
  if (old_meet)
    if (old_best = export_filter(c, old_changed, &old_free, NULL, 1))
      goto found;

  /* Second case */
  if (!new_best)
    return;

  /* Third case, we use r instead of new_best, because export_filter() could change it */
  if (r != new_changed)
  {
    if (new_free)
      rte_free(new_free);
    return;
  }

  /* Fourth case */
  for (r = r->next; rte_is_valid(r); r = r->next)
  {
    if (old_best = export_filter(c, r, &old_free, NULL, 1))
      goto found;

    if (r == before_old)
      if (old_best = export_filter(c, old_changed, &old_free, NULL, 1))
	goto found;
  }

  /* Implicitly, old_best is NULL and new_best is non-NULL */

 found:
  do_rt_notify(c, net, new_best, old_best, tmpa, (feed == 2));

  /* Discard temporary rte's */
  if (new_free)
    rte_free(new_free);
  if (old_free)
    rte_free(old_free);
}
static struct nexthop *
nexthop_merge_rta(struct nexthop *nhs, rta *a, linpool *pool, int max)
{
  return nexthop_merge(nhs, &(a->nh), 1, 0, max, pool);
}

rte *
rt_export_merged(struct channel *c, net *net, rte **rt_free, ea_list **tmpa, linpool *pool, int silent)
{
  // struct proto *p = c->proto;
  struct nexthop *nhs = NULL;
  rte *best0, *best, *rt0, *rt, *tmp;

  best0 = net->routes;
  *rt_free = NULL;

  if (!rte_is_valid(best0))
    return NULL;

  best = export_filter_(c, best0, rt_free, tmpa, pool, silent);

  if (!best || !rte_is_reachable(best))
    return best;

  for (rt0 = best0->next; rt0; rt0 = rt0->next)
  {
    if (!rte_mergable(best0, rt0))
      continue;

    rt = export_filter_(c, rt0, &tmp, NULL, pool, 1);

    if (!rt)
      continue;

    if (rte_is_reachable(rt))
      nhs = nexthop_merge_rta(nhs, rt->attrs, pool, c->merge_limit);

    if (tmp)
      rte_free(tmp);
  }

  if (nhs)
  {
    nhs = nexthop_merge_rta(nhs, best->attrs, pool, c->merge_limit);

    if (nhs->next)
    {
      best = rte_cow_rta(best, pool);
      nexthop_link(best->attrs, nhs);
    }
  }

  if (best != best0)
    *rt_free = best;

  return best;
}
static void
rt_notify_merged(struct channel *c, net *net, rte *new_changed, rte *old_changed,
		 rte *new_best, rte *old_best, int refeed)
{
  // struct proto *p = c->proto;

  rte *new_best_free = NULL;
  rte *old_best_free = NULL;
  rte *new_changed_free = NULL;
  rte *old_changed_free = NULL;
  ea_list *tmpa = NULL;

  /* We assume that all rte arguments are either NULL or rte_is_valid() */

  /* This check should be done by the caller */
  if (!new_best && !old_best)
    return;

  /* Check whether the change is relevant to the merged route */
  if ((new_best == old_best) && !refeed)
  {
    new_changed = rte_mergable(new_best, new_changed) ?
      export_filter(c, new_changed, &new_changed_free, NULL, 1) : NULL;

    old_changed = rte_mergable(old_best, old_changed) ?
      export_filter(c, old_changed, &old_changed_free, NULL, 1) : NULL;

    if (!new_changed && !old_changed)
      return;
  }

  if (new_best)
    c->stats.exp_updates_received++;
  else
    c->stats.exp_withdraws_received++;

  /* Prepare new merged route */
  if (new_best)
    new_best = rt_export_merged(c, net, &new_best_free, &tmpa, rte_update_pool, 0);

  /* Prepare old merged route (without proper merged next hops) */
  /* There are some issues with running filter on old route - see rt_notify_basic() */
  if (old_best && !refeed)
    old_best = export_filter(c, old_best, &old_best_free, NULL, 1);

  if (new_best || old_best)
    do_rt_notify(c, net, new_best, old_best, tmpa, refeed);

  /* Discard temporary rte's */
  if (new_best_free)
    rte_free(new_best_free);
  if (old_best_free)
    rte_free(old_best_free);
  if (new_changed_free)
    rte_free(new_changed_free);
  if (old_changed_free)
    rte_free(old_changed_free);
}
/**
 * rte_announce - announce a routing table change
 * @tab: table the route has been added to
 * @type: type of route announcement (RA_OPTIMAL, RA_ACCEPTED, RA_MERGED or RA_ANY)
 * @net: network in question
 * @new: the new route to be announced
 * @old: the previous route for the same network
 * @new_best: the new best route for the same network
 * @old_best: the previous best route for the same network
 * @before_old: The previous route before @old for the same network.
 * 		If @before_old is NULL @old was the first.
 *
 * This function gets a routing table update and announces it
 * to all protocols that accept the given type of route announcement
 * and are connected to the same table by their announcement hooks.
 *
 * Route announcement of type %RA_OPTIMAL is generated when the optimal
 * route (in routing table @tab) changes. In that case @old stores the
 * old optimal route.
 *
 * Route announcement of type %RA_ANY is generated when any route (in
 * routing table @tab) changes. In that case @old stores the old route
 * from the same protocol.
 *
 * For each appropriate protocol, we first call its import_control()
 * hook which performs basic checks on the route (each protocol has a
 * right to veto or force accept of the route before any filter is
 * asked) and adds default values of attributes specific to the new
 * protocol (metrics, tags etc.). Then it consults the protocol's
 * export filter and if it accepts the route, the rt_notify() hook of
 * the protocol gets called.
 */
static void
rte_announce(rtable *tab, unsigned type, net *net, rte *new, rte *old,
	     rte *new_best, rte *old_best, rte *before_old)
{
  if (!rte_is_valid(new))
    new = NULL;

  if (!rte_is_valid(old))
    old = before_old = NULL;

  if (!rte_is_valid(new_best))
    new_best = NULL;

  if (!rte_is_valid(old_best))
    old_best = NULL;

  if (!old && !new)
    return;

  if ((type == RA_OPTIMAL) && tab->hostcache)
    rt_notify_hostcache(tab, net);

  struct channel *c; node *n;
  WALK_LIST2(c, n, tab->channels, table_node)
  {
    if (c->export_state == ES_DOWN)
      continue;

    if (c->ra_mode == type)
      if (type == RA_ACCEPTED)
	rt_notify_accepted(c, net, new, old, before_old, 0);
      else if (type == RA_MERGED)
	rt_notify_merged(c, net, new, old, new_best, old_best, 0);
      else
	rt_notify_basic(c, net, new, old, 0);
  }
}
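/*
 * A single table change typically produces several announcement types:
 * rte_recalculate() below always announces RA_ANY and RA_MERGED, announces
 * RA_OPTIMAL only when the best route has changed, and RA_ACCEPTED only for
 * sorted tables. Each channel receives only the type matching its ra_mode.
 */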
static inline int
rte_validate(rte *e)
{
  int c;
  net *n = e->net;

  // (n->n.pxlen > BITS_PER_IP_ADDRESS) || !ip_is_prefix(n->n.prefix,n->n.pxlen))
  if (!net_validate(n->n.addr))
  {
    log(L_WARN "Ignoring bogus prefix %N received via %s",
	n->n.addr, e->sender->proto->name);
    return 0;
  }

  c = net_classify(n->n.addr);
  if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
  {
    log(L_WARN "Ignoring bogus route %N received via %s",
	n->n.addr, e->sender->proto->name);
    return 0;
  }

  if ((e->attrs->dest == RTD_UNICAST) && !nexthop_is_sorted(&(e->attrs->nh)))
  {
    log(L_WARN "Ignoring unsorted multipath route %N received via %s",
	n->n.addr, e->sender->proto->name);
    return 0;
  }

  return 1;
}
/**
 * rte_free - delete a &rte
 * @e: &rte to be deleted
 *
 * rte_free() deletes the given &rte from the routing table it's linked to.
 */
void
rte_free(rte *e)
{
  if (rta_is_cached(e->attrs))
    rta_free(e->attrs);
  sl_free(rte_slab, e);
}

static inline void
rte_free_quick(rte *e)
{
  rta_free(e->attrs);
  sl_free(rte_slab, e);
}
static int
rte_same(rte *x, rte *y)
{
  return
    x->attrs == y->attrs &&
    x->flags == y->flags &&
    x->pflags == y->pflags &&
    x->pref == y->pref &&
    (!x->attrs->src->proto->rte_same || x->attrs->src->proto->rte_same(x, y));
}

static inline int rte_is_ok(rte *e) { return e && !rte_is_filtered(e); }
static void
rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
{
  struct proto *p = c->proto;
  struct rtable *table = c->table;
  struct proto_stats *stats = &c->stats;
  static struct tbf rl_pipe = TBF_DEFAULT_LOG_LIMITS;
  rte *before_old = NULL;
  rte *old_best = net->routes;
  rte *old = NULL;
  rte **k;

  k = &net->routes;			/* Find and remove original route from the same protocol */
  while (old = *k)
  {
    if (old->attrs->src == src)
    {
      /* If there is the same route in the routing table but from
       * a different sender, then there are two paths from the
       * source protocol to this routing table through transparent
       * pipes, which is not allowed.
       *
       * We log that and ignore the route. If it is a withdraw, we
       * ignore it completely (there might be 'spurious withdraws',
       * see FIXME in do_rte_announce())
       */
      if (old->sender->proto != p)
      {
	if (new)
	{
	  log_rl(&rl_pipe, L_ERR "Pipe collision detected when sending %N to table %s",
	      net->n.addr, table->name);
	  rte_free_quick(new);
	}
	return;
      }

      if (new && rte_same(old, new))
      {
	/* No changes, ignore the new route */

	if (!rte_is_filtered(new))
	{
	  stats->imp_updates_ignored++;
	  rte_trace_in(D_ROUTES, p, new, "ignored");
	}

	rte_free_quick(new);
	return;
      }
      *k = old->next;
      break;
    }
    k = &old->next;
    before_old = old;
  }

  if (!old)
    before_old = NULL;

  if (!old && !new)
  {
    stats->imp_withdraws_ignored++;
    return;
  }

  int new_ok = rte_is_ok(new);
  int old_ok = rte_is_ok(old);

  struct channel_limit *l = &c->rx_limit;
  if (l->action && !old && new)
  {
    u32 all_routes = stats->imp_routes + stats->filt_routes;

    if (all_routes >= l->limit)
      channel_notify_limit(c, l, PLD_RX, all_routes);

    if (l->state == PLS_BLOCKED)
    {
      /* In receive limit the situation is simple, old is NULL so
	 we just free new and exit like nothing happened */

      stats->imp_updates_ignored++;
      rte_trace_in(D_FILTERS, p, new, "ignored [limit]");
      rte_free_quick(new);
      return;
    }
  }

  l = &c->in_limit;
  if (l->action && !old_ok && new_ok)
  {
    if (stats->imp_routes >= l->limit)
      channel_notify_limit(c, l, PLD_IN, stats->imp_routes);

    if (l->state == PLS_BLOCKED)
    {
      /* In import limit the situation is more complicated. We
	 shouldn't just drop the route, we should handle it like
	 it was filtered. We also have to continue the route
	 processing if old or new is non-NULL, but we should exit
	 if both are NULL as this case is probably assumed to be
	 already handled. */

      stats->imp_updates_ignored++;
      rte_trace_in(D_FILTERS, p, new, "ignored [limit]");

      if (c->in_keep_filtered)
	new->flags |= REF_FILTERED;
      else
	{ rte_free_quick(new); new = NULL; }

      /* Note that old && !new could be possible when
	 c->in_keep_filtered changed in the recent past. */

      if (!old && !new)
	return;

      new_ok = 0;
      goto skip_stats1;
    }
  }

  if (new_ok)
    stats->imp_updates_accepted++;
  else if (old_ok)
    stats->imp_withdraws_accepted++;
  else
    stats->imp_withdraws_ignored++;

 skip_stats1:

  if (new)
    rte_is_filtered(new) ? stats->filt_routes++ : stats->imp_routes++;
  if (old)
    rte_is_filtered(old) ? stats->filt_routes-- : stats->imp_routes--;

  if (table->config->sorted)
  {
    /* If routes are sorted, just insert new route to appropriate position */
    if (new)
    {
      if (before_old && !rte_better(new, before_old))
	k = &before_old->next;
      else
	k = &net->routes;

      for (; *k; k = &(*k)->next)
	if (rte_better(new, *k))
	  break;

      new->next = *k;
      *k = new;
    }
  }
  else
  {
    /* If routes are not sorted, find the best route and move it on
       the first position. There are several optimized cases. */

    if (src->proto->rte_recalculate && src->proto->rte_recalculate(table, net, new, old, old_best))
      goto do_recalculate;

    if (new && rte_better(new, old_best))
    {
      /* The first case - the new route is clearly optimal,
	 we link it at the first position */

      new->next = net->routes;
      net->routes = new;
    }
    else if (old == old_best)
    {
      /* The second case - the old best route disappeared, we add the
	 new route (if we have any) to the list (we don't care about
	 position) and then we elect the new optimal route and relink
	 that route at the first position and announce it. The new optimal
	 route might be NULL if there are no more routes */

    do_recalculate:
      /* Add the new route to the list */
      if (new)
      {
	new->next = net->routes;
	net->routes = new;
      }

      /* Find a new optimal route (if there is any) */
      if (net->routes)
      {
	rte **bp = &net->routes;
	for (k = &(*bp)->next; *k; k = &(*k)->next)
	  if (rte_better(*k, *bp))
	    bp = k;

	/* And relink it */
	rte *best = *bp;
	*bp = best->next;
	best->next = net->routes;
	net->routes = best;
      }
    }
    else if (new)
    {
      /* The third case - the new route is not better than the old
	 best route (therefore old_best != NULL) and the old best
	 route was not removed (therefore old_best == net->routes).
	 We just link the new route after the old best route. */

      ASSERT(net->routes != NULL);
      new->next = net->routes->next;
      net->routes->next = new;
    }
    /* The fourth (empty) case - suboptimal route was removed, nothing to do */
  }

  if (new)
    new->lastmod = now;

  /* Log the route change */
  if (p->debug & D_ROUTES)
  {
    if (new_ok)
      rte_trace(p, new, '>', new == net->routes ? "added [best]" : "added");
    else if (old_ok)
    {
      if (old != old_best)
	rte_trace(p, old, '>', "removed");
      else if (rte_is_ok(net->routes))
	rte_trace(p, old, '>', "removed [replaced]");
      else
	rte_trace(p, old, '>', "removed [sole]");
    }
  }

  /* Propagate the route change */
  rte_announce(table, RA_ANY, net, new, old, NULL, NULL, NULL);
  if (net->routes != old_best)
    rte_announce(table, RA_OPTIMAL, net, net->routes, old_best, NULL, NULL, NULL);
  if (table->config->sorted)
    rte_announce(table, RA_ACCEPTED, net, new, old, NULL, NULL, before_old);
  rte_announce(table, RA_MERGED, net, new, old, net->routes, old_best, NULL);

  if (!net->routes &&
      (table->gc_counter++ >= table->config->gc_max_ops) &&
      (table->gc_time + table->config->gc_min_time <= now))
    rt_schedule_prune(table);

  if (old_ok && p->rte_remove)
    p->rte_remove(net, old);
  if (new_ok && p->rte_insert)
    p->rte_insert(net, new);

  if (old)
    rte_free_quick(old);
}
static int rte_update_nest_cnt;		/* Nesting counter to allow recursive updates */

static inline void
rte_update_lock(void)
{
  rte_update_nest_cnt++;
}

static inline void
rte_update_unlock(void)
{
  if (!--rte_update_nest_cnt)
    lp_flush(rte_update_pool);
}

static inline void
rte_hide_dummy_routes(net *net, rte **dummy)
{
  if (net->routes && net->routes->attrs->source == RTS_DUMMY)
  {
    *dummy = net->routes;
    net->routes = (*dummy)->next;
  }
}

static inline void
rte_unhide_dummy_routes(net *net, rte **dummy)
{
  if (*dummy)
  {
    (*dummy)->next = net->routes;
    net->routes = *dummy;
  }
}
/**
 * rte_update - enter a new update to a routing table
 * @table: table to be updated
 * @c: channel doing the update
 * @net: network node
 * @p: protocol submitting the update
 * @src: protocol originating the update
 * @new: a &rte representing the new route or %NULL for route removal.
 *
 * This function is called by the routing protocols whenever they discover
 * a new route or wish to update/remove an existing route. The right announcement
 * sequence is to build route attributes first (either un-cached with @aflags set
 * to zero or a cached one using rta_lookup(); in this case please note that
 * you need to increase the use count of the attributes yourself by calling
 * rta_clone()), call rte_get_temp() to obtain a temporary &rte, fill in all
 * the appropriate data and finally submit the new &rte by calling rte_update().
 *
 * @src specifies the protocol that originally created the route and the meaning
 * of protocol-dependent data of @new. If @new is not %NULL, @src has to be the
 * same value as @new->attrs->proto. @p specifies the protocol that called
 * rte_update(). In most cases it is the same protocol as @src. rte_update()
 * stores @p in @new->sender.
 *
 * When rte_update() gets any route, it automatically validates it (checks
 * whether the network and next hop address are valid IP addresses and also
 * whether a normal routing protocol doesn't try to smuggle a host or link
 * scope route to the table), converts all protocol dependent attributes stored
 * in the &rte to temporary extended attributes, consults import filters of the
 * protocol to see if the route should be accepted and/or its attributes modified,
 * and stores the temporary attributes back to the &rte.
 *
 * Now, having a "public" version of the route, we
 * automatically find any old route defined by the protocol @src
 * for network @n, replace it by the new one (or remove it if @new is %NULL),
 * recalculate the optimal route for this destination and finally broadcast
 * the change (if any) to all routing protocols by calling rte_announce().
 *
 * All memory used for attribute lists and other temporary allocations is taken
 * from a special linear pool @rte_update_pool and freed when rte_update()
 * finishes.
 */
void
rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
{
  struct proto *p = c->proto;
  struct proto_stats *stats = &c->stats;
  struct filter *filter = c->in_filter;
  ea_list *tmpa = NULL;
  rte *dummy = NULL;
  net *nn;

  ASSERT(c->channel_state == CS_UP);

  rte_update_lock();
  if (new)
  {
    nn = net_get(c->table, n);

    new->net = nn;
    new->sender = c;

    if (!new->pref)
      new->pref = c->preference;

    stats->imp_updates_received++;
    if (!rte_validate(new))
    {
      rte_trace_in(D_FILTERS, p, new, "invalid");
      stats->imp_updates_invalid++;
      goto drop;
    }

    if (filter == FILTER_REJECT)
    {
      stats->imp_updates_filtered++;
      rte_trace_in(D_FILTERS, p, new, "filtered out");

      if (!c->in_keep_filtered)
	goto drop;

      /* new is a private copy, I could modify it */
      new->flags |= REF_FILTERED;
    }
    else
    {
      tmpa = make_tmp_attrs(new, rte_update_pool);
      if (filter && (filter != FILTER_REJECT))
      {
	ea_list *old_tmpa = tmpa;
	int fr = f_run(filter, &new, &tmpa, rte_update_pool, 0);
	if (fr > F_ACCEPT)
	{
	  stats->imp_updates_filtered++;
	  rte_trace_in(D_FILTERS, p, new, "filtered out");

	  if (!c->in_keep_filtered)
	    goto drop;

	  new->flags |= REF_FILTERED;
	}
	if (tmpa != old_tmpa && src->proto->store_tmp_attrs)
	  src->proto->store_tmp_attrs(new, tmpa);
      }
    }
    if (!rta_is_cached(new->attrs)) /* Need to copy attributes */
      new->attrs = rta_lookup(new->attrs);
    new->flags |= REF_COW;
  }
  else
  {
    stats->imp_withdraws_received++;

    if (!(nn = net_find(c->table, n)) || !src)
    {
      stats->imp_withdraws_ignored++;
      rte_update_unlock();
      return;
    }
  }

 recalc:
  rte_hide_dummy_routes(nn, &dummy);
  rte_recalculate(c, nn, new, src);
  rte_unhide_dummy_routes(nn, &dummy);
  rte_update_unlock();
  return;

 drop:
  rte_free(new);
  new = NULL;
  goto recalc;
}
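/*
 * A minimal sketch (not from the original sources) of the announcement
 * sequence described above, as a protocol might perform it. The channel 'c'
 * and destination 'n' are hypothetical, and the attribute set shown (source,
 * scope, dest) is only an assumed example; a real protocol fills in whatever
 * attributes it needs.
 *
 *   rta a = {
 *     .src = p->main_source,
 *     .source = RTS_STATIC,
 *     .scope = SCOPE_UNIVERSE,
 *     .dest = RTD_UNREACHABLE,
 *   };
 *   rte *e = rte_get_temp(rta_lookup(&a));   // or pass the un-cached &a directly
 *   e->pflags = 0;
 *   rte_update2(c, n, e, a.src);
 */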
/* Independent call to rte_announce(), used from next hop
   recalculation, outside of rte_update(). new must be non-NULL */
static inline void
rte_announce_i(rtable *tab, unsigned type, net *net, rte *new, rte *old,
	       rte *new_best, rte *old_best)
{
  rte_update_lock();
  rte_announce(tab, type, net, new, old, new_best, old_best, NULL);
  rte_update_unlock();
}

static inline void
rte_discard(rte *old)	/* Non-filtered route deletion, used during garbage collection */
{
  rte_update_lock();
  rte_recalculate(old->sender, old->net, NULL, old->attrs->src);
  rte_update_unlock();
}
/* Check rtable for best route to given net whether it would be exported to p */
int
rt_examine(rtable *t, net_addr *a, struct proto *p, struct filter *filter)
{
  net *n = net_find(t, a);
  rte *rt = n ? n->routes : NULL;

  if (!rte_is_valid(rt))
    return 0;

  rte_update_lock();

  /* Rest is stripped down export_filter() */
  ea_list *tmpa = make_tmp_attrs(rt, rte_update_pool);
  int v = p->import_control ? p->import_control(p, &rt, &tmpa, rte_update_pool) : 0;
  if (v == RIC_PROCESS)
    v = (f_run(filter, &rt, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) <= F_ACCEPT);

  /* Discard temporary rte */
  if (rt != n->routes)
    rte_free(rt);

  rte_update_unlock();

  return v > 0;
}
/**
 * rt_refresh_begin - start a refresh cycle
 * @t: related routing table
 * @c: related channel
 *
 * This function starts a refresh cycle for the given routing table and channel.
 * The refresh cycle is a sequence where the protocol sends all its valid
 * routes to the routing table (by rte_update()). After that, all protocol
 * routes (more precisely routes with @c as @sender) not sent during the
 * refresh cycle but still in the table from the past are pruned. This is
 * implemented by marking all related routes as stale by the REF_STALE flag in
 * rt_refresh_begin(), then marking all related stale routes with the REF_DISCARD
 * flag in rt_refresh_end() and then removing such routes in the prune loop.
 */
void
rt_refresh_begin(rtable *t, struct channel *c)
{
  FIB_WALK(&t->fib, net, n)
  {
    rte *e;
    for (e = n->routes; e; e = e->next)
      if (e->sender == c)
	e->flags |= REF_STALE;
  }
  FIB_WALK_END;
}

/**
 * rt_refresh_end - end a refresh cycle
 * @t: related routing table
 * @c: related channel
 *
 * This function ends a refresh cycle for the given routing table and channel.
 * See rt_refresh_begin() for a description of refresh cycles.
 */
void
rt_refresh_end(rtable *t, struct channel *c)
{
  int prune = 0;

  FIB_WALK(&t->fib, net, n)
  {
    rte *e;
    for (e = n->routes; e; e = e->next)
      if ((e->sender == c) && (e->flags & REF_STALE))
      {
	e->flags |= REF_DISCARD;
	prune = 1;
      }
  }
  FIB_WALK_END;

  if (prune)
    rt_schedule_prune(t);
}
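/*
 * A minimal usage sketch (not from the original sources) of a refresh cycle,
 * e.g. after a route refresh. 'c' is a hypothetical channel whose routes are
 * being re-sent; every route not re-announced between begin and end is marked
 * REF_DISCARD and later removed by the prune loop.
 *
 *   rt_refresh_begin(c->table, c);
 *   ... call rte_update2(c, ...) for every currently valid route ...
 *   rt_refresh_end(c->table, c);
 */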
/**
 * rte_dump - dump a route
 * @e: &rte to be dumped
 *
 * This function dumps contents of a &rte to debug output.
 */
void
rte_dump(rte *e)
{
  net *n = e->net;
  debug("%-1N ", n->n.addr);
  debug("KF=%02x PF=%02x pref=%d lm=%d ", n->n.flags, e->pflags, e->pref, now - e->lastmod);
  rta_dump(e->attrs);
  if (e->attrs->src->proto->proto->dump_attrs)
    e->attrs->src->proto->proto->dump_attrs(e);
  debug("\n");
}
1998-05-15 07:54:32 +00:00
2000-06-01 17:12:19 +00:00
/**
* rt_dump - dump a routing table
* @ t : routing table to be dumped
*
* This function dumps contents of a given routing table to debug output .
*/
1998-05-20 11:54:33 +00:00
void
rt_dump ( rtable * t )
{
1998-06-04 20:28:19 +00:00
debug ( " Dump of routing table <%s> \n " , t - > name ) ;
1998-12-20 14:01:37 +00:00
# ifdef DEBUGGING
1999-04-12 18:01:07 +00:00
fib_check ( & t - > fib ) ;
1998-12-20 14:01:37 +00:00
# endif
2015-12-21 19:16:05 +00:00
FIB_WALK ( & t - > fib , net , n )
1999-04-12 18:01:07 +00:00
{
2015-12-21 19:16:05 +00:00
rte * e ;
1999-04-12 18:01:07 +00:00
for ( e = n - > routes ; e ; e = e - > next )
rte_dump ( e ) ;
1998-06-04 20:28:19 +00:00
}
1999-04-12 18:01:07 +00:00
FIB_WALK_END ;
1998-06-04 20:28:19 +00:00
debug ( " \n " ) ;
1998-05-20 11:54:33 +00:00
}
1998-05-15 07:54:32 +00:00
2000-06-01 17:12:19 +00:00
/**
* rt_dump_all - dump all routing tables
*
* This function dumps contents of all routing tables to debug output .
*/
1998-05-24 14:49:14 +00:00
void
rt_dump_all ( void )
{
1999-05-17 20:14:52 +00:00
rtable * t ;
WALK_LIST ( t , routing_tables )
rt_dump ( t ) ;
1998-05-24 14:49:14 +00:00
}
2010-07-05 15:50:19 +00:00
static inline void
rt_schedule_hcu ( rtable * tab )
{
if ( tab - > hcu_scheduled )
return ;
tab - > hcu_scheduled = 1 ;
ev_schedule ( tab - > rt_event ) ;
}
static inline void
rt_schedule_nhu ( rtable * tab )
{
2017-02-22 13:02:03 +00:00
if ( tab - > nhu_state = = NHU_CLEAN )
2010-07-05 15:50:19 +00:00
ev_schedule ( tab - > rt_event ) ;
2017-02-22 13:02:03 +00:00
/* state change:
* NHU_CLEAN - > NHU_SCHEDULED
* NHU_RUNNING - > NHU_DIRTY
*/
tab - > nhu_state | = NHU_SCHEDULED ;
2010-07-05 15:50:19 +00:00
}
2016-01-26 10:48:58 +00:00
void
rt_schedule_prune ( rtable * tab )
2012-03-28 16:40:04 +00:00
{
2016-01-26 10:48:58 +00:00
if ( tab - > prune_state = = 0 )
ev_schedule ( tab - > rt_event ) ;
2012-03-28 16:40:04 +00:00
2016-01-26 10:48:58 +00:00
/* state change 0->1, 2->3 */
tab - > prune_state | = 1 ;
2012-03-28 16:40:04 +00:00
}
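/*
 * Prune state encoding (summary inferred from rt_schedule_prune() above and
 * rt_prune_table() below): bit 0 means a prune round is requested, bit 1
 * means the prune loop is currently walking the table. Hence 0 = idle,
 * 1 = scheduled, 2 = running, 3 = running with another round requested once
 * the current walk finishes.
 */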
2016-01-26 10:48:58 +00:00
2000-04-27 22:28:49 +00:00
static void
2010-07-05 15:50:19 +00:00
rt_event ( void * ptr )
1999-02-13 21:29:01 +00:00
{
2010-07-05 15:50:19 +00:00
rtable * tab = ptr ;
2016-05-12 14:04:47 +00:00
rt_lock_table ( tab ) ;
2010-07-05 15:50:19 +00:00
if ( tab - > hcu_scheduled )
rt_update_hostcache ( tab ) ;
1999-05-17 20:14:52 +00:00
2010-07-05 15:50:19 +00:00
if ( tab - > nhu_state )
rt_next_hop_update ( tab ) ;
2014-03-20 13:07:12 +00:00
if ( tab - > prune_state )
2016-01-26 10:48:58 +00:00
rt_prune_table ( tab ) ;
2016-05-12 14:04:47 +00:00
rt_unlock_table ( tab ) ;
1999-02-13 21:29:01 +00:00
}
2000-03-04 22:21:06 +00:00
void
rt_setup ( pool * p , rtable * t , char * name , struct rtable_config * cf )
{
bzero ( t , sizeof ( * t ) ) ;
t - > name = name ;
t - > config = cf ;
2015-11-05 11:48:52 +00:00
t - > addr_type = cf ? cf - > addr_type : NET_IP4 ;
fib_init ( & t - > fib , p , t - > addr_type , sizeof ( net ) , OFFSETOF ( net , n ) , 0 , NULL ) ;
2016-01-26 10:48:58 +00:00
init_list ( & t - > channels ) ;
2000-03-04 22:21:06 +00:00
if ( cf )
{
2010-07-05 15:50:19 +00:00
t - > rt_event = ev_new ( p ) ;
t - > rt_event - > hook = rt_event ;
t - > rt_event - > data = t ;
2000-05-19 19:49:33 +00:00
t - > gc_time = now ;
2000-03-04 22:21:06 +00:00
}
}
2000-06-01 17:12:19 +00:00
/**
* rt_init - initialize routing tables
*
* This function is called during BIRD startup . It initializes the
* routing table module .
*/
1998-05-20 11:54:33 +00:00
void
rt_init ( void )
{
rta_init ( ) ;
1999-02-13 21:29:01 +00:00
rt_table_pool = rp_new ( & root_pool , " Routing tables " ) ;
1999-04-05 20:25:03 +00:00
rte_update_pool = lp_new ( rt_table_pool , 4080 ) ;
1999-02-13 21:29:01 +00:00
rte_slab = sl_new ( rt_table_pool , sizeof ( rte ) ) ;
1999-05-17 20:14:52 +00:00
init_list ( & routing_tables ) ;
1998-05-20 11:54:33 +00:00
}
1999-02-13 19:15:28 +00:00
2012-03-28 16:40:04 +00:00
2016-01-26 10:48:58 +00:00
/**
* rt_prune_table - prune a routing table
* @ tab : routing table to be pruned
*
* The prune loop scans routing tables and removes routes belonging to flushing
* channels , discarded routes and also empty network entries . It is called from
* rt_event ( ) . The event is rescheduled if the current iteration does not finish
* the table . The pruning is directed by the prune state ( @ prune_state ) ,
* specifying whether the prune cycle is scheduled or running , and there
* is also a persistent pruning iterator ( @ prune_fit ) .
*
* The prune loop is also used for channel flushing . For this purpose , the
* channels to flush are marked before the iteration and notified after the
* iteration .
*/
static void
rt_prune_table ( rtable * tab )
2012-03-28 16:40:04 +00:00
{
struct fib_iterator * fit = & tab - > prune_fit ;
2016-01-26 10:48:58 +00:00
int limit = 512 ;
struct channel * c ;
node * n , * x ;
1999-02-13 19:15:28 +00:00
DBG ( " Pruning route table %s \n " , tab - > name ) ;
2000-05-08 22:33:02 +00:00
# ifdef DEBUGGING
fib_check ( & tab - > fib ) ;
# endif
2012-03-28 16:40:04 +00:00
2016-01-26 10:48:58 +00:00
if ( tab - > prune_state = = 0 )
return ;
2012-03-28 16:40:04 +00:00
2016-01-26 10:48:58 +00:00
if ( tab - > prune_state = = 1 )
{
/* Mark channels to flush */
WALK_LIST2 ( c , n , tab - > channels , table_node )
if ( c - > channel_state = = CS_FLUSHING )
c - > flush_active = 1 ;
FIB_ITERATE_INIT ( fit , & tab - > fib ) ;
tab - > prune_state = 2 ;
}
2012-03-28 16:40:04 +00:00
1999-04-12 18:01:07 +00:00
again :
2015-12-21 19:16:05 +00:00
FIB_ITERATE_START ( & tab - > fib , fit , net , n )
1999-02-13 19:15:28 +00:00
{
1999-04-12 18:01:07 +00:00
rte * e ;
2012-03-28 16:40:04 +00:00
1999-04-12 18:01:07 +00:00
rescan :
2012-03-28 16:40:04 +00:00
for ( e = n - > routes ; e ; e = e - > next )
2016-01-26 10:48:58 +00:00
if ( e - > sender - > flush_active | | ( e - > flags & REF_DISCARD ) )
1999-04-12 18:01:07 +00:00
{
2016-01-26 10:48:58 +00:00
if ( limit < = 0 )
2012-03-28 16:40:04 +00:00
{
2015-12-21 19:16:05 +00:00
FIB_ITERATE_PUT ( fit ) ;
2016-01-26 10:48:58 +00:00
ev_schedule ( tab - > rt_event ) ;
return ;
2012-03-28 16:40:04 +00:00
}
2016-10-14 13:37:04 +00:00
rte_discard ( e ) ;
2016-01-26 10:48:58 +00:00
limit - - ;
2012-03-28 16:40:04 +00:00
1999-04-12 18:01:07 +00:00
goto rescan ;
}
2016-01-26 10:48:58 +00:00
2012-03-28 16:40:04 +00:00
if ( ! n - > routes ) /* Orphaned FIB entry */
1999-02-13 19:15:28 +00:00
{
2015-12-21 19:16:05 +00:00
FIB_ITERATE_PUT ( fit ) ;
fib_delete ( & tab - > fib , n ) ;
1999-04-12 18:01:07 +00:00
goto again ;
1999-02-13 19:15:28 +00:00
}
}
2015-12-21 19:16:05 +00:00
FIB_ITERATE_END ;
2012-03-28 16:40:04 +00:00
2000-05-08 22:33:02 +00:00
# ifdef DEBUGGING
fib_check ( & tab - > fib ) ;
# endif
2012-03-28 16:40:04 +00:00
2016-01-26 10:48:58 +00:00
tab - > gc_counter = 0 ;
tab - > gc_time = now ;
1999-05-17 20:14:52 +00:00
2016-01-26 10:48:58 +00:00
/* state change 2->0, 3->1 */
tab - > prune_state & = 1 ;
2014-03-20 13:07:12 +00:00
2016-01-26 10:48:58 +00:00
if ( tab - > prune_state > 0 )
ev_schedule ( tab - > rt_event ) ;
1999-05-17 20:14:52 +00:00
2016-01-26 10:48:58 +00:00
/* FIXME: This should be handled in a better way */
rt_prune_sources ( ) ;
2012-03-28 16:40:04 +00:00
2016-01-26 10:48:58 +00:00
/* Close flushed channels */
WALK_LIST2_DELSAFE ( c , n , x , tab - > channels , table_node )
if ( c - > flush_active )
{
c - > flush_active = 0 ;
2016-05-12 14:04:47 +00:00
channel_set_state ( c , CS_DOWN ) ;
2016-01-26 10:48:58 +00:00
}
return ;
1999-05-17 20:14:52 +00:00
}
2010-07-05 15:50:19 +00:00
void
rt_preconfig ( struct config * c )
{
init_list ( & c - > tables ) ;
2016-01-26 10:48:58 +00:00
rt_new_table ( cf_get_symbol ( " master4 " ) , NET_IP4 ) ;
rt_new_table ( cf_get_symbol ( " master6 " ) , NET_IP6 ) ;
2010-07-05 15:50:19 +00:00
}
2016-01-26 10:48:58 +00:00
/*
2010-07-05 15:50:19 +00:00
* Some functions for handling internal next hop updates
* triggered by rt_schedule_nhu ( ) .
*/
static inline int
rta_next_hop_outdated ( rta * a )
{
struct hostentry * he = a - > hostentry ;
2010-12-07 22:33:55 +00:00
if ( ! he )
return 0 ;
if ( ! he - > src )
return a - > dest ! = RTD_UNREACHABLE ;
2016-05-06 13:48:35 +00:00
return ( a - > dest ! = he - > dest ) | | ( a - > igp_metric ! = he - > igp_metric ) | |
2017-02-24 13:05:11 +00:00
( ! he - > nexthop_linkable ) | | ! nexthop_same ( & ( a - > nh ) , & ( he - > src - > nh ) ) ;
2010-07-05 15:50:19 +00:00
}
2017-03-22 14:00:07 +00:00
void
2017-03-17 14:48:09 +00:00
rta_apply_hostentry ( rta * a , struct hostentry * he , mpls_label_stack * mls )
2010-07-05 15:50:19 +00:00
{
a - > hostentry = he ;
a - > dest = he - > dest ;
2010-07-30 23:04:32 +00:00
a - > igp_metric = he - > igp_metric ;
2016-08-09 12:47:51 +00:00
2017-03-17 14:48:09 +00:00
if ( a - > dest ! = RTD_UNICAST )
2016-08-09 12:47:51 +00:00
{
2017-03-17 14:48:09 +00:00
/* No nexthop */
no_nexthop :
a - > nh = ( struct nexthop ) { } ;
if ( mls )
{ /* Store the label stack for later changes */
a - > nh . labels_orig = a - > nh . labels = mls - > len ;
memcpy ( a - > nh . label , mls - > stack , mls - > len * sizeof ( u32 ) ) ;
}
2016-08-09 12:47:51 +00:00
return ;
}
2017-03-17 14:48:09 +00:00
if ( ( ( ! mls ) | | ( ! mls - > len ) ) & & he - > nexthop_linkable )
{ /* Just link the nexthop chain, no label append happens. */
memcpy ( & ( a - > nh ) , & ( he - > src - > nh ) , nexthop_size ( & ( he - > src - > nh ) ) ) ;
return ;
}
struct nexthop * nhp = NULL , * nhr = NULL ;
int skip_nexthop = 0 ;
2017-03-22 14:00:07 +00:00
2017-03-17 14:48:09 +00:00
for ( struct nexthop * nh = & ( he - > src - > nh ) ; nh ; nh = nh - > next )
2016-08-09 12:47:51 +00:00
{
2017-03-17 14:48:09 +00:00
if ( skip_nexthop )
skip_nexthop - - ;
else
{
nhr = nhp ;
nhp = ( nhp ? ( nhp - > next = lp_allocz ( rte_update_pool , NEXTHOP_MAX_SIZE ) ) : & ( a - > nh ) ) ;
}
2017-02-24 13:05:11 +00:00
2017-03-17 14:48:09 +00:00
nhp - > iface = nh - > iface ;
nhp - > weight = nh - > weight ;
if ( mls )
2016-08-09 12:47:51 +00:00
{
2017-03-17 14:48:09 +00:00
nhp - > labels = nh - > labels + mls - > len ;
nhp - > labels_orig = mls - > len ;
2017-02-24 13:05:11 +00:00
if ( nhp - > labels < = MPLS_MAX_LABEL_STACK )
{
memcpy ( nhp - > label , nh - > label , nh - > labels * sizeof ( u32 ) ) ; /* First the hostentry labels */
2017-03-17 14:48:09 +00:00
memcpy ( & ( nhp - > label [ nh - > labels ] ) , mls - > stack , mls - > len * sizeof ( u32 ) ) ; /* Then the bottom labels */
2017-02-24 13:05:11 +00:00
}
else
{
log ( L_WARN " Sum of label stack sizes %d + %d = %d exceeds allowed maximum (%d) " ,
2017-03-17 14:48:09 +00:00
nh - > labels , mls - > len , nhp - > labels , MPLS_MAX_LABEL_STACK ) ;
skip_nexthop + + ;
2017-02-24 13:05:11 +00:00
continue ;
}
2016-08-09 12:47:51 +00:00
}
2017-03-17 14:48:09 +00:00
if ( ipa_nonzero ( nh - > gw ) )
nhp - > gw = nh - > gw ; /* Router nexthop */
else if ( ipa_nonzero ( he - > link ) )
nhp - > gw = he - > link ; /* Device nexthop with link-local address known */
else
nhp - > gw = he - > addr ; /* Device nexthop with link-local address unknown */
2016-08-09 12:47:51 +00:00
}
2017-02-24 13:05:11 +00:00
2017-03-17 14:48:09 +00:00
if ( skip_nexthop )
if ( nhr )
nhr - > next = NULL ;
else
{
a - > dest = RTD_UNREACHABLE ;
log ( L_WARN " No valid nexthop remaining, setting route unreachable " ) ;
goto no_nexthop ;
}
2010-07-05 15:50:19 +00:00
}
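/*
 * Label composition example (illustrative numbers): if the hostentry's source
 * route carries the label stack [ 100 ] and the dependent route supplies
 * mls = [ 200, 300 ], each resulting nexthop gets the stack [ 100, 200, 300 ]
 * -- the hostentry labels first, the appended stack at the bottom -- as long
 * as the total fits into MPLS_MAX_LABEL_STACK; otherwise that nexthop is
 * skipped with a warning and, if no nexthop remains, the route is made
 * unreachable.
 */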
static inline rte *
2016-10-14 13:37:04 +00:00
rt_next_hop_update_rte ( rtable * tab UNUSED , rte * old )
2010-07-05 15:50:19 +00:00
{
2017-02-20 01:26:45 +00:00
rta * a = alloca ( RTA_MAX_SIZE ) ;
memcpy ( a , old - > attrs , rta_size ( old - > attrs ) ) ;
2017-03-17 14:48:09 +00:00
mpls_label_stack mls = { . len = a - > nh . labels_orig } ;
memcpy ( mls . stack , & a - > nh . label [ a - > nh . labels - mls . len ] , mls . len * sizeof ( u32 ) ) ;
rta_apply_hostentry ( a , old - > attrs - > hostentry , & mls ) ;
2017-02-20 01:26:45 +00:00
a - > aflags = 0 ;
2010-07-05 15:50:19 +00:00
rte * e = sl_alloc ( rte_slab ) ;
memcpy ( e , old , sizeof ( rte ) ) ;
2017-02-20 01:26:45 +00:00
e - > attrs = rta_lookup ( a ) ;
2010-07-05 15:50:19 +00:00
return e ;
}
static inline int
rt_next_hop_update_net ( rtable * tab , net * n )
{
rte * * k , * e , * new , * old_best , * * new_best ;
int count = 0 ;
int free_old_best = 0 ;
old_best = n - > routes ;
if ( ! old_best )
return 0 ;
for ( k = & n - > routes ; e = * k ; k = & e - > next )
2011-12-22 12:20:29 +00:00
if ( rta_next_hop_outdated ( e - > attrs ) )
{
new = rt_next_hop_update_rte ( tab , e ) ;
* k = new ;
2010-07-05 15:50:19 +00:00
2015-06-08 00:20:43 +00:00
rte_announce_i ( tab , RA_ANY , n , new , e , NULL , NULL ) ;
2012-03-15 10:58:08 +00:00
rte_trace_in ( D_ROUTES , new - > sender - > proto , new , " updated " ) ;
2010-07-05 15:50:19 +00:00
2011-12-22 12:20:29 +00:00
/* Call a pre-comparison hook */
/* Not really an efficient way to compute this */
2012-08-14 14:25:22 +00:00
if ( e - > attrs - > src - > proto - > rte_recalculate )
e - > attrs - > src - > proto - > rte_recalculate ( tab , n , new , e , NULL ) ;
2010-07-05 15:50:19 +00:00
2011-12-22 12:20:29 +00:00
if ( e ! = old_best )
rte_free_quick ( e ) ;
else /* Freeing of the old best rte is postponed */
free_old_best = 1 ;
2010-07-05 15:50:19 +00:00
2011-12-22 12:20:29 +00:00
e = new ;
count + + ;
}
if ( ! count )
return 0 ;
/* Find the new best route */
new_best = NULL ;
for ( k = & n - > routes ; e = * k ; k = & e - > next )
{
2010-07-05 15:50:19 +00:00
if ( ! new_best | | rte_better ( e , * new_best ) )
new_best = k ;
}
/* Relink the new best route to the first position */
new = * new_best ;
if ( new ! = n - > routes )
{
* new_best = new - > next ;
new - > next = n - > routes ;
n - > routes = new ;
}
/* Announce the new best route */
if ( new ! = old_best )
{
2015-06-08 00:20:43 +00:00
rte_announce_i ( tab , RA_OPTIMAL , n , new , old_best , NULL , NULL ) ;
2012-03-15 10:58:08 +00:00
rte_trace_in ( D_ROUTES , new - > sender - > proto , new , " updated [best] " ) ;
2010-07-05 15:50:19 +00:00
}
2015-06-08 00:20:43 +00:00
/* FIXME: Better announcement of merged routes */
rte_announce_i ( tab , RA_MERGED , n , new , old_best , new , old_best ) ;
2016-08-16 11:02:32 +00:00
if ( free_old_best )
2010-07-05 15:50:19 +00:00
rte_free_quick ( old_best ) ;
return count ;
}
static void
rt_next_hop_update ( rtable * tab )
{
struct fib_iterator * fit = & tab - > nhu_fit ;
int max_feed = 32 ;
2017-02-22 13:02:03 +00:00
if ( tab - > nhu_state = = NHU_CLEAN )
2010-07-05 15:50:19 +00:00
return ;
2017-02-22 13:02:03 +00:00
if ( tab - > nhu_state = = NHU_SCHEDULED )
2010-07-05 15:50:19 +00:00
{
FIB_ITERATE_INIT ( fit , & tab - > fib ) ;
2017-02-22 13:02:03 +00:00
tab - > nhu_state = NHU_RUNNING ;
2010-07-05 15:50:19 +00:00
}
2015-12-21 19:16:05 +00:00
FIB_ITERATE_START ( & tab - > fib , fit , net , n )
2010-07-05 15:50:19 +00:00
{
if ( max_feed < = 0 )
{
2015-12-21 19:16:05 +00:00
FIB_ITERATE_PUT ( fit ) ;
2010-07-05 15:50:19 +00:00
ev_schedule ( tab - > rt_event ) ;
return ;
}
2015-12-21 19:16:05 +00:00
max_feed - = rt_next_hop_update_net ( tab , n ) ;
2010-07-05 15:50:19 +00:00
}
2015-12-21 19:16:05 +00:00
FIB_ITERATE_END ;
2010-07-05 15:50:19 +00:00
2017-02-22 13:02:03 +00:00
/* State change:
* NHU_DIRTY - > NHU_SCHEDULED
* NHU_RUNNING - > NHU_CLEAN
*/
2010-07-05 15:50:19 +00:00
tab - > nhu_state & = 1 ;
2017-02-22 13:02:03 +00:00
if ( tab - > nhu_state ! = NHU_CLEAN )
2010-07-05 15:50:19 +00:00
ev_schedule ( tab - > rt_event ) ;
}
2000-03-04 22:21:06 +00:00
struct rtable_config *
2015-11-05 11:48:52 +00:00
rt_new_table ( struct symbol * s , uint addr_type )
2000-03-04 22:21:06 +00:00
{
2012-07-20 17:56:57 +00:00
/* Hack that allows 'redefining' the master table */
2016-01-26 10:48:58 +00:00
if ( ( s - > class = = SYM_TABLE ) & &
( s - > def = = new_config - > def_tables [ addr_type ] ) & &
( ( addr_type = = NET_IP4 ) | | ( addr_type = = NET_IP6 ) ) )
2012-07-20 17:56:57 +00:00
return s - > def ;
2000-03-04 22:21:06 +00:00
struct rtable_config * c = cfg_allocz ( sizeof ( struct rtable_config ) ) ;
cf_define_symbol ( s , SYM_TABLE , c ) ;
c - > name = s - > name ;
2015-11-05 11:48:52 +00:00
c - > addr_type = addr_type ;
2000-05-19 19:49:33 +00:00
c - > gc_max_ops = 1000 ;
2000-03-04 22:21:06 +00:00
c - > gc_min_time = 5 ;
2016-01-26 10:48:58 +00:00
add_tail ( & new_config - > tables , & c - > n ) ;
/* First table of each type is kept as default */
if ( ! new_config - > def_tables [ addr_type ] )
new_config - > def_tables [ addr_type ] = c ;
2000-03-04 22:21:06 +00:00
return c ;
}
2000-06-01 17:12:19 +00:00
/**
* rt_lock_table - lock a routing table
* @ r : routing table to be locked
*
* Lock a routing table , because it ' s in use by a protocol ,
* preventing it from being freed when it gets undefined in a new
* configuration .
*/
1999-05-17 20:14:52 +00:00
void
2000-01-16 16:44:50 +00:00
rt_lock_table ( rtable * r )
1999-05-17 20:14:52 +00:00
{
2000-01-16 16:44:50 +00:00
r - > use_count + + ;
}
2000-06-01 17:12:19 +00:00
/**
* rt_unlock_table - unlock a routing table
* @ r : routing table to be unlocked
*
* Unlock a routing table formerly locked by rt_lock_table ( ) ,
* that is , decrease its use count and delete it if it ' s scheduled
* for deletion by configuration changes .
*/
2000-01-16 16:44:50 +00:00
void
rt_unlock_table ( rtable * r )
{
if ( ! - - r - > use_count & & r - > deleted )
{
struct config * conf = r - > deleted ;
DBG ( " Deleting routing table %s \n " , r - > name ) ;
2015-11-09 00:01:12 +00:00
r - > config - > table = NULL ;
2010-07-05 15:50:19 +00:00
if ( r - > hostcache )
rt_free_hostcache ( r ) ;
2000-01-16 16:44:50 +00:00
rem_node ( & r - > n ) ;
fib_free ( & r - > fib ) ;
2010-07-05 15:50:19 +00:00
rfree ( r - > rt_event ) ;
2000-01-16 16:44:50 +00:00
mb_free ( r ) ;
config_del_obstacle ( conf ) ;
}
}
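/*
 * Usage sketch (rt_event() above is a real instance): code that keeps using
 * a table across deferred work brackets it with the lock pair
 *
 *   rt_lock_table(tab);
 *   ... work with tab, possibly spread over scheduled events ...
 *   rt_unlock_table(tab);
 *
 * so a table removed from the configuration is freed only after the last
 * user releases it.
 */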
2000-06-01 17:12:19 +00:00
/**
* rt_commit - commit new routing table configuration
* @ new : new configuration
* @ old : original configuration or % NULL if it ' s boot time config
*
* Scan differences between @ old and @ new configuration and modify
* the routing tables according to these changes . If @ new defines a
* previously unknown table , create it ; if it omits a table existing
* in @ old , schedule it for deletion ( it gets deleted when all protocols
* disconnect from it by calling rt_unlock_table ( ) ) ; if it exists
* in both configurations , leave it unchanged .
*/
2000-01-16 16:44:50 +00:00
void
rt_commit ( struct config * new , struct config * old )
{
struct rtable_config * o , * r ;
1999-05-17 20:14:52 +00:00
2000-01-16 16:44:50 +00:00
DBG ( " rt_commit: \n " ) ;
if ( old )
1999-05-17 20:14:52 +00:00
{
2000-01-16 16:44:50 +00:00
WALK_LIST ( o , old - > tables )
{
rtable * ot = o - > table ;
if ( ! ot - > deleted )
{
2015-11-08 23:42:02 +00:00
struct symbol * sym = cf_find_symbol ( new , o - > name ) ;
2000-01-16 17:40:26 +00:00
if ( sym & & sym - > class = = SYM_TABLE & & ! new - > shutdown )
2000-01-16 16:44:50 +00:00
{
DBG ( " \t %s: same \n " , o - > name ) ;
r = sym - > def ;
r - > table = ot ;
ot - > name = r - > name ;
2000-03-04 22:21:06 +00:00
ot - > config = r ;
2012-07-04 19:31:03 +00:00
if ( o - > sorted ! = r - > sorted )
log ( L_WARN " Reconfiguration of rtable sorted flag not implemented " ) ;
2000-01-16 16:44:50 +00:00
}
else
{
2000-01-16 17:40:26 +00:00
DBG ( " \t %s: deleted \n " , o - > name ) ;
2000-01-16 16:44:50 +00:00
ot - > deleted = old ;
config_add_obstacle ( old ) ;
rt_lock_table ( ot ) ;
rt_unlock_table ( ot ) ;
}
}
}
1999-05-17 20:14:52 +00:00
}
2000-01-16 16:44:50 +00:00
WALK_LIST ( r , new - > tables )
if ( ! r - > table )
{
rtable * t = mb_alloc ( rt_table_pool , sizeof ( struct rtable ) ) ;
DBG ( " \t %s: created \n " , r - > name ) ;
2000-03-04 22:21:06 +00:00
rt_setup ( rt_table_pool , t , r - > name , r ) ;
2000-01-16 16:44:50 +00:00
add_tail ( & routing_tables , & t - > n ) ;
r - > table = t ;
}
DBG ( " \t done \n " ) ;
1999-05-17 20:14:52 +00:00
}
1999-12-01 15:10:21 +00:00
2009-05-31 13:24:27 +00:00
static inline void
2016-01-26 10:48:58 +00:00
do_feed_channel ( struct channel * c , net * n , rte * e )
2009-05-31 13:24:27 +00:00
{
rte_update_lock ( ) ;
2016-01-26 10:48:58 +00:00
if ( c - > ra_mode = = RA_ACCEPTED )
rt_notify_accepted ( c , n , e , NULL , NULL , c - > refeeding ? 2 : 1 ) ;
else if ( c - > ra_mode = = RA_MERGED )
rt_notify_merged ( c , n , NULL , NULL , e , c - > refeeding ? e : NULL , c - > refeeding ) ;
else /* RA_BASIC */
rt_notify_basic ( c , n , e , c - > refeeding ? e : NULL , c - > refeeding ) ;
2009-05-31 13:24:27 +00:00
rte_update_unlock ( ) ;
}
2000-06-01 17:12:19 +00:00
/**
2016-01-26 10:48:58 +00:00
* rt_feed_channel - advertise all routes to a channel
* @ c : channel to be fed
2000-06-01 17:12:19 +00:00
*
2016-01-26 10:48:58 +00:00
* This function performs one pass of advertisement of routes to a channel that
* is in the ES_FEEDING state . It is called by the protocol code as long as it
* has something to do . ( We avoid transferring all the routes in a single pass in
* order not to monopolize CPU time . )
2000-06-01 17:12:19 +00:00
*/
2000-05-19 10:46:26 +00:00
int
2016-01-26 10:48:58 +00:00
rt_feed_channel ( struct channel * c )
2000-05-19 10:46:26 +00:00
{
2016-01-26 10:48:58 +00:00
struct fib_iterator * fit = & c - > feed_fit ;
2000-05-19 10:59:47 +00:00
int max_feed = 256 ;
2000-05-19 10:46:26 +00:00
2016-01-26 10:48:58 +00:00
ASSERT ( c - > export_state = = ES_FEEDING ) ;
if ( ! c - > feed_active )
2000-05-19 10:46:26 +00:00
{
2016-01-26 10:48:58 +00:00
FIB_ITERATE_INIT ( fit , & c - > table - > fib ) ;
c - > feed_active = 1 ;
2000-05-19 10:46:26 +00:00
}
2016-01-26 10:48:58 +00:00
FIB_ITERATE_START ( & c - > table - > fib , fit , net , n )
2000-05-19 10:46:26 +00:00
{
2008-11-14 22:03:15 +00:00
rte * e = n - > routes ;
2000-05-19 10:59:47 +00:00
if ( max_feed < = 0 )
{
2015-12-21 19:16:05 +00:00
FIB_ITERATE_PUT ( fit ) ;
2000-05-19 10:59:47 +00:00
return 0 ;
}
2009-05-31 13:24:27 +00:00
2016-01-26 10:48:58 +00:00
/* FIXME: perhaps we should change feed for RA_ACCEPTED to not use 'new' */
2012-11-10 13:26:13 +00:00
2016-01-26 10:48:58 +00:00
if ( ( c - > ra_mode = = RA_OPTIMAL ) | |
( c - > ra_mode = = RA_ACCEPTED ) | |
( c - > ra_mode = = RA_MERGED ) )
2012-11-10 13:26:13 +00:00
if ( rte_is_valid ( e ) )
2009-05-31 13:24:27 +00:00
{
2016-01-26 10:48:58 +00:00
/* In the meantime, the protocol may have gone down */
if ( c - > export_state ! = ES_FEEDING )
goto done ;
2015-05-31 09:29:53 +00:00
2016-01-26 10:48:58 +00:00
do_feed_channel ( c , n , e ) ;
2009-05-31 13:24:27 +00:00
max_feed - - ;
}
2016-01-26 10:48:58 +00:00
if ( c - > ra_mode = = RA_ANY )
2015-05-31 09:29:53 +00:00
for ( e = n - > routes ; e ; e = e - > next )
2009-05-31 13:24:27 +00:00
{
2016-01-26 10:48:58 +00:00
/* In the meantime, the protocol may have gone down */
if ( c - > export_state ! = ES_FEEDING )
goto done ;
2015-05-31 09:29:53 +00:00
if ( ! rte_is_valid ( e ) )
continue ;
2016-01-26 10:48:58 +00:00
do_feed_channel ( c , n , e ) ;
2009-05-31 13:24:27 +00:00
max_feed - - ;
}
2000-05-19 10:46:26 +00:00
}
2015-12-21 19:16:05 +00:00
FIB_ITERATE_END ;
2000-05-19 10:46:26 +00:00
2016-01-26 10:48:58 +00:00
done :
c - > feed_active = 0 ;
return 1 ;
2000-05-19 10:46:26 +00:00
}
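/*
 * Caller sketch (hypothetical; the real driver lives in the channel/protocol
 * code): while the channel is in ES_FEEDING, the export is advanced one
 * bounded pass at a time, e.g. from an event hook:
 *
 *   if (rt_feed_channel(c))
 *     ... feeding finished, move the channel out of ES_FEEDING ...
 *   else
 *     ... quota exhausted, re-schedule the event and return to the main loop ...
 */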
2000-06-01 17:12:19 +00:00
/**
* rt_feed_channel_abort - abort channel feeding
2016-01-26 10:48:58 +00:00
* @ c : channel
2000-06-01 17:12:19 +00:00
*
2016-01-26 10:48:58 +00:00
* This function is called by the protocol code when the protocol stops or
* ceases to exist during the feeding .
2000-06-01 17:12:19 +00:00
*/
2000-05-19 10:46:26 +00:00
void
2016-01-26 10:48:58 +00:00
rt_feed_channel_abort ( struct channel * c )
2000-05-19 10:46:26 +00:00
{
2016-01-26 10:48:58 +00:00
if ( c - > feed_active )
2000-05-19 10:46:26 +00:00
{
2016-01-26 10:48:58 +00:00
/* Unlink the iterator */
fit_get ( & c - > table - > fib , & c - > feed_fit ) ;
c - > feed_active = 0 ;
2000-05-19 10:46:26 +00:00
}
}
2010-07-26 14:39:27 +00:00
static inline unsigned
ptr_hash ( void * ptr )
{
uintptr_t p = ( uintptr_t ) ptr ;
return p ^ ( p < < 8 ) ^ ( p > > 16 ) ;
}
2015-12-24 14:52:03 +00:00
static inline u32
2010-07-26 14:39:27 +00:00
hc_hash ( ip_addr a , rtable * dep )
{
2015-12-24 14:52:03 +00:00
return ipa_hash ( a ) ^ ptr_hash ( dep ) ;
2010-07-26 14:39:27 +00:00
}
static inline void
hc_insert ( struct hostcache * hc , struct hostentry * he )
{
2015-05-19 06:53:34 +00:00
uint k = he - > hash_key > > hc - > hash_shift ;
2010-07-26 14:39:27 +00:00
he - > next = hc - > hash_table [ k ] ;
hc - > hash_table [ k ] = he ;
}
static inline void
hc_remove ( struct hostcache * hc , struct hostentry * he )
{
struct hostentry * * hep ;
2015-05-19 06:53:34 +00:00
uint k = he - > hash_key > > hc - > hash_shift ;
2010-07-26 14:39:27 +00:00
for ( hep = & hc - > hash_table [ k ] ; * hep ! = he ; hep = & ( * hep ) - > next ) ;
* hep = he - > next ;
}
# define HC_DEF_ORDER 10
# define HC_HI_MARK *4
# define HC_HI_STEP 2
# define HC_HI_ORDER 16 /* Must be at most 16 */
# define HC_LO_MARK / 5
# define HC_LO_STEP 2
# define HC_LO_ORDER 10
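/*
 * Note: HC_HI_MARK and HC_LO_MARK are spliced directly after hsize in
 * hc_alloc_table() below, so (hsize HC_HI_MARK) expands to (hsize * 4) and
 * (hsize HC_LO_MARK) to (hsize / 5): the cache grows by HC_HI_STEP orders
 * once it holds more than four times as many entries as buckets and shrinks
 * by HC_LO_STEP orders when the entry count falls below a fifth of the
 * bucket count.
 */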
static void
hc_alloc_table ( struct hostcache * hc , unsigned order )
{
2016-10-14 13:37:04 +00:00
uint hsize = 1 < < order ;
2010-07-26 14:39:27 +00:00
hc - > hash_order = order ;
2015-12-24 14:52:03 +00:00
hc - > hash_shift = 32 - order ;
2016-10-14 13:37:04 +00:00
hc - > hash_max = ( order > = HC_HI_ORDER ) ? ~ 0U : ( hsize HC_HI_MARK ) ;
hc - > hash_min = ( order < = HC_LO_ORDER ) ? 0U : ( hsize HC_LO_MARK ) ;
2010-07-26 14:39:27 +00:00
hc - > hash_table = mb_allocz ( rt_table_pool , hsize * sizeof ( struct hostentry * ) ) ;
}
2010-07-05 15:50:19 +00:00
static void
2010-07-26 14:39:27 +00:00
hc_resize ( struct hostcache * hc , unsigned new_order )
2010-07-05 15:50:19 +00:00
{
2010-07-26 14:39:27 +00:00
struct hostentry * * old_table = hc - > hash_table ;
struct hostentry * he , * hen ;
2016-10-14 13:37:04 +00:00
uint old_size = 1 < < hc - > hash_order ;
uint i ;
2010-07-26 14:39:27 +00:00
hc_alloc_table ( hc , new_order ) ;
for ( i = 0 ; i < old_size ; i + + )
for ( he = old_table [ i ] ; he ! = NULL ; he = hen )
{
hen = he - > next ;
hc_insert ( hc , he ) ;
}
mb_free ( old_table ) ;
}
static struct hostentry *
2010-07-28 09:45:35 +00:00
hc_new_hostentry ( struct hostcache * hc , ip_addr a , ip_addr ll , rtable * dep , unsigned k )
2010-07-26 14:39:27 +00:00
{
struct hostentry * he = sl_alloc ( hc - > slab ) ;
2017-02-24 13:05:11 +00:00
* he = ( struct hostentry ) {
. addr = a ,
. link = ll ,
. tab = dep ,
. hash_key = k ,
} ;
2010-07-26 14:39:27 +00:00
add_tail ( & hc - > hostentries , & he - > ln ) ;
hc_insert ( hc , he ) ;
hc - > hash_items + + ;
if ( hc - > hash_items > hc - > hash_max )
hc_resize ( hc , hc - > hash_order + HC_HI_STEP ) ;
return he ;
}
static void
hc_delete_hostentry ( struct hostcache * hc , struct hostentry * he )
{
2010-12-07 22:33:55 +00:00
rta_free ( he - > src ) ;
2010-07-26 14:39:27 +00:00
rem_node ( & he - > ln ) ;
hc_remove ( hc , he ) ;
sl_free ( hc - > slab , he ) ;
hc - > hash_items - - ;
if ( hc - > hash_items < hc - > hash_min )
hc_resize ( hc , hc - > hash_order - HC_LO_STEP ) ;
2010-07-05 15:50:19 +00:00
}
static void
rt_init_hostcache ( rtable * tab )
{
struct hostcache * hc = mb_allocz ( rt_table_pool , sizeof ( struct hostcache ) ) ;
init_list ( & hc - > hostentries ) ;
2010-07-26 14:39:27 +00:00
hc - > hash_items = 0 ;
hc_alloc_table ( hc , HC_DEF_ORDER ) ;
hc - > slab = sl_new ( rt_table_pool , sizeof ( struct hostentry ) ) ;
2010-07-27 16:20:12 +00:00
hc - > lp = lp_new ( rt_table_pool , 1008 ) ;
2015-02-21 13:05:20 +00:00
hc - > trie = f_new_trie ( hc - > lp , sizeof ( struct f_trie_node ) ) ;
2010-07-27 16:20:12 +00:00
2010-07-05 15:50:19 +00:00
tab - > hostcache = hc ;
}
static void
rt_free_hostcache ( rtable * tab )
{
struct hostcache * hc = tab - > hostcache ;
node * n ;
WALK_LIST ( n , hc - > hostentries )
{
struct hostentry * he = SKIP_BACK ( struct hostentry , ln , n ) ;
2010-12-07 22:33:55 +00:00
rta_free ( he - > src ) ;
2010-07-05 15:50:19 +00:00
if ( he - > uc )
log ( L_ERR " Hostcache is not empty in table %s " , tab - > name ) ;
}
2010-07-26 14:39:27 +00:00
rfree ( hc - > slab ) ;
2010-07-27 16:20:12 +00:00
rfree ( hc - > lp ) ;
2010-07-26 14:39:27 +00:00
mb_free ( hc - > hash_table ) ;
2010-07-05 15:50:19 +00:00
mb_free ( hc ) ;
}
static void
rt_notify_hostcache ( rtable * tab , net * net )
{
if ( tab - > hcu_scheduled )
return ;
2015-12-24 14:52:03 +00:00
if ( trie_match_net ( tab - > hostcache - > trie , net - > n . addr ) )
rt_schedule_hcu ( tab ) ;
2010-07-05 15:50:19 +00:00
}
static int
if_local_addr ( ip_addr a , struct iface * i )
{
struct ifa * b ;
WALK_LIST ( b , i - > addrs )
if ( ipa_equal ( a , b - > ip ) )
return 1 ;
return 0 ;
}
2010-07-30 23:04:32 +00:00
static u32
rt_get_igp_metric ( rte * rt )
{
2010-08-02 11:11:53 +00:00
eattr * ea = ea_find ( rt - > attrs - > eattrs , EA_GEN_IGP_METRIC ) ;
if ( ea )
return ea - > u . data ;
2010-07-30 23:04:32 +00:00
rta * a = rt - > attrs ;
2011-05-05 12:14:20 +00:00
# ifdef CONFIG_OSPF
2010-07-30 23:04:32 +00:00
if ( ( a - > source = = RTS_OSPF ) | |
( a - > source = = RTS_OSPF_IA ) | |
( a - > source = = RTS_OSPF_EXT1 ) )
return rt - > u . ospf . metric1 ;
2011-05-05 12:14:20 +00:00
# endif
2010-07-30 23:04:32 +00:00
2011-05-05 12:14:20 +00:00
# ifdef CONFIG_RIP
2010-07-30 23:04:32 +00:00
if ( a - > source = = RTS_RIP )
return rt - > u . rip . metric ;
2011-05-05 12:14:20 +00:00
# endif
2010-07-30 23:04:32 +00:00
2016-05-06 13:48:35 +00:00
if ( a - > source = = RTS_DEVICE )
2010-07-30 23:04:32 +00:00
return 0 ;
return IGP_METRIC_UNKNOWN ;
}
2010-07-05 15:50:19 +00:00
static int
rt_update_hostentry ( rtable * tab , struct hostentry * he )
{
2010-12-07 22:33:55 +00:00
rta * old_src = he - > src ;
2010-07-27 16:20:12 +00:00
int pxlen = 0 ;
2010-07-05 15:50:19 +00:00
2015-12-24 14:52:03 +00:00
/* Reset the hostentry */
2010-12-07 22:33:55 +00:00
he - > src = NULL ;
2017-02-24 13:05:11 +00:00
he - > nexthop_linkable = 0 ;
2010-12-07 22:33:55 +00:00
he - > dest = RTD_UNREACHABLE ;
he - > igp_metric = 0 ;
2015-12-24 14:52:03 +00:00
net_addr he_addr ;
net_fill_ip_host ( & he_addr , he - > addr ) ;
net * n = net_route ( tab , & he_addr ) ;
2010-07-30 23:04:32 +00:00
if ( n )
2010-07-05 15:50:19 +00:00
{
2012-11-10 13:26:13 +00:00
rte * e = n - > routes ;
rta * a = e - > attrs ;
2015-11-05 11:48:52 +00:00
pxlen = n - > n . addr - > pxlen ;
2010-07-05 15:50:19 +00:00
2010-08-03 06:26:47 +00:00
if ( a - > hostentry )
{
/* Recursive route should not depend on another recursive route */
2015-11-05 11:48:52 +00:00
log ( L_WARN " Next hop address %I resolvable through recursive route for %N " ,
he - > addr , n - > n . addr ) ;
2010-12-07 22:33:55 +00:00
goto done ;
2010-08-03 06:26:47 +00:00
}
2010-12-07 22:33:55 +00:00
2017-03-17 14:48:09 +00:00
he - > dest = a - > dest ;
2017-02-24 13:05:11 +00:00
he - > nexthop_linkable = 1 ;
2017-03-17 14:48:09 +00:00
if ( he - > dest = = RTD_UNICAST )
{
for ( struct nexthop * nh = & ( a - > nh ) ; nh ; nh = nh - > next )
if ( ipa_zero ( nh - > gw ) )
{
if ( if_local_addr ( he - > addr , nh - > iface ) )
{
/* The host address is a local address, this is not valid */
log ( L_WARN " Next hop address %I is a local address of iface %s " ,
he - > addr , nh - > iface - > name ) ;
goto done ;
}
he - > nexthop_linkable = 0 ;
break ;
}
}
2017-03-08 15:27:18 +00:00
2010-12-07 22:33:55 +00:00
he - > src = rta_clone ( a ) ;
2012-11-10 13:26:13 +00:00
he - > igp_metric = rt_get_igp_metric ( e ) ;
2010-07-05 15:50:19 +00:00
}
2017-03-08 15:27:18 +00:00
done :
2010-07-27 16:20:12 +00:00
/* Add a prefix range to the trie */
2015-12-24 14:52:03 +00:00
trie_add_prefix ( tab - > hostcache - > trie , & he_addr , pxlen , he_addr . pxlen ) ;
2010-07-27 16:20:12 +00:00
2010-12-07 22:33:55 +00:00
rta_free ( old_src ) ;
return old_src ! = he - > src ;
2010-07-05 15:50:19 +00:00
}
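/*
 * The prefix range added to the trie in rt_update_hostentry() covers all
 * prefixes between the length of the route currently resolving he->addr and
 * a full host prefix, so rt_notify_hostcache() can cheaply detect table
 * changes that could alter this resolution and schedule a host cache update
 * only in that case.
 */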
static void
rt_update_hostcache ( rtable * tab )
{
struct hostcache * hc = tab - > hostcache ;
struct hostentry * he ;
node * n , * x ;
2010-07-27 16:20:12 +00:00
/* Reset the trie */
lp_flush ( hc - > lp ) ;
2015-02-21 13:05:20 +00:00
hc - > trie = f_new_trie ( hc - > lp , sizeof ( struct f_trie_node ) ) ;
2010-07-27 16:20:12 +00:00
2010-07-05 15:50:19 +00:00
WALK_LIST_DELSAFE ( n , x , hc - > hostentries )
{
he = SKIP_BACK ( struct hostentry , ln , n ) ;
if ( ! he - > uc )
{
2010-07-26 14:39:27 +00:00
hc_delete_hostentry ( hc , he ) ;
2010-07-05 15:50:19 +00:00
continue ;
}
if ( rt_update_hostentry ( tab , he ) )
rt_schedule_nhu ( he - > tab ) ;
}
tab - > hcu_scheduled = 0 ;
}
2017-03-22 14:00:07 +00:00
struct hostentry *
2012-08-14 14:25:22 +00:00
rt_get_hostentry ( rtable * tab , ip_addr a , ip_addr ll , rtable * dep )
2010-07-05 15:50:19 +00:00
{
struct hostentry * he ;
if ( ! tab - > hostcache )
rt_init_hostcache ( tab ) ;
2015-12-24 14:52:03 +00:00
u32 k = hc_hash ( a , dep ) ;
2010-07-26 14:39:27 +00:00
struct hostcache * hc = tab - > hostcache ;
for ( he = hc - > hash_table [ k > > hc - > hash_shift ] ; he ! = NULL ; he = he - > next )
if ( ipa_equal ( he - > addr , a ) & & ( he - > tab = = dep ) )
return he ;
2010-07-05 15:50:19 +00:00
2017-03-22 14:00:07 +00:00
he = hc_new_hostentry ( hc , a , ipa_zero ( ll ) ? a : ll , dep , k ) ;
2010-07-26 14:39:27 +00:00
rt_update_hostentry ( tab , he ) ;
2010-07-05 15:50:19 +00:00
return he ;
}
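/*
 * Typical use (sketch; the callers live in protocols that resolve recursive
 * next hops, names here are illustrative): look up or create the cache entry
 * in the IGP table and attach it to the route attributes, so that later IGP
 * changes trigger rt_schedule_nhu() on the dependent table:
 *
 *   struct hostentry *he = rt_get_hostentry(igp_table, gw, ll, c->table);
 *   rta_apply_hostentry(a, he, NULL);
 */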
2012-08-14 14:25:22 +00:00
1999-12-01 15:10:21 +00:00
/*
* CLI commands
*/
2000-03-12 20:30:53 +00:00
static void
2000-05-06 22:57:39 +00:00
rt_show_rte ( struct cli * c , byte * ia , rte * e , struct rt_show_data * d , ea_list * tmpa )
2000-03-12 20:30:53 +00:00
{
2015-12-18 10:57:38 +00:00
byte from [ IPA_MAX_TEXT_LENGTH + 8 ] ;
2010-02-02 23:19:24 +00:00
byte tm [ TM_DATETIME_BUFFER_SIZE ] , info [ 256 ] ;
2000-03-12 20:30:53 +00:00
rta * a = e - > attrs ;
2010-02-08 11:42:09 +00:00
int primary = ( e - > net - > routes = = e ) ;
2011-10-06 20:48:49 +00:00
int sync_error = ( e - > net - > n . flags & KRF_SYNC_ERROR ) ;
2012-08-14 14:25:22 +00:00
void ( * get_route_info ) ( struct rte * , byte * buf , struct ea_list * attrs ) ;
2016-05-06 13:48:35 +00:00
struct nexthop * nh ;
2000-03-12 20:30:53 +00:00
2010-02-02 23:19:24 +00:00
tm_format_datetime ( tm , & config - > tf_route , e - > lastmod ) ;
2016-05-06 13:48:35 +00:00
if ( ipa_nonzero ( a - > from ) & & ! ipa_equal ( a - > from , a - > nh . gw ) )
1999-12-01 15:10:21 +00:00
bsprintf ( from , " from %I " , a - > from ) ;
else
from [ 0 ] = 0 ;
2012-08-14 14:25:22 +00:00
get_route_info = a - > src - > proto - > proto - > get_route_info ;
if ( get_route_info | | d - > verbose )
2000-05-06 22:57:39 +00:00
{
/* Need to normalize the extended attributes */
ea_list * t = tmpa ;
t = ea_append ( t , a - > eattrs ) ;
tmpa = alloca ( ea_scan ( t ) ) ;
ea_merge ( t , tmpa ) ;
2000-05-30 21:24:15 +00:00
ea_sort ( tmpa ) ;
2000-05-06 22:57:39 +00:00
}
2012-08-14 14:25:22 +00:00
if ( get_route_info )
get_route_info ( e , info , tmpa ) ;
1999-12-01 15:10:21 +00:00
else
bsprintf ( info , " (%d) " , e - > pref ) ;
2017-03-08 15:27:18 +00:00
2017-03-30 11:52:01 +00:00
if ( ! d - > show_counter )
cli_printf ( c , - 1007 , " Table %s: " , d - > tit - > table - > name ) ;
2017-03-08 15:27:18 +00:00
cli_printf ( c , - 1007 , " %-18s %s [%s %s%s]%s%s " , ia , rta_dest_name ( a - > dest ) ,
a - > src - > proto - > name , tm , from , primary ? ( sync_error ? " ! " : " * " ) : " " , info ) ;
if ( a - > dest = = RTD_UNICAST )
for ( nh = & ( a - > nh ) ; nh ; nh = nh - > next )
2016-06-13 13:49:53 +00:00
{
2017-03-08 15:27:18 +00:00
char mpls [ MPLS_MAX_LABEL_STACK * 12 + 5 ] , * lsp = mpls ;
2016-06-13 13:49:53 +00:00
if ( nh - > labels )
2017-03-08 15:27:18 +00:00
{
2016-06-13 13:49:53 +00:00
lsp + = bsprintf ( lsp , " mpls %d " , nh - > label [ 0 ] ) ;
for ( int i = 1 ; i < nh - > labels ; i + + )
lsp + = bsprintf ( lsp , " /%d " , nh - > label [ i ] ) ;
}
2017-03-08 15:27:18 +00:00
* lsp = ' \0 ' ;
2016-06-13 13:49:53 +00:00
if ( a - > nh . next )
2017-03-08 15:27:18 +00:00
cli_printf ( c , - 1007 , " \t via %I%s on %s weight %d " , nh - > gw , mpls , nh - > iface - > name , nh - > weight + 1 ) ;
2016-06-13 13:49:53 +00:00
else
2017-03-08 15:27:18 +00:00
cli_printf ( c , - 1007 , " \t via %I%s on %s " , nh - > gw , mpls , nh - > iface - > name ) ;
2016-06-13 13:49:53 +00:00
}
2017-03-08 15:27:18 +00:00
1999-12-01 15:10:21 +00:00
if ( d - > verbose )
2000-05-06 22:57:39 +00:00
rta_show ( c , a , tmpa ) ;
1999-12-01 15:10:21 +00:00
}
static void
rt_show_net ( struct cli * c , net * n , struct rt_show_data * d )
{
rte * e , * ee ;
2015-12-18 10:57:38 +00:00
byte ia [ NET_MAX_TEXT_LENGTH + 1 ] ;
2014-04-28 15:31:03 +00:00
struct ea_list * tmpa ;
2016-01-26 10:48:58 +00:00
struct channel * ec = d - > export_channel ;
2014-04-28 15:31:03 +00:00
int first = 1 ;
int pass = 0 ;
1999-12-01 15:10:21 +00:00
2016-12-07 14:36:15 +00:00
bsnprintf ( ia , sizeof ( ia ) , " %N " , n - > n . addr ) ;
2014-04-28 15:31:03 +00:00
for ( e = n - > routes ; e ; e = e - > next )
1999-12-01 15:10:21 +00:00
{
2012-11-15 00:29:01 +00:00
if ( rte_is_filtered ( e ) ! = d - > filtered )
2012-11-10 13:26:13 +00:00
continue ;
2000-05-07 11:28:59 +00:00
d - > rt_counter + + ;
2014-04-28 15:31:03 +00:00
d - > net_counter + = first ;
first = 0 ;
if ( pass )
continue ;
1999-12-01 15:10:21 +00:00
ee = e ;
rte_update_lock ( ) ; /* We use the update buffer for filtering */
2012-08-14 14:25:22 +00:00
tmpa = make_tmp_attrs ( e , rte_update_pool ) ;
2014-04-28 15:31:03 +00:00
2015-06-08 00:20:43 +00:00
/* Special case for merged export */
2016-01-26 10:48:58 +00:00
if ( ( d - > export_mode = = RSEM_EXPORT ) & & ( ec - > ra_mode = = RA_MERGED ) )
2015-06-08 00:20:43 +00:00
{
rte * rt_free ;
2016-11-08 16:03:31 +00:00
e = rt_export_merged ( ec , n , & rt_free , & tmpa , rte_update_pool , 1 ) ;
2015-06-08 00:20:43 +00:00
pass = 1 ;
if ( ! e )
{ e = ee ; goto skip ; }
}
else if ( d - > export_mode )
2000-05-06 22:57:39 +00:00
{
2014-04-28 15:31:03 +00:00
struct proto * ep = d - > export_protocol ;
int ic = ep - > import_control ? ep - > import_control ( ep , & e , & tmpa , rte_update_pool ) : 0 ;
2016-01-26 10:48:58 +00:00
if ( ec - > ra_mode = = RA_OPTIMAL | | ec - > ra_mode = = RA_MERGED )
2014-04-28 15:31:03 +00:00
pass = 1 ;
if ( ic < 0 )
goto skip ;
2014-10-02 10:46:26 +00:00
if ( d - > export_mode > RSEM_PREEXPORT )
2000-05-06 22:57:39 +00:00
{
2014-04-28 15:31:03 +00:00
/*
* FIXME - This shows what should be exported according to current
* filters , but not what was really exported . ' configure soft '
* command may change the export filter without updating the routes .
*/
2014-10-02 10:46:26 +00:00
int do_export = ( ic > 0 ) | |
2016-01-26 10:48:58 +00:00
( f_run ( ec - > out_filter , & e , & tmpa , rte_update_pool , FF_FORCE_TMPATTR ) < = F_ACCEPT ) ;
2014-04-28 15:31:03 +00:00
2014-10-02 10:46:26 +00:00
if ( do_export ! = ( d - > export_mode = = RSEM_EXPORT ) )
2014-04-28 15:31:03 +00:00
goto skip ;
2016-01-26 10:48:58 +00:00
if ( ( d - > export_mode = = RSEM_EXPORT ) & & ( ec - > ra_mode = = RA_ACCEPTED ) )
2014-04-28 15:31:03 +00:00
pass = 1 ;
2000-05-06 22:57:39 +00:00
}
}
2014-04-28 15:31:03 +00:00
if ( d - > show_protocol & & ( d - > show_protocol ! = e - > attrs - > src - > proto ) )
goto skip ;
if ( f_run ( d - > filter , & e , & tmpa , rte_update_pool , FF_FORCE_TMPATTR ) > F_ACCEPT )
goto skip ;
if ( d - > stats < 2 )
rt_show_rte ( c , ia , e , d , tmpa ) ;
2017-03-30 11:52:01 +00:00
d - > show_counter + + ;
2014-04-28 15:31:03 +00:00
ia [ 0 ] = 0 ;
skip :
1999-12-01 15:10:21 +00:00
if ( e ! = ee )
2011-05-21 20:48:08 +00:00
{
rte_free ( e ) ;
e = ee ;
}
1999-12-01 15:10:21 +00:00
rte_update_unlock ( ) ;
2014-04-28 15:31:03 +00:00
2000-05-08 10:37:45 +00:00
if ( d - > primary_only )
2000-05-06 22:57:39 +00:00
break ;
1999-12-01 15:10:21 +00:00
}
}
2016-01-26 10:48:58 +00:00
static struct channel *
rt_show_export_channel ( struct rt_show_data * d )
{
if ( ! d - > export_protocol - > rt_notify )
return NULL ;
2017-03-30 11:52:01 +00:00
return proto_find_channel_by_table ( d - > export_protocol , d - > tit - > table ) ;
}
static void
rt_show_cleanup ( struct cli * c )
{
struct rt_show_data * d = c - > rover ;
/* Unlink the iterator */
fit_get ( & d - > tit - > table - > fib , & d - > fit ) ;
rt_unlock_table ( d - > tit - > table ) ;
while ( NODE_VALID ( NODE_NEXT ( d - > tit ) ) )
rt_unlock_table ( ( d - > tit = NODE_NEXT ( d - > tit ) ) - > table ) ;
2016-01-26 10:48:58 +00:00
}
1999-12-01 15:10:21 +00:00
static void
rt_show_cont ( struct cli * c )
{
struct rt_show_data * d = c - > rover ;
2000-03-12 22:53:05 +00:00
# ifdef DEBUGGING
unsigned max = 4 ;
# else
unsigned max = 64 ;
# endif
2017-03-30 11:52:01 +00:00
struct fib * fib = & d - > tit - > table - > fib ;
1999-12-01 15:10:21 +00:00
struct fib_iterator * it = & d - > fit ;
2016-01-26 10:48:58 +00:00
if ( d - > export_mode )
1999-12-01 15:10:21 +00:00
{
2016-01-26 10:48:58 +00:00
/* Ensure we have current export channel */
d - > export_channel = rt_show_export_channel ( d ) ;
if ( ! d - > export_channel | | ( d - > export_channel - > export_state = = ES_DOWN ) )
{
cli_printf ( c , 8005 , " Channel is down " ) ;
2017-03-30 11:52:01 +00:00
rt_show_cleanup ( c ) ;
2000-05-06 22:57:39 +00:00
goto done ;
}
2016-01-26 10:48:58 +00:00
}
FIB_ITERATE_START ( fib , it , net , n )
{
1999-12-01 15:10:21 +00:00
if ( ! max - - )
{
2015-12-21 19:16:05 +00:00
FIB_ITERATE_PUT ( it ) ;
1999-12-01 15:10:21 +00:00
return ;
}
rt_show_net ( c , n , d ) ;
}
2015-12-21 19:16:05 +00:00
FIB_ITERATE_END ;
2017-03-30 11:52:01 +00:00
if ( ! d - > show_counter & & ( d - > tables_defined_by & RSD_TDB_SET ) )
cli_printf ( c , - 1007 , " Table %s: " , d - > tit - > table - > name ) ;
if ( d - > stats & & d - > stats_by_table )
cli_printf ( c , - 1007 , " %d of %d routes for %d networks in table %s " , d - > show_counter - d - > show_counter_last , d - > rt_counter - d - > rt_counter_last , d - > net_counter - d - > net_counter_last , d - > tit - > table - > name ) ;
rt_unlock_table ( d - > tit - > table ) ;
d - > table_counter + + ;
if ( NODE_VALID ( NODE_NEXT ( d - > tit ) ) )
{
d - > tit = NODE_NEXT ( d - > tit ) ;
FIB_ITERATE_INIT ( & d - > fit , & d - > tit - > table - > fib ) ;
d - > show_counter_last = d - > show_counter ;
d - > rt_counter_last = d - > rt_counter ;
d - > net_counter_last = d - > net_counter ;
d - > show_counter = 0 ;
d - > rt_counter = 0 ;
d - > net_counter = 0 ;
return ;
}
2000-05-07 11:28:59 +00:00
if ( d - > stats )
2017-03-30 11:52:01 +00:00
cli_printf ( c , 14 , " Total: %d of %d routes for %d networks in %d tables " , d - > show_counter , d - > rt_counter , d - > net_counter , d - > table_counter ) ;
2000-05-07 11:28:59 +00:00
else
cli_printf ( c , 0 , " " ) ;
2000-05-06 22:57:39 +00:00
done :
1999-12-01 15:10:21 +00:00
c - > cont = c - > cleanup = NULL ;
}
2017-03-30 11:52:01 +00:00
void rt_show_add_table ( struct rt_show_data * d , rtable * t )
1999-12-01 15:10:21 +00:00
{
2017-03-30 11:52:01 +00:00
struct rt_show_data_rtable * rsdr = cfg_alloc ( sizeof ( struct rt_show_data_rtable ) ) ;
rsdr - > table = t ;
add_tail ( & ( d - > table ) , & ( rsdr - > n ) ) ;
1999-12-01 15:10:21 +00:00
}
2017-03-30 11:52:01 +00:00
static inline void
rt_show_get_table ( struct proto * p , struct rt_show_data * d )
2016-01-26 10:48:58 +00:00
{
2017-03-30 11:52:01 +00:00
struct channel * c ;
WALK_LIST ( c , p - > channels )
if ( c - > table )
rt_show_add_table ( d , c - > table ) ;
2016-01-26 10:48:58 +00:00
2017-03-30 11:52:01 +00:00
}
2016-01-26 10:48:58 +00:00
2017-03-30 11:52:01 +00:00
static inline void
rt_show_get_default_table ( struct rt_show_data * d )
{
if ( d - > export_protocol | | d - > show_protocol )
{
rt_show_get_table ( d - > export_protocol ? : d - > show_protocol , d ) ;
return ;
}
2016-01-26 10:48:58 +00:00
2017-03-30 11:52:01 +00:00
for ( int i = 1 ; i < NET_MAX ; i + + )
if ( config - > def_tables [ i ] )
rt_show_add_table ( d , config - > def_tables [ i ] - > table ) ;
2016-01-26 10:48:58 +00:00
}
1999-12-01 15:10:21 +00:00
void
rt_show ( struct rt_show_data * d )
{
net * n ;
2017-03-30 11:52:01 +00:00
/* If no tables were specified explicitly, fall back to the default ones. */
if ( EMPTY_LIST ( d - > table ) ) rt_show_get_default_table ( d ) ;
2013-02-25 09:39:46 +00:00
2014-04-28 15:31:03 +00:00
/* Filtered routes are not exported and have no sensible ordering */
if ( d - > filtered & & ( d - > export_mode | | d - > primary_only ) )
cli_msg ( 0 , " " ) ;
2015-12-24 14:52:03 +00:00
if ( ! d - > addr )
1999-12-01 15:10:21 +00:00
{
2017-03-30 11:52:01 +00:00
struct rt_show_data_rtable * rsdr ;
WALK_LIST ( rsdr , d - > table )
{
rt_lock_table ( rsdr - > table ) ;
}
d - > tit = HEAD ( d - > table ) ;
FIB_ITERATE_INIT ( & d - > fit , & d - > tit - > table - > fib ) ;
1999-12-01 15:10:21 +00:00
this_cli - > cont = rt_show_cont ;
this_cli - > cleanup = rt_show_cleanup ;
this_cli - > rover = d ;
}
else
{
2016-01-26 10:48:58 +00:00
if ( d - > export_mode )
{
/* Find channel associated with the export protocol */
d - > export_channel = rt_show_export_channel ( d ) ;
if ( ! d - > export_channel | | ( d - > export_channel - > export_state = = ES_DOWN ) )
{
cli_msg ( 8005 , " Channel is down " ) ;
return ;
}
}
2017-03-30 11:52:01 +00:00
struct rt_show_data_rtable * rsdr , * rn ;
WALK_LIST_DELSAFE ( rsdr , rn , d - > table )
2016-06-29 07:56:33 +00:00
{
2017-03-30 11:52:01 +00:00
/* Keep only tables whose net type matches the query */
if ( rsdr - > table - > addr_type = = d - > addr - > type )
continue ;
if ( d - > tables_defined_by & RSD_TDB_NMN )
{
cli_msg ( 8001 , " Incompatible type of prefix/ip with table %s " , rsdr - > table - > name ) ;
return ;
}
rem_node ( & ( rsdr - > n ) ) ;
2016-06-29 07:56:33 +00:00
}
2017-03-30 11:52:01 +00:00
WALK_LIST ( rsdr , d - > table )
{
d - > tit = rsdr ;
2014-11-08 22:52:42 +00:00
2017-03-30 11:52:01 +00:00
if ( d - > show_for )
n = net_route ( rsdr - > table , d - > addr ) ;
else
n = net_find ( rsdr - > table , d - > addr ) ;
if ( n )
rt_show_net ( this_cli , n , d ) ;
}
2014-11-08 22:52:42 +00:00
if ( d - > rt_counter )
cli_msg ( 0 , " " ) ;
1999-12-01 15:10:21 +00:00
else
2017-03-30 11:52:01 +00:00
cli_msg ( 8001 , " Network not found in any specified table " ) ;
1999-12-01 15:10:21 +00:00
}
}
2000-06-02 12:29:55 +00:00
/*
* Documentation for functions declared inline in route . h
*/
#if 0
/**
* net_find - find a network entry
* @ tab : a routing table
* @ addr : address of the network
*
* net_find ( ) looks up the given network in routing table @ tab and
* returns a pointer to its & net entry or % NULL if no such network
* exists .
*/
2015-11-05 11:48:52 +00:00
static inline net * net_find ( rtable * tab , net_addr * addr )
2000-06-02 12:29:55 +00:00
{ DUMMY ; }
/**
* net_get - obtain a network entry
* @ tab : a routing table
* @ addr : address of the network
*
* net_get ( ) looks up the given network in routing table @ tab and
* returns a pointer to its & net entry . If no such entry exists , it ' s
* created .
*/
2015-11-05 11:48:52 +00:00
static inline net * net_get ( rtable * tab , net_addr * addr )
2000-06-02 12:29:55 +00:00
{ DUMMY ; }
/**
* rte_cow - copy a route for writing
* @ r : a route entry to be copied
*
* rte_cow ( ) takes a & rte and prepares it for modification . The exact action
* taken depends on the flags of the & rte - - if it ' s a temporary entry , it ' s
* just returned unchanged , else a new temporary entry with the same contents
* is created .
*
* The primary use of this function is inside the filter machinery - - when
* a filter wants to modify & rte contents ( to change the preference or to
* attach another set of attributes ) , it must ensure that the & rte is not
* shared with anyone else ( and especially that it isn ' t stored in any routing
* table ) .
*
2000-06-07 12:29:08 +00:00
* Result : a pointer to the new writable & rte .
2000-06-02 12:29:55 +00:00
*/
static inline rte * rte_cow ( rte * r )
{ DUMMY ; }
# endif