mirror of https://gitlab.nic.cz/labs/bird.git synced 2024-12-22 01:31:55 +00:00

Merge branch 'add-path'

Ondrej Zajicek 2014-02-06 20:15:05 +01:00
commit 5c200e0a4d
11 changed files with 179 additions and 178 deletions

View File

@@ -1677,7 +1677,7 @@ for each neighbor using the following configuration parameters:
attributes to be transparent (for example does not prepend its AS number to
AS PATH attribute and keeps MED attribute). Default: disabled.
<tag>secondary <m/switch/</tag> Usually, if an import filter
<tag>secondary <m/switch/</tag> Usually, if an export filter
rejects a selected route, no other route is propagated for
that network. This option allows BIRD to try the next route in
order until one that is accepted is found or all routes for
@@ -1687,6 +1687,15 @@ for each neighbor using the following configuration parameters:
This option requires that the connected routing table is
<ref id="dsc-sorted" name="sorted">. Default: off.
<tag>add paths <m/switch/|rx|tx</tag>
Standard BGP can propagate only one path (route) per destination network
(usually the selected one). This option controls the add-path protocol
extension, which allows any number of paths to a destination to be
advertised. Note that to be active, add-path has to be enabled on both
sides of the BGP session, but it can be enabled separately for the RX
and TX directions. When active, all available routes accepted by the
export filter are advertised to the neighbor. Default: off. (A
configuration sketch follows this excerpt.)
<tag>allow local as [<m/number/]</tag>
BGP prevents routing loops by rejecting received routes with
the local AS number in the AS path. This option allows to
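
A minimal configuration sketch for the new option; the protocol name, AS numbers and neighbor address below are placeholders, not part of this commit, and the peer has to enable add-path as well (e.g. in the RX direction) before the extension becomes active:

protocol bgp multipath_peer {
	local as 65000;
	neighbor 192.0.2.1 as 65001;
	add paths tx;		# advertise all routes accepted by the export filter
	export all;
}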

View File

@@ -6,6 +6,9 @@
* Can be freely distributed and used under the terms of the GNU GPL.
*/
#ifndef _BIRD_BITOPTS_H_
#define _BIRD_BITOPTS_H_
/*
* Bit mask operations:
*
@@ -19,3 +22,8 @@ u32 u32_mkmask(unsigned n);
int u32_masklen(u32 x);
u32 u32_log2(u32 v);
static inline u32 u32_hash(u32 v) { return v * 2902958171u; }
#endif

View File

@@ -3,7 +3,9 @@
#define HASH(type) struct { type **data; uint count, order; }
#define HASH_TYPE(v) typeof(** (v).data)
#define HASH_SIZE(v) (1 << (v).order)
#define HASH_MASK(v) ((1 << (v).order)-1)
#define HASH_EQ(v,id,k1,k2...) (id##_EQ(k1, k2))
#define HASH_FN(v,id,key...) ((u32) (id##_FN(key)) >> (32 - (v).order))
#define HASH_INIT(v,pool,init_order) \
@@ -15,16 +17,16 @@
#define HASH_FIND(v,id,key...) \
({ \
uint _h = id##_FN((key)) & HASH_MASK(v); \
u32 _h = HASH_FN(v, id, key); \
HASH_TYPE(v) *_n = (v).data[_h]; \
while (_n && !id##_EQ(id##_KEY(_n), (key))) \
while (_n && !HASH_EQ(v, id, id##_KEY(_n), key)) \
_n = id##_NEXT(_n); \
_n; \
})
#define HASH_INSERT(v,id,node) \
({ \
uint _h = id##_FN(id##_KEY((node))) & HASH_MASK(v); \
u32 _h = HASH_FN(v, id, id##_KEY((node))); \
HASH_TYPE(v) **_nn = (v).data + _h; \
id##_NEXT(node) = *_nn; \
*_nn = node; \
@@ -33,72 +35,117 @@
#define HASH_DO_REMOVE(v,id,_nn) \
({ \
HASH_TYPE(v) *_n = *_nn; \
if (_n) \
{ \
*_nn = id##_NEXT(_n); \
*_nn = id##_NEXT((*_nn)); \
(v).count--; \
} \
_n; \
})
#define HASH_DELETE(v,id,key...) \
({ \
uint _h = id##_FN((key)) & HASH_MASK(v); \
HASH_TYPE(v) **_nn = (v).data + _h; \
u32 _h = HASH_FN(v, id, key); \
HASH_TYPE(v) *_n, **_nn = (v).data + _h; \
\
while ((*_nn) && !id##_EQ(id##_KEY((*_nn)), (key))) \
while ((*_nn) && !HASH_EQ(v, id, id##_KEY((*_nn)), key)) \
_nn = &(id##_NEXT((*_nn))); \
\
if (_n = *_nn) \
HASH_DO_REMOVE(v,id,_nn); \
_n; \
})
#define HASH_REMOVE(v,id,node) \
({ \
uint _h = id##_FN(id##_KEY((node))) & HASH_MASK(v); \
HASH_TYPE(v) **_nn = (v).data + _h; \
u32 _h = HASH_FN(v, id, id##_KEY((node))); \
HASH_TYPE(v) *_n, **_nn = (v).data + _h; \
\
while ((*_nn) && (*_nn != (node))) \
_nn = &(id##_NEXT((*_nn))); \
\
if (_n = *_nn) \
HASH_DO_REMOVE(v,id,_nn); \
_n; \
})
#define HASH_REHASH(v,id,pool,step) \
({ \
HASH_TYPE(v) *_n, *_n2, **_od; \
uint _i, _s; \
uint _i, _os; \
\
_s = HASH_SIZE(v); \
_os = HASH_SIZE(v); \
_od = (v).data; \
(v).count = 0; \
(v).order += (step); \
(v).data = mb_allocz(pool, HASH_SIZE(v) * sizeof(* (v).data)); \
\
for (_i = 0; _i < _s; _i++) \
for (_i = 0; _i < _os; _i++) \
for (_n = _od[_i]; _n && (_n2 = id##_NEXT(_n), 1); _n = _n2) \
HASH_INSERT(v, id, _n); \
\
mb_free(_od); \
})
#define HASH_DEFINE_REHASH_FN(id, type) \
static void id##_REHASH_FN(void *v, pool *p, int step) \
#define REHASH_LO_MARK(a,b,c,d,e,f) a
#define REHASH_HI_MARK(a,b,c,d,e,f) b
#define REHASH_LO_STEP(a,b,c,d,e,f) c
#define REHASH_HI_STEP(a,b,c,d,e,f) d
#define REHASH_LO_BOUND(a,b,c,d,e,f) e
#define REHASH_HI_BOUND(a,b,c,d,e,f) f
#define HASH_DEFINE_REHASH_FN(id,type) \
static void id##_REHASH(void *v, pool *p, int step) \
{ HASH_REHASH(* (HASH(type) *) v, id, p, step); }
#define HASH_TRY_REHASH_UP(v,id,pool) \
#define HASH_MAY_STEP_UP(v,id,pool) HASH_MAY_STEP_UP_(v,pool, id##_REHASH, id##_PARAMS)
#define HASH_MAY_STEP_DOWN(v,id,pool) HASH_MAY_STEP_DOWN_(v,pool, id##_REHASH, id##_PARAMS)
#define HASH_MAY_RESIZE_DOWN(v,id,pool) HASH_MAY_RESIZE_DOWN_(v,pool, id##_REHASH, id##_PARAMS)
#define HASH_MAY_STEP_UP_(v,pool,rehash_fn,args) \
({ \
if (((v).order < id##_REHASH_MAX) && ((v).count > HASH_SIZE(v))) \
id##_REHASH_FN(&v, pool, 1); \
if (((v).count > (HASH_SIZE(v) REHASH_HI_MARK(args))) && \
((v).order < (REHASH_HI_BOUND(args)))) \
rehash_fn(&(v), pool, REHASH_HI_STEP(args)); \
})
#define HASH_TRY_REHASH_DOWN(v,id,pool) \
#define HASH_MAY_STEP_DOWN_(v,pool,rehash_fn,args) \
({ \
if (((v).order > id##_REHASH_MIN) && ((v).count < HASH_SIZE(v)/2)) \
id##_REHASH_FN(&v, pool, -1); \
if (((v).count < (HASH_SIZE(v) REHASH_LO_MARK(args))) && \
((v).order > (REHASH_LO_BOUND(args)))) \
rehash_fn(&(v), pool, -(REHASH_LO_STEP(args))); \
})
#define HASH_MAY_RESIZE_DOWN_(v,pool,rehash_fn,args) \
({ \
int _o = (v).order; \
while (((v).count < ((1 << _o) REHASH_LO_MARK(args))) && \
(_o > (REHASH_LO_BOUND(args)))) \
_o -= (REHASH_LO_STEP(args)); \
if (_o < (v).order) \
rehash_fn(&(v), pool, _o - (int) (v).order); \
})
#define HASH_INSERT2(v,id,pool,node) \
({ \
HASH_INSERT(v, id, node); \
HASH_MAY_STEP_UP(v, id, pool); \
})
#define HASH_DELETE2(v,id,pool,key...) \
({ \
HASH_TYPE(v) *_n = HASH_DELETE(v, id, key); \
if (_n) HASH_MAY_STEP_DOWN(v, id, pool); \
_n; \
})
#define HASH_REMOVE2(v,id,pool,node) \
({ \
HASH_TYPE(v) *_n = HASH_REMOVE(v, id, node); \
if (_n) HASH_MAY_STEP_DOWN(v, id, pool); \
_n; \
})
#define HASH_WALK(v,next,n) \
do { \
HASH_TYPE(v) *n; \
@@ -121,3 +168,13 @@
#define HASH_WALK_DELSAFE_END } while (0)
#define HASH_WALK_FILTER(v,next,n,nn) \
do { \
HASH_TYPE(v) *n, **nn; \
uint _i; \
uint _s = HASH_SIZE(v); \
for (_i = 0; _i < _s; _i++) \
for (nn = (v).data + _i; n = *nn; (*nn == n) ? (nn = &n->next) : NULL)
#define HASH_WALK_FILTER_END } while (0)
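
The reworked macros form a generic framework: each user picks an id prefix and supplies KEY, NEXT, EQ and FN macros describing its node type, plus an id##_REHASH function name and an id##_PARAMS tuple that the REHASH_* accessors above unpack as (shrink mark, grow mark, shrink step, grow step, minimum order, maximum order). The sketch below is illustrative only — the MYH_* macros, struct my_node and my_rehash are hypothetical and assume BIRD's usual environment (u32, pool); the real instantiations added by this commit are the RSH_* macros in the route-attribute code and the PXH_* macros in the BGP attribute code further down.

/* Usage sketch of the generic hash macros (hypothetical MYH_* user) */
#include "lib/hash.h"
#include "lib/resource.h"

struct my_node {
  struct my_node *next;			/* chain pointer returned by MYH_NEXT */
  u32 key;
};

#define MYH_KEY(n)	n->key
#define MYH_NEXT(n)	n->next
#define MYH_EQ(k1,k2)	k1 == k2
#define MYH_FN(k)	u32_hash(k)	/* u32_hash() from the bitops header above */

#define MYH_REHASH	my_rehash
#define MYH_PARAMS	/2, *2, 1, 1, 8, 20
/* shrink one order when count < size/2, grow one order when count > size*2,
 * keeping the order within [8, 20] */

static HASH(struct my_node) my_table;

HASH_DEFINE_REHASH_FN(MYH, struct my_node)	/* defines my_rehash() */

static void
my_table_demo(pool *p, struct my_node *n)
{
  HASH_INIT(my_table, p, 8);			/* 2^8 buckets initially */
  HASH_INSERT2(my_table, MYH, p, n);		/* insert; may grow the table */

  struct my_node *m = HASH_FIND(my_table, MYH, n->key);
  if (m)
    HASH_REMOVE2(my_table, MYH, p, m);		/* unlink; may shrink the table */
}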

View File

@@ -52,6 +52,7 @@ typedef u32 ip_addr;
#define ipa_mkmask(x) _MI(u32_mkmask(x))
#define ipa_mklen(x) u32_masklen(_I(x))
#define ipa_hash(x) ipv4_hash(_I(x))
#define ipa_hash32(x) ipv4_hash32(_I(x))
#define ipa_hton(x) x = _MI(htonl(_I(x)))
#define ipa_ntoh(x) x = _MI(ntohl(_I(x)))
#define ipa_classify(x) ipv4_classify(_I(x))
@@ -86,6 +87,14 @@ static inline unsigned ipv4_hash(u32 a)
return a & 0xffff;
}
static inline u32 ipv4_hash32(u32 a)
{
/* Returns a 32-bit value, although low-order bits are not mixed */
a ^= a << 16;
a ^= a << 12;
return a;
}
static inline int ipv4_compare(u32 x, u32 y)
{
return (x > y) - (x < y);
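
The new 32-bit hash variants exist because HASH_FN() in lib/hash.h keeps only the top order bits of the hash (>> (32 - order)); the existing 16-bit ipa_hash() would leave those bits zero and map everything to bucket 0 whenever the order is 16 or less. A standalone sketch of that bucket computation (the sample address and order are arbitrary, and hash32() is just a local copy of the mixing above):

#include <stdint.h>
#include <stdio.h>

/* Local copy of the ipv4_hash32() mixing, for illustration only */
static uint32_t hash32(uint32_t a)
{
  a ^= a << 16;
  a ^= a << 12;
  return a;
}

int main(void)
{
  uint32_t addr = 0xc0000201u;	/* 192.0.2.1, arbitrary sample */
  unsigned order = 10;		/* a table with 2^10 buckets */

  printf("bucket = %u\n", hash32(addr) >> (32 - order));
  return 0;
}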

View File

@@ -58,6 +58,7 @@ typedef struct ipv6_addr {
#define ipa_mkmask(x) ipv6_mkmask(x)
#define ipa_mklen(x) ipv6_mklen(&(x))
#define ipa_hash(x) ipv6_hash(&(x))
#define ipa_hash32(x) ipv6_hash32(&(x))
#define ipa_hton(x) ipv6_hton(&(x))
#define ipa_ntoh(x) ipv6_ntoh(&(x))
#define ipa_classify(x) ipv6_classify(&(x))
@@ -104,6 +105,13 @@ static inline unsigned ipv6_hash(ip_addr *a)
return (x ^ (x >> 16) ^ (x >> 8)) & 0xffff;
}
static inline u32 ipv6_hash32(ip_addr *a)
{
/* Returns a 32-bit hash key, although low-order bits are not mixed */
u32 x = _I0(*a) ^ _I1(*a) ^ _I2(*a) ^ _I3(*a);
return x ^ (x << 16) ^ (x << 24);
}
static inline u32 ipv6_getbit(ip_addr a, u32 y)
{
return a.addr[y / 32] & (0x80000000 >> (y % 32));

View File

@@ -51,6 +51,7 @@
#include "nest/cli.h"
#include "nest/attrs.h"
#include "lib/alloca.h"
#include "lib/hash.h"
#include "lib/resource.h"
#include "lib/string.h"
@@ -63,14 +64,20 @@ static slab *rte_src_slab;
/* rte source ID bitmap */
static u32 *src_ids;
static u32 src_id_size, src_id_used, src_id_pos;
#define SRC_ID_SIZE_DEF 4
#define SRC_ID_INIT_SIZE 4
/* rte source hash */
static struct rte_src **src_table;
static u32 src_hash_order, src_hash_size, src_hash_count;
#define SRC_HASH_ORDER_DEF 6
#define SRC_HASH_ORDER_MAX 18
#define SRC_HASH_ORDER_MIN 10
#define RSH_KEY(n) n->proto, n->private_id
#define RSH_NEXT(n) n->next
#define RSH_EQ(p1,n1,p2,n2) p1 == p2 && n1 == n2
#define RSH_FN(p,n) p->hash_key ^ u32_hash(n)
#define RSH_REHASH rte_src_rehash
#define RSH_PARAMS /2, *2, 1, 1, 8, 20
#define RSH_INIT_ORDER 6
static HASH(struct rte_src) src_hash;
struct protocol *attr_class_to_protocol[EAP_MAX];
@@ -81,17 +88,14 @@ rte_src_init(void)
rte_src_slab = sl_new(rta_pool, sizeof(struct rte_src));
src_id_pos = 0;
src_id_size = SRC_ID_SIZE_DEF;
src_id_size = SRC_ID_INIT_SIZE;
src_ids = mb_allocz(rta_pool, src_id_size * sizeof(u32));
/* ID 0 is reserved */
src_ids[0] = 1;
src_id_used = 1;
src_hash_count = 0;
src_hash_order = SRC_HASH_ORDER_DEF;
src_hash_size = 1 << src_hash_order;
src_table = mb_allocz(rta_pool, src_hash_size * sizeof(struct rte_src *));
HASH_INIT(src_hash, rta_pool, RSH_INIT_ORDER);
}
static inline int u32_cto(unsigned int x) { return ffs(~x) - 1; }
@@ -141,55 +145,21 @@ rte_src_free_id(u32 id)
src_id_used--;
}
static inline u32 rte_src_hash(struct proto *p, u32 x, u32 order)
{ return (x * 2902958171u) >> (32 - order); }
static void
rte_src_rehash(int step)
{
struct rte_src **old_tab, *src, *src_next;
u32 old_size, hash, i;
old_tab = src_table;
old_size = src_hash_size;
src_hash_order += step;
src_hash_size = 1 << src_hash_order;
src_table = mb_allocz(rta_pool, src_hash_size * sizeof(struct rte_src *));
for (i = 0; i < old_size; i++)
for (src = old_tab[i]; src; src = src_next)
{
src_next = src->next;
hash = rte_src_hash(src->proto, src->private_id, src_hash_order);
src->next = src_table[hash];
src_table[hash] = src;
}
mb_free(old_tab);
}
HASH_DEFINE_REHASH_FN(RSH, struct rte_src)
struct rte_src *
rt_find_source(struct proto *p, u32 id)
{
struct rte_src *src;
u32 hash = rte_src_hash(p, id, src_hash_order);
for (src = src_table[hash]; src; src = src->next)
if ((src->proto == p) && (src->private_id == id))
return src;
return NULL;
return HASH_FIND(src_hash, RSH, p, id);
}
struct rte_src *
rt_get_source(struct proto *p, u32 id)
{
struct rte_src *src;
u32 hash = rte_src_hash(p, id, src_hash_order);
struct rte_src *src = rt_find_source(p, id);
for (src = src_table[hash]; src; src = src->next)
if ((src->proto == p) && (src->private_id == id))
if (src)
return src;
src = sl_alloc(rte_src_slab);
@@ -198,47 +168,26 @@ rt_get_source(struct proto *p, u32 id)
src->global_id = rte_src_alloc_id();
src->uc = 0;
src->next = src_table[hash];
src_table[hash] = src;
src_hash_count++;
if ((src_hash_count > src_hash_size) && (src_hash_order < SRC_HASH_ORDER_MAX))
rte_src_rehash(1);
HASH_INSERT2(src_hash, RSH, rta_pool, src);
return src;
}
static inline void
rt_remove_source(struct rte_src **sp)
{
struct rte_src *src = *sp;
*sp = src->next;
rte_src_free_id(src->global_id);
sl_free(rte_src_slab, src);
src_hash_count--;
}
void
rt_prune_sources(void)
{
struct rte_src **sp;
int i;
for (i = 0; i < src_hash_size; i++)
HASH_WALK_FILTER(src_hash, next, src, sp)
{
sp = &src_table[i];
while (*sp)
if (src->uc == 0)
{
if ((*sp)->uc == 0)
rt_remove_source(sp);
else
sp = &(*sp)->next;
HASH_DO_REMOVE(src_hash, RSH, sp);
rte_src_free_id(src->global_id);
sl_free(rte_src_slab, src);
}
}
HASH_WALK_FILTER_END;
while ((src_hash_count < (src_hash_size / 4)) && (src_hash_order > SRC_HASH_ORDER_MIN))
rte_src_rehash(-1);
HASH_MAY_RESIZE_DOWN(src_hash, RSH, rta_pool);
}

View File

@@ -106,13 +106,13 @@
#define HASH_ID_KEY(n) n->loc_id
#define HASH_ID_NEXT(n) n->next_id
#define HASH_ID_EQ(a,b) (a == b)
#define HASH_ID_FN(k) (k)
#define HASH_ID_EQ(a,b) a == b
#define HASH_ID_FN(k) k
#define HASH_IP_KEY(n) n->addr
#define HASH_IP_NEXT(n) n->next_ip
#define HASH_IP_EQ(a,b) ipa_equal(a,b)
#define HASH_IP_FN(k) ipa_hash(k)
#define HASH_IP_FN(k) ipa_hash32(k)
static list bfd_proto_list;
static list bfd_wait_list;

View File

@@ -58,6 +58,7 @@
* bgp_reconstruct_4b_attrs()).
*/
static byte bgp_mandatory_attrs[] = { BA_ORIGIN, BA_AS_PATH
#ifndef IPV6
,BA_NEXT_HOP
@@ -875,70 +876,40 @@ bgp_free_bucket(struct bgp_proto *p, struct bgp_bucket *buck)
/* Prefix hash table */
static inline u32 prefix_hash(ip_addr prefix, int pxlen, u32 path_id, u32 order)
{
u32 x = ipa_hash(prefix) + pxlen + path_id;
return (x * 2902958171u) >> (32 - order);
}
#define PXH_KEY(n1) n1->n.prefix, n1->n.pxlen, n1->path_id
#define PXH_NEXT(n) n->next
#define PXH_EQ(p1,l1,i1,p2,l2,i2) ipa_equal(p1, p2) && l1 == l2 && i1 == i2
#define PXH_FN(p,l,i) ipa_hash32(p) ^ u32_hash((l << 16) ^ i)
static inline u32 px_hash_size(struct bgp_proto *p)
{ return 1 << p->px_hash_order; }
#define PXH_REHASH bgp_pxh_rehash
#define PXH_PARAMS /8, *2, 2, 2, 8, 20
HASH_DEFINE_REHASH_FN(PXH, struct bgp_prefix)
void
bgp_init_prefix_table(struct bgp_proto *p, u32 order)
{
p->px_hash_count = 0;
p->px_hash_order = order;
p->prefix_table = mb_allocz(p->p.pool, px_hash_size(p) * sizeof(struct bgp_prefix *));
HASH_INIT(p->prefix_hash, p->p.pool, order);
p->prefix_slab = sl_new(p->p.pool, sizeof(struct bgp_prefix));
}
static void
bgp_rehash_prefix_table(struct bgp_proto *p, int step)
{
struct bgp_prefix **old_tab, *px, *px_next;
u32 old_size, hash, i;
old_tab = p->prefix_table;
old_size = px_hash_size(p);
p->px_hash_order += step;
p->prefix_table = mb_allocz(p->p.pool, px_hash_size(p) * sizeof(struct bgp_prefix *));
for (i = 0; i < old_size; i++)
for (px = old_tab[i]; px; px = px_next)
{
px_next = px->next;
hash = prefix_hash(px->n.prefix, px->n.pxlen, px->path_id, p->px_hash_order);
px->next = p->prefix_table[hash];
p->prefix_table[hash] = px;
}
mb_free(old_tab);
}
static struct bgp_prefix *
bgp_get_prefix(struct bgp_proto *p, ip_addr prefix, int pxlen, u32 path_id)
{
struct bgp_prefix *bp;
u32 hash = prefix_hash(prefix, pxlen, path_id, p->px_hash_order);
struct bgp_prefix *bp = HASH_FIND(p->prefix_hash, PXH, prefix, pxlen, path_id);
for (bp = p->prefix_table[hash]; bp; bp = bp->next)
if (bp->n.pxlen == pxlen && ipa_equal(bp->n.prefix, prefix) && bp->path_id == path_id)
if (bp)
return bp;
bp = sl_alloc(p->prefix_slab);
bp->n.prefix = prefix;
bp->n.pxlen = pxlen;
bp->path_id = path_id;
bp->next = p->prefix_table[hash];
p->prefix_table[hash] = bp;
bp->bucket_node.next = NULL;
p->px_hash_count++;
if ((p->px_hash_count > px_hash_size(p)) && (p->px_hash_order < 18))
bgp_rehash_prefix_table(p, 1);
HASH_INSERT2(p->prefix_hash, PXH, p->p.pool, bp);
return bp;
}
@@ -946,19 +917,8 @@ bgp_get_prefix(struct bgp_proto *p, ip_addr prefix, int pxlen, u32 path_id)
void
bgp_free_prefix(struct bgp_proto *p, struct bgp_prefix *bp)
{
struct bgp_prefix **bpp;
u32 hash = prefix_hash(bp->n.prefix, bp->n.pxlen, bp->path_id, p->px_hash_order);
for (bpp = &p->prefix_table[hash]; *bpp; *bpp = (*bpp)->next)
if (*bpp == bp)
break;
*bpp = bp->next;
HASH_REMOVE2(p->prefix_hash, PXH, p->p.pool, bp);
sl_free(p->prefix_slab, bp);
p->px_hash_count--;
if ((p->px_hash_count < (px_hash_size(p) / 4)) && (p->px_hash_order > 10))
bgp_rehash_prefix_table(p, -1);
}
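
Read through the REHASH_* accessors in lib/hash.h, the prefix table's resize policy differs from the rte_src one; a comment-only restatement (no new code) of what PXH_PARAMS selects:

/* PXH_PARAMS = /8, *2, 2, 2, 8, 20:
 *   HASH_INSERT2 grows the table by 2 orders when count > 2 * HASH_SIZE
 *   (as long as order < 20);
 *   HASH_REMOVE2 shrinks it by 2 orders when count < HASH_SIZE / 8
 *   (as long as order > 8).
 */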

View File

@@ -68,6 +68,7 @@
#include "bgp.h"
struct linpool *bgp_linpool; /* Global temporary pool */
static sock *bgp_listen_sk; /* Global listening socket */
static int bgp_counter; /* Number of protocol instances using the listening socket */

View File

@@ -12,6 +12,7 @@
#include <stdint.h>
#include "nest/route.h"
#include "nest/bfd.h"
#include "lib/hash.h"
struct linpool;
struct eattr;
@@ -118,10 +119,8 @@ struct bgp_proto {
struct timer *startup_timer; /* Timer used to delay protocol startup due to previous errors (startup_delay) */
struct bgp_bucket **bucket_hash; /* Hash table of attribute buckets */
unsigned int hash_size, hash_count, hash_limit;
// struct fib prefix_fib; /* Prefixes to be sent */
struct bgp_prefix **prefix_table; /* Prefixes to be sent */
HASH(struct bgp_prefix) prefix_hash; /* Prefixes to be sent */
slab *prefix_slab; /* Slab holding prefix nodes */
u32 px_hash_order, px_hash_count;
list bucket_queue; /* Queue of buckets to send */
struct bgp_bucket *withdraw_bucket; /* Withdrawn routes */
unsigned startup_delay; /* Time to delay protocol startup by due to errors */

View File

@@ -244,7 +244,7 @@ bgp_encode_prefixes(struct bgp_proto *p, byte *w, struct bgp_bucket *buck, unsig
ip_addr a;
int bytes;
while (!EMPTY_LIST(buck->prefixes) && remains >= (1+sizeof(ip_addr)))
while (!EMPTY_LIST(buck->prefixes) && (remains >= (5+sizeof(ip_addr))))
{
struct bgp_prefix *px = SKIP_BACK(struct bgp_prefix, bucket_node, HEAD(buck->prefixes));
DBG("\tDequeued route %I/%d\n", px->n.prefix, px->n.pxlen);
@@ -253,6 +253,7 @@
{
put_u32(w, px->path_id);
w += 4;
remains -= 4;
}
*w++ = px->n.pxlen;
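
For clarity, a sketch (hypothetical helper, not BIRD code) of the resulting IPv4 NLRI layout when add-path is active, mirroring the loop above: a 4-octet path identifier precedes the usual 1-octet prefix length and the significant prefix octets, which is also why the free-space check grows from 1+sizeof(ip_addr) to 5+sizeof(ip_addr).

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Illustrative helper: encode one IPv4 add-path NLRI into w, return its length */
static size_t
encode_addpath_nlri(uint8_t *w, uint32_t path_id, uint32_t prefix, int pxlen)
{
  uint32_t id_n = htonl(path_id);
  uint32_t px_n = htonl(prefix);
  size_t plen = (pxlen + 7) / 8;

  memcpy(w, &id_n, 4);		/* path identifier */
  w[4] = (uint8_t) pxlen;	/* prefix length in bits */
  memcpy(w + 5, &px_n, plen);	/* significant prefix octets only */
  return 5 + plen;
}

/* e.g. 192.0.2.0/24 with path_id 1 encodes as: 00 00 00 01 18 c0 00 02 */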