
Merge branch 'ballygarvan' into HEAD

Replacing the old 3.0-alpha0 cork mechanism with another one inside the
routing table. This version should be simpler, and it should also be quite
clear what it does, why and when.
This commit is contained in:
Maria Matejka 2022-08-02 17:58:14 +02:00
commit 0072d11f34
165 changed files with 11691 additions and 6219 deletions


@ -3,7 +3,7 @@ variables:
LC_ALL: C.UTF-8
GIT_STRATEGY: fetch
DOCKER_CMD: docker --config="$HOME/.docker/$CI_JOB_ID/"
IMG_BASE: registry.labs.nic.cz/labs/bird
IMG_BASE: registry.nic.cz/labs/bird
TOOLS_DIR: /var/lib/gitlab-runner/bird-tools
stages:
@ -16,7 +16,7 @@ stages:
stage: image
allow_failure: true
script:
- $DOCKER_CMD login -u gitlab-ci-token -p $CI_BUILD_TOKEN registry.labs.nic.cz
- $DOCKER_CMD login -u gitlab-ci-token -p $CI_BUILD_TOKEN registry.nic.cz
# Make sure we refresh the base image if it updates (eg. security updates, etc)
# If we do just the build, cache is always reused and the freshness of the
# base image is never checked. However, pull always asks and updates the
@ -154,9 +154,9 @@ docker_ubuntu-20_04-amd64:
IMG_NAME: "ubuntu-20.04-amd64"
<<: *docker_build
docker_ubuntu-20_10-amd64:
docker_ubuntu-21_10-amd64:
variables:
IMG_NAME: "ubuntu-20.10-amd64"
IMG_NAME: "ubuntu-21.10-amd64"
<<: *docker_build
# GPG error
@ -224,135 +224,135 @@ docker_opensuse-15.3-amd64:
build-debian-8-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:debian-8-amd64
image: registry.nic.cz/labs/bird:debian-8-amd64
build-debian-8-i386:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:debian-8-i386
image: registry.nic.cz/labs/bird:debian-8-i386
build-debian-9-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:debian-9-amd64
image: registry.nic.cz/labs/bird:debian-9-amd64
build-debian-9-i386:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:debian-9-i386
image: registry.nic.cz/labs/bird:debian-9-i386
build-debian-10-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:debian-10-amd64
image: registry.nic.cz/labs/bird:debian-10-amd64
build-debian-10-i386:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:debian-10-i386
image: registry.nic.cz/labs/bird:debian-10-i386
build-debian-11-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:debian-11-amd64
image: registry.nic.cz/labs/bird:debian-11-amd64
#build-debian-11-i386:
# <<: *build-linux
# image: registry.labs.nic.cz/labs/bird:debian-11-i386
# image: registry.nic.cz/labs/bird:debian-11-i386
build-debian-testing-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:debian-testing-amd64
image: registry.nic.cz/labs/bird:debian-testing-amd64
#build-debian-testing-i386:
# <<: *build-linux
# image: registry.labs.nic.cz/labs/bird:debian-testing-i386
# image: registry.nic.cz/labs/bird:debian-testing-i386
build-fedora-25-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:fedora-25-amd64
image: registry.nic.cz/labs/bird:fedora-25-amd64
build-fedora-26-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:fedora-26-amd64
image: registry.nic.cz/labs/bird:fedora-26-amd64
build-fedora-27-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:fedora-27-amd64
image: registry.nic.cz/labs/bird:fedora-27-amd64
build-fedora-28-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:fedora-28-amd64
image: registry.nic.cz/labs/bird:fedora-28-amd64
build-fedora-29-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:fedora-29-amd64
image: registry.nic.cz/labs/bird:fedora-29-amd64
build-fedora-30-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:fedora-30-amd64
image: registry.nic.cz/labs/bird:fedora-30-amd64
build-fedora-31-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:fedora-31-amd64
image: registry.nic.cz/labs/bird:fedora-31-amd64
build-fedora-32-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:fedora-32-amd64
image: registry.nic.cz/labs/bird:fedora-32-amd64
build-fedora-33-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:fedora-33-amd64
image: registry.nic.cz/labs/bird:fedora-33-amd64
build-fedora-34-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:fedora-33-amd64
image: registry.nic.cz/labs/bird:fedora-33-amd64
build-centos-8-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:centos-8-amd64
image: registry.nic.cz/labs/bird:centos-8-amd64
build-ubuntu-16_04-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:ubuntu-16.04-amd64
image: registry.nic.cz/labs/bird:ubuntu-16.04-amd64
build-ubuntu-18_04-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:ubuntu-18.04-amd64
image: registry.nic.cz/labs/bird:ubuntu-18.04-amd64
build-ubuntu-20_04-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:ubuntu-20.04-amd64
image: registry.nic.cz/labs/bird:ubuntu-20.04-amd64
build-ubuntu-20_10-amd64:
build-ubuntu-21_10-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:ubuntu-20.10-amd64
image: registry.nic.cz/labs/bird:ubuntu-21.10-amd64
#build-ubuntu-21_04-amd64:
# <<: *build-linux
# image: registry.labs.nic.cz/labs/bird:ubuntu-21.04-amd64
# image: registry.nic.cz/labs/bird:ubuntu-21.04-amd64
build-opensuse-15.0-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:opensuse-15.0-amd64
image: registry.nic.cz/labs/bird:opensuse-15.0-amd64
build-opensuse-15.1-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:opensuse-15.1-amd64
image: registry.nic.cz/labs/bird:opensuse-15.1-amd64
build-opensuse-15.2-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:opensuse-15.2-amd64
image: registry.nic.cz/labs/bird:opensuse-15.2-amd64
build-opensuse-15.3-amd64:
<<: *build-linux
image: registry.labs.nic.cz/labs/bird:opensuse-15.3-amd64
image: registry.nic.cz/labs/bird:opensuse-15.3-amd64
build-freebsd-11-amd64:
<<: *build-base
tags:
- freebsd
- amd64
#build-freebsd-11-amd64:
# <<: *build-base
# tags:
# - freebsd
# - amd64
build-freebsd-11-i386:
<<: *build-base
tags:
- freebsd
- i386
#build-freebsd-11-i386:
# <<: *build-base
# tags:
# - freebsd
# - i386
.pkg-deb: &pkg-deb
@ -390,60 +390,60 @@ build-freebsd-11-i386:
#pkg-debian-8-amd64:
# <<: *pkg-deb
# needs: [build-debian-8-amd64]
# image: registry.labs.nic.cz/labs/bird:debian-8-amd64
# image: registry.nic.cz/labs/bird:debian-8-amd64
# Dpkg error: PATH is not set
#pkg-debian-8-i386:
# <<: *pkg-deb
# needs: [build-debian-8-i386]
# image: registry.labs.nic.cz/labs/bird:debian-8-i386
# image: registry.nic.cz/labs/bird:debian-8-i386
# Dpkg error: PATH is not set
pkg-debian-9-amd64:
<<: *pkg-deb
needs: [build-debian-9-amd64]
image: registry.labs.nic.cz/labs/bird:debian-9-amd64
image: registry.nic.cz/labs/bird:debian-9-amd64
# Dpkg error: PATH is not set
pkg-debian-9-i386:
<<: *pkg-deb
needs: [build-debian-9-i386]
image: registry.labs.nic.cz/labs/bird:debian-9-i386
image: registry.nic.cz/labs/bird:debian-9-i386
pkg-debian-10-amd64:
<<: *pkg-deb
needs: [build-debian-10-amd64]
image: registry.labs.nic.cz/labs/bird:debian-10-amd64
image: registry.nic.cz/labs/bird:debian-10-amd64
pkg-debian-10-i386:
<<: *pkg-deb
needs: [build-debian-10-i386]
image: registry.labs.nic.cz/labs/bird:debian-10-i386
image: registry.nic.cz/labs/bird:debian-10-i386
pkg-debian-11-amd64:
<<: *pkg-deb
needs: [build-debian-11-amd64]
image: registry.labs.nic.cz/labs/bird:debian-11-amd64
image: registry.nic.cz/labs/bird:debian-11-amd64
pkg-fedora-30-amd64:
<<: *pkg-rpm-wa
needs: [build-fedora-30-amd64]
image: registry.labs.nic.cz/labs/bird:fedora-30-amd64
image: registry.nic.cz/labs/bird:fedora-30-amd64
pkg-fedora-31-amd64:
<<: *pkg-rpm-wa
needs: [build-fedora-31-amd64]
image: registry.labs.nic.cz/labs/bird:fedora-31-amd64
image: registry.nic.cz/labs/bird:fedora-31-amd64
pkg-fedora-32-amd64:
<<: *pkg-rpm-wa
needs: [build-fedora-32-amd64]
image: registry.labs.nic.cz/labs/bird:fedora-32-amd64
image: registry.nic.cz/labs/bird:fedora-32-amd64
pkg-fedora-33-amd64:
<<: *pkg-rpm-wa
needs: [build-fedora-33-amd64]
image: registry.labs.nic.cz/labs/bird:fedora-33-amd64
image: registry.nic.cz/labs/bird:fedora-33-amd64
pkg-fedora-34-amd64:
<<: *pkg-rpm
@ -453,42 +453,43 @@ pkg-fedora-34-amd64:
pkg-centos-8-amd64:
<<: *pkg-rpm-wa
needs: [build-centos-8-amd64]
image: registry.labs.nic.cz/labs/bird:centos-8-amd64
image: registry.nic.cz/labs/bird:centos-8-amd64
pkg-ubuntu-18.04-amd64:
<<: *pkg-deb
needs: [build-ubuntu-18_04-amd64]
image: registry.labs.nic.cz/labs/bird:ubuntu-18.04-amd64
image: registry.nic.cz/labs/bird:ubuntu-18.04-amd64
pkg-ubuntu-20.04-amd64:
<<: *pkg-deb
needs: [build-ubuntu-20_04-amd64]
image: registry.labs.nic.cz/labs/bird:ubuntu-20.04-amd64
image: registry.nic.cz/labs/bird:ubuntu-20.04-amd64
pkg-ubuntu-20.10-amd64:
pkg-ubuntu-21.10-amd64:
<<: *pkg-deb
needs: [build-ubuntu-20_10-amd64]
image: registry.labs.nic.cz/labs/bird:ubuntu-20.10-amd64
needs: [build-ubuntu-21_10-amd64]
image: registry.nic.cz/labs/bird:ubuntu-21.10-amd64
#pkg-ubuntu-21.04-amd64:
# <<: *pkg-deb
# needs: [build-ubuntu-21_04-amd64]
# image: registry.labs.nic.cz/labs/bird:ubuntu-21.04-amd64
# image: registry.nic.cz/labs/bird:ubuntu-21.04-amd64
pkg-opensuse-15.1-amd64:
<<: *pkg-rpm-wa
needs: [build-opensuse-15.1-amd64]
image: registry.labs.nic.cz/labs/bird:opensuse-15.1-amd64
image: registry.nic.cz/labs/bird:opensuse-15.1-amd64
pkg-opensuse-15.2-amd64:
<<: *pkg-rpm-wa
needs: [build-opensuse-15.2-amd64]
image: registry.labs.nic.cz/labs/bird:opensuse-15.2-amd64
image: registry.nic.cz/labs/bird:opensuse-15.2-amd64
pkg-opensuse-15.3-amd64:
<<: *pkg-rpm-wa
needs: [build-opensuse-15.3-amd64]
image: registry.labs.nic.cz/labs/bird:opensuse-15.3-amd64
image: registry.nic.cz/labs/bird:opensuse-15.3-amd64
build-birdlab:
@ -557,6 +558,11 @@ test-ospf-custom:
variables:
TEST_NAME: cf-ospf-custom
test-ospf-area:
<<: *test-base
variables:
TEST_NAME: cf-ospf-area
test-ospf-vrf:
<<: *test-base
variables:
@ -611,3 +617,8 @@ test-babel-auth:
<<: *test-base
variables:
TEST_NAME: cf-babel-auth
test-rip-base:
<<: *test-base
variables:
TEST_NAME: cf-rip-base


@ -82,6 +82,9 @@ conf-lex-targets := $(addprefix $(objdir)/conf/,cf-lex.o)
conf-y-targets := $(addprefix $(objdir)/conf/,cf-parse.y keywords.h commands.h)
cf-local = $(conf-y-targets): $(s)config.Y
# nest/Makefile declarations needed for all other modules
proto-build-c := $(addprefix $(objdir)/nest/,proto-build.c)
src-o-files = $(patsubst %.c,$(o)%.o,$(src))
tests-target-files = $(patsubst %.c,$(o)%,$(tests_src))
@ -95,6 +98,13 @@ else
o = $(patsubst $(srcdir)%,$(objdir)%,$(s))
endif
define proto-build_in =
PROTO_BUILD += $(1)
$(proto-build-c): $(lastword $(MAKEFILE_LIST))
endef
proto-build = $(eval $(call proto-build_in,$(1)))
define clean_in =
clean::
rm -f $(addprefix $(o),$(1))

NEWS

@ -1,3 +1,46 @@
Version 2.0.10 (2022-06-16)
o BGP performance improvements
o BFD: New 'strict bind' option
o RPKI: VRF support
o Allow use of 240.0.0.0/4 as a private range
o BIRD client uses exit status to report errors
o Important bugfixes
Version 2.0.9 (2022-02-09)
o BGP: Flowspec validation procedure
o Babel: MAC authentication support
o Routing table configuration blocks
o Optional prefix trie in routing table for faster LPM/interval queries
o CLI: New 'show route in <prefix>' command
o Filter: Faster (16-way) prefix sets
o Filter: MPLS label route attribute
o Filter: Operators to pick community components
o Filter: Operators to find minimum and maximum element of lists
o BGP: New 'free bind' option
o BGP: Log route updates that were changed to withdraws
o BGP: Improved 'invalid next hop' error reporting
o OSPF: Allow ifaces with host address as unnumbered PtP or PtMP ifaces
o OSPF: All packets on PtP networks should be sent to AllSPFRouters address
o Scripts for apkg-powered upstream packaging for deb and rpm
o Support for Blake2s and Blake2b hash functions
o Security keys / passwords can be entered in hexadecimal digits
o Memory statistics split into Effective and Overhead
o Linux: New option 'netlink rx buffer' to specify netlink socket buffer size
o BSD: Assume onlink flag on ifaces with only host addresses
o Many bugfixes
Notes:
For OSPF on PtP network, BIRD now sends all packets to multicast AllSPFRouters
address (as required in RFC 2328 8.1). This likely breaks setups with multiple
neighbors on a network configured as PtP, which worked in previous versions.
Such links should be configured as PtMP.
Since Linux 5.3, netlink socket can be flooded by route cache entries during
route table scan. This version mitigates that issue by using strict netlink
filtering.
Version 2.0.8 (2021-03-18)
o Automatic channel reloads based on RPKI changes
o Multiple static routes with the same network

aclocal.m4

@ -2,6 +2,32 @@ dnl ** Additional Autoconf tests for BIRD configure script
dnl ** (c) 1999 Martin Mares <mj@ucw.cz>
dnl ** (c) 2021 Maria Matejka <mq@jmq.cz>
AC_DEFUN([BIRD_CHECK_POINTER_ALIGNMENT],
[
AC_CACHE_CHECK(
[how pointers are aligned],
[bird_cv_pointer_alignment],
AC_COMPILE_IFELSE([
AC_LANG_PROGRAM(
[
_Static_assert(_Alignof(void *) == 8, "bad");
], []
)
],
[bird_cv_pointer_alignment=8],
AC_COMPILE_IFELSE([
AC_LANG_PROGRAM(
[
_Static_assert(_Alignof(void *) == 4, "bad");
], []
)
],
[bird_cv_pointer_alignment=4],
[bird_cv_pointer_alignment=unknown]
))
)
])
AC_DEFUN([BIRD_CHECK_THREAD_LOCAL],
[
AC_CACHE_CHECK(


@ -25,7 +25,6 @@ class BIRDFValPrinter(BIRDPrinter):
"T_ENUM_RTS": "i",
"T_ENUM_BGP_ORIGIN": "i",
"T_ENUM_SCOPE": "i",
"T_ENUM_RTC": "i",
"T_ENUM_RTD": "i",
"T_ENUM_ROA": "i",
"T_ENUM_NETTYPE": "i",


@ -50,6 +50,7 @@ static byte *server_read_pos = server_read_buf;
int init = 1; /* During intial sequence */
int busy = 1; /* Executing BIRD command */
int interactive; /* Whether stdin is terminal */
int last_code; /* Last return code */
static int num_lines, skip_input;
int term_lns, term_cls;
@ -196,7 +197,7 @@ init_commands(void)
{
/* Initial command is finished and we want to exit */
cleanup();
exit(0);
exit((last_code < 8000) ? 0 : 1);
}
input_init();
@ -283,6 +284,8 @@ server_got_reply(char *x)
if (code)
PRINTF(len, "%s\n", verbose ? x : x+5);
last_code = code;
if (x[4] == ' ')
{
busy = 0;


@ -42,7 +42,7 @@
#define PARSER 1
#include "nest/bird.h"
#include "nest/route.h"
#include "nest/rt.h"
#include "nest/protocol.h"
#include "filter/filter.h"
#include "filter/f-inst.h"
@ -77,19 +77,22 @@ static uint cf_hash(const byte *c);
#define SYM_NEXT(n) n->next
#define SYM_EQ(a,s1,b,s2) !strcmp(a,b) && s1 == s2
#define SYM_FN(k,s) cf_hash(k)
#define SYM_ORDER 6 /* Initial */
#define SYM_ORDER 4 /* Initial */
#define SYM_REHASH sym_rehash
#define SYM_PARAMS /8, *1, 2, 2, 6, 20
#define SYM_PARAMS /8, *1, 2, 2, 4, 20
HASH_DEFINE_REHASH_FN(SYM, struct symbol)
HASH(struct keyword) kw_hash;
HASH(struct ea_class) ea_name_hash;
struct sym_scope *conf_this_scope;
static struct sym_scope global_root_scope__init = { .active = 1, };
struct sym_scope *global_root_scope = &global_root_scope__init;
linpool *cfg_mem;
int (*cf_read_hook)(byte *buf, unsigned int max, int fd);
@ -255,7 +258,7 @@ WHITE [ \t]
return IP4;
}
{XIGIT}{2}(:{XIGIT}{2}|{XIGIT}{2}){15,} {
{XIGIT}{2}((:{XIGIT}{2}){15,}|({XIGIT}{2}){15,}) {
char *s = yytext;
size_t len = 0, i;
struct bytestring *bytes;
@ -347,7 +350,7 @@ else: {
return DDOT;
}
[={}:;,.()+*/%<>~\[\]?!\|-] {
[={}:;,.()+*/%<>~\[\]?!\|&-] {
return yytext[0];
}
@ -574,6 +577,8 @@ check_eof(void)
return 0;
}
static inline void cf_swap_soft_scope(void);
static struct symbol *
cf_new_symbol(const byte *c)
{
@ -583,45 +588,64 @@ cf_new_symbol(const byte *c)
if (l > SYM_MAX_LEN)
cf_error("Symbol too long");
cf_swap_soft_scope();
s = cfg_allocz(sizeof(struct symbol) + l + 1);
*s = (struct symbol) { .scope = conf_this_scope, .class = SYM_VOID, };
strcpy(s->name, c);
if (!new_config->sym_hash.data)
HASH_INIT(new_config->sym_hash, new_config->pool, SYM_ORDER);
if (!conf_this_scope->hash.data)
HASH_INIT(conf_this_scope->hash, new_config->pool, SYM_ORDER);
HASH_INSERT2(new_config->sym_hash, SYM, new_config->pool, s);
HASH_INSERT2(conf_this_scope->hash, SYM, new_config->pool, s);
if (conf_this_scope == new_config->root_scope)
add_tail(&(new_config->symbols), &(s->n));
return s;
}
static struct symbol *
cf_root_symbol(const byte *c)
{
uint l = strlen(c);
if (l > SYM_MAX_LEN)
bug("Root symbol %s too long", c);
struct symbol *s = mb_alloc(&root_pool, sizeof(struct symbol) + l + 1);
*s = (struct symbol) { .scope = global_root_scope, .class = SYM_VOID, };
memcpy(s->name, c, l+1);
if (!global_root_scope->hash.data)
HASH_INIT(global_root_scope->hash, &root_pool, SYM_ORDER);
HASH_INSERT2(global_root_scope->hash, SYM, &root_pool, s);
return s;
}
/**
* cf_find_symbol - find a symbol by name
* @cfg: specificed config
* cf_find_symbol_scope - find a symbol by name
* @scope: config scope
* @c: symbol name
*
* This functions searches the symbol table in the config @cfg for a symbol of
* given name. First it examines the current scope, then the second recent one
* This functions searches the symbol table in the scope @scope for a symbol of
* given name. First it examines the current scope, then the underlying one
* and so on until it either finds the symbol and returns a pointer to its
* &symbol structure or reaches the end of the scope chain and returns %NULL to
* signify no match.
*/
struct symbol *
cf_find_symbol(const struct config *cfg, const byte *c)
cf_find_symbol_scope(const struct sym_scope *scope, const byte *c)
{
struct symbol *s;
if (cfg->sym_hash.data &&
(s = HASH_FIND(cfg->sym_hash, SYM, c, 1)))
return s;
/* In CLI command parsing, fallback points to the current config, otherwise it is NULL. */
if (cfg->fallback &&
cfg->fallback->sym_hash.data &&
(s = HASH_FIND(cfg->fallback->sym_hash, SYM, c, 1)))
/* Find the symbol here or anywhere below */
while (scope)
if (scope->hash.data && (s = HASH_FIND(scope->hash, SYM, c, 1)))
return s;
else
scope = scope->next;
return NULL;
}
@ -638,7 +662,7 @@ cf_find_symbol(const struct config *cfg, const byte *c)
struct symbol *
cf_get_symbol(const byte *c)
{
return cf_find_symbol(new_config, c) ?: cf_new_symbol(c);
return cf_find_symbol_scope(conf_this_scope, c) ?: cf_new_symbol(c);
}
/**
@ -656,8 +680,8 @@ cf_localize_symbol(struct symbol *sym)
return sym;
/* If the scope is the current, it is already defined in this scope. */
if (sym->scope == conf_this_scope)
cf_error("Symbol already defined");
if (cf_symbol_is_local(sym))
cf_error("Symbol '%s' already defined", sym->name);
/* Not allocated here yet, doing it now. */
return cf_new_symbol(sym->name);
@ -689,10 +713,7 @@ cf_lex_symbol(const char *data)
struct symbol *sym = cf_get_symbol(data);
cf_lval.s = sym;
if (sym->class != SYM_VOID)
return CF_SYM_KNOWN;
/* Is it a keyword? */
/* Is it a keyword? Prefer the keyword. */
struct keyword *k = HASH_FIND(kw_hash, KW, data);
if (k)
{
@ -705,9 +726,11 @@ cf_lex_symbol(const char *data)
}
}
/* OK, undefined symbol */
cf_lval.s = sym;
/* OK, only a symbol. */
if (sym->class == SYM_VOID)
return CF_SYM_UNDEFINED;
else
return CF_SYM_KNOWN;
}
static void
@ -720,6 +743,34 @@ cf_lex_init_kh(void)
HASH_INSERT(kw_hash, KW, k);
}
void
ea_lex_register(struct ea_class *def)
{
struct symbol *sym = cf_root_symbol(def->name);
sym->class = SYM_ATTRIBUTE;
sym->attribute = def;
def->sym = sym;
}
void
ea_lex_unregister(struct ea_class *def)
{
struct symbol *sym = def->sym;
HASH_REMOVE2(global_root_scope->hash, SYM, &root_pool, sym);
mb_free(sym);
def->sym = NULL;
}
struct ea_class *
ea_class_find_by_name(const char *name)
{
struct symbol *sym = cf_find_symbol(global_root_scope, name);
if (!sym || (sym->class != SYM_ATTRIBUTE))
return NULL;
else
return sym->attribute;
}
/**
* cf_lex_init - initialize the lexer
* @is_cli: true if we're going to parse CLI command, false for configuration
@ -753,6 +804,11 @@ cf_lex_init(int is_cli, struct config *c)
c->root_scope = cfg_allocz(sizeof(struct sym_scope));
conf_this_scope = c->root_scope;
conf_this_scope->active = 1;
if (is_cli)
conf_this_scope->next = config->root_scope;
else
conf_this_scope->next = global_root_scope;
}
/**
@ -787,12 +843,60 @@ cf_push_scope(struct symbol *sym)
void
cf_pop_scope(void)
{
ASSERT(!conf_this_scope->soft_scopes);
conf_this_scope->active = 0;
conf_this_scope = conf_this_scope->next;
ASSERT(conf_this_scope);
}
/**
* cf_push_soft_scope - enter new soft scope
*
* If we want to enter a new anonymous scope that most likely will not contain
any symbols, we can use cf_push_soft_scope() instead of cf_push_scope().
* Such scope will be converted to a regular scope on first use.
*/
void
cf_push_soft_scope(void)
{
if (conf_this_scope->soft_scopes < 0xfe)
conf_this_scope->soft_scopes++;
else
cf_push_scope(NULL);
}
/**
* cf_pop_soft_scope - leave a soft scope
*
* Leave a soft scope entered by cf_push_soft_scope().
*/
void
cf_pop_soft_scope(void)
{
if (conf_this_scope->soft_scopes)
conf_this_scope->soft_scopes--;
else
cf_pop_scope();
}
/**
* cf_swap_soft_scope - convert soft scope to regular scope
*
* Soft scopes cannot hold symbols, so they must be converted to regular scopes
* on first use. It is done automatically by cf_new_symbol().
*/
static inline void
cf_swap_soft_scope(void)
{
if (conf_this_scope->soft_scopes)
{
conf_this_scope->soft_scopes--;
cf_push_scope(NULL);
}
}
/**
* cf_symbol_class_name - get name of a symbol class
* @sym: symbol


@ -46,7 +46,7 @@
#undef LOCAL_DEBUG
#include "nest/bird.h"
#include "nest/route.h"
#include "nest/rt.h"
#include "nest/protocol.h"
#include "nest/iface.h"
#include "lib/resource.h"
@ -140,6 +140,7 @@ config_parse(struct config *c)
protos_preconfig(c);
rt_preconfig(c);
cf_parse();
rt_postconfig(c);
if (EMPTY_LIST(c->protos))
cf_error("No protocol is specified in the config file");
@ -168,7 +169,6 @@ int
cli_parse(struct config *c)
{
int done = 0;
c->fallback = config;
new_config = c;
cfg_mem = c->mem;
if (setjmp(conf_jmpbuf))
@ -179,7 +179,6 @@ cli_parse(struct config *c)
done = 1;
cleanup:
c->fallback = NULL;
new_config = NULL;
cfg_mem = NULL;
return done;
@ -520,6 +519,8 @@ order_shutdown(int gr)
memcpy(c, config, sizeof(struct config));
init_list(&c->protos);
init_list(&c->tables);
init_list(&c->symbols);
memset(c->def_tables, 0, sizeof(c->def_tables));
c->shutdown = 1;
c->gr_down = gr;


@ -16,7 +16,6 @@
#include "lib/timer.h"
/* Configuration structure */
struct config {
pool *pool; /* Pool the configuration is stored in */
linpool *mem; /* Linear pool containing configuration data */
@ -55,8 +54,7 @@ struct config {
char *err_file_name; /* File name containing error */
char *file_name; /* Name of main configuration file */
int file_fd; /* File descriptor of main configuration file */
HASH(struct symbol) sym_hash; /* Lexer: symbol hash table */
struct config *fallback; /* Link to regular config for CLI parsing */
struct sym_scope *root_scope; /* Scope for root symbols */
int obstacle_count; /* Number of items blocking freeing of this config */
int shutdown; /* This is a pseudo-config for daemon shutdown */
@ -123,7 +121,7 @@ struct symbol {
const struct f_line *function; /* For SYM_FUNCTION */
const struct filter *filter; /* For SYM_FILTER */
struct rtable_config *table; /* For SYM_TABLE */
struct f_dynamic_attr *attribute; /* For SYM_ATTRIBUTE */
struct ea_class *attribute; /* For SYM_ATTRIBUTE */
struct f_val *val; /* For SYM_CONSTANT */
uint offset; /* For SYM_VARIABLE */
};
@ -134,10 +132,16 @@ struct symbol {
struct sym_scope {
struct sym_scope *next; /* Next on scope stack */
struct symbol *name; /* Name of this scope */
HASH(struct symbol) hash; /* Local symbol hash */
uint slots; /* Variable slots */
int active; /* Currently entered */
byte active; /* Currently entered */
byte soft_scopes; /* Number of soft scopes above */
};
extern struct sym_scope *global_root_scope;
struct bytestring {
size_t length;
byte data[];
@ -186,12 +190,22 @@ int cf_lex(void);
void cf_lex_init(int is_cli, struct config *c);
void cf_lex_unwind(void);
struct symbol *cf_find_symbol(const struct config *cfg, const byte *c);
struct symbol *cf_find_symbol_scope(const struct sym_scope *scope, const byte *c);
static inline struct symbol *cf_find_symbol_cfg(const struct config *cfg, const byte *c)
{ return cf_find_symbol_scope(cfg->root_scope, c); }
#define cf_find_symbol(where, what) _Generic(*(where), \
struct config: cf_find_symbol_cfg, \
struct sym_scope: cf_find_symbol_scope \
)((where), (what))
struct symbol *cf_get_symbol(const byte *c);
struct symbol *cf_default_name(char *template, int *counter);
struct symbol *cf_localize_symbol(struct symbol *sym);
static inline int cf_symbol_is_local(struct symbol *sym)
{ return (sym->scope == conf_this_scope) && !conf_this_scope->soft_scopes; }
/**
* cf_define_symbol - define meaning of a symbol
* @sym: symbol to be defined
@ -215,6 +229,9 @@ struct symbol *cf_localize_symbol(struct symbol *sym);
void cf_push_scope(struct symbol *);
void cf_pop_scope(void);
void cf_push_soft_scope(void);
void cf_pop_soft_scope(void);
char *cf_symbol_class_name(struct symbol *sym);
/* Parser */


@ -18,7 +18,7 @@ CF_HDR
#include "lib/string.h"
#include "nest/protocol.h"
#include "nest/iface.h"
#include "nest/route.h"
#include "nest/rt.h"
#include "nest/bfd.h"
#include "nest/cli.h"
#include "filter/filter.h"
@ -71,10 +71,12 @@ CF_DECLS
} xp;
enum filter_return fret;
enum ec_subtype ecs;
struct f_dynamic_attr fda;
struct ea_class *ea_class;
struct f_static_attr fsa;
struct f_attr_bit fab;
struct f_lval flv;
struct f_line *fl;
struct f_arg *fa;
const struct filter *f;
struct f_tree *e;
struct f_trie *trie;
@ -91,7 +93,7 @@ CF_DECLS
struct proto_spec ps;
struct channel_limit cl;
struct timeformat *tf;
mpls_label_stack *mls;
struct adata *ad;
struct bytestring *bs;
}
@ -110,16 +112,17 @@ CF_DECLS
%type <i> expr bool pxlen4
%type <time> expr_us time
%type <a> ipa
%type <net> net_ip4_ net_ip6_ net_ip6 net_ip_ net_ip net_or_ipa
%type <net> net_ip4_ net_ip4 net_ip6_ net_ip6 net_ip_ net_ip net_or_ipa
%type <net_ptr> net_ net_any net_vpn4_ net_vpn6_ net_vpn_ net_roa4_ net_roa6_ net_roa_ net_ip6_sadr_ net_mpls_
%type <mls> label_stack_start label_stack
%type <ad> label_stack_start label_stack
%type <t> text opttext
%type <s> symbol
%type <s> symbol symbol_known toksym
%nonassoc PREFIX_DUMMY
%left AND OR
%nonassoc '=' '<' '>' '~' GEQ LEQ NEQ NMA PO PC
%left '|' '&'
%left '+' '-'
%left '*' '/' '%'
%left '!'
@ -151,16 +154,16 @@ conf: definition ;
definition:
DEFINE symbol '=' term ';' {
struct f_val *val = cfg_allocz(sizeof(struct f_val));
if (f_eval(f_linearize($4), cfg_mem, val) > F_RETURN) cf_error("Runtime error");
cf_define_symbol($2, SYM_CONSTANT | val->type, val, val);
struct f_val val;
if (f_eval(f_linearize($4, 1), &val) > F_RETURN) cf_error("Runtime error");
cf_define_symbol($2, SYM_CONSTANT | val.type, val, lp_val_copy(cfg_mem, &val));
}
;
expr:
NUM
| '(' term ')' { $$ = f_eval_int(f_linearize($2)); }
| CF_SYM_KNOWN {
| '(' term ')' { $$ = f_eval_int(f_linearize($2, 1)); }
| symbol_known {
if ($1->class != (SYM_CONSTANT | T_INT)) cf_error("Number constant expected");
$$ = SYM_VAL($1).i; }
;
@ -171,7 +174,9 @@ expr_us:
| expr US { $$ = $1 US_; }
;
symbol: CF_SYM_UNDEFINED | CF_SYM_KNOWN ;
toksym: FROM | PREFERENCE ;
symbol: CF_SYM_UNDEFINED | CF_SYM_KNOWN | toksym ;
symbol_known: CF_SYM_KNOWN | toksym ;
/* Switches */
@ -303,6 +308,15 @@ net_:
/* Networks - regular */
net_ip4:
net_ip4_
| CF_SYM_KNOWN {
if (($1->class != (SYM_CONSTANT | T_NET)) || (SYM_VAL($1).net->type != NET_IP4))
cf_error("IPv4 network constant expected");
$$ = * SYM_VAL($1).net;
}
;
net_ip6:
net_ip6_
| CF_SYM_KNOWN {
@ -347,17 +361,19 @@ net_or_ipa:
label_stack_start: NUM
{
$$ = cfg_allocz(sizeof(mpls_label_stack));
$$->len = 1;
$$->stack[0] = $1;
$$ = cfg_allocz(ADATA_SIZE(MPLS_MAX_LABEL_STACK * sizeof(u32)));
$$->length = sizeof(u32);
*((u32 *)$$->data) = $1;
};
label_stack:
label_stack_start
| label_stack '/' NUM {
if ($1->len >= MPLS_MAX_LABEL_STACK)
if ($1->length >= MPLS_MAX_LABEL_STACK * sizeof(u32))
cf_error("Too many labels in stack");
$1->stack[$1->len++] = $3;
*((u32 *)($$->data + $1->length)) = $3;
$1->length += sizeof(u32);
$$ = $1;
}
;


@ -142,7 +142,7 @@ flow_frag_opts:
;
flow4_item:
flow_srcdst net_ip {
flow_srcdst net_ip4 {
flow_builder_set_type(this_flow, $1);
flow_builder4_add_pfx(this_flow, (net_addr_ip4 *) &($2));
}


@ -26,8 +26,7 @@ m4_define(CF_DEFINES, `m4_divert(-1)')
m4_define(CF_handle_kw, `m4_divert(1){ "m4_translit($1,[[A-Z]],[[a-z]])", $1, NULL },
m4_divert(-1)')
m4_define(CF_keywd, `m4_ifdef([[CF_tok_$1]],,[[m4_define([[CF_tok_$1]],1)CF_handle_kw($1)]])')
m4_define(CF_KEYWORDS, `m4_define([[CF_toks]],[[]])CF_iterate([[CF_keywd]], [[$@]])m4_ifelse(CF_toks,,,%token[[]]CF_toks
)DNL')
m4_define(CF_KEYWORDS, `CF_iterate([[CF_keywd]], [[$@]])DNL')
# CLI commands generate keywords as well
m4_define(CF_CLI, `CF_KEYWORDS(m4_translit($1, [[ ]], [[,]]))


@ -31,7 +31,7 @@ m4_define(CF_iterate, `m4_define([[CF_iter]], m4_defn([[$1]]))CF_itera($2)')
# Keywords act as untyped %token
m4_define(CF_keywd, `m4_ifdef([[CF_tok_$1]],,[[m4_define([[CF_tok_$1]],1)m4_define([[CF_toks]],CF_toks $1)]])')
m4_define(CF_KEYWORDS, `m4_define([[CF_toks]],[[]])CF_iterate([[CF_keywd]], [[$@]])m4_ifelse(CF_toks,,,%token[[]]CF_toks
m4_define(CF_KEYWORDS, `m4_define([[CF_toks]],[[]])CF_iterate([[CF_keywd]], [[$@]])m4_ifelse(CF_toks,,,%token<s>[[]]CF_toks
)DNL')
# CLI commands


@ -337,7 +337,7 @@ case $sysdesc in
;;
esac
AC_CHECK_HEADERS_ONCE([alloca.h syslog.h])
AC_CHECK_HEADERS_ONCE([alloca.h syslog.h stdatomic.h])
AC_CHECK_HEADER([sys/mman.h], [AC_DEFINE([HAVE_MMAP], [1], [Define to 1 if mmap() is available.])], have_mman=no)
AC_CHECK_FUNC([aligned_alloc], [AC_DEFINE([HAVE_ALIGNED_ALLOC], [1], [Define to 1 if aligned_alloc() is available.])], have_aligned_alloc=no)
AC_CHECK_MEMBERS([struct sockaddr.sa_len], [], [], [#include <sys/socket.h>])
@ -353,6 +353,13 @@ AC_C_BIGENDIAN(
[AC_MSG_ERROR([Cannot determine CPU endianity.])]
)
BIRD_CHECK_POINTER_ALIGNMENT
if test "$bird_cv_pointer_alignment" = "unknown" ; then
AC_MSG_ERROR([Couldn't determine pointer alignment])
else
AC_DEFINE_UNQUOTED([CPU_POINTER_ALIGNMENT], [$bird_cv_pointer_alignment], [Pointer alignment for macro usage])
fi
BIRD_CHECK_ANDROID_GLOB
if test "$bird_cv_lib_glob" = no ; then
AC_MSG_ERROR([glob.h not found.])


@ -5,8 +5,9 @@ After=network.target
[Service]
Type=simple
ExecStartPre=/usr/sbin/bird -p
ExecStart=/usr/sbin/bird -f -u bird -g bird
ExecReload=/bin/kill -HUP $MAINPID
ExecReload=/usr/sbin/birdc configure
Restart=on-failure
[Install]


@ -14,7 +14,7 @@ configuration - something in config which is not keyword.
(set-fill-column 80)
Copyright 1999,2000 Pavel Machek <pavel@ucw.cz>, distribute under GPL version 2 or later.
Copyright 1999 - 2022 CZ.NIC, z.s.p.o , distribute under GPL version 2 or later.
-->
@ -23,7 +23,6 @@ configuration - something in config which is not keyword.
<title>BIRD 2.0 User's Guide
<author>
Ondrej Filip <it/&lt;feela@network.cz&gt;/,
Pavel Machek <it/&lt;pavel@ucw.cz&gt;/,
Martin Mares <it/&lt;mj@ucw.cz&gt;/,
Maria Matejka <it/&lt;mq@jmq.cz&gt;/,
Ondrej Zajicek <it/&lt;santiago@crfreenet.org&gt;/
@ -145,13 +144,6 @@ BIRD executable by configuring out routing protocols you don't use, and
<p>You can pass several command-line options to bird:
<descrip>
<tag><label id="argv-block">-B <m/exp/</tag>
allocate memory using 2^<cf/exp/ byte sized blocks;
if you're expecting high memory load, raise this to
reduce number of allocated memory pages. For a million routes
in one table, the recommended setting is 18.
Default is your system page size, typically 12 for 4096 bytes.
<tag><label id="argv-config">-c <m/config name/</tag>
use given configuration file instead of <it/prefix/<file>/etc/bird.conf</file>.
@ -259,16 +251,9 @@ The global best route selection algorithm is (roughly) as follows:
</itemize>
<p><label id="dsc-table-sorted">Usually, a routing table just chooses a selected
route from a list of entries for one network. But if the <cf/sorted/ option is
activated, these lists of entries are kept completely sorted (according to
preference or some protocol-dependent metric). This is needed for some features
of some protocols (e.g. <cf/secondary/ option of BGP protocol, which allows to
accept not just a selected route, but the first route (in the sorted list) that
is accepted by filters), but it is incompatible with some other features (e.g.
<cf/deterministic med/ option of BGP protocol, which activates a way of choosing
selected route that cannot be described using comparison and ordering). Minor
advantage is that routes are shown sorted in <cf/show route/, minor disadvantage
is that it is slightly more computationally expensive.
route from a list of entries for one network. Optionally, these lists of entries
are kept completely sorted (according to preference or some protocol-dependent
metric). See <ref id="rtable-sorted" name="sorted"> table option for details.
<sect>Routes and network types
<label id="routes">
@ -635,18 +620,97 @@ include "tablename.conf";;
<cf/protocol/ times, and the <cf/iso long ms/ format for <cf/base/ and
<cf/log/ times.
<tag><label id="opt-table"><m/nettype/ table <m/name/ [sorted]</tag>
Create a new routing table. The default routing tables <cf/master4/ and
<cf/master6/ are created implicitly, other routing tables have to be
added by this command. Option <cf/sorted/ can be used to enable sorting
of routes, see <ref id="dsc-table-sorted" name="sorted table">
description for details.
<tag><label id="opt-table"><m/nettype/ table <m/name/ [ { <m/option/; [<m/.../] } ]</tag>
Define a new routing table. The default routing tables <cf/master4/ and
<cf/master6/ are defined implicitly, other routing tables have to be
defined by this option. See the <ref id="rtable-opts"
name="routing table configuration section"> for routing table options.
<tag><label id="opt-eval">eval <m/expr/</tag>
Evaluates given filter expression. It is used by the developers for testing of filters.
</descrip>
<sect>Routing table options
<label id="rtable-opts">
<p>Most routing tables do not need any options and are defined without an option
block, but there are still some options to tweak routing table behavior. Note
that implicit tables (<cf/master4/ and <cf/master6/) can be redefined in order
to set options.
<descrip>
<tag><label id="rtable-sorted">sorted <m/switch/</tag>
Usually, a routing table just chooses the selected (best) route from a
list of routes for each network, while keeping remaining routes unsorted.
If enabled, these lists of routes are kept completely sorted (according
to preference or some protocol-dependent metric).
This is needed for some protocol features (e.g. <cf/secondary/ option of
BGP protocol, which allows to accept not just a selected route, but the
first route (in the sorted list) that is accepted by filters), but it is
incompatible with some other features (e.g. <cf/deterministic med/
option of BGP protocol, which activates a way of choosing selected route
that cannot be described using comparison and ordering). Minor advantage
is that routes are shown sorted in <cf/show route/, minor disadvantage
is that it is slightly more computationally expensive. Default: off.
<tag><label id="rtable-trie">trie <m/switch/</tag>
BIRD routing tables are implemented with hash tables, which is efficient
for exact-match lookups, but inconvenient for longest-match lookups or
interval lookups (finding superprefix or subprefixes). This option
activates additional trie structure that is used to accelerate these
lookups, while using the hash table for exact-match lookups.
This has advantage for <ref id="rpki" name="RPKI"> (on ROA tables),
for <ref id="bgp-gateway" name="recursive next-hops"> (on IGP tables),
and is required for <ref id="bgp-validate" name="flowspec validation">
(on base IP tables). Another advantage is that interval results (like
from <cf/show route in .../ command) are lexicographically sorted. The
disadvantage is that trie-enabled routing tables require more memory,
which may be an issue especially in multi-table setups. Default: off.
<tag><label id="rtable-min-settle-time">min settle time <m/time/</tag>
Specify a minimum value of the settle time. When a ROA table changes,
automatic <ref id="proto-rpki-reload" name="RPKI reload"> may be
triggered, after a short settle time. Minimum settle time is a delay
from the last ROA table change to wait for more updates. Default: 1 s.
<tag><label id="rtable-max-settle-time">max settle time <m/time/</tag>
Specify a maximum value of the settle time. When a ROA table changes,
automatic <ref id="proto-rpki-reload" name="RPKI reload"> may be
triggered, after a short settle time. Maximum settle time is an upper
limit to the settle time from the initial ROA table change even if
there are consecutive updates gradually renewing the settle time.
Default: 20 s.
<tag><label id="rtable-gc-threshold">gc threshold <m/number/</tag>
Specify a minimum amount of removed networks that triggers a garbage
collection (GC) cycle. Default: 1000.
<tag><label id="rtable-gc-period">gc period <m/time/</tag>
Specify a period of time between consecutive GC cycles. When there is a
significant amount of route withdraws, GC cycles are executed repeatedly
with given period time (with some random factor). When there is just
small amount of changes, GC cycles are not executed. In extensive route
server setups, running GC on hundreds of full BGP routing tables can
take significant amount of time, therefore they should use higher GC
periods. Default: adaptive, based on number of routing tables in the
configuration. From 10 s (with <= 25 routing tables) up to 600 s (with
>= 1500 routing tables).
<tag><label id="rtable-cork-threshold">cork threshold <m/number/ <m/number/</tag>
Too many pending exports may lead to memory bloating. In such cases,
BIRD tries to relieve the memory pressure by pausing some routines until
the queue sizes get low enough. This option allows the user to set the
thresholds; first value is the low threshold (when to resume), the
second one is the high threshold (when to pause). The higher is the
threshold, the more memory can get used. In most cases, the defaults
should work for you. Default: 128, 512.
</descrip>
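
<p>As a sketch of how these options combine in practice, a table carrying most
of them might be declared as follows (the table name and all option values are
purely illustrative, not recommendations):

<code>
ipv4 table t_demo {
	sorted yes;
	trie yes;
	min settle time 1 s;
	max settle time 20 s;
	gc threshold 1000;
	gc period 10 s;
	cork threshold 128 512;
};
</code>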
<sect>Protocol options
<label id="protocol-opts">
@ -874,10 +938,12 @@ inherited from templates can be updated by new definitions.
<cf/none/ is for dropping all routes. Default: <cf/all/ (except for
EBGP).
<tag><label id="proto-export">export <m/filter/</tag>
<tag><label id="proto-export">export [ in <m/prefix/ ] <m/filter/</tag>
This is similar to the <cf>import</cf> keyword, except that it works in
the direction from the routing table to the protocol. Default: <cf/none/
(except for EBGP).
the direction from the routing table to the protocol. If <cf/in/ keyword is used,
only routes inside the given prefix are exported. Other routes are completely
ignored (e.g. no logging and no statistics).
Default: <cf/none/ (except for EBGP).
<tag><label id="proto-import-keep-filtered">import keep filtered <m/switch/</tag>
Usually, if an import filter rejects a route, the route is forgotten.
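
<p>Returning to the <cf/export in/ form above: a minimal sketch, here assuming
a kernel protocol and an illustrative prefix, could look like this:

<code>
protocol kernel {
	ipv4 { export in 192.168.0.0/16 all; };
}
</code>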
@ -1205,8 +1271,8 @@ this:
<code>
filter not_too_far
int var;
{
int var;
if defined( rip_metric ) then
var = rip_metric;
else {
@ -1235,9 +1301,9 @@ local variables. Recursion is not allowed. Function definitions look like this:
<code>
function name ()
int local_variable;
{
local_variable = 5;
int local_variable;
int another_variable = 5;
}
function with_parameters (int parameter)
@ -1246,16 +1312,19 @@ function with_parameters (int parameter)
}
</code>
<p>Unlike in C, variables are declared after the <cf/function/ line, but before
the first <cf/{/. You can't declare variables in nested blocks. Functions are
called like in C: <cf>name(); with_parameters(5);</cf>. Function may return
values using the <cf>return <m/[expr]/</cf> command. Returning a value exits
from current function (this is similar to C).
<p>Like in C programming language, variables are declared inside function body,
either at the beginning, or mixed with other statements. Declarations may
contain initialization. You can also declare variables in nested blocks, such
variables have scope restricted to such block. There is a deprecated syntax to
declare variables after the <cf/function/ line, but before the first <cf/{/.
Functions are called like in C: <cf>name(); with_parameters(5);</cf>. Function
may return values using the <cf>return <m/[expr]/</cf> command. Returning a
value exits from current function (this is similar to C).
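
<p>A short sketch of these declaration rules (the function name, parameter and
values are purely illustrative); note the variable declared inside the nested
block, which is visible only there:

<code>
function above_limit(int metric)
{
	int threshold = 100;
	if metric > threshold then {
		# declared in a nested block, scoped to this block only
		int excess = metric - threshold;
		print "over the limit by ", excess;
	}
	return metric > threshold;
}
</code>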
<p>Filters are defined in a way similar to functions except they can't have
<p>Filters are defined in a way similar to functions except they cannot have
explicit parameters. They get a route table entry as an implicit parameter, it
is also passed automatically to any functions called. The filter must terminate
with either <cf/accept/ or <cf/reject/ statement. If there's a runtime error in
with either <cf/accept/ or <cf/reject/ statement. If there is a runtime error in
filter, the route is rejected.
<p>A nice trick to debug filters is to use <cf>show route filter <m/name/</cf>
@ -1299,6 +1368,9 @@ in the foot).
The same syntax can also be used to construct a pair from two arbitrary
integer expressions (for example <cf/(1+2,a)/).
Operators <cf/.asn/ and <cf/.data/ can be used to extract corresponding
components of a pair: <cf>(<m/asn/, <m/data/)</cf>.
<tag><label id="type-quad">quad</tag>
This is a dotted quad of numbers used to represent router IDs (and
others). Each component can have a value from 0 to 255. Literals of
@ -1389,6 +1461,10 @@ in the foot).
pairs, LCs can be constructed using expressions for its parts, (e.g.
<cf/(myas, 10+20, 3*10)/, where <cf/myas/ is an integer variable).
Operators <cf/.asn/, <cf/.data1/, and <cf/.data2/ can be used
to extract corresponding components of LCs:
<cf>(<m/asn/, <m/data1/, <m/data2/)</cf>.
<tag><label id="type-set">int|pair|quad|ip|prefix|ec|lc|enum set</tag>
Filters recognize four types of sets. Sets are similar to strings: you
can pass them around but you can't modify them. Literals of type <cf>int
@ -1532,7 +1608,7 @@ in the foot).
Clist is similar to a set, except that unlike other sets, it can be
modified. The type is used for community list (a set of pairs) and for
cluster list (a set of quads). There exist no literals of this type.
There are three special operators on clists:
There are special operators on clists:
<cf><m/C/.len</cf> returns the length of clist <m/C/.
@ -1559,6 +1635,15 @@ in the foot).
<cf><m/C/.add(<m/P/);</cf> if <m/C/ is appropriate route attribute (for
example <cf/bgp_community/). Similarly for <cf/delete/ and <cf/filter/.
<cf><m/C/.min</cf> returns the minimum element of clist <m/C/.
<cf><m/C/.max</cf> returns the maximum element of clist <m/C/.
Operators <cf/.min/, <cf/.max/ can be used together with <cf/filter/
to extract the community from the specific subset of communities
(e.g. localpref or prepend) without the need to check every possible
value (e.g. <cf/filter(bgp_community, [(23456, 1000..1099)]).min/).
<tag><label id="type-eclist">eclist</tag>
Eclist is a data type used for BGP extended community lists. Eclists
are very similar to clists, but they are sets of ECs instead of pairs.
@ -1609,7 +1694,8 @@ prefix and an ASN as arguments.
<sect>Control structures
<label id="control-structures">
<p>Filters support two control structures: conditions and case switches.
<p>Filters support several control structures: conditions, for loops and case
switches.
<p>Syntax of a condition is: <cf>if <M>boolean expression</M> then <m/commandT/;
else <m/commandF/;</cf> and you can use <cf>{ <m/command1/; <m/command2/;
@ -1617,6 +1703,14 @@ else <m/commandF/;</cf> and you can use <cf>{ <m/command1/; <m/command2/;
omitted. If the <cf><m>boolean expression</m></cf> is true, <m/commandT/ is
executed, otherwise <m/commandF/ is executed.
<p>For loops allow to iterate over elements in compound data like BGP paths or
community lists. The syntax is: <cf>for [ <m/type/ ] <m/variable/ in <m/expr/
do <m/command/;</cf> and you can also use compound command like in conditions.
The expression is evaluated to a compound data, then for each element from such
data the command is executed with the item assigned to the variable. A variable
may be an existing one (when just name is used) or a locally defined (when type
and name is used). In both cases, it must have the same type as elements.
<p>The <cf>case</cf> is similar to case from Pascal. Syntax is <cf>case
<m/expr/ { else: | <m/num_or_prefix [ .. num_or_prefix]/: <m/statement/ ; [
... ] }</cf>. The expression after <cf>case</cf> can be of any type which can be
@ -1629,16 +1723,21 @@ neither of the <cf/:/ clauses, the statements after <cf/else:/ are executed.
<p>Here is example that uses <cf/if/ and <cf/case/ structures:
<code>
if 1234 = i then printn "."; else {
print "not 1234";
print "You need {} around multiple commands";
}
for int asn in bgp_path do {
printn "ASN: ", asn;
if asn < 65536 then print " (2B)"; else print " (4B)";
}
case arg1 {
2: print "two"; print "I can do more commands without {}";
3 .. 5: print "three to five";
else: print "something else";
}
if 1234 = i then printn "."; else {
print "not 1234";
print "You need {} around multiple commands";
}
</code>
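
<p>The <cf/.min/ and <cf/.max/ operators described earlier combine with
<cf/filter/ in the same spirit; a small illustrative sketch (the community
range is made up), intended to be called from a filter:

<code>
function lowest_marker_community()
{
	# pick the numerically lowest community within a chosen range
	pair c = filter(bgp_community, [(65000, 1000..1099)]).min;
	return c;
}
</code>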
@ -1667,17 +1766,8 @@ Common route attributes are:
primary key of the routing table. Read-only. (See the <ref id="routes"
name="chapter about routes">.)
<tag><label id="rta-scope"><m/enum/ scope</tag>
The scope of the route. Possible values: <cf/SCOPE_HOST/ for routes
local to this host, <cf/SCOPE_LINK/ for those specific for a physical
link, <cf/SCOPE_SITE/ and <cf/SCOPE_ORGANIZATION/ for private routes and
<cf/SCOPE_UNIVERSE/ for globally visible routes. This attribute is not
interpreted by BIRD and can be used to mark routes in filters. The
default value for new routes is <cf/SCOPE_UNIVERSE/.
<tag><label id="rta-preference"><m/int/ preference</tag>
Preference of the route. Valid values are 0-65535. (See the chapter
about routing tables.)
Preference of the route.
<tag><label id="rta-from"><m/ip/ from</tag>
The router which the route has originated from.
@ -2097,6 +2187,13 @@ protocol bfd [&lt;name&gt;] {
to configure separate BFD protocol instances for IPv4 and for IPv6
sessions.
<tag><label id="bfd-strict-bind">strict bind <m/switch/</tag>
Specify whether each BFD interface should use a separate listening
socket bound to its local address, or just use a shared listening socket
accepting all addresses. Binding to a specific address could be useful
in cases like running multiple BIRD instances on a machine, each
handling a different set of interfaces. Default: disabled.
<tag><label id="bfd-iface">interface <m/pattern/ [, <m/.../] { <m/options/ }</tag>
Interface definitions allow to specify options for sessions associated
with such interfaces and also may contain interface specific options.
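
<p>A minimal sketch of the <cf/strict bind/ option described above, with an
illustrative interface pattern:

<code>
protocol bfd {
	strict bind yes;
	interface "eth*" {
		interval 100 ms;
	};
}
</code>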
@ -2281,6 +2378,7 @@ avoid routing loops.
<item> <rfc id="8092"> - BGP Large Communities Attribute
<item> <rfc id="8203"> - BGP Administrative Shutdown Communication
<item> <rfc id="8212"> - Default EBGP Route Propagation Behavior without Policies
<item> <rfc id="9117"> - Revised Validation Procedure for BGP Flow Specifications
</itemize>
<sect1>Route selection rules
@ -2403,6 +2501,12 @@ using the following configuration parameters:
same address family and using the same local port) should have set
<cf/strict bind/, or none of them. Default: disabled.
<tag><label id="bgp-free-bind">free bind <m/switch/</tag>
Use IP_FREEBIND socket option for the listening socket, which allows
binding to an IP address not (yet) assigned to an interface. Note that
all BGP instances that share a listening socket should have the same
value of the <cf/freebind/ option. Default: disabled.
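
<p>For instance, <cf/free bind/ lets the listening socket bind to an address
that is not (yet) assigned to an interface; all addresses and AS numbers below
are placeholders:

<code>
protocol bgp {
	local 192.0.2.10 as 65001;
	neighbor 192.0.2.1 as 65000;
	free bind yes;
	ipv4 { import all; export none; };
}
</code>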
<tag><label id="bgp-check-link">check link <M>switch</M></tag>
BGP could use hardware link state into consideration. If enabled,
BIRD tracks the link state of the associated interface and when link
@ -2614,13 +2718,6 @@ using the following configuration parameters:
disabled. Default: on, with automatic fallback to off when received
capability-related error.
<tag><label id="bgp-advertise-ipv4">advertise ipv4 <m/switch/</tag>
Advertise IPv4 multiprotocol capability. This is not a correct behavior
according to the strict interpretation of <rfc id="4760">, but it is
widespread and required by some BGP implementations (Cisco and Quagga).
This option is relevant to IPv4 mode with enabled capability
advertisement only. Default: on.
<tag><label id="bgp-advertise-hostname">advertise hostname <m/switch/</tag>
Advertise hostname capability along with the hostname. Default: off.
@ -2666,7 +2763,7 @@ using the following configuration parameters:
<tag><label id="bgp-error-wait-time">error wait time <m/number/,<m/number/</tag>
Minimum and maximum delay in seconds between a protocol failure (either
local or reported by the peer) and automatic restart. Doesn't apply
local or reported by the peer) and automatic restart. Does not apply
when <cf/disable after error/ is configured. If consecutive errors
happen, the delay is increased exponentially until it reaches the
maximum. Default: 60, 300.
@ -2844,6 +2941,31 @@ be used in explicit configuration.
explicitly (to conserve memory). This option requires that the connected
routing table is <ref id="dsc-table-sorted" name="sorted">. Default: off.
<tag><label id="bgp-validate">validate <m/switch/</tag>
Apply flowspec validation procedure as described in <rfc id="8955">
section 6 and <rfc id="9117">. The Validation procedure enforces that
only routers in the forwarding path for a network can originate flowspec
rules for that network. The validation procedure should be used for EBGP
to prevent injection of malicious flowspec rules from outside, but it
should also be used for IBGP to ensure that selected flowspec rules are
consistent with selected IP routes. The validation procedure uses an IP
routing table (<ref id="bgp-base-table" name="base table">, see below)
against which flowspec rules are validated. This option is limited to
flowspec channels. Default: off (for compatibility reasons).
Note that currently the flowspec validation does not work reliably
together with <ref id="bgp-import-table" name="import table"> option
enabled on flowspec channels.
<tag><label id="bgp-base-table">base table <m/name/</tag>
Specifies an IP table used for the flowspec validation procedure. The
table must have enabled <cf/trie/ option, otherwise the validation
procedure would not work. The type of the table must be <cf/ipv4/ for
<cf/flow4/ channels and <cf/ipv6/ for <cf/flow6/ channels. This option
is limited to flowspec channels. Default: the main table of the
<cf/ipv4/ / <cf/ipv6/ channel of the same BGP instance, or the
<cf/master4/ / <cf/master6/ table if there is no such channel.
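
<p>Putting <cf/validate/ and <cf/base table/ together, a flowspec-capable
instance might be sketched as follows; the table name, addresses and AS numbers
are placeholders, and the base table has <cf/trie/ enabled as required:

<code>
ipv4 table flowbase { trie yes; };

protocol bgp fs_peer {
	local as 65001;
	neighbor 192.0.2.1 as 65000;
	ipv4 { table flowbase; import all; };
	flow4 { validate yes; base table flowbase; import all; };
}
</code>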
<tag><label id="bgp-extended-next-hop">extended next hop <m/switch/</tag>
BGP expects that announced next hops have the same address family as
associated network prefixes. This option provides an extension to use
@ -3240,6 +3362,12 @@ channels.
allows to specify a limit on maximal number of nexthops in one route. By
default, multipath merging is disabled. If enabled, default value of the
limit is 16.
<tag><label id="krt-netlink-rx-buffer">netlink rx buffer <m/number/</tag> (Linux)
Set kernel receive buffer size (in bytes) for the netlink socket. The default
value is OS-dependent (from the <file>/proc/sys/net/core/rmem_default</file>
file). If you get some "Kernel dropped some netlink message ..." warnings,
you may increase this value.
</descrip>
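
<p>For example, a kernel protocol with an enlarged netlink receive buffer might
be configured like this (the buffer size is an arbitrary illustration):

<code>
protocol kernel {
	netlink rx buffer 2097152;
	ipv4 { export all; };
}
</code>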
<sect1>Attributes
@ -5165,7 +5293,7 @@ Note that for negated matches, value must be either zero or equal to bitmask
<cf>port 1..1023,1194,3306</cf>).
<tag><label id="flow-dport">dport <m/numbers-match/</tag>
Set a mating destination port numbers (e.g. <cf>dport 49151</cf>).
Set a matching destination port numbers (e.g. <cf>dport 49151</cf>).
<tag><label id="flow-sport">sport <m/numbers-match/</tag>
Set a matching source port numbers (e.g. <cf>sport = 0</cf>).
@ -5356,15 +5484,15 @@ name="atrey.karlin.mff.cuni.cz:/pub/rfc">).
</book>
<!--
LocalWords: GPL IPv GateD BGPv RIPv OSPFv Linux sgml html dvi sgmltools Pavel
LocalWords: GPL IPv GateD BGPv RIPv OSPFv Linux sgml html dvi sgmltools
LocalWords: linuxdoc dtd descrip config conf syslog stderr auth ospf bgp Mbps
LocalWords: router's eval expr num birdc ctl UNIX if's enums bool int ip GCC
LocalWords: len ipaddress pxlen netmask enum bgppath bgpmask clist gw md eth
LocalWords: RTS printn quitbird iBGP AS'es eBGP RFC multiprotocol IGP Machek
LocalWords: RTS printn quitbird iBGP AS'es eBGP RFC multiprotocol IGP
LocalWords: EGP misconfigurations keepalive pref aggr aggregator BIRD's RTC
LocalWords: OS'es AS's multicast nolisten misconfigured UID blackhole MRTD MTU
LocalWords: uninstalls ethernets IP binutils ANYCAST anycast dest RTD ICMP rfc
LocalWords: compat multicasts nonbroadcast pointopoint loopback sym stats
LocalWords: Perl SIGHUP dd mm yy HH MM SS EXT IA UNICAST multihop Discriminator txt
LocalWords: proto wildcard Ondrej Filip
LocalWords: proto wildcard
-->


@ -12,9 +12,9 @@
<title>BIRD Programmer's Documentation
<author>
Ondrej Filip <it/&lt;feela@network.cz&gt;/,
Pavel Machek <it/&lt;pavel@ucw.cz&gt;/,
Martin Mares <it/&lt;mj@ucw.cz&gt;/,
Ondrej Zajicek <it/&lt;santiago@crfreenet.org&gt;/
Maria Matejka <it/&lt;mq@jmq.cz&gt;/,
</author>
<abstract>


@ -22,6 +22,46 @@ static inline u32 pair_b(u32 p) { return p & 0xFFFF; }
#define f_generate_complex(fi_code, da, arg) \
f_new_inst(FI_EA_SET, f_new_inst(fi_code, f_new_inst(FI_EA_GET, da), arg), da)
#define f_generate_complex_sym(fi_code, sym, arg) ({ \
if (sym->class != SYM_ATTRIBUTE) \
cf_error("Can't empty %s: not an attribute", sym->name); \
f_generate_complex(fi_code, sym->attribute, arg); \
})
#define f_generate_complex_default(fi_code, da, arg, def) \
f_new_inst(FI_EA_SET, f_new_inst(fi_code, f_new_inst(FI_DEFAULT, f_new_inst(FI_EA_GET, da), f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_INT, .val.i = def })), arg), da)
static int
f_new_var(struct sym_scope *s)
{
/*
* - A variable is an offset on vstack from vbase.
* - Vbase is set on filter start / function call.
* - Scopes contain anonymous scopes (blocks) inside filter/function scope
* - Each scope knows number of vars in that scope
* - Offset is therefore a sum of 'slots' up to named scope
* - New variables are added on top of vstk, so intermediate values cannot
* be there during FI_VAR_INIT. I.e. no 'var' inside 'term'.
* - Also, each f_line must always have its scope, otherwise a variable may
* be defined but not initialized if relevant f_line is not executed.
*/
int offset = s->slots++;
while (!s->name)
{
s = s->next;
ASSERT(s);
offset += s->slots;
}
if (offset >= 0xff)
cf_error("Too many variables, at most 255 allowed");
return offset;
}
/*
* Sets and their items are during parsing handled as lists, linked
* through left ptr. The first item in a list also contains a pointer
@ -161,28 +201,32 @@ f_new_lc_item(u32 f1, u32 t1, u32 f2, u32 t2, u32 f3, u32 t3)
}
static inline struct f_inst *
f_generate_empty(struct f_dynamic_attr dyn)
f_generate_empty(const struct symbol *sym)
{
struct f_val empty;
if (sym->class != SYM_ATTRIBUTE)
cf_error("Can't empty %s: not an attribute", sym->name);
switch (dyn.type & EAF_TYPE_MASK) {
case EAF_TYPE_AS_PATH:
empty = f_const_empty_path;
break;
case EAF_TYPE_INT_SET:
empty = f_const_empty_clist;
break;
case EAF_TYPE_EC_SET:
empty = f_const_empty_eclist;
break;
case EAF_TYPE_LC_SET:
empty = f_const_empty_lclist;
break;
default:
cf_error("Can't empty that attribute");
}
const struct ea_class *def = sym->attribute;
const struct f_val *empty = f_get_empty(def->type);
if (!empty)
cf_error("Can't empty attribute %s", def->name);
return f_new_inst(FI_EA_SET, f_new_inst(FI_CONSTANT, empty), dyn);
return f_new_inst(FI_EA_SET, f_new_inst(FI_CONSTANT, *empty), def);
}
static inline struct f_inst *
f_implicit_roa_check(struct rtable_config *tab)
{
const struct ea_class *def = ea_class_find("bgp_path");
if (!def)
cf_error("Fatal: Couldn't find BGP path attribute definition.");
struct f_static_attr fsa = f_new_static_attr(T_NET, SA_NET, 1);
return f_new_inst(FI_ROA_CHECK,
f_new_inst(FI_RTA_GET, fsa),
f_new_inst(FI_AS_PATH_LAST, f_new_inst(FI_EA_GET, def)),
tab);
}
/*
@ -273,15 +317,17 @@ CF_KEYWORDS(FUNCTION, PRINT, PRINTN, UNSET, RETURN,
INT, BOOL, IP, TYPE, PREFIX, RD, PAIR, QUAD, EC, LC,
SET, STRING, BGPMASK, BGPPATH, CLIST, ECLIST, LCLIST,
IF, THEN, ELSE, CASE,
FOR, IN, DO,
TRUE, FALSE, RT, RO, UNKNOWN, GENERIC,
FROM, GW, NET, MASK, PROTO, SOURCE, SCOPE, DEST, IFNAME, IFINDEX, WEIGHT, GW_MPLS,
PREFERENCE,
FROM, GW, NET, MASK, PROTO, SCOPE, DEST, IFNAME, IFINDEX, WEIGHT, GW_MPLS,
ROA_CHECK, ASN, SRC, DST,
IS_V4, IS_V6,
LEN, MAXLEN,
DATA, DATA1, DATA2,
DEFINED,
ADD, DELETE, CONTAINS, RESET,
PREPEND, FIRST, LAST, LAST_NONAGGREGATED, MATCH,
ADD, DELETE, RESET,
PREPEND, FIRST, LAST, LAST_NONAGGREGATED,
MIN, MAX,
EMPTY,
FILTER, WHERE, EVAL, ATTRIBUTE,
BT_ASSERT, BT_TEST_SUITE, BT_CHECK_ASSIGN, BT_TEST_SAME, FORMAT, STACKS)
@ -290,13 +336,14 @@ CF_KEYWORDS(FUNCTION, PRINT, PRINTN, UNSET, RETURN,
%nonassoc ELSE
%type <xp> cmds_int cmd_prep
%type <x> term block cmd cmds constant constructor print_list var_list function_call symbol_value bgp_path_expr bgp_path bgp_path_tail
%type <fda> dynamic_attr
%type <x> term cmd cmd_var cmds cmds_scoped constant constructor print_list var var_init var_list function_call symbol_value bgp_path_expr bgp_path bgp_path_tail
%type <fsa> static_attr
%type <fab> attr_bit
%type <f> filter where_filter
%type <fl> filter_body function_body
%type <flv> lvalue
%type <i> type function_args function_vars
%type <i> type function_vars
%type <fa> function_argsn function_args
%type <ecs> ec_kind
%type <fret> break_command
%type <i32> cnum
@ -305,6 +352,7 @@ CF_KEYWORDS(FUNCTION, PRINT, PRINTN, UNSET, RETURN,
%type <v> set_atom switch_atom fipa
%type <px> fprefix
%type <t> get_cf_position
%type <s> for_var
CF_GRAMMAR
@ -328,17 +376,24 @@ filter_def:
conf: filter_eval ;
filter_eval:
EVAL term { f_eval_int(f_linearize($2)); }
EVAL term { f_eval_int(f_linearize($2, 1)); }
;
conf: custom_attr ;
custom_attr: ATTRIBUTE type symbol ';' {
cf_define_symbol($3, SYM_ATTRIBUTE, attribute, ca_lookup(new_config->pool, $3->name, $2)->fda);
if (($3->class == SYM_ATTRIBUTE) && ($3->scope == new_config->root_scope))
cf_error("Duplicate attribute %s definition", $3->name);
cf_define_symbol($3, SYM_ATTRIBUTE, attribute,
ea_register_alloc(new_config->pool, (struct ea_class) {
.name = $3->name,
.type = $2,
})->class);
};
conf: bt_test_suite ;
bt_test_suite:
BT_TEST_SUITE '(' CF_SYM_KNOWN ',' text ')' {
BT_TEST_SUITE '(' symbol_known ',' text ')' {
cf_assert_symbol($3, SYM_FUNCTION);
struct f_bt_test_suite *t = cfg_allocz(sizeof(struct f_bt_test_suite));
t->fn = $3->function;
@ -351,7 +406,7 @@ bt_test_suite:
conf: bt_test_same ;
bt_test_same:
BT_TEST_SAME '(' CF_SYM_KNOWN ',' CF_SYM_KNOWN ',' NUM ')' {
BT_TEST_SAME '(' symbol_known ',' symbol_known ',' NUM ')' {
cf_assert_symbol($3, SYM_FUNCTION);
cf_assert_symbol($5, SYM_FUNCTION);
struct f_bt_test_suite *t = cfg_allocz(sizeof(struct f_bt_test_suite));
@ -403,25 +458,28 @@ type:
;
function_argsn:
/* EMPTY */
/* EMPTY */ { $$ = NULL; }
| function_argsn type symbol ';' {
if ($3->scope->slots >= 0xfe) cf_error("Too many declarations, at most 255 allowed");
cf_define_symbol($3, SYM_VARIABLE | $2, offset, $3->scope->slots++);
$$ = cfg_alloc(sizeof(struct f_arg));
$$->arg = cf_define_symbol($3, SYM_VARIABLE | $2, offset, sym_->scope->slots++);
$$->next = $1;
}
;
function_args:
'(' ')' { $$ = 0; }
'(' ')' { $$ = NULL; }
| '(' function_argsn type symbol ')' {
cf_define_symbol($4, SYM_VARIABLE | $3, offset, $4->scope->slots++);
$$ = $4->scope->slots;
$$ = cfg_alloc(sizeof(struct f_arg));
$$->arg = cf_define_symbol($4, SYM_VARIABLE | $3, offset, sym_->scope->slots++);
$$->next = $2;
}
;
function_vars:
/* EMPTY */ { $$ = 0; }
| function_vars type symbol ';' {
cf_define_symbol($3, SYM_VARIABLE | $2, offset, $3->scope->slots++);
cf_define_symbol($3, SYM_VARIABLE | $2, offset, f_new_var(sym_->scope));
$$ = $1 + 1;
}
;
@ -429,7 +487,7 @@ function_vars:
filter_body: function_body ;
filter:
CF_SYM_KNOWN {
symbol_known {
cf_assert_symbol($1, SYM_FILTER);
$$ = $1->filter;
}
@ -449,20 +507,35 @@ where_filter:
function_body:
function_vars '{' cmds '}' {
$$ = f_linearize($3);
$$ = f_linearize($3, 0);
$$->vars = $1;
}
;
conf: function_def ;
function_def:
FUNCTION symbol { DBG( "Beginning of function %s\n", $2->name );
FUNCTION symbol {
DBG( "Beginning of function %s\n", $2->name );
$2 = cf_define_symbol($2, SYM_FUNCTION, function, NULL);
cf_push_scope($2);
} function_args function_body {
DBG("Definition of function %s with %u args and %u local vars.\n", $2->name, $4, $5->vars);
$5->args = $4;
$2->function = $5;
} function_args {
/* Make dummy f_line for storing function prototype */
struct f_line *dummy = cfg_allocz(sizeof(struct f_line));
$2->function = dummy;
/* Revert the args */
while ($4) {
struct f_arg *tmp = $4;
$4 = $4->next;
tmp->next = dummy->arg_list;
dummy->arg_list = tmp;
dummy->args++;
}
} function_body {
$6->args = $2->function->args;
$6->arg_list = $2->function->arg_list;
$2->function = $6;
cf_pop_scope();
}
;
@ -473,7 +546,11 @@ cmds: /* EMPTY */ { $$ = NULL; }
| cmds_int { $$ = $1.begin; }
;
cmd_prep: cmd {
cmds_scoped: { cf_push_soft_scope(); } cmds { cf_pop_soft_scope(); $$ = $2; } ;
cmd_var: var | cmd ;
cmd_prep: cmd_var {
$$.begin = $$.end = $1;
if ($1)
while ($$.end->next)
@ -495,15 +572,6 @@ cmds_int: cmd_prep
}
;
block:
cmd {
$$=$1;
}
| '{' cmds '}' {
$$=$2;
}
;
/*
* Complex types, their bison value is struct f_val
*/
@ -527,10 +595,10 @@ set_atom:
| VPN_RD { $$.type = T_RD; $$.val.ec = $1; }
| ENUM { $$.type = pair_a($1); $$.val.i = pair_b($1); }
| '(' term ')' {
if (f_eval(f_linearize($2), cfg_mem, &($$)) > F_RETURN) cf_error("Runtime error");
if (f_eval(f_linearize($2, 1), &($$)) > F_RETURN) cf_error("Runtime error");
if (!f_valid_set_type($$.type)) cf_error("Set-incompatible type");
}
| CF_SYM_KNOWN {
| symbol_known {
cf_assert_symbol($1, SYM_CONSTANT);
if (!f_valid_set_type(SYM_TYPE($1))) cf_error("%s: set-incompatible type", $1->name);
$$ = *$1->val;
@ -539,13 +607,13 @@ set_atom:
switch_atom:
NUM { $$.type = T_INT; $$.val.i = $1; }
| '(' term ')' { $$.type = T_INT; $$.val.i = f_eval_int(f_linearize($2)); }
| '(' term ')' { $$.type = T_INT; $$.val.i = f_eval_int(f_linearize($2, 1)); }
| fipa { $$ = $1; }
| ENUM { $$.type = pair_a($1); $$.val.i = pair_b($1); }
;
cnum:
term { $$ = f_eval_int(f_linearize($1)); }
term { $$ = f_eval_int(f_linearize($1, 1)); }
pair_item:
'(' cnum ',' cnum ')' { $$ = f_new_pair_item($2, $2, $4, $4); }
@ -629,19 +697,19 @@ fprefix_set:
;
switch_body: /* EMPTY */ { $$ = NULL; }
| switch_body switch_items ':' cmds {
| switch_body switch_items ':' cmds_scoped {
/* Fill data fields */
struct f_tree *t;
struct f_line *line = f_linearize($4);
struct f_line *line = f_linearize($4, 0);
for (t = $2; t; t = t->left)
t->data = line;
$$ = f_merge_items($1, $2);
}
| switch_body ELSECOL cmds {
| switch_body ELSECOL cmds_scoped {
struct f_tree *t = f_new_tree();
t->from.type = t->to.type = T_VOID;
t->right = t;
t->data = f_linearize($3);
t->data = f_linearize($3, 0);
$$ = f_merge_items($1, t);
}
;
@ -658,6 +726,7 @@ bgp_path:
bgp_path_tail:
NUM bgp_path_tail { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_PATH_MASK_ITEM, .val.pmi = { .asn = $1, .kind = PM_ASN, }, }); $$->next = $2; }
| NUM DDOT NUM bgp_path_tail { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_PATH_MASK_ITEM, .val.pmi = { .from = $1, .to = $3, .kind = PM_ASN_RANGE }, }); $$->next = $4; }
| '[' ']' bgp_path_tail { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_PATH_MASK_ITEM, .val.pmi = { .set = NULL, .kind = PM_ASN_SET }, }); $$->next = $3; }
| '[' set_items ']' bgp_path_tail {
if ($2->from.type != T_INT) cf_error("Only integer sets allowed in path mask");
$$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_PATH_MASK_ITEM, .val.pmi = { .set = build_tree($2), .kind = PM_ASN_SET }, }); $$->next = $4;
@ -677,6 +746,7 @@ constant:
| fipa { $$ = f_new_inst(FI_CONSTANT, $1); }
| VPN_RD { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_RD, .val.ec = $1, }); }
| net_ { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_NET, .val.net = $1, }); }
| '[' ']' { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_SET, .val.t = NULL, }); }
| '[' set_items ']' {
DBG( "We've got a set here..." );
$$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_SET, .val.t = build_tree($2), });
@ -700,31 +770,26 @@ var_list: /* EMPTY */ { $$ = NULL; }
| var_list ',' term { $$ = $3; $$->next = $1; }
function_call:
CF_SYM_KNOWN '(' var_list ')' {
symbol_known '(' var_list ')'
{
if ($1->class != SYM_FUNCTION)
cf_error("You can't call something which is not a function. Really.");
struct f_inst *fc = f_new_inst(FI_CALL, $1);
uint args = 0;
/* Revert the var_list */
struct f_inst *args = NULL;
while ($3) {
args++;
struct f_inst *tmp = $3->next;
$3->next = fc;
struct f_inst *tmp = $3;
$3 = $3->next;
fc = $3;
$3 = tmp;
tmp->next = args;
args = tmp;
}
if (args != $1->function->args)
cf_error("Function call '%s' got %u arguments, need %u arguments.",
$1->name, args, $1->function->args);
$$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_VOID });
$$->next = fc;
$$ = f_new_inst(FI_CALL, args, $1);
}
;
symbol_value: CF_SYM_KNOWN
symbol_value: symbol_known
{
switch ($1->class) {
case SYM_CONSTANT_RANGE:
@ -734,7 +799,7 @@ symbol_value: CF_SYM_KNOWN
$$ = f_new_inst(FI_VAR_GET, $1);
break;
case SYM_ATTRIBUTE:
$$ = f_new_inst(FI_EA_GET, *$1->attribute);
$$ = f_new_inst(FI_EA_GET, $1->attribute);
break;
default:
cf_error("Can't get value of symbol %s", $1->name);
@ -743,17 +808,13 @@ symbol_value: CF_SYM_KNOWN
;
static_attr:
FROM { $$ = f_new_static_attr(T_IP, SA_FROM, 0); }
| GW { $$ = f_new_static_attr(T_IP, SA_GW, 0); }
GW { $$ = f_new_static_attr(T_IP, SA_GW, 0); }
| NET { $$ = f_new_static_attr(T_NET, SA_NET, 1); }
| PROTO { $$ = f_new_static_attr(T_STRING, SA_PROTO, 1); }
| SOURCE { $$ = f_new_static_attr(T_ENUM_RTS, SA_SOURCE, 1); }
| SCOPE { $$ = f_new_static_attr(T_ENUM_SCOPE, SA_SCOPE, 0); }
| DEST { $$ = f_new_static_attr(T_ENUM_RTD, SA_DEST, 0); }
| IFNAME { $$ = f_new_static_attr(T_STRING, SA_IFNAME, 0); }
| IFINDEX { $$ = f_new_static_attr(T_INT, SA_IFINDEX, 1); }
| WEIGHT { $$ = f_new_static_attr(T_INT, SA_WEIGHT, 0); }
| PREFERENCE { $$ = f_new_static_attr(T_INT, SA_PREF, 0); }
| GW_MPLS { $$ = f_new_static_attr(T_INT, SA_GW_MPLS, 0); }
;
@ -763,6 +824,8 @@ term:
| term '-' term { $$ = f_new_inst(FI_SUBTRACT, $1, $3); }
| term '*' term { $$ = f_new_inst(FI_MULTIPLY, $1, $3); }
| term '/' term { $$ = f_new_inst(FI_DIVIDE, $1, $3); }
| term '&' term { $$ = f_new_inst(FI_BITAND, $1, $3); }
| term '|' term { $$ = f_new_inst(FI_BITOR, $1, $3); }
| term AND term { $$ = f_new_inst(FI_AND, $1, $3); }
| term OR term { $$ = f_new_inst(FI_OR, $1, $3); }
| term '=' term { $$ = f_new_inst(FI_EQ, $1, $3); }
@ -781,8 +844,10 @@ term:
| constructor { $$ = $1; }
| static_attr { $$ = f_new_inst(FI_RTA_GET, $1); }
| dynamic_attr { $$ = f_new_inst(FI_EA_GET, $1); }
| attr_bit {
struct f_inst *c = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_INT, .val.i = (1U << $1.bit)});
$$ = f_new_inst(FI_EQ, c, f_new_inst(FI_BITAND, f_new_inst(FI_EA_GET, $1.class), c));
}
| term '.' IS_V4 { $$ = f_new_inst(FI_IS_V4, $1); }
| term '.' TYPE { $$ = f_new_inst(FI_TYPE, $1); }
@ -790,13 +855,18 @@ term:
| term '.' RD { $$ = f_new_inst(FI_ROUTE_DISTINGUISHER, $1); }
| term '.' LEN { $$ = f_new_inst(FI_LENGTH, $1); }
| term '.' MAXLEN { $$ = f_new_inst(FI_ROA_MAXLEN, $1); }
| term '.' ASN { $$ = f_new_inst(FI_ROA_ASN, $1); }
| term '.' ASN { $$ = f_new_inst(FI_ASN, $1); }
| term '.' SRC { $$ = f_new_inst(FI_NET_SRC, $1); }
| term '.' DST { $$ = f_new_inst(FI_NET_DST, $1); }
| term '.' MASK '(' term ')' { $$ = f_new_inst(FI_IP_MASK, $1, $5); }
| term '.' FIRST { $$ = f_new_inst(FI_AS_PATH_FIRST, $1); }
| term '.' LAST { $$ = f_new_inst(FI_AS_PATH_LAST, $1); }
| term '.' LAST_NONAGGREGATED { $$ = f_new_inst(FI_AS_PATH_LAST_NAG, $1); }
| term '.' DATA { $$ = f_new_inst(FI_PAIR_DATA, $1); }
| term '.' DATA1 { $$ = f_new_inst(FI_LC_DATA1, $1); }
| term '.' DATA2 { $$ = f_new_inst(FI_LC_DATA2, $1); }
| term '.' MIN { $$ = f_new_inst(FI_MIN, $1); }
| term '.' MAX { $$ = f_new_inst(FI_MAX, $1); }
/* Communities */
/* This causes one shift/reduce conflict
@ -815,13 +885,11 @@ term:
| DELETE '(' term ',' term ')' { $$ = f_new_inst(FI_CLIST_DEL, $3, $5); }
| FILTER '(' term ',' term ')' { $$ = f_new_inst(FI_CLIST_FILTER, $3, $5); }
| ROA_CHECK '(' rtable ')' { $$ = f_new_inst(FI_ROA_CHECK_IMPLICIT, $3); }
| ROA_CHECK '(' rtable ',' term ',' term ')' { $$ = f_new_inst(FI_ROA_CHECK_EXPLICIT, $5, $7, $3); }
| ROA_CHECK '(' rtable ')' { $$ = f_implicit_roa_check($3); }
| ROA_CHECK '(' rtable ',' term ',' term ')' { $$ = f_new_inst(FI_ROA_CHECK, $5, $7, $3); }
| FORMAT '(' term ')' { $$ = f_new_inst(FI_FORMAT, $3); }
/* | term '.' LEN { $$->code = P('P','l'); } */
| function_call
;
@ -841,20 +909,53 @@ print_list: /* EMPTY */ { $$ = NULL; }
}
;
var_init:
/* empty */ { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { }); }
| '=' term { $$ = $2; }
;
var:
type symbol var_init ';' {
struct symbol *sym = cf_define_symbol($2, SYM_VARIABLE | $1, offset, f_new_var(sym_->scope));
$$ = f_new_inst(FI_VAR_INIT, $3, sym);
}
for_var:
type symbol { $$ = cf_define_symbol($2, SYM_VARIABLE | $1, offset, f_new_var(sym_->scope)); }
| CF_SYM_KNOWN { $$ = $1; cf_assert_symbol($1, SYM_VARIABLE); }
;
cmd:
IF term THEN block {
'{' cmds_scoped '}' {
$$ = $2;
}
| IF term THEN cmd {
$$ = f_new_inst(FI_CONDITION, $2, $4, NULL);
}
| IF term THEN block ELSE block {
| IF term THEN cmd ELSE cmd {
$$ = f_new_inst(FI_CONDITION, $2, $4, $6);
}
| CF_SYM_KNOWN '=' term ';' {
| FOR {
/* Reserve space for walk data on stack */
cf_push_scope(NULL);
conf_this_scope->slots += 2;
} for_var IN
/* Parse term in the parent scope */
{ conf_this_scope->active = 0; } term { conf_this_scope->active = 1; }
DO cmd {
cf_pop_scope();
$$ = f_new_inst(FI_FOR_INIT, $6, $3);
$$->next = f_new_inst(FI_FOR_NEXT, $3, $9);
}
| symbol_known '=' term ';' {
switch ($1->class) {
case SYM_VARIABLE_RANGE:
$$ = f_new_inst(FI_VAR_SET, $3, $1);
break;
case SYM_ATTRIBUTE:
$$ = f_new_inst(FI_EA_SET, $3, *$1->attribute);
if ($1->attribute->readonly)
cf_error("Attribute %s is read-only", $1->attribute->name);
$$ = f_new_inst(FI_EA_SET, $3, $1->attribute);
break;
default:
cf_error("Can't assign to symbol %s", $1->name);
@ -864,16 +965,25 @@ cmd:
DBG( "Ook, we'll return the value\n" );
$$ = f_new_inst(FI_RETURN, $2);
}
| dynamic_attr '=' term ';' {
$$ = f_new_inst(FI_EA_SET, $3, $1);
}
| static_attr '=' term ';' {
if ($1.readonly)
cf_error( "This static attribute is read-only.");
$$ = f_new_inst(FI_RTA_SET, $3, $1);
}
| UNSET '(' dynamic_attr ')' ';' {
$$ = f_new_inst(FI_EA_UNSET, $3);
| UNSET '(' symbol_known ')' ';' {
if ($3->class != SYM_ATTRIBUTE)
cf_error("Can't unset %s", $3->name);
if ($3->attribute->readonly)
cf_error("Attribute %s is read-only", $3->attribute->name);
$$ = f_new_inst(FI_EA_UNSET, $3->attribute);
}
| attr_bit '=' term ';' {
$$ = f_new_inst(FI_CONDITION, $3,
f_generate_complex_default(FI_BITOR, $1.class,
f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_INT, .val.i = (1U << $1.bit)}), 0),
f_generate_complex_default(FI_BITAND, $1.class,
f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_INT, .val.i = ~(1U << $1.bit)}), 0)
);
}
| break_command print_list ';' {
struct f_inst *breaker = f_new_inst(FI_DIE, $1);
@ -898,11 +1008,11 @@ cmd:
$$ = f_new_inst(FI_SWITCH, $2, build_tree($4));
}
| dynamic_attr '.' EMPTY ';' { $$ = f_generate_empty($1); }
| dynamic_attr '.' PREPEND '(' term ')' ';' { $$ = f_generate_complex( FI_PATH_PREPEND, $1, $5 ); }
| dynamic_attr '.' ADD '(' term ')' ';' { $$ = f_generate_complex( FI_CLIST_ADD, $1, $5 ); }
| dynamic_attr '.' DELETE '(' term ')' ';' { $$ = f_generate_complex( FI_CLIST_DEL, $1, $5 ); }
| dynamic_attr '.' FILTER '(' term ')' ';' { $$ = f_generate_complex( FI_CLIST_FILTER, $1, $5 ); }
| symbol_known '.' EMPTY ';' { $$ = f_generate_empty($1); }
| symbol_known '.' PREPEND '(' term ')' ';' { $$ = f_generate_complex_sym( FI_PATH_PREPEND, $1, $5 ); }
| symbol_known '.' ADD '(' term ')' ';' { $$ = f_generate_complex_sym( FI_CLIST_ADD, $1, $5 ); }
| symbol_known '.' DELETE '(' term ')' ';' { $$ = f_generate_complex_sym( FI_CLIST_DEL, $1, $5 ); }
| symbol_known '.' FILTER '(' term ')' ';' { $$ = f_generate_complex_sym( FI_CLIST_FILTER, $1, $5 ); }
| BT_ASSERT '(' get_cf_position term get_cf_position ')' ';' { $$ = assert_done($4, $3 + 1, $5 - 1); }
| BT_CHECK_ASSIGN '(' get_cf_position lvalue get_cf_position ',' term ')' ';' { $$ = assert_assign(&$4, $7, $3 + 1, $5 - 1); }
;
@ -913,8 +1023,17 @@ get_cf_position:
};
lvalue:
CF_SYM_KNOWN { cf_assert_symbol($1, SYM_VARIABLE); $$ = (struct f_lval) { .type = F_LVAL_VARIABLE, .sym = $1 }; }
symbol_known {
switch ($1->class) {
case SYM_VARIABLE_RANGE:
$$ = (struct f_lval) { .type = F_LVAL_VARIABLE, .sym = $1 };
break;
case SYM_ATTRIBUTE:
$$ = (struct f_lval) { .type = F_LVAL_EA, .da = $1->attribute };
break;
}
}
| static_attr { $$ = (struct f_lval) { .type = F_LVAL_SA, .sa = $1 }; }
| dynamic_attr { $$ = (struct f_lval) { .type = F_LVAL_EA, .da = $1 }; };
;
CF_END


@ -16,10 +16,10 @@
#include "lib/unaligned.h"
#include "lib/net.h"
#include "lib/ip.h"
#include "nest/route.h"
#include "nest/rt.h"
#include "nest/protocol.h"
#include "nest/iface.h"
#include "nest/attrs.h"
#include "lib/attrs.h"
#include "conf/conf.h"
#include "filter/filter.h"
#include "filter/f-inst.h"
@ -27,6 +27,8 @@
static const char * const f_type_str[] = {
[T_VOID] = "void",
[T_OPAQUE] = "opaque byte string",
[T_IFACE] = "interface",
[T_INT] = "int",
[T_BOOL] = "bool",
@ -36,7 +38,6 @@ static const char * const f_type_str[] = {
[T_ENUM_RTS] = "enum rts",
[T_ENUM_BGP_ORIGIN] = "enum bgp_origin",
[T_ENUM_SCOPE] = "enum scope",
[T_ENUM_RTC] = "enum rtc",
[T_ENUM_RTD] = "enum rtd",
[T_ENUM_ROA] = "enum roa",
[T_ENUM_NETTYPE] = "enum nettype",
@ -47,6 +48,7 @@ static const char * const f_type_str[] = {
[T_NET] = "prefix",
[T_STRING] = "string",
[T_PATH_MASK] = "bgpmask",
[T_PATH_MASK_ITEM] = "bgpmask item",
[T_PATH] = "bgppath",
[T_CLIST] = "clist",
[T_EC] = "ec",
@ -54,20 +56,31 @@ static const char * const f_type_str[] = {
[T_LC] = "lc",
[T_LCLIST] = "lclist",
[T_RD] = "rd",
[T_SET] = "set",
[T_PREFIX_SET] = "prefix set",
};
const char *
f_type_name(enum f_type t)
f_type_name(btype t)
{
if (t < ARRAY_SIZE(f_type_str))
return f_type_str[t] ?: "?";
if ((t == T_SET) || (t == T_PREFIX_SET))
return "set";
return "?";
return (t < ARRAY_SIZE(f_type_str)) ? (f_type_str[t] ?: "?") : "?";
}
btype
f_type_element_type(btype t)
{
switch(t) {
case T_PATH: return T_INT;
case T_CLIST: return T_PAIR;
case T_ECLIST: return T_EC;
case T_LCLIST: return T_LC;
default: return T_VOID;
};
}
const struct f_trie f_const_empty_trie = { .ipv4 = -1, };
const struct f_val f_const_empty_path = {
.type = T_PATH,
.val.ad = &null_adata,
@ -80,16 +93,11 @@ const struct f_val f_const_empty_path = {
}, f_const_empty_lclist = {
.type = T_LCLIST,
.val.ad = &null_adata,
}, f_const_empty_prefix_set = {
.type = T_PREFIX_SET,
.val.ti = &f_const_empty_trie,
};
static struct adata *
adata_empty(struct linpool *pool, int l)
{
struct adata *res = lp_alloc(pool, sizeof(struct adata) + l);
res->length = l;
return res;
}
static void
pm_format(const struct f_path_mask *p, buffer *buf)
{
@ -176,7 +184,7 @@ val_compare(const struct f_val *v1, const struct f_val *v2)
if (val_is_ip4(v1) && (v2->type == T_QUAD))
return uint_cmp(ipa_to_u32(v1->val.ip), v2->val.i);
debug( "Types do not match in val_compare\n" );
DBG( "Types do not match in val_compare\n" );
return F_CMP_ERROR;
}
@ -290,6 +298,12 @@ val_same(const struct f_val *v1, const struct f_val *v2)
int
clist_set_type(const struct f_tree *set, struct f_val *v)
{
if (!set)
{
v->type = T_VOID;
return 1;
}
switch (set->from.type)
{
case T_PAIR:
@ -412,7 +426,7 @@ clist_filter(struct linpool *pool, const struct adata *list, const struct f_val
if (nl == list->length)
return list;
struct adata *res = adata_empty(pool, nl);
struct adata *res = lp_alloc_adata(pool, nl);
memcpy(res->data, tmp, nl);
return res;
}
@ -446,7 +460,7 @@ eclist_filter(struct linpool *pool, const struct adata *list, const struct f_val
if (nl == list->length)
return list;
struct adata *res = adata_empty(pool, nl);
struct adata *res = lp_alloc_adata(pool, nl);
memcpy(res->data, tmp, nl);
return res;
}
@ -478,7 +492,7 @@ lclist_filter(struct linpool *pool, const struct adata *list, const struct f_val
if (nl == list->length)
return list;
struct adata *res = adata_empty(pool, nl);
struct adata *res = lp_alloc_adata(pool, nl);
memcpy(res->data, tmp, nl);
return res;
}
@ -526,6 +540,9 @@ val_in_range(const struct f_val *v1, const struct f_val *v2)
if (v2->type != T_SET)
return F_CMP_ERROR;
if (!v2->val.t)
return 0;
/* With integrated Quad<->IP implicit conversion */
if ((v1->type == v2->val.t->from.type) ||
((v1->type == T_QUAD) && val_is_ip4(&(v2->val.t->from)) && val_is_ip4(&(v2->val.t->to))))
@ -599,3 +616,75 @@ val_dump(const struct f_val *v) {
return val_dump_buffer;
}
struct f_val *
lp_val_copy(struct linpool *lp, const struct f_val *v)
{
switch (v->type)
{
case T_VOID:
case T_BOOL:
case T_INT:
case T_IP:
case T_PAIR:
case T_QUAD:
case T_EC:
case T_LC:
case T_RD:
case T_ENUM:
case T_PATH_MASK_ITEM:
/* These aren't embedded but there is no need to copy them */
case T_SET:
case T_PREFIX_SET:
case T_PATH_MASK:
case T_IFACE:
{
struct f_val *out = lp_alloc(lp, sizeof(*out));
*out = *v;
return out;
}
case T_NET:
{
struct {
struct f_val val;
net_addr net[0];
} *out = lp_alloc(lp, sizeof(*out) + v->val.net->length);
out->val = *v;
out->val.val.net = out->net;
net_copy(out->net, v->val.net);
return &out->val;
}
case T_STRING:
{
uint len = strlen(v->val.s);
struct {
struct f_val val;
char buf[0];
} *out = lp_alloc(lp, sizeof(*out) + len + 1);
out->val = *v;
out->val.val.s = out->buf;
memcpy(out->buf, v->val.s, len+1);
return &out->val;
}
case T_PATH:
case T_CLIST:
case T_ECLIST:
case T_LCLIST:
{
struct {
struct f_val val;
struct adata ad;
} *out = lp_alloc(lp, sizeof(*out) + v->val.ad->length);
out->val = *v;
out->val.val.ad = &out->ad;
memcpy(&out->ad, v->val.ad, v->val.ad->length);
return &out->val;
}
default:
bug("Unknown type in value copy: %d", v->type);
}
}


@ -11,102 +11,30 @@
#define _BIRD_FILTER_DATA_H_
#include "nest/bird.h"
/* Type numbers must be in 0..0xff range */
#define T_MASK 0xff
/* Internal types */
enum f_type {
/* Nothing. Simply nothing. */
T_VOID = 0,
/* User visible types, which fit in int */
T_INT = 0x10,
T_BOOL = 0x11,
T_PAIR = 0x12, /* Notice that pair is stored as integer: first << 16 | second */
T_QUAD = 0x13,
/* Put enumerational types in 0x30..0x3f range */
T_ENUM_LO = 0x30,
T_ENUM_HI = 0x3f,
T_ENUM_RTS = 0x30,
T_ENUM_BGP_ORIGIN = 0x31,
T_ENUM_SCOPE = 0x32,
T_ENUM_RTC = 0x33,
T_ENUM_RTD = 0x34,
T_ENUM_ROA = 0x35,
T_ENUM_NETTYPE = 0x36,
T_ENUM_RA_PREFERENCE = 0x37,
T_ENUM_AF = 0x38,
/* new enums go here */
T_ENUM_EMPTY = 0x3f, /* Special hack for atomic_aggr */
#define T_ENUM T_ENUM_LO ... T_ENUM_HI
/* Bigger ones */
T_IP = 0x20,
T_NET = 0x21,
T_STRING = 0x22,
T_PATH_MASK = 0x23, /* mask for BGP path */
T_PATH = 0x24, /* BGP path */
T_CLIST = 0x25, /* Community list */
T_EC = 0x26, /* Extended community value, u64 */
T_ECLIST = 0x27, /* Extended community list */
T_LC = 0x28, /* Large community value, lcomm */
T_LCLIST = 0x29, /* Large community list */
T_RD = 0x2a, /* Route distinguisher for VPN addresses */
T_PATH_MASK_ITEM = 0x2b, /* Path mask item for path mask constructors */
T_SET = 0x80,
T_PREFIX_SET = 0x81,
} PACKED;
#include "lib/type.h"
/* Filter value; size of this affects filter memory consumption */
struct f_val {
enum f_type type; /* T_* */
union {
uint i;
u64 ec;
lcomm lc;
ip_addr ip;
const net_addr *net;
const char *s;
const struct f_tree *t;
const struct f_trie *ti;
const struct adata *ad;
const struct f_path_mask *path_mask;
struct f_path_mask_item pmi;
} val;
btype type; /* T_* */
union bval_long val;
};
/* Dynamic attribute definition (eattrs) */
struct f_dynamic_attr {
u8 type; /* EA type (EAF_*) */
u8 bit; /* For bitfield accessors */
enum f_type f_type; /* Filter type */
uint ea_code; /* EA code */
};
#define fputip(a) ({ ip_addr *ax = falloc(sizeof(*ax)); *ax = (a); ax; })
enum f_sa_code {
SA_FROM = 1,
SA_GW,
SA_GW = 1,
SA_NET,
SA_PROTO,
SA_SOURCE,
SA_SCOPE,
SA_DEST,
SA_IFNAME,
SA_IFINDEX,
SA_WEIGHT,
SA_PREF,
SA_GW_MPLS,
} PACKED;
/* Static attribute definition (members of struct rta) */
struct f_static_attr {
enum f_type f_type; /* Filter type */
btype type; /* Data type */
enum f_sa_code sa_code; /* Static attribute id */
int readonly:1; /* Don't allow writing */
};
@ -114,7 +42,6 @@ struct f_static_attr {
/* Filter l-value type */
enum f_lval_type {
F_LVAL_VARIABLE,
F_LVAL_PREFERENCE,
F_LVAL_SA,
F_LVAL_EA,
};
@ -124,7 +51,7 @@ struct f_lval {
enum f_lval_type type;
union {
struct symbol *sym;
struct f_dynamic_attr da;
const struct ea_class *da;
struct f_static_attr sa;
};
};
@ -141,18 +68,23 @@ struct f_tree {
void *data;
};
#define TRIE_STEP 4
#define TRIE_STACK_LENGTH 33
struct f_trie_node4
{
ip4_addr addr, mask, accept;
uint plen;
struct f_trie_node4 *c[2];
u16 plen;
u16 local;
struct f_trie_node4 *c[1 << TRIE_STEP];
};
struct f_trie_node6
{
ip6_addr addr, mask, accept;
uint plen;
struct f_trie_node6 *c[2];
u16 plen;
u16 local;
struct f_trie_node6 *c[1 << TRIE_STEP];
};
struct f_trie_node
@ -169,9 +101,20 @@ struct f_trie
u8 zero;
s8 ipv4; /* -1 for undefined / empty */
u16 data_size; /* Additional data for each trie node */
u32 prefix_count; /* Works only for restricted tries (pxlen == l == h) */
struct f_trie_node root; /* Root trie node */
};
struct f_trie_walk_state
{
u8 ipv4;
u8 accept_length; /* Current inter-node prefix position */
u8 start_pos; /* Initial prefix position in stack[0] */
u8 local_pos; /* Current intra-node prefix position */
u8 stack_pos; /* Current node in stack below */
const struct f_trie_node *stack[TRIE_STACK_LENGTH];
};
struct f_tree *f_new_tree(void);
struct f_tree *build_tree(struct f_tree *);
const struct f_tree *find_tree(const struct f_tree *t, const struct f_val *val);
@ -182,12 +125,75 @@ void tree_walk(const struct f_tree *t, void (*hook)(const struct f_tree *, void
struct f_trie *f_new_trie(linpool *lp, uint data_size);
void *trie_add_prefix(struct f_trie *t, const net_addr *n, uint l, uint h);
int trie_match_net(const struct f_trie *t, const net_addr *n);
int trie_match_longest_ip4(const struct f_trie *t, const net_addr_ip4 *net, net_addr_ip4 *dst, ip4_addr *found0);
int trie_match_longest_ip6(const struct f_trie *t, const net_addr_ip6 *net, net_addr_ip6 *dst, ip6_addr *found0);
void trie_walk_init(struct f_trie_walk_state *s, const struct f_trie *t, const net_addr *from);
int trie_walk_next(struct f_trie_walk_state *s, net_addr *net);
int trie_same(const struct f_trie *t1, const struct f_trie *t2);
void trie_format(const struct f_trie *t, buffer *buf);
static inline int
trie_match_next_longest_ip4(net_addr_ip4 *n, ip4_addr *found)
{
while (n->pxlen)
{
n->pxlen--;
ip4_clrbit(&n->prefix, n->pxlen);
if (ip4_getbit(*found, n->pxlen))
return 1;
}
return 0;
}
static inline int
trie_match_next_longest_ip6(net_addr_ip6 *n, ip6_addr *found)
{
while (n->pxlen)
{
n->pxlen--;
ip6_clrbit(&n->prefix, n->pxlen);
if (ip6_getbit(*found, n->pxlen))
return 1;
}
return 0;
}
#define TRIE_WALK_TO_ROOT_IP4(trie, net, dst) ({ \
net_addr_ip4 dst; \
ip4_addr _found; \
for (int _n = trie_match_longest_ip4(trie, net, &dst, &_found); \
_n; \
_n = trie_match_next_longest_ip4(&dst, &_found))
#define TRIE_WALK_TO_ROOT_IP6(trie, net, dst) ({ \
net_addr_ip6 dst; \
ip6_addr _found; \
for (int _n = trie_match_longest_ip6(trie, net, &dst, &_found); \
_n; \
_n = trie_match_next_longest_ip6(&dst, &_found))
#define TRIE_WALK_TO_ROOT_END })
#define TRIE_WALK(trie, net, from) ({ \
net_addr net; \
struct f_trie_walk_state tws_; \
trie_walk_init(&tws_, trie, from); \
while (trie_walk_next(&tws_, &net))
#define TRIE_WALK_END })
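
The walk macros above open a statement expression and a loop; the caller supplies the loop body and closes it with the matching *_END macro. A usage sketch follows; the helper function and its name are invented for illustration only, while the macro and the net_addr_ip4 / struct f_trie types come from this header:

/* Hypothetical helper: count every prefix in the trie covering the given IPv4 net,
 * walking from the most specific match towards the root. */
static uint count_covering_prefixes(const struct f_trie *trie, const net_addr_ip4 *net)
{
  uint cnt = 0;

  TRIE_WALK_TO_ROOT_IP4(trie, net, sub)
  {
    (void) sub;     /* `sub` holds each covering prefix in turn; only counted here */
    cnt++;
  }
  TRIE_WALK_TO_ROOT_END;

  return cnt;
}
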
#define F_CMP_ERROR 999
const char *f_type_name(enum f_type t);
const char *f_type_name(btype t);
enum btype f_type_element_type(btype t);
int val_same(const struct f_val *v1, const struct f_val *v2);
int val_compare(const struct f_val *v1, const struct f_val *v2);
@ -195,15 +201,19 @@ void val_format(const struct f_val *v, buffer *buf);
char *val_format_str(struct linpool *lp, const struct f_val *v);
const char *val_dump(const struct f_val *v);
struct f_val *lp_val_copy(struct linpool *lp, const struct f_val *v);
static inline int val_is_ip4(const struct f_val *v)
{ return (v->type == T_IP) && ipa_is_ip4(v->val.ip); }
int val_in_range(const struct f_val *v1, const struct f_val *v2);
int clist_set_type(const struct f_tree *set, struct f_val *v);
static inline int eclist_set_type(const struct f_tree *set)
{ return set->from.type == T_EC; }
{ return !set || set->from.type == T_EC; }
static inline int lclist_set_type(const struct f_tree *set)
{ return set->from.type == T_LC; }
{ return !set || set->from.type == T_LC; }
static inline int path_set_type(const struct f_tree *set)
{ return !set || set->from.type == T_INT; }
const struct adata *clist_filter(struct linpool *pool, const struct adata *list, const struct f_val *set, int pos);
const struct adata *eclist_filter(struct linpool *pool, const struct adata *list, const struct f_val *set, int pos);
@ -219,8 +229,18 @@ undef_value(struct f_val v)
(v.val.ad == &null_adata);
}
extern const struct f_val f_const_empty_path, f_const_empty_clist, f_const_empty_eclist, f_const_empty_lclist;
extern const struct f_val f_const_empty_path, f_const_empty_clist, f_const_empty_eclist, f_const_empty_lclist, f_const_empty_prefix_set;
static inline const struct f_val *f_get_empty(btype t)
{
switch (t) {
case T_PATH: return &f_const_empty_path;
case T_CLIST: return &f_const_empty_clist;
case T_ECLIST: return &f_const_empty_eclist;
case T_LCLIST: return &f_const_empty_lclist;
default: return NULL;
}
}
enum filter_return f_eval(const struct f_line *expr, struct linpool *tmp_pool, struct f_val *pres);
enum filter_return f_eval(const struct f_line *expr, struct f_val *pres);
#endif


@ -94,7 +94,7 @@ FID_DUMP_BODY()m4_dnl
debug("%s" $4 "\n", INDENT, $5);
]])
FID_INTERPRET_EXEC()m4_dnl
const $1 $2 = whati->$2
$1 $2 = whati->$2
FID_INTERPRET_BODY')
# Instruction arguments are needed only until linearization is done.
@ -191,6 +191,12 @@ if (f$1->type && f$2->type && (f$1->type != f$2->type) &&
cf_error("Arguments $1 and $2 of %s must be of the same type", f_instruction_name(what->fi_code));
FID_INTERPRET_BODY()')
m4_define(ARG_PREFER_SAME_TYPE, `
FID_NEW_BODY()m4_dnl
if (f$1->type && f$2->type && (f$1->type != f$2->type))
(void) (f_const_promotion(f$2, f$1->type) || f_const_promotion(f$1, f$2->type));
FID_INTERPRET_BODY()')
# Executing another filter line. This replaces the recursion
# that was needed in the former implementation.
m4_define(LINEX, `FID_INTERPRET_EXEC()LINEX_($1)FID_INTERPRET_NEW()return $1 FID_INTERPRET_BODY()')
@ -216,7 +222,7 @@ whati->f$1 = f$1;
FID_DUMP_BODY()m4_dnl
f_dump_line(item->fl$1, indent + 1);
FID_LINEARIZE_BODY()m4_dnl
item->fl$1 = f_linearize(whati->f$1);
item->fl$1 = f_linearize(whati->f$1, $2);
FID_SAME_BODY()m4_dnl
if (!f_same(f1->fl$1, f2->fl$1)) return 0;
FID_ITERATE_BODY()m4_dnl
@ -244,9 +250,13 @@ m4_define(ERROR,
# This macro specifies the result type and makes sure there are no conflicting definitions
m4_define(RESULT_TYPE,
`m4_ifdef([[INST_RESULT_TYPE]],
[[m4_ifelse(INST_RESULT_TYPE,$1,,[[ERROR([[Multiple type definitons]])]])]],
[[m4_ifelse(INST_RESULT_TYPE,$1,,[[ERROR([[Multiple type definitions in]] INST_NAME)]])]],
[[m4_define(INST_RESULT_TYPE,$1) RESULT_TYPE_($1)]])')
m4_define(RESULT_TYPE_CHECK,
`m4_ifelse(INST_OUTVAL,0,,
[[m4_ifdef([[INST_RESULT_TYPE]],,[[ERROR([[Missing type definition in]] INST_NAME)]])]])')
m4_define(RESULT_TYPE_, `
FID_NEW_BODY()m4_dnl
what->type = $1;
@ -256,7 +266,7 @@ FID_INTERPRET_BODY()')
m4_define(SYMBOL, `FID_MEMBER(struct symbol *, sym, [[strcmp(f1->sym->name, f2->sym->name) || (f1->sym->class != f2->sym->class)]], "symbol %s", item->sym->name)')
m4_define(RTC, `FID_MEMBER(struct rtable_config *, rtc, [[strcmp(f1->rtc->name, f2->rtc->name)]], "route table %s", item->rtc->name)')
m4_define(STATIC_ATTR, `FID_MEMBER(struct f_static_attr, sa, f1->sa.sa_code != f2->sa.sa_code,,)')
m4_define(DYNAMIC_ATTR, `FID_MEMBER(struct f_dynamic_attr, da, f1->da.ea_code != f2->da.ea_code,,)')
m4_define(DYNAMIC_ATTR, `FID_MEMBER(const struct ea_class *, da, f1->da != f2->da,,)')
m4_define(ACCESS_RTE, `FID_HIC(,[[do { if (!fs->rte) runtime("No route to access"); } while (0)]],NEVER_CONSTANT())')
# 2) Code wrapping
@ -300,6 +310,7 @@ m4_define(FID_ITERATE, `FID_ZONE(10, Iteration)')
# This macro does all the code wrapping. See inline comments.
m4_define(INST_FLUSH, `m4_ifdef([[INST_NAME]], [[
RESULT_TYPE_CHECK()m4_dnl Check for defined RESULT_TYPE()
FID_ENUM()m4_dnl Contents of enum fi_code { ... }
INST_NAME(),
FID_ENUM_STR()m4_dnl Contents of const char * indexed by enum fi_code
@ -375,6 +386,7 @@ case INST_NAME(): {
#undef whati
#undef item
dest->items[pos].fi_code = what->fi_code;
dest->items[pos].flags = what->flags;
dest->items[pos].lineno = what->lineno;
break;
}
@ -402,6 +414,7 @@ m4_define(INST, `m4_dnl This macro is called on beginning of each instruction
INST_FLUSH()m4_dnl First, old data is flushed
m4_define([[INST_NAME]], [[$1]])m4_dnl Then we store instruction name,
m4_define([[INST_INVAL]], [[$2]])m4_dnl instruction input value count,
m4_define([[INST_OUTVAL]], [[$3]])m4_dnl instruction output value count,
m4_undefine([[INST_NEVER_CONSTANT]])m4_dnl reset NEVER_CONSTANT trigger,
m4_undefine([[INST_RESULT_TYPE]])m4_dnl and reset RESULT_TYPE value.
FID_INTERPRET_BODY()m4_dnl By default, every code is interpreter code.
@ -490,7 +503,7 @@ fi_constant(struct f_inst *what, struct f_val val)
}
static int
f_const_promotion(struct f_inst *arg, enum f_type want)
f_const_promotion(struct f_inst *arg, btype want)
{
if (arg->fi_code != FI_CONSTANT)
return 0;
@ -505,6 +518,11 @@ f_const_promotion(struct f_inst *arg, enum f_type want)
return 1;
}
else if ((c->type == T_SET) && (!c->val.t) && (want == T_PREFIX_SET)) {
*c = f_const_empty_prefix_set;
return 1;
}
return 0;
}
@ -560,7 +578,7 @@ FID_WR_PUT(8)
}
struct f_line *
f_linearize_concat(const struct f_inst * const inst[], uint count)
f_linearize_concat(const struct f_inst * const inst[], uint count, uint results)
{
uint len = 0;
for (uint i=0; i<count; i++)
@ -572,6 +590,8 @@ f_linearize_concat(const struct f_inst * const inst[], uint count)
for (uint i=0; i<count; i++)
out->len = linearize(out, inst[i], out->len);
out->results = results;
#ifdef LOCAL_DEBUG
f_dump_line(out, 0);
#endif
@ -640,7 +660,8 @@ FID_WR_PUT(4)m4_dnl
struct f_inst {
struct f_inst *next; /* Next instruction */
enum f_instruction_code fi_code; /* Instruction code */
enum f_type type; /* Type of returned value, if known */
enum f_instruction_flags flags; /* Flags, instruction-specific */
btype type; /* Type of returned value, if known */
int size; /* How many instructions are underneath */
int lineno; /* Line number */
union {


@ -62,8 +62,9 @@
* m4_dnl INST(FI_NOP, in, out) { enum value, input args, output args
* m4_dnl ARG(num, type); argument, its id (in data fields) and type accessible by v1, v2, v3
* m4_dnl ARG_ANY(num); argument with no type check accessible by v1, v2, v3
* m4_dnl ARG_TYPE(num, type); just declare the type of argument
* m4_dnl VARARG; variable-length argument list; accessible by vv(i) and whati->varcount
* m4_dnl LINE(num, unused); this argument has to be converted to its own f_line
* m4_dnl LINE(num, out); this argument has to be converted to its own f_line
* m4_dnl SYMBOL; symbol handed from config
* m4_dnl STATIC_ATTR; static attribute definition
* m4_dnl DYNAMIC_ATTR; dynamic attribute definition
@ -80,10 +81,17 @@
* m4_dnl )
*
* m4_dnl RESULT(type, union-field, value); putting this on value stack
* m4_dnl RESULT_(type, union-field, value); like RESULT(), but do not declare the type
* m4_dnl RESULT_VAL(value-struct); pass the struct f_val directly
* m4_dnl RESULT_TYPE(type); just declare the type of result value
* m4_dnl RESULT_VOID; return undef
* m4_dnl }
*
* Note that runtime arguments m4_dnl (ARG*, VARARG) must be defined before
* parse-time arguments m4_dnl (LINE, SYMBOL, ...). During linearization,
* first ones move position in f_line by linearizing arguments first, while
* second ones store data to the current position.
*
* Also note that the { ... } blocks are not respected by M4 at all.
* If you get weird unmatched-brace-pair errors, check what it generated and why.
* What is really considered as one instruction is not the { ... } block
@ -91,6 +99,24 @@
*
* Other code is just copied into the interpreter part.
*
* The filter language uses a simple type system, where values have types
* (constants T_*) and also terms (instructions) are statically typed. Our
* static typing is partial (some terms do not declare types of arguments
* or results), therefore it can detect most but not all type errors and
* therefore we still have runtime type checks.
*
* m4_dnl Types of arguments are declared by macros ARG() and ARG_TYPE(),
* m4_dnl types of results are declared by RESULT() and RESULT_TYPE().
* m4_dnl Macros ARG_ANY(), RESULT_() and RESULT_VAL() do not declare types
* m4_dnl themselves, but can be combined with ARG_TYPE() / RESULT_TYPE().
*
* m4_dnl Note that types should be declared only once. If there are
* m4_dnl multiple RESULT() macros in an instruction definition, they must
* m4_dnl use the exact same expression for type, or they should be replaced
* m4_dnl by multiple RESULT_() macros and a common RESULT_TYPE() macro.
* m4_dnl See e.g. FI_EA_GET or FI_MIN instructions.
*
*
* If you are satisfied with this, you don't need to read the following
* detailed description of what is really done with the instruction definitions.
*
@ -212,10 +238,40 @@
* m4_dnl NEVER_CONSTANT-> don't generate pre-interpretation code at all
* m4_dnl ACCESS_RTE -> check that route is available, also NEVER_CONSTANT
* m4_dnl ACCESS_EATTRS -> pre-cache the eattrs; use only with ACCESS_RTE
* m4_dnl f_rta_cow(fs) -> function to call before any change to route should be done
*
* m4_dnl If you are stymied, see FI_CALL or FI_CONSTANT or just search for
* m4_dnl the mentioned macros in this file to see what is happening there in wild.
*
*
* A note about soundness of the type system:
*
* A type system is sound when types of expressions are consistent with
* types of values resulting from evaluation of such expressions. Untyped
* expressions are ok, but badly typed expressions are not sound. So is
* the type system of BIRD filtering code sound? There are some points:
*
* All cases of (one) m4_dnl RESULT() macro are obviously ok, as the macro
* both declares a type and returns a value. One has to check instructions
* that use m4_dnl RESULT_TYPE() macro. There are two issues:
*
* FI_AND, FI_OR - second argument is statically checked to be T_BOOL and
* passed as result without dynamic typecheck, declared to be T_BOOL. If
* an untyped non-bool expression is used as a second argument, then
* the mismatched type is returned.
*
* FI_VAR_GET - soundness depends on consistency of declared symbol types
* and stored values. This is maintained when values are stored by
* FI_VAR_SET, but when they are stored by FI_CALL, only static checking is
* used, so when an untyped expression returning mismatched value is used
* as a function argument, then inconsistent value is stored and subsequent
* FI_VAR_GET would be unsound.
*
* Both of these issues are inconsequential, as mismatched values from
* unsound expressions will be caught by dynamic typechecks like mismatched
* values from untyped expressions.
*
* Also note that FI_CALL is the only expression without properly declared
* result type.
*/
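
As a concrete illustration of the macros documented above, a hypothetical instruction (FI_EXAMPLE_NEGATE is not part of the real instruction set; it mirrors the existing boolean instructions) would be written with one typed input argument and one typed result pushed on the value stack:

  /* Hypothetical example only -- not present in the actual instruction set */
  INST(FI_EXAMPLE_NEGATE, 1, 1) {   /* 1 input value, 1 output value */
    ARG(1, T_BOOL);                 /* v1 must be a bool */
    RESULT(T_BOOL, i, !v1.val.i);   /* declare the result type and push the value */
  }
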
/* Binary operators */
@ -240,13 +296,23 @@
if (v2.val.i == 0) runtime( "Mother told me not to divide by 0" );
RESULT(T_INT, i, v1.val.i / v2.val.i);
}
INST(FI_BITOR, 2, 1) {
ARG(1,T_INT);
ARG(2,T_INT);
RESULT(T_INT, i, v1.val.i | v2.val.i);
}
INST(FI_BITAND, 2, 1) {
ARG(1,T_INT);
ARG(2,T_INT);
RESULT(T_INT, i, v1.val.i & v2.val.i);
}
INST(FI_AND, 1, 1) {
ARG(1,T_BOOL);
ARG_TYPE_STATIC(2,T_BOOL);
RESULT_TYPE(T_BOOL);
if (v1.val.i)
LINE(2,0);
LINE(2,1);
else
RESULT_VAL(v1);
}
@ -256,7 +322,7 @@
RESULT_TYPE(T_BOOL);
if (!v1.val.i)
LINE(2,0);
LINE(2,1);
else
RESULT_VAL(v1);
}
@ -349,7 +415,7 @@
break;
case T_SET:
if (vv(i).val.t->from.type != T_INT)
if (!path_set_type(vv(i).val.t))
runtime("Only integer sets allowed in path mask");
pm->item[i] = (struct f_path_mask_item) {
@ -371,12 +437,14 @@
INST(FI_NEQ, 2, 1) {
ARG_ANY(1);
ARG_ANY(2);
ARG_PREFER_SAME_TYPE(1, 2);
RESULT(T_BOOL, i, !val_same(&v1, &v2));
}
INST(FI_EQ, 2, 1) {
ARG_ANY(1);
ARG_ANY(2);
ARG_PREFER_SAME_TYPE(1, 2);
RESULT(T_BOOL, i, val_same(&v1, &v2));
}
@ -447,6 +515,18 @@
RESULT(T_BOOL, i, ipa_is_ip4(v1.val.ip));
}
INST(FI_VAR_INIT, 1, 0) {
NEVER_CONSTANT;
ARG_ANY(1);
SYMBOL;
ARG_TYPE(1, sym->class & 0xff);
/* New variable is always the last on stack */
uint pos = curline.vbase + sym->offset;
fstk->vstk[pos] = v1;
fstk->vcnt = pos + 1;
}
/* Set to indirect value prepared in v1 */
INST(FI_VAR_SET, 1, 0) {
NEVER_CONSTANT;
@ -477,12 +557,100 @@
RESULT_VAL(val);
}
INST(FI_FOR_INIT, 1, 0) {
NEVER_CONSTANT;
ARG_ANY(1);
SYMBOL;
FID_NEW_BODY()
ASSERT((sym->class & ~0xff) == SYM_VARIABLE);
/* Static type check */
if (f1->type)
{
enum btype t_var = (sym->class & 0xff);
enum btype t_arg = f_type_element_type(f1->type);
if (!t_arg)
cf_error("Value of expression in FOR must be iterable, got %s",
f_type_name(f1->type));
if (t_var != t_arg)
cf_error("Loop variable '%s' in FOR must be %s, is %s",
sym->name, f_type_name(t_arg), f_type_name(t_var));
}
FID_INTERPRET_BODY()
/* Dynamic type check */
if ((sym->class & 0xff) != f_type_element_type(v1.type))
runtime("Mismatched argument and variable type");
/* Setup the index */
v2 = (struct f_val) { .type = T_INT, .val.i = 0 };
/* Keep v1 and v2 on the stack */
fstk->vcnt += 2;
}
INST(FI_FOR_NEXT, 2, 0) {
NEVER_CONSTANT;
SYMBOL;
/* Type checks are done in FI_FOR_INIT */
/* Loop variable */
struct f_val *var = &fstk->vstk[curline.vbase + sym->offset];
int step = 0;
switch(v1.type)
{
case T_PATH:
var->type = T_INT;
step = as_path_walk(v1.val.ad, &v2.val.i, &var->val.i);
break;
case T_CLIST:
var->type = T_PAIR;
step = int_set_walk(v1.val.ad, &v2.val.i, &var->val.i);
break;
case T_ECLIST:
var->type = T_EC;
step = ec_set_walk(v1.val.ad, &v2.val.i, &var->val.ec);
break;
case T_LCLIST:
var->type = T_LC;
step = lc_set_walk(v1.val.ad, &v2.val.i, &var->val.lc);
break;
default:
runtime( "Clist or lclist expected" );
}
if (step)
{
/* Keep v1 and v2 on the stack */
fstk->vcnt += 2;
/* Repeat this instruction */
curline.pos--;
/* Execute the loop body */
LINE(1, 0);
/* Space for loop variable, may be unused */
fstk->vcnt += 1;
}
else
var->type = T_VOID;
}
INST(FI_CONDITION, 1, 0) {
ARG(1, T_BOOL);
if (v1.val.i)
LINE(2,0);
else
LINE(3,1);
LINE(3,0);
}
INST(FI_PRINT, 0, 0) {
@ -519,78 +687,98 @@
{
STATIC_ATTR;
ACCESS_RTE;
struct rta *rta = fs->rte->attrs;
ACCESS_EATTRS;
switch (sa.sa_code)
{
case SA_FROM: RESULT(sa.f_type, ip, rta->from); break;
case SA_GW: RESULT(sa.f_type, ip, rta->nh.gw); break;
case SA_NET: RESULT(sa.f_type, net, fs->rte->net); break;
case SA_PROTO: RESULT(sa.f_type, s, fs->rte->src->proto->name); break;
case SA_SOURCE: RESULT(sa.f_type, i, rta->source); break;
case SA_SCOPE: RESULT(sa.f_type, i, rta->scope); break;
case SA_DEST: RESULT(sa.f_type, i, rta->dest); break;
case SA_IFNAME: RESULT(sa.f_type, s, rta->nh.iface ? rta->nh.iface->name : ""); break;
case SA_IFINDEX: RESULT(sa.f_type, i, rta->nh.iface ? rta->nh.iface->index : 0); break;
case SA_WEIGHT: RESULT(sa.f_type, i, rta->nh.weight + 1); break;
case SA_PREF: RESULT(sa.f_type, i, rta->pref); break;
case SA_GW_MPLS: RESULT(sa.f_type, i, rta->nh.labels ? rta->nh.label[0] : MPLS_NULL); break;
case SA_NET: RESULT(sa.type, net, fs->rte->net); break;
case SA_PROTO: RESULT(sa.type, s, fs->rte->src->proto->name); break;
default:
bug("Invalid static attribute access (%u/%u)", sa.f_type, sa.sa_code);
{
struct eattr *nhea = ea_find(*fs->eattrs, &ea_gen_nexthop);
struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
struct nexthop *nh = nhad ? &nhad->nh : NULL;
switch (sa.sa_code)
{
case SA_DEST:
RESULT(sa.type, i, nhad ?
(NEXTHOP_IS_REACHABLE(nhad) ? RTD_UNICAST : nhad->dest)
: RTD_NONE);
break;
case SA_GW:
RESULT(sa.type, ip, nh ? nh->gw : IPA_NONE);
break;
case SA_IFNAME:
RESULT(sa.type, s, (nh && nh->iface) ? nh->iface->name : "");
break;
case SA_IFINDEX:
RESULT(sa.type, i, (nh && nh->iface) ? nh->iface->index : 0);
break;
case SA_WEIGHT:
RESULT(sa.type, i, (nh ? nh->weight : 0) + 1);
break;
case SA_GW_MPLS:
RESULT(sa.type, i, (nh && nh->labels) ? nh->label[0] : MPLS_NULL);
break;
default:
bug("Invalid static attribute access (%u/%u)", sa.type, sa.sa_code);
}
}
}
}
}
INST(FI_RTA_SET, 1, 0) {
ACCESS_RTE;
ACCESS_EATTRS;
ARG_ANY(1);
STATIC_ATTR;
ARG_TYPE(1, sa.f_type);
f_rta_cow(fs);
ARG_TYPE(1, sa.type);
{
struct rta *rta = fs->rte->attrs;
union {
struct nexthop_adata nha;
struct {
struct adata ad;
struct nexthop nh;
u32 label;
};
} nha;
nha.ad = (struct adata) {
.length = sizeof (struct nexthop_adata) - sizeof (struct adata),
};
eattr *a = NULL;
switch (sa.sa_code)
{
case SA_FROM:
rta->from = v1.val.ip;
break;
case SA_GW:
{
ip_addr ip = v1.val.ip;
struct iface *ifa = ipa_is_link_local(ip) ? rta->nh.iface : NULL;
neighbor *n = neigh_find(fs->rte->src->proto, ip, ifa, 0);
if (!n || (n->scope == SCOPE_HOST))
runtime( "Invalid gw address" );
rta->dest = RTD_UNICAST;
rta->nh.gw = ip;
rta->nh.iface = n->iface;
rta->nh.next = NULL;
rta->hostentry = NULL;
rta->nh.labels = 0;
}
break;
case SA_SCOPE:
rta->scope = v1.val.i;
break;
case SA_DEST:
{
int i = v1.val.i;
if ((i != RTD_BLACKHOLE) && (i != RTD_UNREACHABLE) && (i != RTD_PROHIBIT))
runtime( "Destination can be changed only to blackhole, unreachable or prohibit" );
rta->dest = i;
rta->nh.gw = IPA_NONE;
rta->nh.iface = NULL;
rta->nh.next = NULL;
rta->hostentry = NULL;
rta->nh.labels = 0;
nha.nha.dest = i;
nha.ad.length = NEXTHOP_DEST_SIZE;
break;
}
case SA_GW:
{
struct eattr *nh_ea = ea_find(*fs->eattrs, &ea_gen_nexthop);
ip_addr ip = v1.val.ip;
struct iface *ifa = (ipa_is_link_local(ip) && nh_ea) ?
((struct nexthop_adata *) nh_ea->u.ptr)->nh.iface : NULL;
neighbor *n = neigh_find(fs->rte->src->proto, ip, ifa, 0);
if (!n || (n->scope == SCOPE_HOST))
runtime( "Invalid gw address" );
nha.nh = (struct nexthop) {
.gw = ip,
.iface = n->iface,
};
}
break;
@ -600,12 +788,9 @@
if (!ifa)
runtime( "Invalid iface name" );
rta->dest = RTD_UNICAST;
rta->nh.gw = IPA_NONE;
rta->nh.iface = ifa;
rta->nh.next = NULL;
rta->hostentry = NULL;
rta->nh.labels = 0;
nha.nh = (struct nexthop) {
.iface = ifa,
};
}
break;
@ -614,13 +799,20 @@
if (v1.val.i >= 0x100000)
runtime( "Invalid MPLS label" );
struct eattr *nh_ea = ea_find(*fs->eattrs, &ea_gen_nexthop);
if (!nh_ea)
runtime( "No nexthop to add a MPLS label to" );
nha.nh = ((struct nexthop_adata *) nh_ea->u.ptr)->nh;
if (v1.val.i != MPLS_NULL)
{
rta->nh.label[0] = v1.val.i;
rta->nh.labels = 1;
nha.nh.label[0] = v1.val.i;
nha.nh.labels = 1;
nha.ad.length = sizeof nha - sizeof (struct adata);
}
else
rta->nh.labels = 0;
nha.nh.labels = 0;
}
break;
@ -629,22 +821,36 @@
int i = v1.val.i;
if (i < 1 || i > 256)
runtime( "Setting weight value out of bounds" );
if (rta->dest != RTD_UNICAST)
struct eattr *nh_ea = ea_find(*fs->eattrs, &ea_gen_nexthop);
if (!nh_ea)
runtime( "No nexthop to set weight on" );
struct nexthop_adata *nhad = (struct nexthop_adata *) nh_ea->u.ptr;
if (!NEXTHOP_IS_REACHABLE(nhad))
runtime( "Setting weight needs regular nexthop " );
/* Set weight on all next hops */
for (struct nexthop *nh = &rta->nh; nh; nh = nh->next)
nh->weight = i - 1;
}
break;
struct nexthop_adata *nhax = (struct nexthop_adata *) tmp_copy_adata(&nhad->ad);
case SA_PREF:
rta->pref = v1.val.i;
/* Set weight on all next hops */
NEXTHOP_WALK(nh, nhax)
nh->weight = i - 1;
a = ea_set_attr(fs->eattrs,
EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, &nhax->ad));
}
break;
default:
bug("Invalid static attribute access (%u/%u)", sa.f_type, sa.sa_code);
bug("Invalid static attribute access (%u/%u)", sa.type, sa.sa_code);
}
if (!a)
a = ea_set_attr(fs->eattrs,
EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, tmp_copy_adata(&nha.ad)));
a->originated = 1;
a->fresh = 1;
}
}
@ -652,75 +858,31 @@
DYNAMIC_ATTR;
ACCESS_RTE;
ACCESS_EATTRS;
RESULT_TYPE(da.f_type);
RESULT_TYPE(da->type);
{
eattr *e = ea_find(*fs->eattrs, da.ea_code);
const struct f_val *empty;
const eattr *e = ea_find(*fs->eattrs, da->id);
if (!e) {
/* A special case: undefined as_path looks like empty as_path */
if (da.type == EAF_TYPE_AS_PATH) {
RESULT_(T_PATH, ad, &null_adata);
break;
}
if (e)
{
ASSERT_DIE(e->type == da->type);
/* The same special case for int_set */
if (da.type == EAF_TYPE_INT_SET) {
RESULT_(T_CLIST, ad, &null_adata);
break;
}
/* The same special case for ec_set */
if (da.type == EAF_TYPE_EC_SET) {
RESULT_(T_ECLIST, ad, &null_adata);
break;
}
/* The same special case for lc_set */
if (da.type == EAF_TYPE_LC_SET) {
RESULT_(T_LCLIST, ad, &null_adata);
break;
}
/* Undefined value */
RESULT_VOID;
break;
}
switch (e->type & EAF_TYPE_MASK) {
case EAF_TYPE_INT:
RESULT_(da.f_type, i, e->u.data);
break;
case EAF_TYPE_ROUTER_ID:
RESULT_(T_QUAD, i, e->u.data);
break;
case EAF_TYPE_OPAQUE:
RESULT_(T_ENUM_EMPTY, i, 0);
break;
case EAF_TYPE_IP_ADDRESS:
RESULT_(T_IP, ip, *((ip_addr *) e->u.ptr->data));
break;
case EAF_TYPE_AS_PATH:
RESULT_(T_PATH, ad, e->u.ptr);
break;
case EAF_TYPE_BITFIELD:
RESULT_(T_BOOL, i, !!(e->u.data & (1u << da.bit)));
break;
case EAF_TYPE_INT_SET:
RESULT_(T_CLIST, ad, e->u.ptr);
break;
case EAF_TYPE_EC_SET:
RESULT_(T_ECLIST, ad, e->u.ptr);
break;
case EAF_TYPE_LC_SET:
RESULT_(T_LCLIST, ad, e->u.ptr);
break;
case EAF_TYPE_UNDEF:
RESULT_VOID;
switch (e->type) {
case T_IP:
RESULT_(T_IP, ip, *((const ip_addr *) e->u.ptr->data));
break;
default:
bug("Unknown dynamic attribute type");
RESULT_VAL([[(struct f_val) {
.type = e->type,
.val.bval = e->u,
}]]);
}
}
else if (empty = f_get_empty(da->type))
RESULT_VAL(*empty);
else
RESULT_VOID;
}
}
INST(FI_EA_SET, 1, 0) {
@ -728,62 +890,32 @@
ACCESS_EATTRS;
ARG_ANY(1);
DYNAMIC_ATTR;
ARG_TYPE(1, da.f_type);
ARG_TYPE(1, da->type);
{
struct ea_list *l = lp_alloc(fs->pool, sizeof(struct ea_list) + sizeof(eattr));
struct eattr *a;
l->next = NULL;
l->flags = EALF_SORTED;
l->count = 1;
l->attrs[0].id = da.ea_code;
l->attrs[0].flags = 0;
l->attrs[0].type = da.type | EAF_ORIGINATED | EAF_FRESH;
if (da->type >= EAF_TYPE__MAX)
bug("Unsupported attribute type");
switch (da.type) {
case EAF_TYPE_INT:
case EAF_TYPE_ROUTER_ID:
l->attrs[0].u.data = v1.val.i;
break;
case EAF_TYPE_OPAQUE:
switch (da->type) {
case T_OPAQUE:
case T_IFACE:
runtime( "Setting opaque attribute is not allowed" );
break;
case EAF_TYPE_IP_ADDRESS:;
int len = sizeof(ip_addr);
struct adata *ad = lp_alloc(fs->pool, sizeof(struct adata) + len);
ad->length = len;
(* (ip_addr *) ad->data) = v1.val.ip;
l->attrs[0].u.ptr = ad;
break;
case EAF_TYPE_AS_PATH:
case EAF_TYPE_INT_SET:
case EAF_TYPE_EC_SET:
case EAF_TYPE_LC_SET:
l->attrs[0].u.ptr = v1.val.ad;
break;
case EAF_TYPE_BITFIELD:
{
/* First, we have to find the old value */
eattr *e = ea_find(*fs->eattrs, da.ea_code);
u32 data = e ? e->u.data : 0;
if (v1.val.i)
l->attrs[0].u.data = data | (1u << da.bit);
else
l->attrs[0].u.data = data & ~(1u << da.bit);
}
case T_IP:
a = ea_set_attr(fs->eattrs,
EA_LITERAL_STORE_ADATA(da, 0, &v1.val.ip, sizeof(ip_addr)));
break;
default:
bug("Unknown dynamic attribute type");
a = ea_set_attr(fs->eattrs,
EA_LITERAL_GENERIC(da->id, da->type, 0, .u = v1.val.bval));
break;
}
f_rta_cow(fs);
l->next = *fs->eattrs;
*fs->eattrs = l;
a->originated = 1;
a->fresh = 1;
}
}
@ -792,21 +924,20 @@
ACCESS_RTE;
ACCESS_EATTRS;
{
struct ea_list *l = lp_alloc(fs->pool, sizeof(struct ea_list) + sizeof(eattr));
l->next = NULL;
l->flags = EALF_SORTED;
l->count = 1;
l->attrs[0].id = da.ea_code;
l->attrs[0].flags = 0;
l->attrs[0].type = EAF_TYPE_UNDEF | EAF_ORIGINATED | EAF_FRESH;
l->attrs[0].u.data = 0;
f_rta_cow(fs);
l->next = *fs->eattrs;
*fs->eattrs = l;
ea_unset_attr(fs->eattrs, 1, da);
}
INST(FI_DEFAULT, 2, 1) {
ARG_ANY(1);
ARG_ANY(2);
RESULT_TYPE(f_type_element_type(v2.type));
log(L_INFO "Type of arg 1 is: %d", v1.type);
if (v1.type == T_VOID)
RESULT_VAL(v2);
else
RESULT_VAL(v1);
}
INST(FI_LENGTH, 1, 1) { /* Get length of */
@ -901,14 +1032,31 @@
((net_addr_roa6 *) v1.val.net)->max_pxlen);
}
INST(FI_ROA_ASN, 1, 1) { /* Get ROA ASN */
ARG(1, T_NET);
INST(FI_ASN, 1, 1) { /* Get ROA ASN or community ASN part */
ARG_ANY(1);
RESULT_TYPE(T_INT);
switch(v1.type)
{
case T_NET:
if (!net_is_roa(v1.val.net))
runtime( "ROA expected" );
RESULT(T_INT, i, (v1.val.net->type == NET_ROA4) ?
RESULT_(T_INT, i, (v1.val.net->type == NET_ROA4) ?
((net_addr_roa4 *) v1.val.net)->asn :
((net_addr_roa6 *) v1.val.net)->asn);
break;
case T_PAIR:
RESULT_(T_INT, i, v1.val.i >> 16);
break;
case T_LC:
RESULT_(T_INT, i, v1.val.lc.asn);
break;
default:
runtime( "Net, pair or lc expected" );
}
}
INST(FI_IP, 1, 1) { /* Convert prefix to ... */
@ -942,7 +1090,90 @@
RESULT(T_INT, i, as_path_get_last_nonaggregated(v1.val.ad));
}
INST(FI_RETURN, 1, 1) {
INST(FI_PAIR_DATA, 1, 1) { /* Get data part from the standard community */
ARG(1, T_PAIR);
RESULT(T_INT, i, v1.val.i & 0xFFFF);
}
INST(FI_LC_DATA1, 1, 1) { /* Get data1 part from the large community */
ARG(1, T_LC);
RESULT(T_INT, i, v1.val.lc.ldp1);
}
INST(FI_LC_DATA2, 1, 1) { /* Get data2 part from the large community */
ARG(1, T_LC);
RESULT(T_INT, i, v1.val.lc.ldp2);
}
INST(FI_MIN, 1, 1) { /* Get minimum element from list */
ARG_ANY(1);
RESULT_TYPE(f_type_element_type(v1.type));
switch(v1.type)
{
case T_CLIST:
{
u32 val = 0;
int_set_min(v1.val.ad, &val);
RESULT_(T_PAIR, i, val);
}
break;
case T_ECLIST:
{
u64 val = 0;
ec_set_min(v1.val.ad, &val);
RESULT_(T_EC, ec, val);
}
break;
case T_LCLIST:
{
lcomm val = { 0, 0, 0 };
lc_set_min(v1.val.ad, &val);
RESULT_(T_LC, lc, val);
}
break;
default:
runtime( "Clist or lclist expected" );
}
}
INST(FI_MAX, 1, 1) { /* Get maximum element from list */
ARG_ANY(1);
RESULT_TYPE(f_type_element_type(v1.type));
switch(v1.type)
{
case T_CLIST:
{
u32 val = 0;
int_set_max(v1.val.ad, &val);
RESULT_(T_PAIR, i, val);
}
break;
case T_ECLIST:
{
u64 val = 0;
ec_set_max(v1.val.ad, &val);
RESULT_(T_EC, ec, val);
}
break;
case T_LCLIST:
{
lcomm val = { 0, 0, 0 };
lc_set_max(v1.val.ad, &val);
RESULT_(T_LC, lc, val);
}
break;
default:
runtime( "Clist or lclist expected" );
}
}
INST(FI_RETURN, 1, 0) {
NEVER_CONSTANT;
/* Acquire the return value */
ARG_ANY(1);
@ -970,28 +1201,59 @@
INST(FI_CALL, 0, 1) {
NEVER_CONSTANT;
VARARG;
SYMBOL;
/* Fake result type declaration */
RESULT_TYPE(T_VOID);
FID_NEW_BODY()
ASSERT(sym->class == SYM_FUNCTION);
if (whati->varcount != sym->function->args)
cf_error("Function '%s' expects %u arguments, got %u arguments",
sym->name, sym->function->args, whati->varcount);
/* Typecheck individual arguments */
struct f_inst *a = fvar;
struct f_arg *b = sym->function->arg_list;
for (uint i = 1; a && b; a = a->next, b = b->next, i++)
{
enum btype b_type = b->arg->class & 0xff;
if (a->type && (a->type != b_type) && !f_const_promotion(a, b_type))
cf_error("Argument %u of '%s' must be %s, got %s",
i, sym->name, f_type_name(b_type), f_type_name(a->type));
}
ASSERT(!a && !b);
/* Add implicit void slot for the return value */
struct f_inst *tmp = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_VOID });
tmp->next = whati->fvar;
whati->fvar = tmp;
what->size += tmp->size;
/* Mark recursive calls, they have dummy f_line */
if (!sym->function->len)
what->flags |= FIF_RECURSIVE;
FID_SAME_BODY()
if (!(f1->sym->flags & SYM_FLAG_SAME))
if (!(f1->sym->flags & SYM_FLAG_SAME) && !(f1_->flags & FIF_RECURSIVE))
return 0;
FID_ITERATE_BODY()
if (!(what->flags & FIF_RECURSIVE))
BUFFER_PUSH(fit->lines) = whati->sym->function;
FID_INTERPRET_BODY()
/* Push the body on stack */
LINEX(sym->function);
curline.vbase = curline.ventry;
curline.emask |= FE_RETURN;
/* Before this instruction was called, there was the T_VOID
* automatic return value pushed on value stack and also
* sym->function->args function arguments. Setting the
* vbase to point to first argument. */
ASSERT(curline.ventry >= sym->function->args);
curline.ventry -= sym->function->args;
curline.vbase = curline.ventry;
/* Arguments on stack */
fstk->vcnt += sym->function->args;
/* Storage for local variables */
f_vcnt_check_overflow(sym->function->vars);
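
The comment in FI_CALL above describes the value-stack layout at call time: a T_VOID return slot followed by the function arguments, with vbase moved back so that variable offset 0 addresses the first argument. The following standalone sketch (all numbers invented) replays only that arithmetic for a call with two arguments:

/* Hypothetical standalone sketch of the FI_CALL vbase/ventry adjustment */
#include <stdio.h>

int main(void)
{
  unsigned vcnt = 7;       /* stack: caller data, T_VOID result slot, arg1, arg2 */
  unsigned args = 2;       /* sym->function->args */
  unsigned ventry = vcnt;  /* curline.ventry before the adjustment */

  ventry -= args;          /* step back over the pushed arguments */
  unsigned vbase = ventry; /* variable offset 0 now addresses the first argument */

  printf("arg0 at %u, arg1 at %u\n", vbase, vbase + 1);   /* prints 5 and 6 */
  return 0;
}
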
@ -1105,17 +1367,10 @@
if (v1.type == T_PATH)
{
const struct f_tree *set = NULL;
u32 key = 0;
if (v2.type == T_INT)
key = v2.val.i;
else if ((v2.type == T_SET) && (v2.val.t->from.type == T_INT))
set = v2.val.t;
if ((v2.type == T_SET) && path_set_type(v2.val.t) || (v2.type == T_INT))
RESULT_(T_PATH, ad, [[ as_path_filter(fpool, v1.val.ad, &v2, 0) ]]);
else
runtime("Can't delete non-integer (set)");
RESULT_(T_PATH, ad, [[ as_path_filter(fpool, v1.val.ad, set, key, 0) ]]);
}
else if (v1.type == T_CLIST)
@ -1167,10 +1422,8 @@
if (v1.type == T_PATH)
{
u32 key = 0;
if ((v2.type == T_SET) && (v2.val.t->from.type == T_INT))
RESULT_(T_PATH, ad, [[ as_path_filter(fpool, v1.val.ad, v2.val.t, key, 1) ]]);
if ((v2.type == T_SET) && path_set_type(v2.val.t))
RESULT_(T_PATH, ad, [[ as_path_filter(fpool, v1.val.ad, &v2, 1) ]]);
else
runtime("Can't filter integer");
}
@ -1208,37 +1461,7 @@
runtime("Can't filter non-[e|l]clist");
}
INST(FI_ROA_CHECK_IMPLICIT, 0, 1) { /* ROA Check */
NEVER_CONSTANT;
RTC(1);
struct rtable *table = rtc->table;
ACCESS_RTE;
ACCESS_EATTRS;
const net_addr *net = fs->rte->net;
/* We ignore temporary attributes, probably not a problem here */
/* 0x02 is a value of BA_AS_PATH, we don't want to include BGP headers */
eattr *e = ea_find(*fs->eattrs, EA_CODE(PROTOCOL_BGP, 0x02));
if (!e || ((e->type & EAF_TYPE_MASK) != EAF_TYPE_AS_PATH))
runtime("Missing AS_PATH attribute");
u32 as = 0;
as_path_get_last(e->u.ptr, &as);
if (!table)
runtime("Missing ROA table");
if (table->addr_type != NET_ROA4 && table->addr_type != NET_ROA6)
runtime("Table type must be either ROA4 or ROA6");
if (table->addr_type != (net->type == NET_IP4 ? NET_ROA4 : NET_ROA6))
RESULT(T_ENUM_ROA, i, ROA_UNKNOWN); /* Prefix and table type mismatch */
else
RESULT(T_ENUM_ROA, i, [[ net_roa_check(table, net, as) ]]);
}
INST(FI_ROA_CHECK_EXPLICIT, 2, 1) { /* ROA Check */
INST(FI_ROA_CHECK, 2, 1) { /* ROA Check */
NEVER_CONSTANT;
ARG(1, T_NET);
ARG(2, T_INT);
@ -1260,7 +1483,7 @@
}
INST(FI_FORMAT, 1, 0) { /* Format */
INST(FI_FORMAT, 1, 1) { /* Format */
ARG_ANY(1);
RESULT(T_STRING, s, val_format_str(fpool, &v1));
}

View File

@ -22,7 +22,7 @@
/* Flags for instructions */
enum f_instruction_flags {
FIF_PRINTED = 1, /* FI_PRINT_AND_DIE: message put in buffer */
FIF_RECURSIVE = 1, /* FI_CALL: function is directly recursive */
} PACKED;
/* Include generated filter instruction declarations */
@ -35,19 +35,26 @@ const char *f_instruction_name_(enum f_instruction_code fi);
static inline const char *f_instruction_name(enum f_instruction_code fi)
{ return f_instruction_name_(fi) + 3; }
struct f_arg {
struct symbol *arg;
struct f_arg *next;
};
/* Filter structures for execution */
/* Line of instructions to be unconditionally executed one after another */
struct f_line {
uint len; /* Line length */
u8 args; /* Function: Args required */
u8 vars;
u8 results; /* Results left on stack: cmd -> 0, term -> 1 */
struct f_arg *arg_list;
struct f_line_item items[0]; /* The items themselves */
};
/* Convert the f_inst infix tree to the f_line structures */
struct f_line *f_linearize_concat(const struct f_inst * const inst[], uint count);
static inline struct f_line *f_linearize(const struct f_inst *root)
{ return f_linearize_concat(&root, 1); }
struct f_line *f_linearize_concat(const struct f_inst * const inst[], uint count, uint results);
static inline struct f_line *f_linearize(const struct f_inst *root, uint results)
{ return f_linearize_concat(&root, 1, results); }
void f_dump_line(const struct f_line *, uint indent);
@ -87,15 +94,17 @@ void f_add_lines(const struct f_line_item *what, struct filter_iterator *fit);
struct filter *f_new_where(struct f_inst *);
static inline struct f_dynamic_attr f_new_dynamic_attr(u8 type, enum f_type f_type, uint code) /* Type as core knows it, type as filters know it, and code of dynamic attribute */
{ return (struct f_dynamic_attr) { .type = type, .f_type = f_type, .ea_code = code }; } /* f_type currently unused; will be handy for static type checking */
static inline struct f_dynamic_attr f_new_dynamic_attr_bit(u8 bit, enum f_type f_type, uint code) /* Type as core knows it, type as filters know it, and code of dynamic attribute */
{ return (struct f_dynamic_attr) { .type = EAF_TYPE_BITFIELD, .bit = bit, .f_type = f_type, .ea_code = code }; } /* f_type currently unused; will be handy for static type checking */
static inline struct f_static_attr f_new_static_attr(int f_type, int code, int readonly)
{ return (struct f_static_attr) { .f_type = f_type, .sa_code = code, .readonly = readonly }; }
struct f_inst *f_generate_complex(enum f_instruction_code fi_code, struct f_dynamic_attr da, struct f_inst *argument);
static inline struct f_static_attr f_new_static_attr(btype type, int code, int readonly)
{ return (struct f_static_attr) { .type = type, .sa_code = code, .readonly = readonly }; }
struct f_inst *f_generate_roa_check(struct rtable_config *table, struct f_inst *prefix, struct f_inst *asn);
struct f_attr_bit {
const struct ea_class *class;
uint bit;
};
#define f_new_dynamic_attr_bit(_bit, _name) ((struct f_attr_bit) { .bit = _bit, .class = ea_class_find(_name) })
/* Hook for call bt_assert() function in configuration */
extern void (*bt_assert_hook)(int result, const struct f_line_item *assert);

View File

@ -2,7 +2,7 @@
* Filters: utility functions
*
* Copyright 1998 Pavel Machek <pavel@ucw.cz>
* 2017 Jan Maria Matejka <mq@ucw.cz>
* 2017 Maria Matejka <mq@ucw.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
@ -13,7 +13,7 @@
#include "filter/f-inst.h"
#include "lib/idm.h"
#include "nest/protocol.h"
#include "nest/route.h"
#include "nest/rt.h"
#define P(a,b) ((a<<8) | b)
@ -37,147 +37,6 @@ struct filter *f_new_where(struct f_inst *where)
f_new_inst(FI_DIE, F_REJECT));
struct filter *f = cfg_allocz(sizeof(struct filter));
f->root = f_linearize(cond);
f->root = f_linearize(cond, 0);
return f;
}
#define CA_KEY(n) n->name, n->fda.type
#define CA_NEXT(n) n->next
#define CA_EQ(na,ta,nb,tb) (!strcmp(na,nb) && (ta == tb))
#define CA_FN(n,t) (mem_hash(n, strlen(n)) ^ (t*0xaae99453U))
#define CA_ORDER 8 /* Fixed */
struct ca_storage {
struct ca_storage *next;
struct f_dynamic_attr fda;
u32 uc;
char name[0];
};
HASH(struct ca_storage) ca_hash;
static struct idm ca_idm;
static struct ca_storage **ca_storage;
static uint ca_storage_max;
static void
ca_free(resource *r)
{
struct custom_attribute *ca = (void *) r;
struct ca_storage *cas = HASH_FIND(ca_hash, CA, ca->name, ca->fda->type);
ASSERT(cas);
ca->name = NULL;
ca->fda = NULL;
if (!--cas->uc) {
uint id = EA_CUSTOM_ID(cas->fda.ea_code);
idm_free(&ca_idm, id);
HASH_REMOVE(ca_hash, CA, cas);
ca_storage[id] = NULL;
mb_free(cas);
}
}
static void
ca_dump(resource *r)
{
struct custom_attribute *ca = (void *) r;
debug("name \"%s\" id 0x%04x ea_type 0x%02x f_type 0x%02x\n",
ca->name, ca->fda->ea_code, ca->fda->type, ca->fda->f_type);
}
static struct resclass ca_class = {
.name = "Custom attribute",
.size = sizeof(struct custom_attribute),
.free = ca_free,
.dump = ca_dump,
.lookup = NULL,
.memsize = NULL,
};
struct custom_attribute *
ca_lookup(pool *p, const char *name, int f_type)
{
int ea_type;
switch (f_type) {
case T_INT:
ea_type = EAF_TYPE_INT;
break;
case T_IP:
ea_type = EAF_TYPE_IP_ADDRESS;
break;
case T_QUAD:
ea_type = EAF_TYPE_ROUTER_ID;
break;
case T_PATH:
ea_type = EAF_TYPE_AS_PATH;
break;
case T_CLIST:
ea_type = EAF_TYPE_INT_SET;
break;
case T_ECLIST:
ea_type = EAF_TYPE_EC_SET;
break;
case T_LCLIST:
ea_type = EAF_TYPE_LC_SET;
break;
default:
cf_error("Custom route attribute of unsupported type");
}
static int inited = 0;
if (!inited) {
idm_init(&ca_idm, &root_pool, 8);
HASH_INIT(ca_hash, &root_pool, CA_ORDER);
ca_storage_max = 256;
ca_storage = mb_allocz(&root_pool, sizeof(struct ca_storage *) * ca_storage_max);
inited++;
}
struct ca_storage *cas = HASH_FIND(ca_hash, CA, name, ea_type);
if (cas) {
cas->uc++;
} else {
uint id = idm_alloc(&ca_idm);
if (id >= EA_CUSTOM_BIT)
cf_error("Too many custom attributes.");
if (id >= ca_storage_max) {
ca_storage_max *= 2;
ca_storage = mb_realloc(ca_storage, sizeof(struct ca_storage *) * ca_storage_max * 2);
}
cas = mb_allocz(&root_pool, sizeof(struct ca_storage) + strlen(name) + 1);
cas->fda = f_new_dynamic_attr(ea_type, f_type, EA_CUSTOM(id));
cas->uc = 1;
strcpy(cas->name, name);
ca_storage[id] = cas;
HASH_INSERT(ca_hash, CA, cas);
}
struct custom_attribute *ca = ralloc(p, &ca_class);
ca->fda = &(cas->fda);
ca->name = cas->name;
return ca;
}
const char *
ea_custom_name(uint ea)
{
uint id = EA_CUSTOM_ID(ea);
if (id >= ca_storage_max)
return NULL;
if (!ca_storage[id])
return NULL;
return ca_storage[id]->name;
}

View File

@ -35,10 +35,10 @@
#include "lib/ip.h"
#include "lib/net.h"
#include "lib/flowspec.h"
#include "nest/route.h"
#include "nest/rt.h"
#include "nest/protocol.h"
#include "nest/iface.h"
#include "nest/attrs.h"
#include "lib/attrs.h"
#include "conf/conf.h"
#include "filter/filter.h"
#include "filter/f-inst.h"
@ -79,9 +79,6 @@ struct filter_state {
/* Cached pointer to ea_list */
struct ea_list **eattrs;
/* Linpool for adata allocation */
struct linpool *pool;
/* Buffer for log output */
struct buffer buf;
@ -99,28 +96,7 @@ void (*bt_assert_hook)(int result, const struct f_line_item *assert);
static inline void f_cache_eattrs(struct filter_state *fs)
{
fs->eattrs = &(fs->rte->attrs->eattrs);
}
/*
* rta_cow - prepare rta for modification by filter
*/
static void
f_rta_cow(struct filter_state *fs)
{
if (!rta_is_cached(fs->rte->attrs))
return;
/*
* Get shallow copy of rta. Fields eattrs and nexthops of rta are shared
* with fs->old_rta (they will be copied when the cached rta will be obtained
* at the end of f_run()), also the lock of hostentry is inherited (we
* suppose hostentry is not changed by filters).
*/
fs->rte->attrs = rta_do_cow(fs->rte->attrs, fs->pool);
/* Re-cache the ea_list */
f_cache_eattrs(fs);
fs->eattrs = &(fs->rte->attrs);
}
static struct tbf rl_runtime_err = TBF_DEFAULT_LOG_LIMITS;
@ -185,8 +161,8 @@ interpret(struct filter_state *fs, const struct f_line *line, struct f_val *val)
return F_ERROR; \
} while(0)
#define falloc(size) lp_alloc(fs->pool, size)
#define fpool fs->pool
#define falloc(size) tmp_alloc(size)
#define fpool tmp_linpool
#define ACCESS_EATTRS do { if (!fs->eattrs) f_cache_eattrs(fs); } while (0)
@ -203,8 +179,7 @@ interpret(struct filter_state *fs, const struct f_line *line, struct f_val *val)
}
/* End of current line. Drop local variables before exiting. */
fstk->vcnt -= curline.line->vars;
fstk->vcnt -= curline.line->args;
fstk->vcnt = curline.ventry + curline.line->results;
fstk->ecnt--;
}
@ -237,7 +212,7 @@ interpret(struct filter_state *fs, const struct f_line *line, struct f_val *val)
* tmp_pool, otherwise the filters may modify it.
*/
enum filter_return
f_run(const struct filter *filter, struct rte *rte, struct linpool *tmp_pool, int flags)
f_run(const struct filter *filter, struct rte *rte, int flags)
{
if (filter == FILTER_ACCEPT)
return F_ACCEPT;
@ -250,7 +225,6 @@ f_run(const struct filter *filter, struct rte *rte, struct linpool *tmp_pool, in
/* Initialize the filter state */
filter_state = (struct filter_state) {
.rte = rte,
.pool = tmp_pool,
.flags = flags,
};
@ -285,11 +259,10 @@ f_run(const struct filter *filter, struct rte *rte, struct linpool *tmp_pool, in
*/
enum filter_return
f_eval_rte(const struct f_line *expr, struct rte *rte, struct linpool *tmp_pool)
f_eval_rte(const struct f_line *expr, struct rte *rte)
{
filter_state = (struct filter_state) {
.rte = rte,
.pool = tmp_pool,
};
f_stack_init(filter_state);
@ -308,11 +281,9 @@ f_eval_rte(const struct f_line *expr, struct rte *rte, struct linpool *tmp_pool)
* @pres: here the output will be stored
*/
enum filter_return
f_eval(const struct f_line *expr, struct linpool *tmp_pool, struct f_val *pres)
f_eval(const struct f_line *expr, struct f_val *pres)
{
filter_state = (struct filter_state) {
.pool = tmp_pool,
};
filter_state = (struct filter_state) {};
f_stack_init(filter_state);
@ -331,9 +302,7 @@ uint
f_eval_int(const struct f_line *expr)
{
/* Called independently in parse-time to eval expressions */
filter_state = (struct filter_state) {
.pool = cfg_mem,
};
filter_state = (struct filter_state) {};
f_stack_init(filter_state);
@ -354,10 +323,10 @@ f_eval_int(const struct f_line *expr)
* f_eval_buf - get a value of a term and print it to the supplied buffer
*/
enum filter_return
f_eval_buf(const struct f_line *expr, struct linpool *tmp_pool, buffer *buf)
f_eval_buf(const struct f_line *expr, buffer *buf)
{
struct f_val val;
enum filter_return fret = f_eval(expr, tmp_pool, &val);
enum filter_return fret = f_eval(expr, &val);
if (fret <= F_RETURN)
val_format(&val, buf);
return fret;

View File

@ -13,8 +13,8 @@
#include "lib/resource.h"
#include "lib/ip.h"
#include "lib/macro.h"
#include "nest/route.h"
#include "nest/attrs.h"
#include "nest/rt.h"
#include "lib/attrs.h"
/* Possible return values of filter execution */
enum filter_return {
@ -51,10 +51,10 @@ struct filter {
struct rte;
enum filter_return f_run(const struct filter *filter, struct rte *rte, struct linpool *tmp_pool, int flags);
enum filter_return f_eval_rte(const struct f_line *expr, struct rte *rte, struct linpool *tmp_pool);
enum filter_return f_run(const struct filter *filter, struct rte *rte, int flags);
enum filter_return f_eval_rte(const struct f_line *expr, struct rte *rte);
uint f_eval_int(const struct f_line *expr);
enum filter_return f_eval_buf(const struct f_line *expr, struct linpool *tmp_pool, buffer *buf);
enum filter_return f_eval_buf(const struct f_line *expr, buffer *buf);
const char *filter_name(const struct filter *filter);
int filter_same(const struct filter *new, const struct filter *old);
@ -70,13 +70,4 @@ void filters_dump_all(void);
#define FF_SILENT 2 /* Silent filter execution */
/* Custom route attributes */
struct custom_attribute {
resource r;
struct f_dynamic_attr *fda;
const char *name;
};
struct custom_attribute *ca_lookup(pool *p, const char *name, int ea_type);
#endif

View File

@ -46,9 +46,7 @@ run_function(const void *arg)
if (t->cmp)
return t->result == f_same(t->fn, t->cmp);
linpool *tmp = lp_new_default(&root_pool);
enum filter_return fret = f_eval(t->fn, tmp, NULL);
rfree(tmp);
enum filter_return fret = f_eval(t->fn, NULL);
return (fret < F_REJECT);
}

View File

@ -9,7 +9,109 @@ router id 62.168.0.1;
/* We have to setup any protocol */
protocol device { }
/* Setting some custom attributes, enough to force BIRD to reallocate the attribute idmap */
attribute int test_ca_int1;
attribute int test_ca_int2;
attribute int test_ca_int3;
attribute int test_ca_int4;
attribute int test_ca_int5;
attribute int test_ca_int6;
attribute int test_ca_int7;
attribute int test_ca_int8;
attribute int test_ca_int9;
attribute int test_ca_int10;
attribute ip test_ca_ip1;
attribute ip test_ca_ip2;
attribute ip test_ca_ip3;
attribute ip test_ca_ip4;
attribute ip test_ca_ip5;
attribute ip test_ca_ip6;
attribute ip test_ca_ip7;
attribute ip test_ca_ip8;
attribute ip test_ca_ip9;
attribute ip test_ca_ip10;
attribute quad test_ca_quad1;
attribute quad test_ca_quad2;
attribute quad test_ca_quad3;
attribute quad test_ca_quad4;
attribute quad test_ca_quad5;
attribute quad test_ca_quad6;
attribute quad test_ca_quad7;
attribute quad test_ca_quad8;
attribute quad test_ca_quad9;
attribute quad test_ca_quad10;
attribute bgppath test_ca_bgppath1;
attribute bgppath test_ca_bgppath2;
attribute bgppath test_ca_bgppath3;
attribute bgppath test_ca_bgppath4;
attribute bgppath test_ca_bgppath5;
attribute bgppath test_ca_bgppath6;
attribute bgppath test_ca_bgppath7;
attribute bgppath test_ca_bgppath8;
attribute bgppath test_ca_bgppath9;
attribute bgppath test_ca_bgppath10;
attribute clist test_ca_clist1;
attribute clist test_ca_clist2;
attribute clist test_ca_clist3;
attribute clist test_ca_clist4;
attribute clist test_ca_clist5;
attribute clist test_ca_clist6;
attribute clist test_ca_clist7;
attribute clist test_ca_clist8;
attribute clist test_ca_clist9;
attribute clist test_ca_clist10;
attribute eclist test_ca_eclist1;
attribute eclist test_ca_eclist2;
attribute eclist test_ca_eclist3;
attribute eclist test_ca_eclist4;
attribute eclist test_ca_eclist5;
attribute eclist test_ca_eclist6;
attribute eclist test_ca_eclist7;
attribute eclist test_ca_eclist8;
attribute eclist test_ca_eclist9;
attribute eclist test_ca_eclist10;
attribute lclist test_ca_lclist1;
attribute lclist test_ca_lclist2;
attribute lclist test_ca_lclist3;
attribute lclist test_ca_lclist4;
attribute lclist test_ca_lclist5;
attribute lclist test_ca_lclist6;
attribute lclist test_ca_lclist7;
attribute lclist test_ca_lclist8;
attribute lclist test_ca_lclist9;
attribute lclist test_ca_lclist10;
attribute lclist test_ca_lclist_max1;
attribute lclist test_ca_lclist_max2;
attribute lclist test_ca_lclist_max3;
attribute lclist test_ca_lclist_max4;
attribute lclist test_ca_lclist_max5;
attribute lclist test_ca_lclist_max6;
attribute lclist test_ca_lclist_max7;
attribute lclist test_ca_lclist_max8;
attribute lclist test_ca_lclist_max9;
attribute lclist test_ca_lclist_max10;
attribute lclist test_ca_lclist_max11;
attribute lclist test_ca_lclist_max12;
attribute lclist test_ca_lclist_max13;
attribute lclist test_ca_lclist_max14;
attribute lclist test_ca_lclist_max15;
attribute lclist test_ca_lclist_max16;
attribute lclist test_ca_lclist_max17;
attribute lclist test_ca_lclist_max18;
attribute lclist test_ca_lclist_max19;
attribute lclist test_ca_lclist_max20;
attribute lclist test_ca_lclist_max21;
/* Uncomment this to get an error */
#attribute int bgp_path;
/*
* Common definitions and functions
@ -44,9 +146,8 @@ bt_test_same(onef, twof, 0);
*/
function t_bool()
bool b;
{
b = true;
bool b = true;
bt_assert(b);
bt_assert(!!b);
@ -82,12 +183,11 @@ define xyzzy = (120+10);
define '1a-a1' = (xyzzy-100);
function t_int()
int i;
{
bt_assert(xyzzy = 130);
bt_assert('1a-a1' = 30);
i = four;
int i = four;
i = 12*100 + 60/2 + i;
i = (i + 0);
bt_assert(i = 1234);
@ -111,6 +211,14 @@ int i;
bt_assert(!(i = 4));
bt_assert(1 <= 1);
bt_assert(!(1234 < 1234));
bt_assert(10 - 5 = 5);
bt_assert(4294967295 + 1 = 0);
bt_assert(6*9=54);
bt_assert(984/41 = 24);
bt_assert(123/45 = 2);
bt_assert(0xfee1a | 0xbeef = 0xffeff);
bt_assert(0xfee1a & 0xbeef = 0xae0a);
}
bt_test_suite(t_int, "Testing integers");
@ -128,14 +236,19 @@ define is2 = [(17+2), 17, 15, 11, 8, 5, 3, 2];
define is3 = [5, 17, 2, 11, 8, 15, 3, 19];
function t_int_set()
int set is;
{
int set is = [];
bt_assert(is = []);
bt_assert(0 !~ is);
bt_assert(1 ~ [1,2,3]);
bt_assert(5 ~ [1..20]);
bt_assert(2 ~ [ 1, 2, 3 ]);
bt_assert(5 ~ [ 4 .. 7 ]);
bt_assert(1 !~ [ 2, 3, 4 ]);
bt_assert(999 !~ [ 666, 333 ]);
bt_assert(1 !~ []);
bt_assert(1 !~ is);
is = [ 2, 3, 4, 7..11 ];
bt_assert(10 ~ is);
@ -170,6 +283,7 @@ int set is;
bt_assert([1,4..10,20] = [1,4..10,20]);
bt_assert(format([ 1, 2, 1, 1, 1, 3, 4, 1, 1, 1, 5 ]) = "[1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5]");
bt_assert(format([]) = "[]");
}
bt_test_suite(t_int_set, "Testing sets of integers");
@ -183,9 +297,8 @@ bt_test_suite(t_int_set, "Testing sets of integers");
*/
function t_string()
string st;
{
st = "Hello";
string st = "Hello";
bt_assert(format(st) = "Hello");
bt_assert(st ~ "Hell*");
bt_assert(st ~ "?ello");
@ -210,9 +323,8 @@ function 'mkpair-a'(int a)
}
function t_pair()
pair pp;
{
pp = (1, 2);
pair pp = (1, 2);
bt_assert(format(pp) = "(1,2)");
bt_assert((1,2) = pp);
bt_assert((1,1+1) = pp);
@ -233,10 +345,11 @@ bt_test_suite(t_pair, "Testing pairs");
*/
function t_pair_set()
pair pp;
pair set ps;
{
pp = (1, 2);
pair pp = (1, 2);
pair set ps = [];
bt_assert(pp !~ ps);
ps = [(1,(one+one)), (3,4)..(4,8), (5,*), (6,3..6)];
bt_assert(format(ps) = "[(1,2), (3,4)..(4,8), (5,0)..(5,65535), (6,3)..(6,6)]");
bt_assert(pp ~ ps);
@ -253,6 +366,7 @@ pair set ps;
bt_assert((6,6+one) !~ ps);
bt_assert(((one+6),2) !~ ps);
bt_assert((1,1) !~ ps);
bt_assert(pp !~ []);
ps = [(20..150, 200..300), (50100..50200, 1000..50000), (*, 5+5)];
bt_assert((100,200) ~ ps);
@ -304,6 +418,7 @@ quad qq;
qq = 1.2.3.4;
bt_assert(qq ~ [1.2.3.4, 5.6.7.8]);
bt_assert(qq !~ [1.2.1.1, 1.2.3.5]);
bt_assert(qq !~ []);
}
bt_test_suite(t_quad_set, "Testing sets of quads");
@ -335,6 +450,26 @@ ip p;
p = 1234:5678::;
bt_assert(!p.is_v4);
bt_assert(p.mask(24) = 1234:5600::);
p = 1:2:3:4:5:6:7:8;
bt_assert(!p.is_v4);
bt_assert(format(p) = "1:2:3:4:5:6:7:8");
bt_assert(p.mask(64) = 1:2:3:4::);
p = 10:20:30:40:50:60:70:80;
bt_assert(!p.is_v4);
bt_assert(format(p) = "10:20:30:40:50:60:70:80");
bt_assert(p.mask(64) = 10:20:30:40::);
p = 1090:20a0:30b0:40c0:50d0:60e0:70f0:8000;
bt_assert(!p.is_v4);
bt_assert(format(p) = "1090:20a0:30b0:40c0:50d0:60e0:70f0:8000");
bt_assert(p.mask(64) = 1090:20a0:30b0:40c0::);
p = ::fffe:6:c0c:936d:88c7:35d3;
bt_assert(!p.is_v4);
bt_assert(format(p) = "::fffe:6:c0c:936d:88c7:35d3");
bt_assert(p.mask(64) = 0:0:fffe:6::);
}
bt_test_suite(t_ip, "Testing ip address");
@ -364,6 +499,7 @@ ip set ips;
bt_assert(1.2.3.4 !~ [ 1.2.3.3, 1.2.3.5 ]);
bt_assert(1.2.3.4 ~ [ 1.2.3.3..1.2.3.5 ]);
bt_assert(1.2.3.4 !~ []);
}
bt_test_suite(t_ip_set, "Testing sets of ip address");
@ -378,9 +514,9 @@ bt_test_suite(t_ip_set, "Testing sets of ip address");
function t_enum()
{
bt_assert(format(RTS_STATIC) = "(enum 30)1");
bt_assert(format(NET_IP4) = "(enum 36)1");
bt_assert(format(NET_VPN6) = "(enum 36)4");
bt_assert(format(RTS_STATIC) = "(enum 31)1");
bt_assert(format(NET_IP4) = "(enum 3b)1");
bt_assert(format(NET_VPN6) = "(enum 3b)4");
bt_assert(RTS_STATIC ~ [RTS_STATIC, RTS_DEVICE]);
bt_assert(RTS_BGP !~ [RTS_STATIC, RTS_DEVICE]);
@ -452,13 +588,34 @@ function test_pxset(prefix set pxs)
bt_assert(1.0.0.0/8 ~ [ 1.0.0.0/8+ ]);
bt_assert(1.0.0.0/9 !~ [ 1.0.0.0/8- ]);
bt_assert(1.2.0.0/17 !~ [ 1.0.0.0/8{ 15 , 16 } ]);
bt_assert(net10 !~ []);
bt_assert([ 10.0.0.0/8{ 15 , 17 } ] = [ 10.0.0.0/8{ 15 , 17 } ]);
}
function test_empty_pxset(prefix set pxs)
int set s0;
prefix set s1;
{
s0 = [];
s1 = [];
bt_assert(pxs != s0);
bt_assert(pxs = s1);
bt_assert(pxs = []);
}
function t_prefix_set()
prefix set pxs;
{
pxs = [];
bt_assert(format(pxs) = "[]");
bt_assert(pxs = []);
bt_assert(1.2.0.0/16 !~ []);
bt_assert(1.2.0.0/16 !~ pxs);
test_empty_pxset([]);
test_empty_pxset(pxs);
pxs = [ 1.2.0.0/16, 1.4.0.0/16+, 44.66.88.64/30{24,28}, 12.34.56.0/24{8,16} ];
bt_assert(format(pxs) = "[1.2.0.0/16{0.1.0.0}, 1.4.0.0/16{0.1.255.255}, 12.34.0.0/16{1.255.0.0}, 44.66.88.64/28{0.0.1.240}]");
@ -478,6 +635,33 @@ prefix set pxs;
bt_assert(1.2.0.0/16 ~ [ 1.0.0.0/8{ 15 , 17 } ]);
bt_assert([ 10.0.0.0/8{ 15 , 17 } ] != [ 11.0.0.0/8{ 15 , 17 } ]);
/* Formatting of prefix sets, some cases are a bit strange */
bt_assert(format([ 0.0.0.0/0 ]) = "[0.0.0.0/0]");
bt_assert(format([ 10.10.0.0/32 ]) = "[10.10.0.0/32{0.0.0.1}]");
bt_assert(format([ 10.10.0.0/17 ]) = "[10.10.0.0/17{0.0.128.0}]");
bt_assert(format([ 10.10.0.0/17{17,19} ]) = "[10.10.0.0/17{0.0.224.0}]"); # 224 = 128+64+32
bt_assert(format([ 10.10.128.0/17{18,19} ]) = "[10.10.128.0/18{0.0.96.0}, 10.10.192.0/18{0.0.96.0}]"); # 96 = 64+32
bt_assert(format([ 10.10.64.0/18- ]) = "[0.0.0.0/0, 0.0.0.0/1{128.0.0.0}, 0.0.0.0/2{64.0.0.0}, 0.0.0.0/3{32.0.0.0}, 10.10.0.0/16{255.255.0.0}, 10.10.0.0/17{0.0.128.0}, 10.10.64.0/18{0.0.64.0}]");
bt_assert(format([ 10.10.64.0/18+ ]) = "[10.10.64.0/18{0.0.96.0}, 10.10.64.0/20{0.0.31.255}, 10.10.80.0/20{0.0.31.255}, 10.10.96.0/20{0.0.31.255}, 10.10.112.0/20{0.0.31.255}]");
bt_assert(format([ 10.10.160.0/19 ]) = "[10.10.160.0/19{0.0.32.0}]");
bt_assert(format([ 10.10.160.0/19{19,22} ]) = "[10.10.160.0/19{0.0.32.0}, 10.10.160.0/20{0.0.28.0}, 10.10.176.0/20{0.0.28.0}]"); # 28 = 16+8+4
bt_assert(format([ 10.10.160.0/19+ ]) = "[10.10.160.0/19{0.0.32.0}, 10.10.160.0/20{0.0.31.255}, 10.10.176.0/20{0.0.31.255}]");
bt_assert(format([ ::/0 ]) = "[::/0]");
bt_assert(format([ 11:22:33:44:55:66:77:88/128 ]) = "[11:22:33:44:55:66:77:88/128{::1}]");
bt_assert(format([ 11:22:33:44::/64 ]) = "[11:22:33:44::/64{0:0:0:1::}]");
bt_assert(format([ 11:22:33:44::/64+ ]) = "[11:22:33:44::/64{::1:ffff:ffff:ffff:ffff}]");
bt_assert(format([ 11:22:33:44::/65 ]) = "[11:22:33:44::/65{::8000:0:0:0}]");
bt_assert(format([ 11:22:33:44::/65{65,67} ]) = "[11:22:33:44::/65{::e000:0:0:0}]"); # e = 8+4+2
bt_assert(format([ 11:22:33:44:8000::/65{66,67} ]) = "[11:22:33:44:8000::/66{::6000:0:0:0}, 11:22:33:44:c000::/66{::6000:0:0:0}]"); # 6 = 4+2
bt_assert(format([ 11:22:33:44:4000::/66- ]) = "[::/0, ::/1{8000::}, ::/2{4000::}, ::/3{2000::}, 11:22:33:44::/64{ffff:ffff:ffff:ffff::}, 11:22:33:44::/65{::8000:0:0:0}, 11:22:33:44:4000::/66{::4000:0:0:0}]");
bt_assert(format([ 11:22:33:44:4000::/66+ ]) = "[11:22:33:44:4000::/66{::6000:0:0:0}, 11:22:33:44:4000::/68{::1fff:ffff:ffff:ffff}, 11:22:33:44:5000::/68{::1fff:ffff:ffff:ffff}, 11:22:33:44:6000::/68{::1fff:ffff:ffff:ffff}, 11:22:33:44:7000::/68{::1fff:ffff:ffff:ffff}]");
bt_assert(format([ 11:22:33:44:c000::/67 ]) = "[11:22:33:44:c000::/67{::2000:0:0:0}]");
bt_assert(format([ 11:22:33:44:c000::/67{67,71} ]) = "[11:22:33:44:c000::/67{::2000:0:0:0}, 11:22:33:44:c000::/68{::1e00:0:0:0}, 11:22:33:44:d000::/68{::1e00:0:0:0}]");
bt_assert(format([ 11:22:33:44:c000::/67+ ]) = "[11:22:33:44:c000::/67{::2000:0:0:0}, 11:22:33:44:c000::/68{::1fff:ffff:ffff:ffff}, 11:22:33:44:d000::/68{::1fff:ffff:ffff:ffff}]");
}
bt_test_suite(t_prefix_set, "Testing prefix sets");
@ -516,6 +700,12 @@ bt_test_suite(t_prefix6, "Testing prefix IPv6");
function t_prefix6_set()
prefix set pxs;
{
pxs = [];
bt_assert(format(pxs) = "[]");
bt_assert(pxs = []);
bt_assert(12::34/128 !~ []);
bt_assert(12::34/128 !~ pxs);
bt_assert(1180::/16 ~ [ 1100::/8{15, 17} ]);
bt_assert(12::34 = 12::34);
bt_assert(12::34 ~ [ 12::33..12::35 ]);
@ -557,6 +747,12 @@ prefix set pxs;
bt_assert(2000::/29 !~ pxs);
bt_assert(1100::/10 !~ pxs);
bt_assert(2010::/26 !~ pxs);
pxs = [ 52E0::/13{13,128} ];
bt_assert(52E7:BE81:379B:E6FD:541F:B0D0::/93 ~ pxs);
pxs = [ 41D8:8718::/30{0,30}, 413A:99A8:6C00::/38{38,128} ];
bt_assert(4180::/9 ~ pxs);
}
bt_test_suite(t_prefix6_set, "Testing prefix IPv6 sets");
@ -627,6 +823,7 @@ int set set12;
bt_assert(3 ~ p2);
bt_assert(p2 ~ [2, 10..20]);
bt_assert(p2 ~ [4, 10..20]);
bt_assert(p2 !~ []);
p2 = prepend(p2, 5);
bt_assert(p2 !~ pm1);
@ -637,6 +834,8 @@ int set set12;
bt_assert(p2 ~ [= 5 [2, 4, 6] 3 [1..2] 1 =]);
bt_assert(p2 ~ [= 5 set35 3 set12 set12 =]);
bt_assert(p2 ~ mkpath(5, 4));
bt_assert(p2 ~ [= * [3] * =]);
bt_assert(p2 !~ [= * [] * =]);
bt_assert(p2.len = 5);
bt_assert(p2.first = 5);
@ -645,6 +844,10 @@ int set set12;
bt_assert(p2.len = 5);
bt_assert(delete(p2, 3) = prepend(prepend(prepend(prepend(+empty+, 1), 2), 4), 5));
bt_assert(filter(p2, [1..3]) = prepend(prepend(prepend(+empty+, 1), 2), 3));
bt_assert(delete(p2, []) = p2);
bt_assert(filter(p2, []) = +empty+);
bt_assert(delete(prepend(prepend(+empty+, 0), 1), []) = prepend(prepend(+empty+, 0), 1));
bt_assert(filter(prepend(prepend(+empty+, 0), 1), []) = +empty+);
p2 = prepend( + empty +, 5 );
p2 = prepend( p2, 4 );
@ -664,6 +867,15 @@ int set set12;
bt_assert(delete(p2, [4..5]) = prepend(prepend(prepend(prepend(+empty+, 3), 3), 2), 1));
bt_assert(format([= 1 2+ 3 =]) = "[= 1 2 + 3 =]");
# iteration over path
int x = 0;
int y = 0;
for int i in p2 do {
x = x + i;
y = y + x;
}
bt_assert(x = 18 && y = 50);
}
bt_test_suite(t_path, "Testing paths");
@ -683,6 +895,11 @@ clist l;
clist l2;
clist r;
{
bt_assert((10, 20).asn = 10);
bt_assert((10, 20).data = 20);
bt_assert(p23.asn = 2);
bt_assert(p23.data = 3);
l = - empty -;
bt_assert(l !~ [(*,*)]);
bt_assert((l ~ [(*,*)]) != (l !~ [(*,*)]));
@ -700,6 +917,7 @@ clist r;
bt_assert(l ~ [(2,2..3)]);
bt_assert(l ~ [(1,1..2)]);
bt_assert(l ~ [(1,1)..(1,2)]);
bt_assert(l !~ []);
l = add(l, (2,5));
l = add(l, (5,one));
@ -737,6 +955,9 @@ clist r;
bt_assert(l !~ [(*,(one+6))]);
bt_assert(l !~ [(*, (one+one+one))]);
bt_assert(delete(l, []) = l);
bt_assert(filter(l, []) = -empty-);
l = delete(l, [(*,(one+onef(3)))]);
l = delete(l, [(*,(4+one))]);
bt_assert(l = add(-empty-, (3,1)));
@ -775,6 +996,18 @@ clist r;
r = filter(l, [(3,1), (*,2)]);
bt_assert(r = add(add(-empty-, (3,1)), (3,2)));
bt_assert(format(r) = "(clist (3,1) (3,2))");
# minimum & maximum element
r = add(add(add(add(add(-empty-, (2,1)), (1,3)), (2,2)), (3,1)), (2,3));
bt_assert(format(r) = "(clist (2,1) (1,3) (2,2) (3,1) (2,3))");
bt_assert(r.min = (1,3));
bt_assert(r.max = (3,1));
# iteration over clist
int x = 0;
for pair c in r do
x = x + c.asn * c.asn * c.data;
bt_assert(x = 36);
}
bt_test_suite(t_clist, "Testing lists of communities");
@ -848,11 +1081,15 @@ eclist r;
bt_assert((ro, 10.20.30.40, 100) !~ el);
bt_assert(el !~ [(rt, 10, 35..40)]);
bt_assert(el !~ [(ro, 10, *)]);
bt_assert(el !~ []);
el = add(el, (rt, 10, 40));
el2 = filter(el, [(rt, 10, 20..40)] );
el2 = add(el2, (rt, 10, 50));
bt_assert(delete(el, []) = el);
bt_assert(filter(el, []) = --empty--);
# eclist A (1,30,40)
bt_assert(el = add(add(add(--empty--, (rt, 10, 1)), (rt, 10, 30)), (rt, 10, 40)));
bt_assert(format(el) = "(eclist (rt, 10, 1) (rt, 10, 30) (rt, 10, 40))");
@ -880,6 +1117,19 @@ eclist r;
r = filter(el, [(rt, 10, 1), (rt, 10, 25..30), (ro, 10, 40)]);
bt_assert(r = add(add(--empty--, (rt, 10, 1)), (rt, 10, 30)));
bt_assert(format(r) = "(eclist (rt, 10, 1) (rt, 10, 30))");
# minimum & maximum element
r = add(add(add(add(add(--empty--, (rt, 2, 1)), (rt, 1, 3)), (rt, 2, 2)), (rt, 3, 1)), (rt, 2, 3));
bt_assert(format(r) = "(eclist (rt, 2, 1) (rt, 1, 3) (rt, 2, 2) (rt, 3, 1) (rt, 2, 3))");
bt_assert(r.min = (rt, 1, 3));
bt_assert(r.max = (rt, 3, 1));
# iteration over eclist
int x = 0;
for ec c in r do
if c > (rt, 2, 0) && c < (rt, 3, 0) then
x = x + 1;
bt_assert(x = 3);
}
bt_test_suite(t_eclist, "Testing lists of extended communities");
@ -939,6 +1189,10 @@ lclist r;
bt_assert(---empty--- = ---empty---);
bt_assert((10, 20, 30) !~ ---empty---);
bt_assert((10, 20, 30).asn = 10);
bt_assert((10, 20, 30).data1 = 20);
bt_assert((10, 20, 30).data2 = 30);
ll = --- empty ---;
ll = add(ll, (ten, 20, 30));
ll = add(ll, (1000, 2000, 3000));
@ -960,6 +1214,9 @@ lclist r;
ll2 = add(ll2, (30, 30, 30));
ll2 = add(ll2, (40, 40, 40));
bt_assert(delete(ll, []) = ll);
bt_assert(filter(ll, []) = ---empty---);
# lclist A (10, 20, 30)
bt_assert(format(ll) = "(lclist (10, 10, 10) (20, 20, 20) (30, 30, 30))");
@ -985,6 +1242,25 @@ lclist r;
r = filter(ll, [(5..15, *, *), (20, 15..25, *)]);
bt_assert(r = add(add(---empty---, (10, 10, 10)), (20, 20, 20)));
bt_assert(format(r) = "(lclist (10, 10, 10) (20, 20, 20))");
# minimum & maximum element
r = add(add(add(add(add(---empty---, (2, 3, 3)), (1, 2, 3)), (2, 3, 1)), (3, 1, 2)), (2, 1, 3));
bt_assert(format(r) = "(lclist (2, 3, 3) (1, 2, 3) (2, 3, 1) (3, 1, 2) (2, 1, 3))");
bt_assert(r.min = (1, 2, 3));
bt_assert(r.max = (3, 1, 2));
# iteration over lclist
int x = 0;
int y = 0;
lc mx = (0, 0, 0);
for lc c in r do {
int asn2 = c.asn * c.asn;
x = x + asn2 * c.data1;
y = y + asn2 * c.data2;
if c > mx then mx = c;
}
bt_assert(x = 39 && y = 49);
bt_assert(mx = r.max);
}
bt_test_suite(t_lclist, "Testing lists of large communities");
@ -1013,6 +1289,7 @@ lc set lls;
bt_assert(ll !~ [(5,10,15), (10,21,30)]);
bt_assert(ll !~ [(10,21..25,*)]);
bt_assert(ll !~ [(11, *, *)]);
bt_assert(ll !~ []);
lls = [(10, 10, 10), (20, 20, 15..25), (30, 30, *), (40, 35..45, *), (50, *, *), (55..65, *, *)];
bt_assert(format(lls) = "[(10, 10, 10), (20, 20, 15)..(20, 20, 25), (30, 30, 0)..(30, 30, 4294967295), (40, 35, 0)..(40, 45, 4294967295), (50, 0, 0)..(50, 4294967295, 4294967295), (55, 0, 0)..(65, 4294967295, 4294967295)]");
@ -1069,6 +1346,10 @@ bt_test_suite(t_rd, "Testing route distinguishers");
function t_rd_set()
rd set rds;
{
rds = [];
bt_assert(rds = []);
bt_assert(10:20 !~ rds);
rds = [10:20, 100000:100..100000:200];
bt_assert(format(rds) = "[10:20, 100000:100..100000:200]");
@ -1079,6 +1360,7 @@ rd set rds;
bt_assert(100000:128 ~ rds);
bt_assert(100000:200 ~ rds);
bt_assert(100010:150 !~ rds);
bt_assert(100010:150 !~ []);
}
bt_test_suite(t_rd_set, "Testing sets of route distinguishers");
@ -1145,7 +1427,85 @@ function fifteen()
return 15;
}
function local_vars(int j)
{
int k = 10;
bt_assert(j = 5 && k = 10);
{
int j = 15;
k = 20;
bt_assert(j = 15 && k = 20);
}
bt_assert(j = 5 && k = 20);
if j < 10 then
{
int j = 25;
string k = "hello";
bt_assert(j = 25 && k = "hello");
}
bt_assert(j = 5 && k = 20);
int m = 100;
{
j = 35;
int k = 40;
bt_assert(j = 35 && k = 40 && m = 100);
}
bt_assert(j = 35 && k = 20 && m = 100);
}
function factorial(int x)
{
if x = 0 then return 0;
if x = 1 then return 1;
else return x * factorial(x - 1);
}
function fibonacci(int x)
{
if x = 0 then return 0;
if x = 1 then return 1;
else return fibonacci(x - 1) + fibonacci(x - 2);
}
function hanoi_init(int a; int b)
{
if b = 0
then return +empty+;
else return prepend(hanoi_init(a + 1, b - 1), a);
}
function hanoi_solve(int n; bgppath h_src; bgppath h_dst; bgppath h_aux; bool x; bool y)
{
# x -> return src or dst
# y -> print state
if n = 0 then { if x then return h_src; else return h_dst; }
bgppath tmp1 = hanoi_solve(n - 1, h_src, h_aux, h_dst, true, y);
bgppath tmp2 = hanoi_solve(n - 1, h_src, h_aux, h_dst, false, false);
h_src = tmp1;
h_aux = tmp2;
int v = h_src.first;
# bt_assert(h_dst = +empty+ || v < h_dst.first);
h_src = delete(h_src, v);
h_dst = prepend(h_dst, v);
if y then
print "move: ", v, " src: ", h_src, " dst:", h_dst, " aux:", h_aux;
tmp1 = hanoi_solve(n - 1, h_aux, h_dst, h_src, true, y);
tmp2 = hanoi_solve(n - 1, h_aux, h_dst, h_src, false, false);
h_aux = tmp1;
h_dst = tmp2;
if x then return h_src; else return h_dst;
}
function t_call_function()
bgppath h_src;
{
bt_assert(fifteen() = 15);
@ -1157,6 +1517,17 @@ function t_call_function()
bt_assert(callme(4, 4) = 16);
bt_assert(callme(7, 2) = 14);
bt_assert(callmeagain(1, 2, 3) = 6);
local_vars(5);
bt_assert(factorial(5) = 120);
bt_assert(factorial(10) = 3628800);
bt_assert(fibonacci(10) = 55);
bt_assert(fibonacci(20) = 6765);
h_src = hanoi_init(1, 6);
bt_assert(format(h_src) = "(path 1 2 3 4 5 6)");
bt_assert(hanoi_solve(6, h_src, +empty+, +empty+, false, false) = h_src);
}
bt_test_suite(t_call_function, "Testing calling functions");
@ -1243,6 +1614,7 @@ function __test2()
filter testf
int j;
bool t;
{
print "Heya, filtering route to ", net.ip, " prefixlen ", net.len, " source ", source;
print "This route was from ", from;
@ -1254,6 +1626,54 @@ int j;
rip_metric = 14;
unset(rip_metric);
preference = 1234;
test_ca_int1 = 42;
test_ca_ip2 = 1.3.5.7;
test_ca_quad3 = 2.4.6.8;
test_ca_bgppath4 = +empty+;
test_ca_clist5 = -empty-;
test_ca_eclist6 = --empty--;
test_ca_lclist7 = ---empty---;
igp_metric = 53;
babel_metric = 64;
t = defined(babel_router_id);
j = babel_seqno;
bgp_origin = ORIGIN_IGP;
bgp_path = +empty+;
bgp_next_hop = 3456:789a:bcde:f012::3456:789a;
bgp_med = 71;
bgp_local_pref = 942;
t = defined(bgp_atomic_aggr);
t = defined(bgp_aggregator);
bgp_community = -empty-;
bgp_originator_id = 9.7.5.3;
bgp_cluster_list = -empty-;
bgp_ext_community = --empty--;
t = defined(bgp_aigp);
bgp_large_community = ---empty---;
t = defined(bgp_mpls_label_stack);
ospf_metric1 = 64;
ospf_metric2 = 111;
ospf_tag = 654432;
radv_preference = RA_PREF_LOW;
radv_lifetime = 28;
rip_metric = 2;
rip_tag = 4;
t = defined(rip_from);
krt_source = 17;
krt_metric = 19;
# krt_lock_mtu = false;
# krt_lock_window = true;
# krt_lock_rtt = krt_lock_rttvar && krt_lock_sstresh || krt_lock_cwnd;
accept "ok I take that";
}
@ -1353,13 +1773,16 @@ filter vpn_filter
bt_assert(net.type != NET_IP6);
bt_assert(net.rd = 0:1:2);
bool b = false;
case (net.type) {
NET_IP4: print "IPV4";
NET_IP6: print "IPV6";
else: b = true;
}
bt_assert(b);
bt_check_assign(from, 10.20.30.40);
bt_check_assign(gw, 55.55.55.44);
# bt_check_assign(gw, 55.55.55.44);
bgp_community.add((3,5));
bgp_ext_community.add((ro, 135, 999));

View File

@ -38,12 +38,6 @@ protocol static {
print from;
from = 1.2.3.4;
print from;
print scope;
scope = SCOPE_HOST;
print scope;
if !(scope ~ [ SCOPE_HOST, SCOPE_SITE ]) then {
print "Failed in test";
}
preference = 15;
print preference;

View File

@ -19,10 +19,7 @@ static void
start_conf_env(void)
{
bt_bird_init();
pool *p = rp_new(&root_pool, "helper_pool");
linpool *l = lp_new_default(p);
cfg_mem = l;
cfg_mem = tmp_linpool;
}
static struct f_tree *

File diff suppressed because it is too large

View File

@ -14,9 +14,12 @@
#include "conf/conf.h"
#define TESTS_NUM 10
#define PREFIXES_NUM 10
#define PREFIXES_NUM 32
#define PREFIX_TESTS_NUM 10000
#define PREFIX_BENCH_NUM 100000000
#define TRIE_BUFFER_SIZE 1024
#define TEST_BUFFER_SIZE (1024*1024)
#define BIG_BUFFER_SIZE 10000
/* Wrapping structure for storing f_prefix structures in a list */
@ -31,146 +34,849 @@ xrandom(u32 max)
return (bt_random() % max);
}
static inline uint
get_exp_random(void)
{
uint r, n = 0;
for (r = bt_random(); r & 1; r = r >> 1)
n++;
return n;
}
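Counting trailing one-bits of a uniform random word gives a geometric distribution, P(n) = 2^-(n+1), which is what biases the generated prefix-length steps towards small values. A quick standalone check of that claim (hypothetical, using rand() in place of bt_random()):
#include <stdio.h>
#include <stdlib.h>
static unsigned exp_random(void)
{
 unsigned r, n = 0;
 for (r = (unsigned) rand(); r & 1; r >>= 1)
 n++;
 return n;
}
int main(void)
{
 unsigned hist[8] = { 0 };
 for (int i = 0; i < 1000000; i++)
 {
 unsigned n = exp_random();
 hist[n < 7 ? n : 7]++;
 }
 /* roughly 500000, 250000, 125000, ... is expected */
 for (int n = 0; n < 8; n++)
 printf("%d%s: %u\n", n, (n == 7) ? "+" : "", hist[n]);
 return 0;
}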
static int
is_prefix_included(list *prefixes, struct f_prefix *needle)
compare_prefixes(const void *a, const void *b)
{
return net_compare(&((const struct f_prefix *) a)->net,
&((const struct f_prefix *) b)->net);
}
static inline int
matching_ip4_nets(const net_addr_ip4 *a, const net_addr_ip4 *b)
{
ip4_addr cmask = ip4_mkmask(MIN(a->pxlen, b->pxlen));
return ip4_compare(ip4_and(a->prefix, cmask), ip4_and(b->prefix, cmask)) == 0;
}
static inline int
matching_ip6_nets(const net_addr_ip6 *a, const net_addr_ip6 *b)
{
ip6_addr cmask = ip6_mkmask(MIN(a->pxlen, b->pxlen));
return ip6_compare(ip6_and(a->prefix, cmask), ip6_and(b->prefix, cmask)) == 0;
}
static inline int
matching_nets(const net_addr *a, const net_addr *b)
{
if (a->type != b->type)
return 0;
return (a->type == NET_IP4) ?
matching_ip4_nets((const net_addr_ip4 *) a, (const net_addr_ip4 *) b) :
matching_ip6_nets((const net_addr_ip6 *) a, (const net_addr_ip6 *) b);
}
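In other words, two nets match here exactly when one is a subnet of the other, i.e. they agree on the shorter of the two prefix lengths: 10.1.0.0/16 and 10.1.2.0/24 match, while 10.1.0.0/16 and 10.2.0.0/24 do not.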
static int
is_prefix_included(list *prefixes, const net_addr *needle)
{
struct f_prefix_node *n;
WALK_LIST(n, *prefixes)
if (matching_nets(&n->prefix.net, needle) &&
(n->prefix.lo <= needle->pxlen) && (needle->pxlen <= n->prefix.hi))
{
ip6_addr cmask = ip6_mkmask(MIN(n->prefix.net.pxlen, needle->net.pxlen));
char buf[64];
bt_format_net(buf, 64, &n->prefix.net);
bt_debug("FOUND %s %d-%d\n", buf, n->prefix.lo, n->prefix.hi);
ip6_addr ip = net6_prefix(&n->prefix.net);
ip6_addr needle_ip = net6_prefix(&needle->net);
if ((ipa_compare(ipa_and(ip, cmask), ipa_and(needle_ip, cmask)) == 0) &&
(n->prefix.lo <= needle->net.pxlen) && (needle->net.pxlen <= n->prefix.hi))
{
bt_debug("FOUND\t" PRIip6 "/%d %d-%d\n", ARGip6(net6_prefix(&n->prefix.net)), n->prefix.net.pxlen, n->prefix.lo, n->prefix.hi);
return 1; /* OK */
}
}
return 0; /* FAIL */
}
static struct f_prefix
get_random_ip6_prefix(void)
static void
get_random_net(net_addr *net, int v6)
{
struct f_prefix p;
u8 pxlen = xrandom(120)+8;
ip6_addr ip6 = ip6_build(bt_random(),bt_random(),bt_random(),bt_random());
net_addr_ip6 net6 = NET_ADDR_IP6(ip6, pxlen);
p.net = *((net_addr*) &net6);
if (bt_random() % 2)
if (!v6)
{
p.lo = 0;
p.hi = p.net.pxlen;
uint pxlen = xrandom(24)+8;
ip4_addr ip4 = ip4_from_u32((u32) bt_random());
net_fill_ip4(net, ip4_and(ip4, ip4_mkmask(pxlen)), pxlen);
}
else
{
p.lo = p.net.pxlen;
p.hi = net_max_prefix_length[p.net.type];
uint pxlen = xrandom(120)+8;
ip6_addr ip6 = ip6_build(bt_random(), bt_random(), bt_random(), bt_random());
net_fill_ip6(net, ip6_and(ip6, ip6_mkmask(pxlen)), pxlen);
}
return p;
}
static void
generate_random_ipv6_prefixes(list *prefixes)
get_random_prefix(struct f_prefix *px, int v6, int tight)
{
int i;
for (i = 0; i < PREFIXES_NUM; i++)
get_random_net(&px->net, v6);
if (tight)
{
struct f_prefix f = get_random_ip6_prefix();
struct f_prefix_node *px = calloc(1, sizeof(struct f_prefix_node));
px->prefix = f;
bt_debug("ADD\t" PRIip6 "/%d %d-%d\n", ARGip6(net6_prefix(&px->prefix.net)), px->prefix.net.pxlen, px->prefix.lo, px->prefix.hi);
add_tail(prefixes, &px->n);
px->lo = px->hi = px->net.pxlen;
}
else if (bt_random() % 2)
{
px->lo = 0;
px->hi = px->net.pxlen;
}
else
{
px->lo = px->net.pxlen;
px->hi = net_max_prefix_length[px->net.type];
}
}
static void
get_random_ip4_subnet(net_addr_ip4 *net, const net_addr_ip4 *src, int pxlen)
{
*net = NET_ADDR_IP4(ip4_and(src->prefix, ip4_mkmask(pxlen)), pxlen);
if (pxlen > src->pxlen)
{
ip4_addr rnd = ip4_from_u32((u32) bt_random());
ip4_addr mask = ip4_xor(ip4_mkmask(src->pxlen), ip4_mkmask(pxlen));
net->prefix = ip4_or(net->prefix, ip4_and(rnd, mask));
}
}
static void
get_random_ip6_subnet(net_addr_ip6 *net, const net_addr_ip6 *src, int pxlen)
{
*net = NET_ADDR_IP6(ip6_and(src->prefix, ip6_mkmask(pxlen)), pxlen);
if (pxlen > src->pxlen)
{
ip6_addr rnd = ip6_build(bt_random(), bt_random(), bt_random(), bt_random());
ip6_addr mask = ip6_xor(ip6_mkmask(src->pxlen), ip6_mkmask(pxlen));
net->prefix = ip6_or(net->prefix, ip6_and(rnd, mask));
}
}
static void
get_random_subnet(net_addr *net, const net_addr *src, int pxlen)
{
if (src->type == NET_IP4)
get_random_ip4_subnet((net_addr_ip4 *) net, (const net_addr_ip4 *) src, pxlen);
else
get_random_ip6_subnet((net_addr_ip6 *) net, (const net_addr_ip6 *) src, pxlen);
}
static void
get_inner_net(net_addr *net, const struct f_prefix *src)
{
int pxlen, step;
if (bt_random() % 2)
{
step = get_exp_random();
step = MIN(step, src->hi - src->lo);
pxlen = (bt_random() % 2) ? (src->lo + step) : (src->hi - step);
}
else
pxlen = src->lo + bt_random() % (src->hi - src->lo + 1);
get_random_subnet(net, &src->net, pxlen);
}
static void
swap_random_bits_ip4(net_addr_ip4 *net, int num)
{
for (int i = 0; i < num; i++)
{
ip4_addr swap = IP4_NONE;
ip4_setbit(&swap, bt_random() % net->pxlen);
net->prefix = ip4_xor(net->prefix, swap);
}
}
static void
swap_random_bits_ip6(net_addr_ip6 *net, int num)
{
for (int i = 0; i < num; i++)
{
ip6_addr swap = IP6_NONE;
ip6_setbit(&swap, bt_random() % net->pxlen);
net->prefix = ip6_xor(net->prefix, swap);
}
}
static void
swap_random_bits(net_addr *net, int num)
{
if (net->type == NET_IP4)
swap_random_bits_ip4((net_addr_ip4 *) net, num);
else
swap_random_bits_ip6((net_addr_ip6 *) net, num);
}
static void
get_outer_net(net_addr *net, const struct f_prefix *src)
{
int pxlen, step;
int inside = 0;
int max = net_max_prefix_length[src->net.type];
if ((src->lo > 0) && (bt_random() % 3))
{
step = 1 + get_exp_random();
step = MIN(step, src->lo);
pxlen = src->lo - step;
}
else if ((src->hi < max) && (bt_random() % 2))
{
step = 1 + get_exp_random();
step = MIN(step, max - src->hi);
pxlen = src->hi + step;
}
else
{
pxlen = src->lo + bt_random() % (src->hi - src->lo + 1);
inside = 1;
}
get_random_subnet(net, &src->net, pxlen);
/* Perhaps swap some bits in prefix */
if ((net->pxlen > 0) && (inside || (bt_random() % 4)))
swap_random_bits(net, 1 + get_exp_random());
}
static list *
make_random_prefix_list(int num, int v6, int tight)
{
list *prefixes = lp_allocz(tmp_linpool, sizeof(struct f_prefix_node));
init_list(prefixes);
for (int i = 0; i < num; i++)
{
struct f_prefix_node *px = lp_allocz(tmp_linpool, sizeof(struct f_prefix_node));
get_random_prefix(&px->prefix, v6, tight);
add_tail(prefixes, &px->n);
char buf[64];
bt_format_net(buf, 64, &px->prefix.net);
bt_debug("ADD %s{%d,%d}\n", buf, px->prefix.lo, px->prefix.hi);
}
return prefixes;
}
static struct f_trie *
make_trie_from_prefix_list(list *prefixes)
{
struct f_trie *trie = f_new_trie(tmp_linpool, 0);
struct f_prefix_node *n;
WALK_LIST(n, *prefixes)
trie_add_prefix(trie, &n->prefix.net, n->prefix.lo, n->prefix.hi);
return trie;
}
/*
* Read sequence of prefixes from file handle and return prefix list.
* Each prefix is on one line, sequence terminated by empty line or eof.
* Arg @plus means prefix should include all longer ones.
*/
static list *
read_prefix_list(FILE *f, int v6, int plus)
{
ASSERT(!v6);
uint a0, a1, a2, a3, pl;
char s[32];
int n;
list *pxlist = lp_allocz(tmp_linpool, sizeof(struct f_prefix_node));
init_list(pxlist);
errno = 0;
while (fgets(s, 32, f))
{
if (s[0] == '\n')
return pxlist;
n = sscanf(s, "%u.%u.%u.%u/%u", &a0, &a1, &a2, &a3, &pl);
if (n != 5)
bt_abort_msg("Invalid content of trie_data");
struct f_prefix_node *px = lp_allocz(tmp_linpool, sizeof(struct f_prefix_node));
net_fill_ip4(&px->prefix.net, ip4_build(a0, a1, a2, a3), pl);
px->prefix.lo = pl;
px->prefix.hi = plus ? IP4_MAX_PREFIX_LENGTH : pl;
add_tail(pxlist, &px->n);
char buf[64];
bt_format_net(buf, 64, &px->prefix.net);
bt_debug("ADD %s{%d,%d}\n", buf, px->prefix.lo, px->prefix.hi);
}
bt_syscall(errno, "fgets()");
return EMPTY_LIST(*pxlist) ? NULL : pxlist;
}
/*
* Open file, read multiple sequences of prefixes from it. Fill @data with
* prefix lists and @trie with generated tries. Return number of sequences /
* tries. Both the prefix lists and the tries are allocated from tmp_linpool.
* Arg @plus means prefix should include all longer ones.
*/
static int
t_match_net(void)
read_prefix_file(const char *filename, int plus,
list *data[], struct f_trie *trie[])
{
FILE *f = fopen(filename, "r");
bt_syscall(!f, "fopen(%s)", filename);
int n = 0;
list *pxlist;
while (pxlist = read_prefix_list(f, 0, plus))
{
data[n] = pxlist;
trie[n] = make_trie_from_prefix_list(pxlist);
bt_debug("NEXT\n");
n++;
}
fclose(f);
bt_debug("DONE reading %d tries\n", n);
return n;
}
/*
* Select random subset of @dn prefixes from the @sn prefix lists in @src,
* and store them to buffer @dst (of size @dn). Prefixes may be chosen multiple
* times. Randomize order of prefixes in @dst buffer.
*/
static void
select_random_prefix_subset(list *src[], net_addr dst[], int sn, int dn)
{
int pn = 0;
if (!dn)
return;
/* Compute total prefix number */
for (int i = 0; i < sn; i++)
pn += list_length(src[i]);
/* Chance of selecting a prefix */
int rnd = (pn / dn) + 10;
int n = 0;
/* Iterate indefinitely over src array */
for (int i = 0; 1; i++, i = (i < sn) ? i : 0)
{
struct f_prefix_node *px;
WALK_LIST(px, *src[i])
{
if (xrandom(rnd) != 0)
continue;
net_copy(&dst[n], &px->prefix.net);
n++;
/* We have enough */
if (n == dn)
goto done;
}
}
done:
/* Shuffle networks */
for (int i = 0; i < dn; i++)
{
int j = xrandom(dn);
if (i == j)
continue;
net_addr tmp;
net_copy(&tmp, &dst[i]);
net_copy(&dst[i], &dst[j]);
net_copy(&dst[j], &tmp);
}
}
/* Fill @dst buffer with @dn randomly generated /32 prefixes */
static void
make_random_addresses(net_addr dst[], int dn)
{
for (int i = 0; i < dn; i++)
net_fill_ip4(&dst[i], ip4_from_u32((u32) bt_random()), IP4_MAX_PREFIX_LENGTH);
}
static void
test_match_net(list *prefixes, struct f_trie *trie, const net_addr *net)
{
char buf[64];
bt_format_net(buf, 64, net);
bt_debug("TEST %s\n", buf);
int should_be = is_prefix_included(prefixes, net);
int is_there = trie_match_net(trie, net);
bt_assert_msg(should_be == is_there, "Prefix %s %s match", buf,
(should_be ? "should" : "should not"));
}
static int
t_match_random_net(void)
{
bt_bird_init();
bt_config_parse(BT_CONFIG_SIMPLE);
uint round;
for (round = 0; round < TESTS_NUM; round++)
int v6 = 0;
for (int round = 0; round < TESTS_NUM; round++)
{
list prefixes; /* of structs f_extended_prefix */
init_list(&prefixes);
struct f_trie *trie = f_new_trie(config->mem, 0);
list *prefixes = make_random_prefix_list(PREFIXES_NUM, v6, 0);
struct f_trie *trie = make_trie_from_prefix_list(prefixes);
generate_random_ipv6_prefixes(&prefixes);
struct f_prefix_node *n;
WALK_LIST(n, prefixes)
for (int i = 0; i < PREFIX_TESTS_NUM; i++)
{
trie_add_prefix(trie, &n->prefix.net, n->prefix.lo, n->prefix.hi);
net_addr net;
get_random_net(&net, v6);
test_match_net(prefixes, trie, &net);
}
int i;
for (i = 0; i < PREFIX_TESTS_NUM; i++)
{
struct f_prefix f = get_random_ip6_prefix();
bt_debug("TEST\t" PRIip6 "/%d\n", ARGip6(net6_prefix(&f.net)), f.net.pxlen);
int should_be = is_prefix_included(&prefixes, &f);
int is_there = trie_match_net(trie, &f.net);
bt_assert_msg(should_be == is_there, "Prefix " PRIip6 "/%d %s", ARGip6(net6_prefix(&f.net)), f.net.pxlen, (should_be ? "should be found in trie" : "should not be found in trie"));
}
struct f_prefix_node *nxt;
WALK_LIST_DELSAFE(n, nxt, prefixes)
{
free(n);
}
v6 = !v6;
tmp_flush();
}
bt_bird_cleanup();
return 1;
}
static int
t_match_inner_net(void)
{
bt_bird_init();
bt_config_parse(BT_CONFIG_SIMPLE);
int v6 = 0;
for (int round = 0; round < TESTS_NUM; round++)
{
list *prefixes = make_random_prefix_list(PREFIXES_NUM, v6, 0);
struct f_trie *trie = make_trie_from_prefix_list(prefixes);
struct f_prefix_node *n = HEAD(*prefixes);
for (int i = 0; i < PREFIX_TESTS_NUM; i++)
{
net_addr net;
get_inner_net(&net, &n->prefix);
test_match_net(prefixes, trie, &net);
n = NODE_VALID(NODE_NEXT(n)) ? NODE_NEXT(n) : HEAD(*prefixes);
}
v6 = !v6;
tmp_flush();
}
bt_bird_cleanup();
return 1;
}
static int
t_match_outer_net(void)
{
bt_bird_init();
bt_config_parse(BT_CONFIG_SIMPLE);
int v6 = 0;
for (int round = 0; round < TESTS_NUM; round++)
{
list *prefixes = make_random_prefix_list(PREFIXES_NUM, v6, 0);
struct f_trie *trie = make_trie_from_prefix_list(prefixes);
struct f_prefix_node *n = HEAD(*prefixes);
for (int i = 0; i < PREFIX_TESTS_NUM; i++)
{
net_addr net;
get_outer_net(&net, &n->prefix);
test_match_net(prefixes, trie, &net);
n = NODE_VALID(NODE_NEXT(n)) ? NODE_NEXT(n) : HEAD(*prefixes);
}
v6 = !v6;
tmp_flush();
}
v6 = !v6;
bt_bird_cleanup();
return 1;
}
/*
* Read prefixes from @filename, build set of tries, prepare test data and do
* PREFIX_BENCH_NUM trie lookups. With @plus = 0, use random subset of known
* prefixes as test data, with @plus = 1, use randomly generated /32 prefixes
* as test data.
*/
static int
benchmark_trie_dataset(const char *filename, int plus)
{
int n = 0;
list *data[TRIE_BUFFER_SIZE];
struct f_trie *trie[TRIE_BUFFER_SIZE];
net_addr *nets;
bt_reset_suite_case_timer();
bt_log_suite_case_result(1, "Reading %s", filename, n);
n = read_prefix_file(filename, plus, data, trie);
bt_log_suite_case_result(1, "Read prefix data, %d lists, ", n);
size_t trie_size = rmemsize(tmp_linpool).effective * 1000 / (1024*1024);
bt_log_suite_case_result(1, "Trie size %u.%03u MB",
(uint) (trie_size / 1000), (uint) (trie_size % 1000));
int t = PREFIX_BENCH_NUM / n;
int tb = MIN(t, TEST_BUFFER_SIZE);
nets = tmp_alloc(tb * sizeof(net_addr));
if (!plus)
select_random_prefix_subset(data, nets, n, tb);
else
make_random_addresses(nets, tb);
bt_log_suite_case_result(1, "Make test data, %d (%d) tests", t, tb);
bt_reset_suite_case_timer();
/*
int match = 0;
for (int i = 0; i < t; i++)
for (int j = 0; j < n; j++)
test_match_net(data[j], trie[j], &nets[i]);
*/
int match = 0;
for (int i = 0; i < t; i++)
for (int j = 0; j < n; j++)
if (trie_match_net(trie[j], &nets[i % TEST_BUFFER_SIZE]))
match++;
bt_log_suite_case_result(1, "Matching done, %d / %d matches", match, t * n);
tmp_flush();
return 1;
}
static int UNUSED
t_bench_trie_datasets_subset(void)
{
bt_bird_init();
bt_config_parse(BT_CONFIG_SIMPLE);
/* Specific datasets, not included */
benchmark_trie_dataset("trie-data-bgp-1", 0);
benchmark_trie_dataset("trie-data-bgp-10", 0);
benchmark_trie_dataset("trie-data-bgp-100", 0);
benchmark_trie_dataset("trie-data-bgp-1000", 0);
bt_bird_cleanup();
return 1;
}
static int UNUSED
t_bench_trie_datasets_random(void)
{
bt_bird_init();
bt_config_parse(BT_CONFIG_SIMPLE);
/* Specific datasets, not included */
benchmark_trie_dataset("trie-data-bgp-1", 1);
benchmark_trie_dataset("trie-data-bgp-10", 1);
benchmark_trie_dataset("trie-data-bgp-100", 1);
benchmark_trie_dataset("trie-data-bgp-1000", 1);
bt_bird_cleanup();
return 1;
}
static int
t_trie_same(void)
{
bt_bird_init();
bt_config_parse(BT_CONFIG_SIMPLE);
int round;
for (round = 0; round < TESTS_NUM*4; round++)
int v6 = 0;
for (int round = 0; round < TESTS_NUM*4; round++)
{
struct f_trie * trie1 = f_new_trie(config->mem, 0);
struct f_trie * trie2 = f_new_trie(config->mem, 0);
list prefixes; /* a list of f_extended_prefix structures */
init_list(&prefixes);
int i;
for (i = 0; i < 100; i++)
generate_random_ipv6_prefixes(&prefixes);
list *prefixes = make_random_prefix_list(100 * PREFIXES_NUM, v6, 0);
struct f_trie *trie1 = f_new_trie(tmp_linpool, 0);
struct f_trie *trie2 = f_new_trie(tmp_linpool, 0);
struct f_prefix_node *n;
WALK_LIST(n, prefixes)
{
WALK_LIST(n, *prefixes)
trie_add_prefix(trie1, &n->prefix.net, n->prefix.lo, n->prefix.hi);
}
WALK_LIST_BACKWARDS(n, prefixes)
{
WALK_LIST_BACKWARDS(n, *prefixes)
trie_add_prefix(trie2, &n->prefix.net, n->prefix.lo, n->prefix.hi);
}
bt_assert(trie_same(trie1, trie2));
struct f_prefix_node *nxt;
WALK_LIST_DELSAFE(n, nxt, prefixes)
{
free(n);
}
v6 = !v6;
tmp_flush();
}
bt_bird_cleanup();
return 1;
}
static inline void
log_networks(const net_addr *a, const net_addr *b)
{
if (bt_verbose >= BT_VERBOSE_ABSOLUTELY_ALL)
{
char buf0[64];
char buf1[64];
bt_format_net(buf0, 64, a);
bt_format_net(buf1, 64, b);
bt_debug("Found %s expected %s\n", buf0, buf1);
}
}
static int
t_trie_walk(void)
{
bt_bird_init();
bt_config_parse(BT_CONFIG_SIMPLE);
for (int round = 0; round < TESTS_NUM*8; round++)
{
int level = round / TESTS_NUM;
int v6 = level % 2;
int num = PREFIXES_NUM * (int[]){1, 10, 100, 1000}[level / 2];
int pos = 0, end = 0;
list *prefixes = make_random_prefix_list(num, v6, 1);
struct f_trie *trie = make_trie_from_prefix_list(prefixes);
struct f_prefix *pxset = malloc((num + 1) * sizeof(struct f_prefix));
struct f_prefix_node *n;
WALK_LIST(n, *prefixes)
pxset[pos++] = n->prefix;
memset(&pxset[pos], 0, sizeof (struct f_prefix));
qsort(pxset, num, sizeof(struct f_prefix), compare_prefixes);
/* Full walk */
bt_debug("Full walk (round %d, %d nets)\n", round, num);
pos = 0;
uint pxc = 0;
TRIE_WALK(trie, net, NULL)
{
log_networks(&net, &pxset[pos].net);
bt_assert(net_equal(&net, &pxset[pos].net));
/* Skip possible duplicates */
while (net_equal(&pxset[pos].net, &pxset[pos + 1].net))
pos++;
pos++;
pxc++;
}
TRIE_WALK_END;
bt_assert(pos == num);
bt_assert(pxc == trie->prefix_count);
bt_debug("Full walk done\n");
/* Prepare net for subnet walk - start with random prefix */
pos = bt_random() % num;
end = pos + (int[]){2, 2, 3, 4}[level / 2];
end = MIN(end, num);
struct f_prefix from = pxset[pos];
/* Find a common superprefix to several subsequent prefixes */
for (; pos < end; pos++)
{
if (net_equal(&from.net, &pxset[pos].net))
continue;
int common = !v6 ?
ip4_pxlen(net4_prefix(&from.net), net4_prefix(&pxset[pos].net)) :
ip6_pxlen(net6_prefix(&from.net), net6_prefix(&pxset[pos].net));
from.net.pxlen = MIN(from.net.pxlen, common);
if (!v6)
((net_addr_ip4 *) &from.net)->prefix =
ip4_and(net4_prefix(&from.net), net4_prefix(&pxset[pos].net));
else
((net_addr_ip6 *) &from.net)->prefix =
ip6_and(net6_prefix(&from.net), net6_prefix(&pxset[pos].net));
}
/* Fix irrelevant bits */
if (!v6)
((net_addr_ip4 *) &from.net)->prefix =
ip4_and(net4_prefix(&from.net), ip4_mkmask(net4_pxlen(&from.net)));
else
((net_addr_ip6 *) &from.net)->prefix =
ip6_and(net6_prefix(&from.net), ip6_mkmask(net6_pxlen(&from.net)));
/* Find initial position for final prefix */
for (pos = 0; pos < num; pos++)
if (compare_prefixes(&pxset[pos], &from) >= 0)
break;
int p0 = pos;
char buf0[64];
bt_format_net(buf0, 64, &from.net);
bt_debug("Subnet walk for %s (round %d, %d nets)\n", buf0, round, num);
/* Subnet walk */
TRIE_WALK(trie, net, &from.net)
{
log_networks(&net, &pxset[pos].net);
bt_assert(net_equal(&net, &pxset[pos].net));
bt_assert(net_in_netX(&net, &from.net));
/* Skip possible duplicates */
while (net_equal(&pxset[pos].net, &pxset[pos + 1].net))
pos++;
pos++;
}
TRIE_WALK_END;
bt_assert((pos == num) || !net_in_netX(&pxset[pos].net, &from.net));
bt_debug("Subnet walk done for %s (found %d nets)\n", buf0, pos - p0);
tmp_flush();
}
bt_bird_cleanup();
return 1;
}
static int
find_covering_nets(struct f_prefix *prefixes, int num, const net_addr *net, net_addr *found)
{
struct f_prefix key;
net_addr *n = &key.net;
int found_num = 0;
net_copy(n, net);
while (1)
{
struct f_prefix *px =
bsearch(&key, prefixes, num, sizeof(struct f_prefix), compare_prefixes);
if (px)
{
net_copy(&found[found_num], n);
found_num++;
}
if (n->pxlen == 0)
return found_num;
n->pxlen--;
if (n->type == NET_IP4)
ip4_clrbit(&((net_addr_ip4 *) n)->prefix, n->pxlen);
else
ip6_clrbit(&((net_addr_ip6 *) n)->prefix, n->pxlen);
}
}
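For instance, a lookup for 240.0.0.0/4 probes 240.0.0.0/4, 224.0.0.0/3, 192.0.0.0/2, 128.0.0.0/1 and finally 0.0.0.0/0, i.e. exactly pxlen + 1 bsearch() calls per needle, collecting every prefix in the sorted array that covers it.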
static int
t_trie_walk_to_root(void)
{
bt_bird_init();
bt_config_parse(BT_CONFIG_SIMPLE);
for (int round = 0; round < TESTS_NUM * 4; round++)
{
int level = round / TESTS_NUM;
int v6 = level % 2;
int num = PREFIXES_NUM * (int[]){32, 512}[level / 2];
int pos = 0;
int st = 0, sn = 0, sm = 0;
list *prefixes = make_random_prefix_list(num, v6, 1);
struct f_trie *trie = make_trie_from_prefix_list(prefixes);
struct f_prefix *pxset = malloc((num + 1) * sizeof(struct f_prefix));
struct f_prefix_node *pxn;
WALK_LIST(pxn, *prefixes)
pxset[pos++] = pxn->prefix;
memset(&pxset[pos], 0, sizeof (struct f_prefix));
qsort(pxset, num, sizeof(struct f_prefix), compare_prefixes);
int i;
for (i = 0; i < (PREFIX_TESTS_NUM / 10); i++)
{
net_addr from;
get_random_net(&from, v6);
net_addr found[129];
int found_num = find_covering_nets(pxset, num, &from, found);
int n = 0;
if (bt_verbose >= BT_VERBOSE_ABSOLUTELY_ALL)
{
char buf[64];
bt_format_net(buf, 64, &from);
bt_debug("Lookup for %s (expect %d)\n", buf, found_num);
}
/* Walk to root, separate for IPv4 and IPv6 */
if (!v6)
{
TRIE_WALK_TO_ROOT_IP4(trie, (net_addr_ip4 *) &from, net)
{
log_networks((net_addr *) &net, &found[n]);
bt_assert((n < found_num) && net_equal((net_addr *) &net, &found[n]));
n++;
}
TRIE_WALK_TO_ROOT_END;
}
else
{
TRIE_WALK_TO_ROOT_IP6(trie, (net_addr_ip6 *) &from, net)
{
log_networks((net_addr *) &net, &found[n]);
bt_assert((n < found_num) && net_equal((net_addr *) &net, &found[n]));
n++;
}
TRIE_WALK_TO_ROOT_END;
}
bt_assert(n == found_num);
/* Stats */
st += n;
sn += !!n;
sm = MAX(sm, n);
}
bt_debug("Success in %d / %d, sum %d, max %d\n", sn, i, st, sm);
tmp_flush();
}
bt_bird_cleanup();
return 1;
}
@ -179,8 +885,15 @@ main(int argc, char *argv[])
{
bt_init(argc, argv);
bt_test_suite(t_match_net, "Testing random prefix matching");
bt_test_suite(t_match_random_net, "Testing random prefix matching");
bt_test_suite(t_match_inner_net, "Testing random inner prefix matching");
bt_test_suite(t_match_outer_net, "Testing random outer prefix matching");
bt_test_suite(t_trie_same, "A trie filled forward should be same with a trie filled backward.");
bt_test_suite(t_trie_walk, "Testing TRIE_WALK() on random tries");
bt_test_suite(t_trie_walk_to_root, "Testing TRIE_WALK_TO_ROOT() on random tries");
// bt_test_suite(t_bench_trie_datasets_subset, "Benchmark tries from datasets by random subset of nets");
// bt_test_suite(t_bench_trie_datasets_random, "Benchmark tries from datasets by generated addresses");
return bt_exit_value();
}


@ -1,7 +1,7 @@
src := bitmap.c bitops.c blake2s.c blake2b.c checksum.c event.c flowspec.c idm.c ip.c lists.c mac.c md5.c mempool.c net.c patmatch.c printf.c resource.c sha1.c sha256.c sha512.c slab.c slists.c strtoul.c tbf.c timer.c xmalloc.c
src := a-path.c a-set.c bitmap.c bitops.c blake2s.c blake2b.c checksum.c event.c flowspec.c idm.c ip.c lists.c mac.c md5.c mempool.c net.c patmatch.c printf.c rcu.c resource.c sha1.c sha256.c sha512.c slab.c slists.c strtoul.c tbf.c timer.c xmalloc.c
obj := $(src-o-files)
$(all-daemon)
tests_src := bitmap_test.c heap_test.c buffer_test.c event_test.c flowspec_test.c bitops_test.c patmatch_test.c fletcher16_test.c slist_test.c checksum_test.c lists_test.c mac_test.c ip_test.c hash_test.c printf_test.c
tests_src := a-set_test.c a-path_test.c bitmap_test.c heap_test.c buffer_test.c event_test.c flowspec_test.c bitops_test.c patmatch_test.c fletcher16_test.c slist_test.c checksum_test.c lists_test.c mac_test.c ip_test.c hash_test.c printf_test.c slab_test.c type_test.c
tests_targets := $(tests_targets) $(tests-target-files)
tests_objs := $(tests_objs) $(src-o-files)


@ -8,8 +8,8 @@
*/
#include "nest/bird.h"
#include "nest/route.h"
#include "nest/attrs.h"
#include "nest/rt.h"
#include "lib/attrs.h"
#include "lib/resource.h"
#include "lib/unaligned.h"
#include "lib/string.h"
@ -591,7 +591,7 @@ as_path_match_set(const struct adata *path, const struct f_tree *set)
p += 2;
for (i=0; i<n; i++)
{
struct f_val v = {T_INT, .val.i = get_as(p)};
struct f_val v = { .type = T_INT, .val.i = get_as(p)};
if (find_tree(set, &v))
return 1;
p += BS;
@ -602,8 +602,10 @@ as_path_match_set(const struct adata *path, const struct f_tree *set)
}
const struct adata *
as_path_filter(struct linpool *pool, const struct adata *path, const struct f_tree *set, u32 key, int pos)
as_path_filter(struct linpool *pool, const struct adata *path, const struct f_val *set, int pos)
{
ASSERT((set->type == T_SET) || (set->type == T_INT));
if (!path)
return NULL;
@ -629,13 +631,13 @@ as_path_filter(struct linpool *pool, const struct adata *path, const struct f_tr
u32 as = get_as(p);
int match;
if (set)
if (set->type == T_SET)
{
struct f_val v = {T_INT, .val.i = as};
match = !!find_tree(set, &v);
struct f_val v = { .type = T_INT, .val.i = as};
match = !!find_tree(set->val.t, &v);
}
else
match = (as == key);
else /* T_INT */
match = (as == set->val.i);
if (match == pos)
{
@ -667,6 +669,35 @@ as_path_filter(struct linpool *pool, const struct adata *path, const struct f_tr
return res;
}
int
as_path_walk(const struct adata *path, uint *pos, uint *val)
{
if (!path)
return 0;
const u8 *p = path->data;
const u8 *q = p + path->length;
uint n, x = *pos;
while (p < q)
{
n = p[1];
p += 2;
if (x < n)
{
*val = get_as(p + x * BS);
*pos += 1;
return 1;
}
p += n * BS;
x -= n;
}
return 0;
}
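A minimal usage sketch of the new walker (not part of the patch; `path` stands for any adata built by as_path_prepend() and the log call is illustrative only):
uint pos = 0, asn;
while (as_path_walk(path, &pos, &asn))
  log(L_INFO "AS path member %u", asn);   /* visits every ASN, keeping position in `pos` */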
struct pm_pos
{


@ -9,9 +9,10 @@
#include "test/birdtest.h"
#include "test/bt-utils.h"
#include "nest/route.h"
#include "nest/attrs.h"
#include "nest/rt.h"
#include "lib/attrs.h"
#include "lib/resource.h"
#include "filter/data.h"
#define TESTS_NUM 30
#define AS_PATH_LENGTH 1000
@ -23,8 +24,6 @@
static int
t_as_path_match(void)
{
resource_init();
int round;
for (round = 0; round < TESTS_NUM; round++)
{
@ -32,14 +31,13 @@ t_as_path_match(void)
struct adata *as_path = &empty_as_path;
u32 first_prepended, last_prepended;
first_prepended = last_prepended = 0;
struct linpool *lp = lp_new_default(&root_pool);
struct f_path_mask *mask = alloca(sizeof(struct f_path_mask) + AS_PATH_LENGTH * sizeof(struct f_path_mask_item));
mask->len = AS_PATH_LENGTH;
for (int i = AS_PATH_LENGTH - 1; i >= 0; i--)
{
u32 val = bt_random();
as_path = as_path_prepend(lp, as_path, val);
as_path = as_path_prepend(tmp_linpool, as_path, val);
bt_debug("Prepending ASN: %10u \n", val);
if (i == 0)
@ -61,7 +59,7 @@ t_as_path_match(void)
bt_assert(as_path_get_last(as_path, &asn));
bt_assert_msg(asn == first_prepended, "as_path_get_last() should return the first prepended ASN");
rfree(lp);
tmp_flush();
}
return 1;
@ -70,16 +68,13 @@ t_as_path_match(void)
static int
t_path_format(void)
{
resource_init();
struct adata empty_as_path = {};
struct adata *as_path = &empty_as_path;
struct linpool *lp = lp_new_default(&root_pool);
uint i;
for (i = 4294967285; i <= 4294967294; i++)
{
as_path = as_path_prepend(lp, as_path, i);
as_path = as_path_prepend(tmp_linpool, as_path, i);
bt_debug("Prepending ASN: %10u \n", i);
}
@ -97,7 +92,7 @@ t_path_format(void)
as_path_format(as_path, buf2, SMALL_BUFFER_SIZE);
bt_assert_msg(strcmp(buf2, "4294967294 42...") == 0, "Small Buffer(%zu): '%s'", strlen(buf2), buf2);
rfree(lp);
tmp_flush();
return 1;
}
@ -116,11 +111,8 @@ count_asn_in_array(const u32 *array, u32 asn)
static int
t_path_include(void)
{
resource_init();
struct adata empty_as_path = {};
struct adata *as_path = &empty_as_path;
struct linpool *lp = lp_new_default(&root_pool);
u32 as_nums[AS_PATH_LENGTH] = {};
int i;
@ -128,7 +120,7 @@ t_path_include(void)
{
u32 val = bt_random();
as_nums[i] = val;
as_path = as_path_prepend(lp, as_path, val);
as_path = as_path_prepend(tmp_linpool, as_path, val);
}
for (i = 0; i < AS_PATH_LENGTH; i++)
@ -136,8 +128,9 @@ t_path_include(void)
int counts_of_contains = count_asn_in_array(as_nums, as_nums[i]);
bt_assert_msg(as_path_contains(as_path, as_nums[i], counts_of_contains), "AS Path should contain %d occurrences of number %d", counts_of_contains, as_nums[i]);
bt_assert(as_path_filter(lp, as_path, NULL, as_nums[i], 0) != NULL);
bt_assert(as_path_filter(lp, as_path, NULL, as_nums[i], 1) != NULL);
struct f_val v = { .type = T_INT, .val.i = as_nums[i] };
bt_assert(as_path_filter(tmp_linpool, as_path, &v, 0) != NULL);
bt_assert(as_path_filter(tmp_linpool, as_path, &v, 1) != NULL);
}
for (i = 0; i < 10000; i++)
@ -152,7 +145,7 @@ t_path_include(void)
bt_assert_msg(result == 0, "As path should not contain the number %u", test_val);
}
rfree(lp);
tmp_flush();
return 1;
}
@ -161,16 +154,13 @@ t_path_include(void)
static int
t_as_path_converting(void)
{
resource_init();
struct adata empty_as_path = {};
struct adata *as_path = &empty_as_path;
struct linpool *lp = lp_new_default(&root_pool);
#define AS_PATH_LENGTH_FOR_CONVERTING_TEST 10
int i;
for (i = 0; i < AS_PATH_LENGTH_FOR_CONVERTING_TEST; i++)
as_path = as_path_prepend(lp, as_path, i);
as_path = as_path_prepend(tmp_linpool, as_path, i);
bt_debug("data length: %u \n", as_path->length);
@ -204,13 +194,10 @@ t_as_path_converting(void)
}
#endif
void resource_sys_init(void);
int
main(int argc, char *argv[])
{
bt_init(argc, argv);
resource_sys_init();
bt_test_suite(t_as_path_match, "Testing AS path matching and some a-path utilities.");
bt_test_suite(t_path_format, "Testing formatting AS path into byte buffer");


@ -10,8 +10,8 @@
#include <stdlib.h>
#include "nest/bird.h"
#include "nest/route.h"
#include "nest/attrs.h"
#include "nest/rt.h"
#include "lib/attrs.h"
#include "lib/resource.h"
#include "lib/string.h"
@ -516,6 +516,48 @@ int_set_sort(struct linpool *pool, const struct adata *src)
return dst;
}
int
int_set_min(const struct adata *list, u32 *val)
{
if (!list)
return 0;
u32 *l = (u32 *) list->data;
int len = int_set_get_size(list);
int i;
if (len < 1)
return 0;
*val = *l++;
for (i = 1; i < len; i++, l++)
if (int_set_cmp(val, l) > 0)
*val = *l;
return 1;
}
int
int_set_max(const struct adata *list, u32 *val)
{
if (!list)
return 0;
u32 *l = (u32 *) list->data;
int len = int_set_get_size(list);
int i;
if (len < 1)
return 0;
*val = *l++;
for (i = 1; i < len; i++, l++)
if (int_set_cmp(val, l) < 0)
*val = *l;
return 1;
}
static int
ec_set_cmp(const void *X, const void *Y)
@ -541,6 +583,50 @@ ec_set_sort_x(struct adata *set)
qsort(set->data, set->length / 8, 8, ec_set_cmp);
}
int
ec_set_min(const struct adata *list, u64 *val)
{
if (!list)
return 0;
u32 *l = int_set_get_data(list);
int len = int_set_get_size(list);
int i;
if (len < 1)
return 0;
u32 *res = l; l += 2;
for (i = 2; i < len; i += 2, l += 2)
if (ec_set_cmp(res, l) > 0)
res = l;
*val = ec_generic(res[0], res[1]);
return 1;
}
int
ec_set_max(const struct adata *list, u64 *val)
{
if (!list)
return 0;
u32 *l = int_set_get_data(list);
int len = int_set_get_size(list);
int i;
if (len < 1)
return 0;
u32 *res = l; l += 2;
for (i = 2; i < len; i += 2, l += 2)
if (ec_set_cmp(res, l) < 0)
res = l;
*val = ec_generic(res[0], res[1]);
return 1;
}
static int
lc_set_cmp(const void *X, const void *Y)
@ -563,3 +649,95 @@ lc_set_sort(struct linpool *pool, const struct adata *src)
qsort(dst->data, dst->length / LCOMM_LENGTH, LCOMM_LENGTH, lc_set_cmp);
return dst;
}
int
lc_set_min(const struct adata *list, lcomm *val)
{
if (!list)
return 0;
u32 *l = int_set_get_data(list);
int len = int_set_get_size(list);
int i;
if (len < 1)
return 0;
u32 *res = l; l += 3;
for (i = 3; i < len; i += 3, l += 3)
if (lc_set_cmp(res, l) > 0)
res = l;
*val = (lcomm) { res[0], res[1], res[2] };
return 1;
}
int
lc_set_max(const struct adata *list, lcomm *val)
{
if (!list)
return 0;
u32 *l = int_set_get_data(list);
int len = int_set_get_size(list);
int i;
if (len < 1)
return 0;
u32 *res = l; l += 3;
for (i = 3; i < len; i += 3, l += 3)
if (lc_set_cmp(res, l) < 0)
res = l;
*val = (lcomm) { res[0], res[1], res[2] };
return 1;
}
int
int_set_walk(const struct adata *list, uint *pos, uint *val)
{
if (!list)
return 0;
if (*pos >= (uint) int_set_get_size(list))
return 0;
u32 *res = int_set_get_data(list) + *pos;
*val = *res;
*pos += 1;
return 1;
}
int
ec_set_walk(const struct adata *list, uint *pos, u64 *val)
{
if (!list)
return 0;
if (*pos >= (uint) int_set_get_size(list))
return 0;
u32 *res = int_set_get_data(list) + *pos;
*val = ec_generic(res[0], res[1]);
*pos += 2;
return 1;
}
int
lc_set_walk(const struct adata *list, uint *pos, lcomm *val)
{
if (!list)
return 0;
if (*pos >= (uint) int_set_get_size(list))
return 0;
u32 *res = int_set_get_data(list) + *pos;
*val = (lcomm) { res[0], res[1], res[2] };
*pos += 3;
return 1;
}
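The three *_set_walk() helpers share the same caller-held-position pattern; a sketch of enumerating a plain community list follows (assumption, not from the patch; `list` is a struct adata * of u32 communities and the log call is illustrative):
uint pos = 0, val;
while (int_set_walk(list, &pos, &val))
  log(L_INFO "community (%u,%u)", val >> 16, val & 0xffff);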


@ -10,8 +10,8 @@
#include "test/bt-utils.h"
#include "lib/net.h"
#include "nest/route.h"
#include "nest/attrs.h"
#include "nest/rt.h"
#include "lib/attrs.h"
#include "lib/resource.h"
#define SET_SIZE 10
@ -25,8 +25,6 @@ static byte buf[BUFFER_SIZE] = {};
#define SET_SIZE_FOR_FORMAT_OUTPUT 10
struct linpool *lp;
enum set_type
{
SET_TYPE_INT,
@ -38,24 +36,23 @@ generate_set_sequence(enum set_type type, int len)
{
struct adata empty_as_path = {};
set_sequence = set_sequence_same = set_sequence_higher = set_random = &empty_as_path;
lp = lp_new_default(&root_pool);
int i;
for (i = 0; i < len; i++)
{
if (type == SET_TYPE_INT)
{
set_sequence = int_set_add(lp, set_sequence, i);
set_sequence_same = int_set_add(lp, set_sequence_same, i);
set_sequence_higher = int_set_add(lp, set_sequence_higher, i + SET_SIZE);
set_random = int_set_add(lp, set_random, bt_random());
set_sequence = int_set_add(tmp_linpool, set_sequence, i);
set_sequence_same = int_set_add(tmp_linpool, set_sequence_same, i);
set_sequence_higher = int_set_add(tmp_linpool, set_sequence_higher, i + SET_SIZE);
set_random = int_set_add(tmp_linpool, set_random, bt_random());
}
else if (type == SET_TYPE_EC)
{
set_sequence = ec_set_add(lp, set_sequence, i);
set_sequence_same = ec_set_add(lp, set_sequence_same, i);
set_sequence_higher = ec_set_add(lp, set_sequence_higher, i + SET_SIZE);
set_random = ec_set_add(lp, set_random, (bt_random() << 32 | bt_random()));
set_sequence = ec_set_add(tmp_linpool, set_sequence, i);
set_sequence_same = ec_set_add(tmp_linpool, set_sequence_same, i);
set_sequence_higher = ec_set_add(tmp_linpool, set_sequence_higher, i + SET_SIZE);
set_random = ec_set_add(tmp_linpool, set_random, (bt_random() << 32 | bt_random()));
}
else
bt_abort_msg("This should be unreachable");
@ -71,7 +68,6 @@ t_set_int_contains(void)
{
int i;
resource_init();
generate_set_sequence(SET_TYPE_INT, SET_SIZE);
bt_assert(int_set_get_size(set_sequence) == SET_SIZE);
@ -85,33 +81,29 @@ t_set_int_contains(void)
for (i = 0; i < SET_SIZE; i++)
bt_assert_msg(data[i] == i, "(data[i] = %d) == i = %d)", data[i], i);
rfree(lp);
return 1;
}
static int
t_set_int_union(void)
{
resource_init();
generate_set_sequence(SET_TYPE_INT, SET_SIZE);
const struct adata *set_union;
set_union = int_set_union(lp, set_sequence, set_sequence_same);
set_union = int_set_union(tmp_linpool, set_sequence, set_sequence_same);
bt_assert(int_set_get_size(set_union) == SET_SIZE);
bt_assert(int_set_format(set_union, 0, 2, buf, BUFFER_SIZE) == 0);
set_union = int_set_union(lp, set_sequence, set_sequence_higher);
set_union = int_set_union(tmp_linpool, set_sequence, set_sequence_higher);
bt_assert_msg(int_set_get_size(set_union) == SET_SIZE*2, "int_set_get_size(set_union) %d, SET_SIZE*2 %d", int_set_get_size(set_union), SET_SIZE*2);
bt_assert(int_set_format(set_union, 0, 2, buf, BUFFER_SIZE) == 0);
rfree(lp);
return 1;
}
static int
t_set_int_format(void)
{
resource_init();
generate_set_sequence(SET_TYPE_INT, SET_SIZE_FOR_FORMAT_OUTPUT);
bt_assert(int_set_format(set_sequence, 0, 0, buf, BUFFER_SIZE) == 0);
@ -125,21 +117,19 @@ t_set_int_format(void)
bt_assert(int_set_format(set_sequence, 1, 0, buf, BUFFER_SIZE) == 0);
bt_assert(strcmp(buf, "(0,0) (0,1) (0,2) (0,3) (0,4) (0,5) (0,6) (0,7) (0,8) (0,9)") == 0);
rfree(lp);
return 1;
}
static int
t_set_int_delete(void)
{
resource_init();
generate_set_sequence(SET_TYPE_INT, SET_SIZE);
const struct adata *deleting_sequence = set_sequence;
u32 i;
for (i = 0; i < SET_SIZE; i++)
{
deleting_sequence = int_set_del(lp, deleting_sequence, i);
deleting_sequence = int_set_del(tmp_linpool, deleting_sequence, i);
bt_assert_msg(int_set_get_size(deleting_sequence) == (int) (SET_SIZE-1-i),
"int_set_get_size(deleting_sequence) %d == SET_SIZE-1-i %d",
int_set_get_size(deleting_sequence),
@ -160,7 +150,6 @@ t_set_ec_contains(void)
{
u32 i;
resource_init();
generate_set_sequence(SET_TYPE_EC, SET_SIZE);
bt_assert(ec_set_get_size(set_sequence) == SET_SIZE);
@ -174,62 +163,54 @@ t_set_ec_contains(void)
// for (i = 0; i < SET_SIZE; i++)
// bt_assert_msg(data[i] == (SET_SIZE-1-i), "(data[i] = %d) == ((SET_SIZE-1-i) = %d)", data[i], SET_SIZE-1-i);
rfree(lp);
return 1;
}
static int
t_set_ec_union(void)
{
resource_init();
generate_set_sequence(SET_TYPE_EC, SET_SIZE);
const struct adata *set_union;
set_union = ec_set_union(lp, set_sequence, set_sequence_same);
set_union = ec_set_union(tmp_linpool, set_sequence, set_sequence_same);
bt_assert(ec_set_get_size(set_union) == SET_SIZE);
bt_assert(ec_set_format(set_union, 0, buf, BUFFER_SIZE) == 0);
set_union = ec_set_union(lp, set_sequence, set_sequence_higher);
set_union = ec_set_union(tmp_linpool, set_sequence, set_sequence_higher);
bt_assert_msg(ec_set_get_size(set_union) == SET_SIZE*2, "ec_set_get_size(set_union) %d, SET_SIZE*2 %d", ec_set_get_size(set_union), SET_SIZE*2);
bt_assert(ec_set_format(set_union, 0, buf, BUFFER_SIZE) == 0);
rfree(lp);
return 1;
}
static int
t_set_ec_format(void)
{
resource_init();
const struct adata empty_as_path = {};
set_sequence = set_sequence_same = set_sequence_higher = set_random = &empty_as_path;
lp = lp_new_default(&root_pool);
u64 i = 0;
set_sequence = ec_set_add(lp, set_sequence, i);
set_sequence = ec_set_add(tmp_linpool, set_sequence, i);
for (i = 1; i < SET_SIZE_FOR_FORMAT_OUTPUT; i++)
set_sequence = ec_set_add(lp, set_sequence, i + ((i%2) ? ((u64)EC_RO << 48) : ((u64)EC_RT << 48)));
set_sequence = ec_set_add(tmp_linpool, set_sequence, i + ((i%2) ? ((u64)EC_RO << 48) : ((u64)EC_RT << 48)));
bt_assert(ec_set_format(set_sequence, 0, buf, BUFFER_SIZE) == 0);
bt_assert_msg(strcmp(buf, "(unknown 0x0, 0, 0) (ro, 0, 1) (rt, 0, 2) (ro, 0, 3) (rt, 0, 4) (ro, 0, 5) (rt, 0, 6) (ro, 0, 7) (rt, 0, 8) (ro, 0, 9)") == 0,
"ec_set_format() returns '%s'", buf);
rfree(lp);
return 1;
}
static int
t_set_ec_delete(void)
{
resource_init();
generate_set_sequence(SET_TYPE_EC, SET_SIZE);
const struct adata *deleting_sequence = set_sequence;
u32 i;
for (i = 0; i < SET_SIZE; i++)
{
deleting_sequence = ec_set_del(lp, deleting_sequence, i);
deleting_sequence = ec_set_del(tmp_linpool, deleting_sequence, i);
bt_assert_msg(ec_set_get_size(deleting_sequence) == (int) (SET_SIZE-1-i),
"ec_set_get_size(deleting_sequence) %d == SET_SIZE-1-i %d",
ec_set_get_size(deleting_sequence), SET_SIZE-1-i);
@ -241,13 +222,10 @@ t_set_ec_delete(void)
}
void resource_sys_init(void);
int
main(int argc, char *argv[])
{
bt_init(argc, argv);
resource_sys_init();
bt_test_suite(t_set_int_contains, "Testing sets of integers: contains, get_data");
bt_test_suite(t_set_int_format, "Testing sets of integers: format");


@ -11,7 +11,39 @@
#include <stdint.h>
#include "lib/unaligned.h"
#include "nest/route.h"
typedef struct adata {
uint length; /* Length of data */
byte data[0];
} adata;
#define ADATA_SIZE(s) BIRD_CPU_ALIGN(sizeof(struct adata) + s)
extern const adata null_adata; /* adata of length 0 */
static inline struct adata *
lp_alloc_adata(struct linpool *pool, uint len)
{
struct adata *ad = lp_alloc(pool, sizeof(struct adata) + len);
ad->length = len;
return ad;
}
static inline struct adata *
lp_store_adata(struct linpool *pool, const void *buf, uint len)
{
struct adata *ad = lp_alloc_adata(pool, len);
memcpy(ad->data, buf, len);
return ad;
}
#define tmp_alloc_adata(len) lp_alloc_adata(tmp_linpool, len)
#define tmp_store_adata(buf, len) lp_store_adata(tmp_linpool, buf, len)
#define tmp_copy_adata(ad) tmp_store_adata((ad)->data, (ad)->length)
static inline int adata_same(const struct adata *a, const struct adata *b)
{ return (!a && !b) || (a->length == b->length && !memcmp(a->data, b->data, a->length)); }
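A sketch of how the new tmp_*_adata helpers are meant to be used (assumption, not taken from the patch):
static const byte key[4] = { 1, 2, 3, 4 };
struct adata *a = tmp_store_adata(key, sizeof key);   /* copy the buffer into tmp_linpool */
struct adata *b = tmp_copy_adata(a);                  /* duplicate an existing adata */
ASSERT(adata_same(a, b));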
/* a-path.c */
@ -28,6 +60,7 @@
* to 16bit slot (like in 16bit AS_PATH). See RFC 4893 for details
*/
struct f_val;
struct f_tree;
int as_path_valid(byte *data, uint len, int bs, int sets, int confed, char *err, uint elen);
@ -49,7 +82,8 @@ int as_path_get_last(const struct adata *path, u32 *last_as);
u32 as_path_get_last_nonaggregated(const struct adata *path);
int as_path_contains(const struct adata *path, u32 as, int min);
int as_path_match_set(const struct adata *path, const struct f_tree *set);
const struct adata *as_path_filter(struct linpool *pool, const struct adata *path, const struct f_tree *set, u32 key, int pos);
const struct adata *as_path_filter(struct linpool *pool, const struct adata *path, const struct f_val *set, int pos);
int as_path_walk(const struct adata *path, uint *pos, uint *val);
static inline struct adata *as_path_prepend(struct linpool *pool, const struct adata *path, u32 as)
{ return as_path_prepend2(pool, path, AS_PATH_SEQUENCE, as); }
@ -218,6 +252,15 @@ struct adata *ec_set_del_nontrans(struct linpool *pool, const struct adata *set)
struct adata *int_set_sort(struct linpool *pool, const struct adata *src);
struct adata *ec_set_sort(struct linpool *pool, const struct adata *src);
struct adata *lc_set_sort(struct linpool *pool, const struct adata *src);
int int_set_min(const struct adata *list, u32 *val);
int ec_set_min(const struct adata *list, u64 *val);
int lc_set_min(const struct adata *list, lcomm *val);
int int_set_max(const struct adata *list, u32 *val);
int ec_set_max(const struct adata *list, u64 *val);
int lc_set_max(const struct adata *list, lcomm *val);
int int_set_walk(const struct adata *list, uint *pos, u32 *val);
int ec_set_walk(const struct adata *list, uint *pos, u64 *val);
int lc_set_walk(const struct adata *list, uint *pos, lcomm *val);
void ec_set_sort_x(struct adata *set); /* Sort in place */


@ -14,12 +14,24 @@
/* Ugly structure offset handling macros */
struct align_probe { char x; long int y; };
#define OFFSETOF(s, i) ((size_t) &((s *)0)->i)
#define SKIP_BACK(s, i, p) ({ s *_ptr = ((s *)((char *)p - OFFSETOF(s, i))); ASSERT_DIE(&_ptr->i == p); _ptr; })
#define BIRD_ALIGN(s, a) (((s)+a-1)&~(a-1))
#define CPU_STRUCT_ALIGN (sizeof(struct align_probe))
#define CPU_STRUCT_ALIGN (MAX_(_Alignof(void*), _Alignof(u64)))
#define BIRD_CPU_ALIGN(s) BIRD_ALIGN((s), CPU_STRUCT_ALIGN)
/* Structure item alignment macros */
#define PADDING_NAME(id) _padding_##id
#define PADDING_(id, sz) u8 PADDING_NAME(id)[sz]
#if CPU_POINTER_ALIGNMENT == 4
#define PADDING(id, n32, n64) PADDING_(id, n32)
#elif CPU_POINTER_ALIGNMENT == 8
#define PADDING(id, n32, n64) PADDING_(id, n64)
#else
#error "Strange CPU pointer alignment: " CPU_POINTER_ALIGNMENT
#endif
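A sketch of the intended PADDING() use (hypothetical struct, not from the patch): making the padding that keeps 32-bit and 64-bit layouts identical explicit instead of compiler-inserted.
struct demo {
  u32 id;
  PADDING(demo_pad, 0, 4);   /* explicit hole so `ptr` sits at the same relative offset */
  void *ptr;
};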
/* Utility macros */
@ -33,6 +45,9 @@ struct align_probe { char x; long int y; };
#define MAX(a,b) MAX_(a,b)
#endif
#define ROUND_DOWN_POW2(a,b) ((a) & ~((b)-1))
#define ROUND_UP_POW2(a,b) (((a)+((b)-1)) & ~((b)-1))
#define U64(c) UINT64_C(c)
#define ABS(a) ((a)>=0 ? (a) : -(a))
#define DELTA(a,b) (((a)>=(b))?(a)-(b):(b)-(a))
@ -160,8 +175,13 @@ void debug(const char *msg, ...); /* Printf to debug output */
#if defined(LOCAL_DEBUG) || defined(GLOBAL_DEBUG)
#define DBG(x, y...) debug(x, ##y)
#define DBGL(x, y...) debug(x "\n", ##y)
#elif defined(DEBUG_TO_LOG)
#define DBG(...) do { } while (0)
#define DBGL(...) log(L_DEBUG __VA_ARGS__)
#else
#define DBG(x, y...) do { } while(0)
#define DBG(...) do { } while(0)
#define DBGL(...) do { } while (0)
#endif
#define ASSERT_DIE(x) do { if (!(x)) bug("Assertion '%s' failed at %s:%d", #x, __FILE__, __LINE__); } while(0)


@ -24,7 +24,6 @@ t_bmap_set_clear_random(void)
{
struct bmap b;
resource_init();
bmap_init(&b, &root_pool, 1024);
char expected[MAX_NUM] = {};
@ -60,7 +59,6 @@ t_hmap_set_clear_random(void)
{
struct hmap b;
resource_init();
hmap_init(&b, &root_pool, 1024);
char expected[MAX_NUM] = {};
@ -119,7 +117,6 @@ t_hmap_set_clear_fill(void)
{
struct hmap b;
resource_init();
hmap_init(&b, &root_pool, 1024);
char expected[MAX_NUM] = {};


@ -41,7 +41,6 @@ fill_expected_array(void)
static void
init_buffer(void)
{
resource_init();
buffer_pool = &root_pool;
BUFFER_INIT(buf, buffer_pool, MAX_NUM);
}


@ -1,29 +0,0 @@
/*
* BIRD Coroutines
*
* (c) 2017 Martin Mares <mj@ucw.cz>
* (c) 2020-2021 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
#ifndef _BIRD_CORO_H_
#define _BIRD_CORO_H_
#include "lib/resource.h"
/* A completely opaque coroutine handle. */
struct coroutine;
/* Coroutines are independent threads bound to pools.
* You request a coroutine by calling coro_run().
* It is forbidden to free a running coroutine from outside.
* The running coroutine must free itself by rfree() before returning.
*/
struct coroutine *coro_run(pool *, void (*entry)(void *), void *data);
/* Get self. */
extern _Thread_local struct coroutine *this_coro;
#endif


@ -23,27 +23,91 @@
#include "nest/bird.h"
#include "lib/event.h"
#include "lib/locking.h"
#include "lib/io-loop.h"
extern _Thread_local struct coroutine *this_coro;
event_list global_event_list;
event_list global_work_list;
STATIC_ASSERT(OFFSETOF(event_list, _sentinel.next) >= OFFSETOF(event_list, _end[0]));
void
ev_init_list(event_list *el, struct birdloop *loop, const char *name)
{
el->name = name;
el->loop = loop;
atomic_store_explicit(&el->receiver, &el->_sentinel, memory_order_relaxed);
atomic_store_explicit(&el->_executor, &el->_sentinel, memory_order_relaxed);
atomic_store_explicit(&el->_sentinel.next, NULL, memory_order_relaxed);
}
/*
* The event list should work as a message passing point. Sending a message
* must be a fairly fast process with no locks and low waiting times. OTOH,
* processing messages always involves running the assigned code and the
* receiver is always a single thread with no concurrency at all. There is
* also a postponing requirement to synchronously remove an event from a queue,
* yet we allow this only when the caller has its receiver event loop locked.
* It still means that the event may get postponed from another event in the same
* list, therefore we have to be careful.
*/
static inline int
ev_remove_from(event *e, event * _Atomic * head)
{
/* The head pointer stores where cur is pointed to from */
event * _Atomic *prev = head;
/* The current event in queue to check */
event *cur = atomic_load_explicit(prev, memory_order_acquire);
/* Pre-loaded next pointer; if NULL, this is sentinel */
event *next = atomic_load_explicit(&cur->next, memory_order_acquire);
while (next)
{
if (e == cur)
{
/* Check whether we have collided with somebody else
* adding an item to the queue. */
if (!atomic_compare_exchange_strong_explicit(
prev, &cur, next,
memory_order_acq_rel, memory_order_acquire))
{
/* This may happen only on list head */
ASSERT_DIE(prev == head);
/* Restart. The collision should never happen again. */
return ev_remove_from(e, head);
}
/* Successfully removed from the list; inactivate this event. */
atomic_store_explicit(&cur->next, NULL, memory_order_release);
return 1;
}
/* Go to the next event. */
prev = &cur->next;
cur = next;
next = atomic_load_explicit(&cur->next, memory_order_acquire);
}
return 0;
}
inline void
ev_postpone(event *e)
{
event_list *el = e->list;
if (!el)
/* Find the list to remove the event from */
event_list *sl = ev_get_list(e);
if (!sl)
return;
ASSERT_DIE(birdloop_inside(el->loop));
/* Postponing allowed only from the target loop */
ASSERT_DIE(birdloop_inside(sl->loop));
LOCK_DOMAIN(event, el->lock);
if (ev_active(e))
rem_node(&e->n);
UNLOCK_DOMAIN(event, el->lock);
/* Remove from one of these lists. */
ASSERT(ev_remove_from(e, &sl->_executor) || ev_remove_from(e, &sl->receiver));
}
static void
@ -54,7 +118,7 @@ ev_dump(resource *r)
debug("(code %p, data %p, %s)\n",
e->hook,
e->data,
e->n.next ? "scheduled" : "inactive");
atomic_load_explicit(&e->next, memory_order_relaxed) ? "scheduled" : "inactive");
}
static struct resclass ev_class = {
@ -108,47 +172,18 @@ ev_run(event *e)
inline void
ev_send(event_list *l, event *e)
{
DBG("ev_send(%p, %p)\n", l, e);
ASSERT_DIE(e->hook);
ASSERT_DIE(!e->list || (e->list == l) || (e->list->loop == l->loop));
event_list *sl = ev_get_list(e);
if (sl == l)
return;
if (sl)
bug("Queuing an already queued event to another queue is not supported.");
e->list = l;
event *next = atomic_load_explicit(&l->receiver, memory_order_acquire);
do atomic_store_explicit(&e->next, next, memory_order_relaxed);
while (!atomic_compare_exchange_strong_explicit(
&l->receiver, &next, e,
memory_order_acq_rel, memory_order_acquire));
struct event_cork *ec = e->cork;
uint ping = 0;
if (ec)
{
LOCK_DOMAIN(cork, ec->lock);
LOCK_DOMAIN(event, l->lock);
if (!enlisted(&e->n))
if (ec->count)
add_tail(&ec->events, &e->n);
else
{
add_tail(&l->events, &e->n);
ping = 1;
}
UNLOCK_DOMAIN(event, l->lock);
UNLOCK_DOMAIN(cork, ec->lock);
}
else
{
LOCK_DOMAIN(event, l->lock);
if (!enlisted(&e->n))
{
add_tail(&l->events, &e->n);
ping = 1;
}
UNLOCK_DOMAIN(event, l->lock);
}
if (ping)
birdloop_ping(l->loop);
}
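A usage sketch of the lock-free send path (not part of the patch; `p`, `loop`, `my_hook` and `my_data` are placeholders):
event *e = ev_new(p);
e->hook = my_hook;
e->data = my_data;
ev_send_loop(loop, e);   /* any thread may enqueue; the owning loop executes the hook later */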
@ -160,129 +195,57 @@ void io_log_event(void *hook, void *data);
*
* This function calls ev_run() for all events enqueued in the list @l.
*/
int
ev_run_list(event_list *l)
{
const _Bool legacy = LEGACY_EVENT_LIST(l);
if (legacy)
ASSERT_THE_BIRD_LOCKED;
node *n;
list tmp_list;
init_list(&tmp_list);
/* Move the event list contents to a local list to avoid executing repeatedly added events */
LOCK_DOMAIN(event, l->lock);
add_tail_list(&tmp_list, &l->events);
init_list(&l->events);
UNLOCK_DOMAIN(event, l->lock);
WALK_LIST_FIRST(n, tmp_list)
{
event *e = SKIP_BACK(event, n, n);
if (legacy)
{
/* The legacy way of event execution */
io_log_event(e->hook, e->data);
ev_postpone(e);
e->hook(e->data);
}
else
{
// io_log_event(e->hook, e->data); /* TODO: add support for event logging in other io loops */
ASSERT_DIE(e->list == l);
LOCK_DOMAIN(event, l->lock);
rem_node(&e->n);
UNLOCK_DOMAIN(event, l->lock);
e->hook(e->data);
}
}
LOCK_DOMAIN(event, l->lock);
int repeat = ! EMPTY_LIST(l->events);
UNLOCK_DOMAIN(event, l->lock);
return repeat;
}
int
ev_run_list_limited(event_list *l, uint limit)
{
ASSERT_DIE(LEGACY_EVENT_LIST(l));
ASSERT_THE_BIRD_LOCKED;
event * _Atomic *ep = &l->_executor;
node *n;
list tmp_list;
LOCK_DOMAIN(event, l->lock);
init_list(&tmp_list);
add_tail_list(&tmp_list, &l->events);
init_list(&l->events);
UNLOCK_DOMAIN(event, l->lock);
WALK_LIST_FIRST(n, tmp_list)
/* No pending events, refill the queue. */
if (atomic_load_explicit(ep, memory_order_relaxed) == &l->_sentinel)
{
event *e = SKIP_BACK(event, n, n);
/* Move the current event list aside and create a new one. */
event *received = atomic_exchange_explicit(
&l->receiver, &l->_sentinel, memory_order_acq_rel);
if (!limit)
break;
/* No event to run. */
if (received == &l->_sentinel)
return 0;
/* Setup the executor queue */
event *head = &l->_sentinel;
/* Flip the order of the events by relinking them one by one (push-pop) */
while (received != &l->_sentinel)
{
event *cur = received;
received = atomic_exchange_explicit(&cur->next, head, memory_order_relaxed);
head = cur;
}
/* Store the executor queue to its designated place */
atomic_store_explicit(ep, head, memory_order_relaxed);
}
/* Run the events in order. */
event *e;
while ((e = atomic_load_explicit(ep, memory_order_relaxed)) != &l->_sentinel)
{
/* Check limit */
if (!--limit)
return 1;
/* This is an ugly hack; we only want to log events executed from the main I/O loop */
if ((l == &global_event_list) || (l == &global_work_list))
io_log_event(e->hook, e->data);
ev_run(e);
limit--;
/* Inactivate the event */
atomic_store_explicit(ep, atomic_load_explicit(&e->next, memory_order_relaxed), memory_order_relaxed);
atomic_store_explicit(&e->next, NULL, memory_order_relaxed);
/* Run the event */
e->hook(e->data);
tmp_flush();
}
LOCK_DOMAIN(event, l->lock);
if (!EMPTY_LIST(tmp_list))
{
/* Attach new items after the unprocessed old items */
add_tail_list(&tmp_list, &l->events);
init_list(&l->events);
add_tail_list(&l->events, &tmp_list);
}
int repeat = ! EMPTY_LIST(l->events);
UNLOCK_DOMAIN(event, l->lock);
return repeat;
}
void ev_cork(struct event_cork *ec)
{
LOCK_DOMAIN(cork, ec->lock);
ec->count++;
UNLOCK_DOMAIN(cork, ec->lock);
}
void ev_uncork(struct event_cork *ec)
{
LOCK_DOMAIN(cork, ec->lock);
if (--ec->count)
{
UNLOCK_DOMAIN(cork, ec->lock);
return;
}
node *n;
WALK_LIST_FIRST(n, ec->events)
{
event *e = SKIP_BACK(event, n, n);
event_list *el = e->list;
rem_node(&e->n);
LOCK_DOMAIN(event, el->lock);
add_tail(&el->events, &e->n);
UNLOCK_DOMAIN(event, el->lock);
birdloop_ping(el->loop);
}
UNLOCK_DOMAIN(cork, ec->lock);
birdloop_ping(&main_birdloop);
return atomic_load_explicit(&l->receiver, memory_order_relaxed) != &l->_sentinel;
}


@ -14,88 +14,65 @@
#include <stdatomic.h>
DEFINE_DOMAIN(event);
DEFINE_DOMAIN(cork);
struct birdloop;
typedef struct event {
resource r;
void (*hook)(void *);
void *data;
node n; /* Internal link */
struct event_list *list; /* List where this event is put in */
struct event_cork *cork; /* Event execution limiter */
node cork_node;
struct event * _Atomic next;
} event;
typedef struct event_list {
list events;
pool *pool;
struct birdloop *loop;
DOMAIN(event) lock;
typedef union event_list {
struct {
event * _Atomic receiver; /* Event receive list */
event * _Atomic _executor; /* Event execute list */
const char *name;
struct birdloop *loop; /* The executor loop */
char _end[0];
};
event _sentinel; /* Sentinel node to actively detect list end */
} event_list;
struct event_cork {
DOMAIN(cork) lock;
u32 count;
list events;
};
extern event_list global_event_list;
extern event_list global_work_list;
event *ev_new(pool *);
void ev_run(event *);
static inline void ev_init_list(event_list *el, struct birdloop *loop, const char *name)
{
init_list(&el->events);
el->loop = loop;
el->lock = DOMAIN_NEW(event, name);
}
static inline void ev_init_cork(struct event_cork *ec, const char *name)
{
init_list(&ec->events);
ec->lock = DOMAIN_NEW(cork, name);
ec->count = 0;
};
void ev_send(event_list *, event *);
void ev_init_list(event_list *, struct birdloop *loop, const char *name);
void ev_enqueue(event_list *, event *);
#define ev_send ev_enqueue
#define ev_send_loop(l, e) ev_send(birdloop_event_list((l)), (e))
#define ev_schedule(e) ({ ASSERT_THE_BIRD_LOCKED; if (!ev_active((e))) ev_send(&global_event_list, (e)); })
#define ev_schedule_work(e) ({ ASSERT_THE_BIRD_LOCKED; if (!ev_active((e))) ev_send(&global_work_list, (e)); })
void ev_postpone(event *);
int ev_run_list(event_list *);
int ev_run_list_limited(event_list *, uint);
#define ev_run_list(l) ev_run_list_limited((l), ~0)
#define LEGACY_EVENT_LIST(l) (((l) == &global_event_list) || ((l) == &global_work_list))
void ev_cork(struct event_cork *);
void ev_uncork(struct event_cork *);
static inline u32 ev_corked(struct event_cork *ec)
{
if (!ec)
return 0;
LOCK_DOMAIN(cork, ec->lock);
u32 out = ec->count;
UNLOCK_DOMAIN(cork, ec->lock);
return out;
}
_Bool birdloop_inside(struct birdloop *loop);
static inline int
ev_active(event *e)
{
if (e->list == NULL)
return 0;
return atomic_load_explicit(&e->next, memory_order_relaxed) != NULL;
}
ASSERT_DIE(birdloop_inside(e->list->loop));
return enlisted(&e->n);
static inline event_list *
ev_get_list(event *e)
{
/* We are looking for the sentinel node at the list end.
* After this, we have s->next == NULL */
event *s = e;
for (event *sn; sn = atomic_load_explicit(&s->next, memory_order_acquire); s = sn)
;
/* No sentinel, no list. */
if (s == e)
return NULL;
else
return SKIP_BACK(event_list, _sentinel, s);
}
static inline event*


@ -15,7 +15,7 @@
#include "nest/locks.h"
#include "sysdep/unix/unix.h"
#include "nest/iface.h"
#include "nest/route.h"
#include "nest/rt.h"
#define MAX_NUM 4
@ -48,19 +48,14 @@ init_event_check_points(void)
event_check_points[i] = 0;
}
void resource_sys_init(void);
static int
t_ev_run_list(void)
{
int i;
resource_sys_init();
resource_init();
olock_init();
birdloop_init();
io_init();
rt_init();
io_init();
if_init();
// roa_init();
config_init();
@ -85,9 +80,7 @@ main(int argc, char *argv[])
{
bt_init(argc, argv);
the_bird_lock();
bt_test_suite(t_ev_run_list, "Schedule and run 3 events in right order.");
the_bird_unlock();
return bt_exit_value();
}

lib/fib.h (new file, 119 lines)

@ -0,0 +1,119 @@
/*
* BIRD Internet Routing Daemon -- Network prefix storage
*
* (c) 1998--2000 Martin Mares <mj@ucw.cz>
* (c) 2022 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
#ifndef _BIRD_LIB_FIB_H_
#define _BIRD_LIB_FIB_H_
/*
* BIRD FIBs are a generic data structure for storing network prefixes.
* Also used for the master routing table. Currently implemented as
* a hash table.
*
* Available operations:
* - insertion of new entry
* - deletion of entry
* - searching for entry by network prefix
* - asynchronous retrieval of fib contents
*/
struct fib;
struct fib_node {
struct fib_node *next; /* Next in hash chain */
struct fib_iterator *readers; /* List of readers of this node */
net_addr addr[0];
};
struct fib_iterator { /* See lib/slists.h for an explanation */
struct fib_iterator *prev, *next; /* Must be synced with struct fib_node! */
byte efef; /* 0xff to distinguish between iterator and node */
byte pad[3];
struct fib_node *node; /* Or NULL if freshly merged */
uint hash;
};
typedef void (*fib_init_fn)(struct fib *, void *);
struct fib {
pool *fib_pool; /* Pool holding all our data */
slab *fib_slab; /* Slab holding all fib nodes */
struct fib_node **hash_table; /* Node hash table */
uint hash_size; /* Number of hash table entries (a power of two) */
uint hash_order; /* Binary logarithm of hash_size */
uint hash_shift; /* 32 - hash_order */
uint addr_type; /* Type of address data stored in fib (NET_*) */
uint node_size; /* FIB node size, 0 for nonuniform */
uint node_offset; /* Offset of fib_node struct inside of user data */
uint entries; /* Number of entries */
uint entries_min, entries_max; /* Entry count limits (else start rehashing) */
fib_init_fn init; /* Constructor */
};
static inline void * fib_node_to_user(struct fib *f, struct fib_node *e)
{ return e ? (void *) ((char *) e - f->node_offset) : NULL; }
static inline struct fib_node * fib_user_to_node(struct fib *f, void *e)
{ return e ? (void *) ((char *) e + f->node_offset) : NULL; }
void fib_init(struct fib *f, pool *p, uint addr_type, uint node_size, uint node_offset, uint hash_order, fib_init_fn init);
void *fib_find(struct fib *, const net_addr *); /* Find or return NULL if doesn't exist */
void *fib_get_chain(struct fib *f, const net_addr *a); /* Find first node in linked list from hash table */
void *fib_get(struct fib *, const net_addr *); /* Find or create new if nonexistent */
void *fib_route(struct fib *, const net_addr *); /* Longest-match routing lookup */
void fib_delete(struct fib *, void *); /* Remove fib entry */
void fib_free(struct fib *); /* Destroy the fib */
void fib_check(struct fib *); /* Consistency check for debugging */
void fit_init(struct fib_iterator *, struct fib *); /* Internal functions, don't call */
struct fib_node *fit_get(struct fib *, struct fib_iterator *);
void fit_put(struct fib_iterator *, struct fib_node *);
void fit_put_next(struct fib *f, struct fib_iterator *i, struct fib_node *n, uint hpos);
void fit_put_end(struct fib_iterator *i);
void fit_copy(struct fib *f, struct fib_iterator *dst, struct fib_iterator *src);
#define FIB_WALK(fib, type, z) do { \
struct fib_node *fn_, **ff_ = (fib)->hash_table; \
uint count_ = (fib)->hash_size; \
type *z; \
while (count_--) \
for (fn_ = *ff_++; z = fib_node_to_user(fib, fn_); fn_=fn_->next)
#define FIB_WALK_END } while (0)
#define FIB_ITERATE_INIT(it, fib) fit_init(it, fib)
#define FIB_ITERATE_START(fib, it, type, z) do { \
struct fib_node *fn_ = fit_get(fib, it); \
uint count_ = (fib)->hash_size; \
uint hpos_ = (it)->hash; \
type *z; \
for(;;) { \
if (!fn_) \
{ \
if (++hpos_ >= count_) \
break; \
fn_ = (fib)->hash_table[hpos_]; \
continue; \
} \
z = fib_node_to_user(fib, fn_);
#define FIB_ITERATE_END fn_ = fn_->next; } } while(0)
#define FIB_ITERATE_PUT(it) fit_put(it, fn_)
#define FIB_ITERATE_PUT_NEXT(it, fib) fit_put_next(fib, it, fn_, hpos_)
#define FIB_ITERATE_PUT_END(it) fit_put_end(it)
#define FIB_ITERATE_UNLINK(it, fib) fit_get(fib, it)
#define FIB_ITERATE_COPY(dst, src, fib) fit_copy(fib, dst, src)
#endif
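A usage sketch for the walking macros (not part of the new header; `struct my_entry` is a hypothetical user structure embedding a `struct fib_node n`, and the fib is assumed to have been fib_init()'d with the matching node offset):
FIB_WALK(&my_fib, struct my_entry, e)
{
  debug("%N\n", e->n.addr);    /* visits every stored prefix, synchronously */
}
FIB_WALK_END;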


@ -446,10 +446,7 @@ t_validation6(void)
static int
t_builder4(void)
{
resource_init();
struct flow_builder *fb = flow_builder_init(&root_pool);
linpool *lp = lp_new_default(&root_pool);
/* Expectation */
@ -492,7 +489,7 @@ t_builder4(void)
flow_builder_set_type(fb, FLOW_TYPE_TCP_FLAGS);
flow_builder_add_op_val(fb, 0, 0x55);
net_addr_flow4 *res = flow_builder4_finalize(fb, lp);
net_addr_flow4 *res = flow_builder4_finalize(fb, tmp_linpool);
bt_assert(memcmp(res, expect, expect->length) == 0);
@ -529,8 +526,6 @@ t_builder6(void)
{
net_addr_ip6 ip;
resource_init();
linpool *lp = lp_new_default(&root_pool);
struct flow_builder *fb = flow_builder_init(&root_pool);
fb->ipv6 = 1;
@ -574,7 +569,7 @@ t_builder6(void)
flow_builder_set_type(fb, FLOW_TYPE_LABEL);
flow_builder_add_op_val(fb, 0, 0x55);
net_addr_flow6 *res = flow_builder6_finalize(fb, lp);
net_addr_flow6 *res = flow_builder6_finalize(fb, tmp_linpool);
bt_assert(memcmp(res, expect, expect->length) == 0);
/* Reverse order */
@ -601,7 +596,7 @@ t_builder6(void)
flow_builder_set_type(fb, FLOW_TYPE_DST_PREFIX);
flow_builder6_add_pfx(fb, &ip, 61);
res = flow_builder6_finalize(fb, lp);
res = flow_builder6_finalize(fb, tmp_linpool);
bt_assert(memcmp(res, expect, expect->length) == 0);
return 1;
@ -666,13 +661,10 @@ t_formatting6(void)
return 1;
}
void resource_sys_init(void);
int
main(int argc, char *argv[])
{
bt_init(argc, argv);
resource_sys_init();
bt_test_suite(t_read_length, "Testing get NLRI length");
bt_test_suite(t_write_length, "Testing set NLRI length");


@ -10,7 +10,7 @@
#ifndef _BIRD_HASH_H_
#define _BIRD_HASH_H_
#define HASH(type) struct { type **data; uint count, order; }
#define HASH(type) struct { type **data; uint count; u16 iterators; u8 order; u8 down_requested:1; }
#define HASH_TYPE(v) typeof(** (v).data)
#define HASH_SIZE(v) (1U << (v).order)
@ -125,19 +125,25 @@
#define HASH_MAY_STEP_DOWN_(v,pool,rehash_fn,args) \
({ \
if (((v).count < (HASH_SIZE(v) REHASH_LO_MARK(args))) && \
if ((v).iterators) \
(v).down_requested = 1; \
else if (((v).count < (HASH_SIZE(v) REHASH_LO_MARK(args))) && \
((v).order > (REHASH_LO_BOUND(args)))) \
rehash_fn(&(v), pool, -(REHASH_LO_STEP(args))); \
})
#define HASH_MAY_RESIZE_DOWN_(v,pool,rehash_fn,args) \
({ \
if ((v).iterators) \
(v).down_requested = 1; \
else { \
uint _o = (v).order; \
while (((v).count < ((1U << _o) REHASH_LO_MARK(args))) && \
(_o > (REHASH_LO_BOUND(args)))) \
_o -= (REHASH_LO_STEP(args)); \
if (_o < (v).order) \
rehash_fn(&(v), pool, _o - (v).order); \
} \
})
@ -195,6 +201,20 @@
#define HASH_WALK_FILTER_END } while (0)
#define HASH_WALK_ITER(v, id, n, iter) \
do { \
uint _hash_walk_iter_put = 0; \
uint _shift = 32 - (v).order; \
for ( ; !_hash_walk_iter_put; (iter) += (1U << _shift)) { \
_hash_walk_iter_put = ((iter) + (1U << _shift) == 0); \
for (HASH_TYPE(v) *n = (v).data[(iter) >> _shift]; n; n = id##_NEXT((n)))\
if (HASH_FN(v, id, id##_KEY(n)) >= ((iter) >> _shift)) \
#define HASH_WALK_ITER_PUT (_hash_walk_iter_put = 1)
#define HASH_WALK_ITER_END } } while (0)
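A sketch of the resumable walk (assumption, mirroring the test added below; `my_hash`, the ITEM_* hash macros, `process()` and `budget` are placeholders):
u32 cursor = 0;   /* persists between calls; holds the restart position */
HASH_WALK_ITER(my_hash, ITEM, it, cursor)
{
  process(it);
  if (!--budget)
    HASH_WALK_ITER_PUT;   /* stop after this bucket and resume from `cursor` later */
}
HASH_WALK_ITER_END;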
static inline void
mem_hash_init(u64 *h)
{


@ -61,7 +61,6 @@ dump_nodes(void)
static void
init_hash_(uint order)
{
resource_init();
my_pool = rp_new(&root_pool, "Test pool");
HASH_INIT(hash, my_pool, order);
@ -286,6 +285,46 @@ t_walk_filter(void)
return 1;
}
static int
t_walk_iter(void)
{
init_hash();
fill_hash();
u32 hit = 0;
u32 prev_hash = ~0;
for (uint cnt = 0; cnt < MAX_NUM; )
{
u32 last_hash = ~0;
// printf("PUT!\n");
HASH_WALK_ITER(hash, TEST, n, hit)
{
cnt++;
u32 cur_hash = HASH_FN(hash, TEST, n->key);
/*
printf("C%08x L%08x P%08x K%08x H%08x N%p S%d I%ld\n",
cur_hash, last_hash, prev_hash, n->key, hit, n, _shift, n - &nodes[0]);
*/
if (last_hash == ~0U)
{
if (prev_hash != ~0U)
bt_assert(prev_hash < cur_hash);
last_hash = prev_hash = cur_hash;
}
else
bt_assert(last_hash == cur_hash);
if (cnt < MAX_NUM)
HASH_WALK_ITER_PUT;
}
HASH_WALK_ITER_END;
}
return 1;
}
int
main(int argc, char *argv[])
{
@ -300,6 +339,7 @@ main(int argc, char *argv[])
bt_test_suite(t_walk_delsafe_remove, "HASH_WALK_DELSAFE and HASH_REMOVE");
bt_test_suite(t_walk_delsafe_remove2, "HASH_WALK_DELSAFE and HASH_REMOVE2. HASH_REMOVE2 is HASH_REMOVE and smart auto-resize function");
bt_test_suite(t_walk_filter, "HASH_WALK_FILTER");
bt_test_suite(t_walk_iter, "HASH_WALK_ITER");
return bt_exit_value();
}


@ -14,12 +14,12 @@
#include "lib/event.h"
#include "lib/socket.h"
extern struct birdloop main_birdloop;
void sk_start(sock *s);
void sk_stop(sock *s);
void sk_reloop(sock *s, struct birdloop *loop);
extern struct birdloop main_birdloop;
/* Start a new birdloop owned by given pool and domain */
struct birdloop *birdloop_new(pool *p, uint order, const char *name);
@ -51,4 +51,8 @@ void birdloop_unlink(struct birdloop *loop);
void birdloop_ping(struct birdloop *loop);
void birdloop_init(void);
/* Yield for a little while. Use only in special cases. */
void birdloop_yield(void);
#endif /* _BIRD_IO_LOOP_H_ */


@ -85,25 +85,29 @@ ip4_classify(ip4_addr ad)
u32 a = _I(ad);
u32 b = a >> 24U;
if (b && b <= 0xdf)
if (b < 0xe0)
{
if (b == 0x7f)
if (b == 0x00) /* 0.0.0.0/8 This network */
return IADDR_INVALID;
if (b == 0x7f) /* 127.0.0.0/8 Loopback address */
return IADDR_HOST | SCOPE_HOST;
else if ((b == 0x0a) ||
((a & 0xffff0000) == 0xc0a80000) ||
((a & 0xfff00000) == 0xac100000))
if ((b == 0x0a) || /* 10.0.0.0/8 Private range */
((a & 0xffff0000) == 0xc0a80000) || /* 192.168.0.0/16 Private range */
((a & 0xfff00000) == 0xac100000)) /* 172.16.0.0/12 Private range */
return IADDR_HOST | SCOPE_SITE;
else
return IADDR_HOST | SCOPE_UNIVERSE;
}
if (b >= 0xe0 && b <= 0xef)
if (b < 0xf0) /* 224.0.0.0/4 Multicast address */
return IADDR_MULTICAST | SCOPE_UNIVERSE;
if (a == 0xffffffff)
if (a == 0xffffffff) /* 255.255.255.255 Broadcast address */
return IADDR_BROADCAST | SCOPE_LINK;
return IADDR_INVALID;
return IADDR_HOST | SCOPE_SITE; /* 240.0.0.0/4 Reserved / private */
}
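A spot check of the rewritten classification (sketch, not from the patch):
ASSERT(ip4_classify(ip4_from_u32(0x0a000001)) == (IADDR_HOST | SCOPE_SITE));   /* 10.0.0.1 */
ASSERT(ip4_classify(ip4_from_u32(0xf0000001)) == (IADDR_HOST | SCOPE_SITE));   /* 240.0.0.1, formerly invalid */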
int


@ -279,11 +279,35 @@ static inline uint ip6_pxlen(ip6_addr a, ip6_addr b)
return 32 * i + 31 - u32_log2(a.addr[i] ^ b.addr[i]);
}
static inline int ip4_prefix_equal(ip4_addr a, ip4_addr b, uint n)
{
return (_I(a) ^ _I(b)) < ((u64) 1 << (32 - n));
}
static inline int ip6_prefix_equal(ip6_addr a, ip6_addr b, uint n)
{
uint n0 = n / 32;
uint n1 = n % 32;
return
((n0 <= 0) || (_I0(a) == _I0(b))) &&
((n0 <= 1) || (_I1(a) == _I1(b))) &&
((n0 <= 2) || (_I2(a) == _I2(b))) &&
((n0 <= 3) || (_I3(a) == _I3(b))) &&
(!n1 || ((a.addr[n0] ^ b.addr[n0]) < (1u << (32 - n1))));
}
static inline u32 ip4_getbit(ip4_addr a, uint pos)
{ return _I(a) & (0x80000000 >> pos); }
{ return (_I(a) >> (31 - pos)) & 1; }
static inline u32 ip4_getbits(ip4_addr a, uint pos, uint n)
{ return (_I(a) >> ((32 - n) - pos)) & ((1u << n) - 1); }
static inline u32 ip6_getbit(ip6_addr a, uint pos)
{ return a.addr[pos / 32] & (0x80000000 >> (pos % 32)); }
{ return (a.addr[pos / 32] >> (31 - (pos % 32))) & 0x1; }
static inline u32 ip6_getbits(ip6_addr a, uint pos, uint n)
{ return (a.addr[pos / 32] >> ((32 - n) - (pos % 32))) & ((1u << n) - 1); }
static inline u32 ip4_setbit(ip4_addr *a, uint pos)
{ return _I(*a) |= (0x80000000 >> pos); }
@ -297,6 +321,13 @@ static inline u32 ip4_clrbit(ip4_addr *a, uint pos)
static inline u32 ip6_clrbit(ip6_addr *a, uint pos)
{ return a->addr[pos / 32] &= ~(0x80000000 >> (pos % 32)); }
static inline ip4_addr ip4_setbits(ip4_addr a, uint pos, uint val)
{ _I(a) |= val << (31 - pos); return a; }
static inline ip6_addr ip6_setbits(ip6_addr a, uint pos, uint val)
{ a.addr[pos / 32] |= val << (31 - pos % 32); return a; }
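A sketch of the new bit-access semantics (getbit now yields 0/1 rather than a mask bit; getbits extracts an MSB-first field). The address value is illustrative only:
ip4_addr a = ip4_from_u32(0xc0a80100);     /* 192.168.1.0 */
ASSERT(ip4_getbit(a, 0) == 1);             /* the top bit */
ASSERT(ip4_getbits(a, 0, 8) == 0xc0);      /* the first octet */
ASSERT(ip4_getbits(a, 8, 8) == 0xa8);      /* the second octet */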
static inline ip4_addr ip4_opposite_m1(ip4_addr a)
{ return _MI4(_I(a) ^ 1); }
@ -331,11 +362,7 @@ static inline ip6_addr ip6_hton(ip6_addr a)
static inline ip6_addr ip6_ntoh(ip6_addr a)
{ return _MI6(ntohl(_I0(a)), ntohl(_I1(a)), ntohl(_I2(a)), ntohl(_I3(a))); }
#define MPLS_MAX_LABEL_STACK 8
typedef struct mpls_label_stack {
uint len;
u32 stack[MPLS_MAX_LABEL_STACK];
} mpls_label_stack;
#define MPLS_MAX_LABEL_STACK 16
static inline int
mpls_get(const char *buf, int buflen, u32 *stack)


@ -167,6 +167,70 @@ t_ip6_ntop(void)
return bt_assert_batch(test_vectors, test_ipa_ntop, bt_fmt_ipa, bt_fmt_str);
}
static int
t_ip4_prefix_equal(void)
{
bt_assert( ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x1234ffff), 16));
bt_assert(!ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x1234ffff), 17));
bt_assert( ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x12345000), 21));
bt_assert(!ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x12345000), 22));
bt_assert( ip4_prefix_equal(ip4_from_u32(0x00000000), ip4_from_u32(0xffffffff), 0));
bt_assert( ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x12345678), 0));
bt_assert( ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x12345678), 32));
bt_assert(!ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x12345679), 32));
bt_assert(!ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x92345678), 32));
return 1;
}
static int
t_ip6_prefix_equal(void)
{
bt_assert( ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020),
ip6_build(0x20010db8, 0x1234ffff, 0xfefefefe, 0xdcdcdcdc),
48));
bt_assert(!ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020),
ip6_build(0x20010db8, 0x1234ffff, 0xfefefefe, 0xdcdcdcdc),
49));
bt_assert(!ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020),
ip6_build(0x20020db8, 0x12345678, 0xfefefefe, 0xdcdcdcdc),
48));
bt_assert( ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020),
ip6_build(0x20010db8, 0x12345678, 0xfefefefe, 0xdcdcdcdc),
64));
bt_assert(!ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020),
ip6_build(0x20010db8, 0x1234567e, 0xfefefefe, 0xdcdcdcdc),
64));
bt_assert( ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20002020),
ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020),
106));
bt_assert(!ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20002020),
ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020),
107));
bt_assert( ip6_prefix_equal(ip6_build(0xfeef0db8, 0x87654321, 0x10101010, 0x20202020),
ip6_build(0x20010db8, 0x12345678, 0xfefefefe, 0xdcdcdcdc),
0));
bt_assert( ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020),
ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020),
128));
bt_assert(!ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020),
ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202021),
128));
return 1;
}
int
main(int argc, char *argv[])
{
@ -176,6 +240,8 @@ main(int argc, char *argv[])
bt_test_suite(t_ip6_pton, "Converting IPv6 string to ip6_addr struct");
bt_test_suite(t_ip4_ntop, "Converting ip4_addr struct to IPv4 string");
bt_test_suite(t_ip6_ntop, "Converting ip6_addr struct to IPv6 string");
bt_test_suite(t_ip4_prefix_equal, "Testing ip4_prefix_equal()");
bt_test_suite(t_ip6_prefix_equal, "Testing ip6_prefix_equal()");
return bt_exit_value();
}


@ -110,15 +110,6 @@ add_head(list *l, node *n)
l->head = n;
}
LIST_INLINE void
self_link(node *n)
{
ASSUME(n->prev == NULL);
ASSUME(n->next == NULL);
n->prev = n->next = n;
}
/**
* insert_node - insert a node to a list
* @n: a new list node


@ -42,6 +42,7 @@ typedef union list { /* In fact two overlayed nodes */
};
} list;
#define STATIC_LIST_INIT(name) name = { .head = &name.tail_node, .tail = &name.head_node, .null = NULL }
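A sketch of the new static initializer (assumed usage, not from the patch): a list defined this way needs no init_list() call at runtime.
static list STATIC_LIST_INIT(my_static_list);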
#define NODE (node *)
#define HEAD(list) ((void *)((list).head))
@ -90,7 +91,6 @@ enlisted(node *n)
#define LIST_INLINE
void add_tail(list *, node *);
void add_head(list *, node *);
void self_link(node *);
void rem_node(node *);
void add_tail_list(list *, list *);
void init_list(list *);


@ -16,8 +16,7 @@ struct lock_order {
struct domain_generic *the_bird;
struct domain_generic *proto;
struct domain_generic *rtable;
struct domain_generic *cork;
struct domain_generic *event;
struct domain_generic *resource;
};
extern _Thread_local struct lock_order locking_stack;


@ -27,26 +27,24 @@
struct lp_chunk {
struct lp_chunk *next;
uint size;
uintptr_t data_align[0];
byte data[0];
};
const int lp_chunk_size = sizeof(struct lp_chunk);
#define LP_DATA_SIZE (page_size - OFFSETOF(struct lp_chunk, data))
struct linpool {
resource r;
byte *ptr, *end;
pool *p;
struct lp_chunk *first, *current; /* Normal (reusable) chunks */
struct lp_chunk *first_large; /* Large chunks */
uint chunk_size, threshold, total:31, use_pages:1, total_large;
uint total, total_large;
};
static void lp_free(resource *);
static void lp_dump(resource *);
static resource *lp_lookup(resource *, unsigned long);
static size_t lp_memsize(resource *r);
static struct resmem lp_memsize(resource *r);
static struct resclass lp_class = {
"LinPool",
@ -60,26 +58,14 @@ static struct resclass lp_class = {
/**
* lp_new - create a new linear memory pool
* @p: pool
* @blk: block size
*
* lp_new() creates a new linear memory pool resource inside the pool @p.
* The linear pool consists of a list of memory chunks of size at least
* @blk.
* The linear pool consists of a list of memory chunks of page size.
*/
linpool
*lp_new(pool *p, uint blk)
*lp_new(pool *p)
{
linpool *m = ralloc(p, &lp_class);
m->p = p;
if (!blk)
{
m->use_pages = 1;
blk = page_size - lp_chunk_size;
}
m->chunk_size = blk;
m->threshold = 3*blk/4;
return m;
return ralloc(p, &lp_class);
}
/**
@ -110,14 +96,13 @@ lp_alloc(linpool *m, uint size)
else
{
struct lp_chunk *c;
if (size >= m->threshold)
if (size > LP_DATA_SIZE)
{
/* Too large => allocate large chunk */
c = xmalloc(sizeof(struct lp_chunk) + size);
m->total_large += size;
c->next = m->first_large;
m->first_large = c;
c->size = size;
}
else
{
@ -129,14 +114,10 @@ lp_alloc(linpool *m, uint size)
else
{
/* Need to allocate a new chunk */
if (m->use_pages)
c = alloc_page(m->p);
else
c = xmalloc(sizeof(struct lp_chunk) + m->chunk_size);
c = alloc_page();
m->total += m->chunk_size;
m->total += LP_DATA_SIZE;
c->next = NULL;
c->size = m->chunk_size;
if (m->current)
m->current->next = c;
@ -145,7 +126,7 @@ lp_alloc(linpool *m, uint size)
}
m->current = c;
m->ptr = c->data + size;
m->end = c->data + m->chunk_size;
m->end = c->data + LP_DATA_SIZE;
}
return c->data;
}
@ -207,7 +188,7 @@ lp_flush(linpool *m)
/* Move ptr to the first chunk and free all large chunks */
m->current = c = m->first;
m->ptr = c ? c->data : NULL;
m->end = c ? c->data + m->chunk_size : NULL;
m->end = c ? c->data + LP_DATA_SIZE : NULL;
while (c = m->first_large)
{
@ -230,6 +211,7 @@ lp_save(linpool *m, lp_state *p)
{
p->current = m->current;
p->large = m->first_large;
p->total_large = m->total_large;
p->ptr = m->ptr;
}
@ -251,12 +233,12 @@ lp_restore(linpool *m, lp_state *p)
/* Move ptr to the saved pos and free all newer large chunks */
m->current = c = p->current;
m->ptr = p->ptr;
m->end = c ? c->data + m->chunk_size : NULL;
m->end = c ? c->data + LP_DATA_SIZE : NULL;
m->total_large = p->total_large;
while ((c = m->first_large) && (c != p->large))
{
m->first_large = c->next;
m->total_large -= c->size;
xfree(c);
}
}
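A usage sketch of the save/restore pair with the page-sized chunks (not part of the patch; the scratch buffer is illustrative):
lp_state state;
lp_save(tmp_linpool, &state);
byte *scratch = lp_alloc(tmp_linpool, 1024);
/* ... use scratch ... */
lp_restore(tmp_linpool, &state);   /* scratch and any newer large chunks are reclaimed */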
@ -270,10 +252,7 @@ lp_free(resource *r)
for(d=m->first; d; d = c)
{
c = d->next;
if (m->use_pages)
free_page(m->p, d);
else
xfree(d);
free_page(d);
}
for(d=m->first_large; d; d = c)
{
@ -293,30 +272,33 @@ lp_dump(resource *r)
;
for(cntl=0, c=m->first_large; c; c=c->next, cntl++)
;
debug("(chunk=%d threshold=%d count=%d+%d total=%d+%d)\n",
m->chunk_size,
m->threshold,
debug("(count=%d+%d total=%d+%d)\n",
cnt,
cntl,
m->total,
m->total_large);
}
static size_t
static struct resmem
lp_memsize(resource *r)
{
linpool *m = (linpool *) r;
struct lp_chunk *c;
int cnt = 0;
struct resmem sz = {
.overhead = sizeof(struct linpool) + ALLOC_OVERHEAD,
.effective = m->total_large,
};
for(c=m->first; c; c=c->next)
cnt++;
for(c=m->first_large; c; c=c->next)
cnt++;
for (struct lp_chunk *c = m->first_large; c; c = c->next)
sz.overhead += sizeof(struct lp_chunk) + ALLOC_OVERHEAD;
return ALLOC_OVERHEAD + sizeof(struct linpool) +
cnt * (ALLOC_OVERHEAD + sizeof(struct lp_chunk)) +
m->total + m->total_large;
uint regular = 0;
for (struct lp_chunk *c = m->first; c; c = c->next)
regular++;
sz.effective += LP_DATA_SIZE * regular;
sz.overhead += (sizeof(struct lp_chunk) + ALLOC_OVERHEAD) * regular;
return sz;
}
@ -327,10 +309,7 @@ lp_lookup(resource *r, unsigned long a)
struct lp_chunk *c;
for(c=m->first; c; c=c->next)
if ((unsigned long) c->data <= a && (unsigned long) c->data + c->size > a)
return r;
for(c=m->first_large; c; c=c->next)
if ((unsigned long) c->data <= a && (unsigned long) c->data + c->size > a)
if ((unsigned long) c->data <= a && (unsigned long) c->data + LP_DATA_SIZE > a)
return r;
return NULL;
}


@ -38,6 +38,7 @@
#define NB_IP (NB_IP4 | NB_IP6)
#define NB_VPN (NB_VPN4 | NB_VPN6)
#define NB_ROA (NB_ROA4 | NB_ROA6)
#define NB_FLOW (NB_FLOW4 | NB_FLOW6)
#define NB_DEST (NB_IP | NB_IP6_SADR | NB_VPN | NB_MPLS)
#define NB_ANY 0xffffffff


@ -568,3 +568,51 @@ buffer_puts(buffer *buf, const char *str)
buf->pos = (bp < be) ? bp : buf->end;
}
#define POOL_PRINTF_MAXBUF 1024
char *mb_vsprintf(pool *p, const char *fmt, va_list args)
{
char buf[POOL_PRINTF_MAXBUF];
int count = bvsnprintf(buf, POOL_PRINTF_MAXBUF, fmt, args);
if (count < 0)
bug("Attempted to mb_vsprintf() a too long string");
char *out = mb_alloc(p, count + 1);
memcpy(out, buf, count + 1);
return out;
}
char *mb_sprintf(pool *p, const char *fmt, ...)
{
va_list args;
char *out;
va_start(args, fmt);
out = mb_vsprintf(p, fmt, args);
va_end(args);
return out;
}
char *lp_vsprintf(linpool *p, const char *fmt, va_list args)
{
char buf[POOL_PRINTF_MAXBUF];
int count = bvsnprintf(buf, POOL_PRINTF_MAXBUF, fmt, args);
if (count < 0)
bug("Attempted to mb_vsprintf() a too long string");
char *out = lp_alloc(p, count + 1);
memcpy(out, buf, count + 1);
return out;
}
char *lp_sprintf(linpool *p, const char *fmt, ...)
{
va_list args;
char *out;
va_start(args, fmt);
out = lp_vsprintf(p, fmt, args);
va_end(args);
return out;
}
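
A minimal usage sketch of the formatted-allocation helpers added above; the pool p, the linpool lp and the values printed into them are illustrative, not taken from the patch:

  /* Formatted string owned by a resource pool; released by mb_free()
   * or together with the pool itself. */
  char *label = mb_sprintf(p, "neighbor %I on %s", addr, ifname);

  /* Same, but allocated from a linpool and reclaimed by lp_flush(). */
  char *note = lp_sprintf(lp, "%u routes withdrawn", count);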

79
lib/rcu.c Normal file
View File

@ -0,0 +1,79 @@
/*
* BIRD Library -- Read-Copy-Update Basic Operations
*
* (c) 2021 Maria Matejka <mq@jmq.cz>
* (c) 2021 CZ.NIC z.s.p.o.
*
* Can be freely distributed and used under the terms of the GNU GPL.
 * Note: all the relevant patents should have expired by now.
*
* Using the Supplementary Material for User-Level Implementations of Read-Copy-Update
* by Matthieu Desnoyers, Paul E. McKenney, Alan S. Stern, Michel R. Dagenais and Jonathan Walpole
* obtained from https://www.efficios.com/pub/rcu/urcu-supp-accepted.pdf
*/
#include "lib/rcu.h"
#include "lib/io-loop.h"
#include "lib/locking.h"
_Atomic uint rcu_gp_ctl = RCU_NEST_CNT;
_Thread_local struct rcu_birdloop *this_rcu_birdloop = NULL;
static list rcu_birdloop_list;
static struct rcu_birdloop main_rcu_birdloop;
DEFINE_DOMAIN(resource);
static DOMAIN(resource) rcu_domain;
static int
rcu_gp_ongoing(_Atomic uint *ctl)
{
uint val = atomic_load(ctl);
return (val & RCU_NEST_CNT) && ((val ^ rcu_gp_ctl) & RCU_GP_PHASE);
}
static void
update_counter_and_wait(void)
{
atomic_fetch_xor(&rcu_gp_ctl, RCU_GP_PHASE);
struct rcu_birdloop *rc;
WALK_LIST(rc, rcu_birdloop_list)
while (rcu_gp_ongoing(&rc->ctl))
birdloop_yield();
}
void
synchronize_rcu(void)
{
LOCK_DOMAIN(resource, rcu_domain);
update_counter_and_wait();
update_counter_and_wait();
UNLOCK_DOMAIN(resource, rcu_domain);
}
void
rcu_birdloop_start(struct rcu_birdloop *rc)
{
LOCK_DOMAIN(resource, rcu_domain);
add_tail(&rcu_birdloop_list, &rc->n);
this_rcu_birdloop = rc;
UNLOCK_DOMAIN(resource, rcu_domain);
}
void
rcu_birdloop_stop(struct rcu_birdloop *rc)
{
LOCK_DOMAIN(resource, rcu_domain);
this_rcu_birdloop = NULL;
rem_node(&rc->n);
UNLOCK_DOMAIN(resource, rcu_domain);
}
void
rcu_init(void)
{
rcu_domain = DOMAIN_NEW(resource, "Read-Copy-Update");
init_list(&rcu_birdloop_list);
rcu_birdloop_start(&main_rcu_birdloop);
}

55
lib/rcu.h Normal file
View File

@ -0,0 +1,55 @@
/*
* BIRD Library -- Read-Copy-Update Basic Operations
*
* (c) 2021 Maria Matejka <mq@jmq.cz>
* (c) 2021 CZ.NIC z.s.p.o.
*
* Can be freely distributed and used under the terms of the GNU GPL.
 * Note: all the relevant patents should have expired by now.
*/
#ifndef _BIRD_RCU_H_
#define _BIRD_RCU_H_
#include "lib/birdlib.h"
#include "lib/lists.h"
#include <stdatomic.h>
#define RCU_GP_PHASE 0x100000
#define RCU_NEST_MASK 0x0fffff
#define RCU_NEST_CNT 0x000001
extern _Atomic uint rcu_gp_ctl;
struct rcu_birdloop {
node n;
_Atomic uint ctl;
};
extern _Thread_local struct rcu_birdloop *this_rcu_birdloop;
static inline void rcu_read_lock(void)
{
uint cmp = atomic_load_explicit(&this_rcu_birdloop->ctl, memory_order_acquire);
if (cmp & RCU_NEST_MASK)
atomic_store_explicit(&this_rcu_birdloop->ctl, cmp + RCU_NEST_CNT, memory_order_relaxed);
else
atomic_store(&this_rcu_birdloop->ctl, atomic_load_explicit(&rcu_gp_ctl, memory_order_acquire));
}
static inline void rcu_read_unlock(void)
{
atomic_fetch_sub(&this_rcu_birdloop->ctl, RCU_NEST_CNT);
}
void synchronize_rcu(void);
/* Registering and unregistering a birdloop. To be called from birdloop implementation */
void rcu_birdloop_start(struct rcu_birdloop *);
void rcu_birdloop_stop(struct rcu_birdloop *);
/* Run this from resource init */
void rcu_init(void);
#endif
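
A sketch of how these primitives are meant to be combined, assuming a hypothetical _Atomic pointer shared_foo published to readers; reader code must run in a birdloop registered via rcu_birdloop_start():

  /* Reader side: cheap, never blocks the writer */
  rcu_read_lock();
  struct foo *f = atomic_load_explicit(&shared_foo, memory_order_acquire);
  /* ... read-only access to *f ... */
  rcu_read_unlock();

  /* Writer side: unpublish, wait for all current readers, then reclaim
   * (assuming the object was mb_alloc()'d) */
  struct foo *old = atomic_exchange(&shared_foo, new_foo);
  synchronize_rcu();
  mb_free(old);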

View File

@ -2,6 +2,7 @@
* BIRD Resource Manager
*
* (c) 1998--2000 Martin Mares <mj@ucw.cz>
* (c) 2021 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
@ -13,6 +14,7 @@
#include "nest/bird.h"
#include "lib/resource.h"
#include "lib/string.h"
#include "lib/rcu.h"
/**
* DOC: Resource pools
@ -28,25 +30,10 @@
* is freed upon shutdown of the module.
*/
struct pool {
resource r;
list inside;
struct pool_pages *pages;
const char *name;
};
struct pool_pages {
uint free;
uint used;
void *ptr[0];
};
#define POOL_PAGES_MAX ((page_size - sizeof(struct pool_pages)) / sizeof (void *))
static void pool_dump(resource *);
static void pool_free(resource *);
static resource *pool_lookup(resource *, unsigned long);
static size_t pool_memsize(resource *P);
static struct resmem pool_memsize(resource *P);
static struct resclass pool_class = {
"Pool",
@ -59,9 +46,6 @@ static struct resclass pool_class = {
pool root_pool;
void *alloc_sys_page(void);
void free_sys_page(void *);
static int indent;
/**
@ -81,6 +65,20 @@ rp_new(pool *p, const char *name)
return z;
}
pool *
rp_newf(pool *p, const char *fmt, ...)
{
pool *z = rp_new(p, NULL);
va_list args;
va_start(args, fmt);
z->name = mb_vsprintf(p, fmt, args);
va_end(args);
return z;
}
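
For illustration, rp_newf() lets a pool carry a name formatted at run time; the parent pool and the protocol/channel names below are assumptions:

  pool *cp = rp_newf(proto_pool, "Channel %s.%s", p->name, c->name);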
static void
pool_free(resource *P)
{
@ -94,14 +92,6 @@ pool_free(resource *P)
xfree(r);
r = rr;
}
if (p->pages)
{
ASSERT_DIE(!p->pages->used);
for (uint i=0; i<p->pages->free; i++)
free_sys_page(p->pages->ptr[i]);
free_sys_page(p->pages);
}
}
static void
@ -117,18 +107,22 @@ pool_dump(resource *P)
indent -= 3;
}
static size_t
static struct resmem
pool_memsize(resource *P)
{
pool *p = (pool *) P;
resource *r;
size_t sum = sizeof(pool) + ALLOC_OVERHEAD;
struct resmem sum = {
.effective = 0,
.overhead = sizeof(pool) + ALLOC_OVERHEAD,
};
WALK_LIST(r, p->inside)
sum += rmemsize(r);
if (p->pages)
sum += page_size * (p->pages->used + p->pages->free + 1);
{
struct resmem add = rmemsize(r);
sum.effective += add.effective;
sum.overhead += add.overhead;
}
return sum;
}
@ -216,14 +210,17 @@ rdump(void *res)
debug("NULL\n");
}
size_t
struct resmem
rmemsize(void *res)
{
resource *r = res;
if (!r)
return 0;
return (struct resmem) {};
if (!r->class->memsize)
return r->class->size + ALLOC_OVERHEAD;
return (struct resmem) {
.effective = r->class->size - sizeof(resource),
.overhead = ALLOC_OVERHEAD + sizeof(resource),
};
return r->class->memsize(r);
}
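
With the split accounting, a caller now gets both numbers from a single rmemsize() call; a minimal sketch (the log message is illustrative):

  struct resmem total = rmemsize(&root_pool);
  log(L_INFO "Memory: %lu B effective + %lu B overhead",
      (unsigned long) total.effective, (unsigned long) total.overhead);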
@ -282,11 +279,34 @@ rlookup(unsigned long a)
void
resource_init(void)
{
resource_sys_init();
rcu_init();
root_pool.r.class = &pool_class;
root_pool.name = "Root";
init_list(&root_pool.inside);
tmp_init(&root_pool);
}
_Thread_local struct tmp_resources tmp_res;
void
tmp_init(pool *p)
{
tmp_res.lp = lp_new_default(p);
tmp_res.parent = p;
tmp_res.pool = rp_new(p, "TMP");
}
void
tmp_flush(void)
{
lp_flush(tmp_linpool);
rfree(tmp_res.pool);
tmp_res.pool = rp_new(tmp_res.parent, "TMP");
}
/**
* DOC: Memory blocks
*
@ -328,11 +348,14 @@ mbl_lookup(resource *r, unsigned long a)
return NULL;
}
static size_t
static struct resmem
mbl_memsize(resource *r)
{
struct mblock *m = (struct mblock *) r;
return ALLOC_OVERHEAD + sizeof(struct mblock) + m->size;
return (struct resmem) {
.effective = m->size,
.overhead = ALLOC_OVERHEAD + sizeof(struct mblock),
};
}
static struct resclass mb_class = {
@ -416,21 +439,6 @@ mb_realloc(void *m, unsigned size)
return b->data;
}
/**
* mb_move - move a memory block
* @m: memory block
* @p: target pool
*
* mb_move() moves the given memory block to another pool in the same way
* as rmove() moves a plain resource.
*/
void
mb_move(void *m, pool *p)
{
struct mblock *b = SKIP_BACK(struct mblock, data, m);
rmove(b, p);
}
/**
* mb_free - free a memory block
@ -448,39 +456,6 @@ mb_free(void *m)
rfree(b);
}
void *
alloc_page(pool *p)
{
if (!p->pages)
{
p->pages = alloc_sys_page();
p->pages->free = 0;
p->pages->used = 1;
}
else
p->pages->used++;
if (p->pages->free)
{
void *ptr = p->pages->ptr[--p->pages->free];
bzero(ptr, page_size);
return ptr;
}
else
return alloc_sys_page();
}
void
free_page(pool *p, void *ptr)
{
ASSERT_DIE(p->pages);
p->pages->used--;
if (p->pages->free >= POOL_PAGES_MAX)
return free_sys_page(ptr);
else
p->pages->ptr[p->pages->free++] = ptr;
}
#define STEP_UP(x) ((x) + (x)/2 + 4)

View File

@ -2,6 +2,7 @@
* BIRD Resource Manager
*
* (c) 1998--1999 Martin Mares <mj@ucw.cz>
* (c) 2021 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
@ -11,6 +12,11 @@
#include "lib/lists.h"
struct resmem {
size_t effective; /* Memory actually used for data storage */
size_t overhead; /* Overhead memory imposed by allocator strategies */
};
/* Resource */
typedef struct resource {
@ -26,21 +32,27 @@ struct resclass {
void (*free)(resource *); /* Freeing function */
void (*dump)(resource *); /* Dump to debug output */
resource *(*lookup)(resource *, unsigned long); /* Look up address (only for debugging) */
size_t (*memsize)(resource *); /* Return size of memory used by the resource, may be NULL */
struct resmem (*memsize)(resource *); /* Return size of memory used by the resource, may be NULL */
};
/* Estimate of system allocator overhead per item, for memory consumption stats */
#define ALLOC_OVERHEAD 8
#define ALLOC_OVERHEAD 16
/* Generic resource manipulation */
typedef struct pool pool;
typedef struct pool {
resource r;
list inside;
const char *name;
} pool;
void resource_init(void);
pool *rp_new(pool *, const char *); /* Create new pool */
pool *rp_newf(pool *, const char *, ...); /* Create a new pool with a formatted string as its name */
void rfree(void *); /* Free single resource */
void rdump(void *); /* Dump to debug output */
size_t rmemsize(void *res); /* Return size of memory used by the resource */
struct resmem rmemsize(void *res); /* Return size of memory used by the resource */
void rlookup(unsigned long); /* Look up address (only for debugging) */
void rmove(void *, pool *); /* Move to a different pool */
@ -53,7 +65,6 @@ extern pool root_pool;
void *mb_alloc(pool *, unsigned size);
void *mb_allocz(pool *, unsigned size);
void *mb_realloc(void *m, unsigned size);
void mb_move(void *, pool *);
void mb_free(void *);
/* Memory pools with linear allocation */
@ -63,9 +74,10 @@ typedef struct linpool linpool;
typedef struct lp_state {
void *current, *large;
byte *ptr;
uint total_large;
} lp_state;
linpool *lp_new(pool *, unsigned blk);
linpool *lp_new(pool *);
void *lp_alloc(linpool *, unsigned size); /* Aligned */
void *lp_allocu(linpool *, unsigned size); /* Unaligned */
void *lp_allocz(linpool *, unsigned size); /* With clear */
@ -73,10 +85,23 @@ void lp_flush(linpool *); /* Free everything, but leave linpool */
void lp_save(linpool *m, lp_state *p); /* Save state */
void lp_restore(linpool *m, lp_state *p); /* Restore state */
extern const int lp_chunk_size;
#define LP_GAS 1024
#define LP_GOOD_SIZE(x) (((x + LP_GAS - 1) & (~(LP_GAS - 1))) - lp_chunk_size)
#define lp_new_default(p) lp_new(p, 0)
struct tmp_resources {
pool *pool, *parent;
linpool *lp;
};
extern _Thread_local struct tmp_resources tmp_res;
#define tmp_linpool tmp_res.lp
#define tmp_alloc(sz) lp_alloc(tmp_linpool, sz)
#define tmp_allocu(sz) lp_allocu(tmp_linpool, sz)
#define tmp_allocz(sz) lp_allocz(tmp_linpool, sz)
void tmp_init(pool *p);
void tmp_flush(void);
#define lp_new_default lp_new
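
A sketch of the intended use of the per-thread temporary allocator declared above; the buffer size and message are illustrative. Everything allocated this way lives only until the next tmp_flush(), which the timer loop in lib/timer.c, for example, calls after every hook:

  byte *scratch = tmp_alloc(len);            /* aligned scratch buffer */
  char *msg = lp_sprintf(tmp_linpool, "processed %u items", n);
  /* ... use scratch and msg within the current hook ... */
  /* no explicit free; the whole linpool is reclaimed by tmp_flush() */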
/* Slabs */
@ -85,7 +110,7 @@ typedef struct slab slab;
slab *sl_new(pool *, unsigned size);
void *sl_alloc(slab *);
void *sl_allocz(slab *);
void sl_free(slab *, void *);
void sl_free(void *);
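
Sketch of the slab API after the signature change: the owning slab is now recovered from the page header, so sl_free() takes only the object. The item type is hypothetical:

  slab *s = sl_new(&root_pool, sizeof(struct my_item));
  struct my_item *it = sl_allocz(s);
  /* ... */
  sl_free(it);                               /* formerly sl_free(s, it) */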
/*
* Low-level memory allocation functions, please don't use
@ -94,12 +119,13 @@ void sl_free(slab *, void *);
void buffer_realloc(void **buf, unsigned *size, unsigned need, unsigned item_size);
extern long page_size;
/* Allocator of whole pages; for use in slabs and other high-level allocators. */
void *alloc_page(pool *);
void free_page(pool *, void *);
#define PAGE_HEAD(x) ((void *) (((intptr_t) (x)) & ~(page_size-1)))
#define PAGE_HEAD(x) ((void *) (((uintptr_t) (x)) & ~(page_size-1)))
extern long page_size;
void *alloc_page(void);
void free_page(void *);
void resource_sys_init(void);
#ifdef HAVE_LIBDMALLOC
/*

446
lib/route.h Normal file
View File

@ -0,0 +1,446 @@
/*
* BIRD Internet Routing Daemon -- Routing data structures
*
* (c) 1998--2000 Martin Mares <mj@ucw.cz>
* (c) 2022 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
#ifndef _BIRD_LIB_ROUTE_H_
#define _BIRD_LIB_ROUTE_H_
#include "lib/type.h"
struct network;
struct proto;
struct cli;
typedef struct rte {
struct ea_list *attrs; /* Attributes of this route */
const net_addr *net; /* Network this RTE belongs to */
struct rte_src *src; /* Route source that created the route */
struct rt_import_hook *sender; /* Import hook used to send the route to the routing table */
btime lastmod; /* Last modified (set by table) */
u32 id; /* Table specific route id */
byte flags; /* Table-specific flags */
byte pflags; /* Protocol-specific flags */
u8 generation; /* If this route import is based on another previously exported route,
this value should be 1 + MAX(generation of the parent routes).
Otherwise the route is independent and this value is zero. */
u8 stale_cycle; /* Auxiliary value for route refresh */
} rte;
#define REF_FILTERED 2 /* Route is rejected by import filter */
#define REF_PENDING 32 /* Route has not propagated completely yet */
/* Route is valid for propagation (may depend on other flags in the future), accepts NULL */
static inline int rte_is_valid(rte *r) { return r && !(r->flags & REF_FILTERED); }
/* Route just has REF_FILTERED flag */
static inline int rte_is_filtered(rte *r) { return !!(r->flags & REF_FILTERED); }
struct rte_src {
struct rte_src *next; /* Hash chain */
struct proto *proto; /* Protocol the source is based on */
u32 private_id; /* Private ID, assigned by the protocol */
u32 global_id; /* Globally unique ID of the source */
unsigned uc; /* Use count */
};
struct rte_src *rt_find_source(struct proto *p, u32 id);
struct rte_src *rt_get_source(struct proto *p, u32 id);
struct rte_src *rt_find_source_global(u32 id);
static inline void rt_lock_source(struct rte_src *src) { src->uc++; }
static inline void rt_unlock_source(struct rte_src *src) { src->uc--; }
void rt_prune_sources(void);
/*
* Route Attributes
*
* Beware: All standard BGP attributes must be represented here instead
* of making them local to the route. This is needed to ensure proper
* construction of BGP route attribute lists.
*/
/* Nexthop structure */
struct nexthop {
ip_addr gw; /* Next hop */
struct iface *iface; /* Outgoing interface */
byte flags;
byte weight;
byte labels; /* Number of all labels */
u32 label[0];
};
/* For packing one into eattrs */
struct nexthop_adata {
struct adata ad;
/* There is either a set of nexthops or a special destination (RTD_*) */
union {
struct nexthop nh;
uint dest;
};
};
#define NEXTHOP_DEST_SIZE (OFFSETOF(struct nexthop_adata, dest) + sizeof(uint) - OFFSETOF(struct adata, data))
#define NEXTHOP_DEST_LITERAL(x) ((struct nexthop_adata) { \
.ad.length = NEXTHOP_DEST_SIZE, .dest = (x), })
#define RNF_ONLINK 0x1 /* Gateway is onlink regardless of IP ranges */
#define RTS_STATIC 1 /* Normal static route */
#define RTS_INHERIT 2 /* Route inherited from kernel */
#define RTS_DEVICE 3 /* Device route */
#define RTS_STATIC_DEVICE 4 /* Static device route */
#define RTS_REDIRECT 5 /* Learned via redirect */
#define RTS_RIP 6 /* RIP route */
#define RTS_OSPF 7 /* OSPF route */
#define RTS_OSPF_IA 8 /* OSPF inter-area route */
#define RTS_OSPF_EXT1 9 /* OSPF external route type 1 */
#define RTS_OSPF_EXT2 10 /* OSPF external route type 2 */
#define RTS_BGP 11 /* BGP route */
#define RTS_PIPE 12 /* Inter-table wormhole */
#define RTS_BABEL 13 /* Babel route */
#define RTS_RPKI 14 /* Route Origin Authorization */
#define RTS_PERF 15 /* Perf checker */
#define RTS_MAX 16
#define RTD_NONE 0 /* Undefined next hop */
#define RTD_UNICAST 1 /* A standard next hop */
#define RTD_BLACKHOLE 2 /* Silently drop packets */
#define RTD_UNREACHABLE 3 /* Reject as unreachable */
#define RTD_PROHIBIT 4 /* Administratively prohibited */
#define RTD_MAX 5
extern const char * rta_dest_names[RTD_MAX];
static inline const char *rta_dest_name(uint n)
{ return (n < RTD_MAX) ? rta_dest_names[n] : "???"; }
/*
* Extended Route Attributes
*/
typedef struct eattr {
word id; /* EA_CODE(PROTOCOL_..., protocol-dependent ID) */
byte flags; /* Protocol-dependent flags */
byte type; /* Attribute type */
byte rfu:5;
byte originated:1; /* The attribute has originated locally */
byte fresh:1; /* An uncached attribute (e.g. modified in export filter) */
byte undef:1; /* Explicitly undefined */
PADDING(unused, 3, 3);
union bval u;
} eattr;
#define EA_CODE_MASK 0xffff
#define EA_ALLOW_UNDEF 0x10000 /* ea_find: allow EAF_TYPE_UNDEF */
#define EA_BIT(n) ((n) << 24) /* Used in bitfield accessors */
#define EA_BIT_GET(ea) ((ea) >> 24)
typedef struct ea_list {
struct ea_list *next; /* In case we have an override list */
byte flags; /* Flags: EALF_... */
byte rfu;
word count; /* Number of attributes */
eattr attrs[0]; /* Attribute definitions themselves */
} ea_list;
struct ea_storage {
struct ea_storage *next_hash; /* Next in hash chain */
struct ea_storage **pprev_hash; /* Previous in hash chain */
u32 uc; /* Use count */
u32 hash_key; /* List hash */
ea_list l[0]; /* The list itself */
};
#define EALF_SORTED 1 /* Attributes are sorted by code */
#define EALF_BISECT 2 /* Use interval bisection for searching */
#define EALF_CACHED 4 /* List is cached */
struct ea_class {
#define EA_CLASS_INSIDE \
const char *name; /* Name (both print and filter) */ \
struct symbol *sym; /* Symbol to export to configs */ \
uint id; /* Autoassigned attribute ID */ \
uint uc; /* Reference count */ \
btype type; /* Data type ID */ \
uint readonly:1; /* This attribute can't be changed by filters */ \
uint conf:1; /* Requested by config */ \
uint hidden:1; /* Technical attribute, do not show, do not expose to filters */ \
void (*format)(const eattr *ea, byte *buf, uint size); \
void (*stored)(const eattr *ea); /* When stored into global hash */ \
void (*freed)(const eattr *ea); /* When released from global hash */ \
EA_CLASS_INSIDE;
};
struct ea_class_ref {
resource r;
struct ea_class *class;
};
void ea_register_init(struct ea_class *);
struct ea_class_ref *ea_register_alloc(pool *, struct ea_class);
#define EA_REGISTER_ALL_HELPER(x) ea_register_init(x);
#define EA_REGISTER_ALL(...) MACRO_FOREACH(EA_REGISTER_ALL_HELPER, __VA_ARGS__)
struct ea_class *ea_class_find_by_id(uint id);
struct ea_class *ea_class_find_by_name(const char *name);
static inline struct ea_class *ea_class_self(struct ea_class *self) { return self; }
#define ea_class_find(_arg) _Generic((_arg), \
uint: ea_class_find_by_id, \
word: ea_class_find_by_id, \
char *: ea_class_find_by_name, \
const char *: ea_class_find_by_name, \
struct ea_class *: ea_class_self)(_arg)
struct ea_walk_state {
ea_list *eattrs; /* Current ea_list, initially set by caller */
eattr *ea; /* Current eattr, initially NULL */
u32 visited[4]; /* Bitfield, limiting max to 128 */
};
#define ea_find(_l, _arg) _Generic((_arg), uint: ea_find_by_id, struct ea_class *: ea_find_by_class, char *: ea_find_by_name)(_l, _arg)
eattr *ea_find_by_id(ea_list *, unsigned ea);
static inline eattr *ea_find_by_class(ea_list *l, const struct ea_class *def)
{ return ea_find_by_id(l, def->id); }
static inline eattr *ea_find_by_name(ea_list *l, const char *name)
{
const struct ea_class *def = ea_class_find_by_name(name);
return def ? ea_find_by_class(l, def) : NULL;
}
#define ea_get_int(_l, _ident, _def) ({ \
struct ea_class *cls = ea_class_find((_ident)); \
ASSERT_DIE(cls->type & EAF_EMBEDDED); \
const eattr *ea = ea_find((_l), cls->id); \
(ea ? ea->u.data : (_def)); \
})
#define ea_get_ip(_l, _ident, _def) ({ \
struct ea_class *cls = ea_class_find((_ident)); \
ASSERT_DIE(cls->type == T_IP); \
const eattr *ea = ea_find((_l), cls->id); \
(ea ? *((const ip_addr *) ea->u.ptr->data) : (_def)); \
})
eattr *ea_walk(struct ea_walk_state *s, uint id, uint max);
void ea_dump(ea_list *);
int ea_same(ea_list *x, ea_list *y); /* Test whether two ea_lists are identical */
uint ea_hash(ea_list *e); /* Calculate 16-bit hash value */
ea_list *ea_append(ea_list *to, ea_list *what);
void ea_format_bitfield(const struct eattr *a, byte *buf, int bufsize, const char **names, int min, int max);
/* Normalize ea_list; allocates the result from tmp_linpool */
ea_list *ea_normalize(ea_list *e, int overlay);
uint ea_list_size(ea_list *);
void ea_list_copy(ea_list *dest, ea_list *src, uint size);
#define EA_LOCAL_LIST(N) struct { ea_list l; eattr a[N]; }
#define EA_LITERAL_EMBEDDED(_class, _flags, _val) ({ \
btype _type = (_class)->type; \
ASSERT_DIE(_type & EAF_EMBEDDED); \
EA_LITERAL_GENERIC((_class)->id, _type, _flags, .u.i = _val); \
})
#define EA_LITERAL_STORE_ADATA(_class, _flags, _buf, _len) ({ \
btype _type = (_class)->type; \
ASSERT_DIE(!(_type & EAF_EMBEDDED)); \
EA_LITERAL_GENERIC((_class)->id, _type, _flags, .u.ad = tmp_store_adata((_buf), (_len))); \
})
#define EA_LITERAL_DIRECT_ADATA(_class, _flags, _adata) ({ \
btype _type = (_class)->type; \
ASSERT_DIE(!(_type & EAF_EMBEDDED)); \
EA_LITERAL_GENERIC((_class)->id, _type, _flags, .u.ad = _adata); \
})
#define EA_LITERAL_GENERIC(_id, _type, _flags, ...) \
((eattr) { .id = _id, .type = _type, .flags = _flags, __VA_ARGS__ })
static inline eattr *
ea_set_attr(ea_list **to, eattr a)
{
EA_LOCAL_LIST(1) *ea = tmp_alloc(sizeof(*ea));
*ea = (typeof(*ea)) {
.l.flags = EALF_SORTED,
.l.count = 1,
.l.next = *to,
.a[0] = a,
};
*to = &ea->l;
return &ea->a[0];
}
static inline void
ea_unset_attr(ea_list **to, _Bool local, const struct ea_class *def)
{
ea_set_attr(to, EA_LITERAL_GENERIC(def->id, 0, 0,
.fresh = local, .originated = local, .undef = 1));
}
static inline void
ea_set_attr_u32(ea_list **to, const struct ea_class *def, uint flags, u64 data)
{ ea_set_attr(to, EA_LITERAL_EMBEDDED(def, flags, data)); }
static inline void
ea_set_attr_data(ea_list **to, const struct ea_class *def, uint flags, const void *data, uint len)
{ ea_set_attr(to, EA_LITERAL_STORE_ADATA(def, flags, data, len)); }
static inline void
ea_copy_attr(ea_list **to, ea_list *from, const struct ea_class *def)
{
eattr *e = ea_find_by_class(from, def);
if (e)
if (e->type & EAF_EMBEDDED)
ea_set_attr_u32(to, def, e->flags, e->u.data);
else
ea_set_attr_data(to, def, e->flags, e->u.ptr->data, e->u.ptr->length);
else
ea_unset_attr(to, 0, def);
}
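
A short sketch of building and querying an attribute list with the helpers above; the list is built on the temporary linpool and the preference value is arbitrary (ea_gen_preference is declared just below):

  ea_list *eal = NULL;
  ea_set_attr_u32(&eal, &ea_gen_preference, 0, 120);
  u32 pref = ea_get_int(eal, &ea_gen_preference, 0);   /* yields 120 */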
/*
* Common route attributes
*/
/* Preference: first-order comparison */
extern struct ea_class ea_gen_preference;
static inline u32 rt_get_preference(rte *rt)
{ return ea_get_int(rt->attrs, &ea_gen_preference, 0); }
/* IGP metric: second-order comparison */
extern struct ea_class ea_gen_igp_metric;
u32 rt_get_igp_metric(const rte *rt);
#define IGP_METRIC_UNKNOWN 0x80000000 /* Default igp_metric used when no other
protocol-specific metric is available */
/* From: Advertising router */
extern struct ea_class ea_gen_from;
/* Source: An old method to determine the route source protocol and kind.
 * To be superseded in the near future by something more informative. */
extern struct ea_class ea_gen_source;
static inline u32 rt_get_source_attr(const rte *rt)
{ return ea_get_int(rt->attrs, &ea_gen_source, 0); }
/* Flowspec validation result */
enum flowspec_valid {
FLOWSPEC_UNKNOWN = 0,
FLOWSPEC_VALID = 1,
FLOWSPEC_INVALID = 2,
FLOWSPEC__MAX,
};
extern const char * flowspec_valid_names[FLOWSPEC__MAX];
static inline const char *flowspec_valid_name(enum flowspec_valid v)
{ return (v < FLOWSPEC__MAX) ? flowspec_valid_names[v] : "???"; }
extern struct ea_class ea_gen_flowspec_valid;
static inline enum flowspec_valid rt_get_flowspec_valid(rte *rt)
{ return ea_get_int(rt->attrs, &ea_gen_flowspec_valid, FLOWSPEC_UNKNOWN); }
/* Next hop: For now, stored as adata */
extern struct ea_class ea_gen_nexthop;
static inline void ea_set_dest(struct ea_list **to, uint flags, uint dest)
{
struct nexthop_adata nhad = NEXTHOP_DEST_LITERAL(dest);
ea_set_attr_data(to, &ea_gen_nexthop, flags, &nhad.ad.data, nhad.ad.length);
}
/* Next hop structures */
#define NEXTHOP_ALIGNMENT (_Alignof(struct nexthop))
#define NEXTHOP_MAX_SIZE (sizeof(struct nexthop) + sizeof(u32)*MPLS_MAX_LABEL_STACK)
#define NEXTHOP_SIZE(_nh) NEXTHOP_SIZE_CNT(((_nh)->labels))
#define NEXTHOP_SIZE_CNT(cnt) BIRD_ALIGN((sizeof(struct nexthop) + sizeof(u32) * (cnt)), NEXTHOP_ALIGNMENT)
#define nexthop_size(nh) NEXTHOP_SIZE((nh))
#define NEXTHOP_NEXT(_nh) ((void *) (_nh) + NEXTHOP_SIZE(_nh))
#define NEXTHOP_END(_nhad) ((_nhad)->ad.data + (_nhad)->ad.length)
#define NEXTHOP_VALID(_nh, _nhad) ((void *) (_nh) < (void *) NEXTHOP_END(_nhad))
#define NEXTHOP_ONE(_nhad) (NEXTHOP_NEXT(&(_nhad)->nh) == NEXTHOP_END(_nhad))
#define NEXTHOP_WALK(_iter, _nhad) for ( \
struct nexthop *_iter = &(_nhad)->nh; \
(void *) _iter < (void *) NEXTHOP_END(_nhad); \
_iter = NEXTHOP_NEXT(_iter))
static inline int nexthop_same(struct nexthop_adata *x, struct nexthop_adata *y)
{ return adata_same(&x->ad, &y->ad); }
struct nexthop_adata *nexthop_merge(struct nexthop_adata *x, struct nexthop_adata *y, int max, linpool *lp);
struct nexthop_adata *nexthop_sort(struct nexthop_adata *x, linpool *lp);
int nexthop_is_sorted(struct nexthop_adata *x);
#define NEXTHOP_IS_REACHABLE(nhad) ((nhad)->ad.length > NEXTHOP_DEST_SIZE)
/* Route has regular, reachable nexthop (i.e. not RTD_UNREACHABLE and like) */
static inline int rte_is_reachable(rte *r)
{
eattr *nhea = ea_find(r->attrs, &ea_gen_nexthop);
if (!nhea)
return 0;
struct nexthop_adata *nhad = (void *) nhea->u.ptr;
return NEXTHOP_IS_REACHABLE(nhad);
}
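
Sketch of iterating the next hops of a route with the macros above; the route r and the log message are illustrative:

  eattr *nhea = ea_find(r->attrs, &ea_gen_nexthop);
  struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
  if (nhad && NEXTHOP_IS_REACHABLE(nhad))
    NEXTHOP_WALK(nh, nhad)
      log(L_INFO "nexthop %I on %s", nh->gw, nh->iface ? nh->iface->name : "(none)");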
static inline int nhea_dest(eattr *nhea)
{
if (!nhea)
return RTD_NONE;
struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
if (NEXTHOP_IS_REACHABLE(nhad))
return RTD_UNICAST;
else
return nhad->dest;
}
static inline int rte_dest(const rte *r)
{
return nhea_dest(ea_find(r->attrs, &ea_gen_nexthop));
}
void rta_init(void);
ea_list *ea_lookup(ea_list *, int overlay); /* Get a cached (and normalized) variant of this attribute list */
static inline int ea_is_cached(const ea_list *r) { return r->flags & EALF_CACHED; }
static inline struct ea_storage *ea_get_storage(ea_list *r)
{
ASSERT_DIE(ea_is_cached(r));
return SKIP_BACK(struct ea_storage, l[0], r);
}
static inline ea_list *ea_clone(ea_list *r) { ea_get_storage(r)->uc++; return r; }
void ea__free(struct ea_storage *r);
static inline void ea_free(ea_list *l) {
if (!l) return;
struct ea_storage *r = ea_get_storage(l);
if (!--r->uc) ea__free(r);
}
void ea_dump(ea_list *);
void ea_dump_all(void);
void ea_show_list(struct cli *, ea_list *);
#define rta_lookup ea_lookup
#define rta_is_cached ea_is_cached
#define rta_clone ea_clone
#define rta_free ea_free
#endif

View File

@ -32,6 +32,7 @@
#include "nest/bird.h"
#include "lib/resource.h"
#include "lib/string.h"
#include "lib/tlists.h"
#undef FAKE_SLAB /* Turn on if you want to debug memory allocations */
@ -42,7 +43,7 @@
static void slab_free(resource *r);
static void slab_dump(resource *r);
static resource *slab_lookup(resource *r, unsigned long addr);
static size_t slab_memsize(resource *r);
static struct resmem slab_memsize(resource *r);
#ifdef FAKE_SLAB
@ -98,7 +99,7 @@ sl_allocz(slab *s)
}
void
sl_free(slab *s, void *oo)
sl_free(void *oo)
{
struct sl_obj *o = SKIP_BACK(struct sl_obj, data, oo);
@ -128,7 +129,7 @@ slab_dump(resource *r)
debug("(%d objects per %d bytes)\n", cnt, s->size);
}
static size_t
static struct resmem
slab_memsize(resource *r)
{
slab *s = (slab *) r;
@ -138,7 +139,10 @@ slab_memsize(resource *r)
WALK_LIST(o, s->objs)
cnt++;
return ALLOC_OVERHEAD + sizeof(struct slab) + cnt * (ALLOC_OVERHEAD + s->size);
return (struct resmem) {
.effective = cnt * s->size,
.overhead = ALLOC_OVERHEAD + sizeof(struct slab) + cnt * ALLOC_OVERHEAD,
};
}
@ -150,12 +154,38 @@ slab_memsize(resource *r)
#define MAX_EMPTY_HEADS 1
enum sl_head_state {
slh_empty = 2,
slh_partial = 0,
slh_full = 1,
} PACKED;
struct sl_head {
struct slab *slab;
TLIST_NODE(sl_head, struct sl_head) n;
u16 num_full;
enum sl_head_state state;
u32 used_bits[0];
};
struct sl_alignment { /* Magic structure for testing of alignment */
byte data;
int x[0];
};
#define TLIST_PREFIX sl_head
#define TLIST_TYPE struct sl_head
#define TLIST_ITEM n
#define TLIST_WANT_WALK
#define TLIST_WANT_ADD_HEAD
#include "lib/tlists.h"
struct slab {
resource r;
pool *p;
uint obj_size, head_size, head_bitfield_len;
uint objs_per_slab, num_empty_heads, data_size;
list empty_heads, partial_heads, full_heads;
struct sl_head_list empty_heads, partial_heads, full_heads;
};
static struct resclass sl_class = {
@ -167,18 +197,15 @@ static struct resclass sl_class = {
slab_memsize
};
struct sl_head {
node n;
u32 num_full;
u32 used_bits[0];
};
#define SL_GET_HEAD(x) PAGE_HEAD(x)
struct sl_alignment { /* Magic structure for testing of alignment */
byte data;
int x[0];
};
#define SL_HEAD_CHANGE_STATE(_s, _h, _from, _to) ({ \
ASSERT_DIE(_h->state == slh_##_from); \
sl_head_rem_node(&_s->_from##_heads, _h); \
sl_head_add_head(&_s->_to##_heads, _h); \
_h->state = slh_##_to; \
})
#define SL_GET_HEAD(x) ((struct sl_head *) PAGE_HEAD(x))
/**
* sl_new - create a new Slab
@ -192,10 +219,9 @@ slab *
sl_new(pool *p, uint size)
{
slab *s = ralloc(p, &sl_class);
s->p = p;
uint align = sizeof(struct sl_alignment);
if (align < sizeof(int))
align = sizeof(int);
if (align < sizeof(void *))
align = sizeof(void *);
s->data_size = size;
size = (size + align - 1) / align * align;
s->obj_size = size;
@ -216,9 +242,6 @@ sl_new(pool *p, uint size)
bug("Slab: object too large");
s->num_empty_heads = 0;
init_list(&s->empty_heads);
init_list(&s->partial_heads);
init_list(&s->full_heads);
return s;
}
@ -235,8 +258,7 @@ sl_alloc(slab *s)
struct sl_head *h;
redo:
h = HEAD(s->partial_heads);
if (!h->n.next)
if (!(h = s->partial_heads.first))
goto no_partial;
okay:
for (uint i=0; i<s->head_bitfield_len; i++)
@ -256,26 +278,27 @@ okay:
return out;
}
rem_node(&h->n);
add_tail(&s->full_heads, &h->n);
SL_HEAD_CHANGE_STATE(s, h, partial, full);
goto redo;
no_partial:
h = HEAD(s->empty_heads);
if (h->n.next)
if (h = s->empty_heads.first)
{
rem_node(&h->n);
add_head(&s->partial_heads, &h->n);
SL_HEAD_CHANGE_STATE(s, h, empty, partial);
s->num_empty_heads--;
goto okay;
}
h = alloc_page(s->p);
h = alloc_page();
ASSERT_DIE(SL_GET_HEAD(h) == h);
#ifdef POISON
memset(h, 0xba, page_size);
#endif
ASSERT_DIE(SL_GET_HEAD(h) == h);
memset(h, 0, s->head_size);
add_head(&s->partial_heads, &h->n);
h->slab = s;
sl_head_add_head(&s->partial_heads, h);
goto okay;
}
@ -304,9 +327,10 @@ sl_allocz(slab *s)
* and returns it back to the Slab @s.
*/
void
sl_free(slab *s, void *oo)
sl_free(void *oo)
{
struct sl_head *h = SL_GET_HEAD(oo);
struct slab *s = h->slab;
#ifdef POISON
memset(oo, 0xdb, s->data_size);
@ -319,24 +343,22 @@ sl_free(slab *s, void *oo)
h->used_bits[pos / 32] &= ~(1 << (pos % 32));
if (h->num_full-- == s->objs_per_slab)
{
rem_node(&h->n);
add_head(&s->partial_heads, &h->n);
}
if ((h->num_full-- == s->objs_per_slab) && (h->state == slh_full))
SL_HEAD_CHANGE_STATE(s, h, full, partial);
else if (!h->num_full)
{
rem_node(&h->n);
sl_head_rem_node(&s->partial_heads, h);
if (s->num_empty_heads >= MAX_EMPTY_HEADS)
{
#ifdef POISON
memset(h, 0xde, page_size);
#endif
free_page(s->p, h);
free_page(h);
}
else
{
add_head(&s->empty_heads, &h->n);
sl_head_add_head(&s->empty_heads, h);
h->state = slh_empty;
s->num_empty_heads++;
}
}
@ -346,14 +368,13 @@ static void
slab_free(resource *r)
{
slab *s = (slab *) r;
struct sl_head *h, *g;
WALK_LIST_DELSAFE(h, g, s->empty_heads)
free_page(s->p, h);
WALK_LIST_DELSAFE(h, g, s->partial_heads)
free_page(s->p, h);
WALK_LIST_DELSAFE(h, g, s->full_heads)
free_page(s->p, h);
WALK_TLIST_DELSAFE(sl_head, h, &s->empty_heads)
free_page(h);
WALK_TLIST_DELSAFE(sl_head, h, &s->partial_heads)
free_page(h);
WALK_TLIST_DELSAFE(sl_head, h, &s->full_heads)
free_page(h);
}
static void
@ -361,45 +382,53 @@ slab_dump(resource *r)
{
slab *s = (slab *) r;
int ec=0, pc=0, fc=0;
struct sl_head *h;
WALK_LIST(h, s->empty_heads)
WALK_TLIST(sl_head, h, &s->empty_heads)
ec++;
WALK_LIST(h, s->partial_heads)
WALK_TLIST(sl_head, h, &s->partial_heads)
pc++;
WALK_LIST(h, s->full_heads)
WALK_TLIST(sl_head, h, &s->full_heads)
fc++;
debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec, pc, fc, s->objs_per_slab, s->obj_size);
}
static size_t
static struct resmem
slab_memsize(resource *r)
{
slab *s = (slab *) r;
size_t heads = 0;
struct sl_head *h;
WALK_LIST(h, s->empty_heads)
heads++;
WALK_LIST(h, s->partial_heads)
heads++;
WALK_LIST(h, s->full_heads)
WALK_TLIST(sl_head, h, &s->full_heads)
heads++;
// return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + page_size);
return ALLOC_OVERHEAD + sizeof(struct slab); /* The page sizes are accounted for in the pool */
size_t items = heads * s->objs_per_slab;
WALK_TLIST(sl_head, h, &s->partial_heads)
{
heads++;
items += h->num_full;
}
WALK_TLIST(sl_head, h, &s->empty_heads)
heads++;
size_t eff = items * s->data_size;
return (struct resmem) {
.effective = eff,
.overhead = ALLOC_OVERHEAD + sizeof(struct slab) + heads * page_size - eff,
};
}
static resource *
slab_lookup(resource *r, unsigned long a)
{
slab *s = (slab *) r;
struct sl_head *h;
WALK_LIST(h, s->partial_heads)
WALK_TLIST(sl_head, h, &s->partial_heads)
if ((unsigned long) h < a && (unsigned long) h + page_size < a)
return r;
WALK_LIST(h, s->full_heads)
WALK_TLIST(sl_head, h, &s->full_heads)
if ((unsigned long) h < a && (unsigned long) h + page_size < a)
return r;
return NULL;

171
lib/slab_test.c Normal file
View File

@ -0,0 +1,171 @@
/*
* BIRD Library -- Slab Alloc / Dealloc Tests
*
* (c) 2022 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
#include "test/birdtest.h"
#include "lib/resource.h"
#include "lib/bitops.h"
static const int sizes[] = {
8, 12, 18, 27, 41, 75, 131, 269,
};
#define TEST_SIZE 1024 * 128
#define ITEMS(sz) TEST_SIZE / ( (sz) >> u32_log2((sz))/2 )
struct test_request {
int size;
enum strategy {
TEST_NONE,
TEST_FORWARDS,
TEST_BACKWARDS,
TEST_RANDOM,
TEST_MIXED,
TEST__MAX,
} strategy;
};
const char * const strategy_name[TEST__MAX] = {
[TEST_FORWARDS] = "forwards",
[TEST_BACKWARDS] = "backwards",
[TEST_RANDOM] = "random",
[TEST_MIXED] = "mixed",
};
static inline byte *test_alloc(slab *s, int sz, struct resmem *sliz)
{
byte *out = sl_alloc(s);
for (int p=0; p < sz; p++)
out[p] = p & 0xff;
struct resmem ns = rmemsize((resource *) s);
bt_assert(sliz->effective + sz == ns.effective);
bt_assert((sliz->overhead - sz - ns.overhead) % page_size == 0);
*sliz = ns;
return out;
}
static inline void test_free(slab *s, byte *block, int sz, struct resmem *sliz)
{
for (int p=0; p < sz; p++)
{
bt_assert(block[p] == (p & 0xff));
block[p]++;
}
sl_free(block);
struct resmem ns = rmemsize((resource *) s);
bt_assert(sliz->effective - sz == ns.effective);
bt_assert((sliz->overhead + sz - ns.overhead) % page_size == 0);
*sliz = ns;
}
static inline struct resmem get_memsize(slab *s)
{
struct resmem sz = rmemsize((resource *) s);
bt_assert(sz.effective == 0);
return sz;
}
static int
t_slab(const void *data)
{
const struct test_request *tr = data;
int sz = tr->size;
slab *s = sl_new(&root_pool, sz);
struct resmem sliz = get_memsize(s);
int n = ITEMS(sz);
byte **block = mb_alloc(&root_pool, n * sizeof(*block));
switch (tr->strategy) {
case TEST_FORWARDS:
for (int i = 0; i < n; i++)
block[i] = test_alloc(s, sz, &sliz);
for (int i = 0; i < n; i++)
test_free(s, block[i], sz, &sliz);
break;
case TEST_BACKWARDS:
for (int i = 0; i < n; i++)
block[i] = test_alloc(s, sz, &sliz);
for (int i = n - 1; i >= 0; i--)
test_free(s, block[i], sz, &sliz);
break;
case TEST_RANDOM:
for (int i = 0; i < n; i++)
block[i] = test_alloc(s, sz, &sliz);
for (int i = 0; i < n; i++)
{
int pos = bt_random() % (n - i);
test_free(s, block[pos], sz, &sliz);
if (pos != n - i - 1)
block[pos] = block[n - i - 1];
}
break;
case TEST_MIXED:
{
int cur = 0;
int pending = n;
while (cur + pending > 0) {
int action = bt_random() % (cur + pending);
if (action < cur) {
test_free(s, block[action], sz, &sliz);
if (action != --cur)
block[action] = block[cur];
} else {
block[cur++] = test_alloc(s, sz, &sliz);
pending--;
}
}
break;
}
default: bug("This shouldn't happen");
}
mb_free(block);
return 1;
}
int main(int argc, char *argv[])
{
bt_init(argc, argv);
struct test_request tr;
for (uint i = 0; i < sizeof(sizes) / sizeof(*sizes); i++)
for (uint strategy = TEST_FORWARDS; strategy < TEST__MAX; strategy++)
{
tr = (struct test_request) {
.size = sizes[i],
.strategy = strategy,
};
bt_test_suite_arg(t_slab, &tr, "Slab allocator test, size=%d, strategy=%s",
tr.size, strategy_name[strategy]);
}
return bt_exit_value();
}

View File

@ -57,7 +57,6 @@ typedef struct birdsock {
uint fast_rx; /* RX has higher priority in event loop */
uint rbsize;
int (*rx_hook)(struct birdsock *, uint size); /* NULL=receiving turned off, returns 1 to clear rx buffer */
struct event_cork *cork; /* Cork to temporarily stop receiving data */
byte *tbuf, *tpos; /* NULL=allocate automatically */
byte *ttx; /* Internal */
@ -126,6 +125,7 @@ extern int sk_priority_control; /* Suggested priority for control traffic, shou
#define SKF_TTL_RX 0x08 /* Report TTL / Hop Limit for RX packets */
#define SKF_BIND 0x10 /* Bind datagram socket to given source address */
#define SKF_HIGH_PORT 0x20 /* Choose port from high range if possible */
#define SKF_FREEBIND 0x40 /* Allow socket to bind to a nonlocal address */
#define SKF_THREAD 0x100 /* Socked used in thread, Do not add to main loop */
#define SKF_TRUNCATED 0x200 /* Received packet was truncated, set by IO layer */

View File

@ -20,6 +20,11 @@ int bvsprintf(char *str, const char *fmt, va_list args);
int bsnprintf(char *str, int size, const char *fmt, ...);
int bvsnprintf(char *str, int size, const char *fmt, va_list args);
char *mb_sprintf(pool *p, const char *fmt, ...);
char *mb_vsprintf(pool *p, const char *fmt, va_list args);
char *lp_sprintf(linpool *p, const char *fmt, ...);
char *lp_vsprintf(linpool *p, const char *fmt, va_list args);
int buffer_vprint(buffer *buf, const char *fmt, va_list args);
int buffer_print(buffer *buf, const char *fmt, ...);
void buffer_puts(buffer *buf, const char *str);

View File

@ -32,7 +32,6 @@
#include "nest/bird.h"
#include "lib/coro.h"
#include "lib/heap.h"
#include "lib/resource.h"
#include "lib/timer.h"
@ -117,7 +116,7 @@ tm_set_in_tl(timer *t, btime when, struct timeloop *local_timeloop)
t->loop = local_timeloop;
if ((t->index == 1) && (local_timeloop->coro != this_coro))
if (t->index == 1)
birdloop_ping(local_timeloop->loop);
}
@ -193,6 +192,7 @@ timers_fire(struct timeloop *loop, int io_log)
io_log_event(t->hook, t->data);
t->hook(t);
tmp_flush();
}
}

View File

@ -41,7 +41,6 @@ struct timeloop
BUFFER_(timer *) timers;
struct domain_generic *domain;
struct birdloop *loop;
struct coroutine *coro;
};
#define TLOCK_TIMER_ASSERT(loop) ASSERT_DIE((loop)->domain && DG_IS_LOCKED((loop)->domain))

172
lib/tlists.h Normal file
View File

@ -0,0 +1,172 @@
/*
* BIRD Library -- Typed Linked Lists
*
* (c) 2022 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*
*
* This implementation of linked lists forces its members to be
* typed. On the other hand, it needs to be implemented as ugly macros to
* keep the needed genericity.
*
* Usage:
* 1. Include this file
* 2. Define the node structure
* 3. For every list type you need to define:
* A. #define TLIST_PREFIX and other macros
* B. Include this file once again
*
* Macros to define:
* TLIST_PREFIX: prefix to prepend to everything generated
* TLIST_TYPE: the actual node type
* TLIST_ITEM: where the tlist structure is
* TLIST_WANT_WALK: if defined, generates helper functions for the list walking macros
* TLIST_WANT_ADD_HEAD: if defined, TLIST_PREFIX_add_head() is generated to
* add an item to the beginning of the list
* TLIST_WANT_ADD_TAIL: if defined, TLIST_PREFIX_add_tail() is generated to
* add an item to the end of the list
*
* TLIST_PREFIX_rem_node() is generated always.
*
* All these macros are #undef-ed by including this file.
*
* Example:
*
* #include "lib/tlists.h"
*
* struct foo {
* ...
* TLIST_NODE(bar, struct foo) baz;
* ...
* };
*
* #define TLIST_PREFIX bar
* #define TLIST_TYPE struct foo
* #define TLIST_ITEM baz
*
* #define TLIST_WANT_WALK
* #define TLIST_WANT_ADD_HEAD
*
* #include "lib/tlists.h"
*
* ...
* (end of example)
*
*/
#ifdef _BIRD_LIB_TLISTS_H_
# ifdef TLIST_PREFIX
/* Check for mandatory arguments */
#ifndef TLIST_TYPE
#error "TLIST_TYPE must be defined"
#endif
#ifndef TLIST_ITEM
#error "TLIST_ITEM must be defined"
#endif
#ifndef TLIST_PREFIX
#error "TLIST_PREFIX must be defined"
#endif
#define TLIST_NAME(x) MACRO_CONCAT_AFTER(TLIST_PREFIX,_##x)
#ifndef TLIST_LIST_STRUCT
#define TLIST_LIST_STRUCT TLIST_NAME(list)
#endif
typedef struct TLIST_LIST_STRUCT {
TLIST_TYPE *first;
TLIST_TYPE *last;
} TLIST_LIST_STRUCT;
#ifdef TLIST_WANT_WALK
static inline struct TLIST_NAME(node) * TLIST_NAME(node_get)(TLIST_TYPE *node)
{ return &(node->TLIST_ITEM); }
#endif
#ifdef TLIST_WANT_ADD_HEAD
static inline void TLIST_NAME(add_head)(TLIST_LIST_STRUCT *list, TLIST_TYPE *node)
{
ASSERT_DIE(!node->TLIST_ITEM.prev && !node->TLIST_ITEM.next);
if (node->TLIST_ITEM.next = list->first)
list->first->TLIST_ITEM.prev = node;
else
list->last = node;
list->first = node;
}
#endif
#ifdef TLIST_WANT_ADD_TAIL
static inline void TLIST_NAME(add_tail)(TLIST_LIST_STRUCT *list, TLIST_TYPE *node)
{
ASSERT_DIE(!node->TLIST_ITEM.prev && !node->TLIST_ITEM.next);
if (node->TLIST_ITEM.prev = list->last)
list->last->TLIST_ITEM.next = node;
else
list->first = node;
list->last = node;
}
#endif
static inline void TLIST_NAME(rem_node)(TLIST_LIST_STRUCT *list, TLIST_TYPE *node)
{
if (node->TLIST_ITEM.prev)
node->TLIST_ITEM.prev->TLIST_ITEM.next = node->TLIST_ITEM.next;
else
{
ASSERT_DIE(list->first == node);
list->first = node->TLIST_ITEM.next;
}
if (node->TLIST_ITEM.next)
node->TLIST_ITEM.next->TLIST_ITEM.prev = node->TLIST_ITEM.prev;
else
{
ASSERT_DIE(list->last == node);
list->last = node->TLIST_ITEM.prev;
}
node->TLIST_ITEM.next = node->TLIST_ITEM.prev = NULL;
}
#undef TLIST_PREFIX
#undef TLIST_NAME
#undef TLIST_LIST_STRUCT
#undef TLIST_TYPE
#undef TLIST_ITEM
#undef TLIST_WANT_ADD_HEAD
#undef TLIST_WANT_ADD_TAIL
# endif
#else
#define _BIRD_LIB_TLISTS_H_
#include "lib/macro.h"
#if defined(TLIST_NAME) || defined(TLIST_PREFIX)
#error "You should first include lib/tlists.h without requesting a TLIST"
#endif
#define TLIST_NODE(_name, _type) struct _name##_node { _type *next; _type *prev; }
#define TLIST_LIST(_name) struct _name##_list
/* Use ->first and ->last to access HEAD and TAIL */
#define THEAD(_name, _list) (_list)->first
#define TTAIL(_name, _list) (_list)->last
/* Walkaround macros: simple and resilient to node removal */
#define WALK_TLIST(_name, _node, _list) \
for (typeof((_list)->first) _node = (_list)->first; \
_node; _node = _name##_node_get((_node))->next)
#define WALK_TLIST_DELSAFE(_name, _node, _list) \
for (typeof((_list)->first) _node = (_list)->first, \
_helper = _node ? _name##_node_get((_list)->first)->next : NULL; \
_node; \
(_node = _helper) ? (_helper = _name##_node_get(_helper)->next) : 0)
/* Empty check */
#define EMPTY_TLIST(_name, _list) (!(_list)->first)
#endif
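
Continuing the struct foo / bar example from the header comment: once the second include has generated the bar_* helpers, the list is used roughly as follows (p is an arbitrary pool, do_something() is a placeholder):

  struct bar_list l = {};                   /* same as TLIST_LIST(bar) l = {}; */
  struct foo *f = mb_allocz(p, sizeof(struct foo));   /* nodes must start zeroed */
  bar_add_head(&l, f);
  WALK_TLIST(bar, n, &l)
    do_something(n);
  bar_rem_node(&l, f);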

112
lib/type.h Normal file
View File

@ -0,0 +1,112 @@
/*
* BIRD Internet Routing Daemon -- Internal Data Types
*
* (c) 2022 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
#ifndef _BIRD_TYPE_H_
#define _BIRD_TYPE_H_
#include "lib/birdlib.h"
#include "lib/attrs.h"
union bval {
#define BVAL_ITEMS \
struct { \
u32 data; /* Integer type inherited from eattrs */ \
PADDING(data, 0, 4); /* Must be padded on 64-bits */ \
}; \
struct { \
u32 i; /* Integer type inherited from filters */ \
PADDING(i, 0, 4); /* Must be padded on 64-bits */ \
}; \
const struct adata *ptr; /* Generic attribute data inherited from eattrs */ \
const struct adata *ad; /* Generic attribute data inherited from filters */ \
BVAL_ITEMS;
};
union bval_long {
union bval bval; /* For direct assignments */
BVAL_ITEMS; /* For item-wise access */
u64 ec;
lcomm lc;
ip_addr ip;
const net_addr *net;
const char *s;
const struct f_tree *t;
const struct f_trie *ti;
const struct f_path_mask *path_mask;
struct f_path_mask_item pmi;
};
/* Internal types */
enum btype {
/* Nothing. Simply nothing. */
T_VOID = 0,
/* Something but inaccessible. */
T_OPAQUE = 0x02, /* Opaque byte string (not filterable) */
T_IFACE = 0x0c, /* Pointer to an interface (inside adata) */
T_NEXTHOP_LIST = 0x2c, /* The whole nexthop block */
T_HOSTENTRY = 0x2e, /* Hostentry with possible MPLS labels */
/* Types shared with eattrs */
T_INT = 0x01, /* 32-bit unsigned integer number */
T_IP = 0x04, /* IP address */
T_QUAD = 0x05, /* Router ID (IPv4 address) */
T_PATH = 0x06, /* BGP AS path (encoding per RFC 1771:4.3) */
T_CLIST = 0x0a, /* Set of u32's (e.g., a community list) */
T_ECLIST = 0x0e, /* Set of pairs of u32's - ext. community list */
T_LCLIST = 0x08, /* Set of triplets of u32's - large community list */
T_ENUM_BGP_ORIGIN = 0x11, /* BGP Origin enum */
T_ENUM_RA_PREFERENCE = 0x13, /* RA Preference enum */
T_ENUM_FLOWSPEC_VALID = 0x15, /* Flowspec validation result */
#define EAF_TYPE__MAX 0x1f
#define EAF_EMBEDDED 0x01 /* Data stored in eattr.u.data (part of type spec) */
/* Otherwise, attribute data is adata */
/* Other user visible types which fit in int */
T_BOOL = 0xa0,
T_PAIR = 0xa4, /* Notice that pair is stored as integer: first << 16 | second */
/* Put enumerational types in 0x20..0x3f range */
T_ENUM_LO = 0x10,
T_ENUM_HI = 0x3f,
T_ENUM_RTS = 0x31,
T_ENUM_SCOPE = 0x33,
T_ENUM_RTD = 0x37,
T_ENUM_ROA = 0x39,
T_ENUM_NETTYPE = 0x3b,
T_ENUM_AF = 0x3d,
/* new enums go here */
#define T_ENUM T_ENUM_LO ... T_ENUM_HI
/* Bigger ones */
T_NET = 0xb0,
T_STRING = 0xb4,
T_PATH_MASK = 0xb8, /* mask for BGP path */
T_EC = 0xbc, /* Extended community value, u64 */
T_LC = 0xc0, /* Large community value, lcomm */
T_RD = 0xc4, /* Route distinguisher for VPN addresses */
T_PATH_MASK_ITEM = 0xc8, /* Path mask item for path mask constructors */
T_SET = 0x80,
T_PREFIX_SET = 0x84,
} PACKED;
typedef enum btype btype;
STATIC_ASSERT(sizeof(btype) == sizeof(byte));
#endif

79
lib/type_test.c Normal file
View File

@ -0,0 +1,79 @@
/*
* BIRD Library -- Data Type Alignment Tests
*
* (c) 2022 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
#include "test/birdtest.h"
#include "lib/type.h"
#include "lib/route.h"
#define CHECK_ONE(val) \
for (uint i=0; i<sizeof(val); i++) \
bt_assert(((const u8 *) &val)[i] == (u8) ~0);
#define SET_PADDING(val, name) \
for (uint i=0; i<sizeof(val.PADDING_NAME(name)); i++) \
val.PADDING_NAME(name)[i] = ~0;
static int
t_bval(void)
{
union bval v;
memset(&v, 0, sizeof(v));
v.data = ~0;
SET_PADDING(v, data);
CHECK_ONE(v);
memset(&v, 0, sizeof(v));
v.i = ~0;
SET_PADDING(v, i);
CHECK_ONE(v);
memset(&v, 0, sizeof(v));
v.ptr = (void *) ~0;
CHECK_ONE(v);
memset(&v, 0, sizeof(v));
v.ad = (void *) ~0;
CHECK_ONE(v);
return 1;
}
static int
t_eattr(void)
{
struct eattr e;
memset(&e, 0, sizeof(e));
e.id = ~0;
e.flags = ~0;
e.type = ~0;
e.rfu = ~0;
e.originated = ~0;
e.fresh = ~0;
e.undef = ~0;
memset(&e.u, ~0, sizeof(e.u)); /* Assumes t_bval passed */
SET_PADDING(e, unused);
CHECK_ONE(e);
return 1;
}
int main(int argc, char *argv[])
{
bt_init(argc, argv);
bt_test_suite(t_bval, "Structure alignment test: bval");
bt_test_suite(t_eattr, "Structure alignment test: eattr");
return bt_exit_value();
}

View File

@ -1,6 +1,6 @@
Summary: BIRD Internet Routing Daemon
Name: bird
Version: 2.0.8
Version: 2.0.10
Release: 1
Copyright: GPL
Group: Networking/Daemons

View File

@ -1,9 +1,10 @@
FROM ubuntu:20.10
FROM ubuntu:21.10
ENV DEBIAN_FRONTEND=noninteractive
RUN sed -i 's/deb.debian.org/ftp.cz.debian.org/' /etc/apt/sources.list
RUN apt-get -y update
RUN apt-get -y upgrade
RUN apt-get -y install \
RUN apt-get -y --no-install-recommends install \
tzdata \
build-essential \
flex \
bison \

View File

@ -1,8 +1,17 @@
src := a-path.c a-set.c cli.c cmds.c iface.c locks.c neighbor.c password.c proto.c rt-attr.c rt-dev.c rt-fib.c rt-show.c rt-table.c
src := cli.c cmds.c iface.c locks.c neighbor.c password.c proto.c proto-build.c rt-attr.c rt-dev.c rt-fib.c rt-show.c rt-table.c
obj := $(src-o-files)
$(all-daemon)
$(cf-local)
$(call proto-build,dev_build)
tests_src := a-set_test.c a-path_test.c
$(proto-build-c): $(lastword $(MAKEFILE_LIST))
$(E)echo GEN $@
$(Q)echo "#include \"lib/birdlib.h\"" > $@
$(Q)$(patsubst %,echo 'void %(void);' >> $@;,$(PROTO_BUILD))
$(Q)echo "void protos_build_gen(void) {" >> $@
$(Q)$(patsubst %,echo ' %();'>>$@;,$(PROTO_BUILD))
$(Q)echo "}" >> $@
tests_src :=
tests_targets := $(tests_targets) $(tests-target-files)
tests_objs := $(tests_objs) $(src-o-files)

View File

@ -9,7 +9,6 @@
#ifndef _BIRD_BIRD_H_
#define _BIRD_BIRD_H_
#include "sysdep/config.h"
#include "lib/birdlib.h"
#include "lib/ip.h"
#include "lib/net.h"

View File

@ -319,7 +319,6 @@ cli_new(void *priv)
c->event->data = c;
c->cont = cli_hello;
c->parser_pool = lp_new_default(c->pool);
c->show_pool = lp_new_default(c->pool);
c->rx_buf = mb_alloc(c->pool, CLI_RX_BUF_SIZE);
ev_schedule(c->event);
return c;
@ -409,10 +408,13 @@ void
cli_free(cli *c)
{
cli_set_log_echo(c, 0, 0);
int defer = 0;
if (c->cleanup)
c->cleanup(c);
defer = c->cleanup(c);
if (c == cmd_reconfig_stored_cli)
cmd_reconfig_stored_cli = NULL;
if (!defer)
rfree(c->pool);
}

View File

@ -33,12 +33,12 @@ typedef struct cli {
struct cli_out *tx_buf, *tx_pos, *tx_write;
event *event;
void (*cont)(struct cli *c);
void (*cleanup)(struct cli *c);
int (*cleanup)(struct cli *c); /* Return 0 if finished and cli may be freed immediately.
Otherwise return 1 and call rfree(c->pool) when appropriate. */
void *rover; /* Private to continuation routine */
int last_reply;
int restricted; /* CLI is restricted to read-only commands */
struct linpool *parser_pool; /* Pool used during parsing */
struct linpool *show_pool; /* Pool used during route show */
byte *ring_buf; /* Ring buffer for asynchronous messages */
byte *ring_end, *ring_read, *ring_write; /* Pointers to the ring buffer */
uint ring_overflow; /* Counter of ring overflows */

View File

@ -8,7 +8,7 @@
#include "nest/bird.h"
#include "nest/protocol.h"
#include "nest/route.h"
#include "nest/rt.h"
#include "nest/cli.h"
#include "conf/conf.h"
#include "nest/cmds.h"
@ -51,7 +51,8 @@ cmd_show_symbols(struct sym_show_data *sd)
cli_msg(1010, "%-8s\t%s", sd->sym->name, cf_symbol_class_name(sd->sym));
else
{
HASH_WALK(config->sym_hash, next, sym)
for (const struct sym_scope *scope = config->root_scope; scope; scope = scope->next)
HASH_WALK(scope->hash, next, sym)
{
if (!sym->scope->active)
continue;
@ -67,31 +68,63 @@ cmd_show_symbols(struct sym_show_data *sd)
}
}
static void
print_size(char *dsc, size_t val)
#define SIZE_SUFFIX " kMGT"
#define SIZE_FORMAT "% 4u.%1u % 1cB"
#define SIZE_ARGS(a) (a).val, (a).decimal, SIZE_SUFFIX[(a).magnitude]
struct size_args {
u64 val:48;
u64 decimal:8;
u64 magnitude:8;
};
static struct size_args
get_size_args(u64 val)
{
char *px = " kMG";
int i = 0;
while ((val >= 10000) && (i < 3))
#define VALDEC 10 /* One decimal place */
val *= VALDEC;
uint i = 0;
while ((val >= 10000 * VALDEC) && (i < 4))
{
val = (val + 512) / 1024;
i++;
}
cli_msg(-1018, "%-17s %4u %cB", dsc, (unsigned) val, px[i]);
return (struct size_args) {
.val = (val / VALDEC),
.decimal = (val % VALDEC),
.magnitude = i,
};
}
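
A worked example of the scaling above (numbers illustrative): 5 500 000 bytes are first multiplied to 55 000 000 tenths, one division by 1024 yields 53 711, which prints as "5371.1 kB"; values under 10 000 bytes are never divided, keep magnitude 0 and print as plain bytes.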
static void
print_size(char *dsc, struct resmem vals)
{
struct size_args effective = get_size_args(vals.effective);
struct size_args overhead = get_size_args(vals.overhead);
cli_msg(-1018, "%-17s " SIZE_FORMAT " " SIZE_FORMAT, dsc, SIZE_ARGS(effective), SIZE_ARGS(overhead));
}
extern pool *rt_table_pool;
extern pool *rta_pool;
extern uint *pages_kept;
void
cmd_show_memory(void)
{
cli_msg(-1018, "BIRD memory usage");
cli_msg(-1018, "%-17s Effective Overhead", "");
print_size("Routing tables:", rmemsize(rt_table_pool));
print_size("Route attributes:", rmemsize(rta_pool));
print_size("Protocols:", rmemsize(proto_pool));
print_size("Total:", rmemsize(&root_pool));
struct resmem total = rmemsize(&root_pool);
#ifdef HAVE_MMAP
print_size("Standby memory:", (struct resmem) { .overhead = page_size * *pages_kept });
total.overhead += page_size * *pages_kept;
#endif
print_size("Total:", total);
cli_msg(0, "");
}
@ -101,7 +134,7 @@ cmd_eval(const struct f_line *expr)
buffer buf;
LOG_BUFFER_INIT(buf);
if (f_eval_buf(expr, this_cli->parser_pool, &buf) > F_RETURN)
if (f_eval_buf(expr, &buf) > F_RETURN)
{
cli_msg(8008, "runtime error");
return;

View File

@ -17,6 +17,7 @@ CF_HDR
CF_DEFINES
static struct rtable_config *this_table;
static struct proto_config *this_proto;
static struct channel_config *this_channel;
static struct iface_patt *this_ipatt;
@ -117,13 +118,14 @@ CF_KEYWORDS(IPV4, IPV6, VPN4, VPN6, ROA4, ROA6, FLOW4, FLOW6, SADR, MPLS)
CF_KEYWORDS(RECEIVE, LIMIT, ACTION, WARN, BLOCK, RESTART, DISABLE, KEEP, FILTERED, RPKI)
CF_KEYWORDS(PASSWORD, KEY, FROM, PASSIVE, TO, ID, EVENTS, PACKETS, PROTOCOLS, CHANNELS, INTERFACES)
CF_KEYWORDS(ALGORITHM, KEYED, HMAC, MD5, SHA1, SHA256, SHA384, SHA512, BLAKE2S128, BLAKE2S256, BLAKE2B256, BLAKE2B512)
CF_KEYWORDS(PRIMARY, STATS, COUNT, BY, FOR, COMMANDS, PREEXPORT, NOEXPORT, EXPORTED, GENERATE)
CF_KEYWORDS(BGP, PASSWORDS, DESCRIPTION, SORTED)
CF_KEYWORDS(RELOAD, IN, OUT, MRTDUMP, MESSAGES, RESTRICT, MEMORY, IGP_METRIC, CLASS, DSCP)
CF_KEYWORDS(PRIMARY, STATS, COUNT, FOR, IN, COMMANDS, PREEXPORT, NOEXPORT, EXPORTED, GENERATE)
CF_KEYWORDS(BGP, PASSWORDS, DESCRIPTION)
CF_KEYWORDS(RELOAD, IN, OUT, MRTDUMP, MESSAGES, RESTRICT, MEMORY, CLASS, DSCP)
CF_KEYWORDS(TIMEFORMAT, ISO, SHORT, LONG, ROUTE, PROTOCOL, BASE, LOG, S, MS, US)
CF_KEYWORDS(GRACEFUL, RESTART, WAIT, MAX, FLUSH, AS)
CF_KEYWORDS(GRACEFUL, RESTART, WAIT, MAX, AS)
CF_KEYWORDS(MIN, IDLE, RX, TX, INTERVAL, MULTIPLIER, PASSIVE)
CF_KEYWORDS(CHECK, LINK)
CF_KEYWORDS(CORK, SORTED, TRIE, MIN, MAX, SETTLE, TIME, GC, THRESHOLD, PERIOD)
/* For r_args_channel */
CF_KEYWORDS(IPV4, IPV4_MC, IPV4_MPLS, IPV6, IPV6_MC, IPV6_MPLS, IPV6_SADR, VPN4, VPN4_MC, VPN4_MPLS, VPN6, VPN6_MC, VPN6_MPLS, ROA4, ROA6, FLOW4, FLOW6, MPLS, PRI, SEC)
@ -131,7 +133,7 @@ CF_KEYWORDS(IPV4, IPV4_MC, IPV4_MPLS, IPV6, IPV6_MC, IPV6_MPLS, IPV6_SADR, VPN4,
CF_ENUM(T_ENUM_RTS, RTS_, STATIC, INHERIT, DEVICE, STATIC_DEVICE, REDIRECT,
RIP, OSPF, OSPF_IA, OSPF_EXT1, OSPF_EXT2, BGP, PIPE, BABEL)
CF_ENUM(T_ENUM_SCOPE, SCOPE_, HOST, LINK, SITE, ORGANIZATION, UNIVERSE, UNDEFINED)
CF_ENUM(T_ENUM_RTD, RTD_, UNICAST, BLACKHOLE, UNREACHABLE, PROHIBIT)
CF_ENUM(T_ENUM_RTD, RTD_, BLACKHOLE, UNREACHABLE, PROHIBIT)
CF_ENUM(T_ENUM_ROA, ROA_, UNKNOWN, VALID, INVALID)
CF_ENUM_PX(T_ENUM_AF, AF_, AFI_, IPV4, IPV6)
@ -141,7 +143,7 @@ CF_ENUM_PX(T_ENUM_AF, AF_, AFI_, IPV4, IPV6)
%type <s> optproto
%type <ra> r_args
%type <sd> sym_args
%type <i> proto_start echo_mask echo_size debug_mask debug_list debug_flag mrtdump_mask mrtdump_list mrtdump_flag export_mode limit_action net_type table_sorted tos password_algorithm
%type <i> proto_start echo_mask echo_size debug_mask debug_list debug_flag mrtdump_mask mrtdump_list mrtdump_flag export_mode limit_action net_type tos password_algorithm
%type <ps> proto_patt proto_patt2
%type <cc> channel_start proto_channel
%type <cl> limit_spec
@ -163,7 +165,7 @@ rtrid:
idval:
NUM { $$ = $1; }
| '(' term ')' { $$ = f_eval_int(f_linearize($2)); }
| '(' term ')' { $$ = f_eval_int(f_linearize($2, 1)); }
| IP4 { $$ = ip4_to_u32($1); }
| CF_SYM_KNOWN {
if ($1->class == (SYM_CONSTANT | T_INT) || $1->class == (SYM_CONSTANT | T_QUAD))
@ -206,16 +208,43 @@ CF_ENUM(T_ENUM_NETTYPE, NET_, IP4, IP6, VPN4, VPN6, ROA4, ROA6, FLOW4, FLOW6, IP
conf: table ;
table_sorted:
{ $$ = 0; }
| SORTED { $$ = 1; }
table: table_start table_sorted table_opt_list ;
table_start: net_type TABLE symbol {
this_table = rt_new_table($3, $1);
}
;
table: net_type TABLE symbol table_sorted {
struct rtable_config *cf;
cf = rt_new_table($3, $1);
cf->sorted = $4;
table_sorted:
/* empty */
| SORTED { this_table->sorted = 1; }
;
table_opt:
SORTED bool { this_table->sorted = $2; }
| TRIE bool {
if (!net_val_match(this_table->addr_type, NB_IP | NB_VPN | NB_ROA | NB_IP6_SADR))
cf_error("Trie option not supported for %s table", net_label[this_table->addr_type]);
this_table->trie_used = $2;
}
| MIN SETTLE TIME expr_us { this_table->min_settle_time = $4; }
| MAX SETTLE TIME expr_us { this_table->max_settle_time = $4; }
| GC THRESHOLD expr { this_table->gc_threshold = $3; }
| GC PERIOD expr_us { this_table->gc_period = (uint) $3; if ($3 > 3600 S_) cf_error("GC period must be at most 3600 s"); }
| CORK THRESHOLD expr expr {
if ($3 > $4) cf_error("Cork low threshold must be lower than the high threshold.");
this_table->cork_threshold.low = $3;
this_table->cork_threshold.high = $4; }
;
table_opts:
/* empty */
| table_opts table_opt ';'
;
table_opt_list:
/* empty */
| '{' table_opts '}'
;
@ -283,12 +312,25 @@ channel_item_:
this_channel->table = $2;
}
| IMPORT imexport { this_channel->in_filter = $2; }
| EXPORT IN net_any imexport {
if (this_channel->net_type && ($3->type != this_channel->net_type))
cf_error("Incompatible export prefilter type");
this_channel->out_subprefix = $3;
this_channel->out_filter = $4;
}
| EXPORT imexport { this_channel->out_filter = $2; }
| RECEIVE LIMIT limit_spec { this_channel->rx_limit = $3; }
| IMPORT LIMIT limit_spec { this_channel->in_limit = $3; }
| EXPORT LIMIT limit_spec { this_channel->out_limit = $3; }
| PREFERENCE expr { this_channel->preference = $2; check_u16($2); }
| IMPORT KEEP FILTERED bool { this_channel->in_keep_filtered = $4; }
| IMPORT KEEP FILTERED bool {
if ($4)
this_channel->in_keep |= RIK_REJECTED;
else if ((this_channel->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
cf_error("Import keep filtered is implied by the import table.");
else
this_channel->in_keep &= ~RIK_REJECTED;
}
| RPKI RELOAD bool { this_channel->rpki_reload = $3; }
;
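/* For illustration only: channel options that the rules above should accept
 * (the prefix and the filter name are made up):
 *
 *   export in 172.16.0.0/12 filter out_f;
 *   import keep filtered yes;
 *   rpki reload no;
 */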
@ -617,20 +659,29 @@ r_args:
$$ = cfg_allocz(sizeof(struct rt_show_data));
init_list(&($$->tables));
$$->filter = FILTER_ACCEPT;
$$->running_on_config = new_config->fallback;
$$->running_on_config = config;
$$->cli = this_cli;
}
| r_args net_any {
$$ = $1;
if ($$->addr) cf_error("Only one prefix expected");
$$->addr = $2;
$$->addr_mode = TE_ADDR_EQUAL;
}
| r_args FOR r_args_for {
$$ = $1;
if ($$->addr) cf_error("Only one prefix expected");
$$->show_for = 1;
$$->addr = $3;
$$->addr_mode = TE_ADDR_FOR;
}
| r_args TABLE CF_SYM_KNOWN {
| r_args IN net_any {
$$ = $1;
if ($$->addr) cf_error("Only one prefix expected");
if (!net_type_match($3, NB_IP)) cf_error("Only IP networks accepted for 'in' argument");
$$->addr = $3;
$$->addr_mode = TE_ADDR_IN;
}
| r_args TABLE symbol_known {
cf_assert_symbol($3, SYM_TABLE);
$$ = $1;
rt_show_add_table($$, $3->table->table);
@ -644,13 +695,13 @@ r_args:
$$->tables_defined_by = RSD_TDB_ALL;
}
| r_args IMPORT TABLE channel_arg {
if (!$4->in_table) cf_error("No import table in channel %s.%s", $4->proto->name, $4->name);
rt_show_add_table($$, $4->in_table->tab);
if (!($4->in_keep & RIK_PREFILTER)) cf_error("No import table in channel %s.%s", $4->proto->name, $4->name);
rt_show_add_exporter($$, &$4->table->exporter, "import")->prefilter = $4;
$$->tables_defined_by = RSD_TDB_DIRECT;
}
| r_args EXPORT TABLE channel_arg {
if (!$4->out_table) cf_error("No export table in channel %s.%s", $4->proto->name, $4->name);
rt_show_add_table($$, $4->out_table->tab);
rt_show_add_exporter($$, $4->out_table, "export");
$$->tables_defined_by = RSD_TDB_DIRECT;
}
| r_args FILTER filter {
@ -675,7 +726,7 @@ r_args:
$$ = $1;
$$->filtered = 1;
}
| r_args export_mode CF_SYM_KNOWN {
| r_args export_mode symbol_known {
cf_assert_symbol($3, SYM_PROTO);
struct proto_config *c = (struct proto_config *) $3->proto;
$$ = $1;
@ -692,7 +743,7 @@ r_args:
$$->export_channel = $3;
$$->tables_defined_by = RSD_TDB_INDIRECT;
}
| r_args PROTOCOL CF_SYM_KNOWN {
| r_args PROTOCOL symbol_known {
cf_assert_symbol($3, SYM_PROTO);
struct proto_config *c = (struct proto_config *) $3->proto;
$$ = $1;
@ -820,7 +871,7 @@ CF_CLI(DUMP INTERFACES,,, [[Dump interface information]])
CF_CLI(DUMP NEIGHBORS,,, [[Dump neighbor cache]])
{ neigh_dump_all(); cli_msg(0, ""); } ;
CF_CLI(DUMP ATTRIBUTES,,, [[Dump attribute cache]])
{ rta_dump_all(); cli_msg(0, ""); } ;
{ ea_dump_all(); cli_msg(0, ""); } ;
CF_CLI(DUMP ROUTES,,, [[Dump routes]])
{ rt_dump_all(); cli_msg(0, ""); } ;
CF_CLI(DUMP TABLES,,, [[Dump table connections]])
@ -831,7 +882,7 @@ CF_CLI(DUMP FILTER ALL,,, [[Dump all filters in linearized form]])
{ filters_dump_all(); cli_msg(0, ""); } ;
CF_CLI(EVAL, term, <expr>, [[Evaluate an expression]])
{ cmd_eval(f_linearize($2)); } ;
{ cmd_eval(f_linearize($2, 1)); } ;
CF_CLI_HELP(ECHO, ..., [[Control echoing of log messages]])
CF_CLI(ECHO, echo_mask echo_size, (all | off | { debug|trace|info|remote|warning|error|auth [, ...] }) [<buffer-size>], [[Control echoing of log messages]]) {
@ -894,9 +945,6 @@ proto_patt2:
| TEXT { $$.ptr = $1; $$.patt = 1; }
;
dynamic_attr: IGP_METRIC { $$ = f_new_dynamic_attr(EAF_TYPE_INT, T_INT, EA_GEN_IGP_METRIC); } ;
CF_CODE
CF_END

View File

@ -591,7 +591,7 @@ ifa_update(struct ifa *a)
if (ipa_equal(b->brd, a->brd) &&
ipa_equal(b->opposite, a->opposite) &&
b->scope == a->scope &&
!((b->flags ^ a->flags) & IA_PEER))
!((b->flags ^ a->flags) & (IA_SECONDARY | IA_PEER | IA_HOST)))
{
b->flags |= IA_UPDATED;
return b;

View File

@ -345,7 +345,7 @@ neigh_free(neighbor *n)
{
rem_node(&n->n);
rem_node(&n->if_n);
sl_free(neigh_slab, n);
sl_free(n);
}
/**

View File

@ -15,19 +15,17 @@
#include "lib/event.h"
#include "lib/timer.h"
#include "lib/string.h"
#include "lib/coro.h"
#include "conf/conf.h"
#include "nest/route.h"
#include "nest/rt.h"
#include "nest/iface.h"
#include "nest/cli.h"
#include "filter/filter.h"
#include "filter/f-inst.h"
pool *proto_pool;
list proto_list;
list STATIC_LIST_INIT(proto_list);
static list protocol_list;
struct protocol *class_to_protocol[PROTOCOL__MAX];
static list STATIC_LIST_INIT(protocol_list);
#define CD(c, msg, args...) ({ if (c->debug & D_STATES) log(L_TRACE "%s.%s: " msg, c->proto->name, c->name ?: "?", ## args); })
#define PD(p, msg, args...) ({ if (p->debug & D_STATES) log(L_TRACE "%s: " msg, p->name, ## args); })
@ -48,7 +46,7 @@ static char *c_states[] = { "DOWN", "START", "UP", "STOP", "RESTART" };
extern struct protocol proto_unix_iface;
static void channel_aux_request_refeed(struct channel_aux_table *cat);
static void channel_request_reload(struct channel *c);
static void proto_shutdown_loop(timer *);
static void proto_rethink_goal(struct proto *p);
static char *proto_state_name(struct proto *p);
@ -110,9 +108,7 @@ channel_export_log_state_change(struct rt_export_request *req, u8 state)
switch (state)
{
case TES_FEEDING:
if (c->out_table)
rt_refresh_begin(&c->out_table->push);
else if (c->proto->feed_begin)
if (c->proto->feed_begin)
c->proto->feed_begin(c, !c->refeeding);
break;
case TES_READY:
@ -202,8 +198,7 @@ proto_find_channel_by_name(struct proto *p, const char *n)
return NULL;
}
rte * channel_preimport(struct rt_import_request *req, rte *new, rte *old);
rte * channel_in_preimport(struct rt_import_request *req, rte *new, rte *old);
int channel_preimport(struct rt_import_request *req, rte *new, rte *old);
void rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
void rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
@ -240,6 +235,7 @@ proto_add_channel(struct proto *p, struct channel_config *cf)
c->in_filter = cf->in_filter;
c->out_filter = cf->out_filter;
c->out_subprefix = cf->out_subprefix;
channel_init_limit(c, &c->rx_limit, PLD_RX, &cf->rx_limit);
channel_init_limit(c, &c->in_limit, PLD_IN, &cf->in_limit);
@ -250,7 +246,7 @@ proto_add_channel(struct proto *p, struct channel_config *cf)
c->preference = cf->preference;
c->debug = cf->debug;
c->merge_limit = cf->merge_limit;
c->in_keep_filtered = cf->in_keep_filtered;
c->in_keep = cf->in_keep;
c->rpki_reload = cf->rpki_reload;
c->channel_state = CS_DOWN;
@ -320,10 +316,14 @@ static void
channel_roa_in_changed(struct rt_subscription *s)
{
struct channel *c = s->data;
int active = !!c->reload_req.hook;
CD(c, "Reload triggered by RPKI change");
CD(c, "Reload triggered by RPKI change%s", active ? " - already active" : "");
if (!active)
channel_request_reload(c);
else
c->reload_pending = 1;
}
static void
@ -401,7 +401,7 @@ channel_roa_subscribe_filter(struct channel *c, int dir)
#ifdef CONFIG_BGP
/* No automatic reload for BGP channels without in_table / out_table */
if (c->channel == &channel_bgp)
valid = dir ? !!c->in_table : !!c->out_table;
valid = dir ? ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER) : !!c->out_table;
#endif
struct filter_iterator fit;
@ -411,14 +411,8 @@ channel_roa_subscribe_filter(struct channel *c, int dir)
{
switch (fi->fi_code)
{
case FI_ROA_CHECK_IMPLICIT:
tab = fi->i_FI_ROA_CHECK_IMPLICIT.rtc->table;
if (valid) channel_roa_subscribe(c, tab, dir);
found = 1;
break;
case FI_ROA_CHECK_EXPLICIT:
tab = fi->i_FI_ROA_CHECK_EXPLICIT.rtc->table;
case FI_ROA_CHECK:
tab = fi->i_FI_ROA_CHECK.rtc->table;
if (valid) channel_roa_subscribe(c, tab, dir);
found = 1;
break;
@ -483,8 +477,7 @@ channel_start_export(struct channel *c)
{
if (c->out_req.hook)
{
c->restart_export = 1;
log(L_WARN "%s.%s: Fast channel export restart", c->proto->name, c->name);
log(L_WARN "%s.%s: Attempted to start channel's already started export", c->proto->name, c->name);
return;
}
@ -496,6 +489,8 @@ channel_start_export(struct channel *c)
c->out_req = (struct rt_export_request) {
.name = rn,
.list = proto_work_list(c->proto),
.addr = c->out_subprefix,
.addr_mode = c->out_subprefix ? TE_ADDR_IN : TE_ADDR_NONE,
.trace_routes = c->debug | c->proto->debug,
.dump_req = channel_dump_export_req,
.log_state_change = channel_export_log_state_change,
@ -527,7 +522,7 @@ channel_start_export(struct channel *c)
}
DBG("%s.%s: Channel start export req=%p\n", c->proto->name, c->name, &c->out_req);
rt_request_export(c->table, &c->out_req);
rt_request_export(&c->table->exporter, &c->out_req);
}
static void
@ -536,7 +531,7 @@ channel_check_stopped(struct channel *c)
switch (c->channel_state)
{
case CS_STOP:
if (c->out_req.hook || c->in_req.hook || c->out_table || c->in_table)
if (c->out_req.hook || c->in_req.hook)
return;
channel_set_state(c, CS_DOWN);
@ -581,19 +576,13 @@ channel_export_stopped(struct rt_export_request *req)
{
c->refeeding = 1;
c->refeed_pending = 0;
rt_request_export(c->table, req);
rt_request_export(&c->table->exporter, req);
return;
}
mb_free(c->out_req.name);
c->out_req.name = NULL;
if (c->restart_export)
{
c->restart_export = 0;
channel_start_export(c);
}
else
channel_check_stopped(c);
}
@ -616,332 +605,66 @@ channel_feed_end(struct channel *c)
return;
}
if (c->out_table)
rt_refresh_end(&c->out_table->push);
else if (c->proto->feed_end)
if (c->proto->feed_end)
c->proto->feed_end(c);
if (c->refeed_pending)
rt_stop_export(req, channel_export_stopped);
}
#define CHANNEL_AUX_TABLE_DUMP_REQ(inout, imex, pgimex, pushget) static void \
channel_##inout##_##pushget##_dump_req(struct rt_##pgimex##_request *req) { \
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, pushget, req); \
debug(" Channel %s.%s " #imex " table " #pushget " request %p\n", cat->c->proto->name, cat->c->name, req); }
CHANNEL_AUX_TABLE_DUMP_REQ(in, import, import, push)
CHANNEL_AUX_TABLE_DUMP_REQ(in, import, export, get)
CHANNEL_AUX_TABLE_DUMP_REQ(out, export, import, push)
CHANNEL_AUX_TABLE_DUMP_REQ(out, export, export, get)
#undef CHANNEL_AUX_TABLE_DUMP_REQ
static uint channel_aux_imex(struct channel_aux_table *cat)
{
if (cat->c->in_table == cat)
return 0;
else if (cat->c->out_table == cat)
return 1;
else
bug("Channel aux table must be in_table or out_table");
c->refeeding = 0;
}
/* Called by protocol for reload from in_table */
void
channel_schedule_reload(struct channel *c)
{
ASSERT(c->in_req.hook);
rt_request_export(&c->table->exporter, &c->reload_req);
}
static void
channel_aux_stopped(void *data)
channel_reload_stopped(struct rt_export_request *req)
{
struct channel_aux_table *cat = data;
struct channel *c = cat->c;
struct channel *c = SKIP_BACK(struct channel, reload_req, req);
if (channel_aux_imex(cat))
c->out_table = NULL;
else
c->in_table = NULL;
mb_free(cat);
return channel_check_stopped(c);
/* Restart reload */
if (c->reload_pending)
channel_request_reload(c);
}
static void
channel_aux_import_stopped(struct rt_import_request *req)
channel_reload_log_state_change(struct rt_export_request *req, u8 state)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, push, req);
ASSERT_DIE(cat->stop);
if (state == TES_READY)
rt_stop_export(req, channel_reload_stopped);
}
static void
channel_aux_export_stopped(struct rt_export_request *req)
channel_reload_dump_req(struct rt_export_request *req)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
req->hook = NULL;
if (cat->refeed_pending && !cat->stop)
{
cat->refeed_pending = 0;
rt_request_export(cat->tab, req);
}
else
ASSERT_DIE(cat->stop);
struct channel *c = SKIP_BACK(struct channel, reload_req, req);
debug(" Channel %s.%s import reload request %p\n", c->proto->name, c->name, req);
}
static void
channel_aux_stop(struct channel_aux_table *cat)
{
cat->stop = 1;
rt_stop_import(&cat->push, channel_aux_import_stopped);
rt_stop_export(&cat->get, channel_aux_export_stopped);
cat->tab->deleted = channel_aux_stopped;
cat->tab->del_data = cat;
rt_unlock_table(cat->tab);
}
static void
channel_push_log_state_change(struct rt_import_request *req, u8 state)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, push, req);
const char *imex = channel_aux_imex(cat) ? "export" : "import";
CD(cat->c, "Channel %s table import state changed to %s", imex, rt_import_state_name(state));
}
static void
channel_get_log_state_change(struct rt_export_request *req, u8 state)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
const char *imex = channel_aux_imex(cat) ? "export" : "import";
CD(cat->c, "Channel %s table export state changed to %s", imex, rt_export_state_name(state));
switch (state)
{
case TES_FEEDING:
if (imex && cat->c->proto->feed_begin)
cat->c->proto->feed_begin(cat->c, !cat->c->refeeding);
else if (!imex)
rt_refresh_begin(&cat->c->in_req);
break;
case TES_READY:
if (imex && cat->c->proto->feed_end)
cat->c->proto->feed_end(cat->c);
else if (!imex)
rt_refresh_end(&cat->c->in_req);
if (cat->refeed_pending)
rt_stop_export(&cat->get, channel_aux_export_stopped);
break;
}
}
void rte_update_direct(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
static int
channel_aux_export_one_any(struct rt_export_request *req, struct rt_pending_export *rpe, rte **new, rte **old)
{
struct rte_src *src = rpe->new ? rpe->new->rte.src : rpe->old->rte.src;
*old = RTES_OR_NULL(rpe->old);
struct rte_storage *new_stored;
while (rpe)
{
new_stored = rpe->new;
rpe_mark_seen(req->hook, rpe);
rpe = rpe_next(rpe, src);
}
*new = RTES_CLONE(new_stored, *new);
return (*new || *old) && (&new_stored->rte != *old);
}
static int
channel_aux_export_one_best(struct rt_export_request *req, struct rt_pending_export *rpe, rte **new, rte **old)
{
*old = RTES_OR_NULL(rpe->old_best);
struct rte_storage *new_stored;
while (rpe)
{
new_stored = rpe->new_best;
rpe_mark_seen(req->hook, rpe);
rpe = rpe_next(rpe, NULL);
}
*new = RTES_CLONE(new_stored, *new);
return (*new || *old) && (&new_stored->rte != *old);
}
static void
channel_in_export_one_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
rte n0, *new = &n0, *old;
if (channel_aux_export_one_any(req, rpe, &new, &old))
rte_update_direct(cat->c, net, new, old ? old->src : new->src);
}
static void
channel_in_export_one_best(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
rte n0, *new = &n0, *old;
if (channel_aux_export_one_best(req, rpe, &new, &old))
rte_update_direct(cat->c, net, new, old ? old->src : new->src);
}
static void
channel_in_export_bulk_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
for (uint i=0; i<count; i++)
{
rte n0 = *feed[i];
rte_update_direct(cat->c, net, &n0, n0.src);
}
}
static void
channel_in_export_bulk_best(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
if (!count)
return;
rte n0 = *feed[0];
rte_update_direct(cat->c, net, &n0, n0.src);
}
void do_rt_notify_direct(struct channel *c, const net_addr *net, rte *new, const rte *old);
static void
channel_out_export_one_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
rte n0, *new = &n0, *old;
if (channel_aux_export_one_any(req, rpe, &new, &old))
do_rt_notify_direct(cat->c, net, new, old);
}
static void
channel_out_export_one_best(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
rte n0, *new = &n0, *old;
if (channel_aux_export_one_best(req, rpe, &new, &old))
do_rt_notify_direct(cat->c, net, new, old);
}
static void
channel_out_export_bulk(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
{
struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
if (cat->c->ra_mode != RA_ANY)
ASSERT_DIE(count <= 1);
for (uint i=0; i<count; i++)
{
rte n0 = *feed[i];
do_rt_notify_direct(cat->c, net, &n0, NULL);
}
}
void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, rte **feed, uint count);
/* Called by protocol to activate in_table */
void
channel_setup_in_table(struct channel *c, int best)
channel_setup_in_table(struct channel *c)
{
int nlen = sizeof("import") + strlen(c->name) + strlen(c->proto->name) + 3;
struct {
struct channel_aux_table cat;
struct rtable_config tab_cf;
char name[0];
} *cat = mb_allocz(c->proto->pool, sizeof(*cat) + nlen);
bsprintf(cat->name, "%s.%s.import", c->proto->name, c->name);
cat->tab_cf.name = cat->name;
cat->tab_cf.addr_type = c->net_type;
c->in_table = &cat->cat;
c->in_table->push = (struct rt_import_request) {
.name = cat->name,
.trace_routes = c->debug | c->proto->debug,
.dump_req = channel_in_push_dump_req,
.log_state_change = channel_push_log_state_change,
.preimport = channel_in_preimport,
};
c->in_table->get = (struct rt_export_request) {
.name = cat->name,
c->reload_req = (struct rt_export_request) {
.name = mb_sprintf(c->proto->pool, "%s.%s.import", c->proto->name, c->name),
.list = proto_work_list(c->proto),
.trace_routes = c->debug | c->proto->debug,
.dump_req = channel_in_get_dump_req,
.log_state_change = channel_get_log_state_change,
.export_one = best ? channel_in_export_one_best : channel_in_export_one_any,
.export_bulk = best ? channel_in_export_bulk_best : channel_in_export_bulk_any,
.export_bulk = channel_reload_export_bulk,
.dump_req = channel_reload_dump_req,
.log_state_change = channel_reload_log_state_change,
};
c->in_table->c = c;
c->in_table->tab = rt_setup(c->proto->pool, &cat->tab_cf);
self_link(&c->in_table->tab->n);
rt_lock_table(c->in_table->tab);
rt_request_import(c->in_table->tab, &c->in_table->push);
rt_request_export(c->in_table->tab, &c->in_table->get);
c->in_keep |= RIK_PREFILTER;
}
/* Called by protocol to activate out_table */
void
channel_setup_out_table(struct channel *c)
{
int nlen = sizeof("export") + strlen(c->name) + strlen(c->proto->name) + 3;
struct {
struct channel_aux_table cat;
struct rtable_config tab_cf;
char name[0];
} *cat = mb_allocz(c->proto->pool, sizeof(*cat) + nlen);
bsprintf(cat->name, "%s.%s.export", c->proto->name, c->name);
cat->tab_cf.name = cat->name;
cat->tab_cf.addr_type = c->net_type;
c->out_table = &cat->cat;
c->out_table->push = (struct rt_import_request) {
.name = cat->name,
.trace_routes = c->debug | c->proto->debug,
.dump_req = channel_out_push_dump_req,
.log_state_change = channel_push_log_state_change,
};
c->out_table->get = (struct rt_export_request) {
.name = cat->name,
.list = proto_work_list(c->proto),
.trace_routes = c->debug | c->proto->debug,
.dump_req = channel_out_get_dump_req,
.log_state_change = channel_get_log_state_change,
.export_one = (c->ra_mode == RA_ANY) ? channel_out_export_one_any : channel_out_export_one_best,
.export_bulk = channel_out_export_bulk,
};
c->out_table->c = c;
c->out_table->tab = rt_setup(c->proto->pool, &cat->tab_cf);
self_link(&c->out_table->tab->n);
rt_lock_table(c->out_table->tab);
rt_request_import(c->out_table->tab, &c->out_table->push);
rt_request_export(c->out_table->tab, &c->out_table->get);
}
static void
channel_aux_request_refeed(struct channel_aux_table *cat)
{
cat->refeed_pending = 1;
rt_stop_export(&cat->get, channel_aux_export_stopped);
}
static void
channel_do_start(struct channel *c)
@ -967,12 +690,16 @@ channel_do_up(struct channel *c)
static void
channel_do_pause(struct channel *c)
{
/* Need to abort feeding */
if (c->reload_req.hook)
{
c->reload_pending = 0;
rt_stop_export(&c->reload_req, channel_reload_stopped);
}
/* Stop export */
if (c->out_req.hook)
{
rt_stop_export(&c->out_req, channel_export_stopped);
c->refeeding = 0;
}
channel_roa_unsubscribe_all(c);
@ -983,13 +710,6 @@ channel_do_pause(struct channel *c)
static void
channel_do_stop(struct channel *c)
{
/* Drop auxiliary tables */
if (c->in_table)
channel_aux_stop(c->in_table);
if (c->out_table)
channel_aux_stop(c->out_table);
/* Stop import */
if (c->in_req.hook)
rt_stop_import(&c->in_req, channel_import_stopped);
@ -1000,13 +720,12 @@ channel_do_stop(struct channel *c)
CALL(c->channel->shutdown, c);
channel_roa_unsubscribe_all(c);
}
static void
channel_do_down(struct channel *c)
{
ASSERT(!c->out_req.hook && !c->in_req.hook && !c->out_table && !c->in_table);
ASSERT(!c->reload_req.hook);
c->proto->active_channels--;
@ -1014,11 +733,11 @@ channel_do_down(struct channel *c)
memset(&c->import_stats, 0, sizeof(struct channel_import_stats));
memset(&c->export_stats, 0, sizeof(struct channel_export_stats));
CALL(c->channel->cleanup, c);
c->out_table = NULL;
/* This has to be done here, as the channel pool is freed before channel_do_down() */
bmap_free(&c->export_map);
bmap_free(&c->export_reject_map);
/* The in_table and out_table are going to be freed by freeing their resource pools. */
CALL(c->channel->cleanup, c);
/* Schedule protocol shutdown */
if (proto_is_done(c->proto))
@ -1048,7 +767,7 @@ channel_set_state(struct channel *c, uint state)
break;
case CS_UP:
ASSERT(cs == CS_DOWN || cs == CS_START || cs == CS_PAUSE);
ASSERT(cs == CS_DOWN || cs == CS_START);
if (cs == CS_DOWN)
channel_do_start(c);
@ -1098,8 +817,8 @@ channel_set_state(struct channel *c, uint state)
* completed, it will switch back to ES_READY. This function can be called
* even when feeding is already running, in that case it is restarted.
*/
static void
channel_request_table_feeding(struct channel *c)
void
channel_request_feeding(struct channel *c)
{
ASSERT(c->out_req.hook);
@ -1107,23 +826,7 @@ channel_request_table_feeding(struct channel *c)
rt_stop_export(&c->out_req, channel_export_stopped);
}
void
channel_request_feeding(struct channel *c)
{
if (c->gr_wait || !c->proto->rt_notify)
return;
CD(c, "Refeed requested");
ASSERT_DIE(c->out_req.hook);
if (c->out_table)
channel_aux_request_refeed(c->out_table);
else
channel_request_table_feeding(c);
}
void
static void
channel_request_reload(struct channel *c)
{
ASSERT(c->in_req.hook);
@ -1131,29 +834,14 @@ channel_request_reload(struct channel *c)
CD(c, "Reload requested");
if (c->in_table)
channel_aux_request_refeed(c->in_table);
else
c->proto->reload_routes(c);
}
void
channel_refresh_begin(struct channel *c)
{
CD(c, "Channel route refresh begin");
if (c->in_table)
rt_refresh_begin(&c->in_table->push);
else
rt_refresh_begin(&c->in_req);
}
void
channel_refresh_end(struct channel *c)
{
if (c->in_table)
rt_refresh_end(&c->in_table->push);
else
rt_refresh_end(&c->in_req);
/*
* Should this be done before reload_routes() hook?
* Perhaps, but routes are updated asynchronously.
*/
channel_reset_limit(c, &c->rx_limit, PLD_RX);
channel_reset_limit(c, &c->in_limit, PLD_IN);
}
const struct channel_class channel_basic = {
@ -1239,7 +927,12 @@ int
channel_reconfigure(struct channel *c, struct channel_config *cf)
{
/* FIXME: better handle these changes, also handle in_keep_filtered */
if ((c->table != cf->table->table) || (cf->ra_mode && (c->ra_mode != cf->ra_mode)))
if ((c->table != cf->table->table) ||
(cf->ra_mode && (c->ra_mode != cf->ra_mode)) ||
(cf->in_keep != c->in_keep) ||
cf->out_subprefix && c->out_subprefix &&
!net_equal(cf->out_subprefix, c->out_subprefix) ||
(!cf->out_subprefix != !c->out_subprefix))
return 0;
/* Note that filter_same() requires arguments in (new, old) order */
@ -1264,9 +957,9 @@ channel_reconfigure(struct channel *c, struct channel_config *cf)
// c->ra_mode = cf->ra_mode;
c->merge_limit = cf->merge_limit;
c->preference = cf->preference;
c->out_req.addr = c->out_subprefix = cf->out_subprefix;
c->debug = cf->debug;
c->in_req.trace_routes = c->out_req.trace_routes = c->debug | c->proto->debug;
c->in_keep_filtered = cf->in_keep_filtered;
c->rpki_reload = cf->rpki_reload;
/* Execute channel-specific reconfigure hook */
@ -1311,7 +1004,7 @@ channel_reconfigure(struct channel *c, struct channel_config *cf)
channel_request_reload(c);
if (export_changed)
channel_request_table_feeding(c);
channel_request_feeding(c);
done:
CD(c, "Reconfigured");
@ -1378,7 +1071,6 @@ proto_loop_stopped(void *ptr)
birdloop_enter(&main_birdloop);
p->loop = &main_birdloop;
p->event->list = NULL;
proto_cleanup(p);
birdloop_leave(&main_birdloop);
@ -1461,21 +1153,13 @@ proto_start(struct proto *p)
DBG("Kicking %s up\n", p->name);
PD(p, "Starting");
int ns = strlen("Protocol ") + strlen(p->cf->name) + 1;
void *nb = mb_alloc(proto_pool, ns);
ASSERT_DIE(ns - 1 == bsnprintf(nb, ns, "Protocol %s", p->cf->name));
p->pool = rp_new(proto_pool, nb);
p->pool = rp_newf(proto_pool, "Protocol %s", p->cf->name);
if (graceful_restart_state == GRS_INIT)
p->gr_recovery = 1;
if (p->cf->loop_order != DOMAIN_ORDER(the_bird))
p->loop = birdloop_new(p->pool, p->cf->loop_order, nb);
p->event->list = proto_event_list(p);
mb_move(nb, p->pool);
p->loop = birdloop_new(p->pool, p->cf->loop_order, p->pool->name);
PROTO_LOCKED_FROM_MAIN(p)
proto_notify_state(p, (p->proto->start ? p->proto->start(p) : PS_UP));
@ -2045,7 +1729,7 @@ protos_dump_all(void)
WALK_LIST(p, proto_list)
{
#define DPF(x) (p->x ? " " #x : "")
debug(" protocol %s (%p) state %s with %d active channels flags: %s%s%s%s\n",
debug(" protocol %s (%p) state %s with %d active channels flags: %s%s%s%s%s\n",
p->name, p, p_states[p->proto_state], p->active_channels,
DPF(disabled), DPF(active), DPF(do_stop), DPF(reconfiguring));
#undef DPF
@ -2061,20 +1745,6 @@ protos_dump_all(void)
debug("\tChannel state: %s/%s/%s\n", c_states[c->channel_state],
c->in_req.hook ? rt_import_state_name(rt_import_get_state(c->in_req.hook)) : "-",
c->out_req.hook ? rt_export_state_name(rt_export_get_state(c->out_req.hook)) : "-");
if (c->in_table)
{
debug("\tInput aux table:\n");
rt_dump_hooks(c->in_table->tab);
rt_dump(c->in_table->tab);
debug("\tEnd of input aux table.\n");
}
if (c->out_table)
{
debug("\tOutput aux table:\n");
rt_dump_hooks(c->in_table->tab);
rt_dump(c->in_table->tab);
debug("\tEnd of output aux table.\n");
}
}
if (p->proto->dump && (p->proto_state != PS_DOWN))
@ -2094,14 +1764,13 @@ void
proto_build(struct protocol *p)
{
add_tail(&protocol_list, &p->n);
ASSERT(p->class);
ASSERT(!class_to_protocol[p->class]);
class_to_protocol[p->class] = p;
}
/* FIXME: convert this call to some protocol hook */
extern void bfd_init_all(void);
void protos_build_gen(void);
/**
* protos_build - build a protocol list
*
@ -2114,44 +1783,7 @@ extern void bfd_init_all(void);
void
protos_build(void)
{
init_list(&proto_list);
init_list(&protocol_list);
proto_build(&proto_device);
#ifdef CONFIG_RADV
proto_build(&proto_radv);
#endif
#ifdef CONFIG_RIP
proto_build(&proto_rip);
#endif
#ifdef CONFIG_STATIC
proto_build(&proto_static);
#endif
#ifdef CONFIG_MRT
proto_build(&proto_mrt);
#endif
#ifdef CONFIG_OSPF
proto_build(&proto_ospf);
#endif
#ifdef CONFIG_PIPE
proto_build(&proto_pipe);
#endif
#ifdef CONFIG_BGP
proto_build(&proto_bgp);
#endif
#ifdef CONFIG_BFD
proto_build(&proto_bfd);
bfd_init_all();
#endif
#ifdef CONFIG_BABEL
proto_build(&proto_babel);
#endif
#ifdef CONFIG_RPKI
proto_build(&proto_rpki);
#endif
#ifdef CONFIG_PERF
proto_build(&proto_perf);
#endif
protos_build_gen();
proto_pool = rp_new(&root_pool, "Protocols");
proto_shutdown_timer = tm_new(proto_pool);
@ -2492,18 +2124,18 @@ channel_show_stats(struct channel *c)
u32 in_routes = c->in_limit.count;
u32 out_routes = c->out_limit.count;
if (c->in_keep_filtered)
if (c->in_keep)
cli_msg(-1006, " Routes: %u imported, %u filtered, %u exported, %u preferred",
in_routes, (rx_routes - in_routes), out_routes, SRI(pref));
else
cli_msg(-1006, " Routes: %u imported, %u exported, %u preferred",
in_routes, out_routes, SRI(pref));
cli_msg(-1006, " Route change stats: received rejected filtered ignored limited accepted");
cli_msg(-1006, " Import updates: %10u %10u %10u %10u %10u %10u",
cli_msg(-1006, " Route change stats: received rejected filtered ignored RX limit IN limit accepted");
cli_msg(-1006, " Import updates: %10u %10u %10u %10u %10u %10u %10u",
SCI(updates_received), SCI(updates_invalid),
SCI(updates_filtered), SRI(updates_ignored),
SCI(updates_limited_rx) + SCI(updates_limited_in),
SCI(updates_limited_rx), SCI(updates_limited_in),
SRI(updates_accepted));
cli_msg(-1006, " Import withdraws: %10u %10u --- %10u --- %10u",
SCI(withdraws_received), SCI(withdraws_invalid),

View File

@ -12,7 +12,7 @@
#include "lib/lists.h"
#include "lib/resource.h"
#include "lib/event.h"
#include "nest/route.h"
#include "nest/rt.h"
#include "nest/limit.h"
#include "conf/conf.h"
@ -38,38 +38,20 @@ struct symbol;
* Routing Protocol
*/
enum protocol_class {
PROTOCOL_NONE,
PROTOCOL_BABEL,
PROTOCOL_BFD,
PROTOCOL_BGP,
PROTOCOL_DEVICE,
PROTOCOL_DIRECT,
PROTOCOL_KERNEL,
PROTOCOL_OSPF,
PROTOCOL_MRT,
PROTOCOL_PERF,
PROTOCOL_PIPE,
PROTOCOL_RADV,
PROTOCOL_RIP,
PROTOCOL_RPKI,
PROTOCOL_STATIC,
PROTOCOL__MAX
};
extern struct protocol *class_to_protocol[PROTOCOL__MAX];
struct protocol {
node n;
char *name;
char *template; /* Template for automatic generation of names */
int name_counter; /* Counter for automatic name generation */
enum protocol_class class; /* Machine readable protocol class */
uint preference; /* Default protocol preference */
uint channel_mask; /* Mask of accepted channel types (NB_*) */
uint proto_size; /* Size of protocol data structure */
uint config_size; /* Size of protocol config data structure */
uint eattr_begin; /* First ID of registered eattrs */
uint eattr_end; /* End of eattr id zone */
void (*preconfig)(struct protocol *, struct config *); /* Just before configuring */
void (*postconfig)(struct proto_config *); /* After configuring each instance */
struct proto * (*init)(struct proto_config *); /* Create new instance */
@ -79,13 +61,13 @@ struct protocol {
int (*shutdown)(struct proto *); /* Stop the instance */
void (*get_status)(struct proto *, byte *buf); /* Get instance status (for `show protocols' command) */
void (*get_route_info)(struct rte *, byte *buf); /* Get route information (for `show route' command) */
int (*get_attr)(const struct eattr *, byte *buf, int buflen); /* ASCIIfy dynamic attribute (returns GA_*) */
// int (*get_attr)(const struct eattr *, byte *buf, int buflen); /* ASCIIfy dynamic attribute (returns GA_*) */
void (*show_proto_info)(struct proto *); /* Show protocol info (for `show protocols all' command) */
void (*copy_config)(struct proto_config *, struct proto_config *); /* Copy config from given protocol instance */
};
void protos_build(void);
void proto_build(struct protocol *);
void protos_build(void); /* Called from sysdep to initialize protocols */
void proto_build(struct protocol *); /* Called from protocol to register itself */
void protos_preconfig(struct config *);
void protos_commit(struct config *new, struct config *old, int force_restart, int type);
struct proto * proto_spawn(struct proto_config *cf, uint disabled);
@ -152,7 +134,7 @@ struct proto {
u32 debug; /* Debugging flags */
u32 mrtdump; /* MRTDump flags */
uint active_channels; /* Number of active channels */
uint active_coroutines; /* Number of active coroutines */
uint active_loops; /* Number of active IO loops */
byte net_type; /* Protocol network type (NET_*), 0 for undefined */
byte disabled; /* Manually disabled */
byte vrf_set; /* Related VRF instance (above) is defined */
@ -212,7 +194,7 @@ struct proto {
int (*rte_mergable)(struct rte *, struct rte *);
void (*rte_insert)(struct network *, struct rte *);
void (*rte_remove)(struct network *, struct rte *);
u32 (*rte_igp_metric)(struct rte *);
u32 (*rte_igp_metric)(const struct rte *);
/* Here be protocol-specific data */
};
@ -360,7 +342,7 @@ void proto_notify_state(struct proto *p, unsigned state);
*/
static inline int proto_is_inactive(struct proto *p)
{ return (p->active_channels == 0) && (p->active_coroutines == 0); }
{ return (p->active_channels == 0) && (p->active_loops == 0); }
/*
@ -474,9 +456,10 @@ struct channel_config {
struct proto_config *parent; /* Where channel is defined (proto or template) */
struct rtable_config *table; /* Table we're attached to */
const struct filter *in_filter, *out_filter; /* Attached filters */
const net_addr *out_subprefix; /* Export only subprefixes of this net */
struct channel_limit rx_limit; /* Limit for receiving routes from protocol
(relevant when in_keep_filtered is active) */
(relevant when in_keep & RIK_REJECTED) */
struct channel_limit in_limit; /* Limit for importing routes from protocol */
struct channel_limit out_limit; /* Limit for exporting routes to protocol */
@ -485,7 +468,7 @@ struct channel_config {
u16 preference; /* Default route preference */
u32 debug; /* Debugging flags (D_*) */
u8 merge_limit; /* Maximal number of nexthops for RA_MERGED */
u8 in_keep_filtered; /* Routes rejected in import filter are kept */
u8 in_keep; /* Which states of routes to keep (RIK_*) */
u8 rpki_reload; /* RPKI changes trigger channel reload */
};
@ -499,10 +482,11 @@ struct channel {
struct rtable *table;
const struct filter *in_filter; /* Input filter */
const struct filter *out_filter; /* Output filter */
const net_addr *out_subprefix; /* Export only subprefixes of this net */
struct bmap export_map; /* Keeps track which routes were really exported */
struct bmap export_reject_map; /* Keeps track which routes were rejected by export filter */
struct limit rx_limit; /* Receive limit (for in_keep_filtered) */
struct limit rx_limit; /* Receive limit (for in_keep & RIK_REJECTED) */
struct limit in_limit; /* Input limit */
struct limit out_limit; /* Output limit */
@ -539,7 +523,7 @@ struct channel {
u16 preference; /* Default route preference */
u32 debug; /* Debugging flags (D_*) */
u8 merge_limit; /* Maximal number of nexthops for RA_MERGED */
u8 in_keep_filtered; /* Routes rejected in import filter are kept */
u8 in_keep; /* Which states of routes to keep (RIK_*) */
u8 disabled;
u8 stale; /* Used in reconfiguration */
@ -548,29 +532,22 @@ struct channel {
u8 reloadable; /* Hook reload_routes() is allowed on the channel */
u8 gr_lock; /* Graceful restart mechanism should wait for this channel */
u8 gr_wait; /* Route export to channel is postponed until graceful restart */
u8 restart_export; /* Route export should restart as soon as it stops */
btime last_state_change; /* Time of last state transition */
struct channel_aux_table *in_table; /* Internal table for received routes */
struct rt_export_request reload_req; /* Feeder for import reload */
u8 reload_pending; /* Reloading and another reload is scheduled */
u8 refeed_pending; /* Refeeding and another refeed is scheduled */
u8 rpki_reload; /* RPKI changes trigger channel reload */
struct channel_aux_table *out_table; /* Internal table for exported routes */
struct rt_exporter *out_table; /* Internal table for exported routes */
list roa_subscriptions; /* List of active ROA table subscriptions based on filters roa_check() */
};
struct channel_aux_table {
struct channel *c;
struct rt_import_request push;
struct rt_export_request get;
rtable *tab;
u8 stop;
u8 refeed_pending;
};
#define RIK_REJECTED 1 /* Routes rejected in import filter are kept */
#define RIK_PREFILTER (2 | RIK_REJECTED) /* All routes' attribute state before import filter is kept */
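/* A minimal sketch of how these flags compose (the channel_keeps_* helpers are
 * hypothetical, shown only to illustrate the bit layout): RIK_PREFILTER
 * includes RIK_REJECTED, so testing for the prefilter needs the full-mask
 * comparison used elsewhere in this change. */
static inline int channel_keeps_rejected(const struct channel *c)
{ return !!(c->in_keep & RIK_REJECTED); }

static inline int channel_keeps_prefilter(const struct channel *c)
{ return (c->in_keep & RIK_PREFILTER) == RIK_PREFILTER; }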
/*
* Channel states
@ -636,8 +613,7 @@ struct channel *proto_add_channel(struct proto *p, struct channel_config *cf);
int proto_configure_channel(struct proto *p, struct channel **c, struct channel_config *cf);
void channel_set_state(struct channel *c, uint state);
void channel_setup_in_table(struct channel *c, int best);
void channel_setup_out_table(struct channel *c);
void channel_setup_in_table(struct channel *c);
void channel_schedule_reload(struct channel *c);
static inline void channel_init(struct channel *c) { channel_set_state(c, CS_START); }
@ -645,9 +621,6 @@ static inline void channel_open(struct channel *c) { channel_set_state(c, CS_UP)
static inline void channel_close(struct channel *c) { channel_set_state(c, CS_STOP); }
void channel_request_feeding(struct channel *c);
void channel_request_reload(struct channel *c);
void channel_refresh_begin(struct channel *c);
void channel_refresh_end(struct channel *c);
void *channel_config_new(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto);
void *channel_config_get(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto);
int channel_reconfigure(struct channel *c, struct channel_config *cf);

View File

@ -1,877 +0,0 @@
/*
* BIRD Internet Routing Daemon -- Routing Table
*
* (c) 1998--2000 Martin Mares <mj@ucw.cz>
* (c) 2019--2021 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
#ifndef _BIRD_ROUTE_H_
#define _BIRD_ROUTE_H_
#include "lib/lists.h"
#include "lib/event.h"
#include "lib/bitmap.h"
#include "lib/resource.h"
#include "lib/net.h"
#include <stdatomic.h>
struct ea_list;
struct protocol;
struct proto;
struct channel;
struct rte_src;
struct symbol;
struct timer;
struct filter;
struct cli;
/*
* Generic data structure for storing network prefixes. Also used
* for the master routing table. Currently implemented as a hash
* table.
*
* Available operations:
* - insertion of new entry
* - deletion of entry
* - searching for entry by network prefix
* - asynchronous retrieval of fib contents
*/
struct fib_node {
struct fib_node *next; /* Next in hash chain */
struct fib_iterator *readers; /* List of readers of this node */
net_addr addr[0];
};
struct fib_iterator { /* See lib/slists.h for an explanation */
struct fib_iterator *prev, *next; /* Must be synced with struct fib_node! */
byte efef; /* 0xff to distinguish between iterator and node */
byte pad[3];
struct fib_node *node; /* Or NULL if freshly merged */
uint hash;
};
typedef void (*fib_init_fn)(void *);
struct fib {
pool *fib_pool; /* Pool holding all our data */
slab *fib_slab; /* Slab holding all fib nodes */
struct fib_node **hash_table; /* Node hash table */
uint hash_size; /* Number of hash table entries (a power of two) */
uint hash_order; /* Binary logarithm of hash_size */
uint hash_shift; /* 32 - hash_order */
uint addr_type; /* Type of address data stored in fib (NET_*) */
uint node_size; /* FIB node size, 0 for nonuniform */
uint node_offset; /* Offset of fib_node struct inside of user data */
uint entries; /* Number of entries */
uint entries_min, entries_max; /* Entry count limits (else start rehashing) */
fib_init_fn init; /* Constructor */
};
static inline void * fib_node_to_user(struct fib *f, struct fib_node *e)
{ return e ? (void *) ((char *) e - f->node_offset) : NULL; }
static inline struct fib_node * fib_user_to_node(struct fib *f, void *e)
{ return e ? (void *) ((char *) e + f->node_offset) : NULL; }
void fib_init(struct fib *f, pool *p, uint addr_type, uint node_size, uint node_offset, uint hash_order, fib_init_fn init);
void *fib_find(struct fib *, const net_addr *); /* Find or return NULL if doesn't exist */
void *fib_get_chain(struct fib *f, const net_addr *a); /* Find first node in linked list from hash table */
void *fib_get(struct fib *, const net_addr *); /* Find or create new if nonexistent */
void *fib_route(struct fib *, const net_addr *); /* Longest-match routing lookup */
void fib_delete(struct fib *, void *); /* Remove fib entry */
void fib_free(struct fib *); /* Destroy the fib */
void fib_check(struct fib *); /* Consistency check for debugging */
void fit_init(struct fib_iterator *, struct fib *); /* Internal functions, don't call */
struct fib_node *fit_get(struct fib *, struct fib_iterator *);
void fit_put(struct fib_iterator *, struct fib_node *);
void fit_put_next(struct fib *f, struct fib_iterator *i, struct fib_node *n, uint hpos);
void fit_put_end(struct fib_iterator *i);
void fit_copy(struct fib *f, struct fib_iterator *dst, struct fib_iterator *src);
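/* A sketch of the lookup calls declared above (hypothetical helper; "f" is
 * assumed to be an initialized fib of a matching address type): */
static void *example_lookup(struct fib *f, const net_addr *a)
{
  void *e = fib_find(f, a);	/* exact match, or NULL */
  return e ?: fib_route(f, a);	/* fall back to longest-prefix match */
}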
#define FIB_WALK(fib, type, z) do { \
struct fib_node *fn_, **ff_ = (fib)->hash_table; \
uint count_ = (fib)->hash_size; \
type *z; \
while (count_--) \
for (fn_ = *ff_++; z = fib_node_to_user(fib, fn_); fn_=fn_->next)
#define FIB_WALK_END } while (0)
#define FIB_ITERATE_INIT(it, fib) fit_init(it, fib)
#define FIB_ITERATE_START(fib, it, type, z) do { \
struct fib_node *fn_ = fit_get(fib, it); \
uint count_ = (fib)->hash_size; \
uint hpos_ = (it)->hash; \
type *z; \
for(;;) { \
if (!fn_) \
{ \
if (++hpos_ >= count_) \
break; \
fn_ = (fib)->hash_table[hpos_]; \
continue; \
} \
z = fib_node_to_user(fib, fn_);
#define FIB_ITERATE_END fn_ = fn_->next; } } while(0)
#define FIB_ITERATE_PUT(it) fit_put(it, fn_)
#define FIB_ITERATE_PUT_NEXT(it, fib) fit_put_next(fib, it, fn_, hpos_)
#define FIB_ITERATE_PUT_END(it) fit_put_end(it)
#define FIB_ITERATE_UNLINK(it, fib) fit_get(fib, it)
#define FIB_ITERATE_COPY(dst, src, fib) fit_copy(fib, dst, src)
/*
* Master Routing Tables. Generally speaking, each of them contains a FIB
* with each entry pointing to a list of route entries representing routes
* to given network (with the selected one at the head).
*
* Each of the RTEs contains variable data (the preference and protocol-dependent
* metrics) and a pointer to a route attribute block common for many routes.
*
* It's guaranteed that there is at most one RTE for every (prefix,proto) pair.
*/
struct rtable_config {
node n;
char *name;
struct rtable *table;
struct proto_config *krt_attached; /* Kernel syncer attached to this table */
uint addr_type; /* Type of address data stored in table (NET_*) */
int gc_max_ops; /* Maximum number of operations before GC is run */
int gc_min_time; /* Minimum time between two consecutive GC runs */
byte sorted; /* Routes of network are sorted according to rte_better() */
btime min_settle_time; /* Minimum settle time for notifications */
btime max_settle_time; /* Maximum settle time for notifications */
btime export_settle_time; /* Delay before exports are announced */
};
typedef struct rtable {
resource r;
node n; /* Node in list of all tables */
pool *rp; /* Resource pool to allocate everything from, including itself */
struct slab *rte_slab; /* Slab to allocate route objects */
struct fib fib;
char *name; /* Name of this table */
uint addr_type; /* Type of address data stored in table (NET_*) */
int use_count; /* Number of protocols using this table */
u32 rt_count; /* Number of routes in the table */
list imports; /* Registered route importers */
list exports; /* Registered route exporters */
struct hmap id_map;
struct hostcache *hostcache;
struct rtable_config *config; /* Configuration of this table */
void (*deleted)(void *); /* Table should free itself. Call this when it is done. */
void *del_data;
struct event *rt_event; /* Routing table event */
btime last_rt_change; /* Last time when route changed */
btime base_settle_time; /* Start time of rtable settling interval */
btime gc_time; /* Time of last GC */
int gc_counter; /* Number of operations since last GC */
byte prune_state; /* Table prune state, 1 -> scheduled, 2-> running */
byte hcu_scheduled; /* Hostcache update is scheduled */
byte nhu_state; /* Next Hop Update state */
byte export_used; /* Export journal setup scheduled */
struct fib_iterator prune_fit; /* Rtable prune FIB iterator */
struct fib_iterator nhu_fit; /* Next Hop Update FIB iterator */
struct tbf rl_pipe; /* Rate limiting token buffer for pipe collisions */
list subscribers; /* Subscribers for notifications */
struct timer *settle_timer; /* Settle time for notifications */
list pending_exports; /* List of packed struct rt_pending_export */
btime base_export_time; /* When first pending export was announced */
struct timer *export_timer;
struct rt_pending_export *first_export; /* First export to announce */
u64 next_export_seq; /* The next export will have this ID */
} rtable;
struct rt_subscription {
node n;
rtable *tab;
void (*hook)(struct rt_subscription *b);
void *data;
};
#define NHU_CLEAN 0
#define NHU_SCHEDULED 1
#define NHU_RUNNING 2
#define NHU_DIRTY 3
typedef struct network {
struct rte_storage *routes; /* Available routes for this network */
struct rt_pending_export *last, *first; /* Routes with unfinished exports */
struct fib_node n; /* FIB flags reserved for kernel syncer */
} net;
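/* A sketch of the FIB_WALK() idiom defined above, counting the networks
 * stored in a fib (hypothetical helper, for illustration only): */
static uint fib_net_count(struct fib *f)
{
  uint cnt = 0;
  FIB_WALK(f, net, nn)
    {
      cnt++;
    }
  FIB_WALK_END;
  return cnt;
}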
struct hostcache {
slab *slab; /* Slab holding all hostentries */
struct hostentry **hash_table; /* Hash table for hostentries */
unsigned hash_order, hash_shift;
unsigned hash_max, hash_min;
unsigned hash_items;
linpool *lp; /* Linpool for trie */
struct f_trie *trie; /* Trie of prefixes that might affect hostentries */
list hostentries; /* List of all hostentries */
byte update_hostcache;
};
struct hostentry {
node ln;
ip_addr addr; /* IP address of host, part of key */
ip_addr link; /* (link-local) IP address of host, used as gw
if host is directly attached */
struct rtable *tab; /* Dependent table, part of key */
struct hostentry *next; /* Next in hash chain */
unsigned hash_key; /* Hash key */
unsigned uc; /* Use count */
struct rta *src; /* Source rta entry */
byte dest; /* Chosen route destination type (RTD_...) */
byte nexthop_linkable; /* Nexthop list is completely non-device */
u32 igp_metric; /* Chosen route IGP metric */
};
typedef struct rte {
struct rta *attrs; /* Attributes of this route */
const net_addr *net; /* Network this RTE belongs to */
struct rte_src *src; /* Route source that created the route */
struct rt_import_hook *sender; /* Import hook used to send the route to the routing table */
btime lastmod; /* Last modified (set by table) */
u32 id; /* Table specific route id */
byte flags; /* Table-specific flags */
byte pflags; /* Protocol-specific flags */
u8 generation; /* If this route import is based on another previously exported route,
this value should be 1 + MAX(generation of the parent routes).
Otherwise the route is independent and this value is zero. */
u8 stale_cycle; /* Auxiliary value for route refresh */
} rte;
struct rte_storage {
struct rte_storage *next; /* Next in chain */
struct rte rte; /* Route data */
};
#define RTES_CLONE(r, l) ((r) ? (((*(l)) = (r)->rte), (l)) : NULL)
#define RTES_OR_NULL(r) ((r) ? &((r)->rte) : NULL)
#define REF_FILTERED 2 /* Route is rejected by import filter */
#define REF_USE_STALE 4 /* Do not reset route's stale_cycle to the actual value */
/* Route is valid for propagation (may depend on other flags in the future), accepts NULL */
static inline int rte_is_valid(const rte *r) { return r && !(r->flags & REF_FILTERED); }
/* Route just has REF_FILTERED flag */
static inline int rte_is_filtered(const rte *r) { return !!(r->flags & REF_FILTERED); }
/* Table-channel connections */
struct rt_import_request {
struct rt_import_hook *hook; /* The table part of importer */
char *name;
u8 trace_routes;
void (*dump_req)(struct rt_import_request *req);
void (*log_state_change)(struct rt_import_request *req, u8 state);
/* Preimport is called when the @new route is about to be inserted, replacing @old.
* Return a route (may be different or modified in-place) to continue or NULL to withdraw. */
struct rte *(*preimport)(struct rt_import_request *req, struct rte *new, struct rte *old);
};
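/* A pass-through preimport hook matching the contract above (hypothetical;
 * a real hook may modify the route in place or reject it): */
static struct rte *example_preimport(struct rt_import_request *req UNUSED, struct rte *new, struct rte *old UNUSED)
{
  return new;	/* returning NULL here would turn the update into a withdraw */
}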
struct rt_import_hook {
node n;
rtable *table; /* The connected table */
struct rt_import_request *req; /* The requestor */
struct rt_import_stats {
/* Import - from protocol to core */
u32 pref; /* Number of routes selected as best in the (adjacent) routing table */
u32 updates_ignored; /* Number of route updates rejected as already in route table */
u32 updates_accepted; /* Number of route updates accepted and imported */
u32 withdraws_ignored; /* Number of route withdraws rejected as already not in route table */
u32 withdraws_accepted; /* Number of route withdraws accepted and processed */
} stats;
u64 flush_seq; /* Table export seq when the channel announced flushing */
btime last_state_change; /* Time of last state transition */
u8 import_state; /* IS_* */
u8 stale_set; /* Set this stale_cycle to imported routes */
u8 stale_valid; /* Routes with this stale_cycle and bigger are considered valid */
u8 stale_pruned; /* Last prune finished when this value was set at stale_valid */
u8 stale_pruning; /* Last prune started when this value was set at stale_valid */
void (*stopped)(struct rt_import_request *); /* Stored callback when import is stopped */
};
struct rt_pending_export {
struct rt_pending_export * _Atomic next; /* Next export for the same destination */
struct rte_storage *new, *new_best, *old, *old_best;
u64 seq; /* Sequential ID (table-local) of the pending export */
};
struct rt_export_request {
struct rt_export_hook *hook; /* Table part of the export */
char *name;
u8 trace_routes;
event_list *list; /* Where to schedule export events */
/* There are two methods of export. You can either request feeding of every single change
 * or feeding of the whole route feed. For a regular export, &export_one is preferred.
 * When feeding, however, &export_bulk is preferred, falling back to &export_one.
 * Thus, for RA_OPTIMAL only &export_one is set,
 * for RA_MERGED and RA_ACCEPTED only &export_bulk is set,
 * and for RA_ANY both are set to accommodate feeding all routes while receiving single changes.
 */
void (*export_one)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
void (*export_bulk)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, rte **feed, uint count);
void (*dump_req)(struct rt_export_request *req);
void (*log_state_change)(struct rt_export_request *req, u8);
};
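/* A minimal sketch of wiring up an export against the declarations in this
 * header; the example_* callbacks and names are hypothetical stubs. */
static void example_export_one(struct rt_export_request *req UNUSED, const net_addr *net UNUSED, struct rt_pending_export *rpe UNUSED)
{ /* process a single pending export; see rpe_mark_seen() / rpe_next() below */ }

static void example_dump(struct rt_export_request *req)
{ debug("example export request %p\n", req); }

static void example_log(struct rt_export_request *req UNUSED, u8 state UNUSED)
{ /* e.g. start batching on TES_FEEDING, clean up on TES_STOP */ }

static struct rt_export_request example_req;

static void example_start_export(rtable *tab, event_list *list)
{
  example_req = (struct rt_export_request) {
    .name = "example.export",
    .list = list,
    .trace_routes = 1,
    .export_one = example_export_one,
    .dump_req = example_dump,
    .log_state_change = example_log,
  };
  rt_request_export(tab, &example_req);
}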
struct rt_export_hook {
node n;
rtable *table; /* The connected table */
pool *pool;
struct rt_export_request *req; /* The requestor */
struct rt_export_stats {
/* Export - from core to protocol */
u32 updates_received; /* Number of route updates received */
u32 withdraws_received; /* Number of route withdraws received */
} stats;
struct fib_iterator feed_fit; /* Routing table iterator used during feeding */
struct bmap seq_map; /* Keep track which exports were already processed */
struct rt_pending_export * _Atomic last_export;/* Last export processed */
struct rt_pending_export *rpe_next; /* Next pending export to process */
btime last_state_change; /* Time of last state transition */
u8 refeed_pending; /* Refeeding and another refeed is scheduled */
_Atomic u8 export_state; /* Route export state (TES_*, see below) */
struct event *event; /* Event running all the export operations */
void (*stopped)(struct rt_export_request *); /* Stored callback when export is stopped */
};
#define TIS_DOWN 0
#define TIS_UP 1
#define TIS_STOP 2
#define TIS_FLUSHING 3
#define TIS_WAITING 4
#define TIS_CLEARED 5
#define TIS_MAX 6
#define TES_DOWN 0
#define TES_HUNGRY 1
#define TES_FEEDING 2
#define TES_READY 3
#define TES_STOP 4
#define TES_MAX 5
void rt_request_import(rtable *tab, struct rt_import_request *req);
void rt_request_export(rtable *tab, struct rt_export_request *req);
void rt_stop_import(struct rt_import_request *, void (*stopped)(struct rt_import_request *));
void rt_stop_export(struct rt_export_request *, void (*stopped)(struct rt_export_request *));
const char *rt_import_state_name(u8 state);
const char *rt_export_state_name(u8 state);
static inline u8 rt_import_get_state(struct rt_import_hook *ih) { return ih ? ih->import_state : TIS_DOWN; }
static inline u8 rt_export_get_state(struct rt_export_hook *eh) { return eh ? eh->export_state : TES_DOWN; }
void rte_import(struct rt_import_request *req, const net_addr *net, rte *new, struct rte_src *src);
/* Get next rpe. If src is given, it must match. */
struct rt_pending_export *rpe_next(struct rt_pending_export *rpe, struct rte_src *src);
/* Mark the pending export processed */
void rpe_mark_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);
/* Get pending export seen status */
int rpe_get_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);
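/* Sketch of the journal-walking idiom used by the export hooks in this diff:
 * mark each pending export as seen and advance along the chain for the same
 * route source until it ends (cf. channel_aux_export_one_any() above). */
static void example_consume_chain(struct rt_export_request *req, struct rt_pending_export *rpe)
{
  struct rte_src *src = rpe->new ? rpe->new->rte.src : rpe->old->rte.src;

  while (rpe)
  {
    rpe_mark_seen(req->hook, rpe);
    rpe = rpe_next(rpe, src);
  }
}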
/* Types of route announcement, also used as flags */
#define RA_UNDEF 0 /* Undefined RA type */
#define RA_OPTIMAL 1 /* Announcement of optimal route change */
#define RA_ACCEPTED 2 /* Announcement of first accepted route */
#define RA_ANY 3 /* Announcement of any route change */
#define RA_MERGED 4 /* Announcement of optimal route merged with next ones */
/* Return value of preexport() callback */
#define RIC_ACCEPT 1 /* Accepted by protocol */
#define RIC_PROCESS 0 /* Process it through import filter */
#define RIC_REJECT -1 /* Rejected by protocol */
#define RIC_DROP -2 /* Silently dropped by protocol */
#define rte_update channel_rte_import
/**
* rte_update - enter a new update to a routing table
* @c: channel doing the update
* @net: network address
* @rte: a &rte representing the new route
* @src: old route source identifier
*
* This function imports a new route to the appropriate table (via the channel).
* Table keys are @net (obligatory) and @rte->attrs->src.
* Both the @net and @rte pointers can be local.
*
* The route attributes (@rte->attrs) are obligatory. They can be also allocated
locally. However, if you use an already-cached attribute object, you shall
call rta_clone() on that object yourself. (These semantics may change in the future.)
*
* If the route attributes are local, you may set @rte->attrs->src to NULL, then
* the protocol's default route source will be supplied.
*
* When rte_update() gets a route, it automatically validates it. This includes
* checking for validity of the given network and next hop addresses and also
* checking for host-scope or link-scope routes. Then the import filters are
* processed and if accepted, the route is passed to route table recalculation.
*
* The accepted routes are then inserted into the table, replacing the old route
* for the same @net identified by @src. Then the route is announced
* to all the channels connected to the table using the standard export mechanism.
* Setting @rte to NULL makes this a withdraw, otherwise @rte->src must be the same
* as @src.
*
* All memory used for temporary allocations is taken from a special linpool
* @rte_update_pool and freed when rte_update() finishes.
*/
void rte_update(struct channel *c, const net_addr *net, struct rte *rte, struct rte_src *src);
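/* A minimal sketch of an import as described above; building the rta is
 * elided and the caller is assumed to own a started channel, a net_addr and
 * a route source handle: */
static void example_announce(struct channel *c, const net_addr *n, struct rta *attrs, struct rte_src *src)
{
  rte e0 = { .attrs = attrs, .src = src };
  rte_update(c, n, &e0, src);	/* passing a NULL rte instead would withdraw (n, src) */
}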
extern list routing_tables;
struct config;
void rt_init(void);
void rt_preconfig(struct config *);
void rt_commit(struct config *new, struct config *old);
void rt_lock_table(rtable *);
void rt_unlock_table(rtable *);
void rt_subscribe(rtable *tab, struct rt_subscription *s);
void rt_unsubscribe(struct rt_subscription *s);
rtable *rt_setup(pool *, struct rtable_config *);
static inline void rt_shutdown(rtable *r) { rfree(r->rp); }
static inline net *net_find(rtable *tab, const net_addr *addr) { return (net *) fib_find(&tab->fib, addr); }
static inline net *net_find_valid(rtable *tab, const net_addr *addr)
{ net *n = net_find(tab, addr); return (n && n->routes && rte_is_valid(&n->routes->rte)) ? n : NULL; }
static inline net *net_get(rtable *tab, const net_addr *addr) { return (net *) fib_get(&tab->fib, addr); }
void *net_route(rtable *tab, const net_addr *n);
int net_roa_check(rtable *tab, const net_addr *n, u32 asn);
int rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter);
rte *rt_export_merged(struct channel *c, rte ** feed, uint count, linpool *pool, int silent);
void rt_refresh_begin(struct rt_import_request *);
void rt_refresh_end(struct rt_import_request *);
void rt_schedule_prune(rtable *t);
void rte_dump(struct rte_storage *);
void rte_free(struct rte_storage *, rtable *);
struct rte_storage *rte_store(const rte *, net *net, rtable *);
void rt_dump(rtable *);
void rt_dump_all(void);
void rt_dump_hooks(rtable *);
void rt_dump_hooks_all(void);
void rt_prune_sync(rtable *t, int all);
struct rtable_config *rt_new_table(struct symbol *s, uint addr_type);
/* Default limit for ECMP next hops, defined in sysdep code */
extern const int rt_default_ecmp;
struct rt_show_data_rtable {
node n;
rtable *table;
struct channel *export_channel;
};
struct rt_show_data {
net_addr *addr;
list tables;
struct rt_show_data_rtable *tab; /* Iterator over table list */
struct rt_show_data_rtable *last_table; /* Last table in output */
struct fib_iterator fit; /* Iterator over networks in table */
int verbose, tables_defined_by;
const struct filter *filter;
struct proto *show_protocol;
struct proto *export_protocol;
struct channel *export_channel;
struct config *running_on_config;
struct krt_proto *kernel;
struct rt_export_hook *kernel_export_hook;
int export_mode, primary_only, filtered, stats, show_for;
int table_open; /* Iteration (fit) is open */
int net_counter, rt_counter, show_counter, table_counter;
int net_counter_last, rt_counter_last, show_counter_last;
};
void rt_show(struct rt_show_data *);
struct rt_show_data_rtable * rt_show_add_table(struct rt_show_data *d, rtable *t);
/* Value of table definition mode in struct rt_show_data */
#define RSD_TDB_DEFAULT 0 /* no table specified */
#define RSD_TDB_INDIRECT 0 /* show route ... protocol P ... */
#define RSD_TDB_ALL RSD_TDB_SET /* show route ... table all ... */
#define RSD_TDB_DIRECT RSD_TDB_SET | RSD_TDB_NMN /* show route ... table X table Y ... */
#define RSD_TDB_SET 0x1 /* internal: show empty tables */
#define RSD_TDB_NMN 0x2 /* internal: need matching net */
/* Value of export_mode in struct rt_show_data */
#define RSEM_NONE 0 /* Export mode not used */
#define RSEM_PREEXPORT 1 /* Routes ready for export, before filtering */
#define RSEM_EXPORT 2 /* Routes accepted by export filter */
#define RSEM_NOEXPORT 3 /* Routes rejected by export filter */
#define RSEM_EXPORTED 4 /* Routes marked in export map */
/*
* Route Attributes
*
* Beware: All standard BGP attributes must be represented here instead
* of making them local to the route. This is needed to ensure proper
* construction of BGP route attribute lists.
*/
/* Nexthop structure */
struct nexthop {
ip_addr gw; /* Next hop */
struct iface *iface; /* Outgoing interface */
struct nexthop *next;
byte flags;
byte weight;
byte labels_orig; /* Number of labels before hostentry was applied */
byte labels; /* Number of all labels */
u32 label[0];
};
#define RNF_ONLINK 0x1 /* Gateway is onlink regardless of IP ranges */
struct rte_src {
struct rte_src *next; /* Hash chain */
struct proto *proto; /* Protocol the source is based on */
u32 private_id; /* Private ID, assigned by the protocol */
u32 global_id; /* Globally unique ID of the source */
unsigned uc; /* Use count */
};
typedef struct rta {
struct rta *next, **pprev; /* Hash chain */
u32 uc; /* Use count */
u32 hash_key; /* Hash over important fields */
struct ea_list *eattrs; /* Extended Attribute chain */
struct hostentry *hostentry; /* Hostentry for recursive next-hops */
ip_addr from; /* Advertising router */
u32 igp_metric; /* IGP metric to next hop (for iBGP routes) */
u16 cached:1; /* Are attributes cached? */
u16 source:7; /* Route source (RTS_...) */
u16 scope:4; /* Route scope (SCOPE_... -- see ip.h) */
u16 dest:4; /* Route destination type (RTD_...) */
word pref;
struct nexthop nh; /* Next hop */
} rta;
#define RTS_STATIC 1 /* Normal static route */
#define RTS_INHERIT 2 /* Route inherited from kernel */
#define RTS_DEVICE 3 /* Device route */
#define RTS_STATIC_DEVICE 4 /* Static device route */
#define RTS_REDIRECT 5 /* Learned via redirect */
#define RTS_RIP 6 /* RIP route */
#define RTS_OSPF 7 /* OSPF route */
#define RTS_OSPF_IA 8 /* OSPF inter-area route */
#define RTS_OSPF_EXT1 9 /* OSPF external route type 1 */
#define RTS_OSPF_EXT2 10 /* OSPF external route type 2 */
#define RTS_BGP 11 /* BGP route */
#define RTS_PIPE 12 /* Inter-table wormhole */
#define RTS_BABEL 13 /* Babel route */
#define RTS_RPKI 14 /* Route Origin Authorization */
#define RTS_PERF 15 /* Perf checker */
#define RTS_MAX 16
#define RTD_NONE 0 /* Undefined next hop */
#define RTD_UNICAST 1 /* Next hop is neighbor router */
#define RTD_BLACKHOLE 2 /* Silently drop packets */
#define RTD_UNREACHABLE 3 /* Reject as unreachable */
#define RTD_PROHIBIT 4 /* Administratively prohibited */
#define RTD_MAX 5
#define IGP_METRIC_UNKNOWN 0x80000000 /* Default igp_metric used when no other
protocol-specific metric is available */
extern const char * rta_dest_names[RTD_MAX];
static inline const char *rta_dest_name(uint n)
{ return (n < RTD_MAX) ? rta_dest_names[n] : "???"; }
/* Route has regular, reachable nexthop (i.e. not RTD_UNREACHABLE and like) */
static inline int rte_is_reachable(rte *r)
{ return r->attrs->dest == RTD_UNICAST; }
/*
* Extended Route Attributes
*/
typedef struct eattr {
word id; /* EA_CODE(PROTOCOL_..., protocol-dependent ID) */
byte flags; /* Protocol-dependent flags */
byte type; /* Attribute type and several flags (EAF_...) */
union {
uintptr_t data;
const struct adata *ptr; /* Attribute data elsewhere */
} u;
} eattr;
#define EA_CODE(proto,id) (((proto) << 8) | (id))
#define EA_ID(ea) ((ea) & 0xff)
#define EA_PROTO(ea) ((ea) >> 8)
#define EA_CUSTOM(id) ((id) | EA_CUSTOM_BIT)
#define EA_IS_CUSTOM(ea) ((ea) & EA_CUSTOM_BIT)
#define EA_CUSTOM_ID(ea) ((ea) & ~EA_CUSTOM_BIT)
const char *ea_custom_name(uint ea);
#define EA_GEN_IGP_METRIC EA_CODE(PROTOCOL_NONE, 0)
#define EA_CODE_MASK 0xffff
#define EA_CUSTOM_BIT 0x8000
#define EA_ALLOW_UNDEF 0x10000 /* ea_find: allow EAF_TYPE_UNDEF */
#define EA_BIT(n) ((n) << 24) /* Used in bitfield accessors */
#define EA_BIT_GET(ea) ((ea) >> 24)
#define EAF_TYPE_MASK 0x1f /* Mask with this to get type */
#define EAF_TYPE_INT 0x01 /* 32-bit unsigned integer number */
#define EAF_TYPE_OPAQUE 0x02 /* Opaque byte string (not filterable) */
#define EAF_TYPE_IP_ADDRESS 0x04 /* IP address */
#define EAF_TYPE_ROUTER_ID 0x05 /* Router ID (IPv4 address) */
#define EAF_TYPE_AS_PATH 0x06 /* BGP AS path (encoding per RFC 1771:4.3) */
#define EAF_TYPE_BITFIELD 0x09 /* 32-bit embedded bitfield */
#define EAF_TYPE_INT_SET 0x0a /* Set of u32's (e.g., a community list) */
#define EAF_TYPE_PTR 0x0d /* Pointer to an object */
#define EAF_TYPE_EC_SET 0x0e /* Set of pairs of u32's - ext. community list */
#define EAF_TYPE_LC_SET 0x12 /* Set of triplets of u32's - large community list */
#define EAF_TYPE_UNDEF 0x1f /* `force undefined' entry */
#define EAF_EMBEDDED 0x01 /* Data stored in eattr.u.data (part of type spec) */
#define EAF_VAR_LENGTH 0x02 /* Attribute length is variable (part of type spec) */
#define EAF_ORIGINATED 0x20 /* The attribute has originated locally */
#define EAF_FRESH 0x40 /* An uncached attribute (e.g. modified in export filter) */
typedef struct adata {
uint length; /* Length of data */
byte data[0];
} adata;
extern const adata null_adata; /* adata of length 0 */
static inline struct adata *
lp_alloc_adata(struct linpool *pool, uint len)
{
struct adata *ad = lp_alloc(pool, sizeof(struct adata) + len);
ad->length = len;
return ad;
}
static inline int adata_same(const struct adata *a, const struct adata *b)
{ return (a->length == b->length && !memcmp(a->data, b->data, a->length)); }
typedef struct ea_list {
struct ea_list *next; /* In case we have an override list */
byte flags; /* Flags: EALF_... */
byte rfu;
word count; /* Number of attributes */
eattr attrs[0]; /* Attribute definitions themselves */
} ea_list;
#define EALF_SORTED 1 /* Attributes are sorted by code */
#define EALF_BISECT 2 /* Use interval bisection for searching */
#define EALF_CACHED 4 /* Attributes belonging to cached rta */
struct rte_src *rt_find_source(struct proto *p, u32 id);
struct rte_src *rt_get_source(struct proto *p, u32 id);
static inline void rt_lock_source(struct rte_src *src) { src->uc++; }
static inline void rt_unlock_source(struct rte_src *src) { src->uc--; }
void rt_prune_sources(void);
struct ea_walk_state {
ea_list *eattrs; /* Current ea_list, initially set by caller */
eattr *ea; /* Current eattr, initially NULL */
u32 visited[4]; /* Bitfield, limiting max to 128 */
};
eattr *ea_find(ea_list *, unsigned ea);
eattr *ea_walk(struct ea_walk_state *s, uint id, uint max);
uintptr_t ea_get_int(ea_list *, unsigned ea, uintptr_t def);
void ea_dump(ea_list *);
void ea_sort(ea_list *); /* Sort entries in all sub-lists */
unsigned ea_scan(ea_list *); /* How many bytes do we need for merged ea_list */
void ea_merge(ea_list *from, ea_list *to); /* Merge sub-lists to allocated buffer */
int ea_same(ea_list *x, ea_list *y); /* Test whether two ea_lists are identical */
uint ea_hash(ea_list *e); /* Calculate 16-bit hash value */
ea_list *ea_append(ea_list *to, ea_list *what);
void ea_format_bitfield(const struct eattr *a, byte *buf, int bufsize, const char **names, int min, int max);
#define ea_normalize(ea) do { \
if (ea->next) { \
ea_list *t = alloca(ea_scan(ea)); \
ea_merge(ea, t); \
ea = t; \
} \
ea_sort(ea); \
if (ea->count == 0) \
ea = NULL; \
} while(0) \
static inline eattr *
ea_set_attr(ea_list **to, struct linpool *pool, uint id, uint flags, uint type, uintptr_t val)
{
ea_list *a = lp_alloc(pool, sizeof(ea_list) + sizeof(eattr));
eattr *e = &a->attrs[0];
a->flags = EALF_SORTED;
a->count = 1;
a->next = *to;
*to = a;
e->id = id;
e->type = type;
e->flags = flags;
if (type & EAF_EMBEDDED)
e->u.data = (u32) val;
else
e->u.ptr = (struct adata *) val;
return e;
}
static inline void
ea_set_attr_u32(ea_list **to, struct linpool *pool, uint id, uint flags, uint type, u32 val)
{ ea_set_attr(to, pool, id, flags, type, (uintptr_t) val); }
static inline void
ea_set_attr_ptr(ea_list **to, struct linpool *pool, uint id, uint flags, uint type, struct adata *val)
{ ea_set_attr(to, pool, id, flags, type, (uintptr_t) val); }
static inline void
ea_set_attr_data(ea_list **to, struct linpool *pool, uint id, uint flags, uint type, void *data, uint len)
{
struct adata *a = lp_alloc_adata(pool, len);
memcpy(a->data, data, len);
ea_set_attr(to, pool, id, flags, type, (uintptr_t) a);
}
#define NEXTHOP_MAX_SIZE (sizeof(struct nexthop) + sizeof(u32)*MPLS_MAX_LABEL_STACK)
static inline size_t nexthop_size(const struct nexthop *nh)
{ return sizeof(struct nexthop) + sizeof(u32)*nh->labels; }
int nexthop__same(struct nexthop *x, struct nexthop *y); /* Compare multipath nexthops */
static inline int nexthop_same(struct nexthop *x, struct nexthop *y)
{ return (x == y) || nexthop__same(x, y); }
struct nexthop *nexthop_merge(struct nexthop *x, struct nexthop *y, int rx, int ry, int max, linpool *lp);
struct nexthop *nexthop_sort(struct nexthop *x);
static inline void nexthop_link(struct rta *a, struct nexthop *from)
{ memcpy(&a->nh, from, nexthop_size(from)); }
void nexthop_insert(struct nexthop **n, struct nexthop *y);
int nexthop_is_sorted(struct nexthop *x);
void rta_init(void);
static inline size_t rta_size(const rta *a) { return sizeof(rta) + sizeof(u32)*a->nh.labels; }
#define RTA_MAX_SIZE (sizeof(rta) + sizeof(u32)*MPLS_MAX_LABEL_STACK)
rta *rta_lookup(rta *); /* Get rta equivalent to this one, uc++ */
static inline int rta_is_cached(rta *r) { return r->cached; }
static inline rta *rta_clone(rta *r) { r->uc++; return r; }
void rta__free(rta *r);
static inline void rta_free(rta *r) { if (r && !--r->uc) rta__free(r); }
rta *rta_do_cow(rta *o, linpool *lp);
static inline rta * rta_cow(rta *r, linpool *lp) { return rta_is_cached(r) ? rta_do_cow(r, lp) : r; }
static inline void rta_uncache(rta *r) { r->cached = 0; r->uc = 0; }
void rta_dump(rta *);
void rta_dump_all(void);
void rta_show(struct cli *, rta *);
u32 rt_get_igp_metric(rte *);
struct hostentry * rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep);
void rta_apply_hostentry(rta *a, struct hostentry *he, mpls_label_stack *mls);
static inline void
rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr gw, ip_addr ll, mpls_label_stack *mls)
{
rta_apply_hostentry(a, rt_get_hostentry(tab, gw, ll, dep), mls);
}
/*
* rta_set_recursive_next_hop() acquires hostentry from hostcache and fills
* rta->hostentry field. New hostentry has zero use count. Cached rta locks its
* hostentry (increases its use count), uncached rta does not lock it. Hostentry
* with zero use count is removed asynchronously during host cache update,
* therefore it is safe to hold such a hostentry temporarily. Hostentry holds a
* lock for a 'source' rta, mainly to share multipath nexthops.
*
* There is no need to hold a lock for hostentry->dep table, because that table
* contains routes responsible for that hostentry, and therefore is non-empty if
* given hostentry has non-zero use count. If the hostentry has zero use count,
* the entry is removed before dep is referenced.
*
* The protocol responsible for routes with recursive next hops should hold a
* lock for the 'source' table governing those routes (argument tab to
* rta_set_recursive_next_hop()), because its routes reference hostentries
* (through rta) related to the governing table. When all such routes are
* removed, the rtas are removed immediately, reaching zero uc. The 'source'
* table lock can then be released immediately, although hostentries may still
* exist - they will be freed together with the 'source' table.
*/
static inline void rt_lock_hostentry(struct hostentry *he) { if (he) he->uc++; }
static inline void rt_unlock_hostentry(struct hostentry *he) { if (he) he->uc--; }
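/*
 * Editor's sketch (not part of the original header): how a protocol might
 * attach a recursive next hop resolved through an IGP table, per the comment
 * above. The channel c, the tables, addresses and label stack are assumed to
 * be prepared by the caller; names are illustrative only.
 */
static inline void
example_recursive_nexthop(struct channel *c, rtable *igp_table,
                          rta *a, ip_addr gw, ip_addr ll, mpls_label_stack *mls)
{
  /* Acquires a hostentry from the host cache of igp_table and fills
   * a->hostentry; c->table becomes the dependent table (see above). */
  rta_set_recursive_next_hop(c->table, a, igp_table, gw, ll, mls);

  /* The rta can then be cached with rta_lookup() and announced with
   * rte_update() as usual; caching also locks the hostentry. */
}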
/*
* Default protocol preferences
*/
#define DEF_PREF_DIRECT 240 /* Directly connected */
#define DEF_PREF_STATIC 200 /* Static route */
#define DEF_PREF_OSPF 150 /* OSPF intra-area, inter-area and type 1 external routes */
#define DEF_PREF_BABEL 130 /* Babel */
#define DEF_PREF_RIP 120 /* RIP */
#define DEF_PREF_BGP 100 /* BGP */
#define DEF_PREF_RPKI 100 /* RPKI */
#define DEF_PREF_INHERITED 10 /* Routes inherited from other routing daemons */
/*
* Route Origin Authorization
*/
#define ROA_UNKNOWN 0
#define ROA_VALID 1
#define ROA_INVALID 2
#endif

File diff suppressed because it is too large


@ -18,7 +18,7 @@
#include "nest/bird.h"
#include "nest/iface.h"
#include "nest/protocol.h"
#include "nest/route.h"
#include "nest/rt.h"
#include "nest/rt-dev.h"
#include "conf/conf.h"
#include "lib/resource.h"
@ -79,16 +79,18 @@ dev_ifa_notify(struct proto *P, uint flags, struct ifa *ad)
/* Use iface ID as local source ID */
struct rte_src *src = rt_get_source(P, ad->iface->index);
rta a0 = {
.pref = c->preference,
.source = RTS_DEVICE,
.scope = SCOPE_UNIVERSE,
.dest = RTD_UNICAST,
.nh.iface = ad->iface,
ea_list *ea = NULL;
struct nexthop_adata nhad = {
.nh = { .iface = ad->iface, },
.ad = { .length = (void *) NEXTHOP_NEXT(&nhad.nh) - (void *) nhad.ad.data, },
};
ea_set_attr_u32(&ea, &ea_gen_preference, 0, c->preference);
ea_set_attr_u32(&ea, &ea_gen_source, 0, RTS_DEVICE);
ea_set_attr_data(&ea, &ea_gen_nexthop, 0, nhad.ad.data, nhad.ad.length);
rte e0 = {
.attrs = rta_lookup(&a0),
.attrs = ea,
.src = src,
};
@ -184,7 +186,6 @@ dev_copy_config(struct proto_config *dest, struct proto_config *src)
struct protocol proto_device = {
.name = "Direct",
.template = "direct%d",
.class = PROTOCOL_DIRECT,
.preference = DEF_PREF_DIRECT,
.channel_mask = NB_IP | NB_IP6_SADR,
.proto_size = sizeof(struct rt_dev_proto),
@ -194,3 +195,9 @@ struct protocol proto_device = {
.reconfigure = dev_reconfigure,
.copy_config = dev_copy_config
};
void
dev_build(void)
{
proto_build(&proto_device);
}


@ -55,7 +55,7 @@
#undef LOCAL_DEBUG
#include "nest/bird.h"
#include "nest/route.h"
#include "nest/rt.h"
#include "lib/string.h"
/*
@ -331,7 +331,7 @@ fib_get(struct fib *f, const net_addr *a)
memset(b, 0, f->node_offset);
if (f->init)
f->init(b);
f->init(f, b);
if (f->entries++ > f->entries_max)
fib_rehash(f, HASH_HI_STEP);
@ -475,7 +475,7 @@ fib_delete(struct fib *f, void *E)
}
if (f->fib_slab)
sl_free(f->fib_slab, E);
sl_free(E);
else
mb_free(E);


@ -10,123 +10,92 @@
#undef LOCAL_DEBUG
#include "nest/bird.h"
#include "nest/route.h"
#include "nest/rt.h"
#include "nest/protocol.h"
#include "nest/cli.h"
#include "nest/iface.h"
#include "filter/filter.h"
#include "filter/data.h"
#include "sysdep/unix/krt.h"
static void
rt_show_table(struct cli *c, struct rt_show_data *d)
rt_show_table(struct rt_show_data *d)
{
struct cli *c = d->cli;
/* No table blocks in 'show route count' */
if (d->stats == 2)
return;
if (d->last_table) cli_printf(c, -1007, "");
cli_printf(c, -1007, "Table %s:", d->tab->table->name);
cli_printf(c, -1007, "Table %s:",
d->tab->name);
d->last_table = d->tab;
}
static inline struct krt_proto *
rt_show_get_kernel(struct rt_show_data *d)
{
struct proto_config *krt = d->tab->table->config->krt_attached;
return krt ? (struct krt_proto *) krt->proto : NULL;
}
static void
rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, int primary)
{
byte from[IPA_MAX_TEXT_LENGTH+8];
byte tm[TM_DATETIME_BUFFER_SIZE], info[256];
rta *a = e->attrs;
int sync_error = d->kernel ? krt_get_sync_error(d->kernel, e) : 0;
ea_list *a = e->attrs;
int sync_error = d->tab->kernel ? krt_get_sync_error(d->tab->kernel, e) : 0;
void (*get_route_info)(struct rte *, byte *buf);
struct nexthop *nh;
eattr *nhea = net_type_match(e->net, NB_DEST) ?
ea_find(a, &ea_gen_nexthop) : NULL;
struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
int dest = nhad ? (NEXTHOP_IS_REACHABLE(nhad) ? RTD_UNICAST : nhad->dest) : RTD_NONE;
int flowspec_valid = net_is_flow(e->net) ? rt_get_flowspec_valid(e) : FLOWSPEC_UNKNOWN;
tm_format_time(tm, &config->tf_route, e->lastmod);
if (ipa_nonzero(a->from) && !ipa_equal(a->from, a->nh.gw))
bsprintf(from, " from %I", a->from);
ip_addr a_from = ea_get_ip(a, &ea_gen_from, IPA_NONE);
if (ipa_nonzero(a_from) && (!nhad || !ipa_equal(a_from, nhad->nh.gw)))
bsprintf(from, " from %I", a_from);
else
from[0] = 0;
/* Need to normalize the extended attributes */
if (d->verbose && !rta_is_cached(a) && a->eattrs)
ea_normalize(a->eattrs);
if (d->verbose && !rta_is_cached(a) && a)
a = ea_normalize(a, 0);
get_route_info = e->src->proto->proto->get_route_info;
if (get_route_info)
get_route_info(e, info);
else
bsprintf(info, " (%d)", a->pref);
bsprintf(info, " (%d)", rt_get_preference(e));
if (d->last_table != d->tab)
rt_show_table(c, d);
rt_show_table(d);
cli_printf(c, -1007, "%-20s %s [%s %s%s]%s%s", ia, rta_dest_name(a->dest),
eattr *heea;
struct hostentry_adata *had = NULL;
if (!net_is_flow(e->net) && (dest == RTD_NONE) && (heea = ea_find(a, &ea_gen_hostentry)))
had = (struct hostentry_adata *) heea->u.ptr;
cli_printf(c, -1007, "%-20s %s [%s %s%s]%s%s", ia,
net_is_flow(e->net) ? flowspec_valid_name(flowspec_valid) : had ? "recursive" : rta_dest_name(dest),
e->src->proto->name, tm, from, primary ? (sync_error ? " !" : " *") : "", info);
if (a->dest == RTD_UNICAST)
for (nh = &(a->nh); nh; nh = nh->next)
{
char mpls[MPLS_MAX_LABEL_STACK*12 + 5], *lsp = mpls;
char *onlink = (nh->flags & RNF_ONLINK) ? " onlink" : "";
char weight[16] = "";
if (nh->labels)
{
lsp += bsprintf(lsp, " mpls %d", nh->label[0]);
for (int i=1;i<nh->labels; i++)
lsp += bsprintf(lsp, "/%d", nh->label[i]);
}
*lsp = '\0';
if (a->nh.next)
bsprintf(weight, " weight %d", nh->weight + 1);
if (ipa_nonzero(nh->gw))
cli_printf(c, -1007, "\tvia %I on %s%s%s%s",
nh->gw, nh->iface->name, mpls, onlink, weight);
else
cli_printf(c, -1007, "\tdev %s%s%s",
nh->iface->name, mpls, onlink, weight);
}
if (d->verbose)
{
cli_printf(c, -1008, "\tInternal route ID: %uL %uG %uS", e->src->private_id, e->src->global_id, e->stale_cycle);
rta_show(c, a);
ea_show_list(c, a);
cli_printf(c, -1008, "\tInternal route handling values: %uL %uG %uS id %u",
e->src->private_id, e->src->global_id, e->stale_cycle, e->id);
}
}
static uint
rte_feed_count(net *n)
{
uint count = 0;
for (struct rte_storage *e = n->routes; e; e = e->next)
if (rte_is_valid(RTES_OR_NULL(e)))
count++;
return count;
}
static void
rte_feed_obtain(net *n, rte **feed, uint count)
{
uint i = 0;
for (struct rte_storage *e = n->routes; e; e = e->next)
if (rte_is_valid(RTES_OR_NULL(e)))
else if (dest == RTD_UNICAST)
ea_show_nexthop_list(c, nhad);
else if (had)
{
ASSERT_DIE(i < count);
feed[i++] = &e->rte;
char hetext[256];
ea_show_hostentry(&had->ad, hetext, sizeof hetext);
cli_printf(c, -1007, "\t%s", hetext);
}
ASSERT_DIE(i == count);
}
static void
rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
rt_show_net(struct rt_show_data *d, const net_addr *n, rte **feed, uint count)
{
struct cli *c = d->cli;
byte ia[NET_MAX_TEXT_LENGTH+1];
struct channel *ec = d->tab->export_channel;
@ -135,13 +104,12 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
ASSUME(!d->export_mode || ec);
int first = 1;
int first_show = 1;
int pass = 0;
bsnprintf(ia, sizeof(ia), "%N", n->n.addr);
for (struct rte_storage *er = n->routes; er; er = er->next)
for (uint i = 0; i < count; i++)
{
if (rte_is_filtered(&er->rte) != d->filtered)
if (!d->tab->prefilter && (rte_is_filtered(feed[i]) != d->filtered))
continue;
d->rt_counter++;
@ -151,7 +119,12 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
if (pass)
continue;
struct rte e = er->rte;
struct rte e = *feed[i];
if (d->tab->prefilter)
if (e.sender != d->tab->prefilter->in_req.hook)
continue;
else while (e.attrs->next)
e.attrs = e.attrs->next;
/* Export channel is down, do not try to export routes to it */
if (ec && !ec->out_req.hook)
@ -169,13 +142,7 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
{
/* Special case for merged export */
pass = 1;
uint count = rte_feed_count(n);
if (!count)
goto skip;
rte **feed = alloca(count * sizeof(rte *));
rte_feed_obtain(n, feed, count);
rte *em = rt_export_merged(ec, feed, count, c->show_pool, 1);
rte *em = rt_export_merged(ec, feed, count, tmp_linpool, 1);
if (em)
e = *em;
@ -201,7 +168,7 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
* command may change the export filter and do not update routes.
*/
int do_export = (ic > 0) ||
(f_run(ec->out_filter, &e, c->show_pool, FF_SILENT) <= F_ACCEPT);
(f_run(ec->out_filter, &e, FF_SILENT) <= F_ACCEPT);
if (do_export != (d->export_mode == RSEM_EXPORT))
goto skip;
@ -214,122 +181,190 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
if (d->show_protocol && (d->show_protocol != e.src->proto))
goto skip;
if (f_run(d->filter, &e, c->show_pool, 0) > F_ACCEPT)
if (f_run(d->filter, &e, 0) > F_ACCEPT)
goto skip;
if (d->stats < 2)
rt_show_rte(c, ia, &e, d, (n->routes == er));
d->show_counter++;
{
if (first_show)
net_format(n, ia, sizeof(ia));
else
ia[0] = 0;
skip:
lp_flush(c->show_pool);
rt_show_rte(c, ia, &e, d, !d->tab->prefilter && !i);
first_show = 0;
}
d->show_counter++;
skip:
if (d->primary_only)
break;
}
if ((d->show_counter - d->show_counter_last_flush) > 64)
{
d->show_counter_last_flush = d->show_counter;
cli_write_trigger(d->cli);
}
}
static void
rt_show_cleanup(struct cli *c)
rt_show_net_export_bulk(struct rt_export_request *req, const net_addr *n,
struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
{
struct rt_show_data *d = c->rover;
struct rt_show_data_rtable *tab;
/* Unlink the iterator */
if (d->table_open)
fit_get(&d->tab->table->fib, &d->fit);
/* Unlock referenced tables */
WALK_LIST(tab, d->tables)
rt_unlock_table(tab->table);
struct rt_show_data *d = SKIP_BACK(struct rt_show_data, req, req);
return rt_show_net(d, n, feed, count);
}
static void
rt_show_cont(struct cli *c)
rt_show_export_stopped_cleanup(struct rt_export_request *req)
{
struct rt_show_data *d = SKIP_BACK(struct rt_show_data, req, req);
/* The hook is now invalid */
req->hook = NULL;
/* And free the CLI (deferred) */
rfree(d->cli->pool);
}
static int
rt_show_cleanup(struct cli *c)
{
struct rt_show_data *d = c->rover;
#ifdef DEBUGGING
unsigned max = 4;
#else
unsigned max = 64;
#endif
struct fib *fib = &d->tab->table->fib;
struct fib_iterator *it = &d->fit;
/* Cancel the feed */
if (d->req.hook)
{
rt_stop_export(&d->req, rt_show_export_stopped_cleanup);
return 1;
}
else
return 0;
}
static void rt_show_export_stopped(struct rt_export_request *req);
static void
rt_show_log_state_change(struct rt_export_request *req, u8 state)
{
if (state == TES_READY)
rt_stop_export(req, rt_show_export_stopped);
}
static void
rt_show_dump_req(struct rt_export_request *req)
{
debug(" CLI Show Route Feed %p\n", req);
}
static void
rt_show_done(struct rt_show_data *d)
{
/* No more action */
d->cli->cleanup = NULL;
d->cli->cont = NULL;
d->cli->rover = NULL;
/* Write pending messages */
cli_write_trigger(d->cli);
}
static void
rt_show_cont(struct rt_show_data *d)
{
struct cli *c = d->cli;
if (d->running_on_config && (d->running_on_config != config))
{
cli_printf(c, 8004, "Stopped due to reconfiguration");
goto done;
return rt_show_done(d);
}
if (!d->table_open)
{
FIB_ITERATE_INIT(&d->fit, &d->tab->table->fib);
d->table_open = 1;
d->req = (struct rt_export_request) {
.addr = d->addr,
.name = "CLI Show Route",
.list = &global_work_list,
.export_bulk = rt_show_net_export_bulk,
.dump_req = rt_show_dump_req,
.log_state_change = rt_show_log_state_change,
.addr_mode = d->addr_mode,
};
d->table_counter++;
d->kernel = rt_show_get_kernel(d);
d->show_counter_last = d->show_counter;
d->rt_counter_last = d->rt_counter;
d->net_counter_last = d->net_counter;
if (d->tables_defined_by & RSD_TDB_SET)
rt_show_table(c, d);
}
rt_show_table(d);
FIB_ITERATE_START(fib, it, net, n)
{
if (!max--)
{
FIB_ITERATE_PUT(it);
return;
}
rt_show_net(c, n, d);
}
FIB_ITERATE_END;
rt_request_export(d->tab->table, &d->req);
}
static void
rt_show_export_stopped(struct rt_export_request *req)
{
struct rt_show_data *d = SKIP_BACK(struct rt_show_data, req, req);
/* The hook is now invalid */
req->hook = NULL;
if (d->stats)
{
if (d->last_table != d->tab)
rt_show_table(c, d);
rt_show_table(d);
cli_printf(c, -1007, "%d of %d routes for %d networks in table %s",
cli_printf(d->cli, -1007, "%d of %d routes for %d networks in table %s",
d->show_counter - d->show_counter_last, d->rt_counter - d->rt_counter_last,
d->net_counter - d->net_counter_last, d->tab->table->name);
d->net_counter - d->net_counter_last, d->tab->name);
}
d->kernel = NULL;
d->table_open = 0;
d->tab = NODE_NEXT(d->tab);
if (NODE_VALID(d->tab))
return;
return rt_show_cont(d);
/* Printout total stats */
if (d->stats && (d->table_counter > 1))
{
if (d->last_table) cli_printf(c, -1007, "");
cli_printf(c, 14, "Total: %d of %d routes for %d networks in %d tables",
if (d->last_table) cli_printf(d->cli, -1007, "");
cli_printf(d->cli, 14, "Total: %d of %d routes for %d networks in %d tables",
d->show_counter, d->rt_counter, d->net_counter, d->table_counter);
}
else if (!d->rt_counter && ((d->addr_mode == TE_ADDR_EQUAL) || (d->addr_mode == TE_ADDR_FOR)))
cli_printf(d->cli, 8001, "Network not found");
else
cli_printf(c, 0, "");
cli_printf(d->cli, 0, "");
done:
rt_show_cleanup(c);
c->cont = c->cleanup = NULL;
/* No more route showing */
rt_show_done(d);
}
struct rt_show_data_rtable *
rt_show_add_table(struct rt_show_data *d, rtable *t)
rt_show_add_exporter(struct rt_show_data *d, struct rt_exporter *t, const char *name)
{
struct rt_show_data_rtable *tab = cfg_allocz(sizeof(struct rt_show_data_rtable));
tab->table = t;
tab->name = name;
add_tail(&(d->tables), &(tab->n));
return tab;
}
struct rt_show_data_rtable *
rt_show_add_table(struct rt_show_data *d, struct rtable *t)
{
struct rt_show_data_rtable *rsdr = rt_show_add_exporter(d, &t->exporter, t->name);
struct proto_config *krt = t->config->krt_attached;
if (krt)
rsdr->kernel = (struct krt_proto *) krt->proto;
return rsdr;
}
static inline void
rt_show_get_default_tables(struct rt_show_data *d)
{
@ -384,16 +419,16 @@ rt_show_prepare_tables(struct rt_show_data *d)
if (d->export_mode)
{
if (!tab->export_channel && d->export_channel &&
(tab->table == d->export_channel->table))
(tab->table == &d->export_channel->table->exporter))
tab->export_channel = d->export_channel;
if (!tab->export_channel && d->export_protocol)
tab->export_channel = proto_find_channel_by_table(d->export_protocol, tab->table);
tab->export_channel = proto_find_channel_by_table(d->export_protocol, SKIP_BACK(rtable, exporter, tab->table));
if (!tab->export_channel)
{
if (d->tables_defined_by & RSD_TDB_NMN)
cf_error("No export channel for table %s", tab->table->name);
cf_error("No export channel for table %s", tab->name);
rem_node(&(tab->n));
continue;
@ -404,7 +439,7 @@ rt_show_prepare_tables(struct rt_show_data *d)
if (d->addr && (tab->table->addr_type != d->addr->type))
{
if (d->tables_defined_by & RSD_TDB_NMN)
cf_error("Incompatible type of prefix/ip for table %s", tab->table->name);
cf_error("Incompatible type of prefix/ip for table %s", tab->name);
rem_node(&(tab->n));
continue;
@ -416,48 +451,29 @@ rt_show_prepare_tables(struct rt_show_data *d)
cf_error("No valid tables");
}
static void
rt_show_dummy_cont(struct cli *c UNUSED)
{
/* Explicitly do nothing to prevent CLI from trying to parse another command. */
}
void
rt_show(struct rt_show_data *d)
{
struct rt_show_data_rtable *tab;
net *n;
/* Filtered routes are neither exported nor have sensible ordering */
if (d->filtered && (d->export_mode || d->primary_only))
cf_error("Incompatible show route options");
rt_show_prepare_tables(d);
if (!d->addr)
{
WALK_LIST(tab, d->tables)
rt_lock_table(tab->table);
if (EMPTY_LIST(d->tables))
cf_error("No suitable tables found");
/* There is at least one table */
d->tab = HEAD(d->tables);
this_cli->cont = rt_show_cont;
this_cli->cleanup = rt_show_cleanup;
this_cli->rover = d;
}
else
{
WALK_LIST(tab, d->tables)
{
d->tab = tab;
d->kernel = rt_show_get_kernel(d);
this_cli->cont = rt_show_dummy_cont;
if (d->show_for)
n = net_route(tab->table, d->addr);
else
n = net_find(tab->table, d->addr);
if (n)
rt_show_net(this_cli, n, d);
}
if (d->rt_counter)
cli_msg(0, "");
else
cli_msg(8001, "Network not found");
}
rt_show_cont(d);
}

File diff suppressed because it is too large

nest/rt.h (new file, 589 lines)

@ -0,0 +1,589 @@
/*
* BIRD Internet Routing Daemon -- Routing Table
*
* (c) 1998--2000 Martin Mares <mj@ucw.cz>
* (c) 2019--2021 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
#ifndef _BIRD_NEST_RT_H_
#define _BIRD_NEST_RT_H_
#include "lib/lists.h"
#include "lib/bitmap.h"
#include "lib/resource.h"
#include "lib/net.h"
#include "lib/type.h"
#include "lib/fib.h"
#include "lib/route.h"
#include "lib/event.h"
#include "lib/rcu.h"
#include <stdatomic.h>
struct ea_list;
struct protocol;
struct proto;
struct channel;
struct rte_src;
struct symbol;
struct timer;
struct filter;
struct f_trie;
struct f_trie_walk_state;
struct cli;
struct rt_cork_threshold {
u64 low, high;
};
/*
* Master Routing Tables. Generally speaking, each of them contains a FIB
* with each entry pointing to a list of route entries representing routes
* to given network (with the selected one at the head).
*
* Each of the RTEs contains variable data (the preference and protocol-dependent
* metrics) and a pointer to a route attribute block common for many routes.
*
* It's guaranteed that there is at most one RTE for every (prefix,proto) pair.
*/
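/*
 * Editor's sketch (not part of the original file): the lookup pattern the
 * comment above describes, using net_find() and the net / rte_storage
 * structures declared further below; process_route() is illustrative only.
 */
static inline void
example_walk_net(rtable *tab, const net_addr *addr, void (*process_route)(const rte *))
{
  net *n = net_find(tab, addr);

  /* The route list keeps the selected (best) route at its head */
  for (struct rte_storage *e = n ? n->routes : NULL; e; e = e->next)
    if (rte_is_valid(&e->rte))
      process_route(&e->rte);
}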
struct rtable_config {
node n;
char *name;
struct rtable *table;
struct proto_config *krt_attached; /* Kernel syncer attached to this table */
uint addr_type; /* Type of address data stored in table (NET_*) */
uint gc_threshold; /* Maximum number of operations before GC is run */
uint gc_period; /* Approximate time between two consecutive GC runs */
byte sorted; /* Routes of network are sorted according to rte_better() */
byte trie_used; /* Rtable has attached trie */
btime min_settle_time; /* Minimum settle time for notifications */
btime max_settle_time; /* Maximum settle time for notifications */
btime export_settle_time; /* Delay before exports are announced */
struct rt_cork_threshold cork_threshold; /* Cork threshold values */
};
struct rt_export_hook;
struct rt_export_request;
struct rt_exporter {
list hooks; /* Registered route export hooks */
uint addr_type; /* Type of address data exported (NET_*) */
struct rt_export_hook *(*start)(struct rt_exporter *, struct rt_export_request *);
void (*stop)(struct rt_export_hook *);
void (*done)(struct rt_export_hook *);
void (*used)(struct rt_exporter *);
list pending; /* List of packed struct rt_pending_export */
struct timer *export_timer;
struct rt_pending_export *first; /* First export to announce */
u64 next_seq; /* The next export will have this ID */
};
typedef struct rtable {
resource r;
node n; /* Node in list of all tables */
pool *rp; /* Resource pool to allocate everything from, including itself */
struct slab *rte_slab; /* Slab to allocate route objects */
struct fib fib;
struct f_trie *trie; /* Trie of prefixes defined in fib */
char *name; /* Name of this table */
uint addr_type; /* Type of address data stored in table (NET_*) */
int use_count; /* Number of protocols using this table */
u32 rt_count; /* Number of routes in the table */
list imports; /* Registered route importers */
struct rt_exporter exporter; /* Exporter API structure */
struct hmap id_map;
struct hostcache *hostcache;
struct rtable_config *config; /* Configuration of this table */
struct config *deleted; /* Table doesn't exist in current configuration,
* delete as soon as use_count becomes 0 and remove
* obstacle from this routing table.
*/
struct event *rt_event; /* Routing table event */
struct event *uncork_event; /* Called when uncork happens */
struct timer *prune_timer; /* Timer for periodic pruning / GC */
btime last_rt_change; /* Last time when route changed */
btime base_settle_time; /* Start time of rtable settling interval */
btime gc_time; /* Time of last GC */
uint gc_counter; /* Number of operations since last GC */
byte prune_state; /* Table prune state, 1 -> scheduled, 2-> running */
byte prune_trie; /* Prune prefix trie during next table prune */
byte hcu_scheduled; /* Hostcache update is scheduled */
byte hcu_corked; /* Hostcache update is corked with this state */
byte nhu_state; /* Next Hop Update state */
byte nhu_corked; /* Next Hop Update is corked with this state */
byte export_used; /* Pending Export pruning is scheduled */
byte cork_active; /* Cork has been activated */
struct rt_cork_threshold cork_threshold; /* Threshold for table cork */
struct fib_iterator prune_fit; /* Rtable prune FIB iterator */
struct fib_iterator nhu_fit; /* Next Hop Update FIB iterator */
struct f_trie *trie_new; /* New prefix trie defined during pruning */
struct f_trie *trie_old; /* Old prefix trie waiting to be freed */
u32 trie_lock_count; /* Prefix trie locked by walks */
u32 trie_old_lock_count; /* Old prefix trie locked by walks */
struct tbf rl_pipe; /* Rate limiting token buffer for pipe collisions */
list subscribers; /* Subscribers for notifications */
struct timer *settle_timer; /* Settle time for notifications */
list flowspec_links; /* List of flowspec links, src for NET_IPx and dst for NET_FLOWx */
struct f_trie *flowspec_trie; /* Trie for evaluation of flowspec notifications */
} rtable;
struct rt_subscription {
node n;
rtable *tab;
void (*hook)(struct rt_subscription *b);
void *data;
};
struct rt_flowspec_link {
node n;
rtable *src;
rtable *dst;
u32 uc;
};
extern struct rt_cork {
_Atomic uint active;
event_list queue;
event run;
} rt_cork;
static inline void rt_cork_acquire(void)
{
atomic_fetch_add_explicit(&rt_cork.active, 1, memory_order_acq_rel);
}
static inline void rt_cork_release(void)
{
if (atomic_fetch_sub_explicit(&rt_cork.active, 1, memory_order_acq_rel) == 1)
{
synchronize_rcu();
ev_schedule_work(&rt_cork.run);
}
}
static inline int rt_cork_check(event *e)
{
rcu_read_lock();
int corked = (atomic_load_explicit(&rt_cork.active, memory_order_acquire) > 0);
if (corked)
ev_send(&rt_cork.queue, e);
rcu_read_unlock();
return corked;
}
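/*
 * Editor's sketch (not part of the original file): how an event hook may
 * defer itself while the cork is active. The event ev is assumed to be
 * initialized to point back at this hook; everything else is illustrative.
 */
static inline int
example_corked_work(event *ev)
{
  /* When corked, rt_cork_check() queues ev on rt_cork.queue; queued events
   * are presumably re-run after rt_cork_release() drops the last cork and
   * schedules rt_cork.run. */
  if (rt_cork_check(ev))
    return 0;   /* postponed */

  /* ... perform the actual table work here ... */
  return 1;     /* done */
}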
#define NHU_CLEAN 0
#define NHU_SCHEDULED 1
#define NHU_RUNNING 2
#define NHU_DIRTY 3
typedef struct network {
struct rte_storage *routes; /* Available routes for this network */
struct rt_pending_export *first, *last;
struct fib_node n; /* FIB flags reserved for kernel syncer */
} net;
struct hostcache {
slab *slab; /* Slab holding all hostentries */
struct hostentry **hash_table; /* Hash table for hostentries */
unsigned hash_order, hash_shift;
unsigned hash_max, hash_min;
unsigned hash_items;
linpool *lp; /* Linpool for trie */
struct f_trie *trie; /* Trie of prefixes that might affect hostentries */
list hostentries; /* List of all hostentries */
byte update_hostcache;
};
struct hostentry {
node ln;
ip_addr addr; /* IP address of host, part of key */
ip_addr link; /* (link-local) IP address of host, used as gw
if host is directly attached */
struct rtable *tab; /* Dependent table, part of key */
struct hostentry *next; /* Next in hash chain */
unsigned hash_key; /* Hash key */
unsigned uc; /* Use count */
ea_list *src; /* Source attributes */
byte nexthop_linkable; /* Nexthop list is completely non-device */
u32 igp_metric; /* Chosen route IGP metric */
};
struct rte_storage {
struct rte_storage *next; /* Next in chain */
struct rte rte; /* Route data */
};
#define RTE_COPY(r) ((r) ? (r)->rte : (rte) {})
#define RTE_COPY_VALID(r) (((r) && (rte_is_valid(&(r)->rte))) ? (r)->rte : (rte) {})
#define RTE_OR_NULL(r) ((r) ? &((r)->rte) : NULL)
#define RTE_VALID_OR_NULL(r) (((r) && (rte_is_valid(&(r)->rte))) ? &((r)->rte) : NULL)
/* Table-channel connections */
struct rt_import_request {
struct rt_import_hook *hook; /* The table part of importer */
char *name;
u8 trace_routes;
void (*dump_req)(struct rt_import_request *req);
void (*log_state_change)(struct rt_import_request *req, u8 state);
/* Preimport is called when the @new route is about to be inserted, replacing @old.
* Return a route (may be different or modified in-place) to continue or NULL to withdraw. */
int (*preimport)(struct rt_import_request *req, struct rte *new, struct rte *old);
};
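/*
 * Editor's sketch (not part of the original header): wiring up an import
 * request. The callbacks and the table tab are assumed to exist elsewhere;
 * names are illustrative only.
 */
static inline void
example_setup_import(rtable *tab, struct rt_import_request *req,
                     void (*dump)(struct rt_import_request *),
                     void (*log_sc)(struct rt_import_request *, u8))
{
  *req = (struct rt_import_request) {
    .name = "example-import",
    .dump_req = dump,
    .log_state_change = log_sc,
    /* .preimport may be set to inspect or replace routes before insertion */
  };

  rt_request_import(tab, req);

  /* Routes are then pushed with rte_import(req, net, new, src);
   * a NULL new presumably acts as a withdraw, mirroring rte_update(). */
}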
struct rt_import_hook {
node n;
rtable *table; /* The connected table */
struct rt_import_request *req; /* The requestor */
struct rt_import_stats {
/* Import - from protocol to core */
u32 pref; /* Number of routes selected as best in the (adjacent) routing table */
u32 updates_ignored; /* Number of route updates rejected as already in route table */
u32 updates_accepted; /* Number of route updates accepted and imported */
u32 withdraws_ignored; /* Number of route withdraws rejected as already not in route table */
u32 withdraws_accepted; /* Number of route withdraws accepted and processed */
} stats;
u64 flush_seq; /* Table export seq when the channel announced flushing */
btime last_state_change; /* Time of last state transition */
u8 import_state; /* IS_* */
u8 stale_set; /* Set this stale_cycle to imported routes */
u8 stale_valid; /* Routes with this stale_cycle and bigger are considered valid */
u8 stale_pruned; /* Last prune finished when this value was set at stale_valid */
u8 stale_pruning; /* Last prune started when this value was set at stale_valid */
void (*stopped)(struct rt_import_request *); /* Stored callback when import is stopped */
};
struct rt_pending_export {
struct rt_pending_export * _Atomic next; /* Next export for the same destination */
struct rte_storage *new, *new_best, *old, *old_best;
u64 seq; /* Sequential ID (table-local) of the pending export */
};
struct rt_export_request {
struct rt_export_hook *hook; /* Table part of the export */
char *name;
const net_addr *addr; /* Network prefilter address */
u8 trace_routes;
u8 addr_mode; /* Network prefilter mode (TE_ADDR_*) */
event_list *list; /* Where to schedule export events */
/* There are two methods of export. You can either request a notification of
* every single change, or a feed of the whole route set. For regular export,
* &export_one is preferred; when feeding, &export_bulk is preferred, falling
* back to &export_one. Thus, for RA_OPTIMAL only &export_one is set,
* for RA_MERGED and RA_ACCEPTED only &export_bulk is set,
* and for RA_ANY both are set, to accommodate feeding all routes while receiving single changes
*/
void (*export_one)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
void (*export_bulk)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, rte **feed, uint count);
void (*dump_req)(struct rt_export_request *req);
void (*log_state_change)(struct rt_export_request *req, u8);
};
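/*
 * Editor's sketch (not part of the original header): a bulk export request,
 * modeled on the CLI "show route" feeder in rt-show.c above. global_work_list
 * and the callback signatures follow that code; everything else is
 * illustrative only.
 */
static inline void
example_setup_export(struct rt_exporter *exp, struct rt_export_request *req,
                     void (*bulk)(struct rt_export_request *, const net_addr *,
                                  struct rt_pending_export *, rte **, uint),
                     void (*dump)(struct rt_export_request *),
                     void (*log_sc)(struct rt_export_request *, u8))
{
  *req = (struct rt_export_request) {
    .name = "example-export",
    .list = &global_work_list,
    .export_bulk = bulk,        /* whole-feed method; set .export_one for single changes */
    .dump_req = dump,
    .log_state_change = log_sc,
  };

  rt_request_export(exp, req);

  /* ... and later, when done: rt_stop_export(req, stopped_callback); */
}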
struct rt_export_hook {
node n;
struct rt_exporter *table; /* The connected table */
pool *pool;
struct rt_export_request *req; /* The requestor */
struct rt_export_stats {
/* Export - from core to protocol */
u32 updates_received; /* Number of route updates received */
u32 withdraws_received; /* Number of route withdraws received */
} stats;
union {
struct fib_iterator feed_fit; /* Routing table iterator used during feeding */
struct {
struct f_trie_walk_state *walk_state; /* Iterator over networks in trie */
struct f_trie *walk_lock; /* Locked trie for walking */
};
u32 hash_iter; /* Iterator over hash */
};
struct bmap seq_map; /* Keeps track of which exports were already processed */
struct rt_pending_export * _Atomic last_export;/* Last export processed */
struct rt_pending_export *rpe_next; /* Next pending export to process */
btime last_state_change; /* Time of last state transition */
u8 refeed_pending; /* Refeeding and another refeed is scheduled */
_Atomic u8 export_state; /* Route export state (TES_*, see below) */
u8 feed_type; /* Which feeding method is used (TFT_*, see below) */
struct event *event; /* Event running all the export operations */
void (*stopped)(struct rt_export_request *); /* Stored callback when export is stopped */
};
#define TIS_DOWN 0
#define TIS_UP 1
#define TIS_STOP 2
#define TIS_FLUSHING 3
#define TIS_WAITING 4
#define TIS_CLEARED 5
#define TIS_MAX 6
#define TES_DOWN 0
#define TES_FEEDING 2
#define TES_READY 3
#define TES_STOP 4
#define TES_MAX 5
/* Value of addr_mode */
#define TE_ADDR_NONE 0 /* No address matching */
#define TE_ADDR_EQUAL 1 /* Exact query - show route <addr> */
#define TE_ADDR_FOR 2 /* Longest prefix match - show route for <addr> */
#define TE_ADDR_IN 3 /* Interval query - show route in <addr> */
#define TFT_FIB 1
#define TFT_TRIE 2
#define TFT_HASH 3
void rt_request_import(rtable *tab, struct rt_import_request *req);
void rt_request_export(struct rt_exporter *tab, struct rt_export_request *req);
void rt_export_once(struct rt_exporter *tab, struct rt_export_request *req);
void rt_stop_import(struct rt_import_request *, void (*stopped)(struct rt_import_request *));
void rt_stop_export(struct rt_export_request *, void (*stopped)(struct rt_export_request *));
const char *rt_import_state_name(u8 state);
const char *rt_export_state_name(u8 state);
static inline u8 rt_import_get_state(struct rt_import_hook *ih) { return ih ? ih->import_state : TIS_DOWN; }
static inline u8 rt_export_get_state(struct rt_export_hook *eh) { return eh ? eh->export_state : TES_DOWN; }
void rt_set_export_state(struct rt_export_hook *hook, u8 state);
void rte_import(struct rt_import_request *req, const net_addr *net, rte *new, struct rte_src *src);
/* Get next rpe. If src is given, it must match. */
struct rt_pending_export *rpe_next(struct rt_pending_export *rpe, struct rte_src *src);
/* Mark the pending export processed */
void rpe_mark_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);
/* Get pending export seen status */
int rpe_get_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);
/* Types of route announcement, also used as flags */
#define RA_UNDEF 0 /* Undefined RA type */
#define RA_OPTIMAL 1 /* Announcement of optimal route change */
#define RA_ACCEPTED 2 /* Announcement of first accepted route */
#define RA_ANY 3 /* Announcement of any route change */
#define RA_MERGED 4 /* Announcement of optimal route merged with next ones */
/* Return value of preexport() callback */
#define RIC_ACCEPT 1 /* Accepted by protocol */
#define RIC_PROCESS 0 /* Process it through import filter */
#define RIC_REJECT -1 /* Rejected by protocol */
#define RIC_DROP -2 /* Silently dropped by protocol */
#define rte_update channel_rte_import
/**
* rte_update - enter a new update to a routing table
* @c: channel doing the update
* @net: network address
* @rte: a &rte representing the new route
* @src: old route source identifier
*
* This function imports a new route to the appropriate table (via the channel).
* Table keys are @net (obligatory) and @rte->attrs->src.
* Both the @net and @rte pointers can be local.
*
* The route attributes (@rte->attrs) are obligatory. They can also be allocated
* locally. However, if you use an already-cached attribute object, you must
* call rta_clone() on that object yourself. (These semantics may change in the future.)
*
* If the route attributes are local, you may set @rte->attrs->src to NULL, then
* the protocol's default route source will be supplied.
*
* When rte_update() gets a route, it automatically validates it. This includes
* checking for validity of the given network and next hop addresses and also
* checking for host-scope or link-scope routes. Then the import filters are
* processed and if accepted, the route is passed to route table recalculation.
*
* The accepted routes are then inserted into the table, replacing the old route
* for the same @net identified by @src. Then the route is announced
* to all the channels connected to the table using the standard export mechanism.
* Setting @rte to NULL makes this a withdraw, otherwise @rte->src must be the same
* as @src.
*
* All memory used for temporary allocations is taken from a special linpool
* @rte_update_pool and freed when rte_update() finishes.
*/
void rte_update(struct channel *c, const net_addr *net, struct rte *rte, struct rte_src *src);
extern list routing_tables;
struct config;
void rt_init(void);
void rt_preconfig(struct config *);
void rt_postconfig(struct config *);
void rt_commit(struct config *new, struct config *old);
void rt_lock_table(rtable *);
void rt_unlock_table(rtable *);
struct f_trie * rt_lock_trie(rtable *tab);
void rt_unlock_trie(rtable *tab, struct f_trie *trie);
void rt_subscribe(rtable *tab, struct rt_subscription *s);
void rt_unsubscribe(struct rt_subscription *s);
void rt_flowspec_link(rtable *src, rtable *dst);
void rt_flowspec_unlink(rtable *src, rtable *dst);
rtable *rt_setup(pool *, struct rtable_config *);
static inline void rt_shutdown(rtable *r) { rfree(r->rp); }
static inline net *net_find(rtable *tab, const net_addr *addr) { return (net *) fib_find(&tab->fib, addr); }
static inline net *net_find_valid(rtable *tab, const net_addr *addr)
{ net *n = net_find(tab, addr); return (n && n->routes && rte_is_valid(&n->routes->rte)) ? n : NULL; }
static inline net *net_get(rtable *tab, const net_addr *addr) { return (net *) fib_get(&tab->fib, addr); }
net *net_get(rtable *tab, const net_addr *addr);
net *net_route(rtable *tab, const net_addr *n);
int rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter);
rte *rt_export_merged(struct channel *c, rte ** feed, uint count, linpool *pool, int silent);
void rt_refresh_begin(struct rt_import_request *);
void rt_refresh_end(struct rt_import_request *);
void rt_modify_stale(rtable *t, struct rt_import_request *);
void rt_schedule_prune(rtable *t);
void rte_dump(struct rte_storage *);
void rte_free(struct rte_storage *);
struct rte_storage *rte_store(const rte *, net *net, rtable *);
void rt_dump(rtable *);
void rt_dump_all(void);
void rt_dump_hooks(rtable *);
void rt_dump_hooks_all(void);
int rt_reload_channel(struct channel *c);
void rt_reload_channel_abort(struct channel *c);
void rt_refeed_channel(struct channel *c);
void rt_prune_sync(rtable *t, int all);
struct rtable_config *rt_new_table(struct symbol *s, uint addr_type);
static inline int rt_is_ip(rtable *tab)
{ return (tab->addr_type == NET_IP4) || (tab->addr_type == NET_IP6); }
static inline int rt_is_vpn(rtable *tab)
{ return (tab->addr_type == NET_VPN4) || (tab->addr_type == NET_VPN6); }
static inline int rt_is_roa(rtable *tab)
{ return (tab->addr_type == NET_ROA4) || (tab->addr_type == NET_ROA6); }
static inline int rt_is_flow(rtable *tab)
{ return (tab->addr_type == NET_FLOW4) || (tab->addr_type == NET_FLOW6); }
/* Default limit for ECMP next hops, defined in sysdep code */
extern const int rt_default_ecmp;
struct rt_show_data_rtable {
node n;
const char *name;
struct rt_exporter *table;
struct channel *export_channel;
struct channel *prefilter;
struct krt_proto *kernel;
};
struct rt_show_data {
struct cli *cli; /* Pointer back to the CLI */
net_addr *addr;
list tables;
struct rt_show_data_rtable *tab; /* Iterator over table list */
struct rt_show_data_rtable *last_table; /* Last table in output */
struct rt_export_request req; /* Export request in use */
int verbose, tables_defined_by;
const struct filter *filter;
struct proto *show_protocol;
struct proto *export_protocol;
struct channel *export_channel;
struct config *running_on_config;
struct rt_export_hook *kernel_export_hook;
int export_mode, addr_mode, primary_only, filtered, stats;
int net_counter, rt_counter, show_counter, table_counter;
int net_counter_last, rt_counter_last, show_counter_last;
int show_counter_last_flush;
};
void rt_show(struct rt_show_data *);
struct rt_show_data_rtable * rt_show_add_exporter(struct rt_show_data *d, struct rt_exporter *t, const char *name);
struct rt_show_data_rtable * rt_show_add_table(struct rt_show_data *d, struct rtable *t);
/* Value of table definition mode in struct rt_show_data */
#define RSD_TDB_DEFAULT 0 /* no table specified */
#define RSD_TDB_INDIRECT 0 /* show route ... protocol P ... */
#define RSD_TDB_ALL RSD_TDB_SET /* show route ... table all ... */
#define RSD_TDB_DIRECT RSD_TDB_SET | RSD_TDB_NMN /* show route ... table X table Y ... */
#define RSD_TDB_SET 0x1 /* internal: show empty tables */
#define RSD_TDB_NMN 0x2 /* internal: need matching net */
/* Value of export_mode in struct rt_show_data */
#define RSEM_NONE 0 /* Export mode not used */
#define RSEM_PREEXPORT 1 /* Routes ready for export, before filtering */
#define RSEM_EXPORT 2 /* Routes accepted by export filter */
#define RSEM_NOEXPORT 3 /* Routes rejected by export filter */
#define RSEM_EXPORTED 4 /* Routes marked in export map */
/* Host entry: Resolve hook for recursive nexthops */
extern struct ea_class ea_gen_hostentry;
struct hostentry_adata {
adata ad;
struct hostentry *he;
u32 labels[0];
};
void
ea_set_hostentry(ea_list **to, struct rtable *dep, struct rtable *tab, ip_addr gw, ip_addr ll, u32 lnum, u32 labels[lnum]);
void ea_show_hostentry(const struct adata *ad, byte *buf, uint size);
void ea_show_nexthop_list(struct cli *c, struct nexthop_adata *nhad);
/*
* Default protocol preferences
*/
#define DEF_PREF_DIRECT 240 /* Directly connected */
#define DEF_PREF_STATIC 200 /* Static route */
#define DEF_PREF_OSPF 150 /* OSPF intra-area, inter-area and type 1 external routes */
#define DEF_PREF_BABEL 130 /* Babel */
#define DEF_PREF_RIP 120 /* RIP */
#define DEF_PREF_BGP 100 /* BGP */
#define DEF_PREF_RPKI 100 /* RPKI */
#define DEF_PREF_INHERITED 10 /* Routes inherited from other routing daemons */
#define DEF_PREF_UNKNOWN 0 /* Routes with no preference set */
/*
* Route Origin Authorization
*/
#define ROA_UNKNOWN 0
#define ROA_VALID 1
#define ROA_INVALID 2
int net_roa_check(rtable *tab, const net_addr *n, u32 asn);
#endif


@ -2,5 +2,6 @@ src := babel.c packets.c
obj := $(src-o-files)
$(all-daemon)
$(cf-local)
$(call proto-build,babel_build)
tests_objs := $(tests_objs) $(src-o-files)


@ -37,6 +37,7 @@
#include <stdlib.h>
#include "babel.h"
#include "lib/macro.h"
#define LOG_PKT_AUTH(msg, args...) \
log_rl(&p->log_pkt_tbf, L_AUTH "%s: " msg, p->p.name, args)
@ -58,12 +59,14 @@ static void babel_update_cost(struct babel_neighbor *n);
static inline void babel_kick_timer(struct babel_proto *p);
static inline void babel_iface_kick_timer(struct babel_iface *ifa);
static struct ea_class ea_babel_metric, ea_babel_router_id, ea_babel_seqno;
/*
* Functions to maintain data structures
*/
static void
babel_init_entry(void *E)
babel_init_entry(struct fib *f UNUSED, void *E)
{
struct babel_entry *e = E;
@ -119,7 +122,7 @@ babel_get_source(struct babel_proto *p, struct babel_entry *e, u64 router_id)
}
static void
babel_expire_sources(struct babel_proto *p, struct babel_entry *e)
babel_expire_sources(struct babel_proto *p UNUSED, struct babel_entry *e)
{
struct babel_source *n, *nx;
btime now_ = current_time();
@ -129,7 +132,7 @@ babel_expire_sources(struct babel_proto *p, struct babel_entry *e)
if (n->expires && n->expires <= now_)
{
rem_node(NODE n);
sl_free(p->source_slab, n);
sl_free(n);
}
}
}
@ -174,7 +177,7 @@ babel_retract_route(struct babel_proto *p, struct babel_route *r)
}
static void
babel_flush_route(struct babel_proto *p, struct babel_route *r)
babel_flush_route(struct babel_proto *p UNUSED, struct babel_route *r)
{
DBG("Babel: Flush route %N router_id %lR neigh %I\n",
r->e->n.addr, r->router_id, r->neigh->addr);
@ -185,7 +188,7 @@ babel_flush_route(struct babel_proto *p, struct babel_route *r)
if (r->e->selected == r)
r->e->selected = NULL;
sl_free(p->route_slab, r);
sl_free(r);
}
static void
@ -312,6 +315,8 @@ babel_add_seqno_request(struct babel_proto *p, struct babel_entry *e,
/* Found older */
rem_node(NODE sr);
if (sr->nbr)
rem_node(&sr->nbr_node);
goto found;
@ -336,13 +341,13 @@ found:
}
static void
babel_remove_seqno_request(struct babel_proto *p, struct babel_seqno_request *sr)
babel_remove_seqno_request(struct babel_proto *p UNUSED, struct babel_seqno_request *sr)
{
if (sr->nbr)
rem_node(&sr->nbr_node);
rem_node(NODE sr);
sl_free(p->seqno_slab, sr);
sl_free(sr);
}
static int
@ -452,10 +457,7 @@ babel_flush_neighbor(struct babel_proto *p, struct babel_neighbor *nbr)
struct babel_seqno_request *sr;
WALK_LIST_FIRST2(sr, nbr_node, nbr->requests)
{
sr->nbr = NULL;
rem_node(&sr->nbr_node);
}
babel_remove_seqno_request(p, sr);
nbr->ifa = NULL;
rem_node(NODE nbr);
@ -640,37 +642,14 @@ babel_announce_rte(struct babel_proto *p, struct babel_entry *e)
if (r)
{
rta a0 = {
.source = RTS_BABEL,
.scope = SCOPE_UNIVERSE,
.dest = RTD_UNICAST,
.pref = c->preference,
.from = r->neigh->addr,
.nh.gw = r->next_hop,
.nh.iface = r->neigh->ifa->iface,
.eattrs = alloca(sizeof(ea_list) + 3*sizeof(eattr)),
};
*a0.eattrs = (ea_list) { .count = 3 };
a0.eattrs->attrs[0] = (eattr) {
.id = EA_BABEL_METRIC,
.type = EAF_TYPE_INT,
.u.data = r->metric,
};
struct adata *ad = alloca(sizeof(struct adata) + sizeof(u64));
ad->length = sizeof(u64);
memcpy(ad->data, &(r->router_id), sizeof(u64));
a0.eattrs->attrs[1] = (eattr) {
.id = EA_BABEL_ROUTER_ID,
.type = EAF_TYPE_OPAQUE,
.u.ptr = ad,
};
a0.eattrs->attrs[2] = (eattr) {
.id = EA_BABEL_SEQNO,
.type = EAF_TYPE_INT,
.u.data = r->seqno,
struct nexthop_adata nhad = {
.nh = {
.gw = r->next_hop,
.iface = r->neigh->ifa->iface,
},
.ad = {
.length = sizeof nhad - sizeof nhad.ad,
},
};
/*
@ -679,10 +658,26 @@ babel_announce_rte(struct babel_proto *p, struct babel_entry *e)
* have routing work.
*/
if (!neigh_find(&p->p, r->next_hop, r->neigh->ifa->iface, 0))
a0.nh.flags = RNF_ONLINK;
nhad.nh.flags = RNF_ONLINK;
struct {
ea_list l;
eattr a[7];
} eattrs = {
.l.count = ARRAY_SIZE(eattrs.a),
.a = {
EA_LITERAL_EMBEDDED(&ea_gen_preference, 0, c->preference),
EA_LITERAL_STORE_ADATA(&ea_gen_from, 0, &r->neigh->addr, sizeof(r->neigh->addr)),
EA_LITERAL_EMBEDDED(&ea_gen_source, 0, RTS_BABEL),
EA_LITERAL_STORE_ADATA(&ea_gen_nexthop, 0, nhad.ad.data, nhad.ad.length),
EA_LITERAL_EMBEDDED(&ea_babel_metric, 0, r->metric),
EA_LITERAL_STORE_ADATA(&ea_babel_router_id, 0, &r->router_id, sizeof(r->router_id)),
EA_LITERAL_EMBEDDED(&ea_babel_seqno, 0, r->seqno),
}
};
rte e0 = {
.attrs = &a0,
.attrs = &eattrs.l,
.src = p->p.main_source,
};
@ -692,15 +687,14 @@ babel_announce_rte(struct babel_proto *p, struct babel_entry *e)
else if (e->valid && (e->router_id != p->router_id))
{
/* Unreachable */
rta a0 = {
.source = RTS_BABEL,
.scope = SCOPE_UNIVERSE,
.dest = RTD_UNREACHABLE,
.pref = 1,
};
ea_list *ea = NULL;
ea_set_attr_u32(&ea, &ea_gen_preference, 0, 1);
ea_set_attr_u32(&ea, &ea_gen_source, 0, RTS_BABEL);
ea_set_dest(&ea, 0, RTD_UNREACHABLE);
rte e0 = {
.attrs = &a0,
.attrs = ea,
.src = p->p.main_source,
};
@ -862,14 +856,14 @@ babel_send_ihus(struct babel_iface *ifa)
}
static void
babel_send_hello(struct babel_iface *ifa)
babel_send_hello(struct babel_iface *ifa, uint interval)
{
struct babel_proto *p = ifa->proto;
union babel_msg msg = {};
msg.type = BABEL_TLV_HELLO;
msg.hello.seqno = ifa->hello_seqno++;
msg.hello.interval = ifa->cf->hello_interval;
msg.hello.interval = interval ?: ifa->cf->hello_interval;
TRACE(D_PACKETS, "Sending hello on %s with seqno %d interval %t",
ifa->ifname, msg.hello.seqno, (btime) msg.hello.interval);
@ -1577,7 +1571,7 @@ babel_iface_timer(timer *t)
if (now_ >= ifa->next_hello)
{
babel_send_hello(ifa);
babel_send_hello(ifa, 0);
ifa->next_hello += hello_period * (1 + (now_ - ifa->next_hello) / hello_period);
}
@ -1624,7 +1618,7 @@ babel_iface_start(struct babel_iface *ifa)
tm_start(ifa->timer, 100 MS);
ifa->up = 1;
babel_send_hello(ifa);
babel_send_hello(ifa, 0);
babel_send_wildcard_retraction(ifa);
babel_send_wildcard_request(ifa);
babel_send_update(ifa, 0); /* Full update */
@ -1919,7 +1913,7 @@ babel_reconfigure_ifaces(struct babel_proto *p, struct babel_config *cf)
struct babel_iface *ifa = babel_find_iface(p, iface);
struct babel_iface_config *ic = (void *) iface_patt_find(&cf->iface_list, iface, NULL);
if (ic && iface_is_valid(p, iface))
if (ic && !iface_is_valid(p, iface))
ic = NULL;
if (ifa && ic)
@ -2031,39 +2025,42 @@ static void
babel_get_route_info(rte *rte, byte *buf)
{
u64 rid = 0;
eattr *e = ea_find(rte->attrs->eattrs, EA_BABEL_ROUTER_ID);
eattr *e = ea_find(rte->attrs, &ea_babel_router_id);
if (e)
memcpy(&rid, e->u.ptr->data, sizeof(u64));
buf += bsprintf(buf, " (%d/%d) [%lR]", rte->attrs->pref,
ea_get_int(rte->attrs->eattrs, EA_BABEL_METRIC, BABEL_INFINITY), rid);
buf += bsprintf(buf, " (%d/%d) [%lR]",
rt_get_preference(rte),
ea_get_int(rte->attrs, &ea_babel_metric, BABEL_INFINITY), rid);
}
static int
babel_get_attr(const eattr *a, byte *buf, int buflen UNUSED)
static void
babel_router_id_format(const eattr *a, byte *buf, uint len)
{
switch (a->id)
{
case EA_BABEL_SEQNO:
return GA_FULL;
case EA_BABEL_METRIC:
bsprintf(buf, "metric: %d", a->u.data);
return GA_FULL;
case EA_BABEL_ROUTER_ID:
{
u64 rid = 0;
memcpy(&rid, a->u.ptr->data, sizeof(u64));
bsprintf(buf, "router_id: %lR", rid);
return GA_FULL;
}
default:
return GA_UNKNOWN;
}
bsnprintf(buf, len, "%lR", rid);
}
static struct ea_class ea_babel_metric = {
.name = "babel_metric",
.type = T_INT,
};
static struct ea_class ea_babel_router_id = {
.name = "babel_router_id",
.type = T_OPAQUE,
.readonly = 1,
.format = babel_router_id_format,
};
static struct ea_class ea_babel_seqno = {
.name = "babel_seqno",
.type = T_INT,
.readonly = 1,
};
void
babel_show_interfaces(struct proto *P, const char *iff)
{
@ -2260,11 +2257,15 @@ babel_kick_timer(struct babel_proto *p)
static int
babel_preexport(struct channel *c, struct rte *new)
babel_preexport(struct channel *C, struct rte *new)
{
struct rta *a = new->attrs;
if (new->src->proto != C->proto)
return 0;
/* Reject our own unreachable routes */
if ((a->dest == RTD_UNREACHABLE) && (new->src->proto == c->proto))
eattr *ea = ea_find(new->attrs, &ea_gen_nexthop);
struct nexthop_adata *nhad = (void *) ea->u.ptr;
if (!NEXTHOP_IS_REACHABLE(nhad))
return -1;
return 0;
@ -2285,13 +2286,13 @@ babel_rt_notify(struct proto *P, struct channel *c UNUSED, const net_addr *net,
{
/* Update */
uint rt_seqno;
uint rt_metric = ea_get_int(new->attrs->eattrs, EA_BABEL_METRIC, 0);
uint rt_metric = ea_get_int(new->attrs, &ea_babel_metric, 0);
u64 rt_router_id = 0;
if (new->src->proto == P)
{
rt_seqno = ea_find(new->attrs->eattrs, EA_BABEL_SEQNO)->u.data;
eattr *e = ea_find(new->attrs->eattrs, EA_BABEL_ROUTER_ID);
rt_seqno = ea_get_int(new->attrs, &ea_babel_seqno, 0);
eattr *e = ea_find(new->attrs, &ea_babel_router_id);
if (e)
memcpy(&rt_router_id, e->u.ptr->data, sizeof(u64));
}
@ -2342,16 +2343,16 @@ babel_rt_notify(struct proto *P, struct channel *c UNUSED, const net_addr *net,
static int
babel_rte_better(struct rte *new, struct rte *old)
{
uint new_metric = ea_find(new->attrs->eattrs, EA_BABEL_SEQNO)->u.data;
uint old_metric = ea_find(old->attrs->eattrs, EA_BABEL_SEQNO)->u.data;
uint new_metric = ea_get_int(new->attrs, &ea_babel_metric, BABEL_INFINITY);
uint old_metric = ea_get_int(old->attrs, &ea_babel_metric, BABEL_INFINITY);
return new_metric < old_metric;
}
static u32
babel_rte_igp_metric(struct rte *rt)
babel_rte_igp_metric(const rte *rt)
{
return ea_get_int(rt->attrs->eattrs, EA_BABEL_METRIC, BABEL_INFINITY);
return ea_get_int(rt->attrs, &ea_babel_metric, BABEL_INFINITY);
}
@ -2435,6 +2436,11 @@ babel_iface_shutdown(struct babel_iface *ifa)
{
if (ifa->sk)
{
/*
* Retract all our routes and lower the hello interval so peers' neighbour
* state expires quickly
*/
babel_send_hello(ifa, BABEL_MIN_INTERVAL);
babel_send_wildcard_retraction(ifa);
babel_send_queue(ifa);
}
@ -2479,11 +2485,9 @@ babel_reconfigure(struct proto *P, struct proto_config *CF)
return 1;
}
struct protocol proto_babel = {
.name = "Babel",
.template = "babel%d",
.class = PROTOCOL_BABEL,
.preference = DEF_PREF_BABEL,
.channel_mask = NB_IP | NB_IP6_SADR,
.proto_size = sizeof(struct babel_proto),
@ -2495,5 +2499,16 @@ struct protocol proto_babel = {
.shutdown = babel_shutdown,
.reconfigure = babel_reconfigure,
.get_route_info = babel_get_route_info,
.get_attr = babel_get_attr
};
void
babel_build(void)
{
proto_build(&proto_babel);
EA_REGISTER_ALL(
&ea_babel_metric,
&ea_babel_router_id,
&ea_babel_seqno
);
}

proto/babel/babel.h

@ -16,7 +16,7 @@
#include "nest/bird.h"
#include "nest/cli.h"
#include "nest/iface.h"
#include "nest/route.h"
#include "nest/rt.h"
#include "nest/protocol.h"
#include "nest/locks.h"
#include "nest/password.h"
@ -26,10 +26,6 @@
#include "lib/string.h"
#include "lib/timer.h"
#define EA_BABEL_METRIC EA_CODE(PROTOCOL_BABEL, 0)
#define EA_BABEL_ROUTER_ID EA_CODE(PROTOCOL_BABEL, 1)
#define EA_BABEL_SEQNO EA_CODE(PROTOCOL_BABEL, 2)
#define BABEL_MAGIC 42
#define BABEL_VERSION 2
#define BABEL_PORT 6696

proto/babel/config.Y

@ -24,7 +24,7 @@ CF_DECLS
CF_KEYWORDS(BABEL, INTERFACE, METRIC, RXCOST, HELLO, UPDATE, INTERVAL, PORT,
TYPE, WIRED, WIRELESS, RX, TX, BUFFER, PRIORITY, LENGTH, CHECK, LINK,
NEXT, HOP, IPV4, IPV6, BABEL_METRIC, SHOW, INTERFACES, NEIGHBORS,
NEXT, HOP, IPV4, IPV6, SHOW, INTERFACES, NEIGHBORS,
ENTRIES, RANDOMIZE, ROUTER, ID, AUTHENTICATION, NONE, MAC, PERMISSIVE)
CF_GRAMMAR
@ -163,8 +163,6 @@ babel_iface_opt_list:
babel_iface:
babel_iface_start iface_patt_list_nopx babel_iface_opt_list babel_iface_finish;
dynamic_attr: BABEL_METRIC { $$ = f_new_dynamic_attr(EAF_TYPE_INT, T_INT, EA_BABEL_METRIC); } ;
CF_CLI_HELP(SHOW BABEL, ..., [[Show information about Babel protocol]]);
CF_CLI(SHOW BABEL INTERFACES, optproto opttext, [<name>] [\"<interface>\"], [[Show information about Babel interfaces]])

proto/babel/packets.c

@ -1318,7 +1318,6 @@ babel_send_to(struct babel_iface *ifa, ip_addr dest)
static uint
babel_write_queue(struct babel_iface *ifa, list *queue)
{
struct babel_proto *p = ifa->proto;
struct babel_write_state state = { .next_hop_ip6 = ifa->addr };
if (EMPTY_LIST(*queue))
@ -1346,7 +1345,7 @@ babel_write_queue(struct babel_iface *ifa, list *queue)
pos += len;
rem_node(NODE msg);
sl_free(p->msg_slab, msg);
sl_free(msg);
}
pos += babel_auth_add_tlvs(ifa, (struct babel_tlv *) pos, end - pos);
@ -1507,13 +1506,13 @@ babel_process_packet(struct babel_iface *ifa,
else if (res == PARSE_IGNORE)
{
DBG("Babel: Ignoring TLV of type %d\n", tlv->type);
sl_free(p->msg_slab, msg);
sl_free(msg);
}
else /* PARSE_ERROR */
{
LOG_PKT("Bad TLV from %I via %s type %d pos %d - parse error",
saddr, ifa->iface->name, tlv->type, (int) ((byte *)tlv - (byte *)pkt));
sl_free(p->msg_slab, msg);
sl_free(msg);
break;
}
}
@ -1525,7 +1524,7 @@ babel_process_packet(struct babel_iface *ifa,
if (tlv_data[msg->msg.type].handle_tlv)
tlv_data[msg->msg.type].handle_tlv(&msg->msg, ifa);
rem_node(NODE msg);
sl_free(p->msg_slab, msg);
sl_free(msg);
}
}
@ -2011,7 +2010,7 @@ babel_auth_sign(struct babel_iface *ifa, ip_addr dest)
}
DBG("Added MAC signatures (%d bytes) on ifa %s for dest %I\n",
tot_len, ifa->ifname, dest);
pos - (pkt + len), ifa->ifname, dest);
return pos - (pkt + len);
}

proto/bfd/Makefile

@ -2,5 +2,6 @@ src := bfd.c packets.c
obj := $(src-o-files)
$(all-daemon)
$(cf-local)
$(call proto-build,bfd_build)
tests_objs := $(tests_objs) $(src-o-files)

proto/bfd/bfd.c

@ -512,7 +512,7 @@ bfd_remove_session_locked(struct bfd_proto *p, struct bfd_session *s)
TRACE(D_EVENTS, "Session to %I removed", s->addr);
sl_free(p->session_slab, s);
sl_free(s);
}
static void
@ -590,6 +590,9 @@ bfd_get_iface(struct bfd_proto *p, ip_addr local, struct iface *iface)
ifa->sk = bfd_open_tx_sk(p, local, iface);
ifa->uc = 1;
if (cf->strict_bind)
ifa->rx = bfd_open_rx_sk_bound(p, local, iface);
add_tail(&p->iface_list, &ifa->n);
return ifa;
@ -607,6 +610,12 @@ bfd_free_iface(struct bfd_iface *ifa)
rfree(ifa->sk);
}
if (ifa->rx)
{
sk_stop(ifa->rx);
rfree(ifa->rx);
}
rem_node(&ifa->n);
mb_free(ifa);
}
@ -1035,15 +1044,6 @@ bfd_notify_init(struct bfd_proto *p)
* BFD protocol glue
*/
void
bfd_init_all(void)
{
bfd_global.lock = DOMAIN_NEW(rtable, "BFD Global");
init_list(&bfd_global.wait_list);
init_list(&bfd_global.pickup_list);
init_list(&bfd_global.proto_list);
}
static struct proto *
bfd_init(struct proto_config *c)
{
@ -1075,6 +1075,8 @@ bfd_start(struct proto *P)
add_tail(&bfd_global.proto_list, &p->bfd_node);
if (!cf->strict_bind)
{
if (cf->accept_ipv4 && cf->accept_direct)
p->rx4_1 = bfd_open_rx_sk(p, 0, SK_IPV4);
@ -1086,6 +1088,7 @@ bfd_start(struct proto *P)
if (cf->accept_ipv6 && cf->accept_multihop)
p->rx6_m = bfd_open_rx_sk(p, 1, SK_IPV6);
}
bfd_take_requests(p);
@ -1130,7 +1133,8 @@ bfd_reconfigure(struct proto *P, struct proto_config *c)
if ((new->accept_ipv4 != old->accept_ipv4) ||
(new->accept_ipv6 != old->accept_ipv6) ||
(new->accept_direct != old->accept_direct) ||
(new->accept_multihop != old->accept_multihop))
(new->accept_multihop != old->accept_multihop) ||
(new->strict_bind != old->strict_bind))
return 0;
birdloop_mask_wakeups(p->p.loop);
@ -1205,7 +1209,6 @@ bfd_show_sessions(struct proto *P)
struct protocol proto_bfd = {
.name = "BFD",
.template = "bfd%d",
.class = PROTOCOL_BFD,
.proto_size = sizeof(struct bfd_proto),
.config_size = sizeof(struct bfd_config),
.init = bfd_init,
@ -1214,3 +1217,14 @@ struct protocol proto_bfd = {
.reconfigure = bfd_reconfigure,
.copy_config = bfd_copy_config,
};
void
bfd_build(void)
{
proto_build(&proto_bfd);
bfd_global.lock = DOMAIN_NEW(rtable, "BFD Global");
init_list(&bfd_global.wait_list);
init_list(&bfd_global.pickup_list);
init_list(&bfd_global.proto_list);
}

proto/bfd/bfd.h

@ -13,7 +13,7 @@
#include "nest/cli.h"
#include "nest/iface.h"
#include "nest/protocol.h"
#include "nest/route.h"
#include "nest/rt.h"
#include "nest/password.h"
#include "conf/conf.h"
#include "lib/hash.h"
@ -47,6 +47,7 @@ struct bfd_config
u8 accept_ipv6;
u8 accept_direct;
u8 accept_multihop;
u8 strict_bind;
};
struct bfd_iface_config
@ -118,6 +119,7 @@ struct bfd_iface
struct bfd_proto *bfd;
sock *sk;
sock *rx;
u32 uc;
u8 changed;
};
@ -223,6 +225,7 @@ void bfd_show_sessions(struct proto *P);
/* packets.c */
void bfd_send_ctl(struct bfd_proto *p, struct bfd_session *s, int final);
sock * bfd_open_rx_sk(struct bfd_proto *p, int multihop, int inet_version);
sock * bfd_open_rx_sk_bound(struct bfd_proto *p, ip_addr local, struct iface *ifa);
sock * bfd_open_tx_sk(struct bfd_proto *p, ip_addr local, struct iface *ifa);

Some files were not shown because too many files have changed in this diff.