Mirror of https://gitlab.nic.cz/labs/bird.git, synced 2024-11-09 20:58:44 +00:00

Merge a branch of fixes and updates needed for stabilization of v3.

Merge commit 'c06ce709' into HEAD
This commit is contained in:
Maria Matejka 2024-05-19 11:28:03 +02:00
commit fe3d7c1a17
16 changed files with 216 additions and 64 deletions

View File

@ -68,3 +68,21 @@ u32_log2(u32 v)
return r;
}
/**
 * u32_bitflip - reverse the bit order of a number.
 * @n: number
 *
 * Returns @n with its 32 bits mirrored: bit 0 exchanges with bit 31,
 * bit 1 with bit 30, and so on.
 */
u32
u32_bitflip(u32 n)
{
  /* Swap progressively smaller groups: halves, bytes, nibbles, pairs, single bits. */
  n = (n >> 16) | (n << 16);
  n = ((n >> 8) & 0x00ff00ff) | ((n << 8) & 0xff00ff00);
  n = ((n >> 4) & 0x0f0f0f0f) | ((n << 4) & 0xf0f0f0f0);
  n = ((n >> 2) & 0x33333333) | ((n << 2) & 0xcccccccc);
  n = ((n >> 1) & 0x55555555) | ((n << 1) & 0xaaaaaaaa);
  return n;
}

View File

@ -25,6 +25,8 @@ uint u32_masklen(u32 x);
u32 u32_log2(u32 v);
u32 u32_bitflip(u32 n);
static inline u32 u32_hash(u32 v) { return v * 2902958171u; }
static inline u8 u32_popcount(u32 v) { return __builtin_popcount(v); }

View File

@ -110,6 +110,31 @@ t_log2(void)
return 1;
}
/* Verify that u32_bitflip() mirrored @n correctly: bit i of the input
 * must equal bit (31-i) of the result, checked from both directions. */
static void
check_bitflip(u32 n)
{
  u32 flipped = u32_bitflip(n);

  for (int pos = 0; pos < 16; pos++)
  {
    bt_assert(!((n >> pos) & 1) == !((flipped << pos) & 0x80000000));
    bt_assert(!((flipped >> pos) & 1) == !((n << pos) & 0x80000000));
  }
}
/* Test driver: exhaustively check small values up to MAX_NUM,
 * plus one random 32-bit value per iteration. */
static int
t_bitflip(void)
{
  for (u32 num = 0; num < MAX_NUM; num++)
  {
    check_bitflip(num);
    check_bitflip((u32) bt_random());
  }

  return 1;
}
int
main(int argc, char *argv[])
{
@ -118,6 +143,7 @@ main(int argc, char *argv[])
bt_test_suite(t_mkmask, "u32_mkmask()");
bt_test_suite(t_masklen, "u32_masklen()");
bt_test_suite(t_log2, "u32_log2()");
bt_test_suite(t_bitflip, "u32_bitflip()");
return bt_exit_value();
}

View File

@ -160,7 +160,11 @@ ev_postpone(event *e)
ASSERT_DIE(birdloop_inside(sl->loop));
/* Remove from one of these lists. */
ASSERT(ev_remove_from(e, &sl->_executor) || ev_remove_from(e, &sl->receiver));
while (
!ev_remove_from(e, &sl->_executor) &&
!ev_remove_from(e, &sl->receiver))
/* We may need to wait until the sender actually puts the event inside */
birdloop_yield();
/* Mark as inactive */
ASSERT_DIE(sl == atomic_exchange_explicit(&e->list, NULL, memory_order_acq_rel));
@ -242,7 +246,7 @@ ev_send(event_list *l, event *e)
else
bug("Queuing an already queued event to another queue is not supported.");
/* Here should be no concurrent senders */
/* Here should be no concurrent senders of this event */
event *next = atomic_load_explicit(&l->receiver, memory_order_acquire);
edlog(l, e, next, 2, EDL_SEND);
event *old_next = NULL;

View File

@ -16,6 +16,12 @@
extern struct birdloop main_birdloop;
/* Currently running birdloop */
extern _Thread_local struct birdloop *this_birdloop;
/* Check that the task has enough time to do a bit more */
_Bool task_still_in_limit(void);
/* Start a new birdloop owned by given pool and domain */
struct birdloop *birdloop_new(pool *p, uint order, btime max_latency, const char *fmt, ...);
@ -29,6 +35,9 @@ void birdloop_free(struct birdloop *loop);
/* Get birdloop's event list */
event_list *birdloop_event_list(struct birdloop *loop);
/* Run this event in this thread's priority event list */
void ev_send_this_thread(event *e);
/* Get birdloop's time heap */
struct timeloop *birdloop_time_loop(struct birdloop *loop);
#define birdloop_domain(l) (birdloop_time_loop((l))->domain)

View File

@ -81,6 +81,54 @@ extern DOMAIN(the_bird) the_bird_domain;
#define ASSERT_THE_BIRD_LOCKED ({ if (!the_bird_locked()) bug("The BIRD lock must be locked here: %s:%d", __FILE__, __LINE__); })
/* Unwind stored lock state helpers */
struct locking_unwind_status {
struct lock_order *desired;
enum {
LOCKING_UNWIND_SAME,
LOCKING_UNWIND_UNLOCK,
} state;
};
/*
 * Process one lock-order level while unwinding the lock stack towards
 * @status.desired. In LOCKING_UNWIND_UNLOCK state it releases locks held
 * above the stored state; once the stored and current stacks meet, it
 * switches to LOCKING_UNWIND_SAME and only verifies consistency.
 * NOTE(review): @order appears to be a byte offset into struct lock_order
 * (see the pointer arithmetic below) — confirm against LOCK_ORDER_POS.
 */
static inline struct locking_unwind_status locking_unwind_helper(struct locking_unwind_status status, uint order)
{
/* Slot for this order level in the thread's current lock stack ... */
struct domain_generic **lsp = ((void *) &locking_stack) + order;
/* ... and in the desired (stored) lock state. */
struct domain_generic **dp = ((void *) status.desired) + order;
if (!status.state)
{
/* Just checking that the rest of the stack is consistent */
if (*lsp != *dp)
bug("Mangled lock unwind state at order %d", order);
}
else if (*dp)
/* Stored state expects locked */
if (*lsp == *dp)
/* Indeed is locked, switch to check mode */
status.state = 0;
else
/* Not locked or locked elsewhere */
bug("Mangled lock unwind state at order %d", order);
else if (*lsp)
/* Stored state expects unlocked but we're locked */
DG_UNLOCK(*lsp);
return status;
}
/*
 * Unwind the current lock stack back to the stored state @desired:
 * unlock everything held above it, then check the remainder matches.
 * NOTE(review): MACRO_RPACK presumably folds locking_unwind_helper over
 * all LOCK_ORDER levels (reversed, highest order first) — verify against
 * the MACRO_RPACK definition.
 */
static inline void locking_unwind(struct lock_order *desired)
{
/* Start in unlock mode; the helper flips to check mode at the meet point. */
struct locking_unwind_status status = {
.desired = desired,
.state = LOCKING_UNWIND_UNLOCK,
};
#define LOCK_ORDER_POS_HELPER(x) DOMAIN_ORDER(x),
#define LOCK_ORDER_POS MACRO_FOREACH(LOCK_ORDER_POS_HELPER, LOCK_ORDER)
MACRO_RPACK(locking_unwind_helper, status, LOCK_ORDER_POS);
#undef LOCK_ORDER_POS_HELPER
}
/**
* Objects bound with domains
*

View File

@ -155,6 +155,24 @@ extern const u16 net_max_text_length[];
#define NET_MAX_TEXT_LENGTH 256
#define NET_PTR_UNION(_n) SKIP_BACK(net_addr_union, n, (_n))
#define NET_ASSERT_TYPE(_n, _t) ASSERT_DIE((_n)->type == (_t))
#define NET_PTR_GEN(_n, _t, _u) ({ \
net_addr_union *u = NET_PTR_UNION(_n); \
NET_ASSERT_TYPE(&(u->n), _t); \
&u->_u; })
#define NET_PTR_IP4(_n) NET_PTR_GEN((_n), NET_IP4, ip4)
#define NET_PTR_IP6(_n) NET_PTR_GEN((_n), NET_IP6, ip6)
#define NET_PTR_VPN4(_n) NET_PTR_GEN((_n), NET_VPN4, vpn4)
#define NET_PTR_VPN6(_n) NET_PTR_GEN((_n), NET_VPN6, vpn6)
#define NET_PTR_ROA4(_n) NET_PTR_GEN((_n), NET_ROA4, roa4)
#define NET_PTR_ROA6(_n) NET_PTR_GEN((_n), NET_ROA6, roa6)
#define NET_PTR_FLOW4(_n) NET_PTR_GEN((_n), NET_FLOW4, flow4)
#define NET_PTR_FLOW6(_n) NET_PTR_GEN((_n), NET_FLOW6, flow6)
#define NET_PTR_IP6_SADR(_n) NET_PTR_GEN((_n), NET_IP6_SADR, ip6_sadr)
#define NET_PTR_MPLS(_n) NET_PTR_GEN((_n), NET_MPLS, mpls)
#define NET_ADDR_IP4(prefix,pxlen) \
((net_addr_ip4) { NET_IP4, pxlen, sizeof(net_addr_ip4), prefix })

View File

@ -108,11 +108,11 @@ static int
t_bstrcmp(void)
{
bt_assert(bstrcmp("aa", "aa") == 0);
bt_assert(bstrcmp("aa", "bb") == -1);
bt_assert(bstrcmp("bb", "aa") == 1);
bt_assert(bstrcmp("aa", "bb") < 0);
bt_assert(bstrcmp("bb", "aa") > 0);
bt_assert(bstrcmp(NULL, NULL) == 0);
bt_assert(bstrcmp(NULL, "bb") == -1);
bt_assert(bstrcmp("bb", NULL) == 1);
bt_assert(bstrcmp(NULL, "bb") < 0);
bt_assert(bstrcmp("bb", NULL) > 0);
return 1;
}

View File

@ -117,6 +117,8 @@ void times_update(void);
void timers_init(struct timeloop *loop, pool *p);
void timers_fire(struct timeloop *loop, int io_log);
/* For extra fine precision */
u64 ns_now(void);
struct timeformat {
const char *fmt1, *fmt2;

View File

@ -71,15 +71,14 @@
#define TLIST_NAME(x) MACRO_CONCAT_AFTER(TLIST_PREFIX,_##x)
#ifndef TLIST_LIST_STRUCT
#define TLIST_LIST_STRUCT TLIST_NAME(list)
#define TLIST_LIST_STRUCT struct TLIST_NAME(list)
#endif
typedef struct TLIST_LIST_STRUCT {
TLIST_TYPE *first;
TLIST_TYPE *last;
} TLIST_LIST_STRUCT;
#ifndef TLIST_DEFINED_BEFORE
TLIST_STRUCT_DEF(TLIST_PREFIX, TLIST_TYPE);
#endif
static inline struct TLIST_LIST_STRUCT * TLIST_NAME(enlisted)(TLIST_TYPE *node)
static inline TLIST_LIST_STRUCT * TLIST_NAME(enlisted)(TLIST_TYPE *node)
{
return node->TLIST_ITEM.list;
}
@ -206,6 +205,7 @@ static inline void TLIST_NAME(rem_node)(TLIST_LIST_STRUCT *list, TLIST_TYPE *nod
#endif
#define TLIST_LIST(_name) struct _name##_list
#define TLIST_STRUCT_DEF(_name, _type) TLIST_LIST(_name) { _type *first, *last; }
#define TLIST_NODE_IN(_name, _type) { _type *next; _type *prev; TLIST_LIST(_name) *list; }
#define TLIST_NODE(_name, _type) struct _name##_node TLIST_NODE_IN(_name, _type)
@ -231,5 +231,8 @@ static inline void TLIST_NAME(rem_node)(TLIST_LIST_STRUCT *list, TLIST_TYPE *nod
/* Empty check */
#define EMPTY_TLIST(_name, _list) (!(_list)->first)
/* List length */
#define TLIST_LENGTH(_name, _list) ({ uint _len = 0; WALK_TLIST(_name, _, _list) _len++; _len; })
#endif

View File

@ -83,12 +83,11 @@ dev_ifa_notify(struct proto *P, uint flags, struct ifa *ad)
ea_list *ea = NULL;
struct nexthop_adata nhad = {
.nh = { .iface = ad->iface, },
.ad = { .length = (void *) NEXTHOP_NEXT(&nhad.nh) - (void *) nhad.ad.data, },
};
ea_set_attr_u32(&ea, &ea_gen_preference, 0, c->preference);
ea_set_attr_u32(&ea, &ea_gen_source, 0, RTS_DEVICE);
ea_set_attr_data(&ea, &ea_gen_nexthop, 0, nhad.ad.data, nhad.ad.length);
ea_set_attr_data(&ea, &ea_gen_nexthop, 0, nhad.ad.data, (void *) NEXTHOP_NEXT(&nhad.nh) - (void *) nhad.ad.data);
rte e0 = {
.attrs = ea,

View File

@ -734,15 +734,12 @@ do_rt_notify(struct channel *c, const net_addr *net, rte *new, const rte *old)
if (new)
bmap_set(&c->export_map, new->id);
if (p->debug & D_ROUTES)
{
if (new && old)
channel_rte_trace_out(D_ROUTES, c, new, "replaced");
else if (new)
channel_rte_trace_out(D_ROUTES, c, new, "added");
else if (old)
channel_rte_trace_out(D_ROUTES, c, old, "removed");
}
if (new && old)
channel_rte_trace_out(D_ROUTES, c, new, "replaced");
else if (new)
channel_rte_trace_out(D_ROUTES, c, new, "added");
else if (old)
channel_rte_trace_out(D_ROUTES, c, old, "removed");
p->rt_notify(p, c, net, new, old);
}
@ -2024,7 +2021,7 @@ rt_stop_import(struct rt_import_request *req, void (*stopped)(struct rt_import_r
/* Cancel table rr_counter */
if (hook->stale_set != hook->stale_pruned)
tab->rr_counter -= (hook->stale_set - hook->stale_pruned);
tab->rr_counter -= ((int) hook->stale_set - (int) hook->stale_pruned);
tab->rr_counter++;
@ -2302,7 +2299,7 @@ rt_refresh_begin(struct rt_import_request *req)
e->stale_cycle = 0;
/* Smash the route refresh counter and zero everything. */
tab->rr_counter -= hook->stale_set - hook->stale_pruned;
tab->rr_counter -= ((int) hook->stale_set - (int) hook->stale_pruned);
hook->stale_set = hook->stale_valid = hook->stale_pruning = hook->stale_pruned = 0;
}
@ -3036,7 +3033,7 @@ rt_prune_table(struct rtable_private *tab)
}
else if (ih->stale_pruning != ih->stale_pruned)
{
tab->rr_counter -= (ih->stale_pruning - ih->stale_pruned);
tab->rr_counter -= ((int) ih->stale_pruning - (int) ih->stale_pruned);
ih->stale_pruned = ih->stale_pruning;
rt_refresh_trace(tab, ih, "table prune after refresh end");
}
@ -3342,17 +3339,18 @@ ea_set_hostentry(ea_list **to, rtable *dep, rtable *src, ip_addr gw, ip_addr ll,
{
struct {
struct hostentry_adata head;
u32 label_space[lnum];
} h;
memset(&h, 0, sizeof h);
u32 label_space[];
} *h;
u32 sz = sizeof *h + lnum * sizeof(u32);
h = alloca(sz);
memset(h, 0, sz);
RT_LOCKED(src, tab)
h.head.he = rt_get_hostentry(tab, gw, ll, dep);
h->head.he = rt_get_hostentry(tab, gw, ll, dep);
memcpy(h.head.labels, labels, lnum * sizeof(u32));
memcpy(h->head.labels, labels, lnum * sizeof(u32));
ea_set_attr_data(to, &ea_gen_hostentry, 0, h.head.ad.data, (byte *) &h.head.labels[lnum] - h.head.ad.data);
ea_set_attr_data(to, &ea_gen_hostentry, 0, h->head.ad.data, (byte *) &h->head.labels[lnum] - h->head.ad.data);
}
@ -4738,7 +4736,7 @@ rt_update_hostcache(void *data)
hc->req = (struct rt_export_request) {
.name = mb_sprintf(tab->rp, "%s.hcu.notifier", tab->name),
.list = birdloop_event_list(tab->loop),
.pool = tab->rp,
.pool = birdloop_pool(tab->loop),
.trace_routes = tab->config->debug,
.dump_req = hc_notify_dump_req,
.log_state_change = hc_notify_log_state_change,
@ -4799,10 +4797,7 @@ hostentry_tmp_unlock(resource *r)
{
struct hostentry_tmp_lock *l = SKIP_BACK(struct hostentry_tmp_lock, r, r);
RT_LOCKED(l->tab, tab)
{
l->he->uc--;
rt_unlock_table(tab);
}
}
static void
@ -4847,7 +4842,6 @@ rt_get_hostentry(struct rtable_private *tab, ip_addr a, ip_addr ll, rtable *dep)
l->he = he;
l->tab = RT_PUB(tab);
l->he->uc++;
rt_lock_table(tab);
return he;
}

View File

@ -1104,17 +1104,11 @@ bgp_apply_next_hop(struct bgp_parse_state *s, ea_list **to, ip_addr gw, ip_addr
ea_set_attr_u32(to, &ea_gen_igp_metric, 0, c->cf->cost);
struct nexthop_adata_mpls nam = {
.nhad = {
.nh = {
.gw = nbr->addr,
.iface = nbr->iface,
},
.ad = {
.length = NEXTHOP_NEXT(&nam.nhad.nh) - (void *) nam.nhad.ad.data,
},
},
};
struct nexthop_adata_mpls nam;
memset(&nam, 0, sizeof nam);
nam.nhad.nh.gw = nbr->addr;
nam.nhad.nh.iface = nbr->iface;
nam.nhad.ad.length = NEXTHOP_NEXT(&nam.nhad.nh) - (void *) nam.nhad.ad.data;
ea_set_attr_data(to, &ea_gen_nexthop, 0, nam.nhad.ad.data, nam.nhad.ad.length);
}
else /* GW_RECURSIVE */
@ -3506,7 +3500,7 @@ bgp_rx(sock *sk, uint size)
{
sk_pause_rx(p->p.loop, sk);
BGP_TRACE(D_PACKETS, "Corked");
return 0;
break;
}
for(i=0; i<16; i++)
if (pkt_start[i] != 0xff)

View File

@ -34,15 +34,11 @@ unresolved_vlink(ort *ort)
static inline struct nexthop_adata *
new_nexthop(struct ospf_proto *p, ip_addr gw, struct iface *iface, byte weight)
{
struct nexthop_adata *nhad = lp_alloc(p->nhpool, sizeof(struct nexthop_adata));
*nhad = (struct nexthop_adata) {
.ad = { .length = sizeof *nhad - sizeof nhad->ad, },
.nh = {
.gw = gw,
.iface = iface,
.weight = weight,
},
};
struct nexthop_adata *nhad = lp_allocz(p->nhpool, sizeof(struct nexthop_adata));
nhad->ad.length = sizeof *nhad - sizeof nhad->ad;
nhad->nh.gw = gw;
nhad->nh.iface = iface;
nhad->nh.weight = weight;
return nhad;
}

View File

@ -564,13 +564,19 @@ static_shutdown(struct proto *P)
WALK_LIST(r, cf->routes)
static_reset_rte(p, r);
return PS_DOWN;
}
/*
 * Proto cleanup hook: release the IGP table locks held by this instance.
 * Fix: the original ended with `return PS_DOWN;` inside a void function,
 * which is invalid C (a return with an expression is not permitted in a
 * function returning void, C11 6.8.6.4) — apparently copied over from
 * static_shutdown(). Removed.
 */
static void
static_cleanup(struct proto *P)
{
  struct static_proto *p = (void *) P;

  if (p->igp_table_ip4)
    rt_unlock_table(p->igp_table_ip4);

  if (p->igp_table_ip6)
    rt_unlock_table(p->igp_table_ip6);
}
static void
@ -835,6 +841,7 @@ struct protocol proto_static = {
.dump = static_dump,
.start = static_start,
.shutdown = static_shutdown,
.cleanup = static_cleanup,
.reconfigure = static_reconfigure,
.copy_config = static_copy_config,
};

View File

@ -51,7 +51,7 @@ static void ns_init(void)
#define NSEC_IN_SEC ((u64) (1000 * 1000 * 1000))
static u64 ns_now(void)
u64 ns_now(void)
{
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts))
@ -817,6 +817,7 @@ bird_thread_main(void *arg)
account_to(&thr->overhead);
birdloop_enter(thr->meta);
this_birdloop = thr->meta;
tmp_init(thr->pool, birdloop_domain(thr->meta));
init_list(&thr->loops);
@ -885,7 +886,7 @@ bird_thread_main(void *arg)
ASSERT_DIE(pfd.loop.used == pfd.pfd.used);
}
/* Nothing to do in at least 5 seconds, flush local hot page cache */
else if ((timeout > 5000) && (timeout < 0))
else if ((timeout > 5000) || (timeout < 0))
flush_local_pages();
bird_thread_busy_update(thr, timeout);
@ -1362,6 +1363,11 @@ cmd_show_threads(int show_loops)
bird_thread_sync_all(&tsd->sync, bird_thread_show, cmd_show_threads_done, "Show Threads");
}
_Bool task_still_in_limit(void)
{
return ns_now() < account_last + this_thread->max_loop_time_ns;
}
/*
* Birdloop
@ -1369,6 +1375,7 @@ cmd_show_threads(int show_loops)
static struct bird_thread main_thread;
struct birdloop main_birdloop = { .thread = &main_thread, };
_Thread_local struct birdloop *this_birdloop;
static void birdloop_enter_locked(struct birdloop *loop);
@ -1396,6 +1403,8 @@ birdloop_init(void)
timers_init(&main_birdloop.time, &root_pool);
birdloop_enter_locked(&main_birdloop);
this_birdloop = &main_birdloop;
this_thread = &main_thread;
}
static void
@ -1441,6 +1450,7 @@ birdloop_stop_internal(struct birdloop *loop)
ASSERT_DIE(!ev_active(&loop->event));
loop->ping_pending = 0;
account_to(&this_thread->overhead);
this_birdloop = this_thread->meta;
birdloop_leave(loop);
/* Request local socket reload */
@ -1461,6 +1471,18 @@ birdloop_run(void *_loop)
struct birdloop *loop = _loop;
account_to(&loop->locking);
birdloop_enter(loop);
this_birdloop = loop;
/* Wait until pingers end to wait for all events to actually arrive */
for (u32 ltt;
ltt = atomic_load_explicit(&loop->thread_transition, memory_order_acquire);
)
{
ASSERT_DIE(ltt == LTT_PING);
birdloop_yield();
}
/* Now we can actually do some work */
u64 dif = account_to(&loop->working);
if (dif > this_thread->max_loop_time_ns)
@ -1489,7 +1511,7 @@ birdloop_run(void *_loop)
repeat += ev_run_list(&loop->event_list);
/* Check end time */
} while (repeat && (ns_now() < account_last + this_thread->max_loop_time_ns));
} while (repeat && task_still_in_limit());
/* Request meta timer */
timer *t = timers_first(&loop->time);
@ -1507,6 +1529,7 @@ birdloop_run(void *_loop)
loop->sock_changed = 0;
account_to(&this_thread->overhead);
this_birdloop = this_thread->meta;
birdloop_leave(loop);
}
@ -1689,3 +1712,12 @@ birdloop_yield(void)
{
usleep(100);
}
/* Enqueue @e into the calling thread's priority event list.
 * The main thread has no such list, so its events go to main_birdloop. */
void
ev_send_this_thread(event *e)
{
  if (this_thread != &main_thread)
    ev_send(&this_thread->priority_events, e);
  else
    ev_send_loop(&main_birdloop, e);
}