Mirror of https://gitlab.nic.cz/labs/bird.git, synced 2024-11-09 12:48:43 +00:00

commit b797444e94
Author: Maria Matejka
Date:   2024-06-26 17:19:24 +02:00

    Merge commit 'b95dc8f29f18eb177f91fdc4bf0716fac9b15366' into mq-config-ref

    Also converted all _Bool's to bool.

27 changed files with 64 additions and 65 deletions
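
The conversion is purely a spelling change: since C99, <stdbool.h> defines bool as a macro for the builtin _Bool (and in C23 bool becomes a proper keyword), so the rename affects neither type, size nor the 0/1 conversion semantics. A minimal standalone sketch, not taken from the BIRD tree, illustrating the equivalence:

    /* equivalence.c -- hypothetical demo, not part of this commit */
    #include <stdbool.h>
    #include <stdio.h>

    static _Bool old_flag = 1;   /* spelling used before this commit */
    static bool  new_flag = 1;   /* spelling used after it */

    int main(void)
    {
      /* Same type, same size, same normalization of nonzero values to 1. */
      printf("sizes: %zu %zu\n", sizeof(_Bool), sizeof(bool));
      printf("same type: %d\n", _Generic((bool) 0, _Bool: 1, default: 0));
      printf("normalized: %d %d\n", (_Bool) 42, (bool) 42);
      printf("flags: %d %d\n", old_flag, new_flag);
      return 0;
    }

The include shuffle visible below (dropping <stdbool.h> from the BMP header while adding it to what looks like a shared types header) is consistent with that: one common include replaces per-file ones.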

View File

@@ -28,7 +28,7 @@ CF_HDR
 CF_DEFINES
-static _Bool this_sadr_from_hack_active;
+static bool this_sadr_from_hack_active;
 static void
 check_u16(uint val)

View File

@@ -20,8 +20,8 @@ extern struct birdloop main_birdloop;
 extern _Thread_local struct birdloop *this_birdloop;
 /* Check that the task has enough time to do a bit more */
-_Bool task_still_in_limit(void);
-_Bool task_before_halftime(void);
+bool task_still_in_limit(void);
+bool task_before_halftime(void);
 #define MAYBE_DEFER_TASK(target, event, fmt, args...) do { \
 if (!task_still_in_limit()) { \
@@ -54,7 +54,7 @@ pool *birdloop_pool(struct birdloop *loop);
 void birdloop_enter(struct birdloop *loop);
 void birdloop_leave(struct birdloop *loop);
-_Bool birdloop_inside(struct birdloop *loop);
+bool birdloop_inside(struct birdloop *loop);
 void birdloop_mask_wakeups(struct birdloop *loop);
 void birdloop_unmask_wakeups(struct birdloop *loop);

View File

@@ -69,7 +69,7 @@ typedef union list { /* In fact two overlayed nodes */
 #define EMPTY_LIST(list) (!(list).head->next)
-static inline _Bool
+static inline bool
 enlisted(node *n)
 {
 switch ((!!n->next) + (!!n->prev))

View File

@@ -95,7 +95,7 @@ lfjour_push_commit(struct lfjour *j)
 if (end == 0)
 {
 struct lfjour_block *prev = b->n.prev;
-_Bool f = 0;
+bool f = 0;
 if (prev)
 ASSERT_DIE(atomic_compare_exchange_strong_explicit(&prev->not_last, &f, 1,
 memory_order_release, memory_order_relaxed));

View File

@@ -138,7 +138,7 @@ static inline void lfuc_unlock(struct lfuc *c, event_list *el, event *ev)
 * the critical section of lfuc_unlock(). Then we decide whether the usecount
 * is indeed zero or not, and therefore whether the structure is free to be freed.
 */
-static inline _Bool
+static inline bool
 lfuc_finished(struct lfuc *c)
 {
 u64 uc;
@@ -192,7 +192,7 @@ struct lfjour_item {
 struct lfjour_block {
 TLIST_DEFAULT_NODE;
 _Atomic u32 end;
-_Atomic _Bool not_last;
+_Atomic bool not_last;
 struct lfjour_item _block[0];
 };
@@ -250,7 +250,7 @@ void lfjour_push_commit(struct lfjour *);
 struct lfjour_item *lfjour_get(struct lfjour_recipient *);
 void lfjour_release(struct lfjour_recipient *, const struct lfjour_item *);
-static inline _Bool lfjour_reset_seqno(struct lfjour_recipient *r)
+static inline bool lfjour_reset_seqno(struct lfjour_recipient *r)
 {
 return atomic_fetch_and_explicit(&r->recipient_flags, ~LFJOUR_R_SEQ_RESET, memory_order_acq_rel) & LFJOUR_R_SEQ_RESET;
 }
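
The same equivalence holds for the atomic flags touched in this header: C11 atomic operations are generic in the object type, so atomic_compare_exchange_strong_explicit() behaves identically on an _Atomic bool and an _Atomic _Bool. A standalone sketch (assumed file name and values only) mirroring the not_last handshake in lfjour_push_commit():

    /* cas_bool.c -- hypothetical demo of the pattern, not BIRD code */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
      _Atomic bool not_last = false;
      bool f = false;    /* expected value, as in lfjour_push_commit() */

      /* Succeeds because not_last was false; it becomes true. */
      bool ok = atomic_compare_exchange_strong_explicit(
          &not_last, &f, true, memory_order_release, memory_order_relaxed);

      printf("%d %d %d\n", ok, f, atomic_load(&not_last));   /* prints: 1 0 1 */
      return 0;
    }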

View File

@@ -46,7 +46,7 @@ extern _Thread_local struct domain_generic **last_locked;
 #define DOMAIN_NEW(type) (DOMAIN(type)) { .type = domain_new(DOMAIN_ORDER(type), 1) }
 #define DOMAIN_NEW_RCU_SYNC(type) (DOMAIN(type)) { .type = domain_new(DOMAIN_ORDER(type), 0) }
-struct domain_generic *domain_new(uint order, _Bool allow_rcu);
+struct domain_generic *domain_new(uint order, bool allow_rcu);
 #define DOMAIN_FREE(type, d) domain_free((d).type)
 void domain_free(struct domain_generic *);
@@ -117,7 +117,7 @@ extern _Thread_local u32 rw_spinlocks_taken_write;
 /* Borrowed from lib/timer.h */
 btime current_time_now(void);
-static inline void rws_mark(rw_spinlock *p, _Bool write, _Bool lock)
+static inline void rws_mark(rw_spinlock *p, bool write, bool lock)
 {
 if (lock) {
 ASSERT_DIE(rw_spinlocks_taken_cnt < MAX_RWS_AT_ONCE);

View File

@@ -90,7 +90,7 @@ rwspin_thread_run(void *_rtd)
 {
 struct rws_test_data *d = _rtd;
-for (_Bool sorted = 0; !sorted++; )
+for (bool sorted = 0; !sorted++; )
 {
 for (int i=0; (i<RWS_DATASIZE-1) && sorted; i++)
 {

View File

@@ -223,7 +223,7 @@ net_find_index_fragile(netindex_hash *nh, const net_addr *n)
 return SPINHASH_FIND(nh->hash, NETINDEX, h, n);
 }
-static _Bool
+static bool
 net_validate_index(netindex_hash *h, struct netindex *ni)
 {
 struct netindex * _Atomic *block = atomic_load_explicit(&h->block, memory_order_relaxed);

View File

@@ -48,7 +48,7 @@ synchronize_rcu(void)
 birdloop_yield();
 /* Check all threads */
-_Bool critical = 0;
+bool critical = 0;
 for (struct rcu_thread * _Atomic *tp = &rcu_thread_list, *t;
 t = atomic_load_explicit(tp, memory_order_acquire);
 tp = &t->next)

View File

@@ -55,7 +55,7 @@ static inline void rcu_read_unlock(void)
 this_rcu_thread.local_ctl--;
 }
-static inline _Bool rcu_read_active(void)
+static inline bool rcu_read_active(void)
 {
 return !!(this_rcu_thread.local_ctl & RCU_NEST_MASK);
 }

View File

@@ -113,7 +113,7 @@ t_rcu_basic_writer(void *order_ptr)
 next->value = order + i*WRITERS;
 spin_lock();
-_Bool seen = 0;
+bool seen = 0;
 for (struct block * _Atomic *bp = &bin, *b;
 b = atomic_load_explicit(bp, memory_order_acquire);
 bp = &b->next)
@@ -139,7 +139,7 @@ t_rcu_basic_writer(void *order_ptr)
 /* Remove the object */
 spin_lock();
-_Bool seen = 0;
+bool seen = 0;
 for (struct block * _Atomic *bp = &bin, *b;
 b = atomic_load_explicit(bp, memory_order_acquire);
 bp = &b->next)

View File

@@ -395,7 +395,7 @@ ea_set_attr(ea_list **to, eattr a)
 }
 static inline void
-ea_unset_attr(ea_list **to, _Bool local, const struct ea_class *def)
+ea_unset_attr(ea_list **to, bool local, const struct ea_class *def)
 {
 ea_set_attr(to, EA_LITERAL_GENERIC(def->id, 0, 0,
 .fresh = local, .originated = local, .undef = 1));

View File

@@ -91,7 +91,7 @@ void sk_reloop(sock *, struct birdloop *); /* Move socket to another loop. Both
 static inline void sk_close(sock *s) { rfree(&s->r); } /* Explicitly close socket */
 int sk_rx_ready(sock *s);
-_Bool sk_tx_pending(sock *s);
+bool sk_tx_pending(sock *s);
 int sk_send(sock *, uint len); /* Send data, <0=err, >0=ok, 0=sleep */
 int sk_send_to(sock *, uint len, ip_addr to, uint port); /* sk_send to given destination */
 void sk_reallocate(sock *); /* Free and allocate tbuf & rbuf */

View File

@@ -884,7 +884,7 @@ static void
 mpls_fec_map_cleanup(void *_m)
 {
 struct mpls_fec_map *m = _m;
-_Bool finished = (m->channel->channel_state == CS_STOP);
+bool finished = (m->channel->channel_state == CS_STOP);
 HASH_WALK_DELSAFE(m->label_hash, next_l, fec)
 if (lfuc_finished(&fec->uc))
 mpls_free_fec(m, fec);
@@ -1436,7 +1436,7 @@ mpls_show_ranges(struct mpls_show_ranges_cmd *cmd)
 else
 {
 struct mpls_domain_pub *m;
-_Bool first = 1;
+bool first = 1;
 WALK_LIST(m, MPLS_GLOBAL->domains)
 {
 if (first)

View File

@@ -742,7 +742,7 @@ channel_do_reload(void *_c)
 RT_FEED_WALK(&c->reimporter, f)
 {
-_Bool seen = 0;
+bool seen = 0;
 for (uint i = 0; i < f->count_routes; i++)
 {
 rte *r = &f->block[i];

View File

@@ -217,7 +217,7 @@ struct rt_export_union {
 struct rt_exporter {
 struct lfjour journal; /* Journal for update keeping */
 TLIST_LIST(rt_export_feeder) feeders; /* List of active feeder structures */
-_Bool _Atomic feeders_lock; /* Spinlock for the above list */
+bool _Atomic feeders_lock; /* Spinlock for the above list */
 u8 trace_routes; /* Debugging flags (D_*) */
 u8 net_type; /* Which net this exporter provides */
 DOMAIN(rtable) domain; /* Lock this instead of RCU */
@@ -226,7 +226,7 @@ struct rt_exporter {
 netindex_hash *netindex; /* Table for net <-> id conversion */
 void (*stopped)(struct rt_exporter *); /* Callback when exporter can stop */
 void (*cleanup_done)(struct rt_exporter *, u64 end); /* Callback when cleanup has been done */
-struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, u32, _Bool (*)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *, const struct rt_export_item *first);
+struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, u32, bool (*)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *, const struct rt_export_item *first);
 void (*feed_cleanup)(struct rt_exporter *, struct rt_export_feeder *);
 };
@@ -247,7 +247,7 @@ struct rt_export_feed *rt_export_next_feed(struct rt_export_feeder *);
 #define RT_FEED_WALK(_feeder, _f) \
 for (const struct rt_export_feed *_f; _f = rt_export_next_feed(_feeder); ) \
-static inline _Bool rt_export_feed_active(struct rt_export_feeder *f)
+static inline bool rt_export_feed_active(struct rt_export_feeder *f)
 { return !!atomic_load_explicit(&f->exporter, memory_order_acquire); }
 /* Full blown exports */
@@ -311,7 +311,7 @@ static inline int rt_prefilter_net(const struct rt_prefilter *p, const net_addr
 bug("Crazy prefilter application attempt failed wildly.");
 }
-static inline _Bool
+static inline bool
 rt_net_is_feeding_feeder(struct rt_export_feeder *ref, const net_addr *n)
 {
 if (!rt_prefilter_net(&ref->prefilter, n))
@@ -327,7 +327,7 @@ rt_net_is_feeding_feeder(struct rt_export_feeder *ref, const net_addr *n)
 return 0;
 }
-static inline _Bool
+static inline bool
 rt_net_is_feeding_request(struct rt_export_request *req, const net_addr *n)
 {
 struct netindex *ni = NET_TO_INDEX(n);
@@ -490,7 +490,7 @@ static inline void rt_cork_release(void)
 void rt_cork_send_callback(void *_data);
-static inline _Bool rt_cork_check(struct rt_uncork_callback *rcc)
+static inline bool rt_cork_check(struct rt_uncork_callback *rcc)
 {
 /* Wait until all uncorks have finished */
 while (1)
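
The feeders_lock field above is documented as a spinlock over the feeder list; with an atomic bool that is typically an exchange loop. A generic sketch of that pattern, with assumed function names and not taken from the BIRD sources:

    /* spin_bool.c -- generic atomic-bool spinlock sketch, not BIRD's locking code */
    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic bool feeders_lock;    /* stands in for the field above */

    static void feeders_list_lock(void)
    {
      /* Spin until we are the one flipping the flag from false to true. */
      while (atomic_exchange_explicit(&feeders_lock, true, memory_order_acquire))
        ;
    }

    static void feeders_list_unlock(void)
    {
      atomic_store_explicit(&feeders_lock, false, memory_order_release);
    }

    int main(void)
    {
      feeders_list_lock();
      /* ... walk or modify the protected list here ... */
      feeders_list_unlock();
      return 0;
    }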

View File

@@ -913,7 +913,7 @@ ea_do_sort(ea_list *e)
 while (ss);
 }
-static _Bool eattr_same_value(const eattr *a, const eattr *b);
+static bool eattr_same_value(const eattr *a, const eattr *b);
 /**
 * In place discard duplicates and undefs in sorted ea_list. We use stable sort
@@ -1110,7 +1110,7 @@ ea_normalize(ea_list *e, u32 upto)
 return t;
 }
-static _Bool
+static bool
 eattr_same_value(const eattr *a, const eattr *b)
 {
 if (
@@ -1130,7 +1130,7 @@ eattr_same_value(const eattr *a, const eattr *b)
 return adata_same(a->u.ptr, b->u.ptr);
 }
-static _Bool
+static bool
 eattr_same(const eattr *a, const eattr *b)
 {
 return

View File

@@ -538,7 +538,7 @@ rt_exporter_shutdown(struct rt_exporter *e, void (*stopped)(struct rt_exporter *
 e->journal.domain = NULL;
 /* We have to tell every receiver to stop */
-_Bool done = 1;
+bool done = 1;
 WALK_TLIST(lfjour_recipient, r, &e->journal.recipients)
 {
 done = 0;

View File

@@ -481,7 +481,7 @@ rt_aggregate_roa(void *_rag)
 RT_EXPORT_WALK(&rag->src, u) TMP_SAVED
 {
-_Bool withdraw = 0;
+bool withdraw = 0;
 const net_addr *nroa = NULL;
 switch (u->kind)
 {
@@ -1082,8 +1082,8 @@ static void
 rt_notify_accepted(struct channel *c, const struct rt_export_feed *feed)
 {
 rte *old_best, *new_best;
-_Bool feeding = rt_net_is_feeding(&c->out_req, feed->ni->addr);
-_Bool idempotent = 0;
+bool feeding = rt_net_is_feeding(&c->out_req, feed->ni->addr);
+bool idempotent = 0;
 for (uint i = 0; i < feed->count_routes; i++)
 {
@@ -1168,7 +1168,7 @@ channel_notify_accepted(void *_channel)
 rte *
 rt_export_merged(struct channel *c, const struct rt_export_feed *feed, linpool *pool, int silent)
 {
-_Bool feeding = !silent && rt_net_is_feeding(&c->out_req, feed->ni->addr);
+bool feeding = !silent && rt_net_is_feeding(&c->out_req, feed->ni->addr);
 // struct proto *p = c->proto;
 struct nexthop_adata *nhs = NULL;
@@ -1492,7 +1492,7 @@ rt_cleanup_find_net(struct rtable_private *tab, struct rt_pending_export *rpe)
 return &routes[ni->index];
 }
-static _Bool
+static bool
 rt_cleanup_update_pointers(struct rt_net_pending_export *npe, struct rt_pending_export *rpe)
 {
 struct rt_pending_export *first = atomic_load_explicit(&npe->first, memory_order_relaxed);
@@ -1536,7 +1536,7 @@ rt_cleanup_export_all(struct lfjour *j, struct lfjour_item *i)
 net *net = rt_cleanup_find_net(tab, rpe);
 /* Update the first and last pointers */
-_Bool is_last = rt_cleanup_update_pointers(&net->all, rpe);
+bool is_last = rt_cleanup_update_pointers(&net->all, rpe);
 /* Free the old route */
 if (rpe->it.old)
@@ -2233,7 +2233,7 @@ rt_net_feed_validate_first(
 }
 static struct rt_export_feed *
-rt_net_feed_index(struct rtable_reading *tr, net *n, _Bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_pending_export *first)
+rt_net_feed_index(struct rtable_reading *tr, net *n, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_pending_export *first)
 {
 /* Get the feed itself. It may change under our hands tho. */
 struct rt_pending_export *first_in_net, *last_in_net;
@@ -2309,7 +2309,7 @@ rt_net_feed_index(struct rtable_reading *tr, net *n, _Bool (*prefilter)(struct r
 }
 static struct rt_export_feed *
-rt_net_feed_internal(struct rtable_reading *tr, u32 index, _Bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_pending_export *first)
+rt_net_feed_internal(struct rtable_reading *tr, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_pending_export *first)
 {
 net *n = rt_net_feed_get_net(tr, index);
 if (!n)
@@ -2327,7 +2327,7 @@ rt_net_feed(rtable *t, const net_addr *a, const struct rt_pending_export *first)
 }
 static struct rt_export_feed *
-rt_feed_net_all(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, _Bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
+rt_feed_net_all(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
 {
 RT_READ_ANCHORED(SKIP_BACK(rtable, export_all, e), tr, u);
 return rt_net_feed_internal(tr, index, prefilter, f, SKIP_BACK(const struct rt_pending_export, it, _first));
@@ -2355,7 +2355,7 @@ rt_net_best(rtable *t, const net_addr *a)
 }
 static struct rt_export_feed *
-rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, _Bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
+rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
 {
 SKIP_BACK_DECLARE(rtable, t, export_best, e);
 SKIP_BACK_DECLARE(const struct rt_pending_export, first, it, _first);
@@ -2909,7 +2909,7 @@ rt_flowspec_unlink(rtable *src, rtable *dst)
 {
 birdloop_enter(dst->loop);
-_Bool unlock_dst = 0;
+bool unlock_dst = 0;
 struct rt_flowspec_link *ln;
 RT_LOCKED(src, t)
@@ -3262,7 +3262,7 @@ rt_init(void)
 ea_register_init(&ea_roa_aggregated);
 }
-static _Bool
+static bool
 rt_prune_net(struct rtable_private *tab, struct network *n)
 {
 NET_WALK_ROUTES(tab, n, ep, e)
@@ -3270,7 +3270,7 @@ rt_prune_net(struct rtable_private *tab, struct network *n)
 ASSERT_DIE(!(e->flags & REF_OBSOLETE));
 struct rt_import_hook *s = e->rte.sender;
-_Bool stale = (s->import_state == TIS_FLUSHING);
+bool stale = (s->import_state == TIS_FLUSHING);
 if (!stale)
 {
@@ -4105,7 +4105,7 @@ rt_next_hop_update_net(struct rtable_private *tab, struct netindex *ni, net *n)
 &updates[i].new_stored->rte, &updates[i].old->rte);
 ASSERT_DIE(this_rpe);
-_Bool nb = (new_best->rte.src == updates[i].new.src), ob = (i == 0);
+bool nb = (new_best->rte.src == updates[i].new.src), ob = (i == 0);
 char info[96];
 char best_indicator[2][2] = { { ' ', '+' }, { '-', '=' } };
 bsnprintf(info, sizeof info, "autoupdated [%cbest]", best_indicator[ob][nb]);
@@ -4525,7 +4525,7 @@ rt_commit(struct config *new, struct config *old)
 {
 WALK_LIST(o, old->tables)
 {
-_Bool ok;
+bool ok;
 RT_LOCKED(o->table, tab)
 {
 r = OBSREF_GET(tab->deleted) ? NULL : rt_find_table_config(new, o->name);

View File

@@ -1880,7 +1880,7 @@ bgp_out_item_done(struct lfjour *j, struct lfjour_item *i)
 {}
 static struct rt_export_feed *
-bgp_out_feed_net(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, _Bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
+bgp_out_feed_net(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
 {
 ASSERT_DIE(u == NULL);
 SKIP_BACK_DECLARE(struct bgp_ptx_private, c, exporter, e);

View File

@@ -18,8 +18,6 @@
 #include "lib/socket.h"
 #include "proto/bmp/map.h"
-#include <stdbool.h>
 // Max length of MIB-II description object
 #define MIB_II_STR_LEN 255

View File

@@ -32,6 +32,7 @@
 /* Types */
+#include <stdbool.h>
 #include <stdint.h>
 typedef int8_t s8;
 typedef uint8_t u8;

View File

@@ -38,8 +38,8 @@ long page_size = 0;
 STATIC_ASSERT(KEEP_PAGES_MIN * 4 < KEEP_PAGES_MAX);
 STATIC_ASSERT(ALLOC_PAGES_AT_ONCE < KEEP_PAGES_MAX_LOCAL);
-static _Bool use_fake = 0;
-static _Bool initialized = 0;
+static bool use_fake = 0;
+static bool initialized = 0;
 # define PROTECT_PAGE(pg)
 # define UNPROTECT_PAGE(pg)

View File

@@ -50,7 +50,7 @@ _Thread_local struct domain_generic **last_locked = NULL;
 struct domain_generic {
 pthread_mutex_t mutex;
 uint order;
-_Bool forbidden_when_reading_rcu;
+bool forbidden_when_reading_rcu;
 struct domain_generic **prev;
 struct lock_order *locked_by;
 const char *name;
@@ -68,7 +68,7 @@ static struct domain_generic the_bird_domain_gen = DOMAIN_INIT(OFFSETOF(struct l
 DOMAIN(the_bird) the_bird_domain = { .the_bird = &the_bird_domain_gen };
 struct domain_generic *
-domain_new(uint order, _Bool allow_rcu)
+domain_new(uint order, bool allow_rcu)
 {
 ASSERT_DIE(order < sizeof(struct lock_order));
 struct domain_generic *dg = xmalloc(sizeof(struct domain_generic));

View File

@@ -178,7 +178,7 @@ birdloop_pool(struct birdloop *loop)
 return loop->pool;
 }
-_Bool
+bool
 birdloop_inside(struct birdloop *loop)
 {
 for (struct birdloop *c = birdloop_current; c; c = c->prev_loop)
@@ -188,7 +188,7 @@ birdloop_inside(struct birdloop *loop)
 return 0;
 }
-_Bool
+bool
 birdloop_in_this_thread(struct birdloop *loop)
 {
 return pthread_equal(pthread_self(), loop->thread->thread_id);
@@ -303,7 +303,7 @@ wakeup_free(struct bird_thread *loop)
 pipe_free(&loop->wakeup);
 }
-static inline _Bool
+static inline bool
 birdloop_try_ping(struct birdloop *loop, u32 ltt)
 {
 /* Somebody else is already pinging, be idempotent */
@@ -513,7 +513,7 @@ int sk_write(sock *s);
 void sk_err(sock *s, int revents);
 static void
-sockets_fire(struct birdloop *loop, _Bool read, _Bool write)
+sockets_fire(struct birdloop *loop, bool read, bool write)
 {
 if (EMPTY_LIST(loop->sock_list))
 return;
@@ -664,7 +664,7 @@ bird_thread_pickup_next(struct birdloop_pickup_group *group)
 wakeup_do_kick(SKIP_BACK(struct bird_thread, n, HEAD(group->threads)));
 }
-static _Bool
+static bool
 birdloop_hot_potato(struct birdloop *loop)
 {
 if (!loop)
@@ -1137,7 +1137,7 @@ bird_thread_commit(struct config *new, struct config *old UNUSED)
 LOCK_DOMAIN(attrs, group->domain);
 int dif = group->thread_count - (thread_dropper_goal = new->thread_count);
-_Bool thread_dropper_running = !!thread_dropper;
+bool thread_dropper_running = !!thread_dropper;
 UNLOCK_DOMAIN(attrs, group->domain);
@@ -1384,7 +1384,7 @@ cmd_show_threads(int show_loops)
 bird_thread_sync_all(&tsd->sync, bird_thread_show, cmd_show_threads_done, "Show Threads");
 }
-_Bool task_still_in_limit(void)
+bool task_still_in_limit(void)
 {
 static u64 main_counter = 0;
 if (this_birdloop == &main_birdloop)
@@ -1393,7 +1393,7 @@ _Bool task_still_in_limit(void)
 return ns_now() < account_last + this_thread->max_loop_time_ns;
 }
-_Bool task_before_halftime(void)
+bool task_before_halftime(void)
 {
 return ns_now() < account_last + this_thread->max_loop_time_ns / 2;
 }

View File

@@ -1827,7 +1827,7 @@ sk_recvmsg(sock *s)
 static inline void reset_tx_buffer(sock *s) { s->ttx = s->tpos = s->tbuf; }
-_Bool
+bool
 sk_tx_pending(sock *s)
 {
 return s->ttx != s->tpos;

View File

@@ -445,7 +445,7 @@ done:;
 lp_flush(krt_filter_lp);
 }
-static _Bool
+static bool
 krt_init_scan(struct krt_proto *p)
 {
 switch (p->sync_state)
@@ -536,7 +536,7 @@ krt_got_route_async(struct krt_proto *p, rte *e, int new, s8 src)
 static timer *krt_scan_all_timer;
 static int krt_scan_all_count;
-static _Bool krt_scan_all_tables;
+static bool krt_scan_all_tables;
 static void
 krt_scan_all(timer *t UNUSED)