mirror of https://gitlab.nic.cz/labs/bird.git synced 2024-12-22 09:41:54 +00:00

Fixed all implicit seq_cst warnings caused by the previous commit

This commit is contained in:
Maria Matejka 2024-04-01 16:01:26 +02:00 committed by Katerina Kubecova
parent 51795c3e05
commit a66be7641e
9 changed files with 39 additions and 32 deletions

View File

@ -202,7 +202,7 @@ config_free(struct config *c)
if (!c)
return;
ASSERT(!c->obstacle_count);
ASSERT(!atomic_load_explicit(&c->obstacle_count, memory_order_relaxed));
rp_free(c->pool);
}
@ -218,7 +218,7 @@ config_free(struct config *c)
void
config_free_old(void)
{
if (!old_config || old_config->obstacle_count)
if (!old_config || atomic_load_explicit(&old_config->obstacle_count, memory_order_acquire))
return;
tm_stop(config_timer);
@ -231,15 +231,16 @@ config_free_old(void)
void
config_add_obstacle(struct config *c)
{
DBG("+++ adding obstacle %d\n", c->obstacle_count);
atomic_fetch_add_explicit(&c->obstacle_count, 1, memory_order_acq_rel);
UNUSED int obs = atomic_fetch_add_explicit(&c->obstacle_count, 1, memory_order_acq_rel);
DBG("+++ adding obstacle %d\n", obs);
}
void
config_del_obstacle(struct config *c)
{
DBG("+++ deleting obstacle %d\n", c->obstacle_count);
if (atomic_fetch_sub_explicit(&c->obstacle_count, 1, memory_order_acq_rel) == 1)
int obs = atomic_fetch_sub_explicit(&c->obstacle_count, 1, memory_order_acq_rel);
DBG("+++ deleting obstacle %d\n", obs);
if (obs == 1)
ev_send_loop(&main_birdloop, &c->done_event);
}
@ -294,7 +295,7 @@ config_do_commit(struct config *c, int type)
log(L_INFO "Reconfiguring");
if (old_config)
old_config->obstacle_count++;
config_add_obstacle(old_config);
DBG("filter_commit\n");
filter_commit(c, old_config);
@ -307,10 +308,9 @@ config_do_commit(struct config *c, int type)
rt_commit(c, old_config);
DBG("protos_commit\n");
protos_commit(c, old_config, force_restart, type);
int obs = 0;
if (old_config)
obs = --old_config->obstacle_count;
int obs = old_config ?
atomic_fetch_sub_explicit(&old_config->obstacle_count, 1, memory_order_acq_rel) - 1
: 0;
DBG("do_commit finished with %d obstacles remaining\n", obs);
return !obs;

View File

@ -29,7 +29,7 @@ static int
rcu_gp_ongoing(_Atomic uint *ctl)
{
uint val = atomic_load(ctl);
return (val & RCU_NEST_CNT) && ((val ^ rcu_gp_ctl) & RCU_GP_PHASE);
return (val & RCU_NEST_CNT) && ((val ^ atomic_load_explicit(&rcu_gp_ctl, memory_order_acquire)) & RCU_GP_PHASE);
}
static void

View File

@ -1082,7 +1082,7 @@ channel_do_pause(struct channel *c)
/* Need to abort feeding */
c->reload_pending = 0;
if (c->reload_req.hook && c->reload_req.hook->export_state != TES_STOP)
if (c->reload_req.hook && atomic_load_explicit(&c->reload_req.hook->export_state, memory_order_acquire) != TES_STOP)
rt_stop_export(&c->reload_req, channel_reload_stopped);
/* Stop export */
@ -1236,10 +1236,10 @@ channel_request_feeding_dynamic(struct channel *c, enum channel_feeding_request_
static void
channel_stop_export(struct channel *c)
{
if (c->refeed_req.hook && (c->refeed_req.hook->export_state != TES_STOP))
if (c->refeed_req.hook && (atomic_load_explicit(&c->refeed_req.hook->export_state, memory_order_acquire) != TES_STOP))
rt_stop_export(&c->refeed_req, channel_refeed_stopped);
if (c->out_req.hook && (c->out_req.hook->export_state != TES_STOP))
if (c->out_req.hook && (atomic_load_explicit(&c->out_req.hook->export_state, memory_order_acquire) != TES_STOP))
rt_stop_export(&c->out_req, channel_export_stopped);
}

View File

@ -415,7 +415,7 @@ const char *rt_import_state_name(u8 state);
const char *rt_export_state_name(u8 state);
static inline u8 rt_import_get_state(struct rt_import_hook *ih) { return ih ? ih->import_state : TIS_DOWN; }
static inline u8 rt_export_get_state(struct rt_export_hook *eh) { return eh ? eh->export_state : TES_DOWN; }
static inline u8 rt_export_get_state(struct rt_export_hook *eh) { return eh ? atomic_load_explicit(&eh->export_state, memory_order_acquire) : TES_DOWN; }
u8 rt_set_export_state(struct rt_export_hook *hook, u32 expected_mask, u8 state);

View File

@ -1516,7 +1516,8 @@ ea_dump(ea_list *e)
(e->flags & EALF_SORTED) ? 'S' : 's',
(e->flags & EALF_BISECT) ? 'B' : 'b',
(e->flags & EALF_CACHED) ? 'C' : 'c',
s ? s->uc : 0, s ? s->hash_key : 0);
s ? atomic_load_explicit(&s->uc, memory_order_relaxed) : 0,
s ? s->hash_key : 0);
for(i=0; i<e->count; i++)
{
eattr *a = &e->attrs[i];
@ -1714,7 +1715,7 @@ ea_lookup(ea_list *o, int overlay)
r->l->flags |= EALF_CACHED | huge;
r->hash_key = h;
r->uc = 1;
atomic_store_explicit(&r->uc, 1, memory_order_release);
rta_insert(r);

View File

@ -779,7 +779,7 @@ channel_rpe_mark_seen(struct channel *c, struct rt_pending_export *rpe)
ASSERT_DIE(c->out_req.hook);
rpe_mark_seen(c->out_req.hook, rpe);
if (c->refeed_req.hook && (c->refeed_req.hook->export_state == TES_FEEDING))
if (c->refeed_req.hook && (atomic_load_explicit(&c->refeed_req.hook->export_state, memory_order_acquire) == TES_FEEDING))
rpe_mark_seen(c->refeed_req.hook, rpe);
if (rpe->old)
@ -3916,7 +3916,7 @@ rt_prepare_feed(struct rt_export_hook *c, net *n, rt_feed_block *b)
struct rt_pending_export *last_seen = last_seen_item ? SKIP_BACK(struct rt_pending_export, li, last_seen_item) : NULL;
while (last_seen && first && (first->seq <= last_seen->seq))
first = first->next;
first = atomic_load_explicit(&first->next, memory_order_relaxed);
b->aux[b->pos++] = (struct rt_feed_block_aux) {
.start = b->cnt,

View File

@ -180,7 +180,7 @@ alloc_page(void)
struct free_page *fp = local_page_stack;
if (fp)
{
local_page_stack = fp->next;
local_page_stack = atomic_load_explicit(&fp->next, memory_order_relaxed);
atomic_fetch_sub_explicit(&pages_kept_locally, 1, memory_order_relaxed);
pages_kept_here--;
UNPROTECT_PAGE(fp);
@ -194,14 +194,14 @@ alloc_page(void)
if (fp = PAGE_STACK_GET)
{
/* Reinstate the stack with the next page in list */
PAGE_STACK_PUT(fp->next);
PAGE_STACK_PUT(atomic_load_explicit(&fp->next, memory_order_relaxed));
/* Update the counters */
UNUSED uint pk = atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
/* Release the page */
UNPROTECT_PAGE(fp);
ajlog(fp, fp->next, pk, AJT_ALLOC_GLOBAL_HOT);
ajlog(fp, atomic_load_explicit(&fp->next, memory_order_relaxed), pk, AJT_ALLOC_GLOBAL_HOT);
return fp;
}
@ -302,7 +302,7 @@ flush_local_pages(void)
* Also, we need to know the last page. */
struct free_page *last = local_page_stack, *next;
int check_count = 1;
while (next = last->next)
while (next = atomic_load_explicit(&last->next, memory_order_relaxed))
{
check_count++;
last = next;
@ -313,7 +313,7 @@ flush_local_pages(void)
/* Block the stack by a cork */
UNPROTECT_PAGE(last);
last->next = PAGE_STACK_GET;
atomic_store_explicit(&last->next, PAGE_STACK_GET, memory_order_relaxed);
PROTECT_PAGE(last);
/* Update the stack */
@ -355,7 +355,7 @@ page_cleanup(void *_ UNUSED)
do {
struct free_page *fp = stack;
stack = fp->next;
stack = atomic_load_explicit(&fp->next, memory_order_relaxed);
LOCK_DOMAIN(resource, empty_pages_domain);
/* Empty pages are stored as pointers. To store them, we need a pointer block. */
@ -397,7 +397,7 @@ page_cleanup(void *_ UNUSED)
while (stack)
{
struct free_page *f = stack;
stack = f->next;
stack = atomic_load_explicit(&f->next, memory_order_acquire);
UNPROTECT_PAGE(f);
free_page(f);

View File

@ -151,7 +151,7 @@ rf_open(pool *p, const char *name, enum rf_mode mode, off_t limit)
{
case RF_APPEND:
rf_stat(r);
r->pos = S_ISREG(r->stat.st_mode) ? r->stat.st_size : 0;
atomic_store_explicit(&r->pos, S_ISREG(r->stat.st_mode) ? r->stat.st_size : 0, memory_order_relaxed);
break;
case RF_FIXED:

View File

@ -565,10 +565,11 @@ log_switch(int initial, list *logs, const char *new_syslog_name)
pprev = &ol->next)
{
ol->new_mask = 0;
if (ol->rf)
struct rfile *orf = atomic_load_explicit(&ol->rf, memory_order_relaxed);
if (orf)
{
WALK_LIST(l, *logs)
if (l->rf && rf_same(l->rf, ol->rf))
if (l->rf && rf_same(l->rf, orf))
{
/* Merge the mask */
ol->new_mask |= l->mask;
@ -657,7 +658,9 @@ log_switch(int initial, list *logs, const char *new_syslog_name)
/* Find more */
for (struct log_config *ll = NODE_NEXT(l); NODE_VALID(ll); ll = NODE_NEXT(ll))
if (ll->filename && ll->rf && rf_same(lc->rf, ll->rf))
{
struct rfile *crf = atomic_load_explicit(&lc->rf, memory_order_relaxed);
if (ll->filename && ll->rf && rf_same(crf, ll->rf))
{
/* Merged with this channel */
lc->new_mask |= ll->mask;
@ -671,6 +674,7 @@ log_switch(int initial, list *logs, const char *new_syslog_name)
}
ll->rf = NULL;
}
}
}
else if (l->udp_port)
{
@ -765,7 +769,9 @@ resolve_fail:
atomic_store_explicit(&ol->mask, ol->new_mask, memory_order_release);
/* Never close syslog channel or debug */
if (ol->new_mask || (!ol->rf && !ol->udp_sk) || (ol->rf == dbg_rf))
struct rfile *orf = atomic_load_explicit(&ol->rf, memory_order_relaxed);
sock *ousk = atomic_load_explicit(&ol->udp_sk, memory_order_relaxed);
if (ol->new_mask || (!orf && !ousk) || (orf == dbg_rf))
{
pprev = &ol->next;
ol = atomic_load_explicit(pprev, memory_order_acquire);