SKIP_BACK_DECLARE: easier embedded-to-parent typecasting
This commit is contained in:
parent de3bd705f8
commit 99afca53e3
@@ -31,6 +31,7 @@
   s *_ptr = ((s *)((char *)_orig - OFFSETOF(s, i))); \
   SAME_TYPE(&_ptr->i, _orig); \
   _ptr; })
+#define SKIP_BACK_DECLARE(s, n, i, p) s *n = SKIP_BACK(s, i, p)
 #define BIRD_ALIGN(s, a) (((s)+a-1)&~(a-1))
 #define BIRD_SET_ALIGNED_POINTER(ptr, val) do { \
   size_t _alignment = _Alignof(typeof(*ptr)); \
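For readers outside the BIRD tree: SKIP_BACK is the project's container_of-style macro. Given a pointer to a member embedded inside a larger struct, it recovers a pointer to the enclosing struct, and SAME_TYPE() checks the member pointer's type at compile time. SKIP_BACK_DECLARE, added here, simply folds the cast and the variable declaration into one statement. A minimal standalone sketch of the same pattern, with simplified macros and an illustrative struct rather than the BIRD originals:

  #include <stddef.h>
  #include <stdio.h>

  /* Simplified stand-ins for BIRD's macros; the real SKIP_BACK also
   * type-checks the member pointer via SAME_TYPE(). */
  #define MY_SKIP_BACK(s, i, p)  ((s *)((char *)(p) - offsetof(s, i)))
  #define MY_SKIP_BACK_DECLARE(s, n, i, p)  s *n = MY_SKIP_BACK(s, i, p)

  struct node { struct node *next; };   /* generic embedded handle */

  struct item {
    int value;
    struct node n;                      /* embedded inside the parent */
  };

  /* A callback that receives only the embedded handle ... */
  static int item_value(struct node *np)
  {
    /* ... and recovers the enclosing struct item from it. */
    MY_SKIP_BACK_DECLARE(struct item, it, n, np);
    return it->value;
  }

  int main(void)
  {
    struct item a = { .value = 42 };
    printf("%d\n", item_value(&a.n));   /* prints 42 */
    return 0;
  }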
@@ -14,7 +14,7 @@
 void lfuc_unlock_deferred(struct deferred_call *dc)
 {
-  struct lfuc_unlock_queue_item *luqi = SKIP_BACK(struct lfuc_unlock_queue_item, dc, dc);
+  SKIP_BACK_DECLARE(struct lfuc_unlock_queue_item, luqi, dc, dc);
   lfuc_unlock_immediately(luqi->c, luqi->el, luqi->ev);
 }
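Every call-site hunk in this commit has the same shape: a local declared through SKIP_BACK becomes a SKIP_BACK_DECLARE, which by the definition added above expands back to exactly the line it replaces. Sketched by hand for the hunk above:

  /* SKIP_BACK_DECLARE(struct lfuc_unlock_queue_item, luqi, dc, dc)
   * expands, per the macro added in this commit, to: */
  struct lfuc_unlock_queue_item *luqi = SKIP_BACK(struct lfuc_unlock_queue_item, dc, dc);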
@@ -489,7 +489,7 @@ mb_allocz(pool *p, unsigned size)
 void *
 mb_realloc(void *m, unsigned size)
 {
-  struct mblock *b = SKIP_BACK(struct mblock, data, m);
+  SKIP_BACK_DECLARE(struct mblock, b, data, m);
   struct pool *p = resource_parent(&b->r);

   ASSERT_DIE(DG_IS_LOCKED(p->domain));
@@ -514,7 +514,7 @@ mb_free(void *m)
   if (!m)
     return;

-  struct mblock *b = SKIP_BACK(struct mblock, data, m);
+  SKIP_BACK_DECLARE(struct mblock, b, data, m);
   rfree(&b->r);
 }
@@ -101,7 +101,7 @@ sl_allocz(slab *s)
 void
 sl_free(void *oo)
 {
-  struct sl_obj *o = SKIP_BACK(struct sl_obj, data, oo);
+  SKIP_BACK_DECLARE(struct sl_obj, o, data, oo);

   rem_node(&o->n);
   xfree(o);
@@ -286,7 +286,7 @@ if_enqueue_notify(struct iface_notification x)
 static inline void
 ifa_send_notify(struct iface_subscription *s, unsigned c, struct ifa *a)
 {
-  struct proto *p = SKIP_BACK(struct proto, iface_sub, s);
+  SKIP_BACK_DECLARE(struct proto, p, iface_sub, s);

   if (s->ifa_notify &&
       (p->proto_state != PS_DOWN) &&
@@ -328,7 +328,7 @@ ifa_notify_change(unsigned c, struct ifa *a)
 static inline void
 if_send_notify(struct iface_subscription *s, unsigned c, struct iface *i)
 {
-  struct proto *p = SKIP_BACK(struct proto, iface_sub, s);
+  SKIP_BACK_DECLARE(struct proto, p, iface_sub, s);

   if (s->if_notify &&
       (p->proto_state != PS_DOWN) &&
@@ -655,7 +655,7 @@ iface_unsubscribe(struct iface_subscription *s)
 {
   IFACE_LOCK;

-  struct proto *p = SKIP_BACK(struct proto, iface_sub, s);
+  SKIP_BACK_DECLARE(struct proto, p, iface_sub, s);
   WALK_TLIST_DELSAFE(proto_neigh, n, &p->neighbors)
     neigh_unlink(n);
@@ -58,7 +58,7 @@ static void
 olock_free(resource *r)
 {
   /* Called externally from rfree() */
-  struct object_lock *l = SKIP_BACK(struct object_lock, r, r);
+  SKIP_BACK_DECLARE(struct object_lock, l, r, r);
   node *n;

   OBJ_LOCK;
@@ -78,7 +78,7 @@ olock_free(resource *r)
   n = HEAD(l->waiters);
   if (NODE_VALID(n))
   {
-    struct object_lock *q = SKIP_BACK(struct object_lock, n, n);
+    SKIP_BACK_DECLARE(struct object_lock, q, n, n);

     /* Remove this candidate from waiters list */
     rem_node(n);
@@ -1241,7 +1241,7 @@ mpls_announce_fec(struct mpls_fec_map *m, struct mpls_fec *fec, ea_list *src)
   const struct eattr *heea = ea_find_by_class(src, &ea_gen_hostentry);
   if (heea) {
     /* The same hostentry, but different dependent table */
-    struct hostentry_adata *head = SKIP_BACK(struct hostentry_adata, ad, heea->u.ad);
+    SKIP_BACK_DECLARE(struct hostentry_adata, head, ad, heea->u.ad);
     struct hostentry *he = head->he;
     ea_set_hostentry(&e.attrs, m->channel->table, he->owner, he->addr, he->link,
       HOSTENTRY_LABEL_COUNT(head), head->labels);
@@ -1295,7 +1295,7 @@ mpls_apply_fec(rte *r, struct mpls_fec *fec)
 int
 mpls_handle_rte(struct channel *c, const net_addr *n, rte *r, struct mpls_fec **fecp)
 {
-  struct mpls_channel *mc = SKIP_BACK(struct mpls_channel, c, c->proto->mpls_channel);
+  SKIP_BACK_DECLARE(struct mpls_channel, mc, c, c->proto->mpls_channel);
   struct mpls_fec_map *m = mc->mpls_map;
   struct mpls_fec *fec = *fecp = NULL;
nest/proto.c (38 changed lines)
@@ -79,14 +79,14 @@ channel_log_state_change(struct channel *c)
 void
 channel_import_log_state_change(struct rt_import_request *req, u8 state)
 {
-  struct channel *c = SKIP_BACK(struct channel, in_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, in_req, req);
   CD(c, "Channel import state changed to %s", rt_import_state_name(state));
 }

 void
 channel_export_log_state_change(struct rt_export_request *req, u8 state)
 {
-  struct channel *c = SKIP_BACK(struct channel, out_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, out_req, req);
   CD(c, "Channel export state changed to %s", rt_export_state_name(state));

   switch (state)
@@ -104,7 +104,7 @@ channel_export_log_state_change(struct rt_export_request *req, u8 state)
 void
 channel_refeed_log_state_change(struct rt_export_request *req, u8 state)
 {
-  struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, refeed_req, req);
   CD(c, "Channel export state changed to %s", rt_export_state_name(state));

   switch (state)
@@ -123,21 +123,21 @@ channel_refeed_log_state_change(struct rt_export_request *req, u8 state)
 static void
 channel_dump_import_req(struct rt_import_request *req)
 {
-  struct channel *c = SKIP_BACK(struct channel, in_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, in_req, req);
   debug(" Channel %s.%s import request %p\n", c->proto->name, c->name, req);
 }

 static void
 channel_dump_export_req(struct rt_export_request *req)
 {
-  struct channel *c = SKIP_BACK(struct channel, out_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, out_req, req);
   debug(" Channel %s.%s export request %p\n", c->proto->name, c->name, req);
 }

 static void
 channel_dump_refeed_req(struct rt_export_request *req)
 {
-  struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, refeed_req, req);
   debug(" Channel %s.%s refeed request %p\n", c->proto->name, c->name, req);
 }

@@ -448,7 +448,7 @@ channel_roa_in_reload_done(struct channel_import_request *req)
 static void
 channel_roa_in_changed(struct settle *se)
 {
-  struct roa_subscription *s = SKIP_BACK(struct roa_subscription, settle, se);
+  SKIP_BACK_DECLARE(struct roa_subscription, s, settle, se);
   struct channel *c = s->c;

   CD(c, "Reload triggered by RPKI change");
@@ -472,7 +472,7 @@ channel_roa_out_reload_done(struct channel_feeding_request *req)
 static void
 channel_roa_out_changed(struct settle *se)
 {
-  struct roa_subscription *s = SKIP_BACK(struct roa_subscription, settle, se);
+  SKIP_BACK_DECLARE(struct roa_subscription, s, settle, se);
   struct channel *c = s->c;

   CD(c, "Feeding triggered by RPKI change");
@@ -495,7 +495,7 @@ channel_roa_out_changed(struct settle *se)
 static void
 channel_export_one_roa(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
 {
-  struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
+  SKIP_BACK_DECLARE(struct roa_subscription, s, req, req);

   switch (net->type)
   {
@@ -517,7 +517,7 @@ channel_export_one_roa(struct rt_export_request *req, const net_addr *net, struc
 static void
 channel_dump_roa_req(struct rt_export_request *req)
 {
-  struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
+  SKIP_BACK_DECLARE(struct roa_subscription, s, req, req);
   struct channel *c = s->c;

   debug(" Channel %s.%s ROA %s change notifier request %p\n",
@@ -573,7 +573,7 @@ channel_roa_subscribe(struct channel *c, rtable *tab, int dir)
 static void
 channel_roa_unsubscribed(struct rt_export_request *req)
 {
-  struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
+  SKIP_BACK_DECLARE(struct roa_subscription, s, req, req);
   struct channel *c = s->c;

   rem_node(&s->roa_node);
@@ -781,7 +781,7 @@ channel_del_obstacle(struct channel *c)
 void
 channel_import_stopped(struct rt_import_request *req)
 {
-  struct channel *c = SKIP_BACK(struct channel, in_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, in_req, req);

   mb_free(c->in_req.name);
   c->in_req.name = NULL;
@@ -792,7 +792,7 @@ channel_import_stopped(struct rt_import_request *req)
 static void
 channel_export_stopped(struct rt_export_request *req)
 {
-  struct channel *c = SKIP_BACK(struct channel, out_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, out_req, req);

   /* The hook has already stopped */
   req->hook = NULL;
@@ -825,7 +825,7 @@ channel_export_stopped(struct rt_export_request *req)
 static void
 channel_refeed_stopped(struct rt_export_request *req)
 {
-  struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, refeed_req, req);

   req->hook = NULL;

@@ -996,7 +996,7 @@ channel_schedule_reload(struct channel *c, struct channel_import_request *cir)
 static void
 channel_reload_stopped(struct rt_export_request *req)
 {
-  struct channel *c = SKIP_BACK(struct channel, reload_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, reload_req, req);

   req->hook = NULL;

@@ -1014,7 +1014,7 @@ channel_reload_stopped(struct rt_export_request *req)
 static void
 channel_reload_log_state_change(struct rt_export_request *req, u8 state)
 {
-  struct channel *c = SKIP_BACK(struct channel, reload_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, reload_req, req);

   if (state == TES_READY)
   {
@@ -1028,7 +1028,7 @@ channel_reload_log_state_change(struct rt_export_request *req, u8 state)
 static void
 channel_reload_dump_req(struct rt_export_request *req)
 {
-  struct channel *c = SKIP_BACK(struct channel, reload_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, reload_req, req);
   debug(" Channel %s.%s import reload request %p\n", c->proto->name, c->name, req);
 }

@@ -2893,7 +2893,7 @@ struct channel_cmd_reload_import_request {
 static void
 channel_reload_out_done(struct channel_feeding_request *cfr)
 {
-  struct channel_cmd_reload_feeding_request *ccrfr = SKIP_BACK(struct channel_cmd_reload_feeding_request, cfr, cfr);
+  SKIP_BACK_DECLARE(struct channel_cmd_reload_feeding_request, ccrfr, cfr, cfr);
   if (atomic_fetch_sub_explicit(&ccrfr->prr->counter, 1, memory_order_acq_rel) == 1)
     ev_send_loop(&main_birdloop, &ccrfr->prr->ev);
 }
@@ -2901,7 +2901,7 @@ channel_reload_out_done(struct channel_feeding_request *cfr)
 static void
 channel_reload_in_done(struct channel_import_request *cir)
 {
-  struct channel_cmd_reload_import_request *ccrir = SKIP_BACK(struct channel_cmd_reload_import_request, cir, cir);
+  SKIP_BACK_DECLARE(struct channel_cmd_reload_import_request, ccrir, cir, cir);
   if (atomic_fetch_sub_explicit(&ccrir->prr->counter, 1, memory_order_acq_rel) == 1)
     ev_send_loop(&main_birdloop, &ccrir->prr->ev);
 }
@@ -628,7 +628,7 @@ ea_class_free(struct ea_class *cl)
 static void
 ea_class_ref_free(resource *r)
 {
-  struct ea_class_ref *ref = SKIP_BACK(struct ea_class_ref, r, r);
+  SKIP_BACK_DECLARE(struct ea_class_ref, ref, r, r);
   if (!--ref->class->uc)
     ea_class_free(ref->class);
 }
@@ -636,7 +636,7 @@ ea_class_ref_free(resource *r)
 static void
 ea_class_ref_dump(resource *r, unsigned indent UNUSED)
 {
-  struct ea_class_ref *ref = SKIP_BACK(struct ea_class_ref, r, r);
+  SKIP_BACK_DECLARE(struct ea_class_ref, ref, r, r);
   debug("name \"%s\", type=%d\n", ref->class->name, ref->class->type);
 }
@@ -223,14 +223,14 @@ rt_show_net_export_bulk(struct rt_export_request *req, const net_addr *n,
     struct rt_pending_export *first UNUSED, struct rt_pending_export *last UNUSED,
     const rte **feed, uint count)
 {
-  struct rt_show_data *d = SKIP_BACK(struct rt_show_data, req, req);
+  SKIP_BACK_DECLARE(struct rt_show_data, d, req, req);
   return rt_show_net(d, n, feed, count);
 }

 static void
 rt_show_export_stopped_cleanup(struct rt_export_request *req)
 {
-  struct rt_show_data *d = SKIP_BACK(struct rt_show_data, req, req);
+  SKIP_BACK_DECLARE(struct rt_show_data, d, req, req);

   /* The hook is now invalid */
   req->hook = NULL;
@@ -319,7 +319,7 @@ rt_show_cont(struct rt_show_data *d)
 static void
 rt_show_export_stopped(struct rt_export_request *req)
 {
-  struct rt_show_data *d = SKIP_BACK(struct rt_show_data, req, req);
+  SKIP_BACK_DECLARE(struct rt_show_data, d, req, req);

   /* The hook is now invalid */
   req->hook = NULL;
@@ -396,7 +396,7 @@ static net *
 net_route(struct rtable_reading *tr, const net_addr *n)
 {
   ASSERT(tr->t->addr_type == n->type);
-  net_addr_union *nu = SKIP_BACK(net_addr_union, n, n);
+  SKIP_BACK_DECLARE(net_addr_union, nu, n, n);

   const struct f_trie *trie = atomic_load_explicit(&tr->t->trie, memory_order_acquire);

@@ -471,7 +471,7 @@ net_route(struct rtable_reading *tr, const net_addr *n)
 int
 net_roa_check(rtable *tp, const net_addr *n, u32 asn)
 {
-  net_addr_union *nu = SKIP_BACK(net_addr_union, n, n);
+  SKIP_BACK_DECLARE(net_addr_union, nu, n, n);
   int anything = 0;

 #define TW(ipv) do { \
@@ -1195,7 +1195,7 @@ rte_export(struct rt_export_hook *hook, struct rt_pending_export *rpe)
   uint count = 0;
   const rte **feed = NULL;

-  const struct netindex *i = SKIP_BACK(struct netindex, addr, (net_addr (*)[0]) n);
+  const SKIP_BACK_DECLARE(struct netindex, i, addr, (net_addr (*)[0]) n);
   ASSERT_DIE(i->index < atomic_load_explicit(&hook->tab->routes_block_size, memory_order_relaxed));

   struct rt_pending_export *last;
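One hunk above is worth a note: because SKIP_BACK_DECLARE expands to an ordinary declaration, qualifiers can be written in front of it, as the const SKIP_BACK_DECLARE(...) line in rte_export shows. A hand-expanded sketch of that usage, where the names come from the hunk and the expansion itself is illustrative:

  /* const SKIP_BACK_DECLARE(struct netindex, i, addr, (net_addr (*)[0]) n)
   * expands to roughly: */
  const struct netindex *i = SKIP_BACK(struct netindex, addr, (net_addr (*)[0]) n);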
@@ -1276,7 +1276,7 @@ rte_announce(struct rtable_private *tab, const struct netindex *i, net *net, con
     if (old_best_valid)
       old_best->sender->stats.pref--;

-  struct rt_pending_export *rpe = SKIP_BACK(struct rt_pending_export, li, lfjour_push_prepare(&tab->journal));
+  SKIP_BACK_DECLARE(struct rt_pending_export, rpe, li, lfjour_push_prepare(&tab->journal));

   if (!rpe)
   {
@@ -1355,8 +1355,8 @@ rt_send_export_event(struct rt_export_hook *hook)
 static void
 rt_cleanup_export(struct lfjour *j, struct lfjour_item *i)
 {
-  struct rtable_private *tab = SKIP_BACK(struct rtable_private, journal, j);
-  struct rt_pending_export *rpe = SKIP_BACK(struct rt_pending_export, li, i);
+  SKIP_BACK_DECLARE(struct rtable_private, tab, journal, j);
+  SKIP_BACK_DECLARE(struct rt_pending_export, rpe, li, i);

   /* Unlink this export from struct network */
   ASSERT_DIE(rpe->new || rpe->old);
@@ -1425,7 +1425,7 @@ rt_import_cleared(void *_ih)
 static void
 rt_cleanup_done(struct lfjour *j, u64 begin_seq, u64 end_seq)
 {
-  struct rtable_private *tab = SKIP_BACK(struct rtable_private, journal, j);
+  SKIP_BACK_DECLARE(struct rtable_private, tab, journal, j);
   ASSERT_DIE(DG_IS_LOCKED(tab->lock.rtable));

   if (~end_seq)
@@ -1848,7 +1848,7 @@ rte_recalculate(struct rtable_private *table, struct rt_import_hook *c, struct n
 int
 channel_preimport(struct rt_import_request *req, rte *new, const rte *old)
 {
-  struct channel *c = SKIP_BACK(struct channel, in_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, in_req, req);

   if (new && !old)
     if (CHANNEL_LIMIT_PUSH(c, RX))
@@ -2339,7 +2339,7 @@ rt_table_export_start_feed(struct rtable_private *tab, struct rt_export_hook *ho
   };
   lfjour_register(&tab->journal, &hook->recipient);

-  struct rt_pending_export *rpe = SKIP_BACK(struct rt_pending_export, li, atomic_load_explicit(&hook->recipient.last, memory_order_relaxed));
+  SKIP_BACK_DECLARE(struct rt_pending_export, rpe, li, atomic_load_explicit(&hook->recipient.last, memory_order_relaxed));
   req_trace(req, D_STATES, "Export initialized, last export %p (%lu)", rpe, rpe ? rpe->seq : 0);

   bmap_init(&hook->seq_map, hook->pool, 16);
@@ -2589,7 +2589,7 @@ rt_dump_hooks(rtable *tp)

   WALK_TLIST(lfjour_recipient, r, &tab->journal.recipients)
   {
-    struct rt_export_hook *eh = SKIP_BACK(struct rt_export_hook, recipient, r);
+    SKIP_BACK_DECLARE(struct rt_export_hook, eh, recipient, r);
     eh->req->dump_req(eh->req);
     debug(" Export hook %p requested by %p:"
       " refeed_pending=%u last_state_change=%t export_state=%u\n",
@@ -2687,7 +2687,7 @@ struct rt_flowspec_link {
 static void
 rt_flowspec_export_one(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
 {
-  struct rt_flowspec_link *ln = SKIP_BACK(struct rt_flowspec_link, req, req);
+  SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
   rtable *dst_pub = ln->dst;
   ASSUME(rt_is_flow(dst_pub));

@@ -2722,14 +2722,14 @@ rt_flowspec_export_one(struct rt_export_request *req, const net_addr *net, struc
 static void
 rt_flowspec_dump_req(struct rt_export_request *req)
 {
-  struct rt_flowspec_link *ln = SKIP_BACK(struct rt_flowspec_link, req, req);
+  SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
   debug(" Flowspec link for table %s (%p)\n", ln->dst->name, req);
 }

 static void
 rt_flowspec_log_state_change(struct rt_export_request *req, u8 state)
 {
-  struct rt_flowspec_link *ln = SKIP_BACK(struct rt_flowspec_link, req, req);
+  SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
   rt_trace(ln->dst, D_STATES, "Flowspec link from %s export state changed to %s",
     ln->src->name, rt_export_state_name(state));
 }
@@ -2798,7 +2798,7 @@ rt_flowspec_link(rtable *src_pub, rtable *dst_pub)
 static void
 rt_flowspec_link_stopped(struct rt_export_request *req)
 {
-  struct rt_flowspec_link *ln = SKIP_BACK(struct rt_flowspec_link, req, req);
+  SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
   rtable *dst = ln->dst;

   mb_free(ln);
@@ -2842,7 +2842,7 @@ rt_flowspec_reset_trie(struct rtable_private *tab)
 static void
 rt_free(resource *_r)
 {
-  struct rtable_private *r = SKIP_BACK(struct rtable_private, r, _r);
+  SKIP_BACK_DECLARE(struct rtable_private, r, r, _r);

   DBG("Deleting routing table %s\n", r->name);
   ASSERT_DIE(r->use_count == 0);
@@ -2864,7 +2864,7 @@ rt_free(resource *_r)
 static void
 rt_res_dump(resource *_r, unsigned indent)
 {
-  struct rtable_private *r = SKIP_BACK(struct rtable_private, r, _r);
+  SKIP_BACK_DECLARE(struct rtable_private, r, r, _r);

   debug("name \"%s\", addr_type=%s, rt_count=%u, use_count=%d\n",
     r->name, net_label[r->addr_type], r->rt_count, r->use_count);
@@ -3620,7 +3620,7 @@ rt_flowspec_update_rte(struct rtable_private *tab, const rte *r, rte *new)
   if (!bc->base_table)
     return 0;

-  struct bgp_proto *p = SKIP_BACK(struct bgp_proto, p, bc->c.proto);
+  SKIP_BACK_DECLARE(struct bgp_proto, p, p, bc->c.proto);

   enum flowspec_valid old = rt_get_flowspec_valid(r),
     valid = rt_flowspec_check(bc->base_table, tab, r->net, r->attrs, p->is_interior);
@@ -3648,7 +3648,7 @@ rt_flowspec_resolve_rte(rte *r, struct channel *c)
     && (c->class == &channel_bgp)
     && (bc->base_table))
   {
-    struct bgp_proto *p = SKIP_BACK(struct bgp_proto, p, bc->c.proto);
+    SKIP_BACK_DECLARE(struct bgp_proto, p, p, bc->c.proto);
     RT_LOCKED(c->in_req.hook->table, tab)
       valid = rt_flowspec_check(
         bc->base_table, tab,
@@ -4377,7 +4377,7 @@ void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *n
     struct rt_pending_export *first, struct rt_pending_export *last,
     const rte **feed, uint count)
 {
-  struct channel *c = SKIP_BACK(struct channel, reload_req, req);
+  SKIP_BACK_DECLARE(struct channel, c, reload_req, req);

   for (uint i=0; i<count; i++)
     if (feed[i]->sender == c->in_req.hook)
@@ -4507,14 +4507,14 @@ hc_notify_dump_req(struct rt_export_request *req)
 static void
 hc_notify_log_state_change(struct rt_export_request *req, u8 state)
 {
-  struct hostcache *hc = SKIP_BACK(struct hostcache, req, req);
+  SKIP_BACK_DECLARE(struct hostcache, hc, req, req);
   rt_trace(hc->tab, D_STATES, "HCU Export state changed to %s", rt_export_state_name(state));
 }

 static void
 hc_notify_export_one(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
 {
-  struct hostcache *hc = SKIP_BACK(struct hostcache, req, req);
+  SKIP_BACK_DECLARE(struct hostcache, hc, req, req);

   RT_LOCKED(hc->tab, tab)
     if (ev_active(tab->hcu_event) || !trie_match_net(hc->trie, net))
@@ -4586,7 +4586,7 @@ rt_free_hostcache(struct rtable_private *tab)
   node *n;
   WALK_LIST(n, hc->hostentries)
   {
-    struct hostentry *he = SKIP_BACK(struct hostentry, ln, n);
+    SKIP_BACK_DECLARE(struct hostentry, he, ln, n);
     ea_free(he->src);

     if (!lfuc_finished(&he->uc))
@@ -210,7 +210,7 @@ HASH_DEFINE_REHASH_FN(AGGR_BUCK, struct aggregator_bucket);
 static void
 aggregator_rt_notify(struct proto *P, struct channel *src_ch, const net_addr *net, rte *new, const rte *old)
 {
-  struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
+  SKIP_BACK_DECLARE(struct aggregator_proto, p, p, P);
   ASSERT_DIE(src_ch == p->src);
   struct aggregator_bucket *new_bucket = NULL, *old_bucket = NULL;
   struct aggregator_route *old_route = NULL;
@@ -316,7 +316,7 @@ aggregator_rt_notify(struct proto *P, struct channel *src_ch, const net_addr *ne
 static int
 aggregator_preexport(struct channel *C, struct rte *new)
 {
-  struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, C->proto);
+  SKIP_BACK_DECLARE(struct aggregator_proto, p, p, C->proto);
   /* Reject our own routes */
   if (new->sender == p->dst->in_req.hook)
     return -1;
@@ -334,7 +334,7 @@ aggregator_preexport(struct channel *C, struct rte *new)
 static void
 aggregator_postconfig(struct proto_config *CF)
 {
-  struct aggregator_config *cf = SKIP_BACK(struct aggregator_config, c, CF);
+  SKIP_BACK_DECLARE(struct aggregator_config, cf, c, CF);

   if (!cf->dst->table)
     cf_error("Source table not specified");
@@ -357,8 +357,8 @@ static struct proto *
 aggregator_init(struct proto_config *CF)
 {
   struct proto *P = proto_new(CF);
-  struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
-  struct aggregator_config *cf = SKIP_BACK(struct aggregator_config, c, CF);
+  SKIP_BACK_DECLARE(struct aggregator_proto, p, p, P);
+  SKIP_BACK_DECLARE(struct aggregator_config, cf, c, CF);

   proto_configure_channel(P, &p->src, cf->src);
   proto_configure_channel(P, &p->dst, cf->dst);
@@ -377,7 +377,7 @@ aggregator_init(struct proto_config *CF)
 static int
 aggregator_start(struct proto *P)
 {
-  struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
+  SKIP_BACK_DECLARE(struct aggregator_proto, p, p, P);

   p->bucket_slab = sl_new(P->pool, sizeof(struct aggregator_bucket) + AGGR_DATA_MEMSIZE);
   HASH_INIT(p->buckets, P->pool, AGGR_BUCK_ORDER);
@@ -396,7 +396,7 @@ aggregator_start(struct proto *P)
 static int
 aggregator_shutdown(struct proto *P)
 {
-  struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
+  SKIP_BACK_DECLARE(struct aggregator_proto, p, p, P);

   HASH_WALK_DELSAFE(p->buckets, next_hash, b)
   {
@@ -424,8 +424,8 @@ aggregator_shutdown(struct proto *P)
 static int
 aggregator_reconfigure(struct proto *P, struct proto_config *CF)
 {
-  struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
-  struct aggregator_config *cf = SKIP_BACK(struct aggregator_config, c, CF);
+  SKIP_BACK_DECLARE(struct aggregator_proto, p, p, P);
+  SKIP_BACK_DECLARE(struct aggregator_config, cf, c, CF);

   TRACE(D_EVENTS, "Reconfiguring");
@@ -530,7 +530,7 @@ bfd_reconfigure_session(struct bfd_proto *p, struct bfd_session *s)

   ASSERT_DIE(birdloop_inside(p->p.loop));

-  struct bfd_request *req = SKIP_BACK(struct bfd_request, n, HEAD(s->request_list));
+  SKIP_BACK_DECLARE(struct bfd_request, req, n, HEAD(s->request_list));
   s->cf = bfd_merge_options(s->ifa->cf, &req->opts);

   u32 tx = (s->loc_state == BFD_STATE_UP) ? s->cf.min_tx_int : s->cf.idle_tx_int;
@@ -743,7 +743,7 @@ bfd_pickup_requests(void *_data UNUSED)
   node *n;
   WALK_LIST(n, bfd_global.proto_list)
   {
-    struct bfd_proto *p = SKIP_BACK(struct bfd_proto, bfd_node, n);
+    SKIP_BACK_DECLARE(struct bfd_proto, p, bfd_node, n);
     birdloop_enter(p->p.loop);
     BFD_LOCK;

@@ -808,7 +808,7 @@ bfd_drop_requests(struct bfd_proto *p)
 {
   WALK_LIST_FIRST(n, s->request_list)
   {
-    struct bfd_request *req = SKIP_BACK(struct bfd_request, n, n);
+    SKIP_BACK_DECLARE(struct bfd_request, req, n, n);
     rem_node(&req->n);
     add_tail(&bfd_global.pickup_list, &req->n);
     req->session = NULL;
@@ -1860,7 +1860,7 @@ bgp_done_prefix(struct bgp_channel *c, struct bgp_prefix *px, struct bgp_bucket
 static void
 bgp_pending_tx_rfree(resource *r)
 {
-  struct bgp_pending_tx *ptx = SKIP_BACK(struct bgp_pending_tx, r, r);
+  SKIP_BACK_DECLARE(struct bgp_pending_tx, ptx, r, r);

   HASH_WALK(ptx->prefix_hash, next, n)
     rt_unlock_source(rt_find_source_global(n->path_id));
@@ -1913,7 +1913,7 @@ static void
 bgp_out_table_feed(void *data)
 {
   struct bgp_out_export_hook *hook = data;
-  struct bgp_channel *bc = SKIP_BACK(struct bgp_channel, prefix_exporter, hook->h.table);
+  SKIP_BACK_DECLARE(struct bgp_channel, bc, prefix_exporter, hook->h.table);
   struct bgp_pending_tx *c = bc->ptx;

   int max = 512;
@@ -2012,7 +2012,7 @@ bgp_out_table_export_start(struct rt_exporter *re, struct rt_export_request *req
   req->hook = rt_alloc_export(re, req->pool, sizeof(struct bgp_out_export_hook));
   req->hook->req = req;

-  struct bgp_out_export_hook *hook = SKIP_BACK(struct bgp_out_export_hook, h, req->hook);
+  SKIP_BACK_DECLARE(struct bgp_out_export_hook, hook, h, req->hook);

   hook->h.event.hook = bgp_out_table_feed;
   rt_init_export(re, req->hook);
@@ -2720,7 +2720,7 @@ bgp_rte_modify_stale(struct rt_export_request *req, const net_addr *n,
     struct rt_pending_export *first, struct rt_pending_export *last,
     const rte **feed, uint count)
 {
-  struct bgp_channel *c = SKIP_BACK(struct bgp_channel, stale_feed, req);
+  SKIP_BACK_DECLARE(struct bgp_channel, c, stale_feed, req);
   struct rt_import_hook *irh = c->c.in_req.hook;

   /* Find our routes among others */
@@ -268,7 +268,7 @@ bgp_listen_create(void *_ UNUSED)

     /* Get the first request to match */
     struct bgp_listen_request *req = HEAD(bgp_listen_pending);
-    struct bgp_proto *p = SKIP_BACK(struct bgp_proto, listen, req);
+    SKIP_BACK_DECLARE(struct bgp_proto, p, listen, req);
     rem_node(&req->n);

     /* First try to find existing socket */
@@ -922,14 +922,14 @@ bgp_graceful_restart_feed_done(struct rt_export_request *req)
 static void
 bgp_graceful_restart_feed_dump_req(struct rt_export_request *req)
 {
-  struct bgp_channel *c = SKIP_BACK(struct bgp_channel, stale_feed, req);
+  SKIP_BACK_DECLARE(struct bgp_channel, c, stale_feed, req);
   debug(" BGP-GR %s.%s export request %p\n", c->c.proto->name, c->c.name, req);
 }

 static void
 bgp_graceful_restart_feed_log_state_change(struct rt_export_request *req, u8 state)
 {
-  struct bgp_channel *c = SKIP_BACK(struct bgp_channel, stale_feed, req);
+  SKIP_BACK_DECLARE(struct bgp_channel, c, stale_feed, req);
   struct bgp_proto *p = (void *) c->c.proto;
   BGP_TRACE(D_EVENTS, "Long-lived graceful restart export state changed to %s", rt_export_state_name(state));

@@ -1338,7 +1338,7 @@ bgp_find_proto(sock *sk)

   WALK_LIST(req, bs->requests)
   {
-    struct bgp_proto *p = SKIP_BACK(struct bgp_proto, listen, req);
+    SKIP_BACK_DECLARE(struct bgp_proto, p, listen, req);
     if ((p->p.proto == &proto_bgp) &&
       (ipa_equal(p->remote_ip, sk->daddr) || bgp_is_dynamic(p)) &&
       (!p->cf->remote_range || ipa_in_netX(sk->daddr, p->cf->remote_range)) &&
@@ -1149,7 +1149,7 @@ bgp_apply_mpls_labels(struct bgp_parse_state *s, ea_list **to, u32 lnum, u32 lab
   if (s->channel->cf->gw_mode == GW_DIRECT)
   {
     eattr *e = ea_find(*to, &ea_gen_nexthop);
-    struct nexthop_adata_mpls *namp = SKIP_BACK(struct nexthop_adata_mpls, nhad.ad, e->u.ptr);
+    SKIP_BACK_DECLARE(struct nexthop_adata_mpls, namp, nhad.ad, e->u.ptr);

     namp->nhad.nh.labels = lnum;
     memcpy(namp->nhad.nh.label, labels, lnum * sizeof(u32));
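The hunk above also shows that the member argument need not be a direct field: nhad.ad is a nested member path, and the offset is computed for the whole path. A minimal sketch of the same idea using the simplified macro from earlier; the struct names here are illustrative, not BIRD's:

  #include <stddef.h>

  #define MY_SKIP_BACK(s, i, p)  ((s *)((char *)(p) - offsetof(s, i)))

  struct inner  { int len; char data[8]; };
  struct outer  { int tag; struct inner in; };

  /* Given a pointer to outer.in.data, recover the whole struct outer;
   * offsetof() accepts the nested member designator in.data. */
  static struct outer *outer_of_data(char *dp)
  {
    return MY_SKIP_BACK(struct outer, in.data, dp);
  }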
@@ -99,7 +99,7 @@ pipe_preexport(struct channel *C, rte *e)
 void
 pipe_import_by_refeed_free(struct channel_feeding_request *cfr)
 {
-  struct import_to_export_reload *reload = SKIP_BACK(struct import_to_export_reload, cfr, cfr);
+  SKIP_BACK_DECLARE(struct import_to_export_reload, reload, cfr, cfr);
   reload->cir->done(reload->cir);
 }
@@ -482,7 +482,7 @@ sockets_prepare(struct birdloop *loop, struct pfd *pfd)
   node *n;
   WALK_LIST(n, loop->sock_list)
   {
-    sock *s = SKIP_BACK(sock, n, n);
+    SKIP_BACK_DECLARE(sock, s, n, n);
     uint w = sk_want_events(s);

     if (!w)
@@ -1244,7 +1244,7 @@ bird_thread_show_loop(struct bird_thread_show_data *tsd, struct birdloop *loop)
 static void
 bird_thread_show(struct bird_thread_syncer *sync)
 {
-  struct bird_thread_show_data *tsd = SKIP_BACK(struct bird_thread_show_data, sync, sync);
+  SKIP_BACK_DECLARE(struct bird_thread_show_data, tsd, sync, sync);

   if (!tsd->lp)
     tsd->lp = lp_new(tsd->sync.pool);
@@ -1276,7 +1276,7 @@ bird_thread_show(struct bird_thread_syncer *sync)
 static void
 cmd_show_threads_done(struct bird_thread_syncer *sync)
 {
-  struct bird_thread_show_data *tsd = SKIP_BACK(struct bird_thread_show_data, sync, sync);
+  SKIP_BACK_DECLARE(struct bird_thread_show_data, tsd, sync, sync);
   ASSERT_DIE(birdloop_inside(&main_birdloop));

   tsd->cli->cont = NULL;
@@ -110,7 +110,7 @@ struct log_thread_syncer {
 static void
 lts_done(struct bird_thread_syncer *sync)
 {
-  struct log_thread_syncer *lts = SKIP_BACK(struct log_thread_syncer, sync, sync);
+  SKIP_BACK_DECLARE(struct log_thread_syncer, lts, sync, sync);

   log_lock();
   if (lts->lc_close)