Route table objects use the new locked object macro stack
parent 44cb7ad783
commit 5c9b16f86c
nest/route.h (50)
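Read from the hunks below, the change has two parts. First, the RTABLE_PUBLIC field-list macro becomes a real struct rtable_public, which both struct rtable_private and the rtable union now embed as an unnamed member, and the private structure gains a struct rtable_private **locked_at pointer. Second, the hand-paired RT_LOCK() / RT_UNLOCK() / RT_RETURN() calls are replaced by block-scoped macros built on the generic LOBJ_* locked-object helpers: RT_LOCKED() runs a block with the table locked, RT_LOCK_SIMPLE() / RT_UNLOCK_SIMPLE() keep the old explicit style where still needed, and RT_UNLOCKED_TEMPORARILY() drops the lock for the duration of a block. The first two hunks are from nest/route.h; the remaining hunks patch the route table implementation, whose file header is not shown on this page.

The LOBJ_UNLOCK_CLEANUP() declaration suggests that the unlock on early return is driven by a variable cleanup handler. A minimal sketch of that general idiom, using plain pthreads and invented names (WITH_LOCK, unlock_guard, example), not BIRD's actual LOBJ implementation:

#include <pthread.h>

/* Cleanup handler: runs when the guard variable goes out of scope. */
static inline void unlock_guard(pthread_mutex_t **g)
{
  if (*g)
    pthread_mutex_unlock(*g);
}

/* One-iteration loop: lock on entry, unlock in the loop increment;
 * the cleanup attribute also unlocks if the block is left by return. */
#define WITH_LOCK(m, g) \
  for (pthread_mutex_t *g __attribute__((cleanup(unlock_guard))) = \
         (pthread_mutex_lock(m), (m)); \
       g; \
       pthread_mutex_unlock(g), g = NULL)

void example(pthread_mutex_t *m, int bail_out)
{
  WITH_LOCK(m, g)
  {
    if (bail_out)
      return;     /* cleanup handler unlocks here */
    /* ... critical section ... */
  }               /* normal exit: increment unlocks, cleanup then sees NULL */
}

The pattern combines a one-iteration for loop, so the macro can head a block and unlock on normal exit, with __attribute__((cleanup)), so a return from inside the block also unlocks.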
@@ -101,20 +101,22 @@ extern uint rtable_max_id;
 DEFINE_DOMAIN(rtable);

 /* The public part of rtable structure */
-#define RTABLE_PUBLIC \
-  resource r; \
-  node n;                              /* Node in list of all tables */ \
-  char *name;                          /* Name of this table */ \
-  uint addr_type;                      /* Type of address data stored in table (NET_*) */ \
-  uint id;                             /* Integer table ID for fast lookup */ \
-  DOMAIN(rtable) lock;                 /* Lock to take to access the private parts */ \
-  struct rtable_config *config;        /* Configuration of this table */ \
-  struct birdloop *loop;               /* Service thread */ \
+struct rtable_public {
+  resource r;
+  node n;                              /* Node in list of all tables */
+  char *name;                          /* Name of this table */
+  uint addr_type;                      /* Type of address data stored in table (NET_*) */
+  uint id;                             /* Integer table ID for fast lookup */
+  DOMAIN(rtable) lock;                 /* Lock to take to access the private parts */
+  struct rtable_config *config;        /* Configuration of this table */
+  struct birdloop *loop;               /* Service thread */
+};

 /* The complete rtable structure */
 struct rtable_private {
   /* Once more the public part */
-  RTABLE_PUBLIC;
+  struct rtable_public;
+  struct rtable_private **locked_at;

   /* Here the private items not to be accessed without locking */
   pool *rp;                            /* Resource pool to allocate everything from, including itself */
@@ -166,29 +168,23 @@ struct rtable_private {

 /* The final union private-public rtable structure */
 typedef union rtable {
-  struct {
-    RTABLE_PUBLIC;
-  };
+  struct rtable_public;
   struct rtable_private priv;
 } rtable;

-#define RT_IS_LOCKED(tab)      DOMAIN_IS_LOCKED(rtable, (tab)->lock)
+/* Define the lock cleanup function */
+LOBJ_UNLOCK_CLEANUP(rtable, rtable);
+
+#define RT_IS_LOCKED(tab)      LOBJ_IS_LOCKED((tab), rtable)
+#define RT_LOCKED(tab, tp)     LOBJ_LOCKED((tab), tp, rtable, rtable)
+#define RT_LOCK_SIMPLE(tab)    LOBJ_LOCK_SIMPLE((tab), rtable)
+#define RT_UNLOCK_SIMPLE(tab)  LOBJ_UNLOCK_SIMPLE((tab), rtable)
+
+#define RT_UNLOCKED_TEMPORARILY(tab, tp)  LOBJ_UNLOCKED_TEMPORARILY((tab), tp, rtable, rtable)

-#define RT_LOCK(tab)           ({ LOCK_DOMAIN(rtable, (tab)->lock); &(tab)->priv; })
-#define RT_UNLOCK(tab)         UNLOCK_DOMAIN(rtable, (tab)->lock)
-#define RT_PRIV(tab)           ({ ASSERT_DIE(RT_IS_LOCKED((tab))); &(tab)->priv; })
 #define RT_PUB(tab)            SKIP_BACK(rtable, priv, tab)

-#define RT_LOCKED(tpub, tpriv) for (struct rtable_private *tpriv = RT_LOCK(tpub); tpriv; RT_UNLOCK(tpriv), (tpriv = NULL))
-#define RT_LOCKED_IF_NEEDED(_tpub, _tpriv) for ( \
-  struct rtable_private *_al = RT_IS_LOCKED(_tpub) ? &(_tpub)->priv : NULL, *_tpriv = _al ?: RT_LOCK(_tpub); \
-  _tpriv; \
-  _al ?: RT_UNLOCK(_tpriv), (_tpriv = NULL))
-
-#define RT_RETURN(tpriv, ...)  do { RT_UNLOCK(tpriv); return __VA_ARGS__; } while (0)
-
-#define RT_PRIV_SAME(tpriv, tpub)      (&(tpub)->priv == (tpriv))

 /* Flags for birdloop_flag() */
 #define RTF_CLEANUP 1
 #define RTF_NHU 2
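For orientation, this is roughly how the rewrites in the following hunks look from a caller's point of view. The two functions are hypothetical, written only to contrast the styles; tab->config is a real public field from the first hunk:

/* Before: the caller pairs RT_LOCK/RT_UNLOCK by hand and must use
 * RT_RETURN() to unlock on early exits. */
void rt_example_old(rtable *t)
{
  struct rtable_private *tab = RT_LOCK(t);
  if (!tab->config)
    RT_RETURN(tab);
  /* ... work on the private parts ... */
  RT_UNLOCK(t);
}

/* After: RT_LOCKED() scopes the lock to the block; a plain return, or
 * simply falling off the end of the block, releases it. */
void rt_example_new(rtable *t)
{
  RT_LOCKED(t, tab)
  {
    if (!tab->config)
      return;
    /* ... work on the private parts ... */
  }
}

This is why RT_RETURN() can be removed above: everywhere it appeared, a plain return now suffices.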
@@ -1277,17 +1277,22 @@ rte_export(struct rt_table_export_hook *th, struct rt_pending_export *rpe)
     hook->req->export_one(hook->req, n, rpe);
   else if (hook->req->export_bulk)
   {
-    net *net = SKIP_BACK(struct network, n.addr, (net_addr (*)[0]) n);
-    RT_LOCK(tab);
-    struct rt_pending_export *last = net->last;
-    uint count = rte_feed_count(net);
+    struct rt_pending_export *last = NULL;
+    uint count = 0;
     const rte **feed = NULL;
-    if (count)
+
+    net *net = SKIP_BACK(struct network, n.addr, (net_addr (*)[0]) n);
+
+    RT_LOCKED(tab, tp)
     {
+      last = net->last;
+      if (count = rte_feed_count(net))
+      {
        feed = alloca(count * sizeof(rte *));
        rte_feed_obtain(net, feed, count);
+      }
     }
-    RT_UNLOCK(tab);
+
     hook->req->export_bulk(hook->req, n, rpe, last, feed, count);
   }
   else
@@ -1568,18 +1573,15 @@ rt_export_hook(void *_data)
   ASSERT_DIE(atomic_load_explicit(&c->h.export_state, memory_order_relaxed) == TES_READY);

   if (!c->rpe_next)
+    RT_LOCKED(tab, tp)
     {
-      RT_LOCK(tab);
       c->rpe_next = rt_next_export(c, c->table);

       if (!c->rpe_next)
       {
         rt_export_used(c->table, c->h.req->name, "done exporting");
-        RT_UNLOCK(tab);
         return;
       }
-
-      RT_UNLOCK(tab);
     }

   int used = 0;
@@ -2021,7 +2023,7 @@ rte_import(struct rt_import_request *req, const net_addr *n, rte *new, struct rt
       req->hook->stats.withdraws_ignored++;
       if (req->trace_routes & D_ROUTES)
        log(L_TRACE "%s > ignored %N withdraw", req->name, n);
-      RT_RETURN(tab);
+      return;
     }

   /* Recalculate the best route */
@@ -2363,7 +2365,11 @@ rt_table_export_stop(struct rt_export_hook *hh)
   struct rt_table_export_hook *hook = SKIP_BACK(struct rt_table_export_hook, h, hh);
   int ok = 0;

-  RT_LOCKED_IF_NEEDED(SKIP_BACK(rtable, priv.exporter, hook->table), tab)
+  rtable *t = SKIP_BACK(rtable, priv.exporter, hook->table);
+  if (RT_IS_LOCKED(t))
+    ok = rt_table_export_stop_locked(hh);
+  else
+    RT_LOCKED(t, tab)
       ok = rt_table_export_stop_locked(hh);

   if (ok)
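RT_LOCKED_IF_NEEDED disappears from route.h in the second hunk; the hunk above shows its conditional-locking pattern written out at the call site instead: check RT_IS_LOCKED() and call the *_locked variant directly when the lock is already held, otherwise take the lock with RT_LOCKED() around the same call.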
@@ -2713,13 +2719,14 @@ rt_flowspec_export_one(struct rt_export_request *req, const net_addr *net, struc
   struct rt_flowspec_link *ln = SKIP_BACK(struct rt_flowspec_link, req, req);
   rtable *dst_pub = ln->dst;
   ASSUME(rt_is_flow(dst_pub));
-  struct rtable_private *dst = RT_LOCK(dst_pub);
+
+  RT_LOCKED(dst_pub, dst)
+  {

   /* No need to inspect it further if recalculation is already scheduled */
   if ((dst->nhu_state == NHU_SCHEDULED) || (dst->nhu_state == NHU_DIRTY)
       || !trie_match_net(dst->flowspec_trie, net))
   {
-    RT_UNLOCK(dst_pub);
     rpe_mark_seen_all(req->hook, first, NULL, NULL);
     return;
   }
@@ -2738,7 +2745,7 @@ rt_flowspec_export_one(struct rt_export_request *req, const net_addr *net, struc
   if (o != RTE_VALID_OR_NULL(new_best))
     rt_schedule_nhu(dst);

-  RT_UNLOCK(dst_pub);
+  }
 }

 static void
@@ -3758,10 +3765,10 @@ rt_flowspec_check(rtable *tab_ip, rtable *tab_flow, const net_addr *n, ea_list *

       const rte *rc = &nc->routes->rte;
       if (rt_get_source_attr(rc) != RTS_BGP)
-        RT_RETURN(tip, FLOWSPEC_INVALID);
+        return FLOWSPEC_INVALID;

       if (rta_get_first_asn(rc->attrs) != asn_b)
-        RT_RETURN(tip, FLOWSPEC_INVALID);
+        return FLOWSPEC_INVALID;
     }
     TRIE_WALK_END;
   }
@@ -3862,18 +3869,21 @@ rt_next_hop_update_net(struct rtable_private *tab, net *n)
    * is being computed. Statistically, this should almost never happen. In such
    * case, we just drop all the computed changes and do it once again.
    * */
-      RT_UNLOCK(tab);

       uint mod = 0;
+      RT_UNLOCKED_TEMPORARILY(tpub, tab)
+      {
+        /* DO NOT RETURN OR BREAK OUT OF THIS BLOCK */
+
       if (is_flow)
        for (uint i = 0; i < pos; i++)
-          mod += rt_flowspec_update_rte(RT_PUB(tab), &updates[i].old->rte, &updates[i].new);
+          mod += rt_flowspec_update_rte(tpub, &updates[i].old->rte, &updates[i].new);

       else
        for (uint i = 0; i < pos; i++)
          mod += rt_next_hop_update_rte(&updates[i].old->rte, &updates[i].new);

-      RT_LOCK(RT_PUB(tab));
+      }

       if (!mod)
        return 0;
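The "DO NOT RETURN OR BREAK OUT OF THIS BLOCK" comment follows from how a temporary-unlock block has to behave: the lock is dropped on entry and re-taken only when the block is left through its normal end, so an early return or break would skip the re-lock while the caller still believes it holds the table. A rough sketch of the shape such a macro can take, reusing the pthread stand-ins from the sketch near the top of this page and not claiming to be the actual LOBJ_UNLOCKED_TEMPORARILY definition:

/* Unlock on entry, re-lock in the loop increment.  Anything that jumps out
 * of the body bypasses the increment and therefore the re-lock. */
#define WITH_UNLOCKED(m, g) \
  for (pthread_mutex_t *g = (pthread_mutex_unlock(m), (m)); \
       g; \
       pthread_mutex_lock(g), g = NULL)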
@@ -4155,11 +4165,11 @@ rt_delete(void *tab_)
   /* We assume that nobody holds the table reference now as use_count is zero.
    * Anyway the last holder may still hold the lock. Therefore we lock and
    * unlock it the last time to be sure that nobody is there. */
-  struct rtable_private *tab = RT_LOCK((rtable *) tab_);
+  struct rtable_private *tab = RT_LOCK_SIMPLE((rtable *) tab_);
   struct config *conf = tab->deleted;
   DOMAIN(rtable) dom = tab->lock;

-  RT_UNLOCK(RT_PUB(tab));
+  RT_UNLOCK_SIMPLE(RT_PUB(tab));

   /* Everything is freed by freeing the loop */
   birdloop_free(tab->loop);
@@ -4259,21 +4269,14 @@ rt_commit(struct config *new, struct config *old)
   if (old)
   {
     WALK_LIST(o, old->tables)
+      RT_LOCKED(o->table, tab)
       {
-        struct rtable_private *tab = RT_LOCK(o->table);
-
        if (tab->deleted)
-        {
-          RT_UNLOCK(tab);
          continue;
-        }

        r = rt_find_table_config(new, o->name);
        if (r && !new->shutdown && rt_reconfigure(tab, r, o))
-        {
-          RT_UNLOCK(tab);
          continue;
-        }

        DBG("\t%s: deleted\n", o->name);
        tab->deleted = old;
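The bare continue statements left inside the RT_LOCKED block in rt_commit are worth a note: with a loop-shaped lock macro (compare the old RT_LOCKED definition removed from route.h above, whose loop increment performed the unlock), continue reaches the unlock step before the next WALK_LIST iteration, so the explicit RT_UNLOCK before each continue becomes redundant. This assumes LOBJ_LOCKED keeps that one-iteration for-loop shape; the hunk itself only shows that the explicit unlocks were dropped.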
@@ -4287,7 +4290,6 @@ rt_commit(struct config *new, struct config *old)

        /* Force one more loop run */
        birdloop_flag(tab->loop, RTF_DELETE);
-        RT_UNLOCK(tab);
       }
   }

@@ -4416,6 +4418,8 @@ rt_feed_by_fib(void *data)
   struct fib_iterator *fit = &c->feed_fit;
   rt_feed_block block = {};

+  _Bool done = 1;
+
   ASSERT(atomic_load_explicit(&c->h.export_state, memory_order_relaxed) == TES_FEEDING);

   RT_LOCKED(RT_PUB(SKIP_BACK(struct rtable_private, exporter, c->table)), tab)
@@ -4428,10 +4432,8 @@ rt_feed_by_fib(void *data)
       if (!rt_prepare_feed(c, n, &block))
       {
        FIB_ITERATE_PUT(fit);
-        RT_UNLOCK(tab);
-        rt_process_feed(c, &block);
-        rt_send_export_event(&c->h);
-        return;
+        done = 0;
+        break;
       }
     }
     else
@@ -4441,7 +4443,11 @@ rt_feed_by_fib(void *data)
   }

   rt_process_feed(c, &block);
+
+  if (done)
     rt_feed_done(&c->h);
+  else
+    rt_send_export_event(&c->h);
 }

 static void
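In rt_feed_by_fib the early return inside the locked walk is gone because the partially filled feed block still has to be flushed and a new export event scheduled, and that should happen after the lock is dropped. The done flag records whether the walk finished; break ends the walk early, the RT_LOCKED block releases the lock when it is left, and the common tail below the block picks between rt_feed_done() and rt_send_export_event(). Condensed from the three rt_feed_by_fib hunks above, not compilable on its own:

  _Bool done = 1;

  RT_LOCKED(RT_PUB(SKIP_BACK(struct rtable_private, exporter, c->table)), tab)
  {
    /* FIB walk; when the feed block fills up: */
    if (!rt_prepare_feed(c, n, &block))
    {
      FIB_ITERATE_PUT(fit);
      done = 0;
      break;    /* stop early; the lock is released when the block is left */
    }
  }

  rt_process_feed(c, &block);   /* flush outside the lock */

  if (done)
    rt_feed_done(&c->h);
  else
    rt_send_export_event(&c->h);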
@@ -4467,12 +4473,9 @@ rt_feed_by_trie(void *data)
          continue;

        if (!rt_prepare_feed(c, n, &block))
-        {
-          RT_UNLOCK(tab);
-          rt_process_feed(c, &block);
+          return
+            rt_process_feed(c, &block),
            rt_send_export_event(&c->h);
-          return;
-        }
       }
     while (trie_walk_next(ws, &c->walk_last));

@@ -4876,12 +4879,12 @@ rt_update_hostcache(void *data)

   /* Shutdown shortcut */
   if (!hc->req.hook)
-    RT_RETURN(tab);
+    return;

   if (rt_cork_check(&hc->update))
   {
     rt_trace(tab, D_STATES, "Hostcache update corked");
-    RT_RETURN(tab);
+    return;
   }

   /* Destination schedule map */