0
0
mirror of https://gitlab.nic.cz/labs/bird.git synced 2025-01-15 05:21:54 +00:00

beautifying bmp

This commit is contained in:
Katerina Kubecova 2024-09-19 16:01:46 +02:00
parent 6c8e059935
commit 5ca2e07d6b
5 changed files with 41 additions and 193 deletions

View File

@ -2989,7 +2989,6 @@ init_journals(void)
ea_list *
proto_state_to_eattr(struct proto *p, int old_state, int proto_deleting)
{
//log("protocol %s to eattr", p->name);
struct {
ea_list l;
eattr a[9+19];
@ -2998,7 +2997,6 @@ proto_state_to_eattr(struct proto *p, int old_state, int proto_deleting)
eattrs.l = (ea_list) {};
eattrs.a[eattrs.l.count++] = EA_LITERAL_STORE_STRING(&ea_name, 0, p->name);
//eattrs.a[eattrs.l.count++] = EA_LITERAL_STORE_STRING(&ea_protocol_name, 0, p->proto->name); this info is stored in ea_protocol_type
eattrs.a[eattrs.l.count++] = EA_LITERAL_STORE_PTR(&ea_protocol_type, 0, p->proto);
if (p->main_channel)
eattrs.a[eattrs.l.count++] = EA_LITERAL_STORE_STRING(&ea_table, 0, p->main_channel->table->name);
@ -3030,15 +3028,12 @@ channel_state_to_eattr(struct channel *ch, int proto_deleting)
eattrs.a[eattrs.l.count++] = EA_LITERAL_EMBEDDED(&ea_in_keep, 0, ch->in_keep);
eattrs.a[eattrs.l.count++] = EA_LITERAL_STORE_PTR(&ea_rtable, 0, ch->table);
log("ea_rtable added (%x), eattrs %x, str %x", ch->table, eattrs.l, eattrs.a[0].u.ad->data);
if (ch->proto->proto == &proto_bgp && ch != ch->proto->mpls_channel)
{
struct bgp_channel *bc = (struct bgp_channel *) ch;
eattrs.a[eattrs.l.count++] = EA_LITERAL_EMBEDDED(&ea_bgp_afi, 0, bc->afi);
}
//log("to lookup goes %i (%i)", eattrs.l, &eattrs.l);
return ea_lookup_slow(&eattrs.l, 0, EALS_CUSTOM);
}
@ -3047,8 +3042,6 @@ channel_state_to_eattr(struct channel *ch, int proto_deleting)
void
proto_journal_state_push(ea_list *attr, struct proto *p, int save_to_jour)
{
//log("proto_journal_state_push %i", p->id);
//log("push state for %s", ea_get_adata(attr, &ea_name)->data);
ea_set_attr(&attr, EA_LITERAL_STORE_ADATA(&ea_last_modified, 0, &p->last_state_change, sizeof(btime)));
attr = ea_lookup(attr, 0, EALS_CUSTOM);

View File

@ -80,12 +80,10 @@ static union bgp_attr_desc bgp_attr_table[];
/* Map an eattr to its BGP attribute descriptor, or NULL if the attribute
 * is not a BGP path attribute. Works by checking whether the attribute's
 * ea_class pointer falls inside the static bgp_attr_table[] array, which
 * relies on the classes being embedded in that table. */
static inline const union bgp_attr_desc *bgp_find_attr_desc(eattr *a)
{
const struct ea_class *class = ea_class_find(a->id);
log("ea %x class %s", a, class->name);  /* debug trace -- removed by this commit */
/* Pointer-range test: class addresses outside the table are non-BGP attributes */
if ((class < &bgp_attr_table[0].class) || (class >= &bgp_attr_table[BGP_ATTR_MAX].class))
return NULL;
log("desc returned");  /* debug trace -- removed by this commit */
return (const union bgp_attr_desc *) class;
}
@ -544,7 +542,6 @@ bgp_encode_next_hop(struct bgp_write_state *s, eattr *a, byte *buf, uint size)
}
else
{
log("mp next hop set %x", a);
s->mp_next_hop = a;
return 0;
}
@ -1347,7 +1344,6 @@ bgp_encode_attrs(struct bgp_write_state *s, ea_list *attrs, byte *buf, byte *end
byte *pos = buf;
int i, len;
log("attrs count %i", attrs->count);
for (i = 0; i < attrs->count; i++)
{
len = bgp_encode_attr(s, &attrs->attrs[i], pos, end - pos);
@ -1892,7 +1888,7 @@ bgp_out_item_done(struct lfjour *j UNUSED, struct lfjour_item *i UNUSED)
{}
static struct rt_export_feed *
bgp_out_feed_net(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
bgp_out_feed_net(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, UNUSED const struct rt_export_item *_first)
{
ASSERT_DIE(u == NULL);
SKIP_BACK_DECLARE(struct bgp_ptx_private, c, exporter, e);

View File

@ -676,13 +676,14 @@ bgp_conn_enter_established_state(struct bgp_conn *conn)
conn->sk->fast_rx = 0;
p->conn = conn;
ea_list *eal = proto_state_table->attrs[p->p.id];
ea_set_attr(&eal, EA_LITERAL_STORE_PTR(&ea_bgp_conn, 0, p->conn));
proto_journal_state_push(eal, &p->p, 1);
p->last_error_class = 0;
p->last_error_code = 0;
p->as4_session = conn->as4_session;
ea_list *eal = proto_state_table->attrs[p->p.id];
ea_set_attr(&eal, EA_LITERAL_STORE_PTR(&ea_bgp_conn, 0, p->conn));
ea_set_attr(&eal, EA_LITERAL_EMBEDDED(&ea_bgp_as4_session, 0, p->as4_session));
proto_journal_state_push(eal, &p->p, 1);
p->route_refresh = peer->route_refresh;
p->enhanced_refresh = local->enhanced_refresh && peer->enhanced_refresh;
@ -795,11 +796,8 @@ bgp_conn_enter_established_state(struct bgp_conn *conn)
bgp_conn_set_state(conn, BS_ESTABLISHED);
proto_notify_state(&p->p, PS_UP);
log("bgp domain %i, locked %i", p->p.pool->domain, DG_IS_LOCKED(p->p.pool->domain));
#ifdef CONFIG_BMP
//TODO not to repeat done eatters
log("in bgp nesmysl, protocol %s id %i", p->p.name, p->p.id);
ea_list *ea_l = proto_state_table->attrs[p->p.id];
ea_set_attr(&ea_l, EA_LITERAL_STORE_ADATA(&ea_bgp_local_open_msg, 0, conn->local_open_msg, conn->local_open_length));
ea_set_attr(&ea_l, EA_LITERAL_STORE_ADATA(&ea_bgp_remote_open_msg, 0, conn->remote_open_msg, conn->remote_open_length));
@ -807,10 +805,6 @@ bgp_conn_enter_established_state(struct bgp_conn *conn)
ea_set_attr(&ea_l, EA_LITERAL_EMBEDDED(&ea_bgp_remote_open_msg_len, 0, conn->remote_open_length));
ea_l = ea_lookup(ea_l, 0, EALS_CUSTOM);
proto_journal_state_push(ea_l, &p->p, 0);
log("send to bmp locked %i (p %i), msg len loc %i rem %i", locking_stack.service, p->p.id, conn->local_open_length, conn->remote_open_length);
log("in bgp id %i, tx = %i, l= %i", p->p.id, ea_get_adata(ea_l, &ea_bgp_local_open_msg)->data, ea_get_int(ea_l, &ea_bgp_local_open_msg_len, 0));
//bmp_peer_up(proto_state_table->attrs[p->p.id], conn->local_open_msg, conn->local_open_length,
// conn->remote_open_msg, conn->remote_open_length);
#endif
}
@ -839,6 +833,7 @@ bgp_conn_leave_established_state(struct bgp_conn *conn, struct bgp_proto *p)
memcpy(to_ea.data, conn->notify_data, conn->notify_size);
ea_list *eal = proto_state_table->attrs[p->p.id];
ea_set_attr(&eal, EA_LITERAL_STORE_PTR(&ea_bgp_conn, 0, p->conn));
ea_set_attr(&eal, EA_LITERAL_STORE_ADATA(&ea_bgp_close_bmp, 0, &to_ea.closing_struct, sizeof(to_ea)));
ea_set_attr(&eal, EA_LITERAL_EMBEDDED(&ea_bgp_close_bmp_set, 0, 1));
proto_journal_state_push(eal, &p->p, 0);
@ -1771,6 +1766,7 @@ bgp_start(struct proto *P)
ea_set_attr(&eal, EA_LITERAL_EMBEDDED(&ea_bgp_rem_id, 0, p->remote_id));
ea_set_attr(&eal, EA_LITERAL_EMBEDDED(&ea_bgp_loc_as, 0, p->local_as));
ea_set_attr(&eal, EA_LITERAL_EMBEDDED(&ea_bgp_rem_as, 0, p->remote_as));
ea_set_attr(&eal, EA_LITERAL_STORE_ADATA(&ea_bgp_rem_ip, 0, &p->remote_ip, sizeof(ip_addr)));
proto_journal_state_push(eal, &p->p, 1);
/* Lock all channels when in GR recovery mode */
@ -2613,7 +2609,6 @@ bgp_state_to_eattr(struct proto *P, ea_list *l, eattr *attributes)
{
struct bgp_proto *p = (struct bgp_proto *) P;
attributes[l->count++] = EA_LITERAL_EMBEDDED(&ea_bgp_rem_id, 0, p->remote_id);
attributes[l->count++] = EA_LITERAL_EMBEDDED(&ea_bgp_rem_as, 0, p->remote_as);
attributes[l->count++] = EA_LITERAL_STORE_ADATA(&ea_bgp_rem_ip, 0, &p->remote_ip, sizeof(ip_addr));
attributes[l->count++] = EA_LITERAL_EMBEDDED(&ea_bgp_peer_type, 0, p->cf->peer_type);
attributes[l->count++] = EA_LITERAL_EMBEDDED(&ea_bgp_loc_as, 0, p->local_as);

View File

@ -131,7 +131,6 @@ bgp_estimate_add_path(struct bgp_proto *p, byte *pkt, uint len)
BGP_TRACE(D_PACKETS, "MRT processing noticed invalid packet");
return 0;
}
return c->add_path_rx;
}
@ -1608,7 +1607,6 @@ bgp_encode_nlri_ip4(struct bgp_write_state *s, struct bgp_bucket *buck, byte *bu
while (!EMPTY_LIST(buck->prefixes) && (size >= BGP_NLRI_MAX))
{
log("list_length(buck->prefixes) %i size %i >= BGP_NLRI_MAX %i", list_length(&buck->prefixes), size, BGP_NLRI_MAX);
struct bgp_prefix *px = HEAD(buck->prefixes);
struct net_addr_ip4 *net = NET_PTR_IP4(&px->ni->addr[0]);
@ -2333,7 +2331,6 @@ bgp_create_ip_reach(struct bgp_write_state *s, struct bgp_bucket *buck, byte *bu
lr = bgp_encode_nlri(s, buck, buf+4+la, end);
log("buf+4+la+lr = %i %i %i %i", buf, 4, la, lr);
return buf+4+la+lr;
}
@ -2341,7 +2338,6 @@ bgp_create_ip_reach(struct bgp_write_state *s, struct bgp_bucket *buck, byte *bu
static byte *
bgp_create_mp_reach(struct bgp_write_state *s, struct bgp_bucket *buck, byte *buf, byte *end)
{
log("in reach check there is no next hop %i", s->mp_next_hop);
ASSERT_DIE((s->ptx->bmp) || (s->ptx->withdraw_bucket != buck));
/*
@ -2369,12 +2365,11 @@ bgp_create_mp_reach(struct bgp_write_state *s, struct bgp_bucket *buck, byte *bu
put_u16(buf+6, 0); /* Will be fixed later */
put_af3(buf+8, s->ptx->c->afi);
byte *pos = buf+11;
log("in reach next hop %i", s->mp_next_hop);
/* Encode attributes to temporary buffer */
byte *abuf = alloca(MAX_ATTRS_LENGTH);
la = bgp_encode_attrs(s, buck->eattrs, abuf, abuf + MAX_ATTRS_LENGTH);
log("in reach next hop %i encode", s->mp_next_hop);
if (la < 0)
{
/* Attribute list too long */
@ -2383,7 +2378,6 @@ bgp_create_mp_reach(struct bgp_write_state *s, struct bgp_bucket *buck, byte *bu
}
/* Encode the next hop */
log("before next hop fn check there is no next hop %i", s->mp_next_hop);
lh = bgp_encode_next_hop(s, s->mp_next_hop, pos+1);
*pos = lh;
pos += 1+lh;
@ -2495,9 +2489,6 @@ bgp_create_update_bmp(ea_list *channel_ea, struct bgp_proto *bgp_p, byte *buf, s
.mpls = c->desc->mpls,
.ignore_non_bgp_attrs = 1,
};
log("next hop %x (0)", s.mp_next_hop);
log("check there is no next hop %i, channel fce %i", s.mp_next_hop, c->desc->encode_next_hop);
log("desc %x, bgp_af_desc bgp_af_table %x", c->desc, bgp_af_table);
if (!update)
{
@ -2528,7 +2519,6 @@ bgp_bmp_prepare_bgp_hdr(byte *buf, const u16 msg_size, const u8 msg_type)
byte *
bgp_bmp_encode_rte(ea_list *c, struct bgp_proto *bgp_p, byte *buf, const struct rte *new)
{
// struct bgp_proto *p = (void *) c->c.proto;
byte *pkt = buf + BGP_HEADER_LENGTH;
uint ea_size = new->attrs ? (sizeof(ea_list) + new->attrs->count * sizeof(eattr)) : 0;
@ -2549,7 +2539,7 @@ bgp_bmp_encode_rte(ea_list *c, struct bgp_proto *bgp_p, byte *buf, const struct
px->src = tmp_allocz(sizeof(struct rte_src));
memcpy(px->src, new->src, sizeof(struct rte_src));
px->ni = NET_TO_INDEX(new->net);
add_tail(&b->prefixes, &px->buck_node); // why was there _xx ?
add_tail(&b->prefixes, &px->buck_node);
byte *end = bgp_create_update_bmp(c, bgp_p, pkt, b, !!new->attrs);

View File

@ -283,12 +283,8 @@ bmp_schedule_tx_packet(struct bmp_proto *p, const byte *payload, const size_t si
struct bmp_data_node *tx_data = mb_allocz(p->tx_mem_pool, sizeof (struct bmp_data_node));
tx_data->data = mb_allocz(p->tx_mem_pool, size);
log("tx_data %x data %x", tx_data, tx_data->data);
memcpy(tx_data->data, payload, size);
tx_data->data_size = size;
log("schedule tx packet size %i", size);
if (size > 5000)
log("too big");
add_tail(&p->tx_queue, &tx_data->n);
if (sk_tx_buffer_empty(p->sk)
@ -301,7 +297,6 @@ bmp_schedule_tx_packet(struct bmp_proto *p, const byte *payload, const size_t si
static void
bmp_fire_tx(void *p_)
{
log("fire");
struct bmp_proto *p = p_;
if (!p->started)
@ -317,7 +312,6 @@ bmp_fire_tx(void *p_)
struct bmp_data_node *tx_data_next;
WALK_LIST_DELSAFE(tx_data, tx_data_next, p->tx_queue)
{
log("WALK_LIST_DELSAFE");
if (tx_data->data_size > p->sk->tbsize)
{
sk_set_tbsize(p->sk, tx_data->data_size);
@ -329,8 +323,6 @@ bmp_fire_tx(void *p_)
rem_node((node *) tx_data);
mb_free(tx_data);
log("walk ifs for returns socket %i, data size %i", p->sk, data_size);
if (sk_send(p->sk, data_size) <= 0)
return;
@ -511,7 +503,6 @@ static struct bmp_table *
bmp_add_table(struct bmp_proto *p, rtable *tab)
{
struct bmp_table *bt = mb_allocz(p->p.pool, sizeof(struct bmp_table));
log("adding table %x", bt);
bt->table = tab;
bt->p = p;
rt_lock_table(bt->table);
@ -519,14 +510,12 @@ bmp_add_table(struct bmp_proto *p, rtable *tab)
HASH_INSERT(p->table_map, HASH_TABLE, bt);
bt->event.data = bt;
log("name %s post %i pre %i", p->p.name, p->monitoring_rib.in_post_policy, p->monitoring_rib.in_pre_policy);
//if(p->monitoring_rib.in_post_policy){
bt->event.hook = bmp_check_routes;
bt->out_req = (struct rt_export_request) {
.name = mb_sprintf(p->p.pool, "%s.exp_request", p->p.name),
.r = (struct lfjour_recipient) {
.target = proto_event_list(&p->p),//&p->p.loop->event_list,
.target = proto_event_list(&p->p),
.event = &bt->event,
},
.pool = p->p.pool,
@ -534,55 +523,34 @@ bmp_add_table(struct bmp_proto *p, rtable *tab)
//.dump = channel_dump_export_req, TODO: this will crash on `dump tables` from CLI
.fed = bmp_feed_end,
};
rt_export_subscribe(tab, all, &bt->out_req);//}
rt_export_subscribe(tab, all, &bt->out_req);
return bt;
}
/*static void
bmp_remove_table(struct bmp_proto *p, struct bmp_table *bt)
{
log("removing table - bmp table %x chann %x, (subscr %x uc %x)",bt, bt->channel, &bt->channel->roa_subscriptions, &bt->uc);
channel_set_state(bt->channel, CS_STOP);
channel_set_state(bt->channel, CS_DOWN);
proto_remove_channel(&p->p, bt->channel);
HASH_REMOVE(p->table_map, HASH_TABLE, bt);
rt_unlock_table(bt->table);
bt->table = NULL;
log("free table %x", bt);
mb_free(bt);
}*/
static void
bmp_remove_table_rt(struct bmp_proto *p, struct bmp_table *bt) // still falling in krt routes... The thread synchronization was maybe not needed...
bmp_remove_table(struct bmp_proto *p, struct bmp_table *bt)
{
log("removing table - bmp table %x chann %x, (subscr %x uc %x)",bt, bt->channel, &bt->channel->roa_subscriptions, &bt->uc);
if (bt->channel)
{
channel_set_state(bt->channel, CS_STOP);
channel_set_state(bt->channel, CS_DOWN);
}
//if (p->monitoring_rib.in_post_policy)
rt_export_unsubscribe(all, &bt->out_req);
//else
//rt_feeder_unsubscribe(&bt->in_req);
HASH_REMOVE(p->table_map, HASH_TABLE, bt);
rt_unlock_table(bt->table);
bt->table = NULL;
log("free table %x", bt);
mb_free(bt);
//log("free done");
}
/* Take a reference on a BMP table; paired with bmp_unlock_table() which
 * frees the table when the count drops. NOTE(review): uc is incremented
 * non-atomically here while bmp_unlock_table uses atomic CAS -- confirm
 * all lock/unlock calls happen on the same loop/thread. */
static inline void bmp_lock_table(struct bmp_proto *p UNUSED, struct bmp_table *bt)
{ bt->uc++; }
struct bmp_table *bmp_get_table(struct bmp_proto *p, rtable *tab)
struct bmp_table *
bmp_get_table(struct bmp_proto *p, rtable *tab)
{
struct bmp_table *bt = bmp_find_table(p, tab);
if (bt)
@ -607,7 +575,7 @@ struct bmp_table *bmp_get_table(struct bmp_proto *p, rtable *tab)
static inline void bmp_unlock_table(struct bmp_proto *p, struct bmp_table *bt)
{ atomic_int i = 1;
if (atomic_compare_exchange_strong_explicit(&bt->uc, &i, 0, memory_order_acq_rel, memory_order_relaxed))
bmp_remove_table_rt(p, bt);
bmp_remove_table(p, bt);
else
bt->uc--;
}
@ -620,9 +588,6 @@ static inline void bmp_unlock_table(struct bmp_proto *p, struct bmp_table *bt)
/* Build the stream hash key: the AFI with the policy flag bit folded in,
 * so pre- and post-policy streams for the same AFI get distinct keys. */
static inline u32 bmp_stream_key(u32 afi, bool policy)
{ return afi ^ (policy ? BMP_STREAM_KEY_POLICY : 0); }
//static inline u32 bmp_stream_afi(struct bmp_stream *bs)
//{ return bs->key & ~BMP_STREAM_KEY_POLICY; }
/* True if this stream monitors post-policy routes (policy bit set in key). */
static inline bool bmp_stream_policy(struct bmp_stream *bs)
{ return !!(bs->key & BMP_STREAM_KEY_POLICY); }
@ -631,14 +596,12 @@ bmp_find_stream(struct bmp_proto *p, const struct bgp_proto *bgp, u32 afi, bool
{
ea_list *bgp_attr = proto_state_table->attrs[bgp->p.id];
return HASH_FIND(p->stream_map, HASH_STREAM, bgp_attr, bmp_stream_key(afi, policy));
}
static struct bmp_stream *
bmp_add_stream(struct bmp_proto *p, struct bmp_peer *bp, u32 afi, bool policy, rtable *tab, ea_list *sender, int in_pre_policy)
{
struct bmp_stream *bs = mb_allocz(p->p.pool, sizeof(struct bmp_stream));
log("add stream to p %i bs %x", p, bs);
bs->bgp = bp->bgp;
bs->key = bmp_stream_key(afi, policy);
@ -646,7 +609,6 @@ bmp_add_stream(struct bmp_proto *p, struct bmp_peer *bp, u32 afi, bool policy, r
HASH_INSERT(p->stream_map, HASH_STREAM, bs);
bs->table = bmp_get_table(p, tab);
//bmp_lock_table(p, bs->table);
bs->sender = sender;
bs->sync = false;
@ -658,7 +620,6 @@ bmp_add_stream(struct bmp_proto *p, struct bmp_peer *bp, u32 afi, bool policy, r
static void
bmp_remove_stream(struct bmp_proto *p, struct bmp_stream *bs)
{
log("go to unlock table");
rem_node(&bs->n);
HASH_REMOVE(p->stream_map, HASH_STREAM, bs);
@ -682,7 +643,6 @@ bmp_find_peer(struct bmp_proto *p, ea_list *bgp_attr)
static struct bmp_peer *
bmp_add_peer(struct bmp_proto *p, ea_list *bgp_attr)
{
log("domain is locked %i, service is locked %i", DG_IS_LOCKED(p->p.pool->domain), locking_stack.service);
struct bmp_peer *bp;
if (DG_IS_LOCKED(p->p.pool->domain))
bp = mb_allocz(p->p.pool, sizeof(struct bmp_peer));
@ -692,7 +652,6 @@ bmp_add_peer(struct bmp_proto *p, ea_list *bgp_attr)
bp = mb_allocz(p->p.pool, sizeof(struct bmp_peer));
DG_UNLOCK(p->p.pool->domain);
}
log("bmp_peer %x", bp);
bp->bgp = bgp_attr;
init_list(&bp->streams);
@ -701,36 +660,22 @@ bmp_add_peer(struct bmp_proto *p, ea_list *bgp_attr)
int proto_id = ea_get_int(bgp_attr, &ea_proto_id, 0);
struct channel_attrs *chan_attr;
log("before while id %i, eattrs %i", proto_id, proto_state_table->channels_attrs[proto_id]);
WALK_TLIST(channel_attrs, chan_attr, &proto_state_table->channels_attrs[proto_id])
{
log("chan_attr in bmp_add_peer %i, attrs %i", chan_attr, chan_attr->attrs);
rtable *ch_table = (rtable *) ea_get_ptr(chan_attr->attrs, &ea_rtable, 0);
const char *name = ea_get_adata(chan_attr->attrs, &ea_name)->data;
int in_keep = ea_get_int(chan_attr->attrs, &ea_in_keep, 0);
log("name %s ch_table %i first if %i second if %i in keep %i", name, ch_table, p->monitoring_rib.in_pre_policy, p->monitoring_rib.in_post_policy, in_keep);
if (p->monitoring_rib.in_pre_policy && ch_table)
{
log("in pre");
if (in_keep == RIK_PREFILTER)
{
log("add stream in keep %i", in_keep);
bmp_add_stream(p, bp, ea_get_int(chan_attr->attrs, &ea_bgp_afi, 0), false, ch_table, chan_attr->attrs, 1);
}
else
log(L_WARN "%s: Try to do pre policy with disabled import tables (channel %s)", p->p.name, name);
}
if (p->monitoring_rib.in_post_policy && ch_table)
{
log("in post");
bmp_add_stream(p, bp, ea_get_int(chan_attr->attrs, &ea_bgp_afi, 0), true, ch_table, chan_attr->attrs, 0);
}
}
return bp;
@ -741,7 +686,7 @@ bmp_remove_peer(struct bmp_proto *p, struct bmp_peer *bp)
{
struct bmp_stream *bs, *bs_next;
WALK_LIST_DELSAFE(bs, bs_next, bp->streams)
bmp_remove_stream(p, bs);//TODO//TODO//TODO
bmp_remove_stream(p, bs);
HASH_REMOVE(p->peer_map, HASH_PEER, bp);
@ -783,16 +728,6 @@ bmp_peer_up_(struct bmp_proto *p, ea_list *bgp_attr, bool sync,
}
}
//void
//bmp_peer_up(ea_list *bgp,
// const byte *tx_open_msg, uint tx_open_length,
// const byte *rx_open_msg, uint rx_open_length)
//{
// struct bmp_proto *p; node *n;
// WALK_LIST2(p, n, bmp_proto_list, bmp_node)
// bmp_peer_up_(p, bgp, true, tx_open_msg, tx_open_length, rx_open_msg, rx_open_length);
//}
static void
bmp_peer_init(struct bmp_proto *p, ea_list *bgp_attr)
{
@ -806,8 +741,6 @@ bmp_peer_init(struct bmp_proto *p, ea_list *bgp_attr)
conn->remote_open_msg, conn->remote_open_length);
}
static const struct birdsock *
bmp_get_birdsock(ea_list *bgp)
{
@ -876,7 +809,7 @@ bmp_is_peer_global_instance(ea_list *bgp)
int peer_type = ea_get_int(bgp, &ea_bgp_peer_type, 0);
int local_as = ea_get_int(bgp, &ea_bgp_loc_as, 0);
int remote_as = ea_get_int(bgp, &ea_bgp_rem_as, 0);
log("bmp_is_peer_global_instance loc as %i rem as %i peer_type %i (constants BGP_PT_EXTERNAL %i, BGP_PT_INTERNAL %i)", local_as, remote_as, peer_type, BGP_PT_EXTERNAL, BGP_PT_INTERNAL);
return (peer_type != BGP_PT_EXTERNAL &&
peer_type != BGP_PT_INTERNAL)
? (local_as != remote_as)
@ -898,9 +831,9 @@ bmp_send_peer_up_notif_msg(struct bmp_proto *p, ea_list *bgp,
const int rem_as = ea_get_int(bgp, &ea_bgp_rem_as, 0);
const int rem_id = ea_get_int(bgp, &ea_bgp_rem_id, 0);
log("bmp_send_peer_up_notif_msg rem_as %i rem_id %i", rem_as, rem_id);
const bool is_global_instance_peer = bmp_is_peer_global_instance(bgp);
buffer payload = bmp_buffer_alloc(p->buffer_mpool, DEFAULT_MEM_BLOCK_SIZE);
bmp_peer_up_notif_msg_serialize(&payload, is_global_instance_peer,
rem_as, rem_id, 1,
sk->saddr, sk->daddr, sk->sport, sk->dport, tx_data, tx_data_size,
@ -915,7 +848,6 @@ bmp_route_monitor_put_update(struct bmp_proto *p, struct bmp_stream *bs, const b
struct bmp_data_node *upd_msg = mb_allocz(p->update_msg_mem_pool,
sizeof (struct bmp_data_node));
upd_msg->data = mb_alloc(p->update_msg_mem_pool, length);
log("upd_msg %x, data %x", upd_msg, upd_msg->data);
memcpy(upd_msg->data, data, length);
upd_msg->data_size = length;
@ -938,8 +870,7 @@ bmp_route_monitor_put_update(struct bmp_proto *p, struct bmp_stream *bs, const b
static void
bmp_route_monitor_notify(struct bmp_proto *p, struct bgp_proto *bgp_p, struct bmp_stream *bs, const struct rte *new)
{
log("notified");
bmp_route_monitor_end_of_rib(p, bs);
bmp_route_monitor_end_of_rib(p, bs);
byte buf[BGP_MAX_EXT_MSG_LENGTH];
byte *end = bgp_bmp_encode_rte(bs->sender, bgp_p, buf, new);
@ -1116,7 +1047,6 @@ bmp_send_termination_msg(struct bmp_proto *p,
"Failed to send BMP termination message"
);
log("bmp_send_termination_msg free %x", stream);
bmp_buffer_free(&stream);
}
@ -1141,23 +1071,8 @@ bmp_preexport(struct channel *C UNUSED, rte *e)
return 1;
}
/* Return 1 if the route carries a BGP next-hop attribute, 0 otherwise.
 * Linear scan over the route's eattr list comparing each attribute's
 * class against the BGP next-hop class.
 * (This helper is deleted entirely by this commit.) */
int
bgp_next_hop_present(const rte *n)
{
for (int i = 0; i < n->attrs->count; i++)
{
eattr a = n->attrs->attrs[i];
const struct ea_class *class = ea_class_find(a.id);
log("ea class %s", class->name);  /* debug trace */
if (class == bgp_next_hop_ea_class)
return 1;
}
log("BAD RTE, BAD.");  /* debug trace: route unexpectedly lacks a next hop */
return 0;
}
static void
solve_for_post_and_pre(struct bmp_proto *p, const rte *new, const rte *old)
bmp_split_policy(struct bmp_proto *p, const rte *new, const rte *old)
{
rte loc = *(new ?: old);
@ -1166,8 +1081,6 @@ solve_for_post_and_pre(struct bmp_proto *p, const rte *new, const rte *old)
struct bgp_channel *src_ch = SKIP_BACK(struct bgp_channel, c.in_req, loc.sender->req);
ASSERT_DIE(src_ch->c.proto == rte_proto); /* No pipes supported for now */
log("solve_for_post_and_pre %s", rte_proto->name);
/* Ignore non-BGP routes */
if (rte_proto->proto != &proto_bgp)
return;
@ -1185,16 +1098,15 @@ solve_for_post_and_pre(struct bmp_proto *p, const rte *new, const rte *old)
{
/* The attributes are actually different, announce the change */
if (ea_same(new_attrs, old_attrs))
bug("Two attribute sets are same in the attribute cache.");
bug("Two attribute sets are same in the attribute cache.");
struct bmp_stream *bs = bmp_find_stream(p, bgp, src_ch->afi, false);
if (bs)
{
log("s true bmp stream found feed, pre policy %i proto %s", bs->in_pre_policy, p->p.name);
if (bmp_find_peer(p, proto_state_table->attrs[bgp->p.id]) == NULL)
bug("not implemented");
if (bmp_find_peer(p, proto_state_table->attrs[bgp->p.id]) == NULL)
bug("Bmp got a route which belongs to a channel we do not know yet. It is more complicated state and needs to be implemented."); //TODO
bmp_route_monitor_notify(p, bgp, bs, &loc);
bmp_route_monitor_notify(p, bgp, bs, &loc);
}
}
}
@ -1216,11 +1128,10 @@ solve_for_post_and_pre(struct bmp_proto *p, const rte *new, const rte *old)
struct bmp_stream *bs = bmp_find_stream(p, bgp, src_ch->afi, true);
if (bs)
{
log("s true bmp stream found feed, pre policy %i proto %s", bs->in_pre_policy, p->p.name);
if (bmp_find_peer(p, proto_state_table->attrs[bgp->p.id]) == NULL)
bug("not implemented");
if (bmp_find_peer(p, proto_state_table->attrs[bgp->p.id]) == NULL)
bug("Bmp got a route which belongs to a channel we do not know yet. It is more complicated state and needs to be implemented."); //TODO
bmp_route_monitor_notify(p, bgp, bs, &loc);
bmp_route_monitor_notify(p, bgp, bs, &loc);
}
}
}
@ -1229,42 +1140,33 @@ solve_for_post_and_pre(struct bmp_proto *p, const rte *new, const rte *old)
static void
bmp_check_routes(void *bt_)
{
log("bmp_check_routes");
struct bmp_table *bt = (struct bmp_table *)bt_;
struct bmp_proto *p = bt->p;
RT_EXPORT_WALK(&bt->out_req, u) //const struct rt_export_union *_u;
RT_EXPORT_WALK(&bt->out_req, u)
{
log("feeder %x rte ptr %x", &bt->out_req, u);
switch (u->kind)
{
case RT_EXPORT_STOP:
bug("Main table export stopped");
case RT_EXPORT_FEED:
log("export feed");
uint oldpos = 0;
while ((oldpos < u->feed->count_routes) && !(u->feed->block[oldpos].flags & REF_OBSOLETE))
oldpos++;
/* Send updates one after another */
log("oldpos %i", oldpos);
for (uint i = 0; i < oldpos; i++)
{
rte *new = &u->feed->block[i];
log("pre policy %i rte attr %x", p->monitoring_rib.in_pre_policy, new->attrs);
solve_for_post_and_pre(p, new, NULL);
bmp_split_policy(p, new, NULL);
}
break;
case RT_EXPORT_UPDATE:
log("export update");
solve_for_post_and_pre(p, u->update->new, u->update->old);
bmp_split_policy(p, u->update->new, u->update->old);
break;
}
}
log("end of notify fce");
}
static void
@ -1274,7 +1176,6 @@ bmp_feed_end(struct rt_export_request *req)
struct bmp_proto *p = bt->p;
log("bmp table found");
/*
* Unsynced streams are added in one moment during BMP session establishment,
* therefore we can assume that all unsynced streams (for given channel)
@ -1311,7 +1212,6 @@ bmp_startup(struct bmp_proto *p)
TRACE(D_EVENTS, "BMP session established");
log("bmp startup %s %i", p->p.name, p->p.id);
proto_notify_state(&p->p, PS_UP);
/* Send initiation message */
@ -1324,20 +1224,17 @@ bmp_startup(struct bmp_proto *p)
for (u32 i = 0; i < proto_state_table->length; i++)
{
ea_list *proto_attr = proto_state_table->attrs[i];
log("startup proto attr %i", proto_attr);
if (proto_attr == NULL)
continue;
struct protocol *proto = (struct protocol *) ea_get_ptr(proto_attr, &ea_protocol_type, 0);
const int state = ea_get_int(proto_attr, &ea_state, 0);
log("proto bgp and up %i %i", proto != &proto_bgp, state != PS_UP);
if (proto != &proto_bgp || state != PS_UP)
continue;
bmp_peer_init(p, proto_attr);
}
/*struct proto *peer;
WALK_LIST(peer, proto_list)
if ((peer->proto != &proto_bgp) && (peer->proto_state == PS_UP))
bmp_peer_init(p, (struct bgp_proto *) peer);*/
}
/**
@ -1352,20 +1249,15 @@ bmp_down(struct bmp_proto *p)
{
ASSERT(p->started);
p->started = false;
log("_!p->peer_map.count (%i) && !p->stream_map.count (%i)&& !p->table_map.count(%i)", p->peer_map.count, p->stream_map.count, p->table_map.count);
TRACE(D_EVENTS, "BMP session closed");
/* Unregister existing peer structures */
HASH_WALK_DELSAFE(p->peer_map, next, bp)
{
log("bmp_remove_peer(p %x, bp %x)", p, bp);
bmp_remove_peer(p, bp);
}
HASH_WALK_END;
/* Removing peers should also remove all streams and tables */ //TODO
log("!p->peer_map.count (%i) && !p->stream_map.count (%i)&& !p->table_map.count(%i)", p->peer_map.count, p->stream_map.count, p->table_map.count);
ASSERT(!p->peer_map.count && !p->stream_map.count && !p->table_map.count);
}
@ -1486,7 +1378,6 @@ fc_for_bmp_recipient(void *_p)
ASSERT_DIE(birdloop_inside(p->p.loop));
//log("received update, locked %i", locking_stack.service);
struct lfjour_item *last_up;
struct proto_pending_update *pupdate;
while (last_up = lfjour_get(&p->proto_state_reader))
@ -1495,21 +1386,12 @@ fc_for_bmp_recipient(void *_p)
const byte *tx_open_msg = ea_get_adata(pupdate->proto_attr, &ea_bgp_local_open_msg)->data;
int id = ea_get_int(pupdate->proto_attr, &ea_proto_id, 0);
//log("id %i, tx = %i, l %i", id, tx_open_msg, ea_get_int(pupdate->proto_attr, &ea_bgp_local_open_msg_len, 0)); //FIXME why WHY are bmp eattrs able to give not null pointer?
if (ea_get_int(pupdate->proto_attr, &ea_bgp_local_open_msg_len, 0))
{
struct protocol *proto = (struct protocol *) ea_get_ptr(pupdate->proto_attr, &ea_protocol_type, 0);
//if (proto != &proto_bgp)
//{
//log("protocol %s is bgp %i", proto->name, proto == &proto_bgp);
//bug("wrong protocol");
//}
//int id = ea_get_int(pupdate->proto_attr, &ea_proto_id, 0);
const byte *rx_open_msg = ea_get_adata(pupdate->proto_attr, &ea_bgp_remote_open_msg)->data;
int l_len = ea_get_int(pupdate->proto_attr, &ea_bgp_remote_open_msg_len, 0);
int r_len = ea_get_int(pupdate->proto_attr, &ea_bgp_remote_open_msg_len, 0);
bmp_peer_up_(p, proto_state_table->attrs[id], true, tx_open_msg, l_len, rx_open_msg, r_len);
}
else if (ea_get_int(pupdate->proto_attr, &ea_bgp_close_bmp_set, 0))
{
@ -1518,7 +1400,6 @@ fc_for_bmp_recipient(void *_p)
closing->err_class, closing->err_code, closing->err_subcode, closing->data, closing->length);
}
lfjour_release(&p->proto_state_reader, last_up);
}
}
@ -1529,7 +1410,6 @@ create_bmp_recipient(struct bmp_proto *p)
struct lfjour_recipient *r = &p->proto_state_reader;
r->event = &p->proto_state_changed;
*r->event = (event) { .hook = fc_for_bmp_recipient, .data = p };
log("p->p.loop %x", p->p.loop);
r->target = birdloop_event_list(p->p.loop);
LOCK_DOMAIN(rtable, proto_journal_domain);
@ -1542,7 +1422,6 @@ create_bmp_recipient(struct bmp_proto *p)
static struct proto *
bmp_init(struct proto_config *CF)
{
log("start init locked %i", locking_stack.service);
struct proto *P = proto_new(CF);
struct bmp_proto *p = (void *) P;
struct bmp_config *cf = (void *) CF;
@ -1558,8 +1437,6 @@ bmp_init(struct proto_config *CF)
strcpy(p->sys_name, cf->sys_name);
p->monitoring_rib.in_pre_policy = cf->monitoring_rib_in_pre_policy;
p->monitoring_rib.in_post_policy = cf->monitoring_rib_in_post_policy;
//create_bmp_recipient(p); //should be here, but.
log("new proto created locked %i", locking_stack.service);
return P;
}
@ -1571,7 +1448,6 @@ bmp_init(struct proto_config *CF)
static int
bmp_start(struct proto *P)
{
log("start locked %i", locking_stack.service);
struct bmp_proto *p = (void *) P;
p->buffer_mpool = rp_new(P->pool, proto_domain(&p->p), "BMP Buffer");
@ -1594,7 +1470,6 @@ bmp_start(struct proto *P)
tm_start(p->connect_retry_timer, CONNECT_INIT_TIME);
log("end of start locked %i", locking_stack.service);
if (p->lf_jour_inited == 0)
create_bmp_recipient(p);
return PS_START;
@ -1680,7 +1555,6 @@ bmp_show_proto_info(struct proto *P)
struct protocol proto_bmp = {
.name = "BMP",
.template = "bmp%d",
//.class = PROTOCOL_BMP, looks like there are no classes for protocols anymore
.proto_size = sizeof(struct bmp_proto),
.config_size = sizeof(struct bmp_config),
.postconfig = bmp_postconfig,