
Merge branch 'thread-next' of gitlab.nic.cz:labs/bird into thread-next

Author: Katerina Kubecova
Date:   2024-12-11 09:27:16 +01:00
Commit: 3f62ec5477
24 changed files with 1156 additions and 1027 deletions

View File

@@ -3844,6 +3844,15 @@ by default and have to be enabled during installation by the configure option
routes (in <ref id="bgp-import-table" name="BGP import tables">) and post-policy
routes (in regular routing tables). All BGP protocols are monitored automatically.
<sect1>Configuration (incomplete)
<label id="bmp-config">
<p><descrip>
<tag><label id="bmp-tx-buffer-limit">tx buffer limit <m/number/</tag>
The amount of data, in megabytes, that we queue before declaring the session
stuck and restarting it. Default value: 1024 (effectively 1 gigabyte).
</descrip>
<sect1>Example
<label id="bmp-exam">
@@ -3857,6 +3866,9 @@ protocol bmp {
# Monitor accepted routes (passed import filters)
monitoring rib in post_policy;
# Allow only 64M of pending data
tx buffer limit 64;
}
</code>

View File

@@ -38,6 +38,27 @@ void lfuc_unlock_deferred(struct deferred_call *dc)
off / s; \
})
static void
lfjour_return_cleanup_token(struct lfjour *j, struct lfjour_recipient *r)
{
/* Eligible for requesting cleanup */
if (!(atomic_fetch_and_explicit(&r->recipient_flags, ~LFJOUR_R_LAST_RUNNER, memory_order_acq_rel) & LFJOUR_R_LAST_RUNNER))
return;
/* Return the lastrunner's token. */
u64 pings = atomic_fetch_sub_explicit(&j->issued_tokens, 1, memory_order_acq_rel);
/* Is this the last token? */
if (pings > 1)
return;
ASSERT_DIE(pings != 0);
/* No more cleanup tokens issued, request cleanup. */
lfjour_schedule_cleanup(j);
}
struct lfjour_item *
lfjour_push_prepare(struct lfjour *j)
{
@@ -228,7 +249,7 @@ void lfjour_release(struct lfjour_recipient *r, const struct lfjour_item *it)
/* Releasing this export for cleanup routine */
if (pos + 1 == end)
{
{
lfjour_debug("lfjour(%p)_release(recipient=%p) of %p, seq=%lu (end)",
j, r, r->cur, r->cur->seq);
}
@@ -242,7 +263,7 @@ void lfjour_release(struct lfjour_recipient *r, const struct lfjour_item *it)
/* The last block may be available to free */
if ((pos + 1 == end) || last && (last_block != block))
lfjour_schedule_cleanup(j);
lfjour_return_cleanup_token(j, r);
r->first_holding_seq = 0;
r->cur = NULL;
@@ -299,6 +320,13 @@ lfjour_register(struct lfjour *j, struct lfjour_recipient *r)
ASSERT_DIE(!r->cur);
lfjour_recipient_add_tail(&j->recipients, r);
if (j->issued_tokens < j->max_tokens)
{
/* Cleanup hook does not run, so we are sure j->issued_tokens is not increasing */
atomic_fetch_add_explicit(&j->issued_tokens, 1, memory_order_acq_rel);
atomic_fetch_or_explicit(&r->recipient_flags, LFJOUR_R_LAST_RUNNER, memory_order_acq_rel);
}
}
void
@@ -311,7 +339,7 @@ lfjour_unregister(struct lfjour_recipient *r)
lfjour_release(r, r->cur);
lfjour_recipient_rem_node(&j->recipients, r);
lfjour_schedule_cleanup(j);
lfjour_return_cleanup_token(j, r);
}
static inline void lfjour_cleanup_unlock_helper(struct domain_generic **dg)
@@ -320,6 +348,110 @@ static inline void lfjour_cleanup_unlock_helper(struct domain_generic **dg)
DG_UNLOCK(*dg);
}
static bool
lfjour_issue_cleanup_token(struct lfjour *j, struct lfjour_recipient* rec)
{
/* This function keeps the invariant that the total number of issued
* LFJOUR_R_LAST_RUNNER flags is the same as j->issued_tokens.
*
* Returns true if the token was successfully issued.
*/
/* The journal is not empty and the recipient is on seq_max,
* so it won't call cleanup */
const struct lfjour_item *last = atomic_load_explicit(&rec->last, memory_order_acquire);
if (last && last->seq == (j->next_seq - 1))
return false;
/* Take a token from the pile */
if (atomic_fetch_add_explicit(&j->issued_tokens, 1, memory_order_acq_rel) >= j->max_tokens)
{
/* No more tokens to issue, already at max */
atomic_fetch_sub_explicit(&j->issued_tokens, 1, memory_order_acq_rel);
return false;
}
/* Trying to give the token */
if (atomic_fetch_or_explicit(&rec->recipient_flags, LFJOUR_R_LAST_RUNNER, memory_order_acq_rel) & LFJOUR_R_LAST_RUNNER)
{
/* This recipient already has the token */
atomic_fetch_sub_explicit(&j->issued_tokens, 1, memory_order_acq_rel);
return false;
}
/* Hasn't the recipient finished in between? (Recheck.) */
last = atomic_load_explicit(&rec->last, memory_order_acquire);
if (last && last->seq == (j->next_seq - 1))
{
/* It has! Retreat! */
if (atomic_fetch_and_explicit(&rec->recipient_flags, ~LFJOUR_R_LAST_RUNNER, memory_order_acq_rel) & LFJOUR_R_LAST_RUNNER)
{
/* The flag was still set, we managed to get it back safely.
* Now the recipient won't call the cleanup. */
atomic_fetch_sub_explicit(&j->issued_tokens, 1, memory_order_acq_rel);
return false;
}
/* The recipient was quick enough to finish the task and
* grab the flag. Now we are going to get pinged anyway
* so we consider the flag to be issued. */
}
/* Now the token is issued. */
return true;
}
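
To make the token accounting above easier to follow, here is a compact, self-contained sketch of the same protocol in plain C11 atomics. This is an editor's illustration, not BIRD code: the struct names and fields are simplified stand-ins, and only the issue/return paths are modeled (the seq_max recheck is omitted). It preserves the invariant stated above: the number of set LAST_RUNNER flags equals issued_tokens.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define R_LAST_RUNNER 2u

struct recipient { _Atomic unsigned flags; };
struct journal { _Atomic uint64_t issued_tokens; uint64_t max_tokens; };

/* Mirrors the core of lfjour_issue_cleanup_token() above. */
static bool issue_token(struct journal *j, struct recipient *rec)
{
  /* Take a token from the pile; put it back if the pile was exhausted. */
  if (atomic_fetch_add(&j->issued_tokens, 1) >= j->max_tokens)
  {
    atomic_fetch_sub(&j->issued_tokens, 1);
    return false;
  }
  /* Hand it over; put it back if the recipient already holds one. */
  if (atomic_fetch_or(&rec->flags, R_LAST_RUNNER) & R_LAST_RUNNER)
  {
    atomic_fetch_sub(&j->issued_tokens, 1);
    return false;
  }
  return true;
}

/* Mirrors lfjour_return_cleanup_token(); returns true iff the caller
 * returned the last outstanding token and must schedule the cleanup hook. */
static bool return_token(struct journal *j, struct recipient *rec)
{
  if (!(atomic_fetch_and(&rec->flags, ~R_LAST_RUNNER) & R_LAST_RUNNER))
    return false; /* this recipient held no token */
  return atomic_fetch_sub(&j->issued_tokens, 1) == 1;
}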
static void
lfjour_cleanup_done(struct lfjour *j)
{
/* Returning the cleanup token. */
u64 pings = atomic_fetch_sub_explicit(&j->issued_tokens, 1, memory_order_acq_rel);
/* Somebody else is also holding a token, nothing to do.
* They'll call us when the time comes. */
if (pings > 1)
return;
ASSERT_DIE(pings == 1);
/* We'll need to know whether the journal is empty or not. */
struct lfjour_item *first = atomic_load_explicit(&j->first, memory_order_acquire);
/* No recipients, schedule one more cleanup if not empty */
if (EMPTY_TLIST(lfjour_recipient, &j->recipients))
{
if (first)
lfjour_schedule_cleanup(j);
return;
}
/* There are some recipients but nothing to clean.
* Somebody is going to wake us, let's just throw
* the token to the crowd. */
if (!first)
{
WALK_TLIST(lfjour_recipient, r, &j->recipients)
/* If we failed to issue a cleanup token, there is definitely
* somebody holding it, no more tokens needed. */
if (! lfjour_issue_cleanup_token(j, r))
{
ASSERT_DIE(atomic_load_explicit(&j->issued_tokens, memory_order_acquire) > 0);
return;
}
}
/* We have to find some recipient which has not
* yet finished. When we find it, it will happily
* accept the token so we are done. */
WALK_TLIST(lfjour_recipient, r, &j->recipients)
if (lfjour_issue_cleanup_token(j, r))
return;
/* Nobody needs anything, but the journal is not empty.
* Run cleanup again. */
lfjour_schedule_cleanup(j);
}
static void
lfjour_cleanup_hook(void *_j)
{
@@ -328,15 +460,22 @@ lfjour_cleanup_hook(void *_j)
CLEANUP(lfjour_cleanup_unlock_helper) struct domain_generic *_locked = j->domain;
if (_locked) DG_LOCK(_locked);
/* We consider ourselves holding one token to keep recipients
* from scheduling us too early. */
atomic_fetch_add_explicit(&j->issued_tokens, 1, memory_order_acq_rel);
u64 min_seq = ~((u64) 0);
const struct lfjour_item *last_item_to_free = NULL;
struct lfjour_item *first = atomic_load_explicit(&j->first, memory_order_acquire);
if (!first)
{
lfjour_cleanup_done(j);
/* Nothing to cleanup, actually, just call the done callback */
ASSERT_DIE(EMPTY_TLIST(lfjour_block, &j->pending));
CALL(j->cleanup_done, j, 0, ~((u64) 0));
return;
}
@@ -345,8 +484,14 @@ lfjour_cleanup_hook(void *_j)
const struct lfjour_item *last = atomic_load_explicit(&r->last, memory_order_acquire);
if (!last)
/* No last export means that the channel has exported nothing since last cleanup */
{
/* No last export means that the channel has exported nothing since
* last cleanup. Let's try to give them the token,
* and we're done anyway. */
lfjour_issue_cleanup_token(j, r);
lfjour_cleanup_done(j);
return;
}
else if (min_seq > last->seq)
{
@@ -373,6 +518,9 @@ lfjour_cleanup_hook(void *_j)
memory_order_acq_rel, memory_order_acquire))
{
lfjour_debug("lfjour(%p)_cleanup(recipient=%p): store last=NULL", j, r);
/* The most-behind-one gets the cleanup token */
lfjour_issue_cleanup_token(j, r);
}
else
{
@@ -429,6 +577,8 @@ lfjour_cleanup_hook(void *_j)
first = next;
}
lfjour_cleanup_done(j);
CALL(j->cleanup_done, j, orig_first_seq, first ? first->seq : ~((u64) 0));
}
@@ -452,4 +602,5 @@ lfjour_init(struct lfjour *j, struct settle_config *scf)
.hook = lfjour_cleanup_hook,
.data = j,
};
j->max_tokens = 20;
}

View File

@@ -219,6 +219,7 @@ struct lfjour_recipient {
enum lfjour_recipient_flags {
LFJOUR_R_SEQ_RESET = 1, /* Signalling of sequence number reset */
LFJOUR_R_LAST_RUNNER = 2, /* Set if this recipient is supposed to ping cleanup hook */
};
/* Defines lfjour_recipient_list */
@@ -237,6 +238,8 @@ struct lfjour {
event announce_kick_event; /* Kicks announce_timer */
struct settle announce_timer; /* Announces changes to recipients */
event cleanup_event; /* Runs the journal cleanup routine */
u64 max_tokens; /* Maximum number of cleanup tokens to issue */
_Atomic u64 issued_tokens; /* Current count of issued tokens */
/* Callback on item removal from journal */
void (*item_done)(struct lfjour *, struct lfjour_item *);

View File

@@ -8,7 +8,8 @@
*/
#include "nest/bird.h"
#include "string.h"
#include "lib/macro.h"
#include "lib/string.h"
#include <errno.h>
@@ -631,3 +632,121 @@ char *lp_sprintf(linpool *p, const char *fmt, ...)
va_end(args);
return out;
}
static const u64 decadic_multiplier[] = {
1, 10, 100, 1000,
10000, 100000, 1000000, 10000000,
#if 0
100000000, 1000000000, 10000000000, 100000000000,
1000000000000, 10000000000000, 100000000000000, 1000000000000000,
10000000000000000, 100000000000000000, 1000000000000000000,
#endif
};
static const u64 decmul_limit[] = {
~0ULL / 1, ~0ULL / 10,
~0ULL / 100, ~0ULL / 1000,
~0ULL / 10000, ~0ULL / 100000,
~0ULL / 1000000, ~0ULL / 10000000,
#if 0
~0ULL / 100000000, ~0ULL / 1000000000,
~0ULL / 10000000000, ~0ULL / 100000000000,
~0ULL / 1000000000000, ~0ULL / 10000000000000,
~0ULL / 100000000000000, ~0ULL / 1000000000000000,
~0ULL / 10000000000000000, ~0ULL / 100000000000000000,
~0ULL / 1000000000000000000,
#endif
};
STATIC_ASSERT(sizeof decadic_multiplier == sizeof decmul_limit);
char *fmt_order(u64 value, uint decimals, u64 kb_threshold)
{
bool too_big = (value + 512 < 512ULL);
u64 mv = value;
uint magnitude = 0;
while (mv > kb_threshold)
{
magnitude++;
mv = (mv + (too_big ? 0 : 512)) / 1024;
}
uint shift = magnitude * 10;
/* The trivial case */
if (magnitude == 0)
return tmp_sprintf("%lu ", value);
/* Now we can find the suffix and the main divisor */
ASSERT_DIE(magnitude < 7);
char suffix = " kMGTPE"[magnitude];
/* The value before the dot is available just by dividing */
u64 before_dot = value >> shift;
/* Remainder is more tricky. First we need to know it. */
u64 remainder = value - (before_dot << shift);
/* We would like to compute (remainder * decmul) / divisor
* in integers but it's tricky because of u64 limits. */
ASSERT_DIE(decimals < ARRAY_SIZE(decadic_multiplier));
u64 decmul = decadic_multiplier[decimals];
u64 product;
if (remainder < decmul_limit[decimals])
{
/* The easier version: Everything fits into u64 */
product = remainder * decmul;
product >>= shift - 1;
product++;
product >>= 1;
}
else
{
/* Harder version: We have to multiply by parts.
* Fortunately, decmul always fits into 32 bits. */
/* After this, product = lower + (upper << 32). */
u64 lower = (remainder & ((1ULL << 32) - 1)) * decmul;
u64 upper = (remainder >> 32) * decmul;
if (shift < 33)
{
/* Divide lower */
lower >>= shift - 1;
/* Add the full upper part, not shifted enough to lose any bits */
lower += upper << (33 - shift);
}
else
{
/* First move the shifted-out bits from upper to lower */
lower += (upper & ((1ULL << (shift - 33)) - 1)) << 32;
/* Then we can divide */
lower >>= shift - 1;
/* And add the shifted upper part */
lower += upper >> (shift - 33);
}
/* Now we finish the division by rounding */
product = (lower + 1) >> 1;
}
if (product == decmul)
{
product = 0;
before_dot++;
}
ASSERT_DIE(product < decmul);
/* And now we finally have all the numbers to print! */
if (decimals)
return tmp_sprintf("%lu.%0*lu %c",
before_dot, decimals, product, suffix
);
else
return tmp_sprintf("%lu %c", before_dot, suffix);
}
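
To make the arithmetic concrete, a worked example matching one of the test vectors in the next file: for value 1048575 with decimals 3 and kb_threshold 10000, the loop stops at magnitude 1 (suffix 'k', shift 10), giving before_dot = 1048575 >> 10 = 1023 and remainder = 1023. That remainder takes the easy branch: product = (((1023 * 1000) >> 9) + 1) >> 1 = (1998 + 1) >> 1 = 999, i.e. round(1023000 / 1024), so the function prints "1023.999 k".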

View File

@@ -117,6 +117,68 @@ t_bstrcmp(void)
return 1;
}
static int
t_fmt_order(void)
{
struct fmt_order_tv {
u64 value;
int decimals;
u64 threshold;
const char *expect;
} test_vectors [] = {
{ 9999, 1, 10000, "9999 " },
{ 10001, 1, 10000, "9.8 k" },
{ 10001, 2, 10000, "9.77 k" },
{ 10001, 3, 10000, "9.767 k" },
{ 1048575, 0, 10000, "1024 k" },
{ 1048575, 1, 10000, "1024.0 k" },
{ 1048575, 2, 10000, "1024.00 k" },
{ 1048575, 3, 10000, "1023.999 k" },
{ 1048575, 4, 10000, "1023.9990 k" },
{ 1048575, 5, 10000, "1023.99902 k" },
{ 1048575, 6, 10000, "1023.999023 k" },
{ 1048575, 0, 1000, "1 M" },
{ 1048575, 1, 1000, "1.0 M" },
{ 1048575, 2, 1000, "1.00 M" },
{ 1048575, 3, 1000, "1.000 M" },
{ 1048575, 4, 1000, "1.0000 M" },
{ 1048575, 5, 1000, "1.00000 M" },
{ 1048575, 6, 1000, "0.999999 M" },
{ 1048577, 6, 10000, "1024.000977 k" },
{ 1048577, 6, 1000, "1.000001 M" },
{ 1048577, 6, 100, "1.000001 M" },
{ 1048577, 6, 10, "1.000001 M" },
{ 1048577, 6, 1, "1.000001 M" },
{ 10000000000000, 6, 10000, "9313.225746 G" },
{ 10000000000000, 6, 1000, "9.094947 T" },
{ 123456789123456789, 0, 1000, "110 P" },
{ 123456789123456789, 4, 1000, "109.6517 P" },
{ 123456789123456789, 7, 1000, "109.6516559 P" },
{ 1234567890123456789, 0, 1000, "1 E" },
{ 1234567890123456789, 1, 1000, "1.1 E" },
{ 1234567890123456789, 2, 1000, "1.07 E" },
{ 1234567890123456789, 3, 1000, "1.071 E" },
{ 1234567890123456789, 4, 1000, "1.0708 E" },
{ 1234567890123456789, 5, 1000, "1.07082 E" },
{ 1234567890123456789, 6, 1000, "1.070817 E" },
{ 1234567890123456789, 7, 1000, "1.0708170 E" },
{ 9444732965739290427U, 3, 1000, "8.192 E" },
{ 9444732965739290427U, 6, 1000, "8.192000 E" },
{ 18446744073709551615U, 2, 1000, "16.00 E" },
};
for (int i = 0; i < (int)ARRAY_SIZE(test_vectors); i++)
{
const char *result = fmt_order(test_vectors[i].value, test_vectors[i].decimals, test_vectors[i].threshold);
const char *expect = test_vectors[i].expect;
bt_assert_msg(strncmp(result, expect, strlen(expect)) == 0,
"case %d, result \"%s\", expect \"%s\"", i, result, expect);
}
return 1;
}
int
main(int argc, char *argv[])
{
@@ -126,6 +188,7 @@ main(int argc, char *argv[])
bt_test_suite(t_router_id, "print router id");
bt_test_suite(t_time, "print time");
bt_test_suite(t_bstrcmp, "bstrcmp");
bt_test_suite(t_fmt_order, "fmt_order");
return bt_exit_value();
}

View File

@@ -423,7 +423,7 @@ ea_set_attr_u32(ea_list **to, const struct ea_class *def, uint flags, u64 data)
{ ea_set_attr(to, EA_LITERAL_EMBEDDED(def, flags, data)); }
static inline void
ea_set_attr_ptr(ea_list **to, const struct ea_class *def, uint flags, const void *data)
ea_set_attr_ptr(ea_list **to, const struct ea_class *def, uint flags, void *data)
{ ea_set_attr(to, EA_LITERAL_STORE_PTR(def, flags, data)); }
static inline void

View File

@@ -24,6 +24,8 @@ char *mb_sprintf(pool *p, const char *fmt, ...);
char *mb_vsprintf(pool *p, const char *fmt, va_list args);
char *lp_sprintf(linpool *p, const char *fmt, ...);
char *lp_vsprintf(linpool *p, const char *fmt, va_list args);
#define tmp_sprintf(...) lp_sprintf(tmp_linpool, __VA_ARGS__)
#define tmp_vsprintf(...) lp_vsprintf(tmp_linpool, __VA_ARGS__)
int buffer_vprint(buffer *buf, const char *fmt, va_list args);
int buffer_print(buffer *buf, const char *fmt, ...);
@@ -33,6 +35,8 @@ u64 bstrtoul10(const char *str, char **end);
u64 bstrtoul16(const char *str, char **end);
byte bstrtobyte16(const char *str);
char *fmt_order(u64 value, uint decimals, u64 kb_threshold);
int bstrhextobin(const char *s, byte *b);
int bstrbintohex(const byte *b, size_t len, char *buf, size_t size, char delim) ACCESS_READ(1, 2) ACCESS_WRITE(3, 4);

View File

@@ -24,7 +24,7 @@ union bval {
}; \
const struct adata *ptr; /* Generic attribute data inherited from eattrs */ \
const struct adata *ad; /* Generic attribute data inherited from filters */ \
const void * v_ptr; /* Stored pointer */ \
void * v_ptr; /* Stored pointer */ \
BVAL_ITEMS;
};

View File

@@ -260,21 +260,21 @@ proto_add_channel(struct proto *p, struct channel_config *cf)
hmap_set(&ts->channel_id_map, c->id);
/* The current channel state table may be too small */
if (c->id >= ts->length_channels)
if (c->id >= ts->channels_len)
{
ea_list **l = mb_allocz(ts->pool, sizeof(ea_list*) * ts->length_channels * 2);
memcpy(l, ts->channels, sizeof(ea_list*) * ts->length_channels);
mb_free(ts->channels);
ea_list **l = mb_allocz(ts->pool, sizeof(ea_list*) * ts->channels_len * 2);
memcpy(l, ts->channel_states, sizeof(ea_list*) * ts->channels_len);
mb_free(ts->channel_states);
ts->channels = l;
ts->length_channels = ts->length_channels * 2;
ts->channel_states = l;
ts->channels_len = ts->channels_len * 2;
}
ASSERT_DIE(c->id < ts->length_channels);
ASSERT_DIE(ts->channels[c->id] == NULL);
ASSERT_DIE(c->id < ts->channels_len);
ASSERT_DIE(ts->channel_states[c->id] == NULL);
/* Set the channel info */
ts->channels[c->id] = ea_lookup_slow(ca, 0, EALS_IN_TABLE);
ts->channel_states[c->id] = ea_lookup_slow(ca, 0, EALS_IN_TABLE);
}
/* Update channel list in protocol state */
@@ -317,9 +317,9 @@ proto_remove_channel(struct proto *p UNUSED, struct channel *c)
*/
PST_LOCKED(ts)
{
ASSERT_DIE(c->id < ts->length_channels);
ea_free_later(ts->channels[c->id]);
ts->channels[c->id] = NULL;
ASSERT_DIE(c->id < ts->channels_len);
ea_free_later(ts->channel_states[c->id]);
ts->channel_states[c->id] = NULL;
hmap_clear(&ts->channel_id_map, c->id);
}
@@ -1348,15 +1348,15 @@ proto_new(struct proto_config *cf)
p->id = hmap_first_zero(&tp->proto_id_map);
hmap_set(&tp->proto_id_map, p->id);
if (p->id >= tp->length_states)
if (p->id >= tp->proto_len)
{
/* Grow the states array */
ea_list **new_states = mb_allocz(tp->pool, sizeof *new_states * tp->length_states * 2);
memcpy(new_states, tp->states, tp->length_states * sizeof *new_states);
ea_list **new_states = mb_allocz(tp->pool, sizeof *new_states * tp->proto_len * 2);
memcpy(new_states, tp->proto_states, tp->proto_len * sizeof *new_states);
mb_free(tp->states);
tp->states = new_states;
tp->length_states *= 2;
mb_free(tp->proto_states);
tp->proto_states = new_states;
tp->proto_len *= 2;
}
}
@@ -2140,15 +2140,19 @@ protos_build(void)
PST_LOCKED(ts)
{
ts->length_channels = 64;
ts->length_states = 32;
ts->channels_len = 64;
ts->proto_len = 32;
hmap_init(&ts->proto_id_map, p, ts->length_states); /* for proto ids. Value of proto id is the same as index of that proto in ptoto_state_table->attrs */
hmap_init(&ts->channel_id_map, p, ts->length_channels);
hmap_init(&ts->proto_id_map, p, ts->proto_len); /* for proto ids. Value of proto id is the same as index of that proto in proto_state_table->attrs */
hmap_init(&ts->channel_id_map, p, ts->channels_len);
/* Zeros should be reserved for easier undef manipulation */
hmap_set(&ts->proto_id_map, 0);
hmap_set(&ts->channel_id_map, 0);
ts->pool = p;
ts->states = mb_allocz(p, sizeof(ea_list *) * ts->length_states);
ts->channels = mb_allocz(p, sizeof(ea_list *) * ts->length_channels * 2);
ts->proto_states = mb_allocz(p, sizeof(ea_list *) * ts->proto_len);
ts->channel_states = mb_allocz(p, sizeof(ea_list *) * ts->channels_len * 2);
}
/* Init proto state journal */
@@ -2939,7 +2943,7 @@ proto_journal_item_cleanup_(bool withdrawal, ea_list *old_attr)
{
u32 id = ea_get_int(old_attr, &ea_proto_id, 0);
ASSERT_DIE(id);
ASSERT_DIE(tp->states[id] == NULL);
ASSERT_DIE(tp->proto_states[id] == NULL);
hmap_clear(&tp->proto_id_map, id);
}
}
@@ -2955,7 +2959,7 @@ proto_journal_item_cleanup(struct lfjour * journal UNUSED, struct lfjour_item *i
/*
* Protocol state announcement.
*
* The authoritative protocol state is always stored in ts->states[p->id]
* The authoritative protocol state is always stored in ts->proto_states[p->id]
* and it holds a reference. But sometimes it's too clumsy to announce all
* protocol changes happening in a fast succession, so there is a
* state-to-be-announced stored in the protocol itself, in p->ea_state.
@@ -2992,7 +2996,7 @@ struct proto_announce_state_deferred {
struct proto *p;
};
void
static void
proto_announce_state_locked(struct proto_state_table_private* ts, struct proto *p, ea_list *new_state)
{
ASSERT_DIE(birdloop_inside(p->loop));
@@ -3014,15 +3018,15 @@ proto_announce_state_locked(struct proto_state_table_private* ts, struct proto *
}
/* Then we check the public state */
ASSERT_DIE(p->id < ts->length_states);
ea_list *old_state = ts->states[p->id];
ASSERT_DIE(p->id < ts->proto_len);
ea_list *old_state = ts->proto_states[p->id];
/* Nothing has changed? */
if (p->ea_state == old_state)
return;
/* Set the new state */
ts->states[p->id] = p->ea_state ? ea_ref(p->ea_state) : NULL;
ts->proto_states[p->id] = p->ea_state ? ea_ref(p->ea_state) : NULL;
/* Announce the new state */
struct lfjour_item *li = lfjour_push_prepare(&proto_state_table_pub.journal);
@@ -3094,9 +3098,9 @@ channel_get_state(int id)
{
PST_LOCKED(ts)
{
ASSERT_DIE((u32) id < ts->length_channels);
if (ts->channels[id])
return ea_ref_tmp(ts->channels[id]);
ASSERT_DIE((u32) id < ts->channels_len);
if (ts->channel_states[id])
return ea_ref_tmp(ts->channel_states[id]);
}
return NULL;
}
@@ -3107,8 +3111,8 @@ proto_get_state(int id)
ea_list *eal;
PST_LOCKED(ts)
{
ASSERT_DIE((u32)id < ts->length_states);
eal = ts->states[id];
ASSERT_DIE((u32)id < ts->proto_len);
eal = ts->proto_states[id];
}
if (eal)
return ea_ref_tmp(eal);

View File

@@ -412,10 +412,10 @@ struct proto_state_table_private {
PROTO_STATE_TABLE_PUBLIC;
};
struct proto_state_table_private **locked_at;
ea_list ** states;
ea_list ** channels;
u32 length_states;
u32 length_channels;
ea_list ** proto_states;
ea_list ** channel_states;
u32 proto_len;
u32 channels_len;
struct hmap proto_id_map;
struct hmap channel_id_map;
pool *pool;
@@ -442,9 +442,7 @@ struct proto_pending_update {
ea_list *new, *old;
};
void proto_announce_state_locked(struct proto_state_table_private *ts, struct proto *p, ea_list *attr);
void proto_announce_state(struct proto *p, ea_list *attr);
void proto_announce_state_later_internal(struct proto *p, ea_list *attr);
#if 0
#define proto_announce_state_later(p, a) ( log(L_INFO "proto_announce_state_later(%s (%p), %p) at %s:%d", (p)->name, (p), (a), __FILE__, __LINE__), proto_announce_state_later_internal((p), (a)) )

View File

@@ -1964,10 +1964,8 @@ rte_recalculate(struct rtable_private *table, struct rt_import_hook *c, struct n
}
/* We need to add a spinlock sentinel to the beginning */
struct rte_storage local_sentinel = {
.flags = REF_OBSOLETE,
.next = old_best_stored,
};
_Thread_local static struct rte_storage local_sentinel = { .flags = REF_OBSOLETE, };
atomic_store_explicit(&local_sentinel.next, old_best_stored, memory_order_release);
atomic_store_explicit(&net->routes, &local_sentinel, memory_order_release);
/* Mark also the old route as obsolete. */

View File

@@ -1318,6 +1318,16 @@ struct ea_class ea_bgp_as4_out_conn = {
.type = T_INT,
};
struct ea_class ea_bgp_extended_next_hop = {
.name = "bgp_extended_next_hop",
.type = T_INT,
};
struct ea_class ea_bgp_add_path_rx = {
.name = "bgp_add_path_rx",
.type = T_INT,
};
void
bgp_register_attrs(void)
{
@@ -1340,6 +1350,7 @@ bgp_register_attrs(void)
EA_REGISTER_ALL(
&ea_bgp_rem_id, &ea_bgp_rem_as, &ea_bgp_loc_as, &ea_bgp_rem_ip, &ea_bgp_peer_type, &ea_bgp_afi,
&ea_bgp_extended_next_hop, &ea_bgp_add_path_rx,
&ea_bgp_in_conn_local_open_msg, &ea_bgp_out_conn_local_open_msg, &ea_bgp_in_conn_remote_open_msg,
&ea_bgp_out_conn_remote_open_msg, &ea_bgp_close_bmp, &ea_bgp_as4_session,
&ea_bgp_state_startup, &ea_bgp_in_conn_state, &ea_bgp_out_conn_state,

View File

@@ -801,6 +801,24 @@ bgp_conn_enter_established_state(struct bgp_conn *conn)
c->c.ra_mode = RA_ACCEPTED;
else
c->c.ra_mode = RA_OPTIMAL;
ea_list *state = NULL;
if (c->ext_next_hop)
ea_set_attr_u32(&state, &ea_bgp_extended_next_hop, 0, 1);
if (c->add_path_rx)
ea_set_attr_u32(&state, &ea_bgp_add_path_rx, 0, 1);
if (state)
{
ea_list *sb = state;
while (sb->next)
sb = sb->next;
PST_LOCKED(ts) {
sb->next = ea_free_later(ts->channel_states[c->c.id]);
ts->channel_states[c->c.id] = ea_lookup_slow(state, 0, EALS_IN_TABLE);
}
}
}
p->afi_map = mb_alloc(p->p.pool, num * sizeof(u32));
@@ -2025,9 +2043,9 @@ bgp_channel_init(struct channel *C, struct channel_config *CF)
PST_LOCKED(ts)
{
ea_list *eal = ea_free_later(ts->channels[c->c.id]);
ea_list *eal = ea_free_later(ts->channel_states[c->c.id]);
ea_set_attr(&eal, EA_LITERAL_EMBEDDED(&ea_bgp_afi, 0, c->afi));
ts->channels[c->c.id] = ea_lookup_slow(eal, 0, EALS_IN_TABLE);
ts->channel_states[c->c.id] = ea_lookup_slow(eal, 0, EALS_IN_TABLE);
}
}

View File

@@ -508,7 +508,6 @@ struct bgp_export_state {
};
struct bgp_write_state {
struct bgp_proto *proto;
struct bgp_ptx_private *ptx;
struct linpool *pool;
@@ -703,7 +702,7 @@ static inline struct bgp_proto *bgp_rte_proto(const rte *rte)
SKIP_BACK(struct bgp_proto, p.sources, rte->src->owner) : NULL;
}
byte * bgp_bmp_encode_rte(ea_list *c, struct bgp_proto *bgp_p, byte *buf, const struct rte *new);
byte * bgp_bmp_encode_rte(ea_list *channel_state, byte *buf, byte *end, const struct rte *new);
#define BGP_AIGP_METRIC 1
#define BGP_AIGP_MAX U64(0xffffffffffffffff)
@@ -722,6 +721,7 @@ bgp_total_aigp_metric(const rte *e)
extern struct ea_class ea_bgp_rem_id, ea_bgp_rem_as, ea_bgp_loc_as, ea_bgp_rem_ip, ea_bgp_peer_type, ea_bgp_afi,
ea_bgp_in_conn_local_open_msg, ea_bgp_out_conn_local_open_msg, ea_bgp_in_conn_remote_open_msg,
ea_bgp_out_conn_remote_open_msg, ea_bgp_close_bmp, ea_bgp_close_bmp_set, ea_bgp_as4_session,
ea_bgp_extended_next_hop, ea_bgp_add_path_rx,
ea_bgp_state_startup, ea_bgp_in_conn_state, ea_bgp_out_conn_state,
ea_bgp_in_conn_sk, ea_bgp_out_conn_sk, ea_bgp_as4_out_conn, ea_bgp_as4_in_conn;

View File

@@ -2480,34 +2480,27 @@ bgp_create_mp_unreach(struct bgp_write_state *s, struct bgp_bucket *buck, byte *
#ifdef CONFIG_BMP
static byte *
bgp_create_update_bmp(ea_list *channel_ea, struct bgp_proto *bgp_p, byte *buf, struct bgp_bucket *buck, bool update)
bgp_create_update_bmp(ea_list *channel_ea, byte *buf, byte *end, struct bgp_bucket *buck, bool update)
{
struct bgp_channel *c;
u32 c_id = ea_get_int(channel_ea, &ea_channel_id, 0);
BGP_WALK_CHANNELS(bgp_p, c)
if (c->c.id == c_id)
break;
byte *end = buf + (BGP_MAX_EXT_MSG_LENGTH - BGP_HEADER_LENGTH);
byte *res = NULL;
/* FIXME: must be a bit shorter */
struct bgp_caps *peer = bgp_p->conn->remote_caps;
const struct bgp_af_caps *rem = bgp_find_af_caps(peer, c->afi);
struct bgp_ptx_private ptx = {
.bmp = 1,
.c = c,
};
u32 afi = ea_get_int(channel_ea, &ea_bgp_afi, 0);
ASSERT_DIE(afi);
const struct bgp_af_desc *desc = bgp_get_af_desc(afi);
ASSERT_DIE(desc);
struct bgp_write_state s = {
.proto = bgp_p,
.ptx = &ptx,
.pool = tmp_linpool,
.mp_reach = (c->afi != BGP_AF_IPV4) || rem->ext_next_hop,
.mp_reach = (afi != BGP_AF_IPV4) || ea_get_int(channel_ea, &ea_bgp_extended_next_hop, 0),
.as4_session = 1,
.add_path = c->add_path_rx,
.mpls = c->desc->mpls,
.add_path = ea_get_int(channel_ea, &ea_bgp_add_path_rx, 0),
.mpls = desc->mpls,
.ignore_non_bgp_attrs = 1,
};
@@ -2527,21 +2520,9 @@ bgp_create_update_bmp(ea_list *channel_ea, struct bgp_proto *bgp_p, byte *buf, s
return res;
}
static byte *
bgp_bmp_prepare_bgp_hdr(byte *buf, const u16 msg_size, const u8 msg_type)
{
memset(buf + BGP_MSG_HDR_MARKER_POS, 0xff, BGP_MSG_HDR_MARKER_SIZE);
put_u16(buf + BGP_MSG_HDR_LENGTH_POS, msg_size);
put_u8(buf + BGP_MSG_HDR_TYPE_POS, msg_type);
return buf + BGP_MSG_HDR_TYPE_POS + BGP_MSG_HDR_TYPE_SIZE;
}
byte *
bgp_bmp_encode_rte(ea_list *c, struct bgp_proto *bgp_p, byte *buf, const struct rte *new)
bgp_bmp_encode_rte(ea_list *c, byte *buf, byte *end, const struct rte *new)
{
byte *pkt = buf + BGP_HEADER_LENGTH;
uint ea_size = new->attrs ? (sizeof(ea_list) + new->attrs->count * sizeof(eattr)) : 0;
uint prefix_size = sizeof(struct bgp_prefix) + new->net->length;
@@ -2562,10 +2543,7 @@ bgp_bmp_encode_rte(ea_list *c, struct bgp_proto *bgp_p, byte *buf, const struct
px->ni = NET_TO_INDEX(new->net);
add_tail(&b->prefixes, &px->buck_node);
byte *end = bgp_create_update_bmp(c, bgp_p, pkt, b, !!new->attrs);
if (end)
bgp_bmp_prepare_bgp_hdr(buf, end - buf, PKT_UPDATE);
end = bgp_create_update_bmp(c, buf, end, b, !!new->attrs);
lp_restore(tmp_linpool, tmpp);
@@ -2594,7 +2572,6 @@ again:
/* Initialize write state */
struct bgp_write_state s = {
.proto = p,
.ptx = ptx,
.pool = tmp_linpool,
.mp_reach = (c->afi != BGP_AF_IPV4) || c->ext_next_hop,

View File

@@ -1,6 +1,6 @@
src := bmp.c buffer.c map.c
src := bmp.c
obj := $(src-o-files)
$(all-daemon)
$(cf-local)
tests_objs := $(tests_objs) $(src-o-files)
tests_objs := $(tests_objs) $(src-o-files)

File diff suppressed because it is too large

View File

@@ -12,15 +12,26 @@
#include "nest/bird.h"
#include "nest/protocol.h"
#include "lib/lists.h"
#include "lib/tlists.h"
#include "nest/route.h"
#include "lib/event.h"
#include "lib/hash.h"
#include "lib/socket.h"
#include "proto/bmp/map.h"
#include "proto/bgp/bgp.h"
// Max length of MIB-II description object
#define MIB_II_STR_LEN 255
// Total size of Common Header
#define BMP_COMMON_HDR_SIZE 6
/* BMP Per-Peer Header [RFC 7854 - Section 4.2] */
// Total size of Per-Peer Header
#define BMP_PER_PEER_HDR_SIZE 42
// Maximum length of BMP message altogether
#define BMP_MSGBUF_LEN (BGP_MAX_EXT_MSG_LENGTH + BMP_PER_PEER_HDR_SIZE + BMP_COMMON_HDR_SIZE + 1)
// The following fields of this structure control whether specific routes
// are put into Route Monitoring messages and sent to the BMP collector
struct monitoring_rib {
@@ -38,12 +49,9 @@ struct bmp_config {
u16 station_port; // Monitoring station TCP port
bool monitoring_rib_in_pre_policy; // Route monitoring pre-policy Adj-Rib-In
bool monitoring_rib_in_post_policy; // Route monitoring post-policy Adj-Rib-In
uint tx_pending_limit; // Maximum pending TX buffer count (in pages)
};
/* Forward declarations */
struct bgp_proto;
struct bmp_proto;
struct bmp_proto {
struct proto p; // Parent proto
const struct bmp_config *cf; // Shortcut to BMP configuration
@@ -62,74 +70,71 @@ struct bmp_proto {
u16 station_port; // Monitoring station TCP port
struct monitoring_rib monitoring_rib;
// Below fields are for internal use
// struct bmp_peer_map bgp_peers; // Stores 'bgp_proto' structure per BGP peer
pool *buffer_mpool; // Memory pool used for BMP buffer allocations
pool *map_mem_pool; // Memory pool used for BMP map allocations
pool *tx_mem_pool; // Memory pool used for packet allocations designated to BMP collector
pool *update_msg_mem_pool; // Memory pool used for BGP UPDATE MSG allocations
list tx_queue; // Stores queued packets going to be sent
struct bmp_tx_buffer *tx_pending;// This buffer waits for socket to flush
struct bmp_tx_buffer *tx_last; // This buffer is the last to flush
uint tx_pending_count; // How many buffers waiting for flush
uint tx_pending_limit; // Maximum on buffer count
u64 tx_sent; // Amount of data sent
u64 tx_sent_total; // Amount of data sent accumulated over reconnections
event *tx_overflow_event; // Too many buffers waiting for flush
timer *connect_retry_timer; // Timer for retrying connection to the BMP collector
list update_msg_queue; // Stores all composed BGP UPDATE MSGs
bool started; // Flag that stores running status of BMP instance
int sock_err; // Last socket error code
struct lfjour_recipient proto_state_reader; // Reader of protocol states
event proto_state_changed;
};
struct bmp_peer {
ea_list *bgp;
struct bmp_peer *next;
list streams;
byte msgbuf[BMP_MSGBUF_LEN]; // Buffer for preparing the messages before sending them out
};
struct bmp_stream {
node n;
ea_list *bgp;
u32 key;
TLIST_NODE(bmp_peer_stream, struct bmp_stream) peer_node;
TLIST_NODE(bmp_table_stream, struct bmp_stream) table_node;
bool sync;
bool shutting_down;
struct bmp_stream *next;
struct bmp_table *table;
ea_list *sender;
int in_pre_policy;
struct bmp_stream_info {
u32 channel_id;
ea_list *channel_state;
const char *channel_name;
u32 afi;
enum bmp_stream_policy {
BMP_STREAM_PRE_POLICY = 1,
BMP_STREAM_POST_POLICY,
} mode;
} info;
};
#define TLIST_PREFIX bmp_peer_stream
#define TLIST_TYPE struct bmp_stream
#define TLIST_ITEM peer_node
#define TLIST_WANT_ADD_TAIL
#include "lib/tlists.h"
#define TLIST_PREFIX bmp_table_stream
#define TLIST_TYPE struct bmp_stream
#define TLIST_ITEM table_node
#define TLIST_WANT_ADD_TAIL
#include "lib/tlists.h"
struct bmp_peer {
struct bmp_peer *next;
struct bmp_peer_info {
u32 proto_id;
ea_list *proto_state;
const char *proto_name;
} info;
TLIST_LIST(bmp_peer_stream) streams;
};
struct bmp_table {
rtable *table;
struct bmp_table *next;
struct channel *channel;
struct rt_export_request out_req;
struct bmp_proto *p;
struct rt_export_feeder in_req;
rtable *table;
struct rt_export_request out_req;
event event;
atomic_int uc;
TLIST_LIST(bmp_table_stream) streams;
};
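
For readers new to BIRD's tlists: each #define block above, followed by #include "lib/tlists.h", instantiates a typed list whose node type and operations take their names from TLIST_PREFIX — the same mechanism behind lfjour_recipient_add_tail() and WALK_TLIST(lfjour_recipient, ...) seen earlier in this commit. A hypothetical usage fragment (variable and helper names are illustrative only):

struct bmp_stream *bs = ...;
bmp_peer_stream_add_tail(&peer->streams, bs);    /* generated by TLIST_WANT_ADD_TAIL */
WALK_TLIST(bmp_table_stream, s, &tab->streams)   /* walk a table's streams */
  handle_stream(s);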
#ifdef CONFIG_BMP
/**
* bmp_peer_up - send notification that BGP peer connection is established
*/
void
bmp_peer_up(struct ea_list *bgp,
const byte *tx_open_msg, uint tx_open_length,
const byte *rx_open_msg, uint rx_open_length);
/**
* bmp_peer_down - send notification that BGP peer connection is not in
* established state
*/
void
bmp_peer_down(const struct bgp_proto *bgp, int err_class, int code, int subcode, const byte *data, int length);
#else /* BMP build disabled */
static inline void bmp_peer_up(struct ea_list *bgp UNUSED, const byte *tx_open_msg UNUSED, uint tx_open_length UNUSED, const byte *rx_open_msg UNUSED, uint rx_open_length UNUSED) { }
static inline void bmp_peer_down(const struct bgp_proto *bgp UNUSED, const int err_class UNUSED, int code UNUSED, int subcode UNUSED, const byte *data UNUSED, int length UNUSED) { }
#endif /* CONFIG_BMP */
#endif /* _BIRD_BMP_H_ */

View File

@@ -1,65 +0,0 @@
/*
* BIRD -- The BGP Monitoring Protocol (BMP)
*
* (c) 2020 Akamai Technologies, Inc. (Pawel Maslanka, pmaslank@akamai.com)
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
#include "proto/bmp/buffer.h"
buffer
bmp_buffer_alloc(pool *ppool, const size_t n)
{
buffer buf;
buf.start = mb_alloc(ppool, n);
buf.pos = buf.start;
buf.end = buf.start + n;
return buf;
}
void
bmp_buffer_free(buffer *buf)
{
mb_free(buf->start);
buf->start = buf->pos = buf->end = NULL;
}
/**
* @brief bmp_buffer_grow
* @param buf - buffer to grow
* @param n - required amount of available space
* Resize buffer in a way that there is at least @n bytes of available space.
*/
static void
bmp_buffer_grow(buffer *buf, const size_t n)
{
size_t pos = bmp_buffer_pos(buf);
size_t size = bmp_buffer_size(buf);
size_t req = pos + n;
while (size < req)
size = size * 3 / 2;
buf->start = mb_realloc(buf->start, size);
buf->pos = buf->start + pos;
buf->end = buf->start + size;
}
void
bmp_buffer_need(buffer *buf, const size_t n)
{
if (bmp_buffer_avail(buf) < n)
bmp_buffer_grow(buf, n);
}
void
bmp_put_data(buffer *buf, const void *src, const size_t n)
{
if (!n)
return;
bmp_buffer_need(buf, n);
memcpy(buf->pos, src, n);
buf->pos += n;
}

View File

@@ -1,77 +0,0 @@
/*
* BIRD -- The BGP Monitoring Protocol (BMP)
*
* (c) 2020 Akamai Technologies, Inc. (Pawel Maslanka, pmaslank@akamai.com)
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
#ifndef _BIRD_BMP_BUFFER_H_
#define _BIRD_BMP_BUFFER_H_
#include "proto/bmp/bmp.h"
#include <stdlib.h>
#include "lib/resource.h"
buffer
bmp_buffer_alloc(pool *ppool, const size_t n);
void
bmp_buffer_free(buffer *buf);
static inline void
bmp_buffer_flush(buffer *buf)
{
buf->pos = buf->start;
}
static inline size_t
bmp_buffer_size(const buffer *buf)
{
return buf->end - buf->start;
}
static inline size_t
bmp_buffer_avail(const buffer *buf)
{
return buf->end - buf->pos;
}
static inline size_t
bmp_buffer_pos(const buffer *buf)
{
return buf->pos - buf->start;
}
static inline byte *
bmp_buffer_data(const buffer *buf)
{
return buf->start;
}
void
bmp_buffer_need(buffer *buf, const size_t n);
// Idea for following macros has been taken from |proto/mrt/mrt.c|
#define BMP_DEFINE_PUT_FUNC(S, T) \
static inline void \
bmp_put_##S(buffer *b, const T x) \
{ \
bmp_buffer_need(b, sizeof(T)); \
put_##S(b->pos, x); \
b->pos += sizeof(T); \
}
BMP_DEFINE_PUT_FUNC(u8, u8)
BMP_DEFINE_PUT_FUNC(u16, u16)
BMP_DEFINE_PUT_FUNC(u32, u32)
BMP_DEFINE_PUT_FUNC(u64, u64)
BMP_DEFINE_PUT_FUNC(ip4, ip4_addr)
BMP_DEFINE_PUT_FUNC(ip6, ip6_addr)
void
bmp_put_data(buffer *buf, const void *src, const size_t n);
#endif /* _BIRD_BMP_BUFFER_H_ */

View File

@@ -28,6 +28,7 @@ bmp_proto_start: proto_start BMP {
this_proto->loop_order = DOMAIN_ORDER(proto);
BMP_CFG->sys_descr = "Not defined";
BMP_CFG->sys_name = "Not defined";
BMP_CFG->tx_pending_limit = (1 << 30) / page_size;
}
;
@@ -72,6 +73,10 @@ bmp_proto:
| bmp_proto MONITORING RIB IN POST_POLICY bool ';' {
BMP_CFG->monitoring_rib_in_post_policy = $6;
}
| bmp_proto TX BUFFER LIMIT expr ';' {
BMP_CFG->tx_pending_limit = $5 * (u64) (1 << 20) / page_size;
if ($5 < 1) cf_error("BMP TX buffer limit must be at least 1 megabyte");
}
;
CF_CODE
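
For scale, assuming the common 4 KiB page size (page_size is determined at runtime): "tx buffer limit 64" stores 64 * 2^20 / 4096 = 16384 pages in tx_pending_limit, and the default set above, (1 << 30) / page_size, comes out to 262144 pages (1 GiB), consistent with the documented default of 1024 megabytes.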

View File

@@ -1,119 +0,0 @@
/*
* BIRD -- The BGP Monitoring Protocol (BMP)
*
* (c) 2020 Akamai Technologies, Inc. (Pawel Maslanka, pmaslank@akamai.com)
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
#include "proto/bmp/map.h"
/* Peer Index Table */
#define PEER_KEY(n) (n)->peer_as, (n)->peer_ip
#define PEER_NEXT(n) (n)->next
#define PEER_EQ(as1,ip1,as2,ip2) \
(as1) == (as2) && ipa_equal(ip1, ip2)
#define PEER_FN(as,ip) ipa_hash(ip)
#define PEER_REHASH bmp_peer_rehash
#define PEER_PARAMS /8, *2, 2, 2, 6, 20
HASH_DEFINE_REHASH_FN(PEER, struct bmp_peer_map_key)
#define PEER_INIT_ORDER 6
void
bmp_peer_map_init(struct bmp_peer_map *map, pool *mpool)
{
map->mpool = mpool;
HASH_INIT(map->peer_hash, map->mpool, PEER_INIT_ORDER);
}
struct bmp_peer_map_key
bmp_peer_map_key_create(const ip_addr peer_ip, const u32 peer_as)
{
struct bmp_peer_map_key key;
key.next = NULL;
key.peer_ip = peer_ip;
key.peer_as = peer_as;
return key;
}
void
bmp_peer_map_flush(struct bmp_peer_map *map)
{
struct bmp_peer_map_entry *entry;
HASH_WALK_DELSAFE(map->peer_hash, next, e)
{
entry = (struct bmp_peer_map_entry *) e;
mb_free(entry->data.buf);
HASH_DELETE(map->peer_hash, PEER, PEER_KEY(&entry->key));
mb_free(entry);
}
HASH_WALK_DELSAFE_END;
HASH_MAY_RESIZE_DOWN(map->peer_hash, PEER, map->mpool);
}
void
bmp_peer_map_free(struct bmp_peer_map *map)
{
bmp_peer_map_flush(map);
HASH_FREE(map->peer_hash);
}
void
bmp_peer_map_insert(struct bmp_peer_map *map, const struct bmp_peer_map_key key,
const byte *data, const size_t data_size)
{
struct bmp_peer_map_entry *entry
= (void *) HASH_FIND(map->peer_hash, PEER, PEER_KEY(&key));
if (entry)
{
mb_free(entry->data.buf);
entry->data.buf = mb_alloc(map->mpool, data_size);
memcpy(entry->data.buf, data, data_size);
entry->data.buf_size = data_size;
return;
}
entry = mb_alloc(map->mpool, sizeof (struct bmp_peer_map_entry));
entry->data.buf = mb_alloc(map->mpool, data_size);
memcpy(entry->data.buf, data, data_size);
entry->data.buf_size = data_size;
entry->key = key;
HASH_INSERT2(map->peer_hash, PEER, map->mpool, &entry->key);
}
void
bmp_peer_map_remove(struct bmp_peer_map *map, const struct bmp_peer_map_key key)
{
struct bmp_peer_map_entry *entry
= (void *) HASH_DELETE(map->peer_hash, PEER, PEER_KEY(&key));
if (!entry)
return;
mb_free(entry->data.buf);
mb_free(entry);
}
const struct bmp_peer_map_entry *
bmp_peer_map_get(struct bmp_peer_map *map, const struct bmp_peer_map_key key)
{
return (struct bmp_peer_map_entry *) HASH_FIND(map->peer_hash, PEER, PEER_KEY(&key));
}
void
bmp_peer_map_walk(const struct bmp_peer_map *map, bmp_peer_map_walk_action action, void *arg)
{
struct bmp_peer_map_entry *entry;
HASH_WALK_FILTER(map->peer_hash, next, e, _)
{
entry = (struct bmp_peer_map_entry *) e;
action(entry->key, entry->data.buf, entry->data.buf_size, arg);
}
HASH_WALK_FILTER_END;
}

View File

@@ -1,68 +0,0 @@
/*
* BIRD -- The BGP Monitoring Protocol (BMP)
*
* (c) 2020 Akamai Technologies, Inc. (Pawel Maslanka, pmaslank@akamai.com)
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
/**
* This map implementation binds peer IP address as container key with custom data.
*/
#ifndef _BIRD_BMP_MAP_H_
#define _BIRD_BMP_MAP_H_
#include "nest/bird.h"
#include "lib/hash.h"
#include "lib/resource.h"
struct bmp_peer_map_key {
struct bmp_peer_map_key *next;
ip_addr peer_ip;
u32 peer_as;
};
struct bmp_peer_map_data {
void *buf;
size_t buf_size;
};
struct bmp_peer_map_entry {
struct bmp_peer_map_key key;
struct bmp_peer_map_data data;
};
struct bmp_peer_map {
pool *mpool; // Memory pool for peer entries in peer_hash
HASH(struct bmp_peer_map_key) peer_hash; // Hash for peers to find the index
};
void
bmp_peer_map_init(struct bmp_peer_map *map, pool *mpool);
struct bmp_peer_map_key
bmp_peer_map_key_create(const ip_addr peer_ip, const u32 peer_as);
void
bmp_peer_map_free(struct bmp_peer_map *map);
void
bmp_peer_map_flush(struct bmp_peer_map *map);
void
bmp_peer_map_insert(struct bmp_peer_map *map, const struct bmp_peer_map_key key,
const byte *data, const size_t data_size);
void
bmp_peer_map_remove(struct bmp_peer_map *map, const struct bmp_peer_map_key key);
const struct bmp_peer_map_entry *
bmp_peer_map_get(struct bmp_peer_map *map, const struct bmp_peer_map_key key);
typedef void (*bmp_peer_map_walk_action)(const struct bmp_peer_map_key key,
const byte *data, const size_t data_size, void *arg);
void
bmp_peer_map_walk(const struct bmp_peer_map *map, bmp_peer_map_walk_action action, void *arg);
#endif /* _BIRD_BMP_MAP_H_ */

View File

@@ -335,9 +335,9 @@ mrt_peer_table_dump(struct mrt_table_dump_state *s)
#ifdef CONFIG_BGP
PST_LOCKED(ts)
{
for(u32 i = 0; i < ts->length_states; i++)
for(u32 i = 0; i < ts->proto_len; i++)
{
ea_list *eal = ts->states[i];
ea_list *eal = ts->proto_states[i];
if (eal)
ea_free_later(ea_ref(eal));
else