
EAttr normalization rewritten to use bucket sort

The EAttr ID space is dense, so we can just walk it once, sweep the whole
input and go home.

There is a little memory inefficiency in always allocating the largest
possible block, but it isn't too bad.

There are also unit tests for this.
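
Below, for orientation, is a minimal stand-alone sketch of the idea in plain C
(not BIRD code; ID_MAX, struct attr and normalize() are invented for the
illustration): because the ID space is dense, one sweep keeps only the newest
value per ID in an array indexed directly by ID, and a second sweep compacts
that array towards its beginning.

#include <stdio.h>
#include <string.h>

#define ID_MAX 16                          /* stand-in for ea_class_max */

struct attr { unsigned id; int val; };     /* id == 0 marks an empty bucket */

static unsigned
normalize(const struct attr *in, unsigned n, struct attr *buckets)
{
  /* Always clear the largest possible block, as the commit message says. */
  memset(buckets, 0, ID_MAX * sizeof(*buckets));

  /* One sweep over the input; the first occurrence (newest layer) wins. */
  for (unsigned i = 0; i < n; i++)
    if (!buckets[in[i].id].id)
      buckets[in[i].id] = in[i];

  /* Compact the inhabited buckets towards the beginning, already sorted by id. */
  unsigned count = 0;
  for (unsigned id = 1; id < ID_MAX; id++)
    if (buckets[id].id)
      buckets[count++] = buckets[id];

  return count;
}

int main(void)
{
  struct attr in[] = { {7, 5678}, {4, 1234}, {6, 9}, {7, 1111} };
  struct attr out[ID_MAX];
  unsigned n = normalize(in, 4, out);
  for (unsigned i = 0; i < n; i++)
    printf("id %u = %d\n", out[i].id, out[i].val);   /* prints ids 4, 6, 7 */
  return 0;
}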
Katerina Kubecova 2024-11-20 16:53:13 +01:00 committed by Maria Matejka
parent 6e940c259d
commit 4af3ee1f2f
3 changed files with 305 additions and 242 deletions

lib/Makefile

@@ -2,6 +2,6 @@ src := a-path.c a-set.c bitmap.c bitops.c blake2s.c blake2b.c checksum.c defer.c
obj := $(src-o-files)
$(all-daemon)
-tests_src := a-set_test.c a-path_test.c attribute_cleanup_test.c bitmap_test.c heap_test.c buffer_test.c event_test.c flowspec_test.c bitops_test.c patmatch_test.c fletcher16_test.c slist_test.c checksum_test.c lists_test.c locking_test.c mac_test.c ip_test.c hash_test.c printf_test.c rcu_test.c slab_test.c tlists_test.c type_test.c
+tests_src := a-set_test.c a-path_test.c attribute_cleanup_test.c bitmap_test.c heap_test.c buffer_test.c event_test.c flowspec_test.c bitops_test.c patmatch_test.c fletcher16_test.c slist_test.c rt-normalize_test.c checksum_test.c lists_test.c locking_test.c mac_test.c ip_test.c hash_test.c printf_test.c rcu_test.c slab_test.c tlists_test.c type_test.c
tests_targets := $(tests_targets) $(tests-target-files)
tests_objs := $(tests_objs) $(src-o-files)

lib/rt-normalize_test.c (new file, 206 lines)

@@ -0,0 +1,206 @@
#include "test/birdtest.h"
#include "nest/route.h"
static _Bool
eattr_same_value2(const eattr *a, const eattr *b)
{
// a copy of eattr_same_value() from rt-attr.c, which is static there
if (
a->id != b->id ||
a->flags != b->flags ||
a->type != b->type ||
a->undef != b->undef
)
return 0;
if (a->undef)
return 1;
if (a->type & EAF_EMBEDDED)
return a->u.data == b->u.data;
else
return adata_same(a->u.ptr, b->u.ptr);
}
void
init_ea_list(struct ea_list *eal, int count)
{
eal->flags = 0;
eal->count = count;
eal->stored = 0;
eal->next = NULL;
}
void
init_ea_with_3eattr(struct ea_list *eal)
{
init_ea_list(eal, 3);
eal->attrs[0] = EA_LITERAL_EMBEDDED(&ea_gen_preference, 0, 1234);
eal->attrs[1] = EA_LITERAL_EMBEDDED(&ea_gen_source, 0, 5678);
ip_addr dummy;
dummy.addr[0] = 123;
eal->attrs[2] = EA_LITERAL_STORE_ADATA(&ea_gen_from, 0, &dummy, sizeof(ip_addr));
eal->attrs[0].originated = 0;
eal->attrs[1].originated = 1;
}
static int
t_normalize_one_layer(void)
{
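/* One layer only: normalization just sorts the three attributes by ascending
id; result[] holds them in the expected order. */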
struct ea_list *eal = xmalloc(sizeof(struct ea_list) + 3 * sizeof(eattr));
init_ea_with_3eattr(eal);
struct ea_list *new_eal = ea_normalize(eal, 0);
eattr *result[] = {&eal->attrs[0], &eal->attrs[2], &eal->attrs[1]};
if (new_eal->count != 3)
return 0;
for(uint i = 0; i < new_eal->count; i++)
if (!(eattr_same_value2(&new_eal->attrs[i], result[i]) &&
new_eal->attrs[i].originated == result[i]->originated &&
new_eal->attrs[i].fresh == 0))
return 0;
if (new_eal->flags != EALF_SORTED)
return 0;
return 1;
}
static int
t_normalize_two_layers(void)
{
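/* Two overlay layers: the newer eal2 overrides eal1 where ids collide, and the
preference attribute, undefined in eal2, must disappear from the result. */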
struct ea_list *eal1 = xmalloc(sizeof(struct ea_list) + 4 * sizeof(eattr));
struct ea_list *eal2 = xmalloc(sizeof(struct ea_list) + 5 * sizeof(eattr));
init_ea_with_3eattr(eal1);
struct nexthop_adata nhad = NEXTHOP_DEST_LITERAL(1357);
eal1->attrs[3] = EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, &nhad.ad);
eal1->attrs[3].originated = 1;
eal1->count++;
// ids are 4, 7, 6, 1 in this order
nhad = NEXTHOP_DEST_LITERAL(13);
eal2->attrs[0] = EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, &nhad.ad);
eal2->attrs[0].originated = 0;
eal2->attrs[1] = EA_LITERAL_EMBEDDED(&ea_gen_source, 0, 8765);
eal2->attrs[2] = EA_LITERAL_EMBEDDED(&ea_gen_igp_metric, 0, 45);
eal2->attrs[3] = EA_LITERAL_EMBEDDED(&ea_gen_mpls_policy, 0, 57);
eal2->attrs[3].originated = 0;
eal2->attrs[4] = EA_LITERAL_EMBEDDED(&ea_gen_preference, 0, 0);
eal2->attrs[4].undef = 1;
// ids are 1, 7, 5, 9, 4 in this order
eal2->count = 5;
eal2->flags = 0;
eal2->stored = 0;
eal2->next = eal1;
struct ea_list *new_eal = ea_normalize(eal2, 0);
if (new_eal->count != 5)
return 0;
eattr result[5];
result[0] = eal2->attrs[0]; // id 1
result[0].originated = 1;
result[1] = eal2->attrs[2]; // id 5, eattr with id 4 was undefed
result[2] = eal1->attrs[2]; // id 6
result[3] = eal2->attrs[1]; // id 7
result[3].originated = 1;
result[4] = eal2->attrs[3]; // id 9
for(uint i = 0; i < new_eal->count; i++)
if (!(eattr_same_value2(&new_eal->attrs[i], &result[i]) &&
new_eal->attrs[i].originated == result[i].originated &&
new_eal->attrs[i].fresh == 0))
return 0;
if (new_eal->flags != EALF_SORTED)
return 0;
return 1;
}
static int
normalize_two_leave_last(void)
{
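/* Three layers: eal2 -> eal1 -> base, where base is a stored underlay kept via
the upto mask. Only attributes whose value differs from base (or is new) may
survive in the normalized overlay. */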
struct ea_list *eal1 = xmalloc(sizeof(struct ea_list) + 4 * sizeof(eattr));
struct ea_list *eal2 = xmalloc(sizeof(struct ea_list) + 5 * sizeof(eattr));
struct ea_list *base = xmalloc(sizeof(struct ea_list) + 4 * sizeof(eattr));
struct nexthop_adata nhad = NEXTHOP_DEST_LITERAL(13);
base->attrs[0] = EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, &nhad.ad); // changes
base->attrs[0].originated = 0;
base->attrs[1] = EA_LITERAL_EMBEDDED(&ea_gen_source, 0, 8765); // remains
base->attrs[2] = EA_LITERAL_EMBEDDED(&ea_gen_mpls_policy, 0, 57); // will be set
base->attrs[2].originated = 0;
base->attrs[3] = EA_LITERAL_EMBEDDED(&ea_gen_preference, 0, 0); // remains unset (set in eal1 and unset again in eal2)
base->attrs[3].undef = 1;
struct nexthop_adata nnnhad = NEXTHOP_DEST_LITERAL(31);
eal1->attrs[0] = EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, &nnnhad.ad);
eal1->attrs[1] = EA_LITERAL_EMBEDDED(&ea_gen_source, 0, 8765);
eal1->attrs[2] = EA_LITERAL_EMBEDDED(&ea_gen_igp_metric, 0, 66);
eal1->attrs[3] = EA_LITERAL_EMBEDDED(&ea_gen_preference, 0, 36);
struct nexthop_adata nnhad = NEXTHOP_DEST_LITERAL(333);
eal2->attrs[0] = EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, &nnhad.ad);
eal2->attrs[1] = EA_LITERAL_EMBEDDED(&ea_gen_igp_metric, 0, 45);
eal2->attrs[1].undef = 1;
eal2->attrs[2] = EA_LITERAL_EMBEDDED(&ea_gen_mpls_policy, 0, 58);
eal2->attrs[3] = EA_LITERAL_EMBEDDED(&ea_gen_preference, 0, 0);
eal2->attrs[3].undef = 1;
ip_addr dummy;
dummy.addr[0] = 123;
eal2->attrs[4] = EA_LITERAL_STORE_ADATA(&ea_gen_from, 0, &dummy, sizeof(ip_addr));
eattr result[3];
result[0] = eal2->attrs[0]; // 1
result[1] = eal2->attrs[4]; // 6
result[2] = eal2->attrs[2]; // 9
base->count = 4;
base->next = NULL;
base->stored = EALS_CUSTOM;
eal1->count = 4;
eal1->next = base;
eal1->stored = 0;
eal2->count = 5;
eal2->next = eal1;
eal2->stored = 0;
struct ea_list *new_eal = ea_normalize(eal2, BIT32_ALL(EALS_CUSTOM));
if (new_eal->count != 3)
return 0;
for(uint i = 0; i < new_eal->count; i++)
if (!(eattr_same_value2(&new_eal->attrs[i], &result[i]) &&
new_eal->attrs[i].originated == result[i].originated &&
new_eal->attrs[i].fresh == 0))
return 0;
if (new_eal->flags != EALF_SORTED)
return 0;
return 1;
}
int
main(int argc, char *argv[])
{
bt_init(argc, argv);
rta_init();
bt_test_suite(t_normalize_one_layer, "One layer normalization");
bt_test_suite(t_normalize_two_layers, "Two layers normalization");
bt_test_suite(normalize_two_leave_last, "Two layers normalization with base layer");
return bt_exit_value();
}

rt-attr.c

@@ -881,247 +881,6 @@ ea_walk(struct ea_walk_state *s, uint id, uint max)
return NULL;
}
static inline void
ea_do_sort(ea_list *e)
{
unsigned n = e->count;
eattr *a = e->attrs;
eattr *b = alloca(n * sizeof(eattr));
unsigned s, ss;
/* We need to use a stable sorting algorithm, hence mergesort */
do
{
s = ss = 0;
while (s < n)
{
eattr *p, *q, *lo, *hi;
p = b;
ss = s;
*p++ = a[s++];
while (s < n && p[-1].id <= a[s].id)
*p++ = a[s++];
if (s < n)
{
q = p;
*p++ = a[s++];
while (s < n && p[-1].id <= a[s].id)
*p++ = a[s++];
lo = b;
hi = q;
s = ss;
while (lo < q && hi < p)
if (lo->id <= hi->id)
a[s++] = *lo++;
else
a[s++] = *hi++;
while (lo < q)
a[s++] = *lo++;
while (hi < p)
a[s++] = *hi++;
}
}
}
while (ss);
}
static bool eattr_same_value(const eattr *a, const eattr *b);
/**
* Discard duplicates and undefs in a sorted ea_list, in place. This is why we
* need a stable sorting algorithm.
**/
static inline void
ea_do_prune(ea_list *e)
{
eattr *s, *d, *l, *s0;
int i = 0;
#if 0
debug("[[prune]] ");
ea_dump(e);
debug(" ----> ");
#endif
/* Prepare underlay stepper */
uint ulc = 0;
for (ea_list *u = e->next; u; u = u->next)
ulc++;
struct { eattr *cur, *end; } uls[ulc];
{
ea_list *u = e->next;
for (uint i = 0; i < ulc; i++)
{
ASSERT_DIE(u->flags & EALF_SORTED);
uls[i].cur = u->attrs;
uls[i].end = u->attrs + u->count;
u = u->next;
/* debug(" [[prev %d: %p to %p]] ", i, uls[i].cur, uls[i].end); */
}
}
s = d = e->attrs; /* Beginning of the list. @s is source, @d is destination. */
l = e->attrs + e->count; /* End of the list */
/* Walk from begin to end. */
while (s < l)
{
s0 = s++;
/* Find a consecutive block of the same attribute */
while (s < l && s->id == s[-1].id)
s++;
/* Now s0 is the most recent version, s[-1] the oldest one */
/* Find the attribute's underlay version */
eattr *prev = NULL;
for (uint i = 0; i < ulc; i++)
{
while ((uls[i].cur < uls[i].end) && (uls[i].cur->id < s0->id))
{
uls[i].cur++;
/* debug(" [[prev %d: %p (%s/%d)]] ", i, uls[i].cur, ea_class_global[uls[i].cur->id]->name, uls[i].cur->id); */
}
if ((uls[i].cur >= uls[i].end) || (uls[i].cur->id > s0->id))
continue;
prev = uls[i].cur;
break;
}
/* Drop identicals */
if (prev && eattr_same_value(s0, prev))
{
/* debug(" [[drop identical %s]] ", ea_class_global[s0->id]->name); */
continue;
}
/* Drop undefs (identical undefs already dropped before) */
if (!prev && s0->undef)
{
/* debug(" [[drop undef %s]] ", ea_class_global[s0->id]->name); */
continue;
}
/* Copy the newest version to destination */
*d = *s0;
/* Preserve info whether it originated locally */
d->originated = s[-1].originated;
/* Not fresh any more, we prefer surstroemming */
d->fresh = 0;
/* Next destination */
d++;
i++;
}
e->count = i;
}
/**
* ea_sort - sort an attribute list
* @e: list to be sorted
*
* This function takes a &ea_list chain and sorts the attributes
* within each of its entries.
*
* If an attribute occurs multiple times in a single &ea_list,
* ea_sort() leaves only the first (the only significant) occurrence.
*/
static void
ea_sort(ea_list *e)
{
if (!(e->flags & EALF_SORTED))
{
ea_do_sort(e);
ea_do_prune(e);
e->flags |= EALF_SORTED;
}
if (e->count > 5)
e->flags |= EALF_BISECT;
}
/**
* ea_scan - estimate attribute list size
* @e: attribute list
*
* This function calculates an upper bound of the size of
* a given &ea_list after merging with ea_merge().
*/
static unsigned
ea_scan(const ea_list *e, u32 upto)
{
unsigned cnt = 0;
while (e)
{
cnt += e->count;
e = e->next;
if (e && BIT32_TEST(&upto, e->stored))
break;
}
return sizeof(ea_list) + sizeof(eattr)*cnt;
}
/**
* ea_merge - merge segments of an attribute list
* @e: attribute list
* @t: buffer to store the result to
*
* This function takes a possibly multi-segment attribute list
* and merges all of its segments to one.
*
* The primary use of this function is for &ea_list normalization:
* first call ea_scan() to determine how much memory will the result
* take, then allocate a buffer (usually using alloca()), merge the
* segments with ea_merge() and finally sort and prune the result
* by calling ea_sort().
*/
static void
ea_merge(ea_list *e, ea_list *t, u32 upto)
{
eattr *d = t->attrs;
t->flags = 0;
t->count = 0;
while (e)
{
memcpy(d, e->attrs, sizeof(eattr)*e->count);
t->count += e->count;
d += e->count;
e = e->next;
if (e && BIT32_TEST(&upto, e->stored))
break;
}
t->next = e;
}
ea_list *
ea_normalize(ea_list *e, u32 upto)
{
#if 0
debug("(normalize)");
ea_dump(e);
debug(" ----> ");
#endif
ea_list *t = tmp_allocz(ea_scan(e, upto));
ea_merge(e, t, upto);
ea_sort(t);
#if 0
ea_dump(t);
debug("\n");
#endif
return t;
}
static bool
eattr_same_value(const eattr *a, const eattr *b)
{
@@ -1197,6 +956,104 @@ ea_list_size(ea_list *o)
return elen;
}
/**
* ea_normalize - create a normalized version of attributes
* @e: input attributes
* @upto: bitmask of layers which should stay as an underlay
*
* This function squashes all updates done atop some ea_list
* and creates the final structure useful for storage or fast searching.
* The method is a bucket sort.
*
* Returns the final ea_list with some excess memory at the end,
* allocated from the tmp_linpool. The adata is linked from the original places.
*/
ea_list *
ea_normalize(ea_list *e, u32 upto)
{
/* We expect some work to be actually needed. */
ASSERT_DIE(!BIT32_TEST(&upto, e->stored));
/* Allocate the output */
ea_list *out = tmp_allocz(ea_class_max * sizeof(eattr) + sizeof(ea_list));
*out = (ea_list) {
.flags = EALF_SORTED,
};
uint min_id = ~0, max_id = 0;
eattr *buckets = out->attrs;
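/* The bucket array is indexed directly by the attribute id; a zeroed slot
* (id == 0) means "not seen yet". The same storage is compacted in place
* into the final list below. */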
/* Walk the attribute lists, one after another. */
for (; e; e = e->next)
{
if (!out->next && BIT32_TEST(&upto, e->stored))
out->next = e;
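/* From this layer down everything is underlay: it stays linked below the
* new list and is only consulted to drop values that did not change. */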
for (int i = 0; i < e->count; i++)
{
uint id = e->attrs[i].id;
if (id > max_id)
max_id = id;
if (id < min_id)
min_id = id;
if (out->next)
{
/* Underlay: check whether the value is duplicate */
if (buckets[id].id && buckets[id].fresh)
if (eattr_same_value(&e->attrs[i], &buckets[id]))
/* Duplicate value treated as no change at all */
buckets[id] = (eattr) {};
else
/* This value is actually needed */
buckets[id].fresh = 0;
}
else
{
/* Overlay: not seen yet -> copy the eattr */
if (!buckets[id].id)
{
buckets[id] = e->attrs[i];
buckets[id].fresh = 1;
}
}
/* The originated information is relevant from the lowermost one */
buckets[id].originated = e->attrs[i].originated;
}
}
/* And now we just walk the list from beginning to end and collect
* everything to the beginning of the list.
* Walking just that part which is inhabited for sure. */
for (uint id = min_id; id <= max_id; id++)
{
/* Nothing to see for this ID */
if (!buckets[id].id)
continue;
/* Drop unnecessary undefs */
if (buckets[id].undef && buckets[id].fresh)
continue;
/* Now the freshness is lost, finally */
buckets[id].fresh = 0;
/* Move the attribute to the beginning */
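/* out->count entries have been emitted so far, each with a distinct id
* below the current one, so out->count < id and the compaction never
* overwrites a bucket that has not been read yet. */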
ASSERT_DIE(out->count < id);
buckets[out->count++] = buckets[id];
}
/* We want to bisect only if the list is long enough */
if (out->count > 5)
out->flags |= EALF_BISECT;
return out;
}
void
ea_list_copy(ea_list *n, ea_list *o, uint elen)
{