diff --git a/nest/proto.c b/nest/proto.c
index 8566d297..cf6ab925 100644
--- a/nest/proto.c
+++ b/nest/proto.c
@@ -836,14 +836,18 @@ static void
 proto_schedule_flush_loop(void)
 {
   struct proto *p;
+  struct announce_hook *h;
 
   if (flush_loop_state)
     return;
   flush_loop_state = 1;
 
-  rt_schedule_prune_all();
   WALK_LIST(p, flush_proto_list)
+  {
     p->flushing = 1;
+    for (h=p->ahooks; h; h=h->next)
+      h->table->prune_state = 1;
+  }
 
   ev_schedule(proto_flush_event);
 }
diff --git a/nest/route.h b/nest/route.h
index f515b6e6..b5dd7fb0 100644
--- a/nest/route.h
+++ b/nest/route.h
@@ -161,7 +161,7 @@ typedef struct rtable {
   int gc_counter;                      /* Number of operations since last GC */
   bird_clock_t gc_time;                /* Time of last GC */
   byte gc_scheduled;                   /* GC is scheduled */
-  byte prune_state;                    /* Table prune state, 1 -> prune is running */
+  byte prune_state;                    /* Table prune state, 1 -> scheduled, 2-> running */
   byte hcu_scheduled;                  /* Hostcache update is scheduled */
   byte nhu_state;                      /* Next Hop Update state */
   struct fib_iterator prune_fit;       /* Rtable prune FIB iterator */
@@ -307,7 +307,6 @@ void rt_dump(rtable *);
 void rt_dump_all(void);
 int rt_feed_baby(struct proto *p);
 void rt_feed_baby_abort(struct proto *p);
-void rt_schedule_prune_all(void);
 int rt_prune_loop(void);
 struct rtable_config *rt_new_table(struct symbol *s, int addr_type);
 static inline int rt_match(int rt, u32 rtlist) { return !rtlist || ((1 << rt) & rtlist); }
diff --git a/nest/rt-table.c b/nest/rt-table.c
index e3471ce9..2606fd14 100644
--- a/nest/rt-table.c
+++ b/nest/rt-table.c
@@ -1332,19 +1332,8 @@ rt_init(void)
 }
 
 
-/* Called from proto_schedule_flush_loop() only,
-   ensuring that all prune states are zero */
-void
-rt_schedule_prune_all(void)
-{
-  rtable *t;
-
-  WALK_LIST(t, routing_tables)
-    t->prune_state = 1;
-}
-
 static inline int
-rt_prune_step(rtable *tab, int *max_feed)
+rt_prune_step(rtable *tab, int step, int *max_feed)
 {
   struct fib_iterator *fit = &tab->prune_fit;
 
@@ -1370,8 +1359,8 @@ again:
 
     rescan:
       for (e=n->routes; e; e=e->next)
-        if (e->sender->proto->core_state != FS_HAPPY &&
-            e->sender->proto->core_state != FS_FEEDING)
+        if (e->sender->proto->flushing ||
+            (step && e->attrs->proto->flushing))
           {
             if (*max_feed <= 0)
               {
@@ -1379,6 +1368,10 @@ again:
                 return 0;
               }
 
+            if (step)
+              log(L_WARN "Route %I/%d from %s still in %s after flush",
+                  n->n.prefix, n->n.pxlen, e->attrs->proto->name, tab->name);
+
             rte_discard(tab, e);
             (*max_feed)--;
 
@@ -1403,23 +1396,42 @@ again:
 
 /**
  * rt_prune_loop - prune routing tables
- * @tab: routing table to be pruned
  *
  * The prune loop scans routing tables and removes routes belonging to
- * inactive protocols and also stale network entries. Returns 1 when
+ * flushing protocols and also stale network entries. Returns 1 when
 * all such routes are pruned. It is a part of the protocol flushing
 * loop.
+ *
+ * The prune loop runs in two steps. In the first step it prunes just
+ * the routes with flushing senders (in explicitly marked tables) so
+ * the route removal is propagated as usual. In the second step, all
+ * remaining relevant routes are removed. Ideally, there shouldn't be
+ * any, but it happens when pipe filters are changed.
  */
 int
 rt_prune_loop(void)
 {
-  rtable *t;
+  static int step = 0;
   int max_feed = 512;
+  rtable *t;
 
+ again:
   WALK_LIST(t, routing_tables)
-    if (! rt_prune_step(t, &max_feed))
+    if (! rt_prune_step(t, step, &max_feed))
       return 0;
 
+  if (step == 0)
+    {
+      /* Prepare for the second step */
+      WALK_LIST(t, routing_tables)
+        t->prune_state = 1;
+
+      step = 1;
+      goto again;
+    }
+
+  /* Done */
+  step = 0;
   return 1;
 }
 
diff --git a/proto/bgp/bgp.c b/proto/bgp/bgp.c
index b1594b92..32153452 100644
--- a/proto/bgp/bgp.c
+++ b/proto/bgp/bgp.c
@@ -384,10 +384,12 @@ bgp_conn_enter_close_state(struct bgp_conn *conn)
   int os = conn->state;
 
   bgp_conn_set_state(conn, BS_CLOSE);
-  tm_stop(conn->hold_timer);
   tm_stop(conn->keepalive_timer);
   conn->sk->rx_hook = NULL;
 
+  /* Timeout for CLOSE state, if we cannot send notification soon then we just hangup */
+  bgp_start_timer(conn->hold_timer, 10);
+
   if (os == BS_ESTABLISHED)
     bgp_conn_leave_established_state(p);
 }
@@ -478,9 +480,18 @@ static void
 bgp_hold_timeout(timer *t)
 {
   struct bgp_conn *conn = t->data;
+  struct bgp_proto *p = conn->bgp;
 
   DBG("BGP: Hold timeout\n");
 
+  /* We are already closing the connection - just do hangup */
+  if (conn->state == BS_CLOSE)
+  {
+    BGP_TRACE(D_EVENTS, "Connection stalled");
+    bgp_conn_enter_idle_state(conn);
+    return;
+  }
+
   /* If there is something in input queue, we are probably congested
      and perhaps just not processed BGP packets in time. */
 
diff --git a/proto/radv/packets.c b/proto/radv/packets.c
index efaa69bb..b0af9266 100644
--- a/proto/radv/packets.c
+++ b/proto/radv/packets.c
@@ -395,7 +395,7 @@ static void
 radv_err_hook(sock *sk, int err)
 {
   struct radv_iface *ifa = sk->data;
-  log(L_ERR "%s: Socket error: %m", ifa->ra->p.name, err);
+  log(L_ERR "%s: Socket error on %s: %M", ifa->ra->p.name, ifa->iface->name, err);
 }
 
 int
diff --git a/sysdep/unix/io.c b/sysdep/unix/io.c
index 85b16b09..b448cc07 100644
--- a/sysdep/unix/io.c
+++ b/sysdep/unix/io.c
@@ -17,10 +17,10 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
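For reference, a minimal self-contained sketch of the two-step prune logic described in the rt_prune_loop comment above. It is illustration only, not BIRD code: the struct layouts and names (prune_step, prune_loop, the live flag, the sample protocols and table) are invented stand-ins, and the resumable max_feed/prune_fit machinery is left out. Step 0 discards routes whose sender protocol is flushing in the tables that were marked; then every table is re-marked and step 1 sweeps any route whose originating protocol is still flushing, logging a warning the way the patch does.

#include <stdio.h>

/* Simplified stand-ins for BIRD's proto / rtable / rte structures. */
struct proto { const char *name; int flushing; };
struct route { struct proto *sender; struct proto *origin; int live; };
struct table { const char *name; struct route *routes; int nroutes; int prune_state; };

/* One pruning pass over a single table; 'step' selects the predicate,
   mirroring rt_prune_step(tab, step, &max_feed) in the patch. */
static void prune_step(struct table *t, int step)
{
  for (int i = 0; i < t->nroutes; i++)
  {
    struct route *r = &t->routes[i];
    if (!r->live)
      continue;
    if (r->sender->flushing || (step && r->origin->flushing))
    {
      if (step)
        printf("warn: route from %s still in %s after flush\n",
               r->origin->name, t->name);
      r->live = 0;              /* rte_discard() in the real code */
    }
  }
  t->prune_state = 0;
}

/* The two-step loop: step 0 visits only the tables marked by the flush
   loop, then all tables are re-marked and step 1 sweeps what is left. */
static void prune_loop(struct table *tabs, int ntabs)
{
  for (int step = 0; step <= 1; step++)
    for (int i = 0; i < ntabs; i++)
    {
      if (step)
        tabs[i].prune_state = 1;   /* "Prepare for the second step" */
      if (tabs[i].prune_state)
        prune_step(&tabs[i], step);
    }
}

int main(void)
{
  struct proto gone = { "old_proto", 1 };
  struct proto ok   = { "ok_proto", 0 };
  struct route routes[] = {
    { &gone, &gone, 1 },   /* pruned in step 0 */
    { &ok,   &gone, 1 },   /* e.g. re-announced through a pipe; pruned in step 1 */
    { &ok,   &ok,   1 },   /* survives */
  };
  struct table t = { "master", routes, 3, 1 };

  prune_loop(&t, 1);
  for (int i = 0; i < 3; i++)
    printf("route %d: %s\n", i, routes[i].live ? "kept" : "pruned");
  return 0;
}

In the real loop the step counter is a static that persists across calls, and rt_prune_step() keeps a per-table prune_fit iterator plus the shared max_feed budget, so pruning can yield back to the event loop when the budget is spent and resume where it left off.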