/*
 * BIRD -- Linux Netlink Interface
 *
 * (c) 1999--2000 Martin Mares <mj@ucw.cz>
 *
 * Can be freely distributed and used under the terms of the GNU GPL.
 */

#include <alloca.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <errno.h>

#undef LOCAL_DEBUG

#include "nest/bird.h"
#include "nest/rt.h"
#include "nest/protocol.h"
#include "nest/iface.h"
#include "lib/alloca.h"
#include "sysdep/unix/unix.h"
#include "sysdep/unix/krt.h"
#include "lib/socket.h"
#include "lib/string.h"
#include "lib/hash.h"
#include "lib/macro.h"
#include "conf/conf.h"

#include <asm/types.h>
#include <linux/if.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#ifdef HAVE_MPLS_KERNEL
#include <linux/lwtunnel.h>
#endif

#ifndef MSG_TRUNC /* Hack: Several versions of glibc miss this one :( */
#define MSG_TRUNC 0x20
#endif

#ifndef IFA_FLAGS
#define IFA_FLAGS 8
#endif

#ifndef IFF_LOWER_UP
#define IFF_LOWER_UP 0x10000
#endif

#ifndef RTA_TABLE
#define RTA_TABLE 15
#endif

#ifndef RTA_VIA
#define RTA_VIA 18
#endif

#ifndef RTA_NEWDST
#define RTA_NEWDST 19
#endif

#ifndef RTA_ENCAP_TYPE
#define RTA_ENCAP_TYPE 21
#endif

#ifndef RTA_ENCAP
#define RTA_ENCAP 22
#endif

#ifndef NETLINK_GET_STRICT_CHK
#define NETLINK_GET_STRICT_CHK 12
#endif

#define krt_ipv4(p) ((p)->af == AF_INET)
#define krt_ecmp6(p) ((p)->af == AF_INET6)

const int rt_default_ecmp = 16;

/*
 * Structure nl_parse_state keeps the state of received route processing.
 * Ideally, we could just independently parse received Netlink messages and
 * immediately propagate received routes to the rest of BIRD, but older Linux
 * kernels (before version 4.11) represent and announce IPv6 ECMP routes not as
 * one route with multiple next hops (like RTA_MULTIPATH in IPv4 ECMP), but as
 * a sequence of routes with the same prefix. More recent kernels behave the
 * same as for IPv4.
 *
 * Therefore, BIRD keeps the currently processed route in the nl_parse_state
 * structure and postpones its propagation until we expect it to be final;
 * i.e., when a non-matching route is received or when the scan ends. When
 * another matching route is received, it is merged with the already processed
 * route to form an ECMP route. Note that merging is done only for IPv6
 * (merge == 1), but the postponing is done in both cases (for simplicity).
 * All IPv4 routes and IPv6 routes with RTA_MULTIPATH set are just considered
 * non-matching.
 *
 * This is ignored for asynchronous notifications (every notification is
 * handled as a separate route). That is not an issue for our own routes, as we
 * ignore such notifications anyway, but importing alien IPv6 ECMP routes does
 * not work properly with older kernels.
 *
 * Whatever the kernel version is, we send IPv6 ECMP routes to the kernel as
 * multiple routes for the same prefix.
 */
|
|
|
|
struct nl_parse_state
|
|
{
|
|
struct linpool *pool;
|
|
int scan;
|
|
int merge;
|
|
|
|
net_addr *net;
|
|
ea_list *attrs;
|
|
struct krt_proto *proto;
|
|
s8 new;
|
|
s8 krt_src;
|
|
u8 krt_type;
|
|
u8 krt_proto;
|
|
u32 krt_metric;
|
|
|
|
u32 rta_flow; /* Used during parsing */
|
|
};
|
|
|
|
/*
|
|
* Netlink eattr definitions
|
|
*/
|
|
|
|
#define KRT_METRICS_MAX ARRAY_SIZE(ea_krt_metrics)
|
|
#define KRT_FEATURES_MAX 4
|
|
|
|
static void krt_bitfield_format(const eattr *e, byte *buf, uint buflen);
|
|
|
|
static struct ea_class
|
|
ea_krt_prefsrc = {
|
|
.name = "krt_prefsrc",
|
|
.type = T_IP,
|
|
},
|
|
ea_krt_realm = {
|
|
.name = "krt_realm",
|
|
.type = T_INT,
|
|
},
|
|
ea_krt_scope = {
|
|
.name = "krt_scope",
|
|
.type = T_INT,
|
|
};
|
|
|
|
static struct ea_class ea_krt_metrics[] = {
|
|
[RTAX_LOCK] = {
|
|
.name = "krt_lock",
|
|
.type = T_INT,
|
|
.format = krt_bitfield_format,
|
|
},
|
|
[RTAX_FEATURES] = {
|
|
.name = "krt_features",
|
|
.type = T_INT,
|
|
.format = krt_bitfield_format,
|
|
},
|
|
#define KRT_METRIC_INT(_rtax, _name) [_rtax] = { .name = _name, .type = T_INT }
|
|
KRT_METRIC_INT(RTAX_MTU, "krt_mtu"),
|
|
KRT_METRIC_INT(RTAX_WINDOW, "krt_window"),
|
|
KRT_METRIC_INT(RTAX_RTT, "krt_rtt"),
|
|
KRT_METRIC_INT(RTAX_RTTVAR, "krt_rttvar"),
|
|
KRT_METRIC_INT(RTAX_SSTHRESH, "krt_sstresh"),
|
|
KRT_METRIC_INT(RTAX_CWND, "krt_cwnd"),
|
|
KRT_METRIC_INT(RTAX_ADVMSS, "krt_advmss"),
|
|
KRT_METRIC_INT(RTAX_REORDERING, "krt_reordering"),
|
|
KRT_METRIC_INT(RTAX_HOPLIMIT, "krt_hoplimit"),
|
|
KRT_METRIC_INT(RTAX_INITCWND, "krt_initcwnd"),
|
|
KRT_METRIC_INT(RTAX_RTO_MIN, "krt_rto_min"),
|
|
KRT_METRIC_INT(RTAX_INITRWND, "krt_initrwnd"),
|
|
KRT_METRIC_INT(RTAX_QUICKACK, "krt_quickack"),
|
|
#undef KRT_METRIC_INT
|
|
};
|
|
|
|
static const char *krt_metrics_names[KRT_METRICS_MAX] = {
|
|
NULL, "lock", "mtu", "window", "rtt", "rttvar", "sstresh", "cwnd", "advmss",
|
|
"reordering", "hoplimit", "initcwnd", "features", "rto_min", "initrwnd", "quickack"
|
|
};
|
|
|
|
static const char *krt_features_names[KRT_FEATURES_MAX] = {
|
|
"ecn", NULL, NULL, "allfrag"
|
|
};
|
|
|
|
static void
|
|
krt_bitfield_format(const eattr *a, byte *buf, uint buflen)
|
|
{
|
|
if (a->id == ea_krt_metrics[RTAX_LOCK].id)
|
|
ea_format_bitfield(a, buf, buflen, krt_metrics_names, 2, KRT_METRICS_MAX);
|
|
else if (a->id == ea_krt_metrics[RTAX_FEATURES].id)
|
|
ea_format_bitfield(a, buf, buflen, krt_features_names, 0, KRT_FEATURES_MAX);
|
|
}
|
|
|
|
static void
|
|
nl_ea_register(void)
|
|
{
|
|
EA_REGISTER_ALL(
|
|
&ea_krt_prefsrc,
|
|
&ea_krt_realm,
|
|
&ea_krt_scope
|
|
);
|
|
|
|
for (uint i = 0; i < KRT_METRICS_MAX; i++)
|
|
{
|
|
if (!ea_krt_metrics[i].name)
|
|
ea_krt_metrics[i] = (struct ea_class) {
|
|
.name = mb_sprintf(&root_pool, "krt_metric_%d", i),
|
|
.type = T_INT,
|
|
};
|
|
|
|
ea_register_init(&ea_krt_metrics[i]);
|
|
}
|
|
|
|
for (uint i = 1; i < KRT_METRICS_MAX; i++)
|
|
ASSERT_DIE(ea_krt_metrics[i].id == ea_krt_metrics[0].id + i);
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
* Synchronous Netlink interface
|
|
*/
|
|
|
|
struct nl_sock
|
|
{
|
|
int fd;
|
|
u32 seq;
|
|
byte *rx_buffer; /* Receive buffer */
|
|
struct nlmsghdr *last_hdr; /* Recently received packet */
|
|
uint last_size;
|
|
};
|
|
|
|
#define NL_RX_SIZE 32768
|
|
|
|
#define NL_OP_DELETE 0
|
|
#define NL_OP_ADD (NLM_F_CREATE|NLM_F_EXCL)
|
|
#define NL_OP_REPLACE (NLM_F_CREATE|NLM_F_REPLACE)
|
|
#define NL_OP_APPEND (NLM_F_CREATE|NLM_F_APPEND)
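
/*
 * Note (added for clarity; not in the original source): these flag
 * combinations select the kernel-side semantics of RTM_NEWROUTE.
 * NL_OP_ADD fails if the route already exists (NLM_F_EXCL), NL_OP_REPLACE
 * overwrites an existing route, and NL_OP_APPEND adds another route for
 * the same prefix, which is how nl_add_rte() builds up IPv6 ECMP routes
 * from individual next hops.
 */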
|
|
|
|
static linpool *nl_linpool;
|
|
|
|
static struct nl_sock nl_scan = {.fd = -1}; /* Netlink socket for synchronous scan */
|
|
static struct nl_sock nl_req = {.fd = -1}; /* Netlink socket for requests */
|
|
|
|
static void
|
|
nl_open_sock(struct nl_sock *nl)
|
|
{
|
|
if (nl->fd < 0)
|
|
{
|
|
nl->fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
|
|
if (nl->fd < 0)
|
|
die("Unable to open rtnetlink socket: %m");
|
|
nl->seq = (u32) (current_time() TO_S); /* Or perhaps random_u32() ? */
|
|
nl->rx_buffer = xmalloc(NL_RX_SIZE);
|
|
nl->last_hdr = NULL;
|
|
nl->last_size = 0;
|
|
}
|
|
}
|
|
|
|
static void
|
|
nl_set_strict_dump(struct nl_sock *nl UNUSED, int strict UNUSED)
|
|
{
|
|
/*
 * Strict checking is not necessary; it merely improves behavior on newer
 * kernels. If it is not available (SOL_NETLINK missing at compile time, or
 * ENOPROTOOPT at run time), we can just ignore it.
 */
|
|
#ifdef SOL_NETLINK
|
|
setsockopt(nl->fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK, &strict, sizeof(strict));
|
|
#endif
|
|
}
|
|
|
|
static void
|
|
nl_set_rcvbuf(int fd, uint val)
|
|
{
|
|
if (setsockopt(fd, SOL_SOCKET, SO_RCVBUFFORCE, &val, sizeof(val)) < 0)
|
|
log(L_WARN "KRT: Cannot set netlink rx buffer size to %u: %m", val);
|
|
}
|
|
|
|
static uint
|
|
nl_cfg_rx_buffer_size(struct config *cfg)
|
|
{
|
|
uint bufsize = 0;
|
|
|
|
struct proto_config *pc;
|
|
WALK_LIST(pc, cfg->protos)
|
|
if ((pc->protocol == &proto_unix_kernel) && !pc->disabled)
|
|
bufsize = MAX(bufsize, ((struct krt_config *) pc)->sys.netlink_rx_buffer);
|
|
|
|
return bufsize;
|
|
}
|
|
|
|
|
|
static void
|
|
nl_open(void)
|
|
{
|
|
nl_open_sock(&nl_scan);
|
|
nl_open_sock(&nl_req);
|
|
|
|
nl_set_strict_dump(&nl_scan, 1);
|
|
}
|
|
|
|
static void
|
|
nl_send(struct nl_sock *nl, struct nlmsghdr *nh)
|
|
{
|
|
struct sockaddr_nl sa;
|
|
|
|
memset(&sa, 0, sizeof(sa));
|
|
sa.nl_family = AF_NETLINK;
|
|
nh->nlmsg_pid = 0;
|
|
nh->nlmsg_seq = ++(nl->seq);
|
|
nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len);
|
|
if (sendto(nl->fd, nh, nh->nlmsg_len, 0, (struct sockaddr *)&sa, sizeof(sa)) < 0)
|
|
die("rtnetlink sendto: %m");
|
|
nl->last_hdr = NULL;
|
|
}
|
|
|
|
static void
|
|
nl_request_dump_link(void)
|
|
{
|
|
struct {
|
|
struct nlmsghdr nh;
|
|
struct ifinfomsg ifi;
|
|
} req = {
|
|
.nh.nlmsg_type = RTM_GETLINK,
|
|
.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
|
|
.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
|
|
.nh.nlmsg_seq = ++(nl_scan.seq),
|
|
.ifi.ifi_family = AF_UNSPEC,
|
|
};
|
|
|
|
send(nl_scan.fd, &req, sizeof(req), 0);
|
|
nl_scan.last_hdr = NULL;
|
|
}
|
|
|
|
static void
|
|
nl_request_dump_addr(int af)
|
|
{
|
|
struct {
|
|
struct nlmsghdr nh;
|
|
struct ifaddrmsg ifa;
|
|
} req = {
|
|
.nh.nlmsg_type = RTM_GETADDR,
|
|
.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
|
|
.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
|
|
.nh.nlmsg_seq = ++(nl_scan.seq),
|
|
.ifa.ifa_family = af,
|
|
};
|
|
|
|
send(nl_scan.fd, &req, sizeof(req), 0);
|
|
nl_scan.last_hdr = NULL;
|
|
}
|
|
|
|
static void
|
|
nl_request_dump_route(int af)
|
|
{
|
|
struct {
|
|
struct nlmsghdr nh;
|
|
struct rtmsg rtm;
|
|
} req = {
|
|
.nh.nlmsg_type = RTM_GETROUTE,
|
|
.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg)),
|
|
.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
|
|
.nh.nlmsg_seq = ++(nl_scan.seq),
|
|
.rtm.rtm_family = af,
|
|
};
|
|
|
|
send(nl_scan.fd, &req, sizeof(req), 0);
|
|
nl_scan.last_hdr = NULL;
|
|
}
|
|
|
|
|
|
static struct nlmsghdr *
|
|
nl_get_reply(struct nl_sock *nl)
|
|
{
|
|
for(;;)
|
|
{
|
|
if (!nl->last_hdr)
|
|
{
|
|
struct iovec iov = { nl->rx_buffer, NL_RX_SIZE };
|
|
struct sockaddr_nl sa;
|
|
struct msghdr m = {
|
|
.msg_name = &sa,
|
|
.msg_namelen = sizeof(sa),
|
|
.msg_iov = &iov,
|
|
.msg_iovlen = 1,
|
|
};
|
|
int x = recvmsg(nl->fd, &m, 0);
|
|
if (x < 0)
|
|
die("nl_get_reply: %m");
|
|
if (sa.nl_pid) /* It isn't from the kernel */
|
|
{
|
|
DBG("Non-kernel packet\n");
|
|
continue;
|
|
}
|
|
nl->last_size = x;
|
|
nl->last_hdr = (void *) nl->rx_buffer;
|
|
if (m.msg_flags & MSG_TRUNC)
|
|
bug("nl_get_reply: got truncated reply which should be impossible");
|
|
}
|
|
if (NLMSG_OK(nl->last_hdr, nl->last_size))
|
|
{
|
|
struct nlmsghdr *h = nl->last_hdr;
|
|
nl->last_hdr = NLMSG_NEXT(h, nl->last_size);
|
|
if (h->nlmsg_seq != nl->seq)
|
|
{
|
|
log(L_WARN "nl_get_reply: Ignoring out of sequence netlink packet (%x != %x)",
|
|
h->nlmsg_seq, nl->seq);
|
|
continue;
|
|
}
|
|
return h;
|
|
}
|
|
if (nl->last_size)
|
|
log(L_WARN "nl_get_reply: Found packet remnant of size %d", nl->last_size);
|
|
nl->last_hdr = NULL;
|
|
}
|
|
}
|
|
|
|
static struct tbf rl_netlink_err = TBF_DEFAULT_LOG_LIMITS;
|
|
|
|
static int
|
|
nl_error(struct nlmsghdr *h, int ignore_esrch)
|
|
{
|
|
struct nlmsgerr *e;
|
|
int ec;
|
|
|
|
if (h->nlmsg_len < NLMSG_LENGTH(sizeof(struct nlmsgerr)))
|
|
{
|
|
log(L_WARN "Netlink: Truncated error message received");
|
|
return ENOBUFS;
|
|
}
|
|
e = (struct nlmsgerr *) NLMSG_DATA(h);
|
|
ec = -e->error;
|
|
if (ec && !(ignore_esrch && (ec == ESRCH)))
|
|
log_rl(&rl_netlink_err, L_WARN "Netlink: %s", strerror(ec));
|
|
return ec;
|
|
}
|
|
|
|
static struct nlmsghdr *
|
|
nl_get_scan(void)
|
|
{
|
|
struct nlmsghdr *h = nl_get_reply(&nl_scan);
|
|
|
|
if (h->nlmsg_type == NLMSG_DONE)
|
|
return NULL;
|
|
if (h->nlmsg_type == NLMSG_ERROR)
|
|
{
|
|
nl_error(h, 0);
|
|
return NULL;
|
|
}
|
|
return h;
|
|
}
|
|
|
|
static int
|
|
nl_exchange(struct nlmsghdr *pkt, int ignore_esrch)
|
|
{
|
|
struct nlmsghdr *h;
|
|
|
|
nl_send(&nl_req, pkt);
|
|
for(;;)
|
|
{
|
|
h = nl_get_reply(&nl_req);
|
|
if (h->nlmsg_type == NLMSG_ERROR)
|
|
break;
|
|
log(L_WARN "nl_exchange: Unexpected reply received");
|
|
}
|
|
return nl_error(h, ignore_esrch) ? -1 : 0;
|
|
}
|
|
|
|
/*
|
|
* Netlink attributes
|
|
*/
|
|
|
|
static int nl_attr_len;
|
|
|
|
static void *
|
|
nl_checkin(struct nlmsghdr *h, int lsize)
|
|
{
|
|
nl_attr_len = h->nlmsg_len - NLMSG_LENGTH(lsize);
|
|
if (nl_attr_len < 0)
|
|
{
|
|
log(L_ERR "nl_checkin: underrun by %d bytes", -nl_attr_len);
|
|
return NULL;
|
|
}
|
|
return NLMSG_DATA(h);
|
|
}
|
|
|
|
struct nl_want_attrs {
|
|
u8 defined:1;
|
|
u8 checksize:1;
|
|
u8 size;
|
|
};
|
|
|
|
|
|
#define BIRD_IFLA_MAX (IFLA_WIRELESS+1)
|
|
|
|
static struct nl_want_attrs ifla_attr_want[BIRD_IFLA_MAX] = {
|
|
[IFLA_IFNAME] = { 1, 0, 0 },
|
|
[IFLA_MTU] = { 1, 1, sizeof(u32) },
|
|
[IFLA_MASTER] = { 1, 1, sizeof(u32) },
|
|
[IFLA_WIRELESS] = { 1, 0, 0 },
|
|
};
|
|
|
|
|
|
#define BIRD_IFA_MAX (IFA_FLAGS+1)
|
|
|
|
static struct nl_want_attrs ifa_attr_want4[BIRD_IFA_MAX] = {
|
|
[IFA_ADDRESS] = { 1, 1, sizeof(ip4_addr) },
|
|
[IFA_LOCAL] = { 1, 1, sizeof(ip4_addr) },
|
|
[IFA_BROADCAST] = { 1, 1, sizeof(ip4_addr) },
|
|
[IFA_FLAGS] = { 1, 1, sizeof(u32) },
|
|
};
|
|
|
|
static struct nl_want_attrs ifa_attr_want6[BIRD_IFA_MAX] = {
|
|
[IFA_ADDRESS] = { 1, 1, sizeof(ip6_addr) },
|
|
[IFA_LOCAL] = { 1, 1, sizeof(ip6_addr) },
|
|
[IFA_FLAGS] = { 1, 1, sizeof(u32) },
|
|
};
|
|
|
|
|
|
#define BIRD_RTA_MAX (RTA_ENCAP+1)
|
|
|
|
static struct nl_want_attrs nexthop_attr_want4[BIRD_RTA_MAX] = {
|
|
[RTA_GATEWAY] = { 1, 1, sizeof(ip4_addr) },
|
|
[RTA_VIA] = { 1, 0, 0 },
|
|
[RTA_FLOW] = { 1, 1, sizeof(u32) },
|
|
[RTA_ENCAP_TYPE]= { 1, 1, sizeof(u16) },
|
|
[RTA_ENCAP] = { 1, 0, 0 },
|
|
};
|
|
|
|
static struct nl_want_attrs nexthop_attr_want6[BIRD_RTA_MAX] = {
|
|
[RTA_GATEWAY] = { 1, 1, sizeof(ip6_addr) },
|
|
[RTA_VIA] = { 1, 0, 0 },
|
|
[RTA_FLOW] = { 1, 1, sizeof(u32) },
|
|
[RTA_ENCAP_TYPE]= { 1, 1, sizeof(u16) },
|
|
[RTA_ENCAP] = { 1, 0, 0 },
|
|
};
|
|
|
|
#ifdef HAVE_MPLS_KERNEL
|
|
static struct nl_want_attrs nexthop_attr_want_mpls[BIRD_RTA_MAX] = {
|
|
[RTA_VIA] = { 1, 0, 0 },
|
|
[RTA_NEWDST] = { 1, 0, 0 },
|
|
};
|
|
|
|
static struct nl_want_attrs encap_mpls_want[BIRD_RTA_MAX] = {
|
|
[RTA_DST] = { 1, 0, 0 },
|
|
};
|
|
#endif
|
|
|
|
static struct nl_want_attrs rtm_attr_want4[BIRD_RTA_MAX] = {
|
|
[RTA_DST] = { 1, 1, sizeof(ip4_addr) },
|
|
[RTA_OIF] = { 1, 1, sizeof(u32) },
|
|
[RTA_GATEWAY] = { 1, 1, sizeof(ip4_addr) },
|
|
[RTA_PRIORITY] = { 1, 1, sizeof(u32) },
|
|
[RTA_PREFSRC] = { 1, 1, sizeof(ip4_addr) },
|
|
[RTA_METRICS] = { 1, 0, 0 },
|
|
[RTA_MULTIPATH] = { 1, 0, 0 },
|
|
[RTA_FLOW] = { 1, 1, sizeof(u32) },
|
|
[RTA_TABLE] = { 1, 1, sizeof(u32) },
|
|
[RTA_VIA] = { 1, 0, 0 },
|
|
[RTA_ENCAP_TYPE]= { 1, 1, sizeof(u16) },
|
|
[RTA_ENCAP] = { 1, 0, 0 },
|
|
};
|
|
|
|
static struct nl_want_attrs rtm_attr_want6[BIRD_RTA_MAX] = {
|
|
[RTA_DST] = { 1, 1, sizeof(ip6_addr) },
|
|
[RTA_SRC] = { 1, 1, sizeof(ip6_addr) },
|
|
[RTA_IIF] = { 1, 1, sizeof(u32) },
|
|
[RTA_OIF] = { 1, 1, sizeof(u32) },
|
|
[RTA_GATEWAY] = { 1, 1, sizeof(ip6_addr) },
|
|
[RTA_PRIORITY] = { 1, 1, sizeof(u32) },
|
|
[RTA_PREFSRC] = { 1, 1, sizeof(ip6_addr) },
|
|
[RTA_METRICS] = { 1, 0, 0 },
|
|
[RTA_MULTIPATH] = { 1, 0, 0 },
|
|
[RTA_FLOW] = { 1, 1, sizeof(u32) },
|
|
[RTA_TABLE] = { 1, 1, sizeof(u32) },
|
|
[RTA_VIA] = { 1, 0, 0 },
|
|
[RTA_ENCAP_TYPE]= { 1, 1, sizeof(u16) },
|
|
[RTA_ENCAP] = { 1, 0, 0 },
|
|
};
|
|
|
|
#ifdef HAVE_MPLS_KERNEL
|
|
static struct nl_want_attrs rtm_attr_want_mpls[BIRD_RTA_MAX] = {
|
|
[RTA_DST] = { 1, 1, sizeof(u32) },
|
|
[RTA_IIF] = { 1, 1, sizeof(u32) },
|
|
[RTA_OIF] = { 1, 1, sizeof(u32) },
|
|
[RTA_PRIORITY] = { 1, 1, sizeof(u32) },
|
|
[RTA_METRICS] = { 1, 0, 0 },
|
|
[RTA_MULTIPATH] = { 1, 0, 0 },
|
|
[RTA_FLOW] = { 1, 1, sizeof(u32) },
|
|
[RTA_TABLE] = { 1, 1, sizeof(u32) },
|
|
[RTA_VIA] = { 1, 0, 0 },
|
|
[RTA_NEWDST] = { 1, 0, 0 },
|
|
};
|
|
#endif
|
|
|
|
|
|
static int
|
|
nl_parse_attrs(struct rtattr *a, struct nl_want_attrs *want, struct rtattr **k, int ksize)
|
|
{
|
|
int max = ksize / sizeof(struct rtattr *);
|
|
bzero(k, ksize);
|
|
|
|
for ( ; RTA_OK(a, nl_attr_len); a = RTA_NEXT(a, nl_attr_len))
|
|
{
|
|
if ((a->rta_type >= max) || !want[a->rta_type].defined)
|
|
continue;
|
|
|
|
if (want[a->rta_type].checksize && (RTA_PAYLOAD(a) != want[a->rta_type].size))
|
|
{
|
|
log(L_ERR "nl_parse_attrs: Malformed attribute received");
|
|
return 0;
|
|
}
|
|
|
|
k[a->rta_type] = a;
|
|
}
|
|
|
|
if (nl_attr_len)
|
|
{
|
|
log(L_ERR "nl_parse_attrs: remnant of size %d", nl_attr_len);
|
|
return 0;
|
|
}
|
|
|
|
return 1;
|
|
}
|
|
|
|
static inline u16 rta_get_u16(struct rtattr *a)
|
|
{ return *(u16 *) RTA_DATA(a); }
|
|
|
|
static inline u32 rta_get_u32(struct rtattr *a)
|
|
{ return *(u32 *) RTA_DATA(a); }
|
|
|
|
static inline ip4_addr rta_get_ip4(struct rtattr *a)
|
|
{ return ip4_ntoh(*(ip4_addr *) RTA_DATA(a)); }
|
|
|
|
static inline ip6_addr rta_get_ip6(struct rtattr *a)
|
|
{ return ip6_ntoh(*(ip6_addr *) RTA_DATA(a)); }
|
|
|
|
static inline ip_addr rta_get_ipa(struct rtattr *a)
|
|
{
|
|
if (RTA_PAYLOAD(a) == sizeof(ip4_addr))
|
|
return ipa_from_ip4(rta_get_ip4(a));
|
|
else
|
|
return ipa_from_ip6(rta_get_ip6(a));
|
|
}
|
|
|
|
#ifdef HAVE_MPLS_KERNEL
|
|
static inline ip_addr rta_get_via(struct rtattr *a)
|
|
{
|
|
struct rtvia *v = RTA_DATA(a);
|
|
switch(v->rtvia_family) {
|
|
case AF_INET: return ipa_from_ip4(ip4_ntoh(*(ip4_addr *) v->rtvia_addr));
|
|
case AF_INET6: return ipa_from_ip6(ip6_ntoh(*(ip6_addr *) v->rtvia_addr));
|
|
}
|
|
return IPA_NONE;
|
|
}
|
|
|
|
static u32 rta_mpls_stack[MPLS_MAX_LABEL_STACK];
|
|
static inline int rta_get_mpls(struct rtattr *a, u32 *stack)
|
|
{
|
|
if (!a)
|
|
return 0;
|
|
|
|
if (RTA_PAYLOAD(a) % 4)
|
|
log(L_WARN "KRT: Strange length of received MPLS stack: %u", RTA_PAYLOAD(a));
|
|
|
|
int labels = mpls_get(RTA_DATA(a), RTA_PAYLOAD(a) & ~0x3, stack);
|
|
|
|
if (labels < 0)
|
|
{
|
|
log(L_WARN "KRT: Too long MPLS stack received, ignoring");
|
|
labels = 0;
|
|
}
|
|
|
|
return labels;
|
|
}
|
|
#endif
|
|
|
|
struct rtattr *
|
|
nl_add_attr(struct nlmsghdr *h, uint bufsize, uint code, const void *data, uint dlen)
|
|
{
|
|
uint pos = NLMSG_ALIGN(h->nlmsg_len);
|
|
uint len = RTA_LENGTH(dlen);
|
|
|
|
if (pos + len > bufsize)
|
|
bug("nl_add_attr: packet buffer overflow");
|
|
|
|
struct rtattr *a = (struct rtattr *)((char *)h + pos);
|
|
a->rta_type = code;
|
|
a->rta_len = len;
|
|
h->nlmsg_len = pos + len;
|
|
|
|
if (dlen > 0)
|
|
memcpy(RTA_DATA(a), data, dlen);
|
|
|
|
return a;
|
|
}
|
|
|
|
static inline struct rtattr *
|
|
nl_open_attr(struct nlmsghdr *h, uint bufsize, uint code)
|
|
{
|
|
return nl_add_attr(h, bufsize, code, NULL, 0);
|
|
}
|
|
|
|
static inline void
|
|
nl_close_attr(struct nlmsghdr *h, struct rtattr *a)
|
|
{
|
|
a->rta_len = (void *)h + NLMSG_ALIGN(h->nlmsg_len) - (void *)a;
|
|
}
|
|
|
|
static inline void
|
|
nl_add_attr_u16(struct nlmsghdr *h, uint bufsize, int code, u16 data)
|
|
{
|
|
nl_add_attr(h, bufsize, code, &data, 2);
|
|
}
|
|
|
|
static inline void
|
|
nl_add_attr_u32(struct nlmsghdr *h, uint bufsize, int code, u32 data)
|
|
{
|
|
nl_add_attr(h, bufsize, code, &data, 4);
|
|
}
|
|
|
|
static inline void
|
|
nl_add_attr_ip4(struct nlmsghdr *h, uint bufsize, int code, ip4_addr ip4)
|
|
{
|
|
ip4 = ip4_hton(ip4);
|
|
nl_add_attr(h, bufsize, code, &ip4, sizeof(ip4));
|
|
}
|
|
|
|
static inline void
|
|
nl_add_attr_ip6(struct nlmsghdr *h, uint bufsize, int code, ip6_addr ip6)
|
|
{
|
|
ip6 = ip6_hton(ip6);
|
|
nl_add_attr(h, bufsize, code, &ip6, sizeof(ip6));
|
|
}
|
|
|
|
static inline void
|
|
nl_add_attr_ipa(struct nlmsghdr *h, uint bufsize, int code, ip_addr ipa)
|
|
{
|
|
if (ipa_is_ip4(ipa))
|
|
nl_add_attr_ip4(h, bufsize, code, ipa_to_ip4(ipa));
|
|
else
|
|
nl_add_attr_ip6(h, bufsize, code, ipa_to_ip6(ipa));
|
|
}
|
|
|
|
#ifdef HAVE_MPLS_KERNEL
|
|
static inline void
|
|
nl_add_attr_mpls(struct nlmsghdr *h, uint bufsize, int code, int len, u32 *stack)
|
|
{
|
|
char buf[len*4];
|
|
mpls_put(buf, len, stack);
|
|
nl_add_attr(h, bufsize, code, buf, len*4);
|
|
}
|
|
|
|
static inline void
|
|
nl_add_attr_mpls_encap(struct nlmsghdr *h, uint bufsize, int len, u32 *stack)
|
|
{
|
|
nl_add_attr_u16(h, bufsize, RTA_ENCAP_TYPE, LWTUNNEL_ENCAP_MPLS);
|
|
|
|
struct rtattr *nest = nl_open_attr(h, bufsize, RTA_ENCAP);
|
|
nl_add_attr_mpls(h, bufsize, RTA_DST, len, stack);
|
|
nl_close_attr(h, nest);
|
|
}
|
|
|
|
static inline void
|
|
nl_add_attr_via(struct nlmsghdr *h, uint bufsize, ip_addr ipa)
|
|
{
|
|
struct rtvia *via = alloca(sizeof(struct rtvia) + 16);
|
|
|
|
if (ipa_is_ip4(ipa))
|
|
{
|
|
via->rtvia_family = AF_INET;
|
|
put_ip4(via->rtvia_addr, ipa_to_ip4(ipa));
|
|
nl_add_attr(h, bufsize, RTA_VIA, via, sizeof(struct rtvia) + 4);
|
|
}
|
|
else
|
|
{
|
|
via->rtvia_family = AF_INET6;
|
|
put_ip6(via->rtvia_addr, ipa_to_ip6(ipa));
|
|
nl_add_attr(h, bufsize, RTA_VIA, via, sizeof(struct rtvia) + 16);
|
|
}
|
|
}
|
|
#endif
|
|
|
|
static inline struct rtnexthop *
|
|
nl_open_nexthop(struct nlmsghdr *h, uint bufsize)
|
|
{
|
|
uint pos = NLMSG_ALIGN(h->nlmsg_len);
|
|
uint len = RTNH_LENGTH(0);
|
|
|
|
if (pos + len > bufsize)
|
|
bug("nl_open_nexthop: packet buffer overflow");
|
|
|
|
h->nlmsg_len = pos + len;
|
|
|
|
return (void *)h + pos;
|
|
}
|
|
|
|
static inline void
|
|
nl_close_nexthop(struct nlmsghdr *h, struct rtnexthop *nh)
|
|
{
|
|
nh->rtnh_len = (void *)h + NLMSG_ALIGN(h->nlmsg_len) - (void *)nh;
|
|
}
|
|
|
|
static inline void
|
|
nl_add_nexthop(struct nlmsghdr *h, uint bufsize, struct nexthop *nh, int af UNUSED)
|
|
{
|
|
#ifdef HAVE_MPLS_KERNEL
|
|
if (nh->labels > 0)
|
|
if (af == AF_MPLS)
|
|
nl_add_attr_mpls(h, bufsize, RTA_NEWDST, nh->labels, nh->label);
|
|
else
|
|
nl_add_attr_mpls_encap(h, bufsize, nh->labels, nh->label);
|
|
|
|
if (ipa_nonzero(nh->gw))
|
|
{
|
|
if (af == (ipa_is_ip4(nh->gw) ? AF_INET : AF_INET6))
|
|
nl_add_attr_ipa(h, bufsize, RTA_GATEWAY, nh->gw);
|
|
else
|
|
nl_add_attr_via(h, bufsize, nh->gw);
|
|
}
|
|
#else
|
|
|
|
if (ipa_nonzero(nh->gw))
|
|
nl_add_attr_ipa(h, bufsize, RTA_GATEWAY, nh->gw);
|
|
#endif
|
|
}
|
|
|
|
static void
|
|
nl_add_multipath(struct nlmsghdr *h, uint bufsize, struct nexthop_adata *nhad, int af, ea_list *eattrs)
|
|
{
|
|
struct rtattr *a = nl_open_attr(h, bufsize, RTA_MULTIPATH);
|
|
eattr *flow = ea_find(eattrs, &ea_krt_realm);
|
|
|
|
NEXTHOP_WALK(nh, nhad)
|
|
{
|
|
struct rtnexthop *rtnh = nl_open_nexthop(h, bufsize);
|
|
|
|
rtnh->rtnh_flags = 0;
|
|
rtnh->rtnh_hops = nh->weight;
|
|
rtnh->rtnh_ifindex = nh->iface->index;
|
|
|
|
nl_add_nexthop(h, bufsize, nh, af);
|
|
|
|
if (nh->flags & RNF_ONLINK)
|
|
rtnh->rtnh_flags |= RTNH_F_ONLINK;
|
|
|
|
/* Our KRT_REALM is per-route, but kernel RTA_FLOW is per-nexthop.
|
|
Therefore, we need to attach the same attribute to each nexthop. */
|
|
if (flow)
|
|
nl_add_attr_u32(h, bufsize, RTA_FLOW, flow->u.data);
|
|
|
|
nl_close_nexthop(h, rtnh);
|
|
}
|
|
|
|
nl_close_attr(h, a);
|
|
}
|
|
|
|
static struct nexthop_adata *
|
|
nl_parse_multipath(struct nl_parse_state *s, struct krt_proto *p, const net_addr *n, struct rtattr *ra, int af, int krt_src)
|
|
{
|
|
struct rtattr *a[BIRD_RTA_MAX];
|
|
struct rtnexthop *nh, *orig_nh = RTA_DATA(ra);
|
|
unsigned len, orig_len = RTA_PAYLOAD(ra);
|
|
uint cnt = 0;
|
|
|
|
/* First count the nexthops */
|
|
for (len = orig_len, nh = orig_nh; len; len -= NLMSG_ALIGN(nh->rtnh_len), nh = RTNH_NEXT(nh))
|
|
{
|
|
/* Use RTNH_OK(nh,len) ?? */
|
|
if ((len < sizeof(*nh)) || (len < nh->rtnh_len))
|
|
goto err;
|
|
|
|
if ((nh->rtnh_flags & RTNH_F_DEAD) && (krt_src != KRT_SRC_BIRD))
|
|
;
|
|
else
|
|
cnt++;
|
|
}
|
|
|
|
struct nexthop_adata *nhad = lp_allocz(s->pool, cnt * NEXTHOP_MAX_SIZE + sizeof *nhad);
|
|
struct nexthop *rv = &nhad->nh;
|
|
|
|
for (len = orig_len, nh = orig_nh; len; len -= NLMSG_ALIGN(nh->rtnh_len), nh = RTNH_NEXT(nh))
|
|
{
|
|
/* Use RTNH_OK(nh,len) ?? */
|
|
if ((len < sizeof(*nh)) || (len < nh->rtnh_len))
|
|
goto err;
|
|
|
|
if ((nh->rtnh_flags & RTNH_F_DEAD) && (krt_src != KRT_SRC_BIRD))
|
|
continue;
|
|
|
|
*rv = (struct nexthop) {
|
|
.weight = nh->rtnh_hops,
|
|
.iface = if_find_by_index(nh->rtnh_ifindex),
|
|
};
|
|
|
|
if (!rv->iface)
|
|
{
|
|
log(L_ERR "KRT: Received route %N with unknown ifindex %u", n, nh->rtnh_ifindex);
|
|
return NULL;
|
|
}
|
|
|
|
/* Nonexistent RTNH_PAYLOAD ?? */
|
|
nl_attr_len = nh->rtnh_len - RTNH_LENGTH(0);
|
|
switch (af)
|
|
{
|
|
case AF_INET:
|
|
if (!nl_parse_attrs(RTNH_DATA(nh), nexthop_attr_want4, a, sizeof(a)))
|
|
goto err;
|
|
break;
|
|
|
|
case AF_INET6:
|
|
if (!nl_parse_attrs(RTNH_DATA(nh), nexthop_attr_want6, a, sizeof(a)))
|
|
goto err;
|
|
break;
|
|
|
|
#ifdef HAVE_MPLS_KERNEL
|
|
case AF_MPLS:
|
|
if (!nl_parse_attrs(RTNH_DATA(nh), nexthop_attr_want_mpls, a, sizeof(a)))
|
|
goto err;
|
|
|
|
if (a[RTA_NEWDST])
|
|
rv->labels = rta_get_mpls(a[RTA_NEWDST], rv->label);
|
|
|
|
break;
|
|
#endif
|
|
|
|
default:
|
|
goto err;
|
|
}
|
|
|
|
if (a[RTA_GATEWAY])
|
|
rv->gw = rta_get_ipa(a[RTA_GATEWAY]);
|
|
|
|
if (a[RTA_FLOW])
|
|
s->rta_flow = rta_get_u32(a[RTA_FLOW]);
|
|
|
|
#ifdef HAVE_MPLS_KERNEL
|
|
if (a[RTA_VIA])
|
|
rv->gw = rta_get_via(a[RTA_VIA]);
|
|
#endif
|
|
|
|
if (ipa_nonzero(rv->gw))
|
|
{
|
|
if (nh->rtnh_flags & RTNH_F_ONLINK)
|
|
rv->flags |= RNF_ONLINK;
|
|
|
|
neighbor *nbr;
|
|
nbr = neigh_find(&p->p, rv->gw, rv->iface,
|
|
(rv->flags & RNF_ONLINK) ? NEF_ONLINK : 0);
|
|
if (!nbr || (nbr->scope == SCOPE_HOST))
|
|
{
|
|
log(L_ERR "KRT: Received route %N with strange next-hop %I", n, rv->gw);
|
|
return NULL;
|
|
}
|
|
}
|
|
|
|
#ifdef HAVE_MPLS_KERNEL
|
|
if (a[RTA_ENCAP] && a[RTA_ENCAP_TYPE])
|
|
{
|
|
if (rta_get_u16(a[RTA_ENCAP_TYPE]) != LWTUNNEL_ENCAP_MPLS)
|
|
{
|
|
log(L_WARN "KRT: Received route %N with unknown encapsulation method %d",
|
|
n, rta_get_u16(a[RTA_ENCAP_TYPE]));
|
|
return NULL;
|
|
}
|
|
|
|
struct rtattr *enca[BIRD_RTA_MAX];
|
|
nl_attr_len = RTA_PAYLOAD(a[RTA_ENCAP]);
|
|
nl_parse_attrs(RTA_DATA(a[RTA_ENCAP]), encap_mpls_want, enca, sizeof(enca));
|
|
rv->labels = rta_get_mpls(enca[RTA_DST], rv->label);
|
|
}
|
|
#endif
|
|
|
|
rv = NEXTHOP_NEXT(rv);
|
|
}
|
|
|
|
/* Store final length */
|
|
nhad->ad.length = (void *) rv - (void *) nhad->ad.data;
|
|
|
|
/* Ensure nexthops are sorted to satisfy nest invariant */
|
|
return nexthop_is_sorted(nhad) ? nhad : nexthop_sort(nhad, s->pool);
|
|
|
|
err:
|
|
log(L_ERR "KRT: Received strange multipath route %N", n);
|
|
return NULL;
|
|
}
|
|
|
|
static void
|
|
nl_add_metrics(struct nlmsghdr *h, uint bufsize, u32 *metrics, int max)
|
|
{
|
|
struct rtattr *a = nl_open_attr(h, bufsize, RTA_METRICS);
|
|
int t;
|
|
|
|
for (t = 1; t < max; t++)
|
|
if (metrics[0] & (1 << t))
|
|
nl_add_attr_u32(h, bufsize, t, metrics[t]);
|
|
|
|
nl_close_attr(h, a);
|
|
}
|
|
|
|
static int
|
|
nl_parse_metrics(struct rtattr *hdr, u32 *metrics, int max)
|
|
{
|
|
struct rtattr *a = RTA_DATA(hdr);
|
|
int len = RTA_PAYLOAD(hdr);
|
|
|
|
metrics[0] = 0;
|
|
for (; RTA_OK(a, len); a = RTA_NEXT(a, len))
|
|
{
|
|
if (a->rta_type == RTA_UNSPEC)
|
|
continue;
|
|
|
|
if (a->rta_type >= max)
|
|
continue;
|
|
|
|
if (RTA_PAYLOAD(a) != 4)
|
|
return -1;
|
|
|
|
metrics[0] |= 1 << a->rta_type;
|
|
metrics[a->rta_type] = rta_get_u32(a);
|
|
}
|
|
|
|
if (len > 0)
|
|
return -1;
|
|
|
|
return 0;
|
|
}
|
|
|
|
|
|
/*
|
|
* Scanning of interfaces
|
|
*/
|
|
|
|
static void
|
|
nl_parse_link(struct nlmsghdr *h, int scan)
|
|
{
|
|
struct ifinfomsg *i;
|
|
struct rtattr *a[BIRD_IFLA_MAX];
|
|
int new = h->nlmsg_type == RTM_NEWLINK;
|
|
struct iface f = {};
|
|
struct iface *ifi;
|
|
char *name;
|
|
u32 mtu, master = 0;
|
|
uint fl;
|
|
|
|
if (!(i = nl_checkin(h, sizeof(*i))) || !nl_parse_attrs(IFLA_RTA(i), ifla_attr_want, a, sizeof(a)))
|
|
return;
|
|
if (!a[IFLA_IFNAME] || (RTA_PAYLOAD(a[IFLA_IFNAME]) < 2) || !a[IFLA_MTU])
|
|
{
|
|
/*
 * IFLA_IFNAME and IFLA_MTU are in fact required, but a message with
 * IFLA_WIRELESS set may arrive without them (e.g., without IFLA_IFNAME).
 * We simply ignore all such messages carrying IFLA_WIRELESS without notice.
 */
|
|
|
|
if (a[IFLA_WIRELESS])
|
|
return;
|
|
|
|
log(L_ERR "KIF: Malformed message received");
|
|
return;
|
|
}
|
|
|
|
name = RTA_DATA(a[IFLA_IFNAME]);
|
|
mtu = rta_get_u32(a[IFLA_MTU]);
|
|
|
|
if (a[IFLA_MASTER])
|
|
master = rta_get_u32(a[IFLA_MASTER]);
|
|
|
|
ifi = if_find_by_index(i->ifi_index);
|
|
if (!new)
|
|
{
|
|
DBG("KIF: IF%d(%s) goes down\n", i->ifi_index, name);
|
|
if (!ifi)
|
|
return;
|
|
|
|
if_delete(ifi);
|
|
}
|
|
else
|
|
{
|
|
DBG("KIF: IF%d(%s) goes up (mtu=%d,flg=%x)\n", i->ifi_index, name, mtu, i->ifi_flags);
|
|
if (ifi && strncmp(ifi->name, name, sizeof(ifi->name)-1))
|
|
if_delete(ifi);
|
|
|
|
strncpy(f.name, name, sizeof(f.name)-1);
|
|
f.index = i->ifi_index;
|
|
f.mtu = mtu;
|
|
|
|
f.master_index = master;
|
|
f.master = if_find_by_index(master);
|
|
|
|
fl = i->ifi_flags;
|
|
if (fl & IFF_UP)
|
|
f.flags |= IF_ADMIN_UP;
|
|
if (fl & IFF_LOWER_UP)
|
|
f.flags |= IF_LINK_UP;
|
|
if (fl & IFF_LOOPBACK) /* Loopback */
|
|
f.flags |= IF_MULTIACCESS | IF_LOOPBACK | IF_IGNORE;
|
|
else if (fl & IFF_POINTOPOINT) /* PtP */
|
|
f.flags |= IF_MULTICAST;
|
|
else if (fl & IFF_BROADCAST) /* Broadcast */
|
|
f.flags |= IF_MULTIACCESS | IF_BROADCAST | IF_MULTICAST;
|
|
else
|
|
f.flags |= IF_MULTIACCESS; /* NBMA */
|
|
|
|
if (fl & IFF_MULTICAST)
|
|
f.flags |= IF_MULTICAST;
|
|
|
|
ifi = if_update(&f);
|
|
|
|
if (!scan)
|
|
if_end_partial_update(ifi);
|
|
}
|
|
}
|
|
|
|
static void
|
|
nl_parse_addr4(struct ifaddrmsg *i, int scan, int new)
|
|
{
|
|
struct rtattr *a[BIRD_IFA_MAX];
|
|
struct iface *ifi;
|
|
u32 ifa_flags;
|
|
int scope;
|
|
|
|
if (!nl_parse_attrs(IFA_RTA(i), ifa_attr_want4, a, sizeof(a)))
|
|
return;
|
|
|
|
if (!a[IFA_LOCAL])
|
|
{
|
|
log(L_ERR "KIF: Malformed message received (missing IFA_LOCAL)");
|
|
return;
|
|
}
|
|
if (!a[IFA_ADDRESS])
|
|
{
|
|
log(L_ERR "KIF: Malformed message received (missing IFA_ADDRESS)");
|
|
return;
|
|
}
|
|
|
|
ifi = if_find_by_index(i->ifa_index);
|
|
if (!ifi)
|
|
{
|
|
log(L_ERR "KIF: Received address message for unknown interface %d", i->ifa_index);
|
|
return;
|
|
}
|
|
|
|
if (a[IFA_FLAGS])
|
|
ifa_flags = rta_get_u32(a[IFA_FLAGS]);
|
|
else
|
|
ifa_flags = i->ifa_flags;
|
|
|
|
struct ifa ifa;
|
|
bzero(&ifa, sizeof(ifa));
|
|
ifa.iface = ifi;
|
|
if (ifa_flags & IFA_F_SECONDARY)
|
|
ifa.flags |= IA_SECONDARY;
|
|
|
|
ifa.ip = rta_get_ipa(a[IFA_LOCAL]);
|
|
|
|
if (i->ifa_prefixlen > IP4_MAX_PREFIX_LENGTH)
|
|
{
|
|
log(L_ERR "KIF: Invalid prefix length for interface %s: %d", ifi->name, i->ifa_prefixlen);
|
|
new = 0;
|
|
}
|
|
if (i->ifa_prefixlen == IP4_MAX_PREFIX_LENGTH)
|
|
{
|
|
ifa.brd = rta_get_ipa(a[IFA_ADDRESS]);
|
|
net_fill_ip4(&ifa.prefix, rta_get_ip4(a[IFA_ADDRESS]), i->ifa_prefixlen);
|
|
|
|
/* It is either a host address or a peer address */
|
|
if (ipa_equal(ifa.ip, ifa.brd))
|
|
ifa.flags |= IA_HOST;
|
|
else
|
|
{
|
|
ifa.flags |= IA_PEER;
|
|
ifa.opposite = ifa.brd;
|
|
}
|
|
}
|
|
else
|
|
{
|
|
net_fill_ip4(&ifa.prefix, ipa_to_ip4(ifa.ip), i->ifa_prefixlen);
|
|
net_normalize(&ifa.prefix);
|
|
|
|
if (i->ifa_prefixlen == IP4_MAX_PREFIX_LENGTH - 1)
|
|
ifa.opposite = ipa_opposite_m1(ifa.ip);
|
|
|
|
if (i->ifa_prefixlen == IP4_MAX_PREFIX_LENGTH - 2)
|
|
ifa.opposite = ipa_opposite_m2(ifa.ip);
|
|
|
|
if (ifi->flags & IF_BROADCAST)
|
|
{
|
|
/* If kernel offers us a broadcast address, we trust it */
|
|
if (a[IFA_BROADCAST])
|
|
ifa.brd = ipa_from_ip4(rta_get_ip4(a[IFA_BROADCAST]));
|
|
/* Otherwise we create one (except for /31) */
|
|
else if (i->ifa_prefixlen < (IP4_MAX_PREFIX_LENGTH - 1))
|
|
ifa.brd = ipa_from_ip4(ip4_or(ipa_to_ip4(ifa.ip),
|
|
ip4_not(ip4_mkmask(i->ifa_prefixlen))));
|
|
}
|
|
}
|
|
|
|
scope = ipa_classify(ifa.ip);
|
|
if (scope < 0)
|
|
{
|
|
log(L_ERR "KIF: Invalid interface address %I for %s", ifa.ip, ifi->name);
|
|
return;
|
|
}
|
|
ifa.scope = scope & IADDR_SCOPE_MASK;
|
|
|
|
DBG("KIF: IF%d(%s): %s IPA %I, flg %x, net %N, brd %I, opp %I\n",
|
|
ifi->index, ifi->name,
|
|
new ? "added" : "removed",
|
|
ifa.ip, ifa.flags, &ifa.prefix, ifa.brd, ifa.opposite);
|
|
|
|
if (new)
|
|
ifa_update(&ifa);
|
|
else
|
|
ifa_delete(&ifa);
|
|
|
|
if (!scan)
|
|
if_end_partial_update(ifi);
|
|
}
|
|
|
|
static void
|
|
nl_parse_addr6(struct ifaddrmsg *i, int scan, int new)
|
|
{
|
|
struct rtattr *a[BIRD_IFA_MAX];
|
|
struct iface *ifi;
|
|
u32 ifa_flags;
|
|
int scope;
|
|
|
|
if (!nl_parse_attrs(IFA_RTA(i), ifa_attr_want6, a, sizeof(a)))
|
|
return;
|
|
|
|
if (!a[IFA_ADDRESS])
|
|
{
|
|
log(L_ERR "KIF: Malformed message received (missing IFA_ADDRESS)");
|
|
return;
|
|
}
|
|
|
|
ifi = if_find_by_index(i->ifa_index);
|
|
if (!ifi)
|
|
{
|
|
log(L_ERR "KIF: Received address message for unknown interface %d", i->ifa_index);
|
|
return;
|
|
}
|
|
|
|
if (a[IFA_FLAGS])
|
|
ifa_flags = rta_get_u32(a[IFA_FLAGS]);
|
|
else
|
|
ifa_flags = i->ifa_flags;
|
|
|
|
struct ifa ifa;
|
|
bzero(&ifa, sizeof(ifa));
|
|
ifa.iface = ifi;
|
|
if (ifa_flags & IFA_F_SECONDARY)
|
|
ifa.flags |= IA_SECONDARY;
|
|
|
|
/* Ignore tentative addresses silently */
|
|
if (ifa_flags & IFA_F_TENTATIVE)
|
|
return;
|
|
|
|
/* IFA_LOCAL can be unset for IPv6 interfaces */
|
|
ifa.ip = rta_get_ipa(a[IFA_LOCAL] ? : a[IFA_ADDRESS]);
|
|
|
|
if (i->ifa_prefixlen > IP6_MAX_PREFIX_LENGTH)
|
|
{
|
|
log(L_ERR "KIF: Invalid prefix length for interface %s: %d", ifi->name, i->ifa_prefixlen);
|
|
new = 0;
|
|
}
|
|
if (i->ifa_prefixlen == IP6_MAX_PREFIX_LENGTH)
|
|
{
|
|
ifa.brd = rta_get_ipa(a[IFA_ADDRESS]);
|
|
net_fill_ip6(&ifa.prefix, rta_get_ip6(a[IFA_ADDRESS]), i->ifa_prefixlen);
|
|
|
|
/* It is either a host address or a peer address */
|
|
if (ipa_equal(ifa.ip, ifa.brd))
|
|
ifa.flags |= IA_HOST;
|
|
else
|
|
{
|
|
ifa.flags |= IA_PEER;
|
|
ifa.opposite = ifa.brd;
|
|
}
|
|
}
|
|
else
|
|
{
|
|
net_fill_ip6(&ifa.prefix, ipa_to_ip6(ifa.ip), i->ifa_prefixlen);
|
|
net_normalize(&ifa.prefix);
|
|
|
|
if (i->ifa_prefixlen == IP6_MAX_PREFIX_LENGTH - 1)
|
|
ifa.opposite = ipa_opposite_m1(ifa.ip);
|
|
}
|
|
|
|
scope = ipa_classify(ifa.ip);
|
|
if (scope < 0)
|
|
{
|
|
log(L_ERR "KIF: Invalid interface address %I for %s", ifa.ip, ifi->name);
|
|
return;
|
|
}
|
|
ifa.scope = scope & IADDR_SCOPE_MASK;
|
|
|
|
DBG("KIF: IF%d(%s): %s IPA %I, flg %x, net %N, brd %I, opp %I\n",
|
|
ifi->index, ifi->name,
|
|
new ? "added" : "removed",
|
|
ifa.ip, ifa.flags, &ifa.prefix, ifa.brd, ifa.opposite);
|
|
|
|
if (new)
|
|
ifa_update(&ifa);
|
|
else
|
|
ifa_delete(&ifa);
|
|
|
|
if (!scan)
|
|
if_end_partial_update(ifi);
|
|
}
|
|
|
|
static void
|
|
nl_parse_addr(struct nlmsghdr *h, int scan)
|
|
{
|
|
struct ifaddrmsg *i;
|
|
|
|
if (!(i = nl_checkin(h, sizeof(*i))))
|
|
return;
|
|
|
|
int new = (h->nlmsg_type == RTM_NEWADDR);
|
|
|
|
switch (i->ifa_family)
|
|
{
|
|
case AF_INET:
|
|
return nl_parse_addr4(i, scan, new);
|
|
|
|
case AF_INET6:
|
|
return nl_parse_addr6(i, scan, new);
|
|
}
|
|
}
|
|
|
|
void
|
|
kif_do_scan(struct kif_proto *p UNUSED)
|
|
{
|
|
struct nlmsghdr *h;
|
|
|
|
if_start_update();
|
|
|
|
nl_request_dump_link();
|
|
while (h = nl_get_scan())
|
|
if (h->nlmsg_type == RTM_NEWLINK || h->nlmsg_type == RTM_DELLINK)
|
|
nl_parse_link(h, 1);
|
|
else
|
|
log(L_DEBUG "nl_scan_ifaces: Unknown packet received (type=%d)", h->nlmsg_type);
|
|
|
|
/* Re-resolve master interface for slaves */
|
|
struct iface *i;
|
|
WALK_LIST(i, iface_list)
|
|
if (i->master_index)
|
|
{
|
|
struct iface f = {
|
|
.flags = i->flags,
|
|
.mtu = i->mtu,
|
|
.index = i->index,
|
|
.master_index = i->master_index,
|
|
.master = if_find_by_index(i->master_index)
|
|
};
|
|
|
|
if (f.master != i->master)
|
|
{
|
|
memcpy(f.name, i->name, sizeof(f.name));
|
|
if_update(&f);
|
|
}
|
|
}
|
|
|
|
nl_request_dump_addr(AF_INET);
|
|
while (h = nl_get_scan())
|
|
if (h->nlmsg_type == RTM_NEWADDR || h->nlmsg_type == RTM_DELADDR)
|
|
nl_parse_addr(h, 1);
|
|
else
|
|
log(L_DEBUG "nl_scan_ifaces: Unknown packet received (type=%d)", h->nlmsg_type);
|
|
|
|
nl_request_dump_addr(AF_INET6);
|
|
while (h = nl_get_scan())
|
|
if (h->nlmsg_type == RTM_NEWADDR || h->nlmsg_type == RTM_DELADDR)
|
|
nl_parse_addr(h, 1);
|
|
else
|
|
log(L_DEBUG "nl_scan_ifaces: Unknown packet received (type=%d)", h->nlmsg_type);
|
|
|
|
if_end_update();
|
|
}
|
|
|
|
/*
|
|
* Routes
|
|
*/
|
|
|
|
static inline u32
|
|
krt_table_id(struct krt_proto *p)
|
|
{
|
|
return KRT_CF->sys.table_id;
|
|
}
|
|
|
|
static HASH(struct krt_proto) nl_table_map;
|
|
|
|
#define RTH_KEY(p) p->af, krt_table_id(p)
|
|
#define RTH_NEXT(p) p->sys.hash_next
|
|
#define RTH_EQ(a1,i1,a2,i2) a1 == a2 && i1 == i2
|
|
#define RTH_FN(a,i) a ^ u32_hash(i)
|
|
|
|
#define RTH_REHASH rth_rehash
|
|
#define RTH_PARAMS /8, *2, 2, 2, 6, 20
|
|
|
|
HASH_DEFINE_REHASH_FN(RTH, struct krt_proto)
|
|
|
|
int
|
|
krt_capable(rte *e)
|
|
{
|
|
eattr *ea = ea_find(e->attrs, &ea_gen_nexthop);
|
|
if (!ea)
|
|
return 0;
|
|
|
|
struct nexthop_adata *nhad = (void *) ea->u.ptr;
|
|
if (NEXTHOP_IS_REACHABLE(nhad))
|
|
return 1;
|
|
|
|
switch (nhad->dest)
|
|
{
|
|
case RTD_BLACKHOLE:
|
|
case RTD_UNREACHABLE:
|
|
case RTD_PROHIBIT:
|
|
return 1;
|
|
|
|
default:
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
static inline int
|
|
nh_bufsize(struct nexthop_adata *nhad)
|
|
{
|
|
int rv = 0;
|
|
NEXTHOP_WALK(nh, nhad)
|
|
rv += RTNH_LENGTH(RTA_LENGTH(sizeof(ip_addr)));
|
|
return rv;
|
|
}
|
|
|
|
static int
|
|
nl_send_route(struct krt_proto *p, const rte *e, int op, int dest, struct nexthop_adata *nh)
|
|
{
|
|
eattr *ea;
|
|
ea_list *eattrs = e->attrs;
|
|
|
|
int bufsize = 128 + KRT_METRICS_MAX*8 + (nh ? nh_bufsize(nh) : 0);
|
|
u32 priority = 0;
|
|
|
|
struct {
|
|
struct nlmsghdr h;
|
|
struct rtmsg r;
|
|
char buf[0];
|
|
} *r;
|
|
|
|
int rsize = sizeof(*r) + bufsize;
|
|
r = alloca(rsize);
|
|
|
|
DBG("nl_send_route(%N,op=%x)\n", e->net, op);
|
|
|
|
bzero(&r->h, sizeof(r->h));
|
|
bzero(&r->r, sizeof(r->r));
|
|
r->h.nlmsg_type = op ? RTM_NEWROUTE : RTM_DELROUTE;
|
|
r->h.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
|
|
r->h.nlmsg_flags = op | NLM_F_REQUEST | NLM_F_ACK;
|
|
|
|
r->r.rtm_family = p->af;
|
|
r->r.rtm_dst_len = net_pxlen(e->net);
|
|
r->r.rtm_protocol = RTPROT_BIRD;
|
|
r->r.rtm_scope = RT_SCOPE_NOWHERE;
|
|
#ifdef HAVE_MPLS_KERNEL
|
|
if (p->af == AF_MPLS)
|
|
{
|
|
/*
|
|
* Kernel MPLS code is a bit picky. We must:
|
|
* 1) Always set RT_SCOPE_UNIVERSE and RTN_UNICAST (even for RTM_DELROUTE)
|
|
* 2) Never use RTA_PRIORITY
|
|
*/
|
|
|
|
u32 label = net_mpls(e->net);
|
|
nl_add_attr_mpls(&r->h, rsize, RTA_DST, 1, &label);
|
|
r->r.rtm_scope = RT_SCOPE_UNIVERSE;
|
|
r->r.rtm_type = RTN_UNICAST;
|
|
}
|
|
else
|
|
#endif
|
|
{
|
|
nl_add_attr_ipa(&r->h, rsize, RTA_DST, net_prefix(e->net));
|
|
|
|
/* Add source address for IPv6 SADR routes */
|
|
if (e->net->type == NET_IP6_SADR)
|
|
{
|
|
net_addr_ip6_sadr *a = (void *) &e->net;
|
|
nl_add_attr_ip6(&r->h, rsize, RTA_SRC, a->src_prefix);
|
|
r->r.rtm_src_len = a->src_pxlen;
|
|
}
|
|
}
|
|
|
|
/*
 * Strange behavior of RTM_DELROUTE:
 * 1) rtm_family is ignored for IPv6, but works for IPv4
 * 2) not setting RTA_PRIORITY is different from setting the default value (on IPv6)
 * 3) not setting RTA_PRIORITY is equivalent to setting 0, which acts as a wildcard
 */
|
|
|
|
if (krt_table_id(p) < 256)
|
|
r->r.rtm_table = krt_table_id(p);
|
|
else
|
|
nl_add_attr_u32(&r->h, rsize, RTA_TABLE, krt_table_id(p));
|
|
|
|
if (p->af == AF_MPLS)
|
|
priority = 0;
|
|
else if (KRT_CF->sys.metric)
|
|
priority = KRT_CF->sys.metric;
|
|
else if ((op != NL_OP_DELETE) && (ea = ea_find(eattrs, &ea_krt_metric)))
|
|
priority = ea->u.data;
|
|
|
|
if (priority)
|
|
nl_add_attr_u32(&r->h, rsize, RTA_PRIORITY, priority);
|
|
|
|
/* For route delete, we do not specify remaining route attributes */
|
|
if (op == NL_OP_DELETE)
|
|
goto dest;
|
|
|
|
/* Default scope is LINK for device routes, UNIVERSE otherwise */
|
|
if (p->af == AF_MPLS)
|
|
r->r.rtm_scope = RT_SCOPE_UNIVERSE;
|
|
else if (ea = ea_find(eattrs, &ea_krt_scope))
|
|
r->r.rtm_scope = ea->u.data;
|
|
else
|
|
r->r.rtm_scope = (dest == RTD_UNICAST && ipa_zero(nh->nh.gw)) ? RT_SCOPE_LINK : RT_SCOPE_UNIVERSE;
|
|
|
|
if (ea = ea_find(eattrs, &ea_krt_prefsrc))
|
|
nl_add_attr_ipa(&r->h, rsize, RTA_PREFSRC, *(ip_addr *)ea->u.ptr->data);
|
|
|
|
if (ea = ea_find(eattrs, &ea_krt_realm))
|
|
nl_add_attr_u32(&r->h, rsize, RTA_FLOW, ea->u.data);
|
|
|
|
|
|
u32 metrics[KRT_METRICS_MAX];
|
|
metrics[0] = 0;
|
|
|
|
struct ea_walk_state ews = { .eattrs = eattrs };
|
|
while (ea = ea_walk(&ews, ea_krt_metrics[0].id, KRT_METRICS_MAX))
|
|
{
|
|
int id = ea->id - ea_krt_metrics[0].id;
|
|
metrics[0] |= 1 << id;
|
|
metrics[id] = ea->u.data;
|
|
}
|
|
|
|
if (metrics[0])
|
|
nl_add_metrics(&r->h, rsize, metrics, KRT_METRICS_MAX);
|
|
|
|
|
|
dest:
|
|
switch (dest)
|
|
{
|
|
case RTD_UNICAST:
|
|
r->r.rtm_type = RTN_UNICAST;
|
|
if (!NEXTHOP_ONE(nh) && !krt_ecmp6(p))
|
|
nl_add_multipath(&r->h, rsize, nh, p->af, eattrs);
|
|
else
|
|
{
|
|
nl_add_attr_u32(&r->h, rsize, RTA_OIF, nh->nh.iface->index);
|
|
nl_add_nexthop(&r->h, rsize, &nh->nh, p->af);
|
|
|
|
if (nh->nh.flags & RNF_ONLINK)
|
|
r->r.rtm_flags |= RTNH_F_ONLINK;
|
|
}
|
|
break;
|
|
case RTD_BLACKHOLE:
|
|
r->r.rtm_type = RTN_BLACKHOLE;
|
|
break;
|
|
case RTD_UNREACHABLE:
|
|
r->r.rtm_type = RTN_UNREACHABLE;
|
|
break;
|
|
case RTD_PROHIBIT:
|
|
r->r.rtm_type = RTN_PROHIBIT;
|
|
break;
|
|
case RTD_NONE:
|
|
break;
|
|
default:
|
|
bug("krt_capable inconsistent with nl_send_route");
|
|
}
|
|
|
|
/* Ignore missing for DELETE */
|
|
return nl_exchange(&r->h, (op == NL_OP_DELETE));
|
|
}
|
|
|
|
static inline int
|
|
nl_add_rte(struct krt_proto *p, rte *e)
|
|
{
|
|
ea_list *ea = e->attrs;
|
|
int err = 0;
|
|
|
|
eattr *nhea = ea_find(ea, &ea_gen_nexthop);
|
|
struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
|
|
|
|
if (krt_ecmp6(p) && nhad && NEXTHOP_IS_REACHABLE(nhad) && !NEXTHOP_ONE(nhad))
|
|
{
|
|
uint cnt = 0;
|
|
NEXTHOP_WALK(nh, nhad)
|
|
{
|
|
struct {
|
|
struct nexthop_adata nhad;
|
|
u32 labels[MPLS_MAX_LABEL_STACK];
|
|
} nhx;
|
|
memcpy(&nhx.nhad.nh, nh, NEXTHOP_SIZE(nh));
|
|
nhx.nhad.ad.length = (void *) NEXTHOP_NEXT(&nhx.nhad.nh) - (void *) nhx.nhad.ad.data;
|
|
|
|
if (!cnt++)
|
|
{
|
|
err = nl_send_route(p, e, NL_OP_ADD, RTD_UNICAST, &nhx.nhad);
|
|
if (err < 0)
|
|
return err;
|
|
}
|
|
else
|
|
err += nl_send_route(p, e, NL_OP_APPEND, RTD_UNICAST, &nhx.nhad);
|
|
}
|
|
|
|
return err;
|
|
}
|
|
|
|
return nl_send_route(p, e, NL_OP_ADD,
|
|
NEXTHOP_IS_REACHABLE(nhad) ? RTD_UNICAST : nhad->dest, nhad);
|
|
}
|
|
|
|
static inline int
|
|
nl_delete_rte(struct krt_proto *p, const rte *e)
|
|
{
|
|
int err = 0;
|
|
|
|
/* For IPv6, we just repeatedly request DELETE until we get an error */
|
|
do
|
|
err = nl_send_route(p, e, NL_OP_DELETE, RTD_NONE, NULL);
|
|
while (krt_ecmp6(p) && !err);
|
|
|
|
return err;
|
|
}
|
|
|
|
static inline int
|
|
nl_replace_rte(struct krt_proto *p, rte *e)
|
|
{
|
|
eattr *nhea = ea_find(e->attrs, &ea_gen_nexthop);
|
|
struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
|
|
return nl_send_route(p, e, NL_OP_REPLACE,
|
|
NEXTHOP_IS_REACHABLE(nhad) ? RTD_UNICAST : nhad->dest, nhad);
|
|
}
|
|
|
|
|
|
void
|
|
krt_replace_rte(struct krt_proto *p, const net_addr *n UNUSED, rte *new, const rte *old)
|
|
{
|
|
int err = 0;
|
|
|
|
/*
 * We use NL_OP_REPLACE for IPv4; it has an issue of not checking for a
 * matching rtm_protocol, but that is OK when a dedicated priority is used.
 *
 * We do not use NL_OP_REPLACE for IPv6, as it has broken semantics for ECMP,
 * and with some kernel versions an ECMP replace crashes the kernel. This
 * would need more testing and checks for kernel versions.
 *
 * For IPv6, we use NL_OP_DELETE and then NL_OP_ADD. We also do not trust the
 * old route value, so we do not try to optimize IPv6 ECMP reconfigurations.
 */
|
|
|
|
if (krt_ipv4(p) && old && new)
|
|
{
|
|
err = nl_replace_rte(p, new);
|
|
}
|
|
else
|
|
{
|
|
if (old)
|
|
nl_delete_rte(p, old);
|
|
|
|
if (new)
|
|
err = nl_add_rte(p, new);
|
|
}
|
|
|
|
if (new)
|
|
{
|
|
if (err < 0)
|
|
bmap_clear(&p->sync_map, new->id);
|
|
else
|
|
bmap_set(&p->sync_map, new->id);
|
|
}
|
|
}
|
|
|
|
static int
|
|
nl_mergable_route(struct nl_parse_state *s, const net_addr *net, struct krt_proto *p, uint priority, uint krt_type, uint rtm_family)
|
|
{
|
|
/* Route merging is used for IPv6 scans */
|
|
if (!s->scan || (rtm_family != AF_INET6))
|
|
return 0;
|
|
|
|
/* Saved and new route must have same network, proto/table, and priority */
|
|
if ((s->net != net) || (s->proto != p) || (s->krt_metric != priority))
|
|
return 0;
|
|
|
|
/* Both must be regular unicast routes */
|
|
if ((s->krt_type != RTN_UNICAST) || (krt_type != RTN_UNICAST))
|
|
return 0;
|
|
|
|
return 1;
|
|
}
|
|
|
|
static void
|
|
nl_announce_route(struct nl_parse_state *s)
|
|
{
|
|
rte e0 = {
|
|
.attrs = s->attrs,
|
|
.net = s->net,
|
|
};
|
|
|
|
EA_LOCAL_LIST(2) ea = {
|
|
.l = { .count = 2, .next = e0.attrs },
|
|
.a = {
|
|
EA_LITERAL_EMBEDDED(&ea_krt_source, 0, s->krt_proto),
|
|
EA_LITERAL_EMBEDDED(&ea_krt_metric, 0, s->krt_metric),
|
|
},
|
|
};
|
|
|
|
e0.attrs = &ea.l;
|
|
|
|
if (s->scan)
|
|
krt_got_route(s->proto, &e0, s->krt_src);
|
|
else
|
|
krt_got_route_async(s->proto, &e0, s->new, s->krt_src);
|
|
|
|
s->net = NULL;
|
|
s->attrs = NULL;
|
|
s->proto = NULL;
|
|
lp_flush(s->pool);
|
|
}
|
|
|
|
static inline void
|
|
nl_parse_begin(struct nl_parse_state *s, int scan)
|
|
{
|
|
memset(s, 0, sizeof (struct nl_parse_state));
|
|
s->pool = nl_linpool;
|
|
s->scan = scan;
|
|
}
|
|
|
|
static inline void
|
|
nl_parse_end(struct nl_parse_state *s)
|
|
{
|
|
if (s->net)
|
|
nl_announce_route(s);
|
|
}
|
|
|
|
|
|
#define SKIP0(ARG, ...) do { DBG("KRT: Ignoring route - " ARG, ##__VA_ARGS__); return; } while(0)
|
|
#define SKIP(ARG, ...) do { DBG("KRT: Ignoring route %N - " ARG, &dst, ##__VA_ARGS__); return; } while(0)
|
|
|
|
static void
|
|
nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
|
|
{
|
|
struct krt_proto *p;
|
|
struct rtmsg *i;
|
|
struct rtattr *a[BIRD_RTA_MAX];
|
|
int new = h->nlmsg_type == RTM_NEWROUTE;
|
|
|
|
net_addr dst, src = {};
|
|
u32 oif = ~0;
|
|
u32 table_id;
|
|
u32 priority = 0;
|
|
u32 def_scope = RT_SCOPE_UNIVERSE;
|
|
int krt_src;
|
|
|
|
if (!(i = nl_checkin(h, sizeof(*i))))
|
|
return;
|
|
|
|
switch (i->rtm_family)
|
|
{
|
|
case AF_INET:
|
|
if (!nl_parse_attrs(RTM_RTA(i), rtm_attr_want4, a, sizeof(a)))
|
|
return;
|
|
|
|
if (a[RTA_DST])
|
|
net_fill_ip4(&dst, rta_get_ip4(a[RTA_DST]), i->rtm_dst_len);
|
|
else
|
|
net_fill_ip4(&dst, IP4_NONE, 0);
|
|
break;
|
|
|
|
case AF_INET6:
|
|
if (!nl_parse_attrs(RTM_RTA(i), rtm_attr_want6, a, sizeof(a)))
|
|
return;
|
|
|
|
if (a[RTA_DST])
|
|
net_fill_ip6(&dst, rta_get_ip6(a[RTA_DST]), i->rtm_dst_len);
|
|
else
|
|
net_fill_ip6(&dst, IP6_NONE, 0);
|
|
|
|
if (a[RTA_SRC])
|
|
net_fill_ip6(&src, rta_get_ip6(a[RTA_SRC]), i->rtm_src_len);
|
|
else
|
|
net_fill_ip6(&src, IP6_NONE, 0);
|
|
break;
|
|
|
|
#ifdef HAVE_MPLS_KERNEL
|
|
case AF_MPLS:
|
|
if (!nl_parse_attrs(RTM_RTA(i), rtm_attr_want_mpls, a, sizeof(a)))
|
|
return;
|
|
|
|
if (!a[RTA_DST])
|
|
SKIP0("MPLS route without RTA_DST\n");
|
|
|
|
if (rta_get_mpls(a[RTA_DST], rta_mpls_stack) != 1)
|
|
SKIP0("MPLS route with multi-label RTA_DST\n");
|
|
|
|
net_fill_mpls(&dst, rta_mpls_stack[0]);
|
|
break;
|
|
#endif
|
|
|
|
default:
|
|
return;
|
|
}
|
|
|
|
if (a[RTA_OIF])
|
|
oif = rta_get_u32(a[RTA_OIF]);
|
|
|
|
if (a[RTA_TABLE])
|
|
table_id = rta_get_u32(a[RTA_TABLE]);
|
|
else
|
|
table_id = i->rtm_table;
|
|
|
|
if (i->rtm_flags & RTM_F_CLONED)
|
|
SKIP("cloned\n");
|
|
|
|
/* Do we know this table? */
|
|
p = HASH_FIND(nl_table_map, RTH, i->rtm_family, table_id);
|
|
if (!p)
|
|
SKIP("unknown table %u\n", table_id);
|
|
|
|
if (a[RTA_SRC] && (p->p.net_type != NET_IP6_SADR))
|
|
SKIP("src prefix for non-SADR channel\n");
|
|
|
|
if (a[RTA_IIF])
|
|
SKIP("IIF set\n");
|
|
|
|
if (i->rtm_tos != 0) /* We don't support TOS */
|
|
SKIP("TOS %02x\n", i->rtm_tos);
|
|
|
|
if (s->scan && !new)
|
|
SKIP("RTM_DELROUTE in scan\n");
|
|
|
|
if (a[RTA_PRIORITY])
|
|
priority = rta_get_u32(a[RTA_PRIORITY]);
|
|
|
|
int c = net_classify(&dst);
|
|
if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
|
|
SKIP("strange class/scope\n");
|
|
|
|
switch (i->rtm_protocol)
|
|
{
|
|
case RTPROT_UNSPEC:
|
|
SKIP("proto unspec\n");
|
|
|
|
case RTPROT_REDIRECT:
|
|
krt_src = KRT_SRC_REDIRECT;
|
|
break;
|
|
|
|
case RTPROT_KERNEL:
|
|
krt_src = KRT_SRC_KERNEL;
|
|
return;
|
|
|
|
case RTPROT_BIRD:
|
|
if (!s->scan)
|
|
SKIP("echo\n");
|
|
krt_src = KRT_SRC_BIRD;
|
|
break;
|
|
|
|
case RTPROT_BOOT:
|
|
default:
|
|
krt_src = KRT_SRC_ALIEN;
|
|
}
|
|
|
|
net_addr *net = &dst;
|
|
if (p->p.net_type == NET_IP6_SADR)
|
|
{
|
|
net = alloca(sizeof(net_addr_ip6_sadr));
|
|
net_fill_ip6_sadr(net, net6_prefix(&dst), net6_pxlen(&dst),
|
|
net6_prefix(&src), net6_pxlen(&src));
|
|
}
|
|
|
|
if (s->net && !nl_mergable_route(s, net, p, priority, i->rtm_type, i->rtm_family))
|
|
nl_announce_route(s);
|
|
|
|
ea_list *ra = NULL;
|
|
ea_set_attr_u32(&ra, &ea_gen_source, 0, RTS_INHERIT);
|
|
|
|
if (a[RTA_FLOW])
|
|
s->rta_flow = rta_get_u32(a[RTA_FLOW]);
|
|
else
|
|
s->rta_flow = 0;
|
|
|
|
union {
|
|
struct {
|
|
struct adata ad;
|
|
struct nexthop nh;
|
|
u32 labels[MPLS_MAX_LABEL_STACK];
|
|
};
|
|
struct nexthop_adata nhad;
|
|
} nhad = {};
|
|
|
|
switch (i->rtm_type)
|
|
{
|
|
case RTN_UNICAST:
|
|
if (a[RTA_MULTIPATH])
|
|
{
|
|
struct nexthop_adata *nh = nl_parse_multipath(s, p, net, a[RTA_MULTIPATH], i->rtm_family, krt_src);
|
|
if (!nh)
|
|
SKIP("strange RTA_MULTIPATH\n");
|
|
|
|
ea_set_attr(&ra, EA_LITERAL_DIRECT_ADATA(
|
|
&ea_gen_nexthop, 0, &nh->ad));
|
|
break;
|
|
}
|
|
|
|
if ((i->rtm_flags & RTNH_F_DEAD) && (krt_src != KRT_SRC_BIRD))
|
|
SKIP("ignore RTNH_F_DEAD\n");
|
|
|
|
nhad.nh.iface = if_find_by_index(oif);
|
|
if (!nhad.nh.iface)
|
|
{
|
|
log(L_ERR "KRT: Received route %N with unknown ifindex %u", net, oif);
|
|
return;
|
|
}
|
|
|
|
if (a[RTA_GATEWAY])
|
|
nhad.nh.gw = rta_get_ipa(a[RTA_GATEWAY]);
|
|
|
|
#ifdef HAVE_MPLS_KERNEL
|
|
if (a[RTA_VIA])
|
|
nhad.nh.gw = rta_get_via(a[RTA_VIA]);
|
|
#endif
|
|
|
|
if (ipa_nonzero(nhad.nh.gw))
|
|
{
|
|
/* Silently skip strange 6to4 routes */
|
|
const net_addr_ip6 sit = NET_ADDR_IP6(IP6_NONE, 96);
|
|
if ((i->rtm_family == AF_INET6) && ipa_in_netX(nhad.nh.gw, (net_addr *) &sit))
|
|
return;
|
|
|
|
if (i->rtm_flags & RTNH_F_ONLINK)
|
|
nhad.nh.flags |= RNF_ONLINK;
|
|
|
|
neighbor *nbr;
|
|
nbr = neigh_find(&p->p, nhad.nh.gw, nhad.nh.iface,
|
|
(nhad.nh.flags & RNF_ONLINK) ? NEF_ONLINK : 0);
|
|
if (!nbr || (nbr->scope == SCOPE_HOST))
|
|
{
|
|
log(L_ERR "KRT: Received route %N with strange next-hop %I", net,
|
|
nhad.nh.gw);
|
|
return;
|
|
}
|
|
}
|
|
|
|
#ifdef HAVE_MPLS_KERNEL
|
|
if ((i->rtm_family == AF_MPLS) && a[RTA_NEWDST] && !a[RTA_MULTIPATH])
|
|
nhad.nh.labels = rta_get_mpls(a[RTA_NEWDST], nhad.nh.label);
|
|
|
|
if (a[RTA_ENCAP] && a[RTA_ENCAP_TYPE] && !a[RTA_MULTIPATH])
|
|
{
|
|
switch (rta_get_u16(a[RTA_ENCAP_TYPE]))
|
|
{
|
|
case LWTUNNEL_ENCAP_MPLS:
|
|
{
|
|
struct rtattr *enca[BIRD_RTA_MAX];
|
|
nl_attr_len = RTA_PAYLOAD(a[RTA_ENCAP]);
|
|
nl_parse_attrs(RTA_DATA(a[RTA_ENCAP]), encap_mpls_want, enca, sizeof(enca));
|
|
nhad.nh.labels = rta_get_mpls(enca[RTA_DST], nhad.nh.label);
|
|
break;
|
|
}
|
|
default:
|
|
SKIP("unknown encapsulation method %d\n", rta_get_u16(a[RTA_ENCAP_TYPE]));
|
|
break;
|
|
}
|
|
}
|
|
#endif
|
|
|
|
/* Finalize the nexthop */
|
|
nhad.ad.length = (void *) NEXTHOP_NEXT(&nhad.nh) - (void *) nhad.ad.data;
|
|
break;
|
|
case RTN_BLACKHOLE:
|
|
nhad.nhad = NEXTHOP_DEST_LITERAL(RTD_BLACKHOLE);
|
|
break;
|
|
case RTN_UNREACHABLE:
|
|
nhad.nhad = NEXTHOP_DEST_LITERAL(RTD_UNREACHABLE);
|
|
break;
|
|
case RTN_PROHIBIT:
|
|
nhad.nhad = NEXTHOP_DEST_LITERAL(RTD_PROHIBIT);
|
|
break;
|
|
/* FIXME: What about RTN_THROW? */
|
|
default:
|
|
SKIP("type %d\n", i->rtm_type);
|
|
return;
|
|
}
|
|
|
|
if (i->rtm_scope != def_scope)
|
|
ea_set_attr(&ra,
|
|
EA_LITERAL_EMBEDDED(&ea_krt_scope, 0, i->rtm_scope));
|
|
|
|
if (a[RTA_PREFSRC])
|
|
{
|
|
ip_addr ps = rta_get_ipa(a[RTA_PREFSRC]);
|
|
|
|
ea_set_attr(&ra,
|
|
EA_LITERAL_STORE_ADATA(&ea_krt_prefsrc, 0, &ps, sizeof(ps)));
|
|
}
|
|
|
|
/* Can be set per-route or per-nexthop */
|
|
if (s->rta_flow)
|
|
ea_set_attr(&ra,
|
|
EA_LITERAL_EMBEDDED(&ea_krt_realm, 0, s->rta_flow));
|
|
|
|
if (a[RTA_METRICS])
|
|
{
|
|
u32 metrics[KRT_METRICS_MAX];
|
|
if (nl_parse_metrics(a[RTA_METRICS], metrics, ARRAY_SIZE(metrics)) < 0)
|
|
{
|
|
log(L_ERR "KRT: Received route %N with strange RTA_METRICS attribute", net);
|
|
return;
|
|
}
|
|
|
|
for (uint t = 1; t < KRT_METRICS_MAX; t++)
|
|
if (metrics[0] & (1 << t))
|
|
ea_set_attr(&ra,
|
|
EA_LITERAL_EMBEDDED(&ea_krt_metrics[t], 0, metrics[t]));
|
|
}
|
|
|
|
/*
 * Ideally, we would now send the received route to the rest of the kernel
 * protocol code. But on kernels before 4.11, IPv6 ECMP routes arrive as a
 * sequence of separate routes, so we postpone the announcement and merge
 * next hops until the end of the sequence. Note that when merging next hops,
 * we expect the new route to be unipath; otherwise, we ignore additional
 * next hops in nexthop_insert().
 */
|
|
|
|
if (!s->net)
|
|
{
|
|
/* Store the new route */
|
|
s->net = lp_alloc(s->pool, net->length);
|
|
net_copy(s->net, net);
|
|
|
|
ea_set_attr_data(&ra, &ea_gen_nexthop, 0,
|
|
nhad.ad.data, nhad.ad.length);
|
|
|
|
s->attrs = ra;
|
|
s->proto = p;
|
|
s->new = new;
|
|
s->krt_src = krt_src;
|
|
s->krt_type = i->rtm_type;
|
|
s->krt_proto = i->rtm_protocol;
|
|
s->krt_metric = priority;
|
|
}
|
|
else
|
|
{
|
|
/* Merge next hops with the stored route */
|
|
eattr *nhea = ea_find(s->attrs, &ea_gen_nexthop);
|
|
struct nexthop_adata *nhad_old = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
|
|
|
|
if (nhad_old)
|
|
ea_set_attr(&s->attrs,
|
|
EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0,
|
|
&(nexthop_merge(nhad_old, &nhad.nhad,
|
|
KRT_CF->merge_paths, s->pool)->ad)
|
|
));
|
|
else
|
|
ea_set_attr_data(&s->attrs, &ea_gen_nexthop, 0,
|
|
nhad.ad.data, nhad.ad.length);
|
|
}
|
|
}
|
|
|
|
void
|
|
krt_do_scan(struct krt_proto *p UNUSED) /* CONFIG_ALL_TABLES_AT_ONCE => p is NULL */
|
|
{
|
|
struct nlmsghdr *h;
|
|
struct nl_parse_state s;
|
|
|
|
nl_parse_begin(&s, 1);
|
|
nl_request_dump_route(AF_UNSPEC);
|
|
while (h = nl_get_scan())
|
|
if (h->nlmsg_type == RTM_NEWROUTE || h->nlmsg_type == RTM_DELROUTE)
|
|
nl_parse_route(&s, h);
|
|
else
|
|
log(L_DEBUG "nl_scan_fire: Unknown packet received (type=%d)", h->nlmsg_type);
|
|
nl_parse_end(&s);
|
|
}
|
|
|
|
/*
|
|
* Asynchronous Netlink interface
|
|
*/
|
|
|
|
static sock *nl_async_sk; /* BIRD socket for asynchronous notifications */
|
|
static byte *nl_async_rx_buffer; /* Receive buffer */
|
|
static uint nl_async_bufsize; /* Kernel rx buffer size for the netlink socket */
|
|
static struct config *nl_last_config; /* For tracking changes to nl_async_bufsize */
|
|
|
|
static void
|
|
nl_async_msg(struct nlmsghdr *h)
|
|
{
|
|
struct nl_parse_state s;
|
|
|
|
switch (h->nlmsg_type)
|
|
{
|
|
case RTM_NEWROUTE:
|
|
case RTM_DELROUTE:
|
|
DBG("KRT: Received async route notification (%d)\n", h->nlmsg_type);
|
|
nl_parse_begin(&s, 0);
|
|
nl_parse_route(&s, h);
|
|
nl_parse_end(&s);
|
|
break;
|
|
case RTM_NEWLINK:
|
|
case RTM_DELLINK:
|
|
DBG("KRT: Received async link notification (%d)\n", h->nlmsg_type);
|
|
if (kif_proto)
|
|
nl_parse_link(h, 0);
|
|
break;
|
|
case RTM_NEWADDR:
|
|
case RTM_DELADDR:
|
|
DBG("KRT: Received async address notification (%d)\n", h->nlmsg_type);
|
|
if (kif_proto)
|
|
nl_parse_addr(h, 0);
|
|
break;
|
|
default:
|
|
DBG("KRT: Received unknown async notification (%d)\n", h->nlmsg_type);
|
|
}
|
|
}
|
|
|
|
static int
|
|
nl_async_hook(sock *sk, uint size UNUSED)
|
|
{
|
|
struct iovec iov = { nl_async_rx_buffer, NL_RX_SIZE };
|
|
struct sockaddr_nl sa;
|
|
struct msghdr m = {
|
|
.msg_name = &sa,
|
|
.msg_namelen = sizeof(sa),
|
|
.msg_iov = &iov,
|
|
.msg_iovlen = 1,
|
|
};
|
|
struct nlmsghdr *h;
|
|
int x;
|
|
uint len;
|
|
|
|
x = recvmsg(sk->fd, &m, 0);
|
|
if (x < 0)
|
|
{
|
|
if (errno == ENOBUFS)
|
|
{
|
|
/*
 * Netlink reports that some packets have been thrown away.
 * One day we might react to it by requesting a route table
 * scan in the near future.
 */
|
|
log(L_WARN "Kernel dropped some netlink messages, will resync on next scan.");
|
|
return 1; /* More data are likely to be ready */
|
|
}
|
|
else if (errno != EWOULDBLOCK)
|
|
log(L_ERR "Netlink recvmsg: %m");
|
|
return 0;
|
|
}
|
|
if (sa.nl_pid) /* It isn't from the kernel */
|
|
{
|
|
DBG("Non-kernel packet\n");
|
|
return 1;
|
|
}
|
|
h = (void *) nl_async_rx_buffer;
|
|
len = x;
|
|
if (m.msg_flags & MSG_TRUNC)
|
|
{
|
|
log(L_WARN "Netlink got truncated asynchronous message");
|
|
return 1;
|
|
}
|
|
while (NLMSG_OK(h, len))
|
|
{
|
|
nl_async_msg(h);
|
|
h = NLMSG_NEXT(h, len);
|
|
}
|
|
if (len)
|
|
log(L_WARN "nl_async_hook: Found packet remnant of size %d", len);
|
|
return 1;
|
|
}
|
|
|
|
static void
|
|
nl_async_err_hook(sock *sk, int e UNUSED)
|
|
{
|
|
nl_async_hook(sk, 0);
|
|
}
|
|
|
|
static void
|
|
nl_open_async(void)
|
|
{
|
|
sock *sk;
|
|
struct sockaddr_nl sa;
|
|
int fd;
|
|
|
|
if (nl_async_sk)
|
|
return;
|
|
|
|
DBG("KRT: Opening async netlink socket\n");
|
|
|
|
fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
|
|
if (fd < 0)
|
|
{
|
|
log(L_ERR "Unable to open asynchronous rtnetlink socket: %m");
|
|
return;
|
|
}
|
|
|
|
bzero(&sa, sizeof(sa));
|
|
sa.nl_family = AF_NETLINK;
|
|
sa.nl_groups = RTMGRP_LINK |
|
|
RTMGRP_IPV4_IFADDR | RTMGRP_IPV4_ROUTE |
|
|
RTMGRP_IPV6_IFADDR | RTMGRP_IPV6_ROUTE;
|
|
|
|
if (bind(fd, (struct sockaddr *) &sa, sizeof(sa)) < 0)
|
|
{
|
|
log(L_ERR "Unable to bind asynchronous rtnetlink socket: %m");
|
|
close(fd);
|
|
return;
|
|
}
|
|
|
|
nl_async_rx_buffer = xmalloc(NL_RX_SIZE);
|
|
|
|
sk = nl_async_sk = sk_new(krt_pool);
|
|
sk->type = SK_MAGIC;
|
|
sk->rx_hook = nl_async_hook;
|
|
sk->err_hook = nl_async_err_hook;
|
|
sk->fd = fd;
|
|
if (sk_open(sk) < 0)
|
|
bug("Netlink: sk_open failed");
|
|
}
|
|
|
|
static void
|
|
nl_update_async_bufsize(void)
|
|
{
|
|
/* No async socket */
|
|
if (!nl_async_sk)
|
|
return;
|
|
|
|
/* Already reconfigured */
|
|
if (nl_last_config == config)
|
|
return;
|
|
|
|
/* Update netlink buffer size */
|
|
uint bufsize = nl_cfg_rx_buffer_size(config);
|
|
if (bufsize && (bufsize != nl_async_bufsize))
|
|
{
|
|
/* Log message for reconfigurations only */
|
|
if (nl_last_config)
|
|
log(L_INFO "KRT: Changing netlink rx buffer size to %u", bufsize);
|
|
|
|
nl_set_rcvbuf(nl_async_sk->fd, bufsize);
|
|
nl_async_bufsize = bufsize;
|
|
}
|
|
|
|
nl_last_config = config;
|
|
}
|
|
|
|
|
|
/*
|
|
* Interface to the UNIX krt module
|
|
*/
|
|
|
|
void
|
|
krt_sys_io_init(void)
|
|
{
|
|
nl_linpool = lp_new_default(krt_pool);
|
|
HASH_INIT(nl_table_map, krt_pool, 6);
|
|
|
|
nl_ea_register();
|
|
}
|
|
|
|
int
|
|
krt_sys_start(struct krt_proto *p)
|
|
{
|
|
struct krt_proto *old = HASH_FIND(nl_table_map, RTH, p->af, krt_table_id(p));
|
|
|
|
if (old)
|
|
{
|
|
log(L_ERR "%s: Kernel table %u already registered by %s",
|
|
p->p.name, krt_table_id(p), old->p.name);
|
|
return 0;
|
|
}
|
|
|
|
HASH_INSERT2(nl_table_map, RTH, krt_pool, p);
|
|
|
|
nl_open();
|
|
nl_open_async();
|
|
nl_update_async_bufsize();
|
|
|
|
return 1;
|
|
}
|
|
|
|
void
|
|
krt_sys_shutdown(struct krt_proto *p)
|
|
{
|
|
nl_update_async_bufsize();
|
|
|
|
HASH_REMOVE2(nl_table_map, RTH, krt_pool, p);
|
|
}
|
|
|
|
int
|
|
krt_sys_reconfigure(struct krt_proto *p UNUSED, struct krt_config *n, struct krt_config *o)
|
|
{
|
|
nl_update_async_bufsize();
|
|
|
|
return (n->sys.table_id == o->sys.table_id) && (n->sys.metric == o->sys.metric);
|
|
}
|
|
|
|
void
|
|
krt_sys_init_config(struct krt_config *cf)
|
|
{
|
|
cf->sys.table_id = RT_TABLE_MAIN;
|
|
cf->sys.metric = 32;
|
|
}
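
/*
 * Illustrative sketch (not part of the original source): a minimal BIRD
 * configuration fragment that matches the defaults set above -- kernel table
 * RT_TABLE_MAIN (254) and protocol metric 32. The option names below are
 * assumed from the kernel protocol configuration grammar; adjust as needed.
 *
 *   protocol kernel {
 *     ipv4 { export all; };
 *     kernel table 254;	# same as the RT_TABLE_MAIN default above
 *     metric 32;		# same as the cf->sys.metric default above
 *   };
 */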
|
|
|
|
void
|
|
krt_sys_copy_config(struct krt_config *d, struct krt_config *s)
|
|
{
|
|
d->sys.table_id = s->sys.table_id;
|
|
d->sys.metric = s->sys.metric;
|
|
}
|
|
|
|
void
|
|
kif_sys_start(struct kif_proto *p UNUSED)
|
|
{
|
|
nl_open();
|
|
nl_open_async();
|
|
}
|
|
|
|
void
|
|
kif_sys_shutdown(struct kif_proto *p UNUSED)
|
|
{
|
|
}
|
|
|
|
int
|
|
kif_update_sysdep_addr(struct iface *i UNUSED)
|
|
{
|
|
return 0;
|
|
}
|