/*
 * BIRD Internet Routing Daemon -- Unix I/O
 *
 * (c) 1998--2004 Martin Mares <mj@ucw.cz>
 * (c) 2004 Ondrej Filip <feela@network.cz>
 *
 * Can be freely distributed and used under the terms of the GNU GPL.
 */

/* Unfortunately, some glibc versions hide parts of the RFC 3542 API
   if _GNU_SOURCE is not defined. */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <poll.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/icmp6.h>
#include <netdb.h>
#include "nest/bird.h"
|
|
|
|
#include "lib/lists.h"
|
|
|
|
#include "lib/resource.h"
|
|
|
|
#include "lib/socket.h"
|
1999-02-11 21:18:26 +00:00
|
|
|
#include "lib/event.h"
|
2021-02-04 14:52:42 +00:00
|
|
|
#include "lib/locking.h"
|
2017-05-30 17:12:35 +00:00
|
|
|
#include "lib/timer.h"
|
1999-12-01 11:59:00 +00:00
|
|
|
#include "lib/string.h"
|
1998-05-24 14:50:18 +00:00
|
|
|
#include "nest/iface.h"
|
2017-06-06 14:47:30 +00:00
|
|
|
#include "conf/conf.h"
|
1998-05-24 14:50:18 +00:00
|
|
|
|
2016-04-12 09:14:54 +00:00
|
|
|
#include "sysdep/unix/unix.h"
|
2021-06-19 18:50:18 +00:00
|
|
|
#include "sysdep/unix/io-loop.h"
|
1998-05-24 14:50:18 +00:00
|
|
|
|

/* Maximum number of calls of the tx handler for one socket in one
 * poll iteration. Should be small enough that one protocol instance
 * cannot monopolize the CPU.
 */
#define MAX_STEPS 4

/* Maximum number of calls of the rx handler for all sockets in one
 * poll iteration. RX callbacks are often much more costly, so we
 * limit this to keep latencies small. */
#define MAX_RX_STEPS 4


/*
 * Tracked Files
 */

struct rfile {
  resource r;
  struct stat stat;
  int fd;
  off_t limit;
  _Atomic off_t pos;
  void *mapping;
};

struct rfile rf_stderr = {
  .fd = 2,
};
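
/* Field semantics, as inferred from the code below: in RF_APPEND mode,
 * pos tracks the current file size and a nonzero limit caps it; in
 * RF_FIXED mode the whole file is mmap()'d at mapping, and limit is both
 * the mapping length and the wrap point of the ring buffer addressed
 * through pos. */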

static void
rf_free(resource *r)
{
  struct rfile *a = (struct rfile *) r;

  if (a->mapping)
    munmap(a->mapping, a->limit);

  close(a->fd);
}

static void
rf_dump(resource *r, unsigned indent UNUSED)
{
  struct rfile *a = (struct rfile *) r;

  debug("(fd %d)\n", a->fd);
}

static struct resclass rf_class = {
  "FILE",
  sizeof(struct rfile),
  rf_free,
  rf_dump,
  NULL,
  NULL
};

static int
rf_open_get_fd(const char *name, enum rf_mode mode)
{
  int omode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
  int flags;

  switch (mode)
  {
    case RF_APPEND:
      flags = O_WRONLY | O_CREAT | O_APPEND;
      break;

    case RF_FIXED:
      flags = O_RDWR | O_CREAT;
      break;

    default:
      bug("rf_open() must have the mode set");
  }

  return open(name, flags, omode);
}

static void
rf_stat(struct rfile *r)
{
  if (fstat(r->fd, &r->stat) < 0)
    die("fstat() failed: %m");
}

struct rfile *
rf_open(pool *p, const char *name, enum rf_mode mode, off_t limit)
{
  int fd = rf_open_get_fd(name, mode);
  if (fd < 0)
    return NULL;  /* The caller takes care of printing %m. */

  struct rfile *r = ralloc(p, &rf_class);
  r->fd = fd;
  r->limit = limit;

  switch (mode)
  {
    case RF_APPEND:
      rf_stat(r);
      atomic_store_explicit(&r->pos, S_ISREG(r->stat.st_mode) ? r->stat.st_size : 0, memory_order_relaxed);
      break;

    case RF_FIXED:
      if ((ftruncate(fd, limit) < 0)
          || ((r->mapping = mmap(NULL, limit, PROT_WRITE, MAP_SHARED, fd, 0)) == MAP_FAILED))
      {
        int erf = errno;
        r->mapping = NULL;
        rfree(r);
        errno = erf;
        return NULL;
      }
      break;

    default:
      bug("rf_open() must have the mode set");
  }

  return r;
}
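
/* A minimal usage sketch (illustrative only; the pool pointer and path
 * are hypothetical):
 *
 *   struct rfile *rf = rf_open(my_pool, "/var/log/bird.log", RF_APPEND, 0);
 *   if (!rf)
 *     log(L_ERR "Cannot open log file: %m");
 *
 * On failure rf_open() returns NULL with errno set by open(), ftruncate()
 * or mmap(), so the caller is expected to report the error, e.g. via %m. */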

off_t
rf_size(struct rfile *r)
{
  return atomic_load_explicit(&r->pos, memory_order_relaxed);
}

int
rf_same(struct rfile *a, struct rfile *b)
{
  rf_stat(a);
  rf_stat(b);

  return
    (a->limit == b->limit) &&
    (a->stat.st_mode == b->stat.st_mode) &&
    (a->stat.st_dev == b->stat.st_dev) &&
    (a->stat.st_ino == b->stat.st_ino);
}

void
rf_write_crude(struct rfile *r, const char *buf, int sz)
{
  if (r->mapping)
    memcpy(r->mapping, buf, sz);
  else
    write(r->fd, buf, sz);
}
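
/* Note: rf_write_crude() bypasses the position accounting used by
 * rf_writev(); with a mapping it overwrites the beginning of the file.
 * Judging by its name and behavior, it is meant only for last-resort
 * output where clobbering existing content is acceptable. */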

int
rf_writev(struct rfile *r, struct iovec *iov, int iov_count)
{
  off_t size = 0;
  for (int i = 0; i < iov_count; i++)
    size += iov[i].iov_len;

  if (r->mapping)
  {
    /* Update the pointer */
    off_t target = atomic_fetch_add_explicit(&r->pos, size, memory_order_relaxed) % r->limit;

    /* Write the line */
    for (int i = 0; i < iov_count; i++)
    {
      /* Take care of wrapping; this should really happen only once */
      off_t rsz;
      while ((rsz = r->limit - target) < (off_t) iov[i].iov_len)
      {
        memcpy(r->mapping + target, iov[i].iov_base, rsz);
        iov[i].iov_base += rsz;
        iov[i].iov_len -= rsz;
        target = 0;
      }

      memcpy(r->mapping + target, iov[i].iov_base, iov[i].iov_len);
      target += iov[i].iov_len;
    }
    return 1;
  }
  else if (r->limit && (atomic_fetch_add_explicit(&r->pos, size, memory_order_relaxed) + size > r->limit))
  {
    atomic_fetch_sub_explicit(&r->pos, size, memory_order_relaxed);
    return 0;
  }
  else
  {
    while (size > 0)
    {
      /* Try to write */
      ssize_t e = writev(r->fd, iov, iov_count);
      if (e < 0)
      {
        if (errno == EINTR)
          continue;
        else
          return 1; /* FIXME: What should we do when we suddenly can't write? */
      }

      /* It is expected that we always write the whole bunch at once */
      if (e == size)
        return 1;

      /* Block split should not happen (we write small enough messages)
       * but if it happens, let's try to write the rest of the log */
      size -= e;
      while (e > 0)
      {
        if ((ssize_t) iov[0].iov_len > e)
        {
          /* Some bytes are remaining in the first chunk */
          iov[0].iov_len -= e;
          iov[0].iov_base += e;
          break;
        }

        /* First chunk written completely, get rid of it */
        e -= iov[0].iov_len;
        iov++;
        iov_count--;
        ASSERT_DIE(iov_count > 0);
      }
    }

    return 1;
  }
}
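
/* Worked example of the wrap arithmetic above (illustrative numbers):
 * with limit = 16 and a previous pos of 14, a 5-byte chunk is stored as
 * 2 bytes at offsets 14..15 and the remaining 3 bytes at offsets 0..2,
 * so an RF_FIXED file behaves as a ring buffer of limit bytes. */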

/*
 * Time clock
 */

btime boot_time;

void
times_update(void)
{
  struct timespec ts;
  int rv;

  btime old_time = current_time();
  btime old_real_time = current_real_time();

  rv = clock_gettime(CLOCK_MONOTONIC, &ts);
  if (rv < 0)
    die("Monotonic clock is missing");

  if ((ts.tv_sec < 0) || (((u64) ts.tv_sec) > ((u64) 1 << 40)))
    log(L_WARN "Monotonic clock is crazy");

  btime new_time = ts.tv_sec S + ts.tv_nsec NS;

  if (new_time < old_time)
    log(L_ERR "Monotonic clock is broken");

  rv = clock_gettime(CLOCK_REALTIME, &ts);
  if (rv < 0)
    die("clock_gettime: %m");

  btime new_real_time = ts.tv_sec S + ts.tv_nsec NS;

  if (!atomic_compare_exchange_strong_explicit(
        &last_time,
        &old_time,
        new_time,
        memory_order_acq_rel,
        memory_order_relaxed))
    DBG("Time update collision: last_time");

  if (!atomic_compare_exchange_strong_explicit(
        &real_time,
        &old_real_time,
        new_real_time,
        memory_order_acq_rel,
        memory_order_relaxed))
    DBG("Time update collision: real_time");
}

btime
current_time_now(void)
{
  struct timespec ts;
  int rv;

  rv = clock_gettime(CLOCK_MONOTONIC, &ts);
  if (rv < 0)
    die("clock_gettime: %m");

  return ts.tv_sec S + ts.tv_nsec NS;
}
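
/* Unlike current_time(), which returns the value cached by times_update(),
 * current_time_now() queries CLOCK_MONOTONIC directly. A hedged sketch of
 * bracketing a measurement with it (expensive_operation() is hypothetical):
 *
 *   btime before = current_time_now();
 *   expensive_operation();
 *   btime elapsed = current_time_now() - before;
 */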

/*
 * Internal event log and watchdog
 */

#define EVENT_LOG_LENGTH 32

struct event_log_entry
{
  void *hook;
  void *data;
  btime timestamp;
  btime duration;
};

static struct event_log_entry event_log[EVENT_LOG_LENGTH];
static struct event_log_entry *event_open;
static int event_log_pos, event_log_num, watchdog_active;
static btime last_io_time;
static btime loop_time;

static void
io_update_time(void)
{
  last_io_time = current_time();

  if (event_open)
  {
    event_open->duration = last_io_time - event_open->timestamp;

    struct global_runtime *gr = atomic_load_explicit(&global_runtime, memory_order_relaxed);
    if (event_open->duration > gr->latency_limit)
      log(L_WARN "Event 0x%p 0x%p took %u.%03u ms",
          event_open->hook, event_open->data, (uint) (event_open->duration TO_MS), (uint) (event_open->duration % 1000));

    event_open = NULL;
  }
}

/**
 * io_log_event - mark an upcoming event in the event log
 * @hook: event hook address
 * @data: event data address
 * @flag: latency debug class (DL_*) of the event
 *
 * Store info (hook, data, timestamp) about the following internal event into
 * a circular event log (@event_log). When latency tracking is enabled for
 * @flag, the log entry is kept open (in @event_open) so the duration can be
 * filled in later.
 */
void
io_log_event(void *hook, void *data, uint flag)
{
  struct global_runtime *gr = atomic_load_explicit(&global_runtime, memory_order_relaxed);
  if (gr->latency_debug & flag)
    io_update_time();

  struct event_log_entry *en = event_log + event_log_pos;

  en->hook = hook;
  en->data = data;
  en->timestamp = last_io_time;
  en->duration = 0;

  event_log_num++;
  event_log_pos++;
  event_log_pos %= EVENT_LOG_LENGTH;

  event_open = (gr->latency_debug & flag) ? en : NULL;
}

static inline void
io_close_event(void)
{
  if (event_open)
    io_update_time();
}
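
/* Intended pairing, as seen in io_loop() below: each handler invocation is
 * announced with io_log_event() and the open entry is closed, with its
 * duration filled in, either by the next io_log_event() or by
 * io_close_event() at the end of the loop iteration. */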

void
io_log_dump(void)
{
  int i;

  log(L_DEBUG "Event log:");
  for (i = 0; i < EVENT_LOG_LENGTH; i++)
  {
    struct event_log_entry *en = event_log + (event_log_pos + i) % EVENT_LOG_LENGTH;
    if (en->hook)
      log(L_DEBUG "  Event 0x%p 0x%p at %8d for %d ms", en->hook, en->data,
          (int) ((last_io_time - en->timestamp) TO_MS), (int) (en->duration TO_MS));
  }
}

void
watchdog_sigalrm(int sig UNUSED)
{
  /* Update last_io_time and duration, but skip latency check */
  struct global_runtime *gr = atomic_load_explicit(&global_runtime, memory_order_relaxed);
  gr->latency_limit = 0xffffffff;
  io_update_time();

  debug_safe("Watchdog timer timed out\n");

  /* We want core dump */
  abort();
}

static inline void
watchdog_start1(void)
{
  io_update_time();

  loop_time = last_io_time;
}

static inline void
watchdog_start(void)
{
  io_update_time();

  loop_time = last_io_time;
  event_log_num = 0;

  union bird_global_runtime *gr = BIRD_GLOBAL_RUNTIME;
  if (gr->watchdog_timeout)
  {
    alarm(gr->watchdog_timeout);
    watchdog_active = 1;
  }
}

static inline void
watchdog_stop(void)
{
  io_update_time();

  if (watchdog_active)
  {
    alarm(0);
    watchdog_active = 0;
  }

  btime duration = last_io_time - loop_time;
  union bird_global_runtime *gr = BIRD_GLOBAL_RUNTIME;
  if (duration > gr->watchdog_warning)
    log(L_WARN "I/O loop cycle took %u.%03u ms for %d events",
        (uint) (duration TO_MS), (uint) (duration % 1000), event_log_num);
}
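
/* Watchdog lifecycle, summarized from the functions above and io_loop()
 * below: the main loop brackets the blocking poll() with watchdog_stop()
 * and watchdog_start(), so the alarm() timer runs only while events are
 * being processed; if one cycle exceeds the configured watchdog_timeout,
 * SIGALRM fires and watchdog_sigalrm() aborts to produce a core dump. */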

/*
 * Main I/O Loop
 */

void
io_init(void)
{
  init_list(&main_birdloop.sock_list);
  ev_init_list(&global_event_list, &main_birdloop, "Global event list");
  ev_init_list(&global_work_list, &main_birdloop, "Global work list");
  ev_init_list(&main_birdloop.event_list, &main_birdloop, "Global fast event list");
  krt_io_init();
  // XXX init_times();
  // XXX update_times();
  boot_time = current_time();

  u64 now = (u64) current_real_time();
  srandom((uint) (now ^ (now >> 32)));
}

static int short_loops = 0;
#define SHORT_LOOP_MAX 10
#define WORK_EVENTS_MAX 10

extern sock *stored_sock; /* mainloop hack */

int sk_read(sock *s, int revents);
int sk_write(sock *s);
void sk_err(sock *s, int revents);
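
/* Overview of one io_loop() iteration (a reading aid, not normative):
 * run pending events and fire expired timers; compute the poll timeout
 * and rebuild the pollfd set; handle async config/dump/shutdown requests;
 * enter poll() with the watchdog disarmed; then serve fast-rx reads and
 * all writes, followed by a bounded number of non-fast reads
 * (MAX_RX_STEPS), keeping the scan position in stored_sock. */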

void
io_loop(void)
{
  int poll_tout, timeout;
  int events, pout;
  timer *t;
  struct pfd pfd;
  BUFFER_INIT(pfd.pfd, &root_pool, 16);
  BUFFER_INIT(pfd.loop, &root_pool, 16);

  watchdog_start1();
  for (;;)
  {
    times_update();
    ev_run_list(&global_event_list);
    ev_run_list_limited(&global_work_list, WORK_EVENTS_MAX);
    ev_run_list(&main_birdloop.event_list);
    timers_fire(&main_birdloop.time, 1);
    io_close_event();

    events =
      !ev_list_empty(&global_event_list) ||
      !ev_list_empty(&global_work_list) ||
      !ev_list_empty(&main_birdloop.event_list);

    poll_tout = (events ? 0 : 3000); /* Time in milliseconds */
    if (t = timers_first(&main_birdloop.time))
    {
      times_update();
      timeout = (tm_remains(t) TO_MS) + 1;
      poll_tout = MIN(poll_tout, timeout);
    }

    BUFFER_FLUSH(pfd.pfd);
    BUFFER_FLUSH(pfd.loop);

    pipe_pollin(&main_birdloop.thread->wakeup, &pfd);
    sockets_prepare(&main_birdloop, &pfd);

    /*
     * Yes, this is racy. But even if the signal comes before this test
     * and entering poll(), it gets caught on the next timer tick.
     */
    if (async_config_flag)
    {
      io_log_event(async_config, NULL, DL_EVENTS);
      async_config();
      async_config_flag = 0;
      continue;
    }
    if (async_dump_flag)
    {
      io_log_event(async_dump, NULL, DL_EVENTS);
      async_dump();
      async_dump_flag = 0;
      continue;
    }
    if (async_shutdown_flag)
    {
      io_log_event(async_shutdown, NULL, DL_EVENTS);
      async_shutdown();
      async_shutdown_flag = 0;
      continue;
    }

    /* And finally enter poll() to find active sockets */
    watchdog_stop();
    birdloop_leave(&main_birdloop);
    pout = poll(pfd.pfd.data, pfd.pfd.used, poll_tout);
    birdloop_enter(&main_birdloop);
    watchdog_start();

    if (pout < 0)
    {
      if (errno == EINTR || errno == EAGAIN)
        continue;
      bug("poll: %m");
    }
    if (pout)
    {
      if (pfd.pfd.data[0].revents & POLLIN)
      {
        /* IO loop reload requested */
        pipe_drain(&main_birdloop.thread->wakeup);
        atomic_fetch_and_explicit(&main_birdloop.thread_transition, ~LTT_PING, memory_order_acq_rel);
        continue;
      }

      times_update();

      /* guaranteed to be non-empty */
      main_birdloop.sock_active = SKIP_BACK(sock, n, HEAD(main_birdloop.sock_list));

      while (main_birdloop.sock_active)
      {
        sock *s = main_birdloop.sock_active;
        if (s->index != -1)
        {
          int e;
          int steps;

          steps = MAX_STEPS;
          if (s->fast_rx && (pfd.pfd.data[s->index].revents & POLLIN) && s->rx_hook)
            do
            {
              steps--;
              io_log_event(s->rx_hook, s->data, DL_SOCKETS);
              e = sk_read(s, pfd.pfd.data[s->index].revents);
            }
            while (e && (main_birdloop.sock_active == s) && s->rx_hook && steps);

          if (s != main_birdloop.sock_active)
            continue;

          steps = MAX_STEPS;
          if (pfd.pfd.data[s->index].revents & POLLOUT)
            do
            {
              steps--;
              io_log_event(s->tx_hook, s->data, DL_SOCKETS);
              e = sk_write(s);
            }
            while (e && (main_birdloop.sock_active == s) && steps);

          if (s != main_birdloop.sock_active)
            continue;
        }

        main_birdloop.sock_active = sk_next(s);
      }

      short_loops++;
      if (events && (short_loops < SHORT_LOOP_MAX))
        continue;
      short_loops = 0;

      int count = 0;
      main_birdloop.sock_active = stored_sock;
      if (main_birdloop.sock_active == NULL)
        main_birdloop.sock_active = SKIP_BACK(sock, n, HEAD(main_birdloop.sock_list));

      while (main_birdloop.sock_active && count < MAX_RX_STEPS)
      {
        sock *s = main_birdloop.sock_active;
        if (s->index == -1)
          goto next2;

        if (!s->fast_rx && (pfd.pfd.data[s->index].revents & POLLIN) && s->rx_hook)
        {
          count++;
          io_log_event(s->rx_hook, s->data, DL_SOCKETS);
          sk_read(s, pfd.pfd.data[s->index].revents);
          if (s != main_birdloop.sock_active)
            continue;
        }

        if (pfd.pfd.data[s->index].revents & (POLLHUP | POLLERR))
        {
          sk_err(s, pfd.pfd.data[s->index].revents);
          if (s != main_birdloop.sock_active)
            continue;
        }

      next2: ;
        main_birdloop.sock_active = sk_next(s);
      }

      stored_sock = main_birdloop.sock_active;
    }
  }
}

void
test_old_bird(char *path)
{
  int fd;
  struct sockaddr_un sa;

  fd = socket(AF_UNIX, SOCK_STREAM, 0);
  if (fd < 0)
    die("Cannot create socket: %m");
  if (strlen(path) >= sizeof(sa.sun_path))
    die("Socket path too long");
  bzero(&sa, sizeof(sa));
  sa.sun_family = AF_UNIX;
  strcpy(sa.sun_path, path);
  if (connect(fd, (struct sockaddr *) &sa, SUN_LEN(&sa)) == 0)
    die("I found another BIRD running.");
  close(fd);
}

/*
 * DNS resolver
 */

ip_addr
resolve_hostname(const char *host, int type, const char **err_msg)
{
  struct addrinfo *res;
  struct addrinfo hints = {
    .ai_family = AF_UNSPEC,
    .ai_socktype = (type == SK_UDP) ? SOCK_DGRAM : SOCK_STREAM,
    .ai_flags = AI_ADDRCONFIG,
  };

  *err_msg = NULL;

  int err_code = getaddrinfo(host, NULL, &hints, &res);
  if (err_code != 0)
  {
    *err_msg = gai_strerror(err_code);
    return IPA_NONE;
  }

  ip_addr addr = IPA_NONE;
  uint unused;

  sockaddr_read((sockaddr *) res->ai_addr, res->ai_family, &addr, NULL, &unused);
  freeaddrinfo(res);

  return addr;
}
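
/* Hedged usage sketch (the hostname is illustrative):
 *
 *   const char *err;
 *   ip_addr a = resolve_hostname("bgp.example.net", SK_UDP, &err);
 *   if (err)
 *     log(L_ERR "Cannot resolve bgp.example.net: %s", err);
 *
 * Only the first address returned by getaddrinfo() is used; on failure
 * *err_msg points to a static string from gai_strerror(). */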
|