-
/*
 * Copyright 2007 Marc Alexander Lehmann <libev@schmorp.de>
 * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
 */

#include <sys/types.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <string.h>
#include <errno.h>
-static int kq_fd;
-static struct kevent *kq_changes;
-static int kq_changemax, kq_changecnt;
-static struct kevent *kq_events;
-static int kq_eventmax;
-
static void
-kqueue_change (int fd, int filter, int flags, int fflags)
+kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
{
struct kevent *ke;
- array_needsize (kq_changes, kq_changemax, ++kq_changecnt, );
+ array_needsize (kqueue_changes, kqueue_changemax, ++kqueue_changecnt, );
- ke = &kq_changes [kq_changecnt - 1];
+ ke = &kqueue_changes [kqueue_changecnt - 1];
memset (ke, 0, sizeof (struct kevent));
ke->ident  = fd;
ke->filter = filter;
ke->flags  = flags;
ke->fflags = fflags;
}
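
/* kqueue_change only queues a change record in the per-loop
 * kqueue_changes buffer; nothing is submitted to the kernel here.
 * The whole batch is handed to a single kevent(2) call in
 * kqueue_poll below, so one loop iteration costs one syscall no
 * matter how many fds changed their event mask. */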
+#ifndef NOTE_EOF
+# define NOTE_EOF 0
+#endif
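
/* NOTE_EOF is an EVFILT_READ fflag that not every <sys/event.h>
 * provides; defining it to 0 where it is missing simply drops the
 * hint, so the EV_ADD requests below stay portable. */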
+
static void
-kqueue_modify (int fd, int oev, int nev)
+kqueue_modify (EV_P_ int fd, int oev, int nev)
{
- if ((oev ^ new) & EV_READ)
+ if ((oev ^ nev) & EV_READ)
{
if (nev & EV_READ)
kqueue_change (EV_A_ fd, EVFILT_READ, EV_ADD, NOTE_EOF);
else
kqueue_change (EV_A_ fd, EVFILT_READ, EV_DELETE, 0);
}
- if ((oev ^ new) & EV_WRITE)
+ if ((oev ^ nev) & EV_WRITE)
{
if (nev & EV_WRITE)
kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF);
else
kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0);
}
}
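
/* kqueue_poll flushes every queued change and collects pending
 * events in a single kevent(2) call, after converting the
 * floating-point timeout into a struct timespec; per-fd errors come
 * back as events flagged EV_ERROR and are handled inline below. */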
static void
-kqueue_poll (ev_tstamp timeout)
+kqueue_poll (EV_P_ ev_tstamp timeout)
{
int res, i;
struct timespec ts;
ts.tv_sec = (time_t)timeout;
ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9);
- res = kevent (kq_fd, kq_changes, kq_changecnt, kq_events, kq_eventmax, &ts);
- kq_changecnt = 0;
+ res = kevent (kqueue_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
+ kqueue_changecnt = 0;
if (res < 0)
return;
for (i = 0; i < res; ++i)
{
- if (kq_events [i].flags & EV_ERROR)
+ if (kqueue_events [i].flags & EV_ERROR)
{
/*
 * Errors that can happen when a delete fails: EBADF when the file
 * descriptor has already been closed, ENOENT when it was closed and
 * then reopened. An error is also indicated when a callback deletes
 * an event we are still processing; in that case the data field is
 * set to ENOENT.
 */
- if (events [i].data == EBADF)
- fd_kill (events [i].ident);
+ if (kqueue_events [i].data == EBADF)
+ fd_kill (EV_A_ kqueue_events [i].ident);
}
else
fd_event (
- events [i].ident,
- events [i].filter == EVFILT_READ ? EV_READ
- : events [i].filter == EVFILT_WRITE ? EV_WRITE
+ EV_A_
+ kqueue_events [i].ident,
+ kqueue_events [i].filter == EVFILT_READ ? EV_READ
+ : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE
: 0
);
}
- if (expect_false (res == kq_eventmax))
+ if (expect_false (res == kqueue_eventmax))
{
- free (kq_events);
- kq_eventmax = array_roundsize (kq_events, kq_eventmax << 1);
- kq_events = malloc (sizeof (struct kevent) * kq_eventmax);
+ free (kqueue_events);
+ kqueue_eventmax = array_roundsize (kqueue_events, kqueue_eventmax << 1);
+ kqueue_events = malloc (sizeof (struct kevent) * kqueue_eventmax);
}
}
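
/* For reference only: a minimal, self-contained sketch of the raw
 * change-list/event-list pattern that kqueue_poll batches above. It
 * is not part of libev, the helper name example_wait_readable is
 * made up, and it assumes a BSD-style <sys/event.h>. Kept inside
 * "#if 0" so it is never compiled here. */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>

static int
example_wait_readable (int fd, double timeout)
{
  struct kevent change, event;
  struct timespec ts;
  int kq, res;

  if ((kq = kqueue ()) < 0)
    return -1;

  /* queue one change: interest in readability of fd */
  EV_SET (&change, fd, EVFILT_READ, EV_ADD, 0, 0, 0);

  /* convert the floating-point timeout, as kqueue_poll does */
  ts.tv_sec  = (time_t)timeout;
  ts.tv_nsec = (long)((timeout - (double)ts.tv_sec) * 1e9);

  /* submit the change and wait for at most one event in one call */
  res = kevent (kq, &change, 1, &event, 1, &ts);

  close (kq);
  return res; /* 1 if readable, 0 on timeout, -1 on error */
}
#endif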
-static void
-kqueue_init (struct event_base *base)
+static int
+kqueue_init (EV_P_ int flags)
{
struct kevent ch, ev;
/* Initialize the kernel queue */
- if ((kq_fd = kqueue ()) < 0)
- return;
+ if ((kqueue_fd = kqueue ()) < 0)
+ return 0;
+
+ fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */
/* Check for Mac OS X kqueue bug. */
ch.ident = -1;
ch.filter = EVFILT_READ;
ch.flags = EV_ADD;
/*
* If kqueue works, then kevent will succeed, and it will
- * stick an error in events[0]. If kqueue is broken, then
+ * stick an error in ev. If kqueue is broken, then
* kevent will fail.
*/
- if (kevent (kq_fd, &ch, 1, &ev, 1, 0) != 1
+ if (kevent (kqueue_fd, &ch, 1, &ev, 1, 0) != 1
|| ev.ident != -1
|| ev.flags != EV_ERROR)
{
/* detected broken kqueue */
- close (kq_fd);
- return;
+ close (kqueue_fd);
+ return 0;
}
- ev_method = EVMETHOD_KQUEUE;
method_fudge = 1e-3; /* needed to compensate for kevent returning early */
- method_modify = kq_modify;
- method_poll = kq_poll;
+ method_modify = kqueue_modify;
+ method_poll = kqueue_poll;
+
+ kqueue_eventmax = 64; /* initial number of events receivable per poll */
+ kqueue_events = malloc (sizeof (struct kevent) * kqueue_eventmax);
+
+ kqueue_changes = 0;
+ kqueue_changemax = 0;
+ kqueue_changecnt = 0;
+
+ return EVMETHOD_KQUEUE;
+}
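
/* If this follows the usual libev layout, loop_init in ev.c records
 * the EVMETHOD_KQUEUE bit returned above as the active backend and
 * then dispatches through method_modify/method_poll; returning 0
 * instead lets it fall back to another backend such as poll or
 * select (assumption about the caller; not shown in this file). */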
+
+static void
+kqueue_destroy (EV_P)
+{
+ close (kqueue_fd);
+
+ free (kqueue_events);
+ free (kqueue_changes);
+}
+
+static void
+kqueue_fork (EV_P)
+{
+ close (kqueue_fd); /* the old kqueue cannot be used after a fork; close () may fail with EBADF if the kernel already closed the fd, which is harmless */
+ kqueue_fd = kqueue ();
+ fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC);
- kq_eventmax = 64; /* intiial number of events receivable per poll */
- kq_events = malloc (sizeof (struct kevent) * kq_eventmax);
+ /* re-register interest in fds */
+ fd_rearm_all ();
}