* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+/*
+ * general notes about epoll:
+ *
+ * a) epoll silently removes fds from the fd set. as nothing tells us
+ * that an fd has been removed otherwise, we have to continually
+ * "rearm" fds that we suspect *might* have changed (same
+ * problem with kqueue, but much less costly there).
+ * b) the fact that ADD != MOD creates a lot of extra syscalls due to a)
+ * and seems not to have any advantage.
+ * c) the inability to handle fork or duplicated file descriptors (think dup)
+ * limits the applicability over poll, so this is not a generic
+ * poll replacement.
+ *
+ * lots of "weird code" and complication handling in this file is due
+ * to these design problems with epoll, as we try very hard to avoid
+ * epoll_ctl syscalls for common usage patterns.
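+ * (epoll_modify below therefore guesses between ADD and MOD from the
+ * cached watcher state and recovers from the resulting ENOENT/EEXIST errors.)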
+ */
+
#include <sys/epoll.h>
static void
epoll_modify (EV_P_ int fd, int oev, int nev)
{
- int mode = nev ? oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD : EPOLL_CTL_DEL;
-
struct epoll_event ev;
+
+ /*
+ * we handle EPOLL_CTL_DEL by ignoring it here
+ * on the assumption that the fd is gone anyway;
+ * if that is wrong, we have to handle the spurious
+ * event in epoll_poll.
+ */
+ if (!nev)
+ return;
+
ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */
- ev.events =
- (nev & EV_READ ? EPOLLIN : 0)
- | (nev & EV_WRITE ? EPOLLOUT : 0);
+ ev.events = (nev & EV_READ ? EPOLLIN : 0)
+ | (nev & EV_WRITE ? EPOLLOUT : 0);
- epoll_ctl (epoll_fd, mode, fd, &ev);
-}
+ if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
+ return;
-static void
-epoll_postfork_child (EV_P)
-{
- int fd;
+ if (expect_true (errno == ENOENT))
+ {
+ /* on ENOENT the fd went away, so try to do the right thing */
+ if (!nev)
+ return;
- epoll_fd = epoll_create (256);
- fcntl (epoll_fd, F_SETFD, FD_CLOEXEC);
+ if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev))
+ return;
+ }
+ else if (expect_true (errno == EEXIST))
+ {
+ /* on EEXIST we ignored a previous DEL */
+ if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev))
+ return;
+ }
- /* re-register interest in fds */
- for (fd = 0; fd < anfdmax; ++fd)
- if (anfds [fd].events)//D
- epoll_modify (EV_A_ fd, EV_NONE, anfds [fd].events);
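+ /* all attempts failed, so the fd must be bad: stop watching it entirely */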
+ fd_kill (EV_A_ fd);
}
static void
epoll_poll (EV_P_ ev_tstamp timeout)
{
- int eventcnt = epoll_wait (epoll_fd, events, eventmax, ceil (timeout * 1000.));
int i;
+ int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.));
- if (eventcnt < 0)
- return;
+ if (expect_false (eventcnt < 0))
+ {
+ if (errno != EINTR)
+ syserr ("(libev) epoll_wait");
+
+ return;
+ }
for (i = 0; i < eventcnt; ++i)
- fd_event (
- EV_A_
- events [i].data.u64,
- (events [i].events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0)
- | (events [i].events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0)
- );
+ {
+ struct epoll_event *ev = epoll_events + i;
+
+ int fd = ev->data.u64;
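+ /* EPOLLERR and EPOLLHUP are mapped to both read and write readiness
+ below, so watchers of either kind get a chance to notice the condition */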
+ int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0)
+ | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0);
+ int want = anfds [fd].events;
+
+ if (expect_false (got & ~want))
+ {
+ /* we received an event but are not interested in it, try mod or del */
+ ev->events = (want & EV_READ ? EPOLLIN : 0)
+ | (want & EV_WRITE ? EPOLLOUT : 0);
+
+ epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev);
+ }
+
+ fd_event (EV_A_ fd, got);
+ }
/* if the receive array was full, increase its size */
- if (expect_false (eventcnt == eventmax))
+ if (expect_false (eventcnt == epoll_eventmax))
{
- free (events);
- eventmax = array_roundsize (events, eventmax << 1);
- events = malloc (sizeof (struct epoll_event) * eventmax);
+ ev_free (epoll_events);
+ epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1);
+ epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
}
}
-static int
+int inline_size
epoll_init (EV_P_ int flags)
{
- epoll_fd = epoll_create (256);
+ backend_fd = epoll_create (256);
- if (epoll_fd < 0)
+ if (backend_fd < 0)
return 0;
- fcntl (epoll_fd, F_SETFD, FD_CLOEXEC);
+ fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
+
+ backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */
+ backend_modify = epoll_modify;
+ backend_poll = epoll_poll;
+
+ epoll_eventmax = 64; /* initial number of events receivable per poll */
+ epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
+
+ return EVBACKEND_EPOLL;
+}
+
+void inline_size
+epoll_destroy (EV_P)
+{
+ ev_free (epoll_events);
+}
+
+void inline_size
+epoll_fork (EV_P)
+{
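+ /* the epoll fd is shared with the parent after a fork, so we cannot
+ keep using it: create a fresh one and re-register all interesting fds */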
+ close (backend_fd);
- method_fudge = 1e-3; /* needed to compensate for epoll returning early */
- method_modify = epoll_modify;
- method_poll = epoll_poll;
+ while ((backend_fd = epoll_create (256)) < 0)
+ syserr ("(libev) epoll_create");
- eventmax = 64; /* intiial number of events receivable per poll */
- events = malloc (sizeof (struct epoll_event) * eventmax);
+ fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
- return EVMETHOD_EPOLL;
+ fd_rearm_all (EV_A);
}