return;
}
+ if (expect_false (!w_->cb)) /* watchers may legally have no callback (e.g. ev_embed); never queue those */
+ return;
+
w_->pending = ++pendingcnt [ABSPRI (w_)];
array_needsize (ANPENDING, pendings [ABSPRI (w_)], pendingmax [ABSPRI (w_)], pendingcnt [ABSPRI (w_)], EMPTY2);
pendings [ABSPRI (w_)][w_->pending - 1].w = w_;
return flags;
}
+unsigned int
+ev_embeddable_backends (void)
+{
+ return EVBACKEND_EPOLL
+ | EVBACKEND_KQUEUE
+ | EVBACKEND_PORT;
+}
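As a point of reference, callers are expected to intersect this set with ev_recommended_backends () when deciding whether an embeddable inner loop is worth creating; a minimal, hypothetical caller-side sketch (not part of this change):

  unsigned int flags = ev_embeddable_backends () & ev_recommended_backends ();
  struct ev_loop *inner = flags ? ev_loop_new (flags) : 0; /* 0: nothing worth embedding here */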
+
unsigned int
ev_backend (EV_P)
{
ev_stop (EV_A_ (W)w);
}
+#if EV_MULTIPLICITY
+static void
+embed_cb (EV_P_ struct ev_io *io, int revents)
+{
+ struct ev_embed *w = (struct ev_embed *)(((char *)io) - offsetof (struct ev_embed, io));
+
+ ev_feed_event (EV_A_ (W)w, EV_EMBED);
+ ev_loop (w->loop, EVLOOP_NONBLOCK);
+}
+
+void
+ev_embed_start (EV_P_ struct ev_embed *w)
+{
+ if (expect_false (ev_is_active (w)))
+ return;
+
+ {
+ struct ev_loop *loop = w->loop;
+ assert (("loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
+ ev_io_init (&w->io, embed_cb, backend_fd, EV_READ);
+ }
+
+ ev_io_start (EV_A_ &w->io);
+ ev_start (EV_A_ (W)w, 1);
+}
+
+void
+ev_embed_stop (EV_P_ struct ev_embed *w)
+{
+ ev_clear_pending (EV_A_ (W)w);
+ if (expect_false (!ev_is_active (w)))
+ return;
+
+ ev_io_stop (EV_A_ &w->io);
+ ev_stop (EV_A_ (W)w);
+}
+#endif
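For illustration, a brief application-side usage sketch of the new watcher type; the helper name is hypothetical, and the inner loop is assumed to have been created with an embeddable backend as sketched above:

  static struct ev_embed embed;

  /* hypothetical helper: hook an already-created embeddable loop into another loop */
  static void
  embed_inner (struct ev_loop *outer, struct ev_loop *inner)
  {
    ev_embed_init (&embed, 0, inner); /* a 0 callback is allowed; embed_cb sweeps the inner loop itself */
    ev_embed_start (outer, &embed);   /* under EV_MULTIPLICITY the target loop is passed explicitly */
    /* fd watchers that should run on the embedded backend are then started on inner */
  }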
+
/*****************************************************************************/
struct ev_once
#define EV_CHECK 0x001000L /* check only */
#define EV_PREPARE 0x002000L /* prepare only */
#define EV_CHILD 0x004000L /* child/pid only */
+#define EV_EMBED 0x008000L /* embedded event loop */
#define EV_ERROR 0x800000L /* sent when an error occurs */
/* can be used to add custom fields to all watchers, while losing binary compatibility */
int rstatus; /* rw, holds the exit status, use the macros from sys/wait.h */
};
+#if EV_MULTIPLICITY
+/* used to embed an event loop inside another */
+/* the callback gets invoked when the embedded event loop has handled events, and can be 0 */
+struct ev_embed
+{
+ EV_WATCHER (ev_embed)
+
+ struct ev_io io; /* private */
+ struct ev_loop *loop; /* ro */
+};
+#endif
+
/* the presence of this union forces similar struct layout */
union ev_any_watcher
{
struct ev_check check;
struct ev_signal signal;
struct ev_child child;
+ struct ev_embed embed;
};
/* bits for ev_default_loop and ev_loop_new */
unsigned int ev_supported_backends (void);
unsigned int ev_recommended_backends (void);
+unsigned int ev_embeddable_backends (void);
ev_tstamp ev_time (void);
#define ev_prepare_set(ev) /* nop, yes, this is a serious in-joke */
#define ev_check_set(ev) /* nop, yes, this is a serious in-joke */
#define ev_child_set(ev,pid_) do { (ev)->pid = (pid_); } while (0)
+#define ev_embed_set(ev,loop_) do { (ev)->loop = (loop_); } while (0)
#define ev_io_init(ev,cb,fd,events) do { ev_init ((ev), (cb)); ev_io_set ((ev),(fd),(events)); } while (0)
#define ev_timer_init(ev,cb,after,repeat) do { ev_init ((ev), (cb)); ev_timer_set ((ev),(after),(repeat)); } while (0)
#define ev_prepare_init(ev,cb) do { ev_init ((ev), (cb)); ev_prepare_set ((ev)); } while (0)
#define ev_check_init(ev,cb) do { ev_init ((ev), (cb)); ev_check_set ((ev)); } while (0)
#define ev_child_init(ev,cb,pid) do { ev_init ((ev), (cb)); ev_child_set ((ev),(pid)); } while (0)
+#define ev_embed_init(ev,cb,loop) do { ev_init ((ev), (cb)); ev_embed_set ((ev),(loop)); } while (0)
#define ev_is_pending(ev) (0 + ((struct ev_watcher *)(void *)(ev))->pending) /* ro, true when watcher is waiting for callback invocation */
#define ev_is_active(ev) (0 + ((struct ev_watcher *)(void *)(ev))->active) /* ro, true when the watcher has been started */
/* only supported in the default loop */
void ev_child_start (EV_P_ struct ev_child *w);
void ev_child_stop (EV_P_ struct ev_child *w);
+
+# if EV_MULTIPLICITY
+/* only supported when the loop to be embedded is in fact embeddable */
+void ev_embed_start (EV_P_ struct ev_embed *w);
+void ev_embed_stop (EV_P_ struct ev_embed *w);
+# endif
+
#endif
#ifdef __cplusplus
(nev & EV_READ ? EPOLLIN : 0)
| (nev & EV_WRITE ? EPOLLOUT : 0);
- if (epoll_ctl (epoll_fd, mode, fd, &ev))
+ if (epoll_ctl (backend_fd, mode, fd, &ev))
if (errno != ENOENT /* on ENOENT the fd went away, so try to do the right thing */
- || (nev && epoll_ctl (epoll_fd, EPOLL_CTL_ADD, fd, &ev)))
+ || (nev && epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)))
fd_kill (EV_A_ fd);
}
epoll_poll (EV_P_ ev_tstamp timeout)
{
int i;
- int eventcnt = epoll_wait (epoll_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.));
+ int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.));
if (eventcnt < 0)
{
static int
epoll_init (EV_P_ int flags)
{
- epoll_fd = epoll_create (256);
+ backend_fd = epoll_create (256);
- if (epoll_fd < 0)
+ if (backend_fd < 0)
return 0;
- fcntl (epoll_fd, F_SETFD, FD_CLOEXEC);
+ fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
backend_fudge = 1e-3; /* needed to compensate for epoll returning early */
backend_modify = epoll_modify;
static void
epoll_destroy (EV_P)
{
- close (epoll_fd);
+ close (backend_fd);
ev_free (epoll_events);
}
static void
epoll_fork (EV_P)
{
- close (epoll_fd);
+ close (backend_fd);
- while ((epoll_fd = epoll_create (256)) < 0)
+ while ((backend_fd = epoll_create (256)) < 0)
syserr ("(libev) epoll_create");
- fcntl (epoll_fd, F_SETFD, FD_CLOEXEC);
+ fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
fd_rearm_all (EV_A);
}
ts.tv_sec = (time_t)timeout;
ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9);
- res = kevent (kqueue_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
+ res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
kqueue_changecnt = 0;
if (res < 0)
struct kevent ch, ev;
/* Initialize the kernel queue */
- if ((kqueue_fd = kqueue ()) < 0)
+ if ((backend_fd = kqueue ()) < 0)
return 0;
- fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */
+ fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */
/* Check for Mac OS X kqueue bug. */
ch.ident = -1;
* stick an error in ev. If kqueue is broken, then
* kevent will fail.
*/
- if (kevent (kqueue_fd, &ch, 1, &ev, 1, 0) != 1
+ if (kevent (backend_fd, &ch, 1, &ev, 1, 0) != 1
|| ev.ident != -1
|| ev.flags != EV_ERROR)
{
/* detected broken kqueue */
- close (kqueue_fd);
+ close (backend_fd);
return 0;
}
static void
kqueue_destroy (EV_P)
{
- close (kqueue_fd);
+ close (backend_fd);
ev_free (kqueue_events);
ev_free (kqueue_changes);
static void
kqueue_fork (EV_P)
{
- close (kqueue_fd);
+ close (backend_fd);
- while ((kqueue_fd = kqueue ()) < 0)
+ while ((backend_fd = kqueue ()) < 0)
syserr ("(libev) kqueue");
- fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC);
+ fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
/* re-register interest in fds */
fd_rearm_all (EV_A);
if (!nev)
{
if (oev)
- port_dissociate (port_fd, PORT_SOURCE_FD, fd);
+ port_dissociate (backend_fd, PORT_SOURCE_FD, fd);
}
else if (0 >
port_associate (
- port_fd, PORT_SOURCE_FD, fd,
+ backend_fd, PORT_SOURCE_FD, fd,
(nev & EV_READ ? POLLIN : 0)
| (nev & EV_WRITE ? POLLOUT : 0),
0
ts.tv_sec = (time_t)timeout;
ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9);
- res = port_getn (port_fd, port_events, port_eventmax, &nget, &ts);
+ res = port_getn (backend_fd, port_events, port_eventmax, &nget, &ts);
if (res < 0)
{
port_init (EV_P_ int flags)
{
/* Initialize the kernel queue */
- if ((port_fd = port_create ()) < 0)
+ if ((backend_fd = port_create ()) < 0)
return 0;
- fcntl (port_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */
+ fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */
backend_fudge = 1e-3; /* needed to compensate for port_getn returning early */
backend_modify = port_modify;
static void
port_destroy (EV_P)
{
- close (port_fd);
+ close (backend_fd);
ev_free (port_events);
}
static void
port_fork (EV_P)
{
- close (port_fd);
+ close (backend_fd);
- while ((port_fd = port_create ()) < 0)
+ while ((backend_fd = port_create ()) < 0)
syserr ("(libev) port");
- fcntl (port_fd, F_SETFD, FD_CLOEXEC);
+ fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
/* re-register interest in fds */
fd_rearm_all (EV_A);
VARx(ev_tstamp, backend_fudge) /* assumed typical timer resolution */
VAR (backend_modify, void (*backend_modify)(EV_P_ int fd, int oev, int nev))
VAR (backend_poll , void (*backend_poll)(EV_P_ ev_tstamp timeout))
+VARx(int, backend_fd)
VARx(int, postfork) /* true if we need to recreate kernel state after fork */
VARx(int, activecnt) /* number of active events */
#endif
#if EV_USE_EPOLL || EV_GENWRAP
-VARx(int, epoll_fd)
-
VARx(struct epoll_event *, epoll_events)
VARx(int, epoll_eventmax)
#endif
#if EV_USE_KQUEUE || EV_GENWRAP
-VARx(int, kqueue_fd)
VARx(struct kevent *, kqueue_changes)
VARx(int, kqueue_changemax)
VARx(int, kqueue_changecnt)
#endif
#if EV_USE_PORT || EV_GENWRAP
-VARx(int, port_fd)
VARx(struct port_event *, port_events)
VARx(int, port_eventmax)
#endif
#define backend_fudge ((loop)->backend_fudge)
#define backend_modify ((loop)->backend_modify)
#define backend_poll ((loop)->backend_poll)
+#define backend_fd ((loop)->backend_fd)
#define postfork ((loop)->postfork)
#define activecnt ((loop)->activecnt)
#define vec_ri ((loop)->vec_ri)
#define pollcnt ((loop)->pollcnt)
#define pollidxs ((loop)->pollidxs)
#define pollidxmax ((loop)->pollidxmax)
-#define epoll_fd ((loop)->epoll_fd)
#define epoll_events ((loop)->epoll_events)
#define epoll_eventmax ((loop)->epoll_eventmax)
-#define kqueue_fd ((loop)->kqueue_fd)
#define kqueue_changes ((loop)->kqueue_changes)
#define kqueue_changemax ((loop)->kqueue_changemax)
#define kqueue_changecnt ((loop)->kqueue_changecnt)
#define kqueue_events ((loop)->kqueue_events)
#define kqueue_eventmax ((loop)->kqueue_eventmax)
-#define port_fd ((loop)->port_fd)
#define port_events ((loop)->port_events)
#define port_eventmax ((loop)->port_eventmax)
#define anfds ((loop)->anfds)