* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#ifndef EV_STANDALONE
# include "config.h"
# if HAVE_CLOCK_GETTIME
-# define EV_USE_MONOTONIC 1
-# define EV_USE_REALTIME 1
+# ifndef EV_USE_MONOTONIC
+# define EV_USE_MONOTONIC 1
+# endif
+# ifndef EV_USE_REALTIME
+# define EV_USE_REALTIME 1
+# endif
# endif
-# if HAVE_SELECT && HAVE_SYS_SELECT_H
+# if HAVE_SELECT && HAVE_SYS_SELECT_H && !defined (EV_USE_SELECT)
# define EV_USE_SELECT 1
# endif
-# if HAVE_POLL && HAVE_POLL_H
+# if HAVE_POLL && HAVE_POLL_H && !defined (EV_USE_POLL)
# define EV_USE_POLL 1
# endif
-# if HAVE_EPOLL && HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
+# if HAVE_EPOLL && HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H && !defined (EV_USE_EPOLL)
# define EV_USE_EPOLL 1
# endif
-# if HAVE_KQUEUE && HAVE_WORKING_KQUEUE && HAVE_SYS_EVENT_H && HAVE_SYS_QUEUE_H
+# if HAVE_KQUEUE && HAVE_SYS_EVENT_H && HAVE_SYS_QUEUE_H && !defined (EV_USE_KQUEUE)
# define EV_USE_KQUEUE 1
# endif
#define PID_HASHSIZE 16 /* size of pid hash table, must be power of two */
/*#define CLEANUP_INTERVAL 300. /* how often to try to free memory and re-check fds */
-#include "ev.h"
+#ifdef EV_H
+# include EV_H
+#else
+# include "ev.h"
+#endif
#if __GNUC__ >= 3
# define expect(expr,value) __builtin_expect ((expr),(value))
static int have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
-#include "ev_win32.c"
+#ifdef WIN32
+# include "ev_win32.c"
+#endif
/*****************************************************************************/
#if EV_MULTIPLICITY
-struct ev_loop
-{
-# define VAR(name,decl) decl;
-# include "ev_vars.h"
-};
-# undef VAR
-# include "ev_wrap.h"
+ struct ev_loop
+ {
+ ev_tstamp ev_rt_now;
+ #define ev_rt_now ((loop)->ev_rt_now)
+ #define VAR(name,decl) decl;
+ #include "ev_vars.h"
+ #undef VAR
+ };
+ #include "ev_wrap.h"
+
+ struct ev_loop default_loop_struct;
+ static struct ev_loop *default_loop;
#else
-# define VAR(name,decl) static decl;
-# include "ev_vars.h"
-# undef VAR
+ ev_tstamp ev_rt_now;
+ #define VAR(name,decl) static decl;
+ #include "ev_vars.h"
+ #undef VAR
+
+ static int default_loop;
#endif
/*****************************************************************************/
-inline ev_tstamp
+ev_tstamp
ev_time (void)
{
#if EV_USE_REALTIME
return ev_time ();
}
+#if EV_MULTIPLICITY
ev_tstamp
ev_now (EV_P)
{
- return rt_now;
+ return ev_rt_now;
}
+#endif
#define array_roundsize(type,n) ((n) | 4 & ~3)
}
}
-static void
-event (EV_P_ W w, int events)
+void
+ev_feed_event (EV_P_ void *w, int revents)
{
- if (w->pending)
+ W w_ = (W)w;
+
+ if (w_->pending)
{
- pendings [ABSPRI (w)][w->pending - 1].events |= events;
+ pendings [ABSPRI (w_)][w_->pending - 1].events |= revents;
return;
}
- w->pending = ++pendingcnt [ABSPRI (w)];
- array_needsize (ANPENDING, pendings [ABSPRI (w)], pendingmax [ABSPRI (w)], pendingcnt [ABSPRI (w)], (void));
- pendings [ABSPRI (w)][w->pending - 1].w = w;
- pendings [ABSPRI (w)][w->pending - 1].events = events;
+ w_->pending = ++pendingcnt [ABSPRI (w_)];
+ array_needsize (ANPENDING, pendings [ABSPRI (w_)], pendingmax [ABSPRI (w_)], pendingcnt [ABSPRI (w_)], (void));
+ pendings [ABSPRI (w_)][w_->pending - 1].w = w_;
+ pendings [ABSPRI (w_)][w_->pending - 1].events = revents;
}
static void
int i;
for (i = 0; i < eventcnt; ++i)
- event (EV_A_ events [i], type);
+ ev_feed_event (EV_A_ events [i], type);
}
-static void
-fd_event (EV_P_ int fd, int events)
+inline void
+fd_event (EV_P_ int fd, int revents)
{
ANFD *anfd = anfds + fd;
struct ev_io *w;
for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next)
{
- int ev = w->events & events;
+ int ev = w->events & revents;
if (ev)
- event (EV_A_ (W)w, ev);
+ ev_feed_event (EV_A_ (W)w, ev);
}
}
+void
+ev_feed_fd_event (EV_P_ int fd, int revents)
+{
+ fd_event (EV_A_ fd, revents);
+}
+
/*****************************************************************************/
static void
while ((w = (struct ev_io *)anfds [fd].head))
{
ev_io_stop (EV_A_ w);
- event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
+ ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
}
}
((W)heap [k])->active = k + 1;
}
+inline void
+adjustheap (WT *heap, int N, int k)
+{
+ upheap (heap, k);
+ downheap (heap, N, k);
+}
+
/*****************************************************************************/
typedef struct
}
}
+void
+ev_feed_signal_event (EV_P_ int signum)
+{
+ WL w;
+
+#if EV_MULTIPLICITY
+ assert (("feeding signal events is only supported in the default loop", loop == default_loop));
+#endif
+
+ --signum;
+
+ if (signum < 0 || signum >= signalmax)
+ return;
+
+ signals [signum].gotsig = 0;
+
+ for (w = signals [signum].head; w; w = w->next)
+ ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
+}
+
static void
sigcb (EV_P_ struct ev_io *iow, int revents)
{
- WL w;
int signum;
#ifdef WIN32
for (signum = signalmax; signum--; )
if (signals [signum].gotsig)
- {
- signals [signum].gotsig = 0;
-
- for (w = signals [signum].head; w; w = w->next)
- event (EV_A_ (W)w, EV_SIGNAL);
- }
+ ev_feed_signal_event (EV_A_ signum + 1);
}
static void
ev_priority (w) = ev_priority (sw); /* need to do it *now* */
w->rpid = pid;
w->rstatus = status;
- event (EV_A_ (W)w, EV_CHILD);
+ ev_feed_event (EV_A_ (W)w, EV_CHILD);
}
}
if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
{
/* make sure we are called again until all children have been reaped */
- event (EV_A_ (W)sw, EV_SIGNAL);
+ ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);
child_reap (EV_A_ sw, pid, pid, status);
child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but event catches that */
}
#endif
- rt_now = ev_time ();
+ ev_rt_now = ev_time ();
mn_now = get_clock ();
now_floor = mn_now;
- rtmn_diff = rt_now - mn_now;
+ rtmn_diff = ev_rt_now - mn_now;
if (methods == EVMETHOD_AUTO)
if (!enable_secure () && getenv ("LIBEV_METHODS"))
if (!method && (methods & EVMETHOD_SELECT)) method = select_init (EV_A_ methods);
#endif
- ev_watcher_init (&sigev, sigcb);
+ ev_init (&sigev, sigcb);
ev_set_priority (&sigev, EV_MAXPRI);
}
}
/* have to use the microsoft-never-gets-it-right macro */
array_free_microshit (fdchange);
array_free_microshit (timer);
+#if EV_PERIODICS
array_free_microshit (periodic);
+#endif
array_free_microshit (idle);
array_free_microshit (prepare);
array_free_microshit (check);
#endif
#if EV_MULTIPLICITY
-struct ev_loop default_loop_struct;
-static struct ev_loop *default_loop;
-
struct ev_loop *
#else
-static int default_loop;
-
int
#endif
ev_default_loop (int methods)
if (p->w)
{
p->w->pending = 0;
- p->w->cb (EV_A_ p->w, p->events);
+ EV_CB_INVOKE (p->w, p->events);
}
}
}
if (w->repeat)
{
assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
- ((WT)w)->at = mn_now + w->repeat;
+
+ ((WT)w)->at += w->repeat;
+ if (((WT)w)->at < mn_now)
+ ((WT)w)->at = mn_now;
+
downheap ((WT *)timers, timercnt, 0);
}
else
ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
- event (EV_A_ (W)w, EV_TIMEOUT);
+ ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
}
}
+#if EV_PERIODICS
static void
periodics_reify (EV_P)
{
- while (periodiccnt && ((WT)periodics [0])->at <= rt_now)
+ while (periodiccnt && ((WT)periodics [0])->at <= ev_rt_now)
{
struct ev_periodic *w = periodics [0];
/* first reschedule or stop timer */
if (w->reschedule_cb)
{
- ev_tstamp at = ((WT)w)->at = w->reschedule_cb (w, rt_now + 0.0001);
+ ev_tstamp at = ((WT)w)->at = w->reschedule_cb (w, ev_rt_now + 0.0001);
- assert (("ev_periodic reschedule callback returned time in the past", ((WT)w)->at > rt_now));
+ assert (("ev_periodic reschedule callback returned time in the past", ((WT)w)->at > ev_rt_now));
downheap ((WT *)periodics, periodiccnt, 0);
}
else if (w->interval)
{
- ((WT)w)->at += floor ((rt_now - ((WT)w)->at) / w->interval + 1.) * w->interval;
- assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > rt_now));
+ ((WT)w)->at += floor ((ev_rt_now - ((WT)w)->at) / w->interval + 1.) * w->interval;
+ assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > ev_rt_now));
downheap ((WT *)periodics, periodiccnt, 0);
}
else
ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
- event (EV_A_ (W)w, EV_PERIODIC);
+ ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
}
}
struct ev_periodic *w = periodics [i];
if (w->reschedule_cb)
- ((WT)w)->at = w->reschedule_cb (w, rt_now);
+ ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
else if (w->interval)
- ((WT)w)->at += ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval;
+ ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval;
}
/* now rebuild the heap */
for (i = periodiccnt >> 1; i--; )
downheap ((WT *)periodics, periodiccnt, i);
}
+#endif
inline int
time_update_monotonic (EV_P)
if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
{
- rt_now = rtmn_diff + mn_now;
+ ev_rt_now = rtmn_diff + mn_now;
return 0;
}
else
{
now_floor = mn_now;
- rt_now = ev_time ();
+ ev_rt_now = ev_time ();
return 1;
}
}
for (i = 4; --i; ) /* loop a few times, before making important decisions */
{
- rtmn_diff = rt_now - mn_now;
+ rtmn_diff = ev_rt_now - mn_now;
if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP)
return; /* all is well */
- rt_now = ev_time ();
+ ev_rt_now = ev_time ();
mn_now = get_clock ();
now_floor = mn_now;
}
+# if EV_PERIODICS
periodics_reschedule (EV_A);
+# endif
/* no timer adjustment, as the monotonic clock doesn't jump */
/* timers_reschedule (EV_A_ rtmn_diff - odiff) */
}
else
#endif
{
- rt_now = ev_time ();
+ ev_rt_now = ev_time ();
- if (expect_false (mn_now > rt_now || mn_now < rt_now - MAX_BLOCKTIME - MIN_TIMEJUMP))
+ if (expect_false (mn_now > ev_rt_now || mn_now < ev_rt_now - MAX_BLOCKTIME - MIN_TIMEJUMP))
{
+#if EV_PERIODICS
periodics_reschedule (EV_A);
+#endif
/* adjust timers. this is easy, as the offset is the same for all */
for (i = 0; i < timercnt; ++i)
- ((WT)timers [i])->at += rt_now - mn_now;
+ ((WT)timers [i])->at += ev_rt_now - mn_now;
}
- mn_now = rt_now;
+ mn_now = ev_rt_now;
}
}
else
#endif
{
- rt_now = ev_time ();
- mn_now = rt_now;
+ ev_rt_now = ev_time ();
+ mn_now = ev_rt_now;
}
if (flags & EVLOOP_NONBLOCK || idlecnt)
if (block > to) block = to;
}
+#if EV_PERIODICS
if (periodiccnt)
{
- ev_tstamp to = ((WT)periodics [0])->at - rt_now + method_fudge;
+ ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + method_fudge;
if (block > to) block = to;
}
+#endif
if (block < 0.) block = 0.;
}
method_poll (EV_A_ block);
- /* update rt_now, do magic */
+ /* update ev_rt_now, do magic */
time_update (EV_A);
/* queue pending timers and reschedule them */
timers_reify (EV_A); /* relative timers called last */
+#if EV_PERIODICS
periodics_reify (EV_A); /* absolute timers called first */
+#endif
/* queue idle watchers unless io or timers are pending */
if (idlecnt && !any_pending (EV_A))
if (!ev_is_active (w))
return;
+ assert (("ev_io_start called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
+
wlist_del ((WL *)&anfds[w->fd].head, (WL)w);
ev_stop (EV_A_ (W)w);
if (((W)w)->active < timercnt--)
{
timers [((W)w)->active - 1] = timers [timercnt];
- downheap ((WT *)timers, timercnt, ((W)w)->active - 1);
+ adjustheap ((WT *)timers, timercnt, ((W)w)->active - 1);
}
- ((WT)w)->at = w->repeat;
+ ((WT)w)->at -= mn_now;
ev_stop (EV_A_ (W)w);
}
if (w->repeat)
{
((WT)w)->at = mn_now + w->repeat;
- downheap ((WT *)timers, timercnt, ((W)w)->active - 1);
+ adjustheap ((WT *)timers, timercnt, ((W)w)->active - 1);
}
else
ev_timer_stop (EV_A_ w);
ev_timer_start (EV_A_ w);
}
+#if EV_PERIODICS
void
ev_periodic_start (EV_P_ struct ev_periodic *w)
{
return;
if (w->reschedule_cb)
- ((WT)w)->at = w->reschedule_cb (w, rt_now);
+ ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
else if (w->interval)
{
assert (("ev_periodic_start called with negative interval value", w->interval >= 0.));
/* this formula differs from the one in periodic_reify because we do not always round up */
- ((WT)w)->at += ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval;
+ ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval;
}
ev_start (EV_A_ (W)w, ++periodiccnt);
if (((W)w)->active < periodiccnt--)
{
periodics [((W)w)->active - 1] = periodics [periodiccnt];
- downheap ((WT *)periodics, periodiccnt, ((W)w)->active - 1);
+ adjustheap ((WT *)periodics, periodiccnt, ((W)w)->active - 1);
}
ev_stop (EV_A_ (W)w);
void
ev_periodic_again (EV_P_ struct ev_periodic *w)
{
+ /* TODO: use adjustheap and recalculation */
ev_periodic_stop (EV_A_ w);
ev_periodic_start (EV_A_ w);
}
+#endif
void
ev_idle_start (EV_P_ struct ev_idle *w)
ev_check_stop (EV_P_ struct ev_check *w)
{
ev_clear_pending (EV_A_ (W)w);
- if (ev_is_active (w))
+ if (!ev_is_active (w))
return;
checks [((W)w)->active - 1] = checks [--checkcnt];
ev_child_stop (EV_P_ struct ev_child *w)
{
ev_clear_pending (EV_A_ (W)w);
- if (ev_is_active (w))
+ if (!ev_is_active (w))
return;
wlist_del ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w);
once->cb = cb;
once->arg = arg;
- ev_watcher_init (&once->io, once_cb_io);
+ ev_init (&once->io, once_cb_io);
if (fd >= 0)
{
ev_io_set (&once->io, fd, events);
ev_io_start (EV_A_ &once->io);
}
- ev_watcher_init (&once->to, once_cb_to);
+ ev_init (&once->to, once_cb_to);
if (timeout >= 0.)
{
ev_timer_set (&once->to, timeout, 0.);
}
}
+#ifdef __cplusplus
+}
+#endif
+
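
For reference, a minimal usage sketch (not part of the patch) of the event-feeding API exported above: ev_feed_event (), ev_feed_fd_event () and ev_feed_signal_event (). The watcher, fd and function names below are hypothetical illustrations; note that ev_feed_signal_event () is only valid on the default loop, as the assert in the patch enforces.

#include <signal.h>
#include "ev.h"

static void
io_cb (EV_P_ struct ev_io *w, int revents)
{
  /* invoked both for real fd readiness and for manually fed events */
}

static void
feed_example (EV_P_ struct ev_io *iow, int fd)
{
  /* queue EV_READ for one specific, already-started watcher */
  ev_feed_event (EV_A_ iow, EV_READ);

  /* queue EV_READ for every io watcher registered on fd */
  ev_feed_fd_event (EV_A_ fd, EV_READ);

  /* act as if SIGUSR1 had been caught (default loop only) */
  ev_feed_signal_event (EV_A_ SIGUSR1);
}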