#define PID_HASHSIZE 16 /* size of pid hash table, must be power of two */
/*#define CLEANUP_INTERVAL 300.*/ /* how often to try to free memory and re-check fds */
-#include "ev.h"
+#ifdef EV_H
+# include EV_H
+#else
+# include "ev.h"
+#endif
#if __GNUC__ >= 3
# define expect(expr,value) __builtin_expect ((expr),(value))
#if EV_MULTIPLICITY
-struct ev_loop
-{
-# define VAR(name,decl) decl;
-# include "ev_vars.h"
-};
-# undef VAR
-# include "ev_wrap.h"
+ struct ev_loop
+ {
+ #define VAR(name,decl) decl;
+ #include "ev_vars.h"
+ #undef VAR
+ };
+ #include "ev_wrap.h"
+
+ struct ev_loop default_loop_struct;
+ static struct ev_loop *default_loop;
#else
-# define VAR(name,decl) static decl;
-# include "ev_vars.h"
-# undef VAR
+ #define VAR(name,decl) static decl;
+ #include "ev_vars.h"
+ #undef VAR
+
+ static int default_loop;
#endif
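
Illustrative aside (not part of the patch): under EV_MULTIPLICITY the per-loop state declared above lives inside struct ev_loop and every internal function receives the loop through the EV_P_/EV_A_ parameter macros, whereas the single-loop build turns the same variables into file-level statics and those macros expand to nothing. A hypothetical caller, written so it compiles either way:

    #include "ev.h"

    static void
    start_watcher (struct ev_io *w)
    {
    #if EV_MULTIPLICITY
      struct ev_loop *loop = ev_default_loop (0);
      ev_io_start (EV_A_ w);   /* expands to ev_io_start (loop, w) */
    #else
      ev_default_loop (0);
      ev_io_start (w);         /* no loop argument in this build */
    #endif
    }
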
return rt_now;
}
-#define array_roundsize(base,n) ((n) | 4 & ~3)
+#define array_roundsize(type,n) ((n) | 4 & ~3)
-#define array_needsize(base,cur,cnt,init) \
+#define array_needsize(type,base,cur,cnt,init) \
if (expect_false ((cnt) > cur)) \
{ \
int newcnt = cur; \
do \
{ \
- newcnt = array_roundsize (base, newcnt << 1); \
+ newcnt = array_roundsize (type, newcnt << 1); \
} \
while ((cnt) > newcnt); \
\
- base = ev_realloc (base, sizeof (*base) * (newcnt)); \
+ base = (type *)ev_realloc (base, sizeof (type) * (newcnt));\
init (base + cur, newcnt - cur); \
cur = newcnt; \
}
-#define array_slim(stem) \
+#define array_slim(type,stem) \
if (stem ## max < array_roundsize (stem ## cnt >> 2)) \
{ \
stem ## max = array_roundsize (stem ## cnt >> 1); \
- base = ev_realloc (base, sizeof (*base) * (stem ## max)); \
+ base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
}
}
}
-static void
-event (EV_P_ W w, int events)
+void
+ev_feed_event (EV_P_ void *w, int revents)
{
- if (w->pending)
+ W w_ = (W)w;
+
+ if (w_->pending)
{
- pendings [ABSPRI (w)][w->pending - 1].events |= events;
+ pendings [ABSPRI (w_)][w_->pending - 1].events |= revents;
return;
}
- w->pending = ++pendingcnt [ABSPRI (w)];
- array_needsize (pendings [ABSPRI (w)], pendingmax [ABSPRI (w)], pendingcnt [ABSPRI (w)], (void));
- pendings [ABSPRI (w)][w->pending - 1].w = w;
- pendings [ABSPRI (w)][w->pending - 1].events = events;
+ w_->pending = ++pendingcnt [ABSPRI (w_)];
+ array_needsize (ANPENDING, pendings [ABSPRI (w_)], pendingmax [ABSPRI (w_)], pendingcnt [ABSPRI (w_)], (void));
+ pendings [ABSPRI (w_)][w_->pending - 1].w = w_;
+ pendings [ABSPRI (w_)][w_->pending - 1].events = revents;
}
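
Illustrative only (not part of the patch): ev_feed_event is the public replacement for the old static event () helper and lets an embedder queue a synthetic event on any started watcher, bypassing the backend. A minimal sketch, assuming a single-loop build and an ev_io watcher that was started elsewhere:

    #include "ev.h"

    static struct ev_io stdin_watcher;   /* assumed active, started with ev_io_start () */

    static void
    pretend_readable (void)
    {
      /* queue EV_READ on the watcher as if the backend had reported it */
      ev_feed_event (&stdin_watcher, EV_READ);
    }
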
static void
int i;
for (i = 0; i < eventcnt; ++i)
- event (EV_A_ events [i], type);
+ ev_feed_event (EV_A_ events [i], type);
}
-static void
-fd_event (EV_P_ int fd, int events)
+inline void
+fd_event (EV_P_ int fd, int revents)
{
ANFD *anfd = anfds + fd;
struct ev_io *w;
for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next)
{
- int ev = w->events & events;
+ int ev = w->events & revents;
if (ev)
- event (EV_A_ (W)w, ev);
+ ev_feed_event (EV_A_ (W)w, ev);
}
}
+void
+ev_feed_fd_event (EV_P_ int fd, int revents)
+{
+ fd_event (EV_A_ fd, revents);
+}
+
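
Similarly illustrative: ev_feed_fd_event exposes the same mechanism per file descriptor, delivering revents to every ev_io watcher registered on that fd. Single-loop build assumed; the wrapper name is hypothetical:

    #include "ev.h"

    static void
    simulate_readable (int fd)
    {
      /* every ev_io watcher on fd whose event mask includes EV_READ fires */
      ev_feed_fd_event (fd, EV_READ);
    }
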
/*****************************************************************************/
static void
anfds [fd].reify = 1;
++fdchangecnt;
- array_needsize (fdchanges, fdchangemax, fdchangecnt, (void));
+ array_needsize (int, fdchanges, fdchangemax, fdchangecnt, (void));
fdchanges [fdchangecnt - 1] = fd;
}
while ((w = (struct ev_io *)anfds [fd].head))
{
ev_io_stop (EV_A_ w);
- event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
+ ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
}
}
((W)heap [k])->active = k + 1;
}
+inline void
+adjustheap (WT *heap, int N, int k, ev_tstamp at)
+{
+ ev_tstamp old_at = heap [k]->at;
+ heap [k]->at = at;
+
+ if (old_at < at)
+ downheap (heap, N, k);
+ else
+ upheap (heap, k);
+}
+
/*****************************************************************************/
typedef struct
{
int old_errno = errno;
gotsig = 1;
+#ifdef WIN32
+ send (sigpipe [1], &signum, 1, MSG_DONTWAIT);
+#else
write (sigpipe [1], &signum, 1);
+#endif
errno = old_errno;
}
}
+void
+ev_feed_signal_event (EV_P_ int signum)
+{
+ WL w;
+
+#if EV_MULTIPLICITY
+ assert (("feeding signal events is only supported in the default loop", loop == default_loop));
+#endif
+
+ --signum;
+
+ if (signum < 0 || signum >= signalmax)
+ return;
+
+ signals [signum].gotsig = 0;
+
+ for (w = signals [signum].head; w; w = w->next)
+ ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
+}
+
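
Illustrative only: ev_feed_signal_event lets the embedding application deliver signals to ev_signal watchers itself, e.g. when it collects them with sigwait () in a dedicated thread instead of relying on libev's handlers. It only works on the default loop, as the assertion above enforces, and is meant for normal (non-signal-handler) context. Single-loop build assumed; deliver_signal is a hypothetical name:

    #include "ev.h"

    static void
    deliver_signal (int signum)
    {
      /* invokes every ev_signal watcher registered for signum with EV_SIGNAL */
      ev_feed_signal_event (signum);
    }
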
static void
sigcb (EV_P_ struct ev_io *iow, int revents)
{
- WL w;
int signum;
+#ifdef WIN32
+ recv (sigpipe [0], &revents, 1, MSG_DONTWAIT);
+#else
read (sigpipe [0], &revents, 1);
+#endif
gotsig = 0;
for (signum = signalmax; signum--; )
if (signals [signum].gotsig)
- {
- signals [signum].gotsig = 0;
-
- for (w = signals [signum].head; w; w = w->next)
- event (EV_A_ (W)w, EV_SIGNAL);
- }
+ ev_feed_signal_event (EV_A_ signum + 1);
}
static void
ev_priority (w) = ev_priority (sw); /* need to do it *now* */
w->rpid = pid;
w->rstatus = status;
- event (EV_A_ (W)w, EV_CHILD);
+ ev_feed_event (EV_A_ (W)w, EV_CHILD);
}
}
if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
{
      /* make sure we are called again until all children have been reaped */
- event (EV_A_ (W)sw, EV_SIGNAL);
+ ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);
child_reap (EV_A_ sw, pid, pid, status);
      child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
if (!method && (methods & EVMETHOD_SELECT)) method = select_init (EV_A_ methods);
#endif
- ev_watcher_init (&sigev, sigcb);
+ ev_init (&sigev, sigcb);
ev_set_priority (&sigev, EV_MAXPRI);
}
}
#endif
#if EV_MULTIPLICITY
-struct ev_loop default_loop_struct;
-static struct ev_loop *default_loop;
-
struct ev_loop *
#else
-static int default_loop;
-
int
#endif
ev_default_loop (int methods)
/*****************************************************************************/
+static int
+any_pending (EV_P)
+{
+ int pri;
+
+ for (pri = NUMPRI; pri--; )
+ if (pendingcnt [pri])
+ return 1;
+
+ return 0;
+}
+
static void
call_pending (EV_P)
{
if (p->w)
{
p->w->pending = 0;
- p->w->cb (EV_A_ p->w, p->events);
+ EV_CB_INVOKE (p->w, p->events);
}
}
}
else
ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
- event (EV_A_ (W)w, EV_TIMEOUT);
+ ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
}
}
assert (("inactive timer on periodic heap detected", ev_is_active (w)));
/* first reschedule or stop timer */
- if (w->interval)
+ if (w->reschedule_cb)
+ {
+ ev_tstamp at = ((WT)w)->at = w->reschedule_cb (w, rt_now + 0.0001);
+
+ assert (("ev_periodic reschedule callback returned time in the past", ((WT)w)->at > rt_now));
+ downheap ((WT *)periodics, periodiccnt, 0);
+ }
+ else if (w->interval)
{
((WT)w)->at += floor ((rt_now - ((WT)w)->at) / w->interval + 1.) * w->interval;
assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > rt_now));
else
ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
- event (EV_A_ (W)w, EV_PERIODIC);
+ ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
}
}
{
struct ev_periodic *w = periodics [i];
- if (w->interval)
- {
- ev_tstamp diff = ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval;
-
- if (fabs (diff) >= 1e-4)
- {
- ev_periodic_stop (EV_A_ w);
- ev_periodic_start (EV_A_ w);
-
- i = 0; /* restart loop, inefficient, but time jumps should be rare */
- }
- }
+ if (w->reschedule_cb)
+ ((WT)w)->at = w->reschedule_cb (w, rt_now);
+ else if (w->interval)
+ ((WT)w)->at += ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval;
}
+
+ /* now rebuild the heap */
+ for (i = periodiccnt >> 1; i--; )
+ downheap ((WT *)periodics, periodiccnt, i);
}
inline int
/* calculate blocking time */
- /* we only need this for !monotonic clockor timers, but as we basically
+ /* we only need this for !monotonic clock or timers, but as we basically
always have timers, we just calculate it always */
#if EV_USE_MONOTONIC
if (expect_true (have_monotonic))
periodics_reify (EV_A); /* absolute timers called first */
/* queue idle watchers unless io or timers are pending */
- if (!pendingcnt)
+ if (idlecnt && !any_pending (EV_A))
queue_events (EV_A_ (W *)idles, idlecnt, EV_IDLE);
/* queue check watchers, to be executed first */
assert (("ev_io_start called with negative fd", fd >= 0));
ev_start (EV_A_ (W)w, 1);
- array_needsize (anfds, anfdmax, fd + 1, anfds_init);
+ array_needsize (ANFD, anfds, anfdmax, fd + 1, anfds_init);
wlist_add ((WL *)&anfds[fd].head, (WL)w);
fd_change (EV_A_ fd);
assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
ev_start (EV_A_ (W)w, ++timercnt);
- array_needsize (timers, timermax, timercnt, (void));
+ array_needsize (struct ev_timer *, timers, timermax, timercnt, (void));
timers [timercnt - 1] = w;
upheap ((WT *)timers, timercnt - 1);
if (ev_is_active (w))
{
if (w->repeat)
- {
- ((WT)w)->at = mn_now + w->repeat;
- downheap ((WT *)timers, timercnt, ((W)w)->active - 1);
- }
+ adjustheap ((WT *)timers, timercnt, ((W)w)->active - 1, mn_now + w->repeat);
else
ev_timer_stop (EV_A_ w);
}
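
Illustrative only: this hunk (apparently inside ev_timer_again) now adjusts a re-armed repeating timer's heap position in place instead of removing and re-inserting it; the caller-visible idiom is unchanged. The usual inactivity-timeout pattern, with a hypothetical 10-second repeat and a single-loop build assumed:

    #include "ev.h"

    static struct ev_timer idle_timeout;

    static void
    timeout_cb (struct ev_timer *w, int revents)
    {
      /* no activity seen for w->repeat (10) seconds */
    }

    static void
    on_activity (void)
    {
      /* push the timeout another w->repeat seconds into the future */
      ev_timer_again (&idle_timeout);
    }

    /* setup: ev_timer_init (&idle_timeout, timeout_cb, 0., 10.);
              ev_timer_again (&idle_timeout); */
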
if (ev_is_active (w))
return;
- assert (("ev_periodic_start called with negative interval value", w->interval >= 0.));
-
- /* this formula differs from the one in periodic_reify because we do not always round up */
- if (w->interval)
- ((WT)w)->at += ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval;
+ if (w->reschedule_cb)
+ ((WT)w)->at = w->reschedule_cb (w, rt_now);
+ else if (w->interval)
+ {
+ assert (("ev_periodic_start called with negative interval value", w->interval >= 0.));
+ /* this formula differs from the one in periodic_reify because we do not always round up */
+ ((WT)w)->at += ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval;
+ }
ev_start (EV_A_ (W)w, ++periodiccnt);
- array_needsize (periodics, periodicmax, periodiccnt, (void));
+ array_needsize (struct ev_periodic *, periodics, periodicmax, periodiccnt, (void));
periodics [periodiccnt - 1] = w;
upheap ((WT *)periodics, periodiccnt - 1);
ev_stop (EV_A_ (W)w);
}
+void
+ev_periodic_again (EV_P_ struct ev_periodic *w)
+{
+ /* TODO: use adjustheap and recalculation */
+ ev_periodic_stop (EV_A_ w);
+ ev_periodic_start (EV_A_ w);
+}
+
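
Illustrative only: the new reschedule_cb hands scheduling of a periodic watcher entirely to user code (it is consulted on start, on each trigger and after detected time jumps), and ev_periodic_again re-schedules an active watcher after its parameters changed. The sketch assumes the matching ev.h change makes ev_periodic_init accept the reschedule callback as its last argument; the one-minute policy and all names are hypothetical, single-loop build assumed:

    #include "ev.h"

    static struct ev_periodic minute_tick;

    /* hypothetical policy: fire again one minute after each invocation */
    static ev_tstamp
    one_minute_later (struct ev_periodic *w, ev_tstamp now)
    {
      return now + 60.;
    }

    static void
    tick_cb (struct ev_periodic *w, int revents)
    {
      /* runs roughly once a minute, immune to wall-clock jumps */
    }

    static void
    setup_tick (void)
    {
      ev_periodic_init (&minute_tick, tick_cb, 0., 0., one_minute_later);
      ev_periodic_start (&minute_tick);
      /* if the schedule changes later: ev_periodic_again (&minute_tick); */
    }
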
void
ev_idle_start (EV_P_ struct ev_idle *w)
{
return;
ev_start (EV_A_ (W)w, ++idlecnt);
- array_needsize (idles, idlemax, idlecnt, (void));
+ array_needsize (struct ev_idle *, idles, idlemax, idlecnt, (void));
idles [idlecnt - 1] = w;
}
return;
ev_start (EV_A_ (W)w, ++preparecnt);
- array_needsize (prepares, preparemax, preparecnt, (void));
+ array_needsize (struct ev_prepare *, prepares, preparemax, preparecnt, (void));
prepares [preparecnt - 1] = w;
}
return;
ev_start (EV_A_ (W)w, ++checkcnt);
- array_needsize (checks, checkmax, checkcnt, (void));
+ array_needsize (struct ev_check *, checks, checkmax, checkcnt, (void));
checks [checkcnt - 1] = w;
}
assert (("ev_signal_start called with illegal signal number", w->signum > 0));
ev_start (EV_A_ (W)w, 1);
- array_needsize (signals, signalmax, w->signum, signals_init);
+ array_needsize (ANSIG, signals, signalmax, w->signum, signals_init);
wlist_add ((WL *)&signals [w->signum - 1].head, (WL)w);
if (!((WL)w)->next)
void
ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg)
{
- struct ev_once *once = ev_malloc (sizeof (struct ev_once));
+ struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));
if (!once)
cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg);
once->cb = cb;
once->arg = arg;
- ev_watcher_init (&once->io, once_cb_io);
+ ev_init (&once->io, once_cb_io);
if (fd >= 0)
{
ev_io_set (&once->io, fd, events);
ev_io_start (EV_A_ &once->io);
}
- ev_watcher_init (&once->to, once_cb_to);
+ ev_init (&once->to, once_cb_to);
if (timeout >= 0.)
{
ev_timer_set (&once->to, timeout, 0.);