X-Git-Url: https://git.llucax.com/software/libev.git/blobdiff_plain/4e62f1267caf106a83789131419fc7d2853bcd4f..647706a2acbfa58a6fdf5fb3ab17f82833faa0b0:/ev.c

diff --git a/ev.c b/ev.c
index ab89221..221da59 100644
--- a/ev.c
+++ b/ev.c
@@ -283,22 +283,22 @@ syserr (const char *msg)
     }
 }
 
-static void *(*alloc)(void *ptr, size_t size) = realloc;
+static void *(*alloc)(void *ptr, long size);
 
 void
-ev_set_allocator (void *(*cb)(void *ptr, size_t size))
+ev_set_allocator (void *(*cb)(void *ptr, long size))
 {
   alloc = cb;
 }
 
 inline_speed void *
-ev_realloc (void *ptr, size_t size)
+ev_realloc (void *ptr, long size)
 {
-  ptr = alloc (ptr, size);
+  ptr = alloc ? alloc (ptr, size) : realloc (ptr, size);
 
   if (!ptr && size)
     {
-      fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", (long)size);
+      fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size);
       abort ();
     }
 
@@ -326,12 +326,12 @@ typedef struct
   int events;
 } ANPENDING;
 
+#if EV_USE_INOTIFY
 typedef struct
 {
-#if EV_USE_INOTIFY
   WL head;
-#endif
 } ANFS;
+#endif
 
 #if EV_MULTIPLICITY
 
@@ -591,7 +591,6 @@ fd_rearm_all (EV_P)
 {
   int fd;
 
-  /* this should be highly optimised to not do anything but set a flag */
   for (fd = 0; fd < anfdmax; ++fd)
     if (anfds [fd].events)
       {
@@ -907,6 +906,12 @@ loop_init (EV_P_ unsigned int flags)
       now_floor = mn_now;
       rtmn_diff = ev_rt_now - mn_now;
 
+      /* pid check not overridable via env */
+#ifndef _WIN32
+      if (flags & EVFLAG_FORKCHECK)
+        curpid = getpid ();
+#endif
+
       if (!(flags & EVFLAG_NOENV)
           && !enable_secure ()
           && getenv ("LIBEV_FLAGS"))
@@ -987,6 +992,8 @@ loop_destroy (EV_P)
   backend = 0;
 }
 
+void inline_size infy_fork (EV_P);
+
 void inline_size
 loop_fork (EV_P)
 {
@@ -999,6 +1006,9 @@ loop_fork (EV_P)
 #if EV_USE_EPOLL
   if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
 #endif
+#if EV_USE_INOTIFY
+  infy_fork (EV_A);
+#endif
 
   if (ev_is_active (&sigev))
     {
@@ -1267,10 +1277,10 @@ time_update (EV_P)
       /* loop a few times, before making important decisions.
        * on the choice of "4": one iteration isn't enough,
        * in case we get preempted during the calls to
-       * ev_time and get_clock. a second call is almost guarenteed
+       * ev_time and get_clock. a second call is almost guaranteed
        * to succeed in that case, though. and looping a few more times
        * doesn't hurt either as we only do this on time-jumps or
-       * in the unlikely event of getting preempted here.
+       * in the unlikely event of having been preempted here.
        */
       for (i = 4; --i; )
         {
@@ -1302,7 +1312,7 @@ time_update (EV_P)
           periodics_reschedule (EV_A);
 #endif
 
-          /* adjust timers. this is easy, as the offset is the same for all */
+          /* adjust timers. this is easy, as the offset is the same for all of them */
           for (i = 0; i < timercnt; ++i)
             ((WT)timers [i])->at += ev_rt_now - mn_now;
         }
@@ -1332,17 +1342,28 @@ ev_loop (EV_P_ int flags)
             ? EVUNLOOP_ONE
             : EVUNLOOP_CANCEL;
 
-  while (activecnt)
+  call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */
+
+  while (expect_false (!activecnt))
     {
-      /* we might have forked, so reify kernel state if necessary */
-      #if EV_FORK_ENABLE
-      if (expect_false (postfork))
-        if (forkcnt)
-          {
-            queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
-            call_pending (EV_A);
-          }
-      #endif
+#ifndef _WIN32
+      if (expect_false (curpid)) /* penalise the forking check even more */
+        if (expect_false (getpid () != curpid))
+          {
+            curpid = getpid ();
+            postfork = 1;
+          }
+#endif
+
+#if EV_FORK_ENABLE
+      /* we might have forked, so queue fork handlers */
+      if (expect_false (postfork))
+        if (forkcnt)
+          {
+            queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
+            call_pending (EV_A);
+          }
+#endif
 
       /* queue check watchers (and execute them) */
       if (expect_false (preparecnt))
@@ -1351,6 +1372,9 @@ ev_loop (EV_P_ int flags)
           call_pending (EV_A);
         }
 
+      if (expect_false (!activecnt))
+        break;
+
       /* we might have forked, so reify kernel state if necessary */
       if (expect_false (postfork))
         loop_fork (EV_A);
@@ -1360,9 +1384,9 @@ ev_loop (EV_P_ int flags)
 
       /* calculate blocking time */
       {
-        double block;
+        ev_tstamp block;
 
-        if (flags & EVLOOP_NONBLOCK || idlecnt)
+        if (expect_false (flags & EVLOOP_NONBLOCK || idlecnt || !activecnt))
           block = 0.; /* do not block at all */
         else
           {
@@ -1715,7 +1739,7 @@ ev_child_stop (EV_P_ ev_child *w)
 #define DEF_STAT_INTERVAL 5.0074891
 #define MIN_STAT_INTERVAL 0.1074891
 
-void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);
+static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);
 
 #if EV_USE_INOTIFY
 # define EV_INOTIFY_BUFSIZE 8192
@@ -1833,6 +1857,38 @@ infy_init (EV_P)
     }
 }
 
+void inline_size
+infy_fork (EV_P)
+{
+  int slot;
+
+  if (fs_fd < 0)
+    return;
+
+  close (fs_fd);
+  fs_fd = inotify_init ();
+
+  for (slot = 0; slot < EV_INOTIFY_HASHSIZE; ++slot)
+    {
+      WL w_ = fs_hash [slot].head;
+      fs_hash [slot].head = 0;
+
+      while (w_)
+        {
+          ev_stat *w = (ev_stat *)w_;
+          w_ = w_->next; /* lets us add this watcher */
+
+          w->wd = -1;
+
+          if (fs_fd >= 0)
+            infy_add (EV_A_ w); /* re-add, no matter what */
+          else
+            ev_timer_start (EV_A_ &w->timer);
+        }
+
+    }
+}
+
 #endif
 
 void
@@ -1844,7 +1900,7 @@ ev_stat_stat (EV_P_ ev_stat *w)
     w->attr.st_nlink = 1;
 }
 
-void noinline
+static void noinline
 stat_timer_cb (EV_P_ ev_timer *w_, int revents)
 {
   ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
@@ -1854,8 +1910,20 @@ stat_timer_cb (EV_P_ ev_timer *w_, int revents)
   w->prev = w->attr;
   ev_stat_stat (EV_A_ w);
 
-  if (memcmp (&w->prev, &w->attr, sizeof (ev_statdata)))
-    {
+  /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */
+  if (
+    w->prev.st_dev != w->attr.st_dev
+    || w->prev.st_ino != w->attr.st_ino
+    || w->prev.st_mode != w->attr.st_mode
+    || w->prev.st_nlink != w->attr.st_nlink
+    || w->prev.st_uid != w->attr.st_uid
+    || w->prev.st_gid != w->attr.st_gid
+    || w->prev.st_rdev != w->attr.st_rdev
+    || w->prev.st_size != w->attr.st_size
+    || w->prev.st_atime != w->attr.st_atime
+    || w->prev.st_mtime != w->attr.st_mtime
+    || w->prev.st_ctime != w->attr.st_ctime
+  ) {
 #if EV_USE_INOTIFY
       infy_del (EV_A_ w);
       infy_add (EV_A_ w);
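
The first hunk changes the allocator callback signature from size_t to long and makes the realloc fallback lazy: realloc is only used when no custom allocator has been installed. A minimal caller-side sketch of an allocator matching the new signature; persistent_realloc, its retry policy and the 60-second wait are assumptions for illustration, not part of libev:

#include <stdlib.h>
#include <unistd.h>

#include <ev.h>

/* hypothetical allocator with the new long-based signature; like realloc
   it must free when size is 0 and return a block otherwise, but this one
   retries instead of ever returning 0 (which would make ev_realloc abort) */
static void *
persistent_realloc (void *ptr, long size)
{
  if (!size)
    {
      free (ptr);
      return 0;
    }

  for (;;)
    {
      void *nptr = realloc (ptr, (size_t)size);

      if (nptr)
        return nptr;

      sleep (60); /* assumed policy: wait and hope memory frees up */
    }
}

int
main (void)
{
  ev_set_allocator (persistent_realloc); /* install before any watcher is started */

  struct ev_loop *loop = ev_default_loop (0);
  ev_loop (loop, 0); /* returns immediately here, as no watchers are active */

  return 0;
}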
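
The loop_init and ev_loop hunks add an opt-in per-iteration getpid () comparison: a loop created with EVFLAG_FORKCHECK records curpid once and sets postfork by itself when the pid changes, so kernel state is re-created via loop_fork without an explicit ev_default_fork () call. A usage sketch under the assumption that the matching ev.h of this revision declares EVFLAG_FORKCHECK:

#include <unistd.h>

#include <ev.h>

int
main (void)
{
  /* EVFLAG_FORKCHECK: the loop compares getpid () against the pid recorded
     in loop_init on every iteration and sets postfork on a mismatch */
  struct ev_loop *loop = ev_default_loop (EVFLAG_FORKCHECK);

  if (fork () == 0)
    {
      /* child: the next iteration notices the pid change and re-creates
         kernel state (epoll fd, inotify fd, signal pipe) via loop_fork */
      ev_loop (loop, EVLOOP_NONBLOCK);
      _exit (0);
    }

  ev_loop (loop, 0);

  return 0;
}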
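
The stat_timer_cb hunk replaces the memcmp of two ev_statdata values with a field-by-field comparison of w->prev and w->attr (NetBSD's struct stat contains padding that memcmp would also compare). A hypothetical watcher setup showing the two members involved; passwd_cb, the path and the 10-second polling fallback interval are arbitrary choices for illustration:

#include <stdio.h>

#include <ev.h>

/* hypothetical callback: w->attr holds the current stat data, w->prev the
   previous one; st_nlink == 0 in attr means the path does not exist */
static void
passwd_cb (struct ev_loop *loop, ev_stat *w, int revents)
{
  if (w->attr.st_nlink)
    printf ("%s changed, size is now %ld\n", w->path, (long)w->attr.st_size);
  else
    printf ("%s does not exist\n", w->path);
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);
  ev_stat passwd;

  ev_stat_init (&passwd, passwd_cb, "/etc/passwd", 10.);
  ev_stat_start (loop, &passwd);

  ev_loop (loop, 0);

  return 0;
}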