From 4ebe9e295b974622f802229ca8e371ee6df9525a Mon Sep 17 00:00:00 2001
From: root
Date: Wed, 28 Nov 2007 11:15:55 +0000
Subject: [PATCH] experimental, and likely broken, inotify support

---
 ev.c        | 206 +++++++++++++++++++++++++++++++++++++++++++++++++---
 ev.h        |  12 +--
 ev.pod      |  18 ++++-
 ev_epoll.c  |   8 +-
 ev_kqueue.c |  10 +--
 ev_poll.c   |   6 +-
 ev_port.c   |   8 +-
 ev_select.c |   4 +-
 ev_vars.h   |  10 ++-
 ev_wrap.h   |   3 +
 libev.m4    |   8 +-
 11 files changed, 250 insertions(+), 43 deletions(-)

diff --git a/ev.c b/ev.c
index 06c0ec3..0d5e4b9 100644
--- a/ev.c
+++ b/ev.c
@@ -96,6 +96,14 @@ extern "C" {
 #  endif
 # endif
 
+# ifndef EV_USE_INOTIFY
+#  if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H
+#   define EV_USE_INOTIFY 1
+#  else
+#   define EV_USE_INOTIFY 0
+#  endif
+# endif
+
 #endif
 
 #include
@@ -112,6 +120,12 @@ extern "C" {
 
 #include
 
+#ifdef EV_H
+# include EV_H
+#else
+# include "ev.h"
+#endif
+
 #ifndef _WIN32
 # include
 # include
@@ -158,6 +172,10 @@
 # define EV_USE_PORT 0
 #endif
 
+#ifndef EV_USE_INOTIFY
+# define EV_USE_INOTIFY 0
+#endif
+
 #ifndef EV_PID_HASHSIZE
 # if EV_MINIMAL
 #  define EV_PID_HASHSIZE 1
@@ -166,6 +184,14 @@
 # endif
 #endif
 
+#ifndef EV_INOTIFY_HASHSIZE
+# if EV_MINIMAL
+#  define EV_INOTIFY_HASHSIZE 1
+# else
+#  define EV_INOTIFY_HASHSIZE 16
+# endif
+#endif
+
 /**/
 
 #ifndef CLOCK_MONOTONIC
@@ -182,18 +208,20 @@
 # include
 #endif
 
+#if !EV_STAT_ENABLE
+# define EV_USE_INOTIFY 0
+#endif
+
+#if EV_USE_INOTIFY
+# include <sys/inotify.h>
+#endif
+
 /**/
 
 #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
 #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
 /*#define CLEANUP_INTERVAL (MAX_BLOCKTIME * 5.) /* how often to try to free memory and re-check fds */
 
-#ifdef EV_H
-# include EV_H
-#else
-# include "ev.h"
-#endif
-
 #if __GNUC__ >= 3
 # define expect(expr,value) __builtin_expect ((expr),(value))
 # define inline_size static inline /* inline for codesize */
@@ -298,6 +326,13 @@ typedef struct
   int events;
 } ANPENDING;
 
+typedef struct
+{
+#if EV_USE_INOTIFY
+  WL head;
+#endif
+} ANFS;
+
 #if EV_MULTIPLICITY
 
   struct ev_loop
@@ -881,6 +916,11 @@ loop_init (EV_P_ unsigned int flags)
         flags |= ev_recommended_backends ();
 
       backend = 0;
+      backend_fd = -1;
+#if EV_USE_INOTIFY
+      fs_fd = -2;
+#endif
+
 #if EV_USE_PORT
       if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
 #endif
@@ -907,6 +947,14 @@ loop_destroy (EV_P)
 {
   int i;
 
+#if EV_USE_INOTIFY
+  if (fs_fd >= 0)
+    close (fs_fd);
+#endif
+
+  if (backend_fd >= 0)
+    close (backend_fd);
+
 #if EV_USE_PORT
   if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
 #endif
@@ -1667,6 +1715,127 @@ ev_child_stop (EV_P_ ev_child *w)
 #define DEF_STAT_INTERVAL 5.0074891
 #define MIN_STAT_INTERVAL 0.1074891
 
+void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);
+
+#if EV_USE_INOTIFY
+# define EV_INOTIFY_BUFSIZE ((PATH_MAX + sizeof (struct inotify_event)) + 2048)
+
+static void noinline
+infy_add (EV_P_ ev_stat *w)
+{
+  w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_DONT_FOLLOW | IN_MASK_ADD);
+
+  if (w->wd < 0)
+    {
+      ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */
+
+      /* monitor some parent directory for speedup hints */
+      if (errno == ENOENT || errno == EACCES)
+        {
+          char path [PATH_MAX];
+          strcpy (path, w->path);
+
+          do
+            {
+              int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF
+                       | (errno == EACCES ? IN_ATTRIB : IN_CREATE | IN_MOVED_TO);
+
+              char *pend = strrchr (path, '/');
+
+              if (!pend)
+                break; /* whoops, no '/', complain to your admin */
+
+              *pend = 0;
+              w->wd = inotify_add_watch (fs_fd, path, IN_DELETE_SELF | IN_CREATE | IN_MOVED_TO | IN_MASK_ADD);
+            }
+          while (w->wd < 0 && (errno == ENOENT || errno == EACCES));
+        }
+    }
+  else
+    ev_timer_stop (EV_A_ &w->timer); /* we can watch this in a race-free way */
+
+  if (w->wd >= 0)
+    wlist_add (&fs_hash [w->wd & (EV_INOTIFY_HASHSIZE - 1)].head, (WL)w);
+}
+
+static void noinline
+infy_del (EV_P_ ev_stat *w)
+{
+  WL w_;
+  int slot;
+  int wd = w->wd;
+
+  if (wd < 0)
+    return;
+
+  w->wd = -2;
+  slot = wd & (EV_INOTIFY_HASHSIZE - 1);
+  wlist_del (&fs_hash [slot].head, (WL)w);
+
+  /* remove this watcher, if others are watching it, they will rearm */
+  inotify_rm_watch (fs_fd, wd);
+}
+
+static void noinline
+infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
+{
+  if (slot < 0)
+    /* overflow, need to check for all hash slots */
+    for (slot = 0; slot < EV_INOTIFY_HASHSIZE; ++slot)
+      infy_wd (EV_A_ slot, wd, ev);
+  else
+    {
+      WL w_;
+
+      for (w_ = fs_hash [slot & (EV_INOTIFY_HASHSIZE - 1)].head; w_; )
+        {
+          ev_stat *w = (ev_stat *)w_;
+          w_ = w_->next; /* lets us remove this watcher and all before it */
+
+          if (w->wd == wd || wd == -1)
+            {
+              if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF))
+                {
+                  w->wd = -1;
+                  infy_add (EV_A_ w); /* re-add, no matter what */
+                }
+
+              stat_timer_cb (EV_P_ &w->timer, 0);
+            }
+        }
+    }
+}
+
+static void
+infy_cb (EV_P_ ev_io *w, int revents)
+{
+  char buf [EV_INOTIFY_BUFSIZE];
+  struct inotify_event *ev = (struct inotify_event *)buf;
+  int ofs;
+  int len = read (fs_fd, buf, sizeof (buf));
+
+  for (ofs = 0; ofs < len; ofs += sizeof (struct inotify_event) + ev->len)
+    infy_wd (EV_A_ ev->wd, ev->wd, ev);
+}
+
+void inline_size
+infy_init (EV_P)
+{
+  if (fs_fd != -2)
+    return;
+
+  fs_fd = inotify_init ();
+
+  if (fs_fd >= 0)
+    {
+      ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ);
+      ev_set_priority (&fs_w, EV_MAXPRI);
+      ev_io_start (EV_A_ &fs_w);
+    }
+}
+
+#endif
+
 void
 ev_stat_stat (EV_P_ ev_stat *w)
 {
@@ -1676,7 +1845,7 @@ ev_stat_stat (EV_P_ ev_stat *w)
     w->attr.st_nlink = 1;
 }
 
-static void
+void noinline
 stat_timer_cb (EV_P_ ev_timer *w_, int revents)
 {
   ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
@@ -1687,7 +1856,15 @@ stat_timer_cb (EV_P_ ev_timer *w_, int revents)
   ev_stat_stat (EV_A_ w);
 
   if (memcmp (&w->prev, &w->attr, sizeof (ev_statdata)))
-    ev_feed_event (EV_A_ w, EV_STAT);
+    {
+      #if EV_USE_INOTIFY
+        infy_del (EV_A_ w);
+        infy_add (EV_A_ w);
+        ev_stat_stat (EV_A_ w); /* avoid race... */
+      #endif
+
+      ev_feed_event (EV_A_ w, EV_STAT);
+    }
 }
 
 void
@@ -1707,7 +1884,15 @@ ev_stat_start (EV_P_ ev_stat *w)
 
   ev_timer_init (&w->timer, stat_timer_cb, w->interval, w->interval);
   ev_set_priority (&w->timer, ev_priority (w));
-  ev_timer_start (EV_A_ &w->timer);
+
+#if EV_USE_INOTIFY
+  infy_init (EV_A);
+
+  if (fs_fd >= 0)
+    infy_add (EV_A_ w);
+  else
+#endif
+    ev_timer_start (EV_A_ &w->timer);
 
   ev_start (EV_A_ (W)w, 1);
 }
@@ -1719,6 +1904,9 @@ ev_stat_stop (EV_P_ ev_stat *w)
   if (expect_false (!ev_is_active (w)))
     return;
 
+#if EV_USE_INOTIFY
+  infy_del (EV_A_ w);
+#endif
   ev_timer_stop (EV_A_ &w->timer);
 
   ev_stop (EV_A_ (W)w);
diff --git a/ev.h b/ev.h
index db79520..bd023a7 100644
--- a/ev.h
+++ b/ev.h
@@ -220,23 +220,25 @@ typedef struct ev_child
 
 #if EV_STAT_ENABLE
 /* st_nlink = 0 means missing file or other error */
-#ifdef _WIN32
+# ifdef _WIN32
 typedef struct _stati64 ev_statdata;
-#else
+# else
 typedef struct stat ev_statdata;
-#endif
+# endif
 
 /* invoked each time the stat data changes for a given path */
 /* revent EV_STAT */
 typedef struct ev_stat
 {
-  EV_WATCHER (ev_stat)
+  EV_WATCHER_LIST (ev_stat)
 
   ev_timer timer; /* private */
   ev_tstamp interval; /* ro */
   const char *path; /* ro */
   ev_statdata prev; /* ro */
   ev_statdata attr; /* ro */
+
+  int wd; /* wd for inotify, fd for kqueue */
 } ev_stat;
 #endif
@@ -426,7 +428,7 @@ void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revent
 #define ev_periodic_set(ev,at_,ival_,res_) do { (ev)->at = (at_); (ev)->interval = (ival_); (ev)->reschedule_cb= (res_); } while (0)
 #define ev_signal_set(ev,signum_) do { (ev)->signum = (signum_); } while (0)
 #define ev_child_set(ev,pid_) do { (ev)->pid = (pid_); } while (0)
-#define ev_stat_set(ev,path_,interval_) do { (ev)->path = (path_); (ev)->interval = (interval_); } while (0)
+#define ev_stat_set(ev,path_,interval_) do { (ev)->path = (path_); (ev)->interval = (interval_); (ev)->wd = -2; } while (0)
 #define ev_idle_set(ev) /* nop, yes, this is a serious in-joke */
 #define ev_prepare_set(ev) /* nop, yes, this is a serious in-joke */
 #define ev_check_set(ev) /* nop, yes, this is a serious in-joke */
diff --git a/ev.pod b/ev.pod
index cac8de1..241fade 100644
--- a/ev.pod
+++ b/ev.pod
@@ -2016,6 +2016,12 @@ backend for Solaris 10 systems.
 
 reserved for future expansion, works like the USE symbols above.
 
+=item EV_USE_INOTIFY
+
+If defined to be C<1>, libev will compile in support for the Linux inotify
+interface to speed up C<ev_stat> watchers. Its actual availability will
+be detected at runtime.
+
 =item EV_H
 
 The name of the F<ev.h> header file used to include it. The default if
@@ -2080,7 +2086,15 @@ some inlining decisions, saves roughly 30% codesize of amd64.
 C<ev_child> watchers use a small hash table to distribute workload by pid.
 The default size is C<16> (or C<1> with C<EV_MINIMAL>), usually more than
 enough. If you need to manage thousands of children you might want to
-increase this value.
+increase this value (I<must> be a power of two).
+
+=item EV_INOTIFY_HASHSIZE
+
+C<ev_stat> watchers use a small hash table to distribute workload by
+inotify watch id. The default size is C<16> (or C<1> with C<EV_MINIMAL>),
+usually more than enough. If you need to manage thousands of C<ev_stat>
+watchers you might want to increase this value (I<must> be a power of
+two).
 
 =item EV_COMMON
 
@@ -2150,7 +2164,7 @@ documentation for C.
 =item Stopping check/prepare/idle watchers: O(1)
 
-=item Stopping an io/signal/child watcher: O(number_of_watchers_for_this_(fd/signal/pid % 16))
+=item Stopping an io/signal/child watcher: O(number_of_watchers_for_this_(fd/signal/pid % EV_PID_HASHSIZE))
 
 =item Finding the next timer per loop iteration: O(1)
 
diff --git a/ev_epoll.c b/ev_epoll.c
index 4edbecd..54a4ccb 100644
--- a/ev_epoll.c
+++ b/ev_epoll.c
@@ -79,7 +79,7 @@ epoll_poll (EV_P_ ev_tstamp timeout)
     }
 }
 
-static int
+int inline_size
 epoll_init (EV_P_ int flags)
 {
   backend_fd = epoll_create (256);
@@ -99,15 +99,13 @@ epoll_init (EV_P_ int flags)
   return EVBACKEND_EPOLL;
 }
 
-static void
+void inline_size
 epoll_destroy (EV_P)
 {
-  close (backend_fd);
-
   ev_free (epoll_events);
 }
 
-static void
+void inline_size
 epoll_fork (EV_P)
 {
   close (backend_fd);
diff --git a/ev_kqueue.c b/ev_kqueue.c
index 7a34e9b..afd3f9c 100644
--- a/ev_kqueue.c
+++ b/ev_kqueue.c
@@ -36,7 +36,7 @@
 #include
 #include
 
-static void
+void inline_speed
 kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
 {
   struct kevent *ke;
@@ -142,7 +142,7 @@ kqueue_poll (EV_P_ ev_tstamp timeout)
     }
 }
 
-static int
+int inline_size
 kqueue_init (EV_P_ int flags)
 {
   struct kevent ch, ev;
@@ -168,16 +168,14 @@ kqueue_init (EV_P_ int flags)
   return EVBACKEND_KQUEUE;
 }
 
-static void
+void inline_size
 kqueue_destroy (EV_P)
 {
-  close (backend_fd);
-
   ev_free (kqueue_events);
   ev_free (kqueue_changes);
 }
 
-static void
+void inline_size
 kqueue_fork (EV_P)
 {
   close (backend_fd);
diff --git a/ev_poll.c b/ev_poll.c
index 293958f..52fe42d 100644
--- a/ev_poll.c
+++ b/ev_poll.c
@@ -1,5 +1,5 @@
 /*
- * libev epoll fd activity backend
+ * libev poll fd activity backend
  *
  * Copyright (c) 2007 Marc Alexander Lehmann
  * All rights reserved.
@@ -105,7 +105,7 @@ poll_poll (EV_P_ ev_tstamp timeout)
     );
 }
 
-static int
+int inline_size
 poll_init (EV_P_ int flags)
 {
   backend_fudge = 1e-3; /* needed to compensate for select returning early, very conservative */
@@ -118,7 +118,7 @@ poll_init (EV_P_ int flags)
   return EVBACKEND_POLL;
 }
 
-static void
+void inline_size
 poll_destroy (EV_P)
 {
   ev_free (pollidxs);
diff --git a/ev_port.c b/ev_port.c
index e6555bb..db3bae5 100644
--- a/ev_port.c
+++ b/ev_port.c
@@ -108,7 +108,7 @@ port_poll (EV_P_ ev_tstamp timeout)
     }
 }
 
-static int
+int inline_size
 port_init (EV_P_ int flags)
 {
   /* Initalize the kernel queue */
@@ -127,15 +127,13 @@ port_init (EV_P_ int flags)
   return EVBACKEND_PORT;
 }
 
-static void
+void inline_size
 port_destroy (EV_P)
 {
-  close (backend_fd);
-
   ev_free (port_events);
 }
 
-static void
+void inline_size
 port_fork (EV_P)
 {
   close (backend_fd);
diff --git a/ev_select.c b/ev_select.c
index 38a2398..1df0953 100644
--- a/ev_select.c
+++ b/ev_select.c
@@ -200,7 +200,7 @@ select_poll (EV_P_ ev_tstamp timeout)
 #endif
 }
 
-static int
+int inline_size
 select_init (EV_P_ int flags)
 {
   backend_fudge = 1e-2; /* needed to compensate for select returning early, very conservative */
@@ -224,7 +224,7 @@ select_init (EV_P_ int flags)
   return EVBACKEND_SELECT;
 }
 
-static void
+void inline_size
 select_destroy (EV_P)
 {
   ev_free (vec_ri);
diff --git a/ev_vars.h b/ev_vars.h
index c1bfaa4..3750492 100644
--- a/ev_vars.h
+++ b/ev_vars.h
@@ -2,7 +2,7 @@ VARx(ev_tstamp, now_floor) /* last time we refreshed rt_time */
 VARx(ev_tstamp, mn_now)    /* monotonic clock "now" */
-VARx(ev_tstamp, rtmn_diff) /* difference realtime - monotonic time */
+VARx(ev_tstamp, rtmn_diff) /* difference realtime - monotonic time */
 
 VARx(int, backend)
 VARx(ev_tstamp, backend_fudge) /* assumed typical timer resolution */
 
@@ -11,7 +11,7 @@ VAR (backend_poll , void (*backend_poll)(EV_P_ ev_tstamp timeout))
 VARx(int, backend_fd)
 
 VARx(int, postfork)  /* true if we need to recreate kernel state after fork */
-VARx(int, activecnt) /* number of active events */
+VARx(int, activecnt) /* total number of active events ("refcount") */
 
 #if EV_USE_SELECT || EV_GENWRAP
 VARx(void *, vec_ri)
@@ -86,5 +86,11 @@ VARx(int, forkmax)
 VARx(int, forkcnt)
 #endif
 
+#if EV_USE_INOTIFY || EV_GENWRAP
+VARx(int, fs_fd)
+VARx(ev_io, fs_w);
+VAR (fs_hash, ANFS fs_hash [EV_INOTIFY_HASHSIZE])
+#endif
+
 #undef VARx
diff --git a/ev_wrap.h b/ev_wrap.h
index 37812ca..f3942f9 100644
--- a/ev_wrap.h
+++ b/ev_wrap.h
@@ -54,3 +54,6 @@
 #define forks ((loop)->forks)
 #define forkmax ((loop)->forkmax)
 #define forkcnt ((loop)->forkcnt)
+#define fs_fd ((loop)->fs_fd)
+#define fs_w ((loop)->fs_w);
+#define fs_hash ((loop)->fs_hash)
diff --git a/libev.m4 b/libev.m4
index 55f94f4..39c28f4 100644
--- a/libev.m4
+++ b/libev.m4
@@ -2,14 +2,14 @@ dnl this file is part of libev, do not make local modifications
 dnl http://software.schmorp.de/pkg/libev
 
 dnl libev support
-AC_CHECK_HEADERS(sys/epoll.h sys/event.h sys/queue.h port.h poll.h sys/select.h)
+AC_CHECK_HEADERS(sys/inotify.h sys/epoll.h sys/event.h sys/queue.h port.h poll.h sys/select.h)
 
-AC_CHECK_FUNCS(epoll_ctl kqueue port_create poll select)
+AC_CHECK_FUNCS(inotify_init epoll_ctl kqueue port_create poll select)
 
 AC_CHECK_FUNC(clock_gettime, [], [
    if test -z "$LIBEV_M4_AVOID_LIBRT"; then
-      AC_CHECK_LIB(rt, clock_gettime)
-      AC_CHECK_FUNCS(clock_gettime)
+      AC_CHECK_LIB(rt, clock_gettime)
+      AC_CHECK_FUNCS(clock_gettime)
   fi
 ])
-- 
2.43.0
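
For readers unfamiliar with the watcher type this patch accelerates, the following stand-alone sketch shows typical ev_stat usage. It is not part of the patch; the watched path and the 10-second fallback interval are arbitrary example values. When EV_USE_INOTIFY is compiled in and inotify_init () succeeds, ev_stat_start registers an inotify watch via infy_add and only starts the interval timer when inotify_add_watch fails; without inotify the interval drives plain stat () polling.

/* Illustrative sketch only -- not part of the patch above. */
#include <stdio.h>
#include "ev.h"

static void
passwd_cb (struct ev_loop *loop, ev_stat *w, int revents)
{
  /* st_nlink == 0 in attr means the file is missing or stat failed */
  if (w->attr.st_nlink)
    printf ("%s changed, size is now %ld\n", w->path, (long)w->attr.st_size);
  else
    printf ("%s does not exist\n", w->path);
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);
  ev_stat passwd;

  /* the interval is only a fallback used when inotify is unavailable */
  ev_stat_init (&passwd, passwd_cb, "/etc/passwd", 10.);
  ev_stat_start (loop, &passwd);

  ev_loop (loop, 0);
  return 0;
}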
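
The power-of-two requirement noted in the ev.pod hunk follows directly from how the patch picks a hash slot: infy_add and infy_del mask the inotify watch descriptor with EV_INOTIFY_HASHSIZE - 1 instead of dividing. A tiny stand-alone illustration (the descriptor values are made up):

/* Stand-alone illustration of the slot selection used by infy_add/infy_del;
 * the mask is only equivalent to wd % EV_INOTIFY_HASHSIZE when the size is
 * a power of two. */
#include <stdio.h>

#define EV_INOTIFY_HASHSIZE 16

int
main (void)
{
  int wd;

  for (wd = 14; wd <= 18; ++wd)
    printf ("wd %2d -> slot %d\n", wd, wd & (EV_INOTIFY_HASHSIZE - 1));

  return 0;
}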