void inline_speed
kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
{
- struct kevent *ke;
-
++kqueue_changecnt;
array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2);
if (kqueue_changecnt > kqueue_eventmax)
{
ev_free (kqueue_events);
- kqueue_eventmax = array_roundsize (struct kevent, kqueue_changecnt);
+ kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt);
kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
}
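/* the resize above makes sure kevent () below has room to report
   one error event per submitted change */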
res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
kqueue_changecnt = 0;
- if (res < 0)
+ if (expect_false (res < 0))
{
if (errno != EINTR)
syserr ("(libev) kevent");
if (expect_false (res == kqueue_eventmax))
{
ev_free (kqueue_events);
- kqueue_eventmax = array_roundsize (struct kevent, kqueue_eventmax << 1);
+ kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_eventmax + 1);
kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
}
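/* a result buffer that came back completely full hints that more events
   may be pending, hence the enlargement above before the next poll */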
}
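/* array_nextsize itself is not visible in this hunk; below is a minimal
   sketch of a compatible helper, assuming its only contract is to return
   some capacity >= cnt given (element size, current size, needed count).
   The name array_nextsize_sketch is hypothetical, not libev's: */
static int
array_nextsize_sketch (int elem, int cur, int cnt)
{
  int ncur = cur + 1;

  /* grow geometrically until the requested count fits */
  do
    ncur <<= 1;
  while (cnt > ncur);

  (void)elem; /* element size would only matter for allocation rounding */

  return ncur;
}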
int inline_size
kqueue_init (EV_P_ int flags)
{
- struct kevent ch, ev;
-
/* Initialize the kernel queue */
if ((backend_fd = kqueue ()) < 0)
return 0;
fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */
- /* fudge *might* be zero from the documentation, but bsd docs are notoriously wrong */
- backend_fudge = 1e-3; /* needed to compensate for kevent returning early */
+ backend_fudge = 0.;
backend_modify = kqueue_modify;
backend_poll = kqueue_poll;