/*
* Copyright 2007 Marc Alexander Lehmann <libev@schmorp.de>
* Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
 */
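
  /* bring the kernel's EVFILT_READ registration in line with the new event mask */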
    {
      if (nev & EV_READ)
        kqueue_change (fd, EVFILT_READ, EV_ADD, NOTE_EOF);
      else
        kqueue_change (fd, EVFILT_READ, EV_DELETE, 0);
    }
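
  /* likewise for EVFILT_WRITE */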
    {
      if (nev & EV_WRITE)
        kqueue_change (fd, EVFILT_WRITE, EV_ADD, NOTE_EOF);
      else
        kqueue_change (fd, EVFILT_WRITE, EV_DELETE, 0);
    }

  /*
   * If kqueue works, kevent succeeds and reports an error in ev;
   * if kqueue is broken, kevent will fail.
   */
  if (kevent (kq_fd, &ch, 1, &ev, 1, 0) != 1
      || ev.ident != -1
      || ev.flags != EV_ERROR)
    {
      close (kq_fd); /* broken kqueue detected */
      return 0;
    }
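
  /* kqueue looks usable, so select it as the event method */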
  ev_method = EVMETHOD_KQUEUE;
  method_fudge = 1e-3; /* needed to compensate for kevent returning early */
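
  /* receive buffer for the kevents the kernel reports on each poll */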
  kq_eventmax = 64; /* initial number of events receivable per poll */
  kq_events = malloc (sizeof (struct kevent) * kq_eventmax);