/*	$NetBSD: kqueue.c,v 1.3 2021/04/07 03:36:48 christos Exp $	*/
/*	$OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $	*/

/*
 * Copyright 2000-2007 Niels Provos <provos (at) citi.umich.edu>
 * Copyright 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * kqueue(2) backend for libevent: implements the "eventop" interface on
 * top of the BSD/macOS kqueue/kevent facility, including a companion
 * signal backend and (where EVFILT_USER is available) cross-thread
 * event-loop wakeup support.
 */
#include "event2/event-config.h"
#include <sys/cdefs.h>
__RCSID("$NetBSD: kqueue.c,v 1.3 2021/04/07 03:36:48 christos Exp $");
#include "evconfig-private.h"

#ifdef EVENT__HAVE_KQUEUE

#include <sys/types.h>
#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <sys/event.h>
#include <limits.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef EVENT__HAVE_INTTYPES_H
#include <inttypes.h>
#endif

/* Some platforms apparently define the udata field of struct kevent as
 * intptr_t, whereas others define it as void*.  There doesn't seem to be an
 * easy way to tell them apart via autoconf, so we need to use OS macros. */
#if defined(__NetBSD__)
/* On NetBSD, derive the cast target directly from the struct member's own
 * type via typeof, so the macros track any future ABI change. */
#define PTR_TO_UDATA(x) ((typeof(((struct kevent *)0)->udata))(x))
#define INT_TO_UDATA(x) ((typeof(((struct kevent *)0)->udata))(intptr_t)(x))
#elif defined(EVENT__HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__) && !defined(__CloudABI__)
#define PTR_TO_UDATA(x) ((intptr_t)(x))
#define INT_TO_UDATA(x) ((intptr_t)(x))
#else
#define PTR_TO_UDATA(x) (x)
#define INT_TO_UDATA(x) ((void*)(x))
#endif

#include "event-internal.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "evthread-internal.h"
#include "changelist-internal.h"

#include "kqueue-internal.h"

/* Initial size (in entries) of both the changes and events arrays;
 * each grows by doubling on demand. */
#define NEVENT 64

/* Per-event_base state for the kqueue backend. */
struct kqop {
	/* Pending kevent changes to submit on the next dispatch, and the
	 * allocated capacity of that array (in entries). */
	struct kevent *changes;
	int changes_size;

	/* Buffer that receives triggered events from kevent(), and its
	 * allocated capacity (in entries). */
	struct kevent *events;
	int events_size;
	/* The kqueue descriptor itself. */
	int kq;
	/* Nonzero once the EVFILT_USER wakeup event has been registered. */
	int notify_event_added;
	/* Pid that created the kq; kqueue descriptors are not inherited
	 * across fork, so kqop_free() only closes kq in the same process. */
	pid_t pid;
};

static void kqop_free(struct kqop *kqop);

static void *kq_init(struct event_base *);
static int kq_sig_add(struct event_base *, int, short, short, void *);
static int kq_sig_del(struct event_base *, int, short, short, void *);
static int kq_dispatch(struct event_base *, struct timeval *);
static void kq_dealloc(struct event_base *);

/* I/O backend vtable.  Add/del go through the generic changelist; the
 * accumulated changes are flushed to the kernel inside kq_dispatch. */
const struct eventop kqops = {
	"kqueue",
	kq_init,
	event_changelist_add_,
	event_changelist_del_,
	kq_dispatch,
	kq_dealloc,
	1 /* need reinit */,
	EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_FDS,
	EVENT_CHANGELIST_FDINFO_SIZE
};

/* Signal backend vtable: signals are delivered through the same kqueue
 * via EVFILT_SIGNAL, so only add/del are needed here. */
static const struct eventop kqsigops = {
	"kqueue_signal",
	NULL,
	kq_sig_add,
	kq_sig_del,
	NULL,
	NULL,
	1 /* need reinit */,
	0,
	0
};

/*
 * Allocate and initialize the kqueue backend state for 'base'.
 * Returns the new struct kqop on success, or NULL on failure (allocation
 * failure, kqueue() failure, or a broken kqueue implementation).
 * Also installs kqsigops as the base's signal backend.
 */
static void *
kq_init(struct event_base *base)
{
	int kq = -1;
	struct kqop *kqueueop = NULL;

	if (!(kqueueop = mm_calloc(1, sizeof(struct kqop))))
		return (NULL);

/* Initialize the kernel queue */

	if ((kq = kqueue()) == -1) {
		event_warn("kqueue");
		goto err;
	}

	kqueueop->kq = kq;

	kqueueop->pid = getpid();

	/* Initialize fields */
	kqueueop->changes = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->changes == NULL)
		goto err;
	kqueueop->events = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->events == NULL)
		goto err;
	kqueueop->events_size = kqueueop->changes_size = NEVENT;

	/* Check for Mac OS X kqueue bug.  Submit an EV_ADD for the invalid
	 * ident -1 and verify that the error is reported in-band. */
	memset(&kqueueop->changes[0], 0, sizeof kqueueop->changes[0]);
	kqueueop->changes[0].ident = -1;
	kqueueop->changes[0].filter = EVFILT_READ;
	kqueueop->changes[0].flags = EV_ADD;
	/*
	 * If kqueue works, then kevent will succeed, and it will
	 * stick an error in events[0].  If kqueue is broken, then
	 * kevent will fail.
	 */
	if (kevent(kq,
		kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
	    (int)kqueueop->events[0].ident != -1 ||
	    !(kqueueop->events[0].flags & EV_ERROR)) {
		event_warn("%s: detected broken kqueue; not using.", __func__);
		goto err;
	}

	base->evsigsel = &kqsigops;

	return (kqueueop);
err:
	if (kqueueop)
		kqop_free(kqueueop);

	return (NULL);
}

/* Sentinel stored in udata on EV_ADD changes so that in-band EV_ERROR
 * results can be distinguished from errors on EV_DELETE (which leave
 * udata zero); see the EPERM/EPIPE handling in kq_dispatch. */
#define ADD_UDATA 0x30303

/*
 * Fill in *out as a single kevent change for 'fd' and 'filter', derived
 * from the libevent change flags in 'change' (EV_CHANGE_ADD or
 * EV_CHANGE_DEL, optionally with EV_ET for edge-triggered behavior).
 */
static void
kq_setup_kevent(struct kevent *out, evutil_socket_t fd, int filter, short change)
{
	memset(out, 0, sizeof(struct kevent));
	out->ident = fd;
	out->filter = filter;

	if (change & EV_CHANGE_ADD) {
		out->flags = EV_ADD;
		/* We set a magic number here so that we can tell 'add'
		 * errors from 'del' errors. */
		out->udata = INT_TO_UDATA(ADD_UDATA);
		if (change & EV_ET)
			out->flags |= EV_CLEAR;
#ifdef NOTE_EOF
		/* Make it behave like select() and poll() */
		if (filter == EVFILT_READ)
			out->fflags = NOTE_EOF;
#endif
	} else {
		EVUTIL_ASSERT(change & EV_CHANGE_DEL);
		out->flags = EV_DELETE;
	}
}

/*
 * Translate the pending libevent changelist into kqop->changes, growing
 * the array (by doubling) as needed.  Each changelist entry can expand
 * into up to two kevents (one read, one write), which is why the growth
 * check reserves one extra slot.  Returns the number of kevents built,
 * or -1 on overflow or allocation failure.
 */
static int
kq_build_changes_list(const struct event_changelist *changelist,
    struct kqop *kqop)
{
	int i;
	int n_changes = 0;

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *in_ch = &changelist->changes[i];
		struct kevent *out_ch;
		if (n_changes >= kqop->changes_size - 1) {
			int newsize;
			struct kevent *newchanges;

			/* Guard both the int doubling and the byte-count
			 * multiplication against overflow. */
			if (kqop->changes_size > INT_MAX / 2 ||
			    (size_t)kqop->changes_size * 2 > EV_SIZE_MAX /
			    sizeof(struct kevent)) {
				event_warnx("%s: int overflow", __func__);
				return (-1);
			}

			newsize = kqop->changes_size * 2;
			newchanges = mm_realloc(kqop->changes,
			    newsize * sizeof(struct kevent));
			if (newchanges == NULL) {
				event_warn("%s: realloc", __func__);
				return (-1);
			}
			kqop->changes = newchanges;
			kqop->changes_size = newsize;
		}
		if (in_ch->read_change) {
			out_ch = &kqop->changes[n_changes++];
			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_READ,
			    in_ch->read_change);
		}
		if (in_ch->write_change) {
			out_ch = &kqop->changes[n_changes++];
			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_WRITE,
			    in_ch->write_change);
		}
	}
	return n_changes;
}

/*
 * Grow the events result array to 'new_size' entries.
 * Returns 0 on success, -1 on allocation failure (in which case the old
 * array and events_size are left intact and remain usable).
 */
static int
kq_grow_events(struct kqop *kqop, size_t new_size)
{
	struct kevent *newresult;

	newresult = mm_realloc(kqop->events,
	    new_size * sizeof(struct kevent));

	if (newresult) {
		kqop->events = newresult;
		kqop->events_size = new_size;
		return 0;
	} else {
		return -1;
	}
}

/*
 * One pass of the event loop: flush pending changes to the kernel, wait
 * up to 'tv' (NULL = forever) for events, and activate the corresponding
 * libevent events.  Returns 0 on success (including EINTR), -1 on error.
 * Called with th_base_lock held; the lock is dropped around kevent().
 */
static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
	struct kqop *kqop = base->evbase;
	struct kevent *events = kqop->events;
	struct kevent *changes;
	struct timespec ts, *ts_p = NULL;
	int i, n_changes, res;

	if (tv != NULL) {
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec * 1000;
		ts_p = &ts;
	}

	/* Build "changes" from "base->changes" */
	EVUTIL_ASSERT(kqop->changes);
	n_changes = kq_build_changes_list(&base->changelist, kqop);
	if (n_changes < 0)
		return -1;

	event_changelist_remove_all_(&base->changelist, base);

	/* steal the changes array in case some broken code tries to call
	 * dispatch twice at once. */
	changes = kqop->changes;
	kqop->changes = NULL;

	/* Make sure that 'events' is at least as long as the list of changes:
	 * otherwise errors in the changes can get reported as a -1 return
	 * value from kevent() rather than as EV_ERROR events in the events
	 * array.
	 *
	 * (We could instead handle -1 return values from kevent() by
	 * retrying with a smaller changes array or a larger events array,
	 * but this approach seems less risky for now.)
	 */
	if (kqop->events_size < n_changes) {
		int new_size = kqop->events_size;
		do {
			new_size *= 2;
		} while (new_size < n_changes);

		/* NOTE(review): the return value is deliberately ignored
		 * here; on failure events_size is unchanged, so the
		 * kevent() call below stays consistent. */
		kq_grow_events(kqop, new_size);
		events = kqop->events;
	}

	/* Drop the lock while blocked in the kernel so other threads can
	 * modify the base; reacquired before touching base state again. */
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = kevent(kqop->kq, changes, n_changes,
	    events, kqop->events_size, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	EVUTIL_ASSERT(kqop->changes == NULL);
	kqop->changes = changes;

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			/* In-band error for one of our changes; events[i].data
			 * holds the errno value. */
			switch (events[i].data) {

			/* Can occur on delete if we are not currently
			 * watching any events on this fd.  That can
			 * happen when the fd was closed and another
			 * file was opened with that fd. */
			case ENOENT:
			/* Can occur for reasons not fully understood
			 * on FreeBSD. */
			case EINVAL:
				continue;
#if defined(__FreeBSD__)
			/*
			 * This currently occurs if an FD is closed
			 * before the EV_DELETE makes it out via kevent().
			 * The FreeBSD capabilities code sees the blank
			 * capability set and rejects the request to
			 * modify an event.
			 *
			 * To be strictly correct - when an FD is closed,
			 * all the registered events are also removed.
			 * Queuing EV_DELETE to a closed FD is wrong.
			 * The event(s) should just be deleted from
			 * the pending changelist.
			 */
			case ENOTCAPABLE:
				continue;
#endif

			/* Can occur on a delete if the fd is closed. */
			case EBADF:
				/* XXXX On NetBSD, we can also get EBADF if we
				 * try to add the write side of a pipe, but
				 * the read side has already been closed.
				 * Other BSDs call this situation 'EPIPE'. It
				 * would be good if we had a way to report
				 * this situation. */
				continue;
			/* These two can occur on an add if the fd was one side
			 * of a pipe, and the other side was closed. */
			case EPERM:
			case EPIPE:
				/* Report read events, if we're listening for
				 * them, so that the user can learn about any
				 * add errors.  (If the operation was a
				 * delete, then udata should be cleared.) */
				if (events[i].udata) {
					/* The operation was an add:
					 * report the error as a read. */
					which |= EV_READ;
					break;
				} else {
					/* The operation was a del:
					 * report nothing. */
					continue;
				}

			/* Other errors shouldn't occur. */
			default:
				errno = events[i].data;
				return (-1);
			}
		} else if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
#ifdef EVFILT_USER
		} else if (events[i].filter == EVFILT_USER) {
			/* Cross-thread wakeup; no user event to activate. */
			base->is_notify_pending = 0;
#endif
		}

		if (!which)
			continue;

		if (events[i].filter == EVFILT_SIGNAL) {
			evmap_signal_active_(base, events[i].ident, 1);
		} else {
			evmap_io_active_(base, events[i].ident, which | EV_ET);
		}
	}

	if (res == kqop->events_size) {
		/* We used all the events space that we have. Maybe we should
		   make it bigger. */
		kq_grow_events(kqop, kqop->events_size * 2);
	}

	return (0);
}

/*
 * Release all resources held by 'kqop', including the kqueue descriptor
 * (only when still in the process that created it) and the struct itself.
 */
static void
kqop_free(struct kqop *kqop)
{
	if (kqop->changes)
		mm_free(kqop->changes);
	if (kqop->events)
		mm_free(kqop->events);
	if (kqop->kq >= 0 && kqop->pid == getpid())
		close(kqop->kq);
	memset(kqop, 0, sizeof(struct kqop));
	mm_free(kqop);
}

/* Backend teardown hook: restore signal handlers, then free backend state. */
static void
kq_dealloc(struct event_base *base)
{
	struct kqop *kqop = base->evbase;
	evsig_dealloc_(base);
	kqop_free(kqop);
}

/* signal handling */

/*
 * Register EVFILT_SIGNAL interest in 'nsignal' with the kernel, and set
 * the process signal handler so delivery reaches the kqueue.  Unlike I/O
 * events, signal events bypass the changelist and are submitted to the
 * kernel immediately.  Returns 0 on success, -1 on failure.
 */
static int
kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *p)
{
	struct kqop *kqop = base->evbase;
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
	(void)p;

	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);

	memset(&kev, 0, sizeof(kev));
	kev.ident = nsignal;
	kev.filter = EVFILT_SIGNAL;
	kev.flags = EV_ADD;

	/* Be ready for the signal if it is sent any
	 * time between now and the next call to
	 * kq_dispatch. */
	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
		return (-1);

	/* We can set the handler for most signals to SIG_IGN and
	 * still have them reported to us in the queue.  However,
	 * if the handler for SIGCHLD is SIG_IGN, the system reaps
	 * zombie processes for us, and we don't get any notification.
	 * This appears to be the only signal with this quirk. */
	if (evsig_set_handler_(base, nsignal,
		nsignal == SIGCHLD ? SIG_DFL : SIG_IGN) == -1)
		return (-1);

	return (0);
}

/*
 * Remove EVFILT_SIGNAL interest in 'nsignal' and restore the previous
 * signal handler.  Returns 0 on success, -1 on failure.
 */
static int
kq_sig_del(struct event_base *base, int nsignal, short old, short events, void *p)
{
	struct kqop *kqop = base->evbase;
	struct kevent kev;

	struct timespec timeout = { 0, 0 };
	(void)p;

	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);

	memset(&kev, 0, sizeof(kev));
	kev.ident = nsignal;
	kev.filter = EVFILT_SIGNAL;
	kev.flags = EV_DELETE;

	/* Because we insert signal events
	 * immediately, we need to delete them
	 * immediately, too */
	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
		return (-1);

	if (evsig_restore_handler_(base, nsignal) == -1)
		return (-1);

	return (0);
}


/* OSX 10.6 and FreeBSD 8.1 add support for EVFILT_USER, which we can use
 * to wake up the event loop from another thread. */

/* Magic number we use for our filter ID. */
#define NOTIFY_IDENT 42

/*
 * Register the EVFILT_USER wakeup event (once per base).  Returns 0 on
 * success; -1 if EVFILT_USER is unavailable or registration fails.
 */
int
event_kq_add_notify_event_(struct event_base *base)
{
	struct kqop *kqop = base->evbase;
#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
#endif

	if (kqop->notify_event_added)
		return 0;

#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
	memset(&kev, 0, sizeof(kev));
	kev.ident = NOTIFY_IDENT;
	kev.filter = EVFILT_USER;
	/* EV_CLEAR: auto-reset after each trigger. */
	kev.flags = EV_ADD | EV_CLEAR;

	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) {
		event_warn("kevent: adding EVFILT_USER event");
		return -1;
	}

	kqop->notify_event_added = 1;

	return 0;
#else
	return -1;
#endif
}

/*
 * Fire the EVFILT_USER event to wake a thread blocked in kq_dispatch.
 * Returns 0 on success; -1 if the notify event was never added,
 * EVFILT_USER is unavailable, or the trigger fails.
 */
int
event_kq_notify_base_(struct event_base *base)
{
	struct kqop *kqop = base->evbase;
#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
#endif
	if (!kqop->notify_event_added)
		return -1;

#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
	memset(&kev, 0, sizeof(kev));
	kev.ident = NOTIFY_IDENT;
	kev.filter = EVFILT_USER;
	kev.fflags = NOTE_TRIGGER;

	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) {
		event_warn("kevent: triggering EVFILT_USER event");
		return -1;
	}

	return 0;
#else
	return -1;
#endif
}

#endif /* EVENT__HAVE_KQUEUE */