event.c revision 1.3 1 1.3 christos /* $NetBSD: event.c,v 1.3 2015/04/07 17:34:20 christos Exp $ */
2 1.1 christos
3 1.1 christos /*
4 1.1 christos * Copyright (c) 2000-2007 Niels Provos <provos (at) citi.umich.edu>
5 1.1 christos * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
6 1.1 christos *
7 1.1 christos * Redistribution and use in source and binary forms, with or without
8 1.1 christos * modification, are permitted provided that the following conditions
9 1.1 christos * are met:
10 1.1 christos * 1. Redistributions of source code must retain the above copyright
11 1.1 christos * notice, this list of conditions and the following disclaimer.
12 1.1 christos * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 christos * notice, this list of conditions and the following disclaimer in the
14 1.1 christos * documentation and/or other materials provided with the distribution.
15 1.1 christos * 3. The name of the author may not be used to endorse or promote products
16 1.1 christos * derived from this software without specific prior written permission.
17 1.1 christos *
18 1.1 christos * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 1.1 christos * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 1.1 christos * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 1.1 christos * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 1.1 christos * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 1.1 christos * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 1.1 christos * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 1.1 christos * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 1.1 christos * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 1.1 christos * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 1.1 christos */
29 1.1 christos #include "event2/event-config.h"
30 1.1 christos #include "evconfig-private.h"
31 1.1 christos
32 1.1 christos #ifdef _WIN32
33 1.1 christos #include <winsock2.h>
34 1.1 christos #define WIN32_LEAN_AND_MEAN
35 1.1 christos #include <windows.h>
36 1.1 christos #undef WIN32_LEAN_AND_MEAN
37 1.1 christos #endif
38 1.1 christos #include <sys/types.h>
39 1.1 christos #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
40 1.1 christos #include <sys/time.h>
41 1.1 christos #endif
42 1.1 christos #include <sys/queue.h>
43 1.1 christos #ifdef EVENT__HAVE_SYS_SOCKET_H
44 1.1 christos #include <sys/socket.h>
45 1.1 christos #endif
46 1.1 christos #include <stdio.h>
47 1.1 christos #include <stdlib.h>
48 1.1 christos #ifdef EVENT__HAVE_UNISTD_H
49 1.1 christos #include <unistd.h>
50 1.1 christos #endif
51 1.1 christos #include <ctype.h>
52 1.1 christos #include <errno.h>
53 1.1 christos #include <signal.h>
54 1.1 christos #include <string.h>
55 1.1 christos #include <time.h>
56 1.1 christos #include <limits.h>
57 1.1 christos
58 1.1 christos #include "event2/event.h"
59 1.1 christos #include "event2/event_struct.h"
60 1.1 christos #include "event2/event_compat.h"
61 1.1 christos #include "event-internal.h"
62 1.1 christos #include "defer-internal.h"
63 1.1 christos #include "evthread-internal.h"
64 1.1 christos #include "event2/thread.h"
65 1.1 christos #include "event2/util.h"
66 1.1 christos #include "log-internal.h"
67 1.1 christos #include "evmap-internal.h"
68 1.1 christos #include "iocp-internal.h"
69 1.1 christos #include "changelist-internal.h"
70 1.1 christos #define HT_NO_CACHE_HASH_VALUES
71 1.1 christos #include "ht-internal.h"
72 1.1 christos #include "util-internal.h"
73 1.1 christos
74 1.1 christos
75 1.1 christos #ifdef EVENT__HAVE_WORKING_KQUEUE
76 1.1 christos #include "kqueue-internal.h"
77 1.1 christos #endif
78 1.1 christos
79 1.1 christos #ifdef EVENT__HAVE_EVENT_PORTS
80 1.1 christos extern const struct eventop evportops;
81 1.1 christos #endif
82 1.1 christos #ifdef EVENT__HAVE_SELECT
83 1.1 christos extern const struct eventop selectops;
84 1.1 christos #endif
85 1.1 christos #ifdef EVENT__HAVE_POLL
86 1.1 christos extern const struct eventop pollops;
87 1.1 christos #endif
88 1.1 christos #ifdef EVENT__HAVE_EPOLL
89 1.1 christos extern const struct eventop epollops;
90 1.1 christos #endif
91 1.1 christos #ifdef EVENT__HAVE_WORKING_KQUEUE
92 1.1 christos extern const struct eventop kqops;
93 1.1 christos #endif
94 1.1 christos #ifdef EVENT__HAVE_DEVPOLL
95 1.1 christos extern const struct eventop devpollops;
96 1.1 christos #endif
97 1.1 christos #ifdef _WIN32
98 1.1 christos extern const struct eventop win32ops;
99 1.1 christos #endif
100 1.1 christos
/* Array of backends in order of preference.  event_base_new_with_config()
 * walks this list front to back and selects the first backend that is not
 * ruled out by the configuration or environment and whose init() succeeds.
 * The list is NULL-terminated. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef EVENT__HAVE_EPOLL
	&epollops,
#endif
#ifdef EVENT__HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef EVENT__HAVE_POLL
	&pollops,
#endif
#ifdef EVENT__HAVE_SELECT
	&selectops,
#endif
#ifdef _WIN32
	&win32ops,
#endif
	NULL
};
126 1.1 christos
127 1.1 christos /* Global state; deprecated */
128 1.1 christos struct event_base *event_global_current_base_ = NULL;
129 1.1 christos #define current_base event_global_current_base_
130 1.1 christos
131 1.1 christos /* Global state */
132 1.1 christos
133 1.1 christos static void *event_self_cbarg_ptr_ = NULL;
134 1.1 christos
135 1.1 christos /* Prototypes */
136 1.1 christos static void event_queue_insert_active(struct event_base *, struct event_callback *);
137 1.1 christos static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
138 1.1 christos static void event_queue_insert_timeout(struct event_base *, struct event *);
139 1.1 christos static void event_queue_insert_inserted(struct event_base *, struct event *);
140 1.1 christos static void event_queue_remove_active(struct event_base *, struct event_callback *);
141 1.1 christos static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
142 1.1 christos static void event_queue_remove_timeout(struct event_base *, struct event *);
143 1.1 christos static void event_queue_remove_inserted(struct event_base *, struct event *);
144 1.1 christos static void event_queue_make_later_events_active(struct event_base *base);
145 1.1 christos
146 1.1 christos static int evthread_make_base_notifiable_nolock_(struct event_base *base);
147 1.2 christos static int event_del_(struct event *ev, int blocking);
148 1.1 christos
149 1.1 christos #ifdef USE_REINSERT_TIMEOUT
150 1.1 christos /* This code seems buggy; only turn it on if we find out what the trouble is. */
151 1.1 christos static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
152 1.1 christos #endif
153 1.1 christos
154 1.1 christos static int event_haveevents(struct event_base *);
155 1.1 christos
156 1.1 christos static int event_process_active(struct event_base *);
157 1.1 christos
158 1.1 christos static int timeout_next(struct event_base *, struct timeval **);
159 1.1 christos static void timeout_process(struct event_base *);
160 1.1 christos
161 1.1 christos static inline void event_signal_closure(struct event_base *, struct event *ev);
162 1.1 christos static inline void event_persist_closure(struct event_base *, struct event *ev);
163 1.1 christos
164 1.1 christos static int evthread_notify_base(struct event_base *base);
165 1.1 christos
166 1.1 christos static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
167 1.1 christos struct event *ev);
168 1.1 christos
169 1.1 christos #ifndef EVENT__DISABLE_DEBUG_MODE
170 1.1 christos /* These functions implement a hashtable of which 'struct event *' structures
171 1.1 christos * have been setup or added. We don't want to trust the content of the struct
172 1.1 christos * event itself, since we're trying to work through cases where an event gets
173 1.1 christos * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
174 1.1 christos */
175 1.1 christos
struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;	/* hashtable linkage */
	const struct event *ptr;		/* the event being tracked; hash key */
	unsigned added : 1;			/* set while the event is added */
};
181 1.1 christos
182 1.1 christos static inline unsigned
183 1.1 christos hash_debug_entry(const struct event_debug_entry *e)
184 1.1 christos {
185 1.1 christos /* We need to do this silliness to convince compilers that we
186 1.1 christos * honestly mean to cast e->ptr to an integer, and discard any
187 1.1 christos * part of it that doesn't fit in an unsigned.
188 1.1 christos */
189 1.1 christos unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
190 1.1 christos /* Our hashtable implementation is pretty sensitive to low bits,
191 1.1 christos * and every struct event is over 64 bytes in size, so we can
192 1.1 christos * just say >>6. */
193 1.1 christos return (u >> 6);
194 1.1 christos }
195 1.1 christos
196 1.1 christos static inline int
197 1.1 christos eq_debug_entry(const struct event_debug_entry *a,
198 1.1 christos const struct event_debug_entry *b)
199 1.1 christos {
200 1.1 christos return a->ptr == b->ptr;
201 1.1 christos }
202 1.1 christos
203 1.1 christos int event_debug_mode_on_ = 0;
204 1.1 christos /* Set if it's too late to enable event_debug_mode. */
205 1.1 christos static int event_debug_mode_too_late = 0;
206 1.1 christos #ifndef EVENT__DISABLE_THREAD_SUPPORT
207 1.1 christos static void *event_debug_map_lock_ = NULL;
208 1.1 christos #endif
209 1.1 christos static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
210 1.1 christos HT_INITIALIZER();
211 1.1 christos
212 1.1 christos HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
213 1.1 christos eq_debug_entry)
214 1.1 christos HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
215 1.1 christos eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
216 1.1 christos
/* Macro: record that ev is now setup (that is, ready for an add).  Inserts
 * ev into the debug map if it is not there yet; otherwise clears its
 * 'added' flag. */
#define event_debug_note_setup_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) {						\
			dent->added = 0;				\
		} else {						\
			dent = mm_malloc(sizeof(*dent));		\
			if (!dent)					\
				event_err(1,				\
				    "Out of memory in debugging code");	\
			dent->ptr = (ev);				\
			dent->added = 0;				\
			HT_INSERT(event_debug_map, &global_debug_map, dent); \
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (0)
/* Macro: record that ev is no longer setup; drops it from the debug map
 * and frees the tracking entry. */
#define event_debug_note_teardown_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
		if (dent)						\
			mm_free(dent);					\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (0)
/* Macro: record that ev is now added; aborts the process if ev was never
 * set up. */
#define event_debug_note_add_(ev) do {					\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) {						\
			dent->added = 1;				\
		} else {						\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s: noting an add on a non-setup event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
			    ", flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (0)
/* Macro: record that ev is no longer added; aborts the process if ev was
 * never set up. */
#define event_debug_note_del_(ev) do {					\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) {						\
			dent->added = 0;				\
		} else {						\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s: noting a del on a non-setup event %p"	\
			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
			    ", flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (0)
/* Macro: assert that ev is setup (i.e., okay to add or inspect); aborts
 * the process otherwise. */
#define event_debug_assert_is_setup_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (!dent) {						\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s called on a non-initialized event %p"	\
			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
			    ", flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	} while (0)
/* Macro: assert that ev is not added (i.e., okay to tear down or set
 * up again); aborts the process otherwise. */
#define event_debug_assert_not_added_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent && dent->added) {				\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s called on an already added event %p"	\
			    " (events: 0x%x, fd: "EV_SOCK_FMT", "	\
			    "flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	} while (0)
331 1.1 christos #else
332 1.1 christos #define event_debug_note_setup_(ev) \
333 1.1 christos ((void)0)
334 1.1 christos #define event_debug_note_teardown_(ev) \
335 1.1 christos ((void)0)
336 1.1 christos #define event_debug_note_add_(ev) \
337 1.1 christos ((void)0)
338 1.1 christos #define event_debug_note_del_(ev) \
339 1.1 christos ((void)0)
340 1.1 christos #define event_debug_assert_is_setup_(ev) \
341 1.1 christos ((void)0)
342 1.1 christos #define event_debug_assert_not_added_(ev) \
343 1.1 christos ((void)0)
344 1.1 christos #endif
345 1.1 christos
346 1.1 christos #define EVENT_BASE_ASSERT_LOCKED(base) \
347 1.1 christos EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
348 1.1 christos
349 1.1 christos /* How often (in seconds) do we check for changes in wall clock time relative
350 1.1 christos * to monotonic time? Set this to -1 for 'never.' */
351 1.1 christos #define CLOCK_SYNC_INTERVAL 5
352 1.1 christos
353 1.1 christos /** Set 'tp' to the current time according to 'base'. We must hold the lock
354 1.1 christos * on 'base'. If there is a cached time, return it. Otherwise, use
355 1.1 christos * clock_gettime or gettimeofday as appropriate to find out the right time.
356 1.1 christos * Return 0 on success, -1 on failure.
357 1.1 christos */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	/* A nonzero tv_sec marks the cache as valid; return it directly. */
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
		return -1;
	}

	/* At most every CLOCK_SYNC_INTERVAL seconds of monotonic time,
	 * resample the offset between the wall clock and the monotonic
	 * clock, so that cached monotonic times can be converted back to
	 * wall-clock times (see event_base_gettimeofday_cached). */
	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
	    < tp->tv_sec) {
		struct timeval tv;
		evutil_gettimeofday(&tv,NULL);
		evutil_timersub(&tv, tp, &base->tv_clock_diff);
		base->last_updated_clock_diff = tp->tv_sec;
	}

	return 0;
}
382 1.1 christos
int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	/* NULL means "use the default (global) base"; with no global base
	 * all we can do is report the real wall-clock time. */
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		/* No cached time: fall back to a real clock read. */
		r = evutil_gettimeofday(tv, NULL);
	} else {
		/* The cache holds monotonic time; add the monotonic-to-wall
		 * clock offset maintained by gettime(). */
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
403 1.1 christos
404 1.1 christos /** Make 'base' have no current cached time. */
405 1.1 christos static inline void
406 1.1 christos clear_time_cache(struct event_base *base)
407 1.1 christos {
408 1.1 christos base->tv_cache.tv_sec = 0;
409 1.1 christos }
410 1.1 christos
411 1.1 christos /** Replace the cached time in 'base' with the current time. */
412 1.1 christos static inline void
413 1.1 christos update_time_cache(struct event_base *base)
414 1.1 christos {
415 1.1 christos base->tv_cache.tv_sec = 0;
416 1.1 christos if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
417 1.1 christos gettime(base, &base->tv_cache);
418 1.1 christos }
419 1.1 christos
int
event_base_update_cache_time(struct event_base *base)
{

	/* NULL means the default (global) base; fail if there is none. */
	if (!base) {
		base = current_base;
		if (!current_base)
			return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	/* Only refresh while the loop is running; otherwise the cache is
	 * left cleared. */
	if (base->running_loop)
		update_time_cache(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}
436 1.1 christos
/* Recover the 'struct event' that contains 'evcb'.  Only valid for
 * callbacks embedded in an event (EVLIST_INIT set); asserted below. */
static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}
443 1.1 christos
444 1.1 christos static inline struct event_callback *
445 1.1 christos event_to_event_callback(struct event *ev)
446 1.1 christos {
447 1.1 christos return &ev->ev_evcallback;
448 1.1 christos }
449 1.1 christos
450 1.1 christos struct event_base *
451 1.1 christos event_init(void)
452 1.1 christos {
453 1.1 christos struct event_base *base = event_base_new_with_config(NULL);
454 1.1 christos
455 1.1 christos if (base == NULL) {
456 1.1 christos event_errx(1, "%s: Unable to construct event_base", __func__);
457 1.1 christos return NULL;
458 1.1 christos }
459 1.1 christos
460 1.1 christos current_base = base;
461 1.1 christos
462 1.1 christos return (base);
463 1.1 christos }
464 1.1 christos
465 1.1 christos struct event_base *
466 1.1 christos event_base_new(void)
467 1.1 christos {
468 1.1 christos struct event_base *base = NULL;
469 1.1 christos struct event_config *cfg = event_config_new();
470 1.1 christos if (cfg) {
471 1.1 christos base = event_base_new_with_config(cfg);
472 1.1 christos event_config_free(cfg);
473 1.1 christos }
474 1.1 christos return base;
475 1.1 christos }
476 1.1 christos
477 1.1 christos /** Return true iff 'method' is the name of a method that 'cfg' tells us to
478 1.1 christos * avoid. */
479 1.1 christos static int
480 1.1 christos event_config_is_avoided_method(const struct event_config *cfg,
481 1.1 christos const char *method)
482 1.1 christos {
483 1.1 christos struct event_config_entry *entry;
484 1.1 christos
485 1.1 christos TAILQ_FOREACH(entry, &cfg->entries, next) {
486 1.1 christos if (entry->avoid_method != NULL &&
487 1.1 christos strcmp(entry->avoid_method, method) == 0)
488 1.1 christos return (1);
489 1.1 christos }
490 1.1 christos
491 1.1 christos return (0);
492 1.1 christos }
493 1.1 christos
494 1.1 christos /** Return true iff 'method' is disabled according to the environment. */
495 1.1 christos static int
496 1.1 christos event_is_method_disabled(const char *name)
497 1.1 christos {
498 1.1 christos char environment[64];
499 1.1 christos int i;
500 1.1 christos
501 1.1 christos evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
502 1.1 christos for (i = 8; environment[i] != '\0'; ++i)
503 1.1 christos environment[i] = EVUTIL_TOUPPER_(environment[i]);
504 1.1 christos /* Note that evutil_getenv_() ignores the environment entirely if
505 1.1 christos * we're setuid */
506 1.1 christos return (evutil_getenv_(environment) != NULL);
507 1.1 christos }
508 1.1 christos
/* Return the bitmask of feature flags provided by 'base''s chosen backend. */
int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}
514 1.1 christos
void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_)
		event_errx(1, "%s was called twice!", __func__);
	/* Debug mode can only track events created after it is turned on,
	 * so it must be enabled before any event or event_base exists. */
	if (event_debug_mode_too_late)
		event_errx(1, "%s must be called *before* creating any events "
		    "or event_bases",__func__);

	event_debug_mode_on_ = 1;

	HT_INIT(event_debug_map, &global_debug_map);
#endif
}
530 1.1 christos
void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	struct event_debug_entry **ent, *victim;

	EVLOCK_LOCK(event_debug_map_lock_, 0);
	/* Free every tracking entry.  HT_NEXT_RMV unlinks the current
	 * element while advancing, so the victim can be freed safely
	 * during iteration. */
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_ , 0);

	event_debug_mode_on_ = 0;
#endif
}
549 1.1 christos
550 1.1 christos struct event_base *
551 1.1 christos event_base_new_with_config(const struct event_config *cfg)
552 1.1 christos {
553 1.1 christos int i;
554 1.1 christos struct event_base *base;
555 1.1 christos int should_check_environment;
556 1.1 christos
557 1.1 christos #ifndef EVENT__DISABLE_DEBUG_MODE
558 1.1 christos event_debug_mode_too_late = 1;
559 1.1 christos #endif
560 1.1 christos
561 1.1 christos if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
562 1.1 christos event_warn("%s: calloc", __func__);
563 1.1 christos return NULL;
564 1.1 christos }
565 1.1 christos
566 1.1 christos if (cfg)
567 1.1 christos base->flags = cfg->flags;
568 1.1 christos
569 1.1 christos should_check_environment =
570 1.1 christos !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
571 1.1 christos
572 1.1 christos {
573 1.1 christos struct timeval tmp;
574 1.1 christos int precise_time =
575 1.1 christos cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
576 1.1 christos int flags;
577 1.1 christos if (should_check_environment && !precise_time) {
578 1.1 christos precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
579 1.1 christos base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
580 1.1 christos }
581 1.1 christos flags = precise_time ? EV_MONOT_PRECISE : 0;
582 1.1 christos evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
583 1.1 christos
584 1.1 christos gettime(base, &tmp);
585 1.1 christos }
586 1.1 christos
587 1.1 christos min_heap_ctor_(&base->timeheap);
588 1.1 christos
589 1.1 christos base->sig.ev_signal_pair[0] = -1;
590 1.1 christos base->sig.ev_signal_pair[1] = -1;
591 1.1 christos base->th_notify_fd[0] = -1;
592 1.1 christos base->th_notify_fd[1] = -1;
593 1.1 christos
594 1.1 christos TAILQ_INIT(&base->active_later_queue);
595 1.1 christos
596 1.1 christos evmap_io_initmap_(&base->io);
597 1.1 christos evmap_signal_initmap_(&base->sigmap);
598 1.1 christos event_changelist_init_(&base->changelist);
599 1.1 christos
600 1.1 christos base->evbase = NULL;
601 1.1 christos
602 1.1 christos if (cfg) {
603 1.1 christos memcpy(&base->max_dispatch_time,
604 1.1 christos &cfg->max_dispatch_interval, sizeof(struct timeval));
605 1.1 christos base->limit_callbacks_after_prio =
606 1.1 christos cfg->limit_callbacks_after_prio;
607 1.1 christos } else {
608 1.1 christos base->max_dispatch_time.tv_sec = -1;
609 1.1 christos base->limit_callbacks_after_prio = 1;
610 1.1 christos }
611 1.1 christos if (cfg && cfg->max_dispatch_callbacks >= 0) {
612 1.1 christos base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
613 1.1 christos } else {
614 1.1 christos base->max_dispatch_callbacks = INT_MAX;
615 1.1 christos }
616 1.1 christos if (base->max_dispatch_callbacks == INT_MAX &&
617 1.1 christos base->max_dispatch_time.tv_sec == -1)
618 1.1 christos base->limit_callbacks_after_prio = INT_MAX;
619 1.1 christos
620 1.1 christos for (i = 0; eventops[i] && !base->evbase; i++) {
621 1.1 christos if (cfg != NULL) {
622 1.1 christos /* determine if this backend should be avoided */
623 1.1 christos if (event_config_is_avoided_method(cfg,
624 1.1 christos eventops[i]->name))
625 1.1 christos continue;
626 1.1 christos if ((eventops[i]->features & cfg->require_features)
627 1.1 christos != cfg->require_features)
628 1.1 christos continue;
629 1.1 christos }
630 1.1 christos
631 1.1 christos /* also obey the environment variables */
632 1.1 christos if (should_check_environment &&
633 1.1 christos event_is_method_disabled(eventops[i]->name))
634 1.1 christos continue;
635 1.1 christos
636 1.1 christos base->evsel = eventops[i];
637 1.1 christos
638 1.1 christos base->evbase = base->evsel->init(base);
639 1.1 christos }
640 1.1 christos
641 1.1 christos if (base->evbase == NULL) {
642 1.1 christos event_warnx("%s: no event mechanism available",
643 1.1 christos __func__);
644 1.1 christos base->evsel = NULL;
645 1.1 christos event_base_free(base);
646 1.1 christos return NULL;
647 1.1 christos }
648 1.1 christos
649 1.1 christos if (evutil_getenv_("EVENT_SHOW_METHOD"))
650 1.1 christos event_msgx("libevent using: %s", base->evsel->name);
651 1.1 christos
652 1.1 christos /* allocate a single active event queue */
653 1.1 christos if (event_base_priority_init(base, 1) < 0) {
654 1.1 christos event_base_free(base);
655 1.1 christos return NULL;
656 1.1 christos }
657 1.1 christos
658 1.1 christos /* prepare for threading */
659 1.1 christos
660 1.1 christos #ifndef EVENT__DISABLE_THREAD_SUPPORT
661 1.1 christos if (EVTHREAD_LOCKING_ENABLED() &&
662 1.1 christos (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
663 1.1 christos int r;
664 1.1 christos EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
665 1.1 christos EVTHREAD_ALLOC_COND(base->current_event_cond);
666 1.1 christos r = evthread_make_base_notifiable(base);
667 1.1 christos if (r<0) {
668 1.1 christos event_warnx("%s: Unable to make base notifiable.", __func__);
669 1.1 christos event_base_free(base);
670 1.1 christos return NULL;
671 1.1 christos }
672 1.1 christos }
673 1.1 christos #endif
674 1.1 christos
675 1.1 christos #ifdef _WIN32
676 1.1 christos if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
677 1.1 christos event_base_start_iocp_(base, cfg->n_cpus_hint);
678 1.1 christos #endif
679 1.1 christos
680 1.1 christos return (base);
681 1.1 christos }
682 1.1 christos
/* Launch an IOCP port for 'base' with up to 'n_cpus' worker threads.
 * Returns 0 on success (or if already running), -1 on failure and on
 * non-Windows platforms. */
int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
	if (base->iocp)
		return 0;
	base->iocp = event_iocp_port_launch_(n_cpus);
	if (!base->iocp) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}
699 1.1 christos
/* Shut down the IOCP port of 'base', if any; a no-op on non-Windows
 * platforms or when no port was launched. */
void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
	int rv;

	if (!base->iocp)
		return;
	/* -1: wait indefinitely for the port to shut down. */
	rv = event_iocp_shutdown_(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}
713 1.1 christos
/* Cancel the pending callback 'evcb' on 'base'.  If 'evcb' is embedded in
 * a non-internal event, that event is deleted; a bare callback is
 * cancelled under the base lock.  When 'run_finalizers' is set and the
 * callback is in the finalizing state, its finalizer is invoked (which may
 * free the enclosing event).  Returns 1 if a user-visible callback was
 * cancelled, 0 if it belonged to an internal event. */
static int
event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    int run_finalizers)
{
	int result = 0;

	if (evcb->evcb_flags & EVLIST_INIT) {
		/* The callback lives inside a struct event. */
		struct event *ev = event_callback_to_event(evcb);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
			result = 1;
		}
	} else {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		event_callback_cancel_nolock_(base, evcb, 1);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		result = 1;
	}

	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			struct event *ev = event_callback_to_event(evcb);
			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
			/* FINALIZE_FREE also owns the event's storage. */
			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
			break;
		}
		case EV_CLOSURE_CB_FINALIZE:
			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
			break;
		default:
			break;
		}
	}
	return result;
}
753 1.2 christos
/*
 * Worker for event_base_free()/event_base_free_nofinalize(): delete every
 * remaining event, optionally run pending finalizers, release the backend,
 * and finally free 'base' itself.  Passing NULL frees current_base (the
 * legacy event_init() usage).
 */
static void
event_base_free_(struct event_base *base, int run_finalizers)
{
	int i, n_deleted=0;
	struct event *ev;
	/* XXXX grab the lock? If there is contention when one thread frees
	 * the base, then the contending thread will be very sad soon. */

	/* event_base_free(NULL) is how to free the current_base if we
	 * made it with event_init and forgot to hold a reference to it. */
	if (base == NULL && current_base)
		base = current_base;
	/* Don't actually free NULL. */
	if (base == NULL) {
		event_warnx("%s: no base to free", __func__);
		return;
	}
	/* XXX(niels) - check for internal events first */

#ifdef _WIN32
	event_base_stop_iocp_(base);
#endif

	/* threading fds if we have them */
	if (base->th_notify_fd[0] != -1) {
		event_del(&base->th_notify);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Delete all non-internal events. */
	evmap_delete_all_(base);

	/* Drain the timeout minheap. */
	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}
	/* Tear down every common-timeout queue and the events on it. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		event_del(&ctl->timeout_event); /* Internal; doesn't count */
		event_debug_unassign(&ctl->timeout_event);
		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
			/* Fetch the successor before event_del() unlinks
			 * 'ev' from this list. */
			struct event *next = TAILQ_NEXT(ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
		mm_free(ctl);
	}
	if (base->common_timeout_queues)
		mm_free(base->common_timeout_queues);

	/* Cancel (and, if requested, finalize) everything still queued to
	 * run, both in the active queues and the active-later queue. */
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb, *next;
		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
			next = TAILQ_NEXT(evcb, evcb_active_next);
			n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
			evcb = next;
		}
	}
	{
		struct event_callback *evcb;
		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
			n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
		}
	}


	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
			__func__, n_deleted));

	/* Free bookkeeping for one-shot ("once") callbacks. */
	while (LIST_FIRST(&base->once_events)) {
		struct event_once *eonce = LIST_FIRST(&base->once_events);
		LIST_REMOVE(eonce, next_once);
		mm_free(eonce);
	}

	/* Let the backend release its own state. */
	if (base->evsel != NULL && base->evsel->dealloc != NULL)
		base->evsel->dealloc(base);

	for (i = 0; i < base->nactivequeues; ++i)
		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
	min_heap_dtor_(&base->timeheap);

	mm_free(base->activequeues);

	evmap_io_clear_(&base->io);
	evmap_signal_clear_(&base->sigmap);
	event_changelist_freemem_(&base->changelist);

	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
	EVTHREAD_FREE_COND(base->current_event_cond);

	/* If we're freeing current_base, there won't be a current_base. */
	if (base == current_base)
		current_base = NULL;
	mm_free(base);
}
863 1.1 christos
/* As event_base_free(), but skip running any outstanding finalizer
 * callbacks while tearing the base down. */
void
event_base_free_nofinalize(struct event_base *base)
{
	event_base_free_(base, 0);
}
869 1.2 christos
/* Free 'base' and everything it owns, running any pending finalizer
 * callbacks first. */
void
event_base_free(struct event_base *base)
{
	event_base_free_(base, 1);
}
875 1.2 christos
876 1.1 christos /* Fake eventop; used to disable the backend temporarily inside event_reinit
877 1.1 christos * so that we can call event_del() on an event without telling the backend.
878 1.1 christos */
879 1.1 christos static int
880 1.1 christos nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
881 1.1 christos short events, void *fdinfo)
882 1.1 christos {
883 1.1 christos return 0;
884 1.1 christos }
/* Backend stub temporarily installed by event_reinit(); only the del
 * member is ever reached while it is in place. */
const struct eventop nil_eventop = {
	"nil",
	NULL, /* init: unused. */
	NULL, /* add: unused. */
	nil_backend_del, /* del: used, so needs to be killed. */
	NULL, /* dispatch: unused. */
	NULL, /* dealloc: unused. */
	0, 0, 0 /* trailing integer members (need_reinit etc.) all zero. */
};
894 1.1 christos
/*
 * Reinitialize 'base' after fork(): rebuild backend state and the
 * signal/thread notification fds so the child no longer shares them with
 * the parent.  Returns 0 on success, -1 on failure.
 */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel;
	int res = 0;
	int was_notifiable = 0;
	int had_signal_added = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	evsel = base->evsel;

	/* check if this event mechanism requires reinit on the backend */
	if (evsel->need_reinit) {
		/* We're going to call event_del() on our notify events (the
		 * ones that tell about signals and wakeup events). But we
		 * don't actually want to tell the backend to change its
		 * state, since it might still share some resource (a kqueue,
		 * an epoll fd) with the parent process, and we don't want to
		 * delete the fds from _that_ backend, we temporarily stub out
		 * the evsel with a replacement.
		 */
		base->evsel = &nil_eventop;
	}

	/* We need to re-create a new signal-notification fd and a new
	 * thread-notification fd. Otherwise, we'll still share those with
	 * the parent process, which would make any notification sent to them
	 * get received by one or both of the event loops, more or less at
	 * random.
	 */
	if (base->sig.ev_signal_added) {
		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
		event_debug_unassign(&base->sig.ev_signal);
		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
		if (base->sig.ev_signal_pair[0] != -1)
			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
		if (base->sig.ev_signal_pair[1] != -1)
			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
		/* Remember to restore signal handling below. */
		had_signal_added = 1;
		base->sig.ev_signal_added = 0;
	}
	if (base->th_notify_fn != NULL) {
		was_notifiable = 1;
		base->th_notify_fn = NULL;
	}
	if (base->th_notify_fd[0] != -1) {
		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Replace the original evsel. */
	base->evsel = evsel;

	if (evsel->need_reinit) {
		/* Reconstruct the backend through brute-force, so that we do
		 * not share any structures with the parent process. For some
		 * backends, this is necessary: epoll and kqueue, for
		 * instance, have events associated with a kernel
		 * structure. If didn't reinitialize, we'd share that
		 * structure with the parent process, and any changes made by
		 * the parent would affect our backend's behavior (and vice
		 * versa).
		 */
		if (base->evsel->dealloc != NULL)
			base->evsel->dealloc(base);
		base->evbase = evsel->init(base);
		if (base->evbase == NULL) {
			event_errx(1,
			   "%s: could not reinitialize event mechanism",
			   __func__);
			res = -1;
			goto done;
		}

		/* Empty out the changelist (if any): we are starting from a
		 * blank slate. */
		event_changelist_freemem_(&base->changelist);

		/* Tell the event maps to re-inform the backend about all
		 * pending events. This will make the signal notification
		 * event get re-created if necessary. */
		if (evmap_reinit_(base) < 0)
			res = -1;
	} else {
		/* Backend survives fork; just restore signal handling. */
		if (had_signal_added)
			res = evsig_init_(base);
	}

	/* If we were notifiable before, and nothing just exploded, become
	 * notifiable again. */
	if (was_notifiable && res == 0)
		res = evthread_make_base_notifiable_nolock_(base);

done:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (res);
}
999 1.1 christos
1000 1.3 christos /* Get the monotonic time for this event_base' timer */
1001 1.3 christos int
1002 1.3 christos event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1003 1.3 christos {
1004 1.3 christos int rv = -1;
1005 1.3 christos
1006 1.3 christos if (base && tv) {
1007 1.3 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1008 1.3 christos rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1009 1.3 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1010 1.3 christos }
1011 1.3 christos
1012 1.3 christos return rv;
1013 1.3 christos }
1014 1.3 christos
1015 1.1 christos const char **
1016 1.1 christos event_get_supported_methods(void)
1017 1.1 christos {
1018 1.1 christos static const char **methods = NULL;
1019 1.1 christos const struct eventop **method;
1020 1.1 christos const char **tmp;
1021 1.1 christos int i = 0, k;
1022 1.1 christos
1023 1.1 christos /* count all methods */
1024 1.1 christos for (method = &eventops[0]; *method != NULL; ++method) {
1025 1.1 christos ++i;
1026 1.1 christos }
1027 1.1 christos
1028 1.1 christos /* allocate one more than we need for the NULL pointer */
1029 1.1 christos tmp = mm_calloc((i + 1), sizeof(char *));
1030 1.1 christos if (tmp == NULL)
1031 1.1 christos return (NULL);
1032 1.1 christos
1033 1.1 christos /* populate the array with the supported methods */
1034 1.1 christos for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1035 1.1 christos tmp[i++] = eventops[k]->name;
1036 1.1 christos }
1037 1.1 christos tmp[i] = NULL;
1038 1.1 christos
1039 1.1 christos if (methods != NULL)
1040 1.1 christos mm_free((char**)methods);
1041 1.1 christos
1042 1.1 christos methods = tmp;
1043 1.1 christos
1044 1.1 christos return (methods);
1045 1.1 christos }
1046 1.1 christos
1047 1.1 christos struct event_config *
1048 1.1 christos event_config_new(void)
1049 1.1 christos {
1050 1.1 christos struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1051 1.1 christos
1052 1.1 christos if (cfg == NULL)
1053 1.1 christos return (NULL);
1054 1.1 christos
1055 1.1 christos TAILQ_INIT(&cfg->entries);
1056 1.1 christos cfg->max_dispatch_interval.tv_sec = -1;
1057 1.1 christos cfg->max_dispatch_callbacks = INT_MAX;
1058 1.1 christos cfg->limit_callbacks_after_prio = 1;
1059 1.1 christos
1060 1.1 christos return (cfg);
1061 1.1 christos }
1062 1.1 christos
1063 1.1 christos static void
1064 1.1 christos event_config_entry_free(struct event_config_entry *entry)
1065 1.1 christos {
1066 1.1 christos if (entry->avoid_method != NULL)
1067 1.1 christos mm_free((char *)entry->avoid_method);
1068 1.1 christos mm_free(entry);
1069 1.1 christos }
1070 1.1 christos
1071 1.1 christos void
1072 1.1 christos event_config_free(struct event_config *cfg)
1073 1.1 christos {
1074 1.1 christos struct event_config_entry *entry;
1075 1.1 christos
1076 1.1 christos while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1077 1.1 christos TAILQ_REMOVE(&cfg->entries, entry, next);
1078 1.1 christos event_config_entry_free(entry);
1079 1.1 christos }
1080 1.1 christos mm_free(cfg);
1081 1.1 christos }
1082 1.1 christos
1083 1.1 christos int
1084 1.1 christos event_config_set_flag(struct event_config *cfg, int flag)
1085 1.1 christos {
1086 1.1 christos if (!cfg)
1087 1.1 christos return -1;
1088 1.1 christos cfg->flags |= flag;
1089 1.1 christos return 0;
1090 1.1 christos }
1091 1.1 christos
1092 1.1 christos int
1093 1.1 christos event_config_avoid_method(struct event_config *cfg, const char *method)
1094 1.1 christos {
1095 1.1 christos struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1096 1.1 christos if (entry == NULL)
1097 1.1 christos return (-1);
1098 1.1 christos
1099 1.1 christos if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1100 1.1 christos mm_free(entry);
1101 1.1 christos return (-1);
1102 1.1 christos }
1103 1.1 christos
1104 1.1 christos TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1105 1.1 christos
1106 1.1 christos return (0);
1107 1.1 christos }
1108 1.1 christos
1109 1.1 christos int
1110 1.1 christos event_config_require_features(struct event_config *cfg,
1111 1.1 christos int features)
1112 1.1 christos {
1113 1.1 christos if (!cfg)
1114 1.1 christos return (-1);
1115 1.1 christos cfg->require_features = features;
1116 1.1 christos return (0);
1117 1.1 christos }
1118 1.1 christos
1119 1.1 christos int
1120 1.1 christos event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1121 1.1 christos {
1122 1.1 christos if (!cfg)
1123 1.1 christos return (-1);
1124 1.1 christos cfg->n_cpus_hint = cpus;
1125 1.1 christos return (0);
1126 1.1 christos }
1127 1.1 christos
1128 1.1 christos int
1129 1.1 christos event_config_set_max_dispatch_interval(struct event_config *cfg,
1130 1.1 christos const struct timeval *max_interval, int max_callbacks, int min_priority)
1131 1.1 christos {
1132 1.1 christos if (max_interval)
1133 1.1 christos memcpy(&cfg->max_dispatch_interval, max_interval,
1134 1.1 christos sizeof(struct timeval));
1135 1.1 christos else
1136 1.1 christos cfg->max_dispatch_interval.tv_sec = -1;
1137 1.1 christos cfg->max_dispatch_callbacks =
1138 1.1 christos max_callbacks >= 0 ? max_callbacks : INT_MAX;
1139 1.1 christos if (min_priority < 0)
1140 1.1 christos min_priority = 0;
1141 1.1 christos cfg->limit_callbacks_after_prio = min_priority;
1142 1.1 christos return (0);
1143 1.1 christos }
1144 1.1 christos
1145 1.1 christos int
1146 1.1 christos event_priority_init(int npriorities)
1147 1.1 christos {
1148 1.1 christos return event_base_priority_init(current_base, npriorities);
1149 1.1 christos }
1150 1.1 christos
/*
 * Resize 'base' to use 'npriorities' active-callback queues.  Fails (-1)
 * if any callbacks are currently active or npriorities is outside
 * [1, EVENT_MAX_PRIORITIES).  Returns 0 on success.
 */
int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i, r;
	r = -1;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	/* Can't reprioritize while callbacks are waiting to run. */
	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
	    || npriorities >= EVENT_MAX_PRIORITIES)
		goto err;

	/* Already the requested size: nothing to do. */
	if (npriorities == base->nactivequeues)
		goto ok;

	if (base->nactivequeues) {
		mm_free(base->activequeues);
		base->nactivequeues = 0;
	}

	/* Allocate our priority queues */
	base->activequeues = (struct evcallback_list *)
	    mm_calloc(npriorities, sizeof(struct evcallback_list));
	if (base->activequeues == NULL) {
		event_warn("%s: calloc", __func__);
		goto err;
	}
	base->nactivequeues = npriorities;

	for (i = 0; i < base->nactivequeues; ++i) {
		TAILQ_INIT(&base->activequeues[i]);
	}

ok:
	r = 0;
err:
	/* 'ok' deliberately falls through here to release the lock. */
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (r);
}
1190 1.1 christos
1191 1.1 christos int
1192 1.1 christos event_base_get_npriorities(struct event_base *base)
1193 1.1 christos {
1194 1.1 christos
1195 1.1 christos int n;
1196 1.1 christos if (base == NULL)
1197 1.1 christos base = current_base;
1198 1.1 christos
1199 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1200 1.1 christos n = base->nactivequeues;
1201 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1202 1.1 christos return (n);
1203 1.1 christos }
1204 1.1 christos
1205 1.2 christos int
1206 1.2 christos event_base_get_num_events(struct event_base *base, unsigned int type)
1207 1.2 christos {
1208 1.2 christos int r = 0;
1209 1.2 christos
1210 1.2 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1211 1.2 christos
1212 1.2 christos if (type & EVENT_BASE_COUNT_ACTIVE)
1213 1.2 christos r += base->event_count_active;
1214 1.2 christos
1215 1.2 christos if (type & EVENT_BASE_COUNT_VIRTUAL)
1216 1.2 christos r += base->virtual_event_count;
1217 1.2 christos
1218 1.2 christos if (type & EVENT_BASE_COUNT_ADDED)
1219 1.2 christos r += base->event_count;
1220 1.2 christos
1221 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1222 1.2 christos
1223 1.2 christos return r;
1224 1.2 christos }
1225 1.2 christos
1226 1.2 christos int
1227 1.2 christos event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1228 1.2 christos {
1229 1.2 christos int r = 0;
1230 1.2 christos
1231 1.2 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1232 1.2 christos
1233 1.2 christos if (type & EVENT_BASE_COUNT_ACTIVE) {
1234 1.2 christos r += base->event_count_active_max;
1235 1.2 christos if (clear)
1236 1.2 christos base->event_count_active_max = 0;
1237 1.2 christos }
1238 1.2 christos
1239 1.2 christos if (type & EVENT_BASE_COUNT_VIRTUAL) {
1240 1.2 christos r += base->virtual_event_count_max;
1241 1.2 christos if (clear)
1242 1.2 christos base->virtual_event_count_max = 0;
1243 1.2 christos }
1244 1.2 christos
1245 1.2 christos if (type & EVENT_BASE_COUNT_ADDED) {
1246 1.2 christos r += base->event_count_max;
1247 1.2 christos if (clear)
1248 1.2 christos base->event_count_max = 0;
1249 1.2 christos }
1250 1.2 christos
1251 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1252 1.2 christos
1253 1.2 christos return r;
1254 1.2 christos }
1255 1.2 christos
1256 1.1 christos /* Returns true iff we're currently watching any events. */
1257 1.1 christos static int
1258 1.1 christos event_haveevents(struct event_base *base)
1259 1.1 christos {
1260 1.1 christos /* Caller must hold th_base_lock */
1261 1.1 christos return (base->virtual_event_count > 0 || base->event_count > 0);
1262 1.1 christos }
1263 1.1 christos
/* "closure" function called when processing active signal events: run the
 * callback once for each time the signal fired, dropping th_base_lock
 * around every invocation. */
static inline void
event_signal_closure(struct event_base *base, struct event *ev)
{
	short ncalls;
	int should_break;

	/* Allows deletes to work: ev_pncalls points at our stack copy of
	 * the count, so a delete during the callback can zero it. */
	ncalls = ev->ev_ncalls;
	if (ncalls != 0)
		ev->ev_pncalls = &ncalls;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	while (ncalls) {
		ncalls--;
		ev->ev_ncalls = ncalls;
		if (ncalls == 0)
			ev->ev_pncalls = NULL;
		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);

		/* Re-check the loop-break flag under the lock after each
		 * callback invocation. */
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		should_break = base->event_break;
		EVBASE_RELEASE_LOCK(base, th_base_lock);

		if (should_break) {
			/* Don't leave ev_pncalls aimed at our stack frame. */
			if (ncalls != 0)
				ev->ev_pncalls = NULL;
			return;
		}
	}
}
1294 1.1 christos
1295 1.1 christos /* Common timeouts are special timeouts that are handled as queues rather than
1296 1.1 christos * in the minheap. This is more efficient than the minheap if we happen to
1297 1.1 christos * know that we're going to get several thousands of timeout events all with
1298 1.1 christos * the same timeout value.
1299 1.1 christos *
1300 1.1 christos * Since all our timeout handling code assumes timevals can be copied,
1301 1.1 christos * assigned, etc, we can't use "magic pointer" to encode these common
1302 1.1 christos * timeouts. Searching through a list to see if every timeout is common could
1303 1.1 christos * also get inefficient. Instead, we take advantage of the fact that tv_usec
1304 1.1 christos * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
1307 1.1 christos */
1308 1.1 christos
/* Mask of the 20 low bits of tv_usec that hold real microseconds. */
#define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
/* Mask and shift of the 8 bits holding the common-timeout queue index. */
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
/* Mask and value of the top 4 "magic" bits tagging a common timeout. */
#define COMMON_TIMEOUT_MASK 0xf0000000
#define COMMON_TIMEOUT_MAGIC 0x50000000

/* Extract the common-timeout queue index encoded in tv->tv_usec. */
#define COMMON_TIMEOUT_IDX(tv) \
	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
1317 1.1 christos
1318 1.1 christos /** Return true iff if 'tv' is a common timeout in 'base' */
1319 1.1 christos static inline int
1320 1.1 christos is_common_timeout(const struct timeval *tv,
1321 1.1 christos const struct event_base *base)
1322 1.1 christos {
1323 1.1 christos int idx;
1324 1.1 christos if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1325 1.1 christos return 0;
1326 1.1 christos idx = COMMON_TIMEOUT_IDX(tv);
1327 1.1 christos return idx < base->n_common_timeouts;
1328 1.1 christos }
1329 1.1 christos
1330 1.1 christos /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1331 1.1 christos * one is a common timeout. */
1332 1.1 christos static inline int
1333 1.1 christos is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1334 1.1 christos {
1335 1.1 christos return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1336 1.1 christos (tv2->tv_usec & ~MICROSECONDS_MASK);
1337 1.1 christos }
1338 1.1 christos
1339 1.1 christos /** Requires that 'tv' is a common timeout. Return the corresponding
1340 1.1 christos * common_timeout_list. */
1341 1.1 christos static inline struct common_timeout_list *
1342 1.1 christos get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1343 1.1 christos {
1344 1.1 christos return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1345 1.1 christos }
1346 1.1 christos
#if 0
/* Disabled sanity-check helper: would verify that a common-timeout tv
 * matches the duration stored in its queue.  Kept for reference only. */
static inline int
common_timeout_ok(const struct timeval *tv,
    struct event_base *base)
{
	const struct timeval *expect =
	    &get_common_timeout_list(base, tv)->duration;
	return tv->tv_sec == expect->tv_sec &&
	    tv->tv_usec == expect->tv_usec;
}
#endif
1358 1.1 christos
1359 1.1 christos /* Add the timeout for the first event in given common timeout list to the
1360 1.1 christos * event_base's minheap. */
1361 1.1 christos static void
1362 1.1 christos common_timeout_schedule(struct common_timeout_list *ctl,
1363 1.1 christos const struct timeval *now, struct event *head)
1364 1.1 christos {
1365 1.1 christos struct timeval timeout = head->ev_timeout;
1366 1.1 christos timeout.tv_usec &= MICROSECONDS_MASK;
1367 1.1 christos event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1368 1.1 christos }
1369 1.1 christos
/* Callback: invoked when the timeout for a common timeout queue triggers.
 * This means that (at least) the first event in that queue should be run,
 * and the timeout should be rescheduled if there are more events. */
static void
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
{
	struct timeval now;
	struct common_timeout_list *ctl = arg;
	struct event_base *base = ctl->base;
	struct event *ev = NULL;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	gettime(base, &now);
	/* Activate every queued event whose (de-tagged) deadline has
	 * passed; the queue is kept in expiry order, so we can stop at
	 * the first event that is still in the future. */
	while (1) {
		ev = TAILQ_FIRST(&ctl->events);
		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
		    (ev->ev_timeout.tv_sec == now.tv_sec &&
			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
			break;
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
	/* If anything remains queued, re-arm our timer for its deadline. */
	if (ev)
		common_timeout_schedule(ctl, &now, ev);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
1395 1.1 christos
/* Upper bound on common-timeout queues per base: the queue index must fit
 * in the 8 bits reserved for it in tv_usec. */
#define MAX_COMMON_TIMEOUTS 256
1397 1.1 christos
/*
 * Return a "common timeout" timeval for 'duration': a timeval whose usec
 * field carries magic+index bits naming a per-base queue of events that
 * all share this duration.  Events added with the returned timeval are
 * kept on that queue instead of the minheap.  Returns NULL on failure;
 * the result remains valid for the base's lifetime.
 */
const struct timeval *
event_base_init_common_timeout(struct event_base *base,
    const struct timeval *duration)
{
	int i;
	struct timeval tv;
	const struct timeval *result=NULL;
	struct common_timeout_list *new_ctl;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (duration->tv_usec > 1000000) {
		/* Normalize an overflowing usec field into seconds,
		 * first stripping our tag bits if 'duration' is itself
		 * a common timeout. */
		memcpy(&tv, duration, sizeof(struct timeval));
		if (is_common_timeout(duration, base))
			tv.tv_usec &= MICROSECONDS_MASK;
		tv.tv_sec += tv.tv_usec / 1000000;
		tv.tv_usec %= 1000000;
		duration = &tv;
	}
	/* Reuse an existing queue with this duration if there is one. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		const struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		if (duration->tv_sec == ctl->duration.tv_sec &&
		    duration->tv_usec ==
		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
			result = &ctl->duration;
			goto done;
		}
	}
	/* The queue index must fit in 8 bits of tv_usec. */
	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
		event_warnx("%s: Too many common timeouts already in use; "
		    "we only support %d per event_base", __func__,
		    MAX_COMMON_TIMEOUTS);
		goto done;
	}
	/* Grow the queue-pointer array (doubling, minimum 16). */
	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
		int n = base->n_common_timeouts < 16 ? 16 :
		    base->n_common_timeouts*2;
		/* NOTE(review): the sizeof names 'struct
		 * common_timeout_queue *' while the array holds 'struct
		 * common_timeout_list *'; all object pointers are the same
		 * size, so this is harmless, but it looks like a typo. */
		struct common_timeout_list **newqueues =
		    mm_realloc(base->common_timeout_queues,
			n*sizeof(struct common_timeout_queue *));
		if (!newqueues) {
			event_warn("%s: realloc",__func__);
			goto done;
		}
		base->n_common_timeouts_allocated = n;
		base->common_timeout_queues = newqueues;
	}
	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
	if (!new_ctl) {
		event_warn("%s: calloc",__func__);
		goto done;
	}
	TAILQ_INIT(&new_ctl->events);
	/* Tag the duration's usec with the magic bits and queue index. */
	new_ctl->duration.tv_sec = duration->tv_sec;
	new_ctl->duration.tv_usec =
	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
	/* Internal timer that fires when the queue's first event is due. */
	evtimer_assign(&new_ctl->timeout_event, base,
	    common_timeout_callback, new_ctl);
	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&new_ctl->timeout_event, 0);
	new_ctl->base = base;
	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
	result = &new_ctl->duration;

done:
	if (result)
		EVUTIL_ASSERT(is_common_timeout(result, base));

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return result;
}
1471 1.1 christos
/* Closure function invoked when we're activating a persistent event.
 * Called with th_base_lock held.  The lock is RELEASED before the user
 * callback runs and is NOT re-acquired here: the caller
 * (event_process_active_single_queue) re-acquires it after we return. */
static inline void
event_persist_closure(struct event_base *base, struct event *ev)
{
	/* The user callback, copied out before we drop the lock. */
	void (*evcb_callback)(evutil_socket_t, short, void *);

	// Other fields of *ev that must be stored before executing
	evutil_socket_t evcb_fd;
	short evcb_res;
	void *evcb_arg;

	/* reschedule the persistent event if we have a timeout. */
	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
		/* If there was a timeout, we want it to run at an interval of
		 * ev_io_timeout after the last time it was _scheduled_ for,
		 * not ev_io_timeout after _now_. If it fired for another
		 * reason, though, the timeout ought to start ticking _now_. */
		struct timeval run_at, relative_to, delay, now;
		/* High bits of tv_usec encoding the common-timeout magic and
		 * queue index; zero unless this is a common timeout. */
		ev_uint32_t usec_mask = 0;
		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
			&ev->ev_io_timeout));
		gettime(base, &now);
		if (is_common_timeout(&ev->ev_timeout, base)) {
			/* Strip the magic/index bits before doing time
			 * arithmetic; they are OR'd back into run_at below. */
			delay = ev->ev_io_timeout;
			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
			delay.tv_usec &= MICROSECONDS_MASK;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
				relative_to.tv_usec &= MICROSECONDS_MASK;
			} else {
				relative_to = now;
			}
		} else {
			delay = ev->ev_io_timeout;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
			} else {
				relative_to = now;
			}
		}
		evutil_timeradd(&relative_to, &delay, &run_at);
		if (evutil_timercmp(&run_at, &now, <)) {
			/* Looks like we missed at least one invocation due to
			 * a clock jump, not running the event loop for a
			 * while, really slow callbacks, or
			 * something. Reschedule relative to now.
			 */
			evutil_timeradd(&now, &delay, &run_at);
		}
		run_at.tv_usec |= usec_mask;
		event_add_nolock_(ev, &run_at, 1);
	}

	// Save our callback before we release the lock
	evcb_callback = ev->ev_callback;
	evcb_fd = ev->ev_fd;
	evcb_res = ev->ev_res;
	evcb_arg = ev->ev_arg;

	// Release the lock
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	// Execute the callback
	(evcb_callback)(evcb_fd, evcb_res, evcb_arg);
}
1537 1.1 christos
/*
  Helper for event_process_active to process all the events in a single queue,
  releasing the lock as we go. This function requires that the lock be held
  when it's invoked. Returns -1 if we get a signal or an event_break that
  means we should stop processing any active events now. Otherwise returns
  the number of non-internal event_callbacks that we processed.
*/
static int
event_process_active_single_queue(struct event_base *base,
    struct evcallback_list *activeq,
    int max_to_process, const struct timeval *endtime)
{
	struct event_callback *evcb;
	int count = 0;

	EVUTIL_ASSERT(activeq != NULL);

	/* Always take the queue head: each iteration removes the callback
	 * it is about to run (directly or via event_del_nolock_). */
	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
		struct event *ev=NULL;
		if (evcb->evcb_flags & EVLIST_INIT) {
			/* This callback is embedded in a full struct event. */
			ev = event_callback_to_event(evcb);

			/* Persistent and finalizing events stay registered;
			 * one-shot events are deleted before their callback
			 * runs. */
			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
				event_queue_remove_active(base, evcb);
			else
				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
			event_debug((
			    "event_process_active: event: %p, %s%s%scall %p",
			    ev,
			    ev->ev_res & EV_READ ? "EV_READ " : " ",
			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
			    ev->ev_callback));
		} else {
			/* A bare event_callback with no struct event around it. */
			event_queue_remove_active(base, evcb);
			event_debug(("event_process_active: event_callback %p, "
				"closure %d, call %p",
				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
		}

		/* Internal callbacks do not count toward max_to_process. */
		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
			++count;


		base->current_event = evcb;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		base->current_event_waiters = 0;
#endif

		/* Every closure below releases th_base_lock before running
		 * user code; we re-acquire it after the switch. */
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_SIGNAL:
			EVUTIL_ASSERT(ev != NULL);
			event_signal_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT_PERSIST:
			EVUTIL_ASSERT(ev != NULL);
			event_persist_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT: {
			void (*evcb_callback)(evutil_socket_t, short, void *);
			EVUTIL_ASSERT(ev != NULL);
			/* Copy the callback pointer before unlocking. */
			evcb_callback = *ev->ev_callback;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
		}
		break;
		case EV_CLOSURE_CB_SELF: {
			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_selfcb(evcb, evcb->evcb_arg);
		}
		break;
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			void (*evcb_evfinalize)(struct event *, void *);
			/* Save the closure type now: in the _FREE case the
			 * event may be freed below, after the finalizer. */
			int evcb_closure = evcb->evcb_closure;
			EVUTIL_ASSERT(ev != NULL);
			/* Clear current_event before the finalizer runs. */
			base->current_event = NULL;
			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_evfinalize(ev, ev->ev_arg);
			event_debug_note_teardown_(ev);
			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
		}
		break;
		case EV_CLOSURE_CB_FINALIZE: {
			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
			base->current_event = NULL;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_cbfinalize(evcb, evcb->evcb_arg);
		}
		break;
		default:
			EVUTIL_ASSERT(0);
		}

		/* Re-acquire the lock and wake any threads that blocked on
		 * the callback we just ran. */
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		base->current_event = NULL;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		if (base->current_event_waiters) {
			base->current_event_waiters = 0;
			EVTHREAD_COND_BROADCAST(base->current_event_cond);
		}
#endif

		/* Stop conditions: loopbreak, per-call budget, deadline,
		 * or loopcontinue (restart priority scan). */
		if (base->event_break)
			return -1;
		if (count >= max_to_process)
			return count;
		if (count && endtime) {
			struct timeval now;
			update_time_cache(base);
			gettime(base, &now);
			if (evutil_timercmp(&now, endtime, >=))
				return count;
		}
		if (base->event_continue)
			break;
	}
	return count;
}
1662 1.1 christos
/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */
1668 1.1 christos
1669 1.1 christos static int
1670 1.1 christos event_process_active(struct event_base *base)
1671 1.1 christos {
1672 1.1 christos /* Caller must hold th_base_lock */
1673 1.1 christos struct evcallback_list *activeq = NULL;
1674 1.1 christos int i, c = 0;
1675 1.1 christos const struct timeval *endtime;
1676 1.1 christos struct timeval tv;
1677 1.1 christos const int maxcb = base->max_dispatch_callbacks;
1678 1.1 christos const int limit_after_prio = base->limit_callbacks_after_prio;
1679 1.1 christos if (base->max_dispatch_time.tv_sec >= 0) {
1680 1.1 christos update_time_cache(base);
1681 1.1 christos gettime(base, &tv);
1682 1.1 christos evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1683 1.1 christos endtime = &tv;
1684 1.1 christos } else {
1685 1.1 christos endtime = NULL;
1686 1.1 christos }
1687 1.1 christos
1688 1.1 christos for (i = 0; i < base->nactivequeues; ++i) {
1689 1.1 christos if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1690 1.1 christos base->event_running_priority = i;
1691 1.1 christos activeq = &base->activequeues[i];
1692 1.1 christos if (i < limit_after_prio)
1693 1.1 christos c = event_process_active_single_queue(base, activeq,
1694 1.1 christos INT_MAX, NULL);
1695 1.1 christos else
1696 1.1 christos c = event_process_active_single_queue(base, activeq,
1697 1.1 christos maxcb, endtime);
1698 1.1 christos if (c < 0) {
1699 1.1 christos goto done;
1700 1.1 christos } else if (c > 0)
1701 1.1 christos break; /* Processed a real event; do not
1702 1.1 christos * consider lower-priority events */
1703 1.1 christos /* If we get here, all of the events we processed
1704 1.1 christos * were internal. Continue. */
1705 1.1 christos }
1706 1.1 christos }
1707 1.1 christos
1708 1.1 christos done:
1709 1.1 christos base->event_running_priority = -1;
1710 1.1 christos
1711 1.1 christos return c;
1712 1.1 christos }
1713 1.1 christos
1714 1.1 christos /*
1715 1.1 christos * Wait continuously for events. We exit only if no events are left.
1716 1.1 christos */
1717 1.1 christos
int
event_dispatch(void)
{
	/* Run the default base's loop with no flags. */
	const int no_flags = 0;

	return event_loop(no_flags);
}
1723 1.1 christos
int
event_base_dispatch(struct event_base *event_base)
{
	/* Dispatching is simply looping with the default flags. */
	const int no_flags = 0;

	return event_base_loop(event_base, no_flags);
}
1729 1.1 christos
1730 1.1 christos const char *
1731 1.1 christos event_base_get_method(const struct event_base *base)
1732 1.1 christos {
1733 1.1 christos EVUTIL_ASSERT(base);
1734 1.1 christos return (base->evsel->name);
1735 1.1 christos }
1736 1.1 christos
1737 1.1 christos /** Callback: used to implement event_base_loopexit by telling the event_base
1738 1.1 christos * that it's time to exit its loop. */
1739 1.1 christos static void
1740 1.1 christos event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1741 1.1 christos {
1742 1.1 christos struct event_base *base = arg;
1743 1.1 christos base->event_gotterm = 1;
1744 1.1 christos }
1745 1.1 christos
1746 1.1 christos int
1747 1.1 christos event_loopexit(const struct timeval *tv)
1748 1.1 christos {
1749 1.1 christos return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1750 1.1 christos current_base, tv));
1751 1.1 christos }
1752 1.1 christos
1753 1.1 christos int
1754 1.1 christos event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1755 1.1 christos {
1756 1.1 christos return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1757 1.1 christos event_base, tv));
1758 1.1 christos }
1759 1.1 christos
1760 1.1 christos int
1761 1.1 christos event_loopbreak(void)
1762 1.1 christos {
1763 1.1 christos return (event_base_loopbreak(current_base));
1764 1.1 christos }
1765 1.1 christos
1766 1.1 christos int
1767 1.1 christos event_base_loopbreak(struct event_base *event_base)
1768 1.1 christos {
1769 1.1 christos int r = 0;
1770 1.1 christos if (event_base == NULL)
1771 1.1 christos return (-1);
1772 1.1 christos
1773 1.1 christos EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1774 1.1 christos event_base->event_break = 1;
1775 1.1 christos
1776 1.1 christos if (EVBASE_NEED_NOTIFY(event_base)) {
1777 1.1 christos r = evthread_notify_base(event_base);
1778 1.1 christos } else {
1779 1.1 christos r = (0);
1780 1.1 christos }
1781 1.1 christos EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1782 1.1 christos return r;
1783 1.1 christos }
1784 1.1 christos
1785 1.1 christos int
1786 1.1 christos event_base_loopcontinue(struct event_base *event_base)
1787 1.1 christos {
1788 1.1 christos int r = 0;
1789 1.1 christos if (event_base == NULL)
1790 1.1 christos return (-1);
1791 1.1 christos
1792 1.1 christos EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1793 1.1 christos event_base->event_continue = 1;
1794 1.1 christos
1795 1.1 christos if (EVBASE_NEED_NOTIFY(event_base)) {
1796 1.1 christos r = evthread_notify_base(event_base);
1797 1.1 christos } else {
1798 1.1 christos r = (0);
1799 1.1 christos }
1800 1.1 christos EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1801 1.1 christos return r;
1802 1.1 christos }
1803 1.1 christos
1804 1.1 christos int
1805 1.1 christos event_base_got_break(struct event_base *event_base)
1806 1.1 christos {
1807 1.1 christos int res;
1808 1.1 christos EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1809 1.1 christos res = event_base->event_break;
1810 1.1 christos EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1811 1.1 christos return res;
1812 1.1 christos }
1813 1.1 christos
1814 1.1 christos int
1815 1.1 christos event_base_got_exit(struct event_base *event_base)
1816 1.1 christos {
1817 1.1 christos int res;
1818 1.1 christos EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1819 1.1 christos res = event_base->event_gotterm;
1820 1.1 christos EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1821 1.1 christos return res;
1822 1.1 christos }
1823 1.1 christos
1824 1.1 christos /* not thread safe */
1825 1.1 christos
1826 1.1 christos int
1827 1.1 christos event_loop(int flags)
1828 1.1 christos {
1829 1.1 christos return event_base_loop(current_base, flags);
1830 1.1 christos }
1831 1.1 christos
/* Main loop: dispatch backend events, process timeouts, and run active
 * callbacks until told to stop.
 * Returns 0 on a normal exit, 1 if it exited because no events were
 * registered, and -1 on error (reentrant call or backend dispatch failure).
 */
int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done, retval = 0;

	/* Grab the lock. We will release it inside evsel.dispatch, and again
	 * as we invoke user callbacks. */
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	/* Only one loop may run on a base at a time. */
	if (base->running_loop) {
		event_warnx("%s: reentrant invocation. Only one event_base_loop"
		    " can run on each event_base at once.", __func__);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		return -1;
	}

	base->running_loop = 1;

	/* Invalidate the cached time so the first reads are fresh. */
	clear_time_cache(base);

	/* If signals are in use, point signal handling at this base. */
	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
		evsig_set_base_(base);

	done = 0;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	base->th_owner_id = EVTHREAD_GET_ID();
#endif

	/* Clear any stop request left over from a previous run. */
	base->event_gotterm = base->event_break = 0;

	while (!done) {
		base->event_continue = 0;
		base->n_deferreds_queued = 0;

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			break;
		}

		if (base->event_break) {
			break;
		}

		/* Choose how long the backend may block: until the next
		 * timeout if idle, or not at all if work is pending. */
		tv_p = &tv;
		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
			event_debug(("%s: no events registered.", __func__));
			retval = 1;
			goto done;
		}

		/* Promote "active later" callbacks before dispatching. */
		event_queue_make_later_events_active(base);

		/* The cached time is stale while dispatch may block. */
		clear_time_cache(base);

		res = evsel->dispatch(base, tv_p);

		if (res == -1) {
			event_debug(("%s: dispatch returned unsuccessfully.",
				__func__));
			retval = -1;
			goto done;
		}

		update_time_cache(base);

		/* Move expired timers onto the active queues. */
		timeout_process(base);

		if (N_ACTIVE_CALLBACKS(base)) {
			int n = event_process_active(base);
			/* EVLOOP_ONCE: stop after at least one non-internal
			 * callback ran and nothing is left active. */
			if ((flags & EVLOOP_ONCE)
			    && N_ACTIVE_CALLBACKS(base) == 0
			    && n != 0)
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}
	event_debug(("%s: asked to terminate loop.", __func__));

done:
	clear_time_cache(base);
	base->running_loop = 0;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return (retval);
}
1934 1.1 christos
1935 1.1 christos /* One-time callback to implement event_base_once: invokes the user callback,
1936 1.1 christos * then deletes the allocated storage */
1937 1.1 christos static void
1938 1.1 christos event_once_cb(evutil_socket_t fd, short events, void *arg)
1939 1.1 christos {
1940 1.1 christos struct event_once *eonce = arg;
1941 1.1 christos
1942 1.1 christos (*eonce->cb)(fd, events, eonce->arg);
1943 1.1 christos EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
1944 1.1 christos LIST_REMOVE(eonce, next_once);
1945 1.1 christos EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
1946 1.1 christos event_debug_unassign(&eonce->ev);
1947 1.1 christos mm_free(eonce);
1948 1.1 christos }
1949 1.1 christos
1950 1.1 christos /* not threadsafe, event scheduled once. */
1951 1.1 christos int
1952 1.1 christos event_once(evutil_socket_t fd, short events,
1953 1.1 christos void (*callback)(evutil_socket_t, short, void *),
1954 1.1 christos void *arg, const struct timeval *tv)
1955 1.1 christos {
1956 1.1 christos return event_base_once(current_base, fd, events, callback, arg, tv);
1957 1.1 christos }
1958 1.1 christos
1959 1.1 christos /* Schedules an event once */
1960 1.1 christos int
1961 1.1 christos event_base_once(struct event_base *base, evutil_socket_t fd, short events,
1962 1.1 christos void (*callback)(evutil_socket_t, short, void *),
1963 1.1 christos void *arg, const struct timeval *tv)
1964 1.1 christos {
1965 1.1 christos struct event_once *eonce;
1966 1.1 christos int res = 0;
1967 1.1 christos int activate = 0;
1968 1.1 christos
1969 1.1 christos /* We cannot support signals that just fire once, or persistent
1970 1.1 christos * events. */
1971 1.1 christos if (events & (EV_SIGNAL|EV_PERSIST))
1972 1.1 christos return (-1);
1973 1.1 christos
1974 1.1 christos if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
1975 1.1 christos return (-1);
1976 1.1 christos
1977 1.1 christos eonce->cb = callback;
1978 1.1 christos eonce->arg = arg;
1979 1.1 christos
1980 1.2 christos if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
1981 1.1 christos evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
1982 1.1 christos
1983 1.1 christos if (tv == NULL || ! evutil_timerisset(tv)) {
1984 1.1 christos /* If the event is going to become active immediately,
1985 1.1 christos * don't put it on the timeout queue. This is one
1986 1.1 christos * idiom for scheduling a callback, so let's make
1987 1.1 christos * it fast (and order-preserving). */
1988 1.1 christos activate = 1;
1989 1.1 christos }
1990 1.2 christos } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
1991 1.2 christos events &= EV_READ|EV_WRITE|EV_CLOSED;
1992 1.1 christos
1993 1.1 christos event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
1994 1.1 christos } else {
1995 1.1 christos /* Bad event combination */
1996 1.1 christos mm_free(eonce);
1997 1.1 christos return (-1);
1998 1.1 christos }
1999 1.1 christos
2000 1.1 christos if (res == 0) {
2001 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2002 1.1 christos if (activate)
2003 1.1 christos event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2004 1.1 christos else
2005 1.1 christos res = event_add_nolock_(&eonce->ev, tv, 0);
2006 1.1 christos
2007 1.1 christos if (res != 0) {
2008 1.1 christos mm_free(eonce);
2009 1.1 christos return (res);
2010 1.1 christos } else {
2011 1.1 christos LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2012 1.1 christos }
2013 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
2014 1.1 christos }
2015 1.1 christos
2016 1.1 christos return (0);
2017 1.1 christos }
2018 1.1 christos
/* Initialize a caller-allocated struct event with fd, interest flags,
 * callback and argument.  A NULL base means the default global base.
 * Returns 0 on success, -1 if EV_SIGNAL is combined with EV_READ,
 * EV_WRITE or EV_CLOSED (in which case ev has been partially filled in
 * and must not be used). */
int
event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
{
	if (!base)
		base = current_base;
	/* The event_self_cbarg() marker means "pass the event itself as
	 * the callback argument". */
	if (arg == &event_self_cbarg_ptr_)
		arg = ev;

	event_debug_assert_not_added_(ev);

	ev->ev_base = base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	/* Choose the closure type that will run this event's callback. */
	if (events & EV_SIGNAL) {
		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
			event_warnx("%s: EV_SIGNAL is not compatible with "
			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
			/* NOTE: ev is left partially initialized here. */
			return -1;
		}
		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
	} else {
		if (events & EV_PERSIST) {
			evutil_timerclear(&ev->ev_io_timeout);
			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
		} else {
			ev->ev_closure = EV_CLOSURE_EVENT;
		}
	}

	min_heap_elem_init_(ev);

	if (base != NULL) {
		/* by default, we put new events into the middle priority */
		ev->ev_pri = base->nactivequeues / 2;
	}

	event_debug_note_setup_(ev);

	return 0;
}
2067 1.1 christos
2068 1.1 christos int
2069 1.1 christos event_base_set(struct event_base *base, struct event *ev)
2070 1.1 christos {
2071 1.1 christos /* Only innocent events may be assigned to a different base */
2072 1.1 christos if (ev->ev_flags != EVLIST_INIT)
2073 1.1 christos return (-1);
2074 1.1 christos
2075 1.1 christos event_debug_assert_is_setup_(ev);
2076 1.1 christos
2077 1.1 christos ev->ev_base = base;
2078 1.1 christos ev->ev_pri = base->nactivequeues/2;
2079 1.1 christos
2080 1.1 christos return (0);
2081 1.1 christos }
2082 1.1 christos
2083 1.1 christos void
2084 1.1 christos event_set(struct event *ev, evutil_socket_t fd, short events,
2085 1.1 christos void (*callback)(evutil_socket_t, short, void *), void *arg)
2086 1.1 christos {
2087 1.1 christos int r;
2088 1.1 christos r = event_assign(ev, current_base, fd, events, callback, arg);
2089 1.1 christos EVUTIL_ASSERT(r == 0);
2090 1.1 christos }
2091 1.1 christos
2092 1.1 christos void *
2093 1.1 christos event_self_cbarg(void)
2094 1.1 christos {
2095 1.1 christos return &event_self_cbarg_ptr_;
2096 1.1 christos }
2097 1.1 christos
2098 1.1 christos struct event *
2099 1.1 christos event_base_get_running_event(struct event_base *base)
2100 1.1 christos {
2101 1.1 christos struct event *ev = NULL;
2102 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2103 1.1 christos if (EVBASE_IN_THREAD(base)) {
2104 1.1 christos struct event_callback *evcb = base->current_event;
2105 1.1 christos if (evcb->evcb_flags & EVLIST_INIT)
2106 1.1 christos ev = event_callback_to_event(evcb);
2107 1.1 christos }
2108 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
2109 1.1 christos return ev;
2110 1.1 christos }
2111 1.1 christos
2112 1.1 christos struct event *
2113 1.1 christos event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2114 1.1 christos {
2115 1.1 christos struct event *ev;
2116 1.1 christos ev = mm_malloc(sizeof(struct event));
2117 1.1 christos if (ev == NULL)
2118 1.1 christos return (NULL);
2119 1.1 christos if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2120 1.1 christos mm_free(ev);
2121 1.1 christos return (NULL);
2122 1.1 christos }
2123 1.1 christos
2124 1.1 christos return (ev);
2125 1.1 christos }
2126 1.1 christos
void
event_free(struct event *ev)
{
	/* Deliberately no event_debug_assert_is_setup_(ev) here: the
	 * check is disabled so that events which have been finalized
	 * remain valid targets for event_free(). */

	/* make sure that this event won't be coming back to haunt us. */
	event_del(ev);
	event_debug_note_teardown_(ev);
	mm_free(ev);
}
2140 1.1 christos
2141 1.1 christos void
2142 1.1 christos event_debug_unassign(struct event *ev)
2143 1.1 christos {
2144 1.1 christos event_debug_assert_not_added_(ev);
2145 1.1 christos event_debug_note_teardown_(ev);
2146 1.1 christos
2147 1.1 christos ev->ev_flags &= ~EVLIST_INIT;
2148 1.1 christos }
2149 1.1 christos
/* Internal flag: also mm_free() the event after its finalizer runs
 * (used by event_free_finalize()). */
#define EVENT_FINALIZE_FREE_ 0x10000
/* Arrange for ev's finalizer cb to run from the event loop.
 * Caller holds th_base_lock (see event_finalize_impl_). */
static int
event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	/* The _FREE closure frees the event after the finalizer runs. */
	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;

	/* Remove the event, then schedule one final activation that will
	 * invoke cb from the loop with the EV_FINALIZE result. */
	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	ev->ev_closure = closure;
	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
	event_active_nolock_(ev, EV_FINALIZE, 1);
	ev->ev_flags |= EVLIST_FINALIZING;
	return 0;
}
2164 1.2 christos
2165 1.2 christos static int
2166 1.2 christos event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2167 1.2 christos {
2168 1.2 christos int r;
2169 1.2 christos struct event_base *base = ev->ev_base;
2170 1.2 christos if (EVUTIL_FAILURE_CHECK(!base)) {
2171 1.2 christos event_warnx("%s: event has no event_base set.", __func__);
2172 1.2 christos return -1;
2173 1.2 christos }
2174 1.2 christos
2175 1.2 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2176 1.2 christos r = event_finalize_nolock_(base, flags, ev, cb);
2177 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
2178 1.2 christos return r;
2179 1.2 christos }
2180 1.2 christos
2181 1.2 christos int
2182 1.2 christos event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2183 1.2 christos {
2184 1.2 christos return event_finalize_impl_(flags, ev, cb);
2185 1.2 christos }
2186 1.2 christos
2187 1.2 christos int
2188 1.2 christos event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2189 1.2 christos {
2190 1.2 christos return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2191 1.2 christos }
2192 1.2 christos
/* Internal: arrange for a bare event_callback's finalizer cb to run from
 * the event loop.  Caller must hold th_base_lock.
 * NOTE(review): the flags parameter is currently unused here. */
void
event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
{
	struct event *ev = NULL;
	/* If the callback is embedded in a full struct event, delete the
	 * event; otherwise just cancel the pending callback. */
	if (evcb->evcb_flags & EVLIST_INIT) {
		ev = event_callback_to_event(evcb);
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	} else {
		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
	}

	/* Re-activate the callback one last time as a finalizer. */
	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
	evcb->evcb_cb_union.evcb_cbfinalize = cb;
	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
	evcb->evcb_flags |= EVLIST_FINALIZING;
}
2209 1.2 christos
2210 1.2 christos void
2211 1.2 christos event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2212 1.2 christos {
2213 1.2 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2214 1.2 christos event_callback_finalize_nolock_(base, flags, evcb, cb);
2215 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
2216 1.2 christos }
2217 1.2 christos
/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
 * callback will be invoked on *one of them*, after they have *all* been
 * finalized. */
int
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
{
	int n_pending = 0, i;

	/* NULL base means the default global base. */
	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	event_debug(("%s: %d events finalizing", __func__, n_cbs));

	/* At most one can be currently executing; the rest we just
	 * cancel... But we always make sure that the finalize callback
	 * runs. */
	for (i = 0; i < n_cbs; ++i) {
		struct event_callback *evcb = evcbs[i];
		if (evcb == base->current_event) {
			/* The one that is executing right now gets the
			 * finalizer attached to it. */
			event_callback_finalize_nolock_(base, 0, evcb, cb);
			++n_pending;
		} else {
			event_callback_cancel_nolock_(base, evcb, 0);
		}
	}

	if (n_pending == 0) {
		/* Just do the first one. */
		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}
2254 1.2 christos
2255 1.1 christos /*
2256 1.1 christos * Set's the priority of an event - if an event is already scheduled
2257 1.1 christos * changing the priority is going to fail.
2258 1.1 christos */
2259 1.1 christos
2260 1.1 christos int
2261 1.1 christos event_priority_set(struct event *ev, int pri)
2262 1.1 christos {
2263 1.1 christos event_debug_assert_is_setup_(ev);
2264 1.1 christos
2265 1.1 christos if (ev->ev_flags & EVLIST_ACTIVE)
2266 1.1 christos return (-1);
2267 1.1 christos if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2268 1.1 christos return (-1);
2269 1.1 christos
2270 1.1 christos ev->ev_pri = pri;
2271 1.1 christos
2272 1.1 christos return (0);
2273 1.1 christos }
2274 1.1 christos
/*
 * Checks if a specific event is pending or scheduled.
 * Returns the subset of 'event' flags (EV_TIMEOUT|EV_READ|EV_WRITE|
 * EV_CLOSED|EV_SIGNAL) that are currently pending; optionally reports
 * the expiry time of a pending timeout through *tv.
 */

int
event_pending(const struct event *ev, short event, struct timeval *tv)
{
	/* Accumulates every flag that is pending on this event. */
	int flags = 0;

	if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return 0;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
	event_debug_assert_is_setup_(ev);

	/* Inserted events report their configured I/O/signal bits. */
	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
	/* Active (or scheduled-to-become-active) events report the
	 * result bits that made them active. */
	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	/* Ignore any bits the caller asked about that we don't track. */
	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		struct timeval tmp = ev->ev_timeout;
		/* Strip common-timeout tag bits from the usec field. */
		tmp.tv_usec &= MICROSECONDS_MASK;
		/* correctly remap to real time */
		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
	}

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (flags & event);
}
2313 1.1 christos
2314 1.1 christos int
2315 1.1 christos event_initialized(const struct event *ev)
2316 1.1 christos {
2317 1.1 christos if (!(ev->ev_flags & EVLIST_INIT))
2318 1.1 christos return 0;
2319 1.1 christos
2320 1.1 christos return 1;
2321 1.1 christos }
2322 1.1 christos
2323 1.1 christos void
2324 1.1 christos event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2325 1.1 christos {
2326 1.1 christos event_debug_assert_is_setup_(event);
2327 1.1 christos
2328 1.1 christos if (base_out)
2329 1.1 christos *base_out = event->ev_base;
2330 1.1 christos if (fd_out)
2331 1.1 christos *fd_out = event->ev_fd;
2332 1.1 christos if (events_out)
2333 1.1 christos *events_out = event->ev_events;
2334 1.1 christos if (callback_out)
2335 1.1 christos *callback_out = event->ev_callback;
2336 1.1 christos if (arg_out)
2337 1.1 christos *arg_out = event->ev_arg;
2338 1.1 christos }
2339 1.1 christos
/* Return sizeof(struct event), so callers that allocate events
 * dynamically need not see the structure definition. */
size_t
event_get_struct_event_size(void)
{
	return sizeof(struct event);
}
2345 1.1 christos
/* Return the file descriptor (or signal number) the event watches. */
evutil_socket_t
event_get_fd(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_fd;
}
2352 1.1 christos
/* Return the event_base this event was assigned to. */
struct event_base *
event_get_base(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_base;
}
2359 1.1 christos
/* Return the event-flag mask (EV_READ, EV_WRITE, ...) set at assignment. */
short
event_get_events(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_events;
}
2366 1.1 christos
/* Return the callback function set at assignment. */
event_callback_fn
event_get_callback(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_callback;
}
2373 1.1 christos
/* Return the user-supplied callback argument set at assignment. */
void *
event_get_callback_arg(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_arg;
}
2380 1.1 christos
/* Return the event's priority (see event_priority_set). */
int
event_get_priority(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_pri;
}
2387 1.1 christos
2388 1.1 christos int
2389 1.1 christos event_add(struct event *ev, const struct timeval *tv)
2390 1.1 christos {
2391 1.1 christos int res;
2392 1.1 christos
2393 1.1 christos if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2394 1.1 christos event_warnx("%s: event has no event_base set.", __func__);
2395 1.1 christos return -1;
2396 1.1 christos }
2397 1.1 christos
2398 1.1 christos EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2399 1.1 christos
2400 1.1 christos res = event_add_nolock_(ev, tv, 0);
2401 1.1 christos
2402 1.1 christos EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2403 1.1 christos
2404 1.1 christos return (res);
2405 1.1 christos }
2406 1.1 christos
/* Helper callback: wake an event_base from another thread.  This version
 * works by writing a byte to one end of a socketpair, so that the event_base
 * listening on the other end will wake up as the corresponding event
 * triggers */
static int
evthread_notify_base_default(struct event_base *base)
{
	char buf[1];
	int r;
	buf[0] = (char) 0;
#ifdef _WIN32
	/* Windows socketpairs are real sockets; must use send(). */
	r = send(base->th_notify_fd[1], buf, 1, 0);
#else
	r = write(base->th_notify_fd[1], buf, 1);
#endif
	/* EAGAIN means the pipe is already full, so the loop will wake
	 * anyway: treat it as success. */
	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
}
2424 1.1 christos
#ifdef EVENT__HAVE_EVENTFD
/* Helper callback: wake an event_base from another thread.  This version
 * assumes that you have a working eventfd() implementation. */
static int
evthread_notify_base_eventfd(struct event_base *base)
{
	/* eventfd counters are incremented by writing a 64-bit value. */
	ev_uint64_t msg = 1;
	int r;
	do {
		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
	} while (r < 0 && errno == EAGAIN);

	return (r < 0) ? -1 : 0;
}
#endif
2440 1.1 christos
2441 1.1 christos
2442 1.1 christos /** Tell the thread currently running the event_loop for base (if any) that it
2443 1.1 christos * needs to stop waiting in its dispatch function (if it is) and process all
2444 1.1 christos * active callbacks. */
2445 1.1 christos static int
2446 1.1 christos evthread_notify_base(struct event_base *base)
2447 1.1 christos {
2448 1.1 christos EVENT_BASE_ASSERT_LOCKED(base);
2449 1.1 christos if (!base->th_notify_fn)
2450 1.1 christos return -1;
2451 1.1 christos if (base->is_notify_pending)
2452 1.1 christos return 0;
2453 1.1 christos base->is_notify_pending = 1;
2454 1.1 christos return base->th_notify_fn(base);
2455 1.1 christos }
2456 1.1 christos
2457 1.1 christos /* Implementation function to remove a timeout on a currently pending event.
2458 1.1 christos */
2459 1.1 christos int
2460 1.1 christos event_remove_timer_nolock_(struct event *ev)
2461 1.1 christos {
2462 1.1 christos struct event_base *base = ev->ev_base;
2463 1.1 christos
2464 1.1 christos EVENT_BASE_ASSERT_LOCKED(base);
2465 1.1 christos event_debug_assert_is_setup_(ev);
2466 1.1 christos
2467 1.1 christos event_debug(("event_remove_timer_nolock: event: %p", ev));
2468 1.1 christos
2469 1.1 christos /* If it's not pending on a timeout, we don't need to do anything. */
2470 1.1 christos if (ev->ev_flags & EVLIST_TIMEOUT) {
2471 1.1 christos event_queue_remove_timeout(base, ev);
2472 1.1 christos evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2473 1.1 christos }
2474 1.1 christos
2475 1.1 christos return (0);
2476 1.1 christos }
2477 1.1 christos
2478 1.1 christos int
2479 1.1 christos event_remove_timer(struct event *ev)
2480 1.1 christos {
2481 1.1 christos int res;
2482 1.1 christos
2483 1.1 christos if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2484 1.1 christos event_warnx("%s: event has no event_base set.", __func__);
2485 1.1 christos return -1;
2486 1.1 christos }
2487 1.1 christos
2488 1.1 christos EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2489 1.1 christos
2490 1.1 christos res = event_remove_timer_nolock_(ev);
2491 1.1 christos
2492 1.1 christos EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2493 1.1 christos
2494 1.1 christos return (res);
2495 1.1 christos }
2496 1.1 christos
/* Implementation function to add an event.  Works just like event_add,
 * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
 * we treat tv as an absolute time, not as an interval to add to the current
 * time */
int
event_add_nolock_(struct event *ev, const struct timeval *tv,
    int tv_is_absolute)
{
	struct event_base *base = ev->ev_base;
	int res = 0;
	/* Set when the running loop thread must be woken up. */
	int notify = 0;

	EVENT_BASE_ASSERT_LOCKED(base);
	event_debug_assert_is_setup_(ev);

	event_debug((
		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
		 ev,
		 EV_SOCK_ARG(ev->ev_fd),
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	/* An event that is being finalized may not be re-added. */
	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return (-1);
	}

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve_(&base->timeheap,
			1 + min_heap_size_(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	/* If the main thread is currently executing a signal event's
	 * callback, and we are not the main thread, then we want to wait
	 * until the callback is done before we mess with the event, or else
	 * we can race on ev_ncalls and ev_pncalls below. */
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (base->current_event == event_to_event_callback(ev) &&
	    (ev->ev_events & EV_SIGNAL)
	    && !EVBASE_IN_THREAD(base)) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	/* Register the event with the backend unless it is already
	 * inserted or active. */
	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_add_(base, ev->ev_fd, ev);
		else if (ev->ev_events & EV_SIGNAL)
			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
		if (res != -1)
			event_queue_insert_inserted(base, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;
		int common_timeout;
#ifdef USE_REINSERT_TIMEOUT
		int was_common;
		int old_timeout_idx;
#endif

		/*
		 * for persistent timeout events, we remember the
		 * timeout value and re-add the event.
		 *
		 * If tv_is_absolute, this was already set.
		 */
		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
			ev->ev_io_timeout = *tv;

#ifndef USE_REINSERT_TIMEOUT
		if (ev->ev_flags & EVLIST_TIMEOUT) {
			event_queue_remove_timeout(base, ev);
		}
#endif

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			if (ev->ev_events & EV_SIGNAL) {
				/* See if we are just active executing
				 * this event in a loop
				 */
				if (ev->ev_ncalls && ev->ev_pncalls) {
					/* Abort loop */
					*ev->ev_pncalls = 0;
				}
			}

			event_queue_remove_active(base, event_to_event_callback(ev));
		}

		gettime(base, &now);

		common_timeout = is_common_timeout(tv, base);
#ifdef USE_REINSERT_TIMEOUT
		was_common = is_common_timeout(&ev->ev_timeout, base);
		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
#endif

		if (tv_is_absolute) {
			ev->ev_timeout = *tv;
		} else if (common_timeout) {
			/* Common timeouts keep their queue-index tag in the
			 * high bits of tv_usec; preserve it after adding. */
			struct timeval tmp = *tv;
			tmp.tv_usec &= MICROSECONDS_MASK;
			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
			ev->ev_timeout.tv_usec |=
			    (tv->tv_usec & ~MICROSECONDS_MASK);
		} else {
			evutil_timeradd(&now, tv, &ev->ev_timeout);
		}

		event_debug((
			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));

#ifdef USE_REINSERT_TIMEOUT
		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
#else
		event_queue_insert_timeout(base, ev);
#endif

		if (common_timeout) {
			/* If this is the first event on its common-timeout
			 * list, (re)schedule the list's representative
			 * timer. */
			struct common_timeout_list *ctl =
			    get_common_timeout_list(base, &ev->ev_timeout);
			if (ev == TAILQ_FIRST(&ctl->events)) {
				common_timeout_schedule(ctl, &now, ev);
			}
		} else {
			struct event* top = NULL;
			/* See if the earliest timeout is now earlier than it
			 * was before: if so, we will need to tell the main
			 * thread to wake up earlier than it would otherwise.
			 * We double check the timeout of the top element to
			 * handle time distortions due to system suspension.
			 */
			if (min_heap_elt_is_top_(ev))
				notify = 1;
			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
			    evutil_timercmp(&top->ev_timeout, &now, <))
				notify = 1;
		}
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_add_(ev);

	return (res);
}
2672 1.1 christos
2673 1.2 christos static int
2674 1.2 christos event_del_(struct event *ev, int blocking)
2675 1.1 christos {
2676 1.1 christos int res;
2677 1.1 christos
2678 1.1 christos if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2679 1.1 christos event_warnx("%s: event has no event_base set.", __func__);
2680 1.1 christos return -1;
2681 1.1 christos }
2682 1.1 christos
2683 1.1 christos EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2684 1.1 christos
2685 1.2 christos res = event_del_nolock_(ev, blocking);
2686 1.1 christos
2687 1.1 christos EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2688 1.1 christos
2689 1.1 christos return (res);
2690 1.1 christos }
2691 1.1 christos
/* Delete 'ev'; block on a concurrently-running callback unless the
 * event was created with EV_FINALIZE (AUTOBLOCK). */
int
event_del(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
}
2697 1.2 christos
/* Delete 'ev', always waiting for any concurrently-running callback
 * to finish first. */
int
event_del_block(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_BLOCK);
}
2703 1.2 christos
/* Delete 'ev' without ever waiting for a concurrently-running callback. */
int
event_del_noblock(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_NOBLOCK);
}
2709 1.2 christos
/** Helper for event_del: always called with th_base_lock held.
 *
 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
 * EVEN_IF_FINALIZING} values. See those for more information.
 *
 * Returns 0 on success (including the no-op case of a finalizing
 * event), -1 if the event was never added to a base.
 */
int
event_del_nolock_(struct event *ev, int blocking)
{
	struct event_base *base;
	int res = 0, notify = 0;

	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);

	/* A finalizing event is already on its way out; deleting it
	 * again is a no-op unless the caller is the finalizer itself. */
	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
		if (ev->ev_flags & EVLIST_FINALIZING) {
			/* XXXX Debug */
			return 0;
		}
	}

	/* If the main thread is currently executing this event's callback,
	 * and we are not the main thread, then we want to wait until the
	 * callback is done before we start removing the event.  That way,
	 * when this function returns, it will be safe to free the
	 * user-supplied argument. */
	base = ev->ev_base;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (blocking != EVENT_DEL_NOBLOCK &&
	    base->current_event == event_to_event_callback(ev) &&
	    !EVBASE_IN_THREAD(base) &&
	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_events & EV_SIGNAL) {
		if (ev->ev_ncalls && ev->ev_pncalls) {
			/* Abort loop */
			*ev->ev_pncalls = 0;
		}
	}

	if (ev->ev_flags & EVLIST_TIMEOUT) {
		/* NOTE: We never need to notify the main thread because of a
		 * deleted timeout event: all that could happen if we don't is
		 * that the dispatch loop might wake up too early.  But the
		 * point of notifying the main thread _is_ to wake up the
		 * dispatch loop early anyway, so we wouldn't gain anything by
		 * doing it.
		 */
		event_queue_remove_timeout(base, ev);
	}

	/* Remove from whichever active queue (if any) the event is on. */
	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove_active(base, event_to_event_callback(ev));
	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
		event_queue_remove_active_later(base, event_to_event_callback(ev));

	/* Unregister from the backend's I/O or signal map. */
	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove_inserted(base, ev);
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_del_(base, ev->ev_fd, ev);
		else
			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_del_(ev);

	return (res);
}
2800 1.1 christos
2801 1.1 christos void
2802 1.1 christos event_active(struct event *ev, int res, short ncalls)
2803 1.1 christos {
2804 1.1 christos if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2805 1.1 christos event_warnx("%s: event has no event_base set.", __func__);
2806 1.1 christos return;
2807 1.1 christos }
2808 1.1 christos
2809 1.1 christos EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2810 1.1 christos
2811 1.1 christos event_debug_assert_is_setup_(ev);
2812 1.1 christos
2813 1.1 christos event_active_nolock_(ev, res, ncalls);
2814 1.1 christos
2815 1.1 christos EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2816 1.1 christos }
2817 1.1 christos
2818 1.1 christos
/* Make 'ev' active with result flags 'res'.  Caller must hold the base
 * lock.  'ncalls' only matters for signal events. */
void
event_active_nolock_(struct event *ev, int res, short ncalls)
{
	struct event_base *base;

	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));

	base = ev->ev_base;
	EVENT_BASE_ASSERT_LOCKED(base);

	/* An event being finalized may not be made active again. */
	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return;
	}

	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		/* An event may not be on both active queues at once. */
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* We get different kinds of events, add them together */
		ev->ev_res |= res;
		return;
	case EVLIST_ACTIVE_LATER:
		/* Merge result flags; activation below is a no-op since
		 * it is already queued for the next iteration. */
		ev->ev_res |= res;
		break;
	case 0:
		ev->ev_res = res;
		break;
	}

	/* If this event outranks (numerically lower pri than) the one
	 * currently running, ask the loop to re-scan its queues soon. */
	if (ev->ev_pri < base->event_running_priority)
		base->event_continue = 1;

	if (ev->ev_events & EV_SIGNAL) {
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		/* Wait for the signal callback running in the loop thread
		 * to finish before touching ev_ncalls/ev_pncalls. */
		if (base->current_event == event_to_event_callback(ev) &&
		    !EVBASE_IN_THREAD(base)) {
			++base->current_event_waiters;
			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
		}
#endif
		ev->ev_ncalls = ncalls;
		ev->ev_pncalls = NULL;
	}

	event_callback_activate_nolock_(base, event_to_event_callback(ev));
}
2869 1.1 christos
2870 1.1 christos void
2871 1.1 christos event_active_later_(struct event *ev, int res)
2872 1.1 christos {
2873 1.1 christos EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2874 1.1 christos event_active_later_nolock_(ev, res);
2875 1.1 christos EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2876 1.1 christos }
2877 1.1 christos
2878 1.1 christos void
2879 1.1 christos event_active_later_nolock_(struct event *ev, int res)
2880 1.1 christos {
2881 1.1 christos struct event_base *base = ev->ev_base;
2882 1.1 christos EVENT_BASE_ASSERT_LOCKED(base);
2883 1.1 christos
2884 1.1 christos if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2885 1.1 christos /* We get different kinds of events, add them together */
2886 1.1 christos ev->ev_res |= res;
2887 1.1 christos return;
2888 1.1 christos }
2889 1.1 christos
2890 1.1 christos ev->ev_res = res;
2891 1.1 christos
2892 1.1 christos event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2893 1.1 christos }
2894 1.1 christos
2895 1.1 christos int
2896 1.1 christos event_callback_activate_(struct event_base *base,
2897 1.1 christos struct event_callback *evcb)
2898 1.1 christos {
2899 1.1 christos int r;
2900 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2901 1.1 christos r = event_callback_activate_nolock_(base, evcb);
2902 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
2903 1.1 christos return r;
2904 1.1 christos }
2905 1.1 christos
/* Put 'evcb' on the active queue.  Caller must hold the base lock.
 * Returns 1 if newly activated, 0 if it was already scheduled or is
 * finalizing. */
int
event_callback_activate_nolock_(struct event_base *base,
    struct event_callback *evcb)
{
	int r = 1;

	/* Finalizing callbacks may not be activated. */
	if (evcb->evcb_flags & EVLIST_FINALIZING)
		return 0;

	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
	default:
		EVUTIL_ASSERT(0);
		/* FALLTHROUGH (after the debug assert fires) */
	case EVLIST_ACTIVE_LATER:
		/* Promote from the later queue to the active queue. */
		event_queue_remove_active_later(base, evcb);
		r = 0;
		break;
	case EVLIST_ACTIVE:
		return 0;
	case 0:
		break;
	}

	event_queue_insert_active(base, evcb);

	/* Wake the loop if another thread is running it. */
	if (EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	return r;
}
2935 1.1 christos
2936 1.1 christos void
2937 1.1 christos event_callback_activate_later_nolock_(struct event_base *base,
2938 1.1 christos struct event_callback *evcb)
2939 1.1 christos {
2940 1.1 christos if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2941 1.1 christos return;
2942 1.1 christos
2943 1.1 christos event_queue_insert_active_later(base, evcb);
2944 1.1 christos if (EVBASE_NEED_NOTIFY(base))
2945 1.1 christos evthread_notify_base(base);
2946 1.1 christos }
2947 1.1 christos
/* Zero-initialize a raw event_callback and give it the lowest
 * (numerically highest) priority of the base. */
void
event_callback_init_(struct event_base *base,
    struct event_callback *cb)
{
	memset(cb, 0, sizeof(*cb));
	cb->evcb_pri = base->nactivequeues - 1;
}
2955 1.1 christos
2956 1.1 christos int
2957 1.1 christos event_callback_cancel_(struct event_base *base,
2958 1.1 christos struct event_callback *evcb)
2959 1.1 christos {
2960 1.1 christos int r;
2961 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2962 1.2 christos r = event_callback_cancel_nolock_(base, evcb, 0);
2963 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
2964 1.1 christos return r;
2965 1.1 christos }
2966 1.1 christos
/* Cancel a raw event_callback.  Caller must hold the base lock.
 * If the callback belongs to a full struct event (EVLIST_INIT), defer
 * to event_del_nolock_; otherwise just pull it off the active queues. */
int
event_callback_cancel_nolock_(struct event_base *base,
    struct event_callback *evcb, int even_if_finalizing)
{
	/* Finalizing callbacks are only cancellable by the finalizer. */
	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
		return 0;

	if (evcb->evcb_flags & EVLIST_INIT)
		return event_del_nolock_(event_callback_to_event(evcb),
		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);

	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		/* May not be on both queues at once. */
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* We get different kinds of events, add them together */
		event_queue_remove_active(base, evcb);
		return 0;
	case EVLIST_ACTIVE_LATER:
		event_queue_remove_active_later(base, evcb);
		break;
	case 0:
		break;
	}

	return 0;
}
2996 1.1 christos
/* Initialize a deferred callback: zero it, then set its self-invoking
 * function, argument, priority, and the CB_SELF closure type. */
void
event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
{
	memset(cb, 0, sizeof(*cb));
	cb->evcb_cb_union.evcb_selfcb = fn;
	cb->evcb_arg = arg;
	cb->evcb_pri = priority;
	cb->evcb_closure = EV_CLOSURE_CB_SELF;
}
3006 1.1 christos
/* Change the priority of a deferred callback. */
void
event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
{
	cb->evcb_pri = priority;
}
3012 1.1 christos
3013 1.1 christos void
3014 1.1 christos event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3015 1.1 christos {
3016 1.1 christos if (!base)
3017 1.1 christos base = current_base;
3018 1.1 christos event_callback_cancel_(base, cb);
3019 1.1 christos }
3020 1.1 christos
3021 1.1 christos #define MAX_DEFERREDS_QUEUED 32
3022 1.1 christos int
3023 1.1 christos event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3024 1.1 christos {
3025 1.1 christos int r = 1;
3026 1.1 christos if (!base)
3027 1.1 christos base = current_base;
3028 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3029 1.1 christos if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3030 1.1 christos event_callback_activate_later_nolock_(base, cb);
3031 1.1 christos } else {
3032 1.1 christos ++base->n_deferreds_queued;
3033 1.1 christos r = event_callback_activate_nolock_(base, cb);
3034 1.1 christos }
3035 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
3036 1.1 christos return r;
3037 1.1 christos }
3038 1.1 christos
/*
 * Compute how long the backend dispatch may block.
 *
 * On return *tv_p is NULL when no timer event is pending (block
 * indefinitely for I/O); otherwise it still points at the caller's
 * timeval, now holding the interval until the earliest timeout
 * (zeroed when that timeout has already elapsed).
 *
 * Returns 0 on success, -1 if the current time cannot be read.
 */
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	/* Caller must hold th_base_lock */
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;
	int res = 0;

	/* The min-heap root is the event with the earliest timeout. */
	ev = min_heap_top_(&base->timeheap);

	if (ev == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		goto out;
	}

	if (gettime(base, &now) == -1) {
		res = -1;
		goto out;
	}

	/* Already due: request a zero-length wait so dispatch only polls. */
	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		goto out;
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	EVUTIL_ASSERT(tv->tv_sec >= 0);
	EVUTIL_ASSERT(tv->tv_usec >= 0);
	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));

out:
	return (res);
}
3075 1.1 christos
/* Activate every event whose timeout has elapsed. */
static void
timeout_process(struct event_base *base)
{
	/* Caller must hold lock. */
	struct timeval now;
	struct event *ev;

	if (min_heap_empty_(&base->timeheap)) {
		return;
	}

	/* NOTE(review): a gettime() failure is ignored here, leaving 'now'
	 * uninitialized -- presumably gettime cannot fail once the base is
	 * set up; confirm. */
	gettime(base, &now);

	/* Pop events in timeout order until one is still in the future. */
	while ((ev = min_heap_top_(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);

		event_debug(("timeout_process: event: %p, call %p",
		    ev, ev->ev_callback));
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
}
3102 1.1 christos
3103 1.1 christos #if (EVLIST_INTERNAL >> 4) != 1
3104 1.1 christos #error "Mismatch for value of EVLIST_INTERNAL"
3105 1.1 christos #endif
3106 1.2 christos
#ifndef MAX
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif

/* Update a high-water-mark counter: var = max(var, v). */
#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)

/* These are a fancy way to spell
   if (!(flags & EVLIST_INTERNAL))
       base->event_count--/++;
   EVLIST_INTERNAL is bit 4 (checked by the #if above), so
   (~((flags) >> 4) & 1) is 1 exactly for non-internal events.
*/
#define DECR_EVENT_COUNT(base,flags) \
	((base)->event_count -= (~((flags) >> 4) & 1))
#define INCR_EVENT_COUNT(base,flags) do {					\
	((base)->event_count += (~((flags) >> 4) & 1));				\
	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);		\
} while (0)
3123 1.1 christos
/* Take 'ev' off the "inserted" (added) set, decrementing the non-internal
 * event count.  Caller must hold the base lock. */
static void
event_queue_remove_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
		return;
	}
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_INSERTED;
}
/* Take 'evcb' off its priority's active queue and clear EVLIST_ACTIVE.
 * Caller must hold the base lock. */
static void
event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
		    evcb, EVLIST_ACTIVE);
		return;
	}
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE;
	base->event_count_active--;

	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}
/* Take 'evcb' off the "run next iteration" queue and clear
 * EVLIST_ACTIVE_LATER.  Caller must hold the base lock. */
static void
event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
		    evcb, EVLIST_ACTIVE_LATER);
		return;
	}
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
	base->event_count_active--;

	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
}
/* Take 'ev' off whichever timeout structure holds it: a common-timeout
 * list when its timeval carries the common-timeout magic, otherwise the
 * min-heap.  Caller must hold the base lock. */
static void
event_queue_remove_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
		return;
	}
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
	} else {
		min_heap_erase_(&base->timeheap, ev);
	}
}
3188 1.1 christos
#ifdef USE_REINSERT_TIMEOUT
/* Remove and reinsert 'ev' into the timeout queue.
 *
 * 'was_common'/'is_common' say whether the old and new timeouts use the
 * common-timeout mechanism; 'old_timeout_idx' identifies the old
 * common-timeout list when 'was_common' is set.  The four cases below
 * cover each combination of (old, new) location. */
static void
event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
    int was_common, int is_common, int old_timeout_idx)
{
	struct common_timeout_list *ctl;
	/* Not queued at all yet: plain insertion suffices. */
	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
		event_queue_insert_timeout(base, ev);
		return;
	}

	switch ((was_common<<1) | is_common) {
	case 3: /* Changing from one common timeout to another */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 2: /* Was common; is no longer common */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		min_heap_push_(&base->timeheap, ev);
		break;
	case 1: /* Wasn't common; has become common. */
		min_heap_erase_(&base->timeheap, ev);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 0: /* was in heap; is still on heap. */
		min_heap_adjust_(&base->timeheap, ev);
		break;
	default:
		EVUTIL_ASSERT(0); /* unreachable */
		break;
	}
}
#endif
3229 1.1 christos
/* Add 'ev' to the common timeout list 'ctl', keeping it sorted. */
/* Insert 'ev' into the common-timeout list 'ctl', keeping the list
 * sorted by increasing timeout. */
static void
insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev)
{
	struct event *e;
	/* By all logic, we should just be able to append 'ev' to the end of
	 * ctl->events, since the timeout on each 'ev' is set to {the common
	 * timeout} + {the time when we add the event}, and so the events
	 * should arrive in order of their timeouts.  But just in case
	 * there's some wacky threading issue going on, we do a search from
	 * the end of 'ev' to find the right insertion point.
	 */
	TAILQ_FOREACH_REVERSE(e, &ctl->events,
	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
		/* This timercmp is a little sneaky, since both ev and e have
		 * magic values in tv_usec.  Fortunately, they ought to have
		 * the _same_ magic values in tv_usec.  Let's assert for that.
		 */
		EVUTIL_ASSERT(
		    is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			return;
		}
	}
	/* Smaller than everything in the list (or list empty): goes first. */
	TAILQ_INSERT_HEAD(&ctl->events, ev,
	    ev_timeout_pos.ev_next_with_common_timeout);
}
3260 1.1 christos
/* Mark 'ev' as inserted (added) on 'base', bumping the non-internal
 * event count.  Caller must hold the base lock. */
static void
event_queue_insert_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_INSERTED;
}
3276 1.1 christos
/* Append 'evcb' to the active queue for its priority and mark it
 * EVLIST_ACTIVE.  Inserting an already-active callback is a no-op.
 * Caller must hold the base lock. */
static void
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (evcb->evcb_flags & EVLIST_ACTIVE) {
		/* Double insertion is possible for active events */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);

	evcb->evcb_flags |= EVLIST_ACTIVE;

	base->event_count_active++;
	/* Track the largest number of simultaneously-active callbacks. */
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}
3297 1.1 christos
/* Append 'evcb' to the "run next iteration" queue and mark it
 * EVLIST_ACTIVE_LATER.  A callback already active (now or later) is left
 * alone.  Caller must hold the base lock. */
static void
event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
		/* Double insertion is possible */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
	base->event_count_active++;
	/* Track the largest number of simultaneously-active callbacks. */
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
}
3314 1.1 christos
/* Put 'ev' on the appropriate timeout structure: a sorted common-timeout
 * list when its timeval carries the common-timeout magic, otherwise the
 * min-heap.  Caller must hold the base lock. */
static void
event_queue_insert_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
	} else {
		min_heap_push_(&base->timeheap, ev);
	}
}
3338 1.1 christos
/* Move every callback on the "active later" queue onto its priority's
 * active queue, flipping EVLIST_ACTIVE_LATER to EVLIST_ACTIVE.  Run at
 * the start of a loop iteration.  Caller must hold the base lock. */
static void
event_queue_make_later_events_active(struct event_base *base)
{
	struct event_callback *evcb;
	EVENT_BASE_ASSERT_LOCKED(base);

	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
		/* Deferred (self) callbacks count against the per-iteration
		 * deferred budget in event_deferred_cb_schedule_(). */
		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
	}
}
3353 1.1 christos
3354 1.1 christos /* Functions for debugging */
3355 1.1 christos
3356 1.1 christos const char *
3357 1.1 christos event_get_version(void)
3358 1.1 christos {
3359 1.1 christos return (EVENT__VERSION);
3360 1.1 christos }
3361 1.1 christos
3362 1.1 christos ev_uint32_t
3363 1.1 christos event_get_version_number(void)
3364 1.1 christos {
3365 1.1 christos return (EVENT__NUMERIC_VERSION);
3366 1.1 christos }
3367 1.1 christos
3368 1.1 christos /*
3369 1.1 christos * No thread-safe interface needed - the information should be the same
3370 1.1 christos * for all threads.
3371 1.1 christos */
3372 1.1 christos
/* Return the name of the backend used by the default (current) base.
 * NOTE(review): dereferences current_base unchecked -- presumably only
 * valid after the default base has been created; confirm with callers. */
const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}
3378 1.1 christos
3379 1.1 christos #ifndef EVENT__DISABLE_MM_REPLACEMENT
/* User-replaceable allocation hooks (see event_set_mem_functions);
 * NULL means "use the C library default". */
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
static void (*mm_free_fn_)(void *p) = NULL;
3383 1.1 christos
3384 1.1 christos void *
3385 1.1 christos event_mm_malloc_(size_t sz)
3386 1.1 christos {
3387 1.1 christos if (sz == 0)
3388 1.1 christos return NULL;
3389 1.1 christos
3390 1.1 christos if (mm_malloc_fn_)
3391 1.1 christos return mm_malloc_fn_(sz);
3392 1.1 christos else
3393 1.1 christos return malloc(sz);
3394 1.1 christos }
3395 1.1 christos
/* Allocate zeroed storage for 'count' objects of 'size' bytes, honoring
 * any replacement allocator.  Returns NULL with errno=ENOMEM on overflow
 * or allocation failure, and NULL silently when count or size is 0. */
void *
event_mm_calloc_(size_t count, size_t size)
{
	if (count == 0 || size == 0)
		return NULL;

	if (mm_malloc_fn_) {
		size_t sz = count * size;
		void *p = NULL;
		/* Reject a multiplication that wrapped; 'sz' is only used
		 * after this check passes.  (Unsigned wrap is well-defined.) */
		if (count > EV_SIZE_MAX / size)
			goto error;
		p = mm_malloc_fn_(sz);
		if (p)
			return memset(p, 0, sz);
		/* Allocation failed: fall through to set ENOMEM. */
	} else {
		/* calloc() performs its own overflow checking. */
		void *p = calloc(count, size);
#ifdef _WIN32
		/* Windows calloc doesn't reliably set ENOMEM */
		if (p == NULL)
			goto error;
#endif
		return p;
	}

error:
	errno = ENOMEM;
	return NULL;
}
3424 1.1 christos
/* Duplicate NUL-terminated 'str', honoring any replacement allocator.
 * Returns NULL with errno=EINVAL on a NULL input, or errno=ENOMEM on
 * allocation failure/overflow.  Caller frees with event_mm_free_(). */
char *
event_mm_strdup_(const char *str)
{
	if (!str) {
		errno = EINVAL;
		return NULL;
	}

	if (mm_malloc_fn_) {
		size_t ln = strlen(str);
		void *p = NULL;
		/* ln+1 would wrap to 0. */
		if (ln == EV_SIZE_MAX)
			goto error;
		p = mm_malloc_fn_(ln+1);
		if (p)
			return memcpy(p, str, ln+1);
		/* Allocation failed: fall through to set ENOMEM. */
	} else
#ifdef _WIN32
		return _strdup(str);
#else
		return strdup(str);
#endif

error:
	errno = ENOMEM;
	return NULL;
}
3452 1.1 christos
3453 1.1 christos void *
3454 1.1 christos event_mm_realloc_(void *ptr, size_t sz)
3455 1.1 christos {
3456 1.1 christos if (mm_realloc_fn_)
3457 1.1 christos return mm_realloc_fn_(ptr, sz);
3458 1.1 christos else
3459 1.1 christos return realloc(ptr, sz);
3460 1.1 christos }
3461 1.1 christos
3462 1.1 christos void
3463 1.1 christos event_mm_free_(void *ptr)
3464 1.1 christos {
3465 1.1 christos if (mm_free_fn_)
3466 1.1 christos mm_free_fn_(ptr);
3467 1.1 christos else
3468 1.1 christos free(ptr);
3469 1.1 christos }
3470 1.1 christos
3471 1.1 christos void
3472 1.1 christos event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3473 1.1 christos void *(*realloc_fn)(void *ptr, size_t sz),
3474 1.1 christos void (*free_fn)(void *ptr))
3475 1.1 christos {
3476 1.1 christos mm_malloc_fn_ = malloc_fn;
3477 1.1 christos mm_realloc_fn_ = realloc_fn;
3478 1.1 christos mm_free_fn_ = free_fn;
3479 1.1 christos }
3480 1.1 christos #endif
3481 1.1 christos
#ifdef EVENT__HAVE_EVENTFD
/* Read callback for the eventfd-based cross-thread wakeup: drain the
 * counter and clear is_notify_pending so future wakeups notify again. */
static void
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
{
	ev_uint64_t msg;
	ev_ssize_t r;
	struct event_base *base = arg;

	/* One 8-byte read resets the eventfd counter to zero. */
	r = read(fd, (void*) &msg, sizeof(msg));
	if (r<0 && errno != EAGAIN) {
		event_sock_warn(fd, "Error reading from eventfd");
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#endif
3499 1.1 christos
/* Read callback for the pipe/socketpair-based wakeup: drain everything
 * written to the notification fd (it is non-blocking, so the loop stops
 * at EAGAIN) and clear is_notify_pending. */
static void
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
{
	unsigned char buf[1024];
	struct event_base *base = arg;
#ifdef _WIN32
	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
		;
#else
	while (read(fd, (char*)buf, sizeof(buf)) > 0)
		;
#endif

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
3517 1.1 christos
3518 1.1 christos int
3519 1.1 christos evthread_make_base_notifiable(struct event_base *base)
3520 1.1 christos {
3521 1.1 christos int r;
3522 1.1 christos if (!base)
3523 1.1 christos return -1;
3524 1.1 christos
3525 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3526 1.1 christos r = evthread_make_base_notifiable_nolock_(base);
3527 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
3528 1.1 christos return r;
3529 1.1 christos }
3530 1.1 christos
/* Set up the cross-thread wakeup mechanism for 'base', preferring (in
 * order) a kqueue self-notification, an eventfd, then an internal pipe.
 * Idempotent: returns 0 immediately if already notifiable.  Caller must
 * hold the base lock.  Returns 0 on success, -1 on failure. */
static int
evthread_make_base_notifiable_nolock_(struct event_base *base)
{
	void (*cb)(evutil_socket_t, short, void *);
	int (*notify)(struct event_base *);

	if (base->th_notify_fn != NULL) {
		/* The base is already notifiable: we're doing fine. */
		return 0;
	}

#if defined(EVENT__HAVE_WORKING_KQUEUE)
	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
		base->th_notify_fn = event_kq_notify_base_;
		/* No need to add an event here; the backend can wake
		 * itself up just fine. */
		return 0;
	}
#endif

#ifdef EVENT__HAVE_EVENTFD
	base->th_notify_fd[0] = evutil_eventfd_(0,
	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
	if (base->th_notify_fd[0] >= 0) {
		/* eventfd needs only one descriptor. */
		base->th_notify_fd[1] = -1;
		notify = evthread_notify_base_eventfd;
		cb = evthread_notify_drain_eventfd;
	} else
#endif
	/* Fallback: a pipe (or socketpair); fd[0] reads, fd[1] writes. */
	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
		notify = evthread_notify_base_default;
		cb = evthread_notify_drain_default;
	} else {
		return -1;
	}

	base->th_notify_fn = notify;

	/* prepare an event that we can use for wakeup */
	event_assign(&base->th_notify, base, base->th_notify_fd[0],
	    EV_READ|EV_PERSIST, cb, base);

	/* we need to mark this as internal event */
	base->th_notify.ev_flags |= EVLIST_INTERNAL;
	/* Highest priority so wakeups are processed promptly. */
	event_priority_set(&base->th_notify, 0);

	return event_add_nolock_(&base->th_notify, NULL, 0);
}
3579 1.1 christos
/* Invoke 'fn(base, ev, arg)' once for every event known to 'base':
 * inserted events, timeout-only events, and active events, each visited
 * exactly once.  Stops early and returns fn's value if it is nonzero;
 * otherwise returns 0.  Caller must hold the base lock. */
int
event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	int r, i;
	unsigned u;
	struct event *ev;

	/* Start out with all the EVLIST_INSERTED events. */
	if ((r = evmap_foreach_event_(base, fn, arg)))
		return r;

	/* Okay, now we deal with those events that have timeouts and are in
	 * the min-heap. */
	for (u = 0; u < base->timeheap.n; ++u) {
		ev = base->timeheap.p[u];
		if (ev->ev_flags & EVLIST_INSERTED) {
			/* we already processed this one */
			continue;
		}
		if ((r = fn(base, ev, arg)))
			return r;
	}

	/* Now for the events in one of the common-timeout queues. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		TAILQ_FOREACH(ev, &ctl->events,
		    ev_timeout_pos.ev_next_with_common_timeout) {
			if (ev->ev_flags & EVLIST_INSERTED) {
				/* we already processed this one */
				continue;
			}
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	/* Finally, we deal with all the active events that we haven't
	 * touched yet. */
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
				/* This isn't an event (evlist_init clear), or
				 * we already processed it. (inserted or
				 * timeout set) */
				continue;
			}
			ev = event_callback_to_event(evcb);
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	return 0;
}
3639 1.1 christos
/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the inserted events.  'arg' is the FILE* to write to.
 * Always returns 0 so iteration continues. */
static int
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	/* Skip events that are neither added nor waiting on a timeout. */
	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
		return 0;

	fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
	    (e->ev_events&EV_READ)?" Read":"",
	    (e->ev_events&EV_WRITE)?" Write":"",
	    (e->ev_events&EV_CLOSED)?" EOF":"",
	    (e->ev_events&EV_SIGNAL)?" Signal":"",
	    (e->ev_events&EV_PERSIST)?" Persist":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
	if (e->ev_flags & EVLIST_TIMEOUT) {
		struct timeval tv;
		tv.tv_sec = e->ev_timeout.tv_sec;
		/* Strip the common-timeout magic bits from tv_usec. */
		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
		/* Convert from the base's monotonic clock to wall time. */
		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
		fprintf(output, " Timeout=%ld.%06d",
		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
	}
	fputc('\n', output);

	return 0;
}
3672 1.1 christos
/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the active events.  'arg' is the FILE* to write to.
 * Always returns 0 so iteration continues. */
static int
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	/* Skip events that are not active now or scheduled for later. */
	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
		return 0;

	fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
	    (e->ev_res&EV_READ)?" Read":"",
	    (e->ev_res&EV_WRITE)?" Write":"",
	    (e->ev_res&EV_CLOSED)?" EOF":"",
	    (e->ev_res&EV_SIGNAL)?" Signal":"",
	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");

	return 0;
}
3697 1.1 christos
3698 1.1 christos int
3699 1.1 christos event_base_foreach_event(struct event_base *base,
3700 1.1 christos event_base_foreach_event_cb fn, void *arg)
3701 1.1 christos {
3702 1.1 christos int r;
3703 1.1 christos if ((!fn) || (!base)) {
3704 1.1 christos return -1;
3705 1.1 christos }
3706 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3707 1.1 christos r = event_base_foreach_event_nolock_(base, fn, arg);
3708 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
3709 1.1 christos return r;
3710 1.1 christos }
3711 1.1 christos
3712 1.1 christos
/* Write a human-readable listing of every inserted and every active
 * event on 'base' to 'output' (debugging aid). */
void
event_base_dump_events(struct event_base *base, FILE *output)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	fprintf(output, "Inserted events:\n");
	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);

	fprintf(output, "Active events:\n");
	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
3724 1.1 christos
3725 1.1 christos void
3726 1.2 christos event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3727 1.2 christos {
3728 1.2 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3729 1.2 christos evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3730 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
3731 1.2 christos }
3732 1.2 christos
/* Activate every event on 'base' watching signal number 'sig', as if the
 * signal had been delivered once. */
void
event_base_active_by_signal(struct event_base *base, int sig)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_signal_active_(base, sig, 1);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
3740 1.2 christos
3741 1.2 christos
/* Register one "virtual" event: a placeholder that keeps the loop from
 * exiting while external activity (e.g. a pending IOCP operation) is
 * outstanding.  Also tracks the high-water mark for statistics. */
void
event_base_add_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->virtual_event_count++;
	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
3750 1.1 christos
/* Unregister one virtual event.  When the count drops to zero, wake the
 * loop (if another thread is waiting in dispatch) so it can notice it
 * may now exit. */
void
event_base_del_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	EVUTIL_ASSERT(base->virtual_event_count > 0);
	base->virtual_event_count--;
	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
3761 1.1 christos
/* Release the global debug-map lock (if any) as part of library-wide
 * shutdown, and tell the thread layer to stop debugging locks. */
static void
event_free_debug_globals_locks(void)
{
#ifndef EVENT__DISABLE_THREAD_SUPPORT
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_map_lock_ != NULL) {
		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
		event_debug_map_lock_ = NULL;
		evthreadimpl_disable_lock_debugging_();
	}
#endif /* EVENT__DISABLE_DEBUG_MODE */
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
	return;
}
3776 1.1 christos
/* Free all debug-mode global state (currently just the locks). */
static void
event_free_debug_globals(void)
{
	event_free_debug_globals_locks();
}
3782 1.1 christos
/* Tear down the signal-handling subsystem's global state. */
static void
event_free_evsig_globals(void)
{
	evsig_free_globals_();
}
3788 1.1 christos
/* Tear down evutil's global state. */
static void
event_free_evutil_globals(void)
{
	evutil_free_globals_();
}
3794 1.1 christos
/*
 * Free all library-wide global state.  NOTE(review): the teardown order
 * here (debug, then evsig, then evutil) may be load-bearing if the later
 * subsystems' globals are referenced while freeing the earlier ones --
 * preserve it unless confirmed otherwise.
 */
static void
event_free_globals(void)
{
	event_free_debug_globals();
	event_free_evsig_globals();
	event_free_evutil_globals();
}
3802 1.1 christos
/*
 * Public API: release every global resource the library allocated.
 * Disables debug mode first, then frees the globals.  Callers must not
 * use any libevent API (other than re-initialization, if supported)
 * after this returns -- presumably intended for leak-checker hygiene at
 * process exit; see the public header for the documented contract.
 */
void
libevent_global_shutdown(void)
{
	event_disable_debug_mode();
	event_free_globals();
}
3809 1.1 christos
3810 1.1 christos #ifndef EVENT__DISABLE_THREAD_SUPPORT
3811 1.1 christos int
3812 1.1 christos event_global_setup_locks_(const int enable_locks)
3813 1.1 christos {
3814 1.1 christos #ifndef EVENT__DISABLE_DEBUG_MODE
3815 1.1 christos EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3816 1.1 christos #endif
3817 1.1 christos if (evsig_global_setup_locks_(enable_locks) < 0)
3818 1.1 christos return -1;
3819 1.1 christos if (evutil_global_setup_locks_(enable_locks) < 0)
3820 1.1 christos return -1;
3821 1.1 christos if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3822 1.1 christos return -1;
3823 1.1 christos return 0;
3824 1.1 christos }
3825 1.1 christos #endif
3826 1.1 christos
/*
 * Locked wrapper around event_base_assert_ok_nolock_(): acquire the
 * base lock, run the internal-consistency assertions, release the lock.
 */
void
event_base_assert_ok_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_base_assert_ok_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
3834 1.1 christos
/*
 * Assert every internal invariant of 'base'.  Caller must already hold
 * the base lock.  Aborts (via EVUTIL_ASSERT) on the first violated
 * invariant; returns normally if everything checks out.  Checks, in
 * order: the fd/signal event maps, the timeout min-heap, the
 * common-timeout queues, and the active/active-later callback queues.
 */
void
event_base_assert_ok_nolock_(struct event_base *base)
{
	int i;
	int count;

	/* First do checks on the per-fd and per-signal lists */
	evmap_check_integrity_(base);

	/* Check the heap property */
	for (i = 1; i < (int)base->timeheap.n; ++i) {
		/* Parent index in the implicit binary min-heap. */
		int parent = (i - 1) / 2;
		struct event *ev, *p_ev;
		ev = base->timeheap.p[i];
		p_ev = base->timeheap.p[parent];
		/* Every heap entry must be a scheduled timeout... */
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
		/* ...no later than its children (min-heap ordering)... */
		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
		/* ...and must record its own heap slot correctly. */
		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
	}

	/* Check that the common timeouts are fine */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl = base->common_timeout_queues[i];
		struct event *last=NULL, *ev;

		/* Verify the tail queue's internal links are consistent. */
		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);

		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
			/* Each queue must be sorted by deadline... */
			if (last)
				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
			/* ...hold only common-timeout events... */
			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
			/* ...that are filed under the right queue index. */
			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
			last = ev;
		}
	}

	/* Check the active queues. */
	count = 0;
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			/* Must be ACTIVE (and not ACTIVE_LATER) exactly. */
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
			/* Queue index doubles as the callback's priority. */
			EVUTIL_ASSERT(evcb->evcb_pri == i);
			++count;
		}
	}

	{
		/* Deferred-until-next-iteration callbacks: ACTIVE_LATER only. */
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
			++count;
		}
	}
	/* The cached active count must match what we just walked. */
	EVUTIL_ASSERT(count == base->event_count_active);
}
3893