event.c revision 1.2 1 1.1 christos /* $NetBSD: event.c,v 1.2 2014/12/19 20:43:18 christos Exp $ */
2 1.1 christos
3 1.1 christos /*
4 1.1 christos * Copyright (c) 2000-2007 Niels Provos <provos (at) citi.umich.edu>
5 1.1 christos * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
6 1.1 christos *
7 1.1 christos * Redistribution and use in source and binary forms, with or without
8 1.1 christos * modification, are permitted provided that the following conditions
9 1.1 christos * are met:
10 1.1 christos * 1. Redistributions of source code must retain the above copyright
11 1.1 christos * notice, this list of conditions and the following disclaimer.
12 1.1 christos * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 christos * notice, this list of conditions and the following disclaimer in the
14 1.1 christos * documentation and/or other materials provided with the distribution.
15 1.1 christos * 3. The name of the author may not be used to endorse or promote products
16 1.1 christos * derived from this software without specific prior written permission.
17 1.1 christos *
18 1.1 christos * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 1.1 christos * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 1.1 christos * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 1.1 christos * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 1.1 christos * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 1.1 christos * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 1.1 christos * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 1.1 christos * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 1.1 christos * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 1.1 christos * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 1.1 christos */
29 1.1 christos #include "event2/event-config.h"
30 1.1 christos #include "evconfig-private.h"
31 1.1 christos
32 1.1 christos #ifdef _WIN32
33 1.1 christos #include <winsock2.h>
34 1.1 christos #define WIN32_LEAN_AND_MEAN
35 1.1 christos #include <windows.h>
36 1.1 christos #undef WIN32_LEAN_AND_MEAN
37 1.1 christos #endif
38 1.1 christos #include <sys/types.h>
39 1.1 christos #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
40 1.1 christos #include <sys/time.h>
41 1.1 christos #endif
42 1.1 christos #include <sys/queue.h>
43 1.1 christos #ifdef EVENT__HAVE_SYS_SOCKET_H
44 1.1 christos #include <sys/socket.h>
45 1.1 christos #endif
46 1.1 christos #include <stdio.h>
47 1.1 christos #include <stdlib.h>
48 1.1 christos #ifdef EVENT__HAVE_UNISTD_H
49 1.1 christos #include <unistd.h>
50 1.1 christos #endif
51 1.1 christos #include <ctype.h>
52 1.1 christos #include <errno.h>
53 1.1 christos #include <signal.h>
54 1.1 christos #include <string.h>
55 1.1 christos #include <time.h>
56 1.1 christos #include <limits.h>
57 1.1 christos
58 1.1 christos #include "event2/event.h"
59 1.1 christos #include "event2/event_struct.h"
60 1.1 christos #include "event2/event_compat.h"
61 1.1 christos #include "event-internal.h"
62 1.1 christos #include "defer-internal.h"
63 1.1 christos #include "evthread-internal.h"
64 1.1 christos #include "event2/thread.h"
65 1.1 christos #include "event2/util.h"
66 1.1 christos #include "log-internal.h"
67 1.1 christos #include "evmap-internal.h"
68 1.1 christos #include "iocp-internal.h"
69 1.1 christos #include "changelist-internal.h"
70 1.1 christos #define HT_NO_CACHE_HASH_VALUES
71 1.1 christos #include "ht-internal.h"
72 1.1 christos #include "util-internal.h"
73 1.1 christos
74 1.1 christos
75 1.1 christos #ifdef EVENT__HAVE_WORKING_KQUEUE
76 1.1 christos #include "kqueue-internal.h"
77 1.1 christos #endif
78 1.1 christos
79 1.1 christos #ifdef EVENT__HAVE_EVENT_PORTS
80 1.1 christos extern const struct eventop evportops;
81 1.1 christos #endif
82 1.1 christos #ifdef EVENT__HAVE_SELECT
83 1.1 christos extern const struct eventop selectops;
84 1.1 christos #endif
85 1.1 christos #ifdef EVENT__HAVE_POLL
86 1.1 christos extern const struct eventop pollops;
87 1.1 christos #endif
88 1.1 christos #ifdef EVENT__HAVE_EPOLL
89 1.1 christos extern const struct eventop epollops;
90 1.1 christos #endif
91 1.1 christos #ifdef EVENT__HAVE_WORKING_KQUEUE
92 1.1 christos extern const struct eventop kqops;
93 1.1 christos #endif
94 1.1 christos #ifdef EVENT__HAVE_DEVPOLL
95 1.1 christos extern const struct eventop devpollops;
96 1.1 christos #endif
97 1.1 christos #ifdef _WIN32
98 1.1 christos extern const struct eventop win32ops;
99 1.1 christos #endif
100 1.1 christos
101 1.1 christos /* Array of backends in order of preference. */
102 1.1 christos static const struct eventop *eventops[] = {
103 1.1 christos #ifdef EVENT__HAVE_EVENT_PORTS
104 1.1 christos &evportops,
105 1.1 christos #endif
106 1.1 christos #ifdef EVENT__HAVE_WORKING_KQUEUE
107 1.1 christos &kqops,
108 1.1 christos #endif
109 1.1 christos #ifdef EVENT__HAVE_EPOLL
110 1.1 christos &epollops,
111 1.1 christos #endif
112 1.1 christos #ifdef EVENT__HAVE_DEVPOLL
113 1.1 christos &devpollops,
114 1.1 christos #endif
115 1.1 christos #ifdef EVENT__HAVE_POLL
116 1.1 christos &pollops,
117 1.1 christos #endif
118 1.1 christos #ifdef EVENT__HAVE_SELECT
119 1.1 christos &selectops,
120 1.1 christos #endif
121 1.1 christos #ifdef _WIN32
122 1.1 christos &win32ops,
123 1.1 christos #endif
124 1.1 christos NULL
125 1.1 christos };
126 1.1 christos
127 1.1 christos /* Global state; deprecated */
128 1.1 christos struct event_base *event_global_current_base_ = NULL;
129 1.1 christos #define current_base event_global_current_base_
130 1.1 christos
131 1.1 christos /* Global state */
132 1.1 christos
133 1.1 christos static void *event_self_cbarg_ptr_ = NULL;
134 1.1 christos
135 1.1 christos /* Prototypes */
136 1.1 christos static void event_queue_insert_active(struct event_base *, struct event_callback *);
137 1.1 christos static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
138 1.1 christos static void event_queue_insert_timeout(struct event_base *, struct event *);
139 1.1 christos static void event_queue_insert_inserted(struct event_base *, struct event *);
140 1.1 christos static void event_queue_remove_active(struct event_base *, struct event_callback *);
141 1.1 christos static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
142 1.1 christos static void event_queue_remove_timeout(struct event_base *, struct event *);
143 1.1 christos static void event_queue_remove_inserted(struct event_base *, struct event *);
144 1.1 christos static void event_queue_make_later_events_active(struct event_base *base);
145 1.1 christos
146 1.1 christos static int evthread_make_base_notifiable_nolock_(struct event_base *base);
147 1.2 christos static int event_del_(struct event *ev, int blocking);
148 1.1 christos
149 1.1 christos #ifdef USE_REINSERT_TIMEOUT
150 1.1 christos /* This code seems buggy; only turn it on if we find out what the trouble is. */
151 1.1 christos static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
152 1.1 christos #endif
153 1.1 christos
154 1.1 christos static int event_haveevents(struct event_base *);
155 1.1 christos
156 1.1 christos static int event_process_active(struct event_base *);
157 1.1 christos
158 1.1 christos static int timeout_next(struct event_base *, struct timeval **);
159 1.1 christos static void timeout_process(struct event_base *);
160 1.1 christos
161 1.1 christos static inline void event_signal_closure(struct event_base *, struct event *ev);
162 1.1 christos static inline void event_persist_closure(struct event_base *, struct event *ev);
163 1.1 christos
164 1.1 christos static int evthread_notify_base(struct event_base *base);
165 1.1 christos
166 1.1 christos static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
167 1.1 christos struct event *ev);
168 1.1 christos
169 1.1 christos #ifndef EVENT__DISABLE_DEBUG_MODE
170 1.1 christos /* These functions implement a hashtable of which 'struct event *' structures
171 1.1 christos * have been setup or added. We don't want to trust the content of the struct
172 1.1 christos * event itself, since we're trying to work through cases where an event gets
173 1.1 christos * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
174 1.1 christos */
175 1.1 christos
176 1.1 christos struct event_debug_entry {
177 1.1 christos HT_ENTRY(event_debug_entry) node;
178 1.1 christos const struct event *ptr;
179 1.1 christos unsigned added : 1;
180 1.1 christos };
181 1.1 christos
182 1.1 christos static inline unsigned
183 1.1 christos hash_debug_entry(const struct event_debug_entry *e)
184 1.1 christos {
185 1.1 christos /* We need to do this silliness to convince compilers that we
186 1.1 christos * honestly mean to cast e->ptr to an integer, and discard any
187 1.1 christos * part of it that doesn't fit in an unsigned.
188 1.1 christos */
189 1.1 christos unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
190 1.1 christos /* Our hashtable implementation is pretty sensitive to low bits,
191 1.1 christos * and every struct event is over 64 bytes in size, so we can
192 1.1 christos * just say >>6. */
193 1.1 christos return (u >> 6);
194 1.1 christos }
195 1.1 christos
196 1.1 christos static inline int
197 1.1 christos eq_debug_entry(const struct event_debug_entry *a,
198 1.1 christos const struct event_debug_entry *b)
199 1.1 christos {
200 1.1 christos return a->ptr == b->ptr;
201 1.1 christos }
202 1.1 christos
203 1.1 christos int event_debug_mode_on_ = 0;
204 1.1 christos /* Set if it's too late to enable event_debug_mode. */
205 1.1 christos static int event_debug_mode_too_late = 0;
206 1.1 christos #ifndef EVENT__DISABLE_THREAD_SUPPORT
207 1.1 christos static void *event_debug_map_lock_ = NULL;
208 1.1 christos #endif
209 1.1 christos static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
210 1.1 christos HT_INITIALIZER();
211 1.1 christos
212 1.1 christos HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
213 1.1 christos eq_debug_entry)
214 1.1 christos HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
215 1.1 christos eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
216 1.1 christos
217 1.1 christos /* Macro: record that ev is now setup (that is, ready for an add) */
218 1.1 christos #define event_debug_note_setup_(ev) do { \
219 1.1 christos if (event_debug_mode_on_) { \
220 1.1 christos struct event_debug_entry *dent,find; \
221 1.1 christos find.ptr = (ev); \
222 1.1 christos EVLOCK_LOCK(event_debug_map_lock_, 0); \
223 1.1 christos dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
224 1.1 christos if (dent) { \
225 1.1 christos dent->added = 0; \
226 1.1 christos } else { \
227 1.1 christos dent = mm_malloc(sizeof(*dent)); \
228 1.1 christos if (!dent) \
229 1.1 christos event_err(1, \
230 1.1 christos "Out of memory in debugging code"); \
231 1.1 christos dent->ptr = (ev); \
232 1.1 christos dent->added = 0; \
233 1.1 christos HT_INSERT(event_debug_map, &global_debug_map, dent); \
234 1.1 christos } \
235 1.1 christos EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
236 1.1 christos } \
237 1.1 christos event_debug_mode_too_late = 1; \
238 1.1 christos } while (0)
239 1.1 christos /* Macro: record that ev is no longer setup */
240 1.1 christos #define event_debug_note_teardown_(ev) do { \
241 1.1 christos if (event_debug_mode_on_) { \
242 1.1 christos struct event_debug_entry *dent,find; \
243 1.1 christos find.ptr = (ev); \
244 1.1 christos EVLOCK_LOCK(event_debug_map_lock_, 0); \
245 1.1 christos dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
246 1.1 christos if (dent) \
247 1.1 christos mm_free(dent); \
248 1.1 christos EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
249 1.1 christos } \
250 1.1 christos event_debug_mode_too_late = 1; \
251 1.1 christos } while (0)
252 1.1 christos /* Macro: record that ev is now added */
253 1.1 christos #define event_debug_note_add_(ev) do { \
254 1.1 christos if (event_debug_mode_on_) { \
255 1.1 christos struct event_debug_entry *dent,find; \
256 1.1 christos find.ptr = (ev); \
257 1.1 christos EVLOCK_LOCK(event_debug_map_lock_, 0); \
258 1.1 christos dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
259 1.1 christos if (dent) { \
260 1.1 christos dent->added = 1; \
261 1.1 christos } else { \
262 1.1 christos event_errx(EVENT_ERR_ABORT_, \
263 1.1 christos "%s: noting an add on a non-setup event %p" \
264 1.1 christos " (events: 0x%x, fd: "EV_SOCK_FMT \
265 1.1 christos ", flags: 0x%x)", \
266 1.1 christos __func__, (ev), (ev)->ev_events, \
267 1.1 christos EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
268 1.1 christos } \
269 1.1 christos EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
270 1.1 christos } \
271 1.1 christos event_debug_mode_too_late = 1; \
272 1.1 christos } while (0)
273 1.1 christos /* Macro: record that ev is no longer added */
274 1.1 christos #define event_debug_note_del_(ev) do { \
275 1.1 christos if (event_debug_mode_on_) { \
276 1.1 christos struct event_debug_entry *dent,find; \
277 1.1 christos find.ptr = (ev); \
278 1.1 christos EVLOCK_LOCK(event_debug_map_lock_, 0); \
279 1.1 christos dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
280 1.1 christos if (dent) { \
281 1.1 christos dent->added = 0; \
282 1.1 christos } else { \
283 1.1 christos event_errx(EVENT_ERR_ABORT_, \
284 1.1 christos "%s: noting a del on a non-setup event %p" \
285 1.1 christos " (events: 0x%x, fd: "EV_SOCK_FMT \
286 1.1 christos ", flags: 0x%x)", \
287 1.1 christos __func__, (ev), (ev)->ev_events, \
288 1.1 christos EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
289 1.1 christos } \
290 1.1 christos EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
291 1.1 christos } \
292 1.1 christos event_debug_mode_too_late = 1; \
293 1.1 christos } while (0)
294 1.1 christos /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
295 1.1 christos #define event_debug_assert_is_setup_(ev) do { \
296 1.1 christos if (event_debug_mode_on_) { \
297 1.1 christos struct event_debug_entry *dent,find; \
298 1.1 christos find.ptr = (ev); \
299 1.1 christos EVLOCK_LOCK(event_debug_map_lock_, 0); \
300 1.1 christos dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
301 1.1 christos if (!dent) { \
302 1.1 christos event_errx(EVENT_ERR_ABORT_, \
303 1.1 christos "%s called on a non-initialized event %p" \
304 1.1 christos " (events: 0x%x, fd: "EV_SOCK_FMT\
305 1.1 christos ", flags: 0x%x)", \
306 1.1 christos __func__, (ev), (ev)->ev_events, \
307 1.1 christos EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
308 1.1 christos } \
309 1.1 christos EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
310 1.1 christos } \
311 1.1 christos } while (0)
312 1.1 christos /* Macro: assert that ev is not added (i.e., okay to tear down or set
313 1.1 christos * up again) */
314 1.1 christos #define event_debug_assert_not_added_(ev) do { \
315 1.1 christos if (event_debug_mode_on_) { \
316 1.1 christos struct event_debug_entry *dent,find; \
317 1.1 christos find.ptr = (ev); \
318 1.1 christos EVLOCK_LOCK(event_debug_map_lock_, 0); \
319 1.1 christos dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
320 1.1 christos if (dent && dent->added) { \
321 1.1 christos event_errx(EVENT_ERR_ABORT_, \
322 1.1 christos "%s called on an already added event %p" \
323 1.1 christos " (events: 0x%x, fd: "EV_SOCK_FMT", " \
324 1.1 christos "flags: 0x%x)", \
325 1.1 christos __func__, (ev), (ev)->ev_events, \
326 1.1 christos EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
327 1.1 christos } \
328 1.1 christos EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
329 1.1 christos } \
330 1.1 christos } while (0)
331 1.1 christos #else
332 1.1 christos #define event_debug_note_setup_(ev) \
333 1.1 christos ((void)0)
334 1.1 christos #define event_debug_note_teardown_(ev) \
335 1.1 christos ((void)0)
336 1.1 christos #define event_debug_note_add_(ev) \
337 1.1 christos ((void)0)
338 1.1 christos #define event_debug_note_del_(ev) \
339 1.1 christos ((void)0)
340 1.1 christos #define event_debug_assert_is_setup_(ev) \
341 1.1 christos ((void)0)
342 1.1 christos #define event_debug_assert_not_added_(ev) \
343 1.1 christos ((void)0)
344 1.1 christos #endif
345 1.1 christos
346 1.1 christos #define EVENT_BASE_ASSERT_LOCKED(base) \
347 1.1 christos EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
348 1.1 christos
349 1.1 christos /* How often (in seconds) do we check for changes in wall clock time relative
350 1.1 christos * to monotonic time? Set this to -1 for 'never.' */
351 1.1 christos #define CLOCK_SYNC_INTERVAL 5
352 1.1 christos
353 1.1 christos /** Set 'tp' to the current time according to 'base'. We must hold the lock
354 1.1 christos * on 'base'. If there is a cached time, return it. Otherwise, use
355 1.1 christos * clock_gettime or gettimeofday as appropriate to find out the right time.
356 1.1 christos * Return 0 on success, -1 on failure.
357 1.1 christos */
358 1.1 christos static int
359 1.1 christos gettime(struct event_base *base, struct timeval *tp)
360 1.1 christos {
361 1.1 christos EVENT_BASE_ASSERT_LOCKED(base);
362 1.1 christos
363 1.1 christos if (base->tv_cache.tv_sec) {
364 1.1 christos *tp = base->tv_cache;
365 1.1 christos return (0);
366 1.1 christos }
367 1.1 christos
368 1.1 christos if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
369 1.1 christos return -1;
370 1.1 christos }
371 1.1 christos
372 1.1 christos if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
373 1.1 christos < tp->tv_sec) {
374 1.1 christos struct timeval tv;
375 1.1 christos evutil_gettimeofday(&tv,NULL);
376 1.1 christos evutil_timersub(&tv, tp, &base->tv_clock_diff);
377 1.1 christos base->last_updated_clock_diff = tp->tv_sec;
378 1.1 christos }
379 1.1 christos
380 1.1 christos return 0;
381 1.1 christos }
382 1.1 christos
383 1.1 christos int
384 1.1 christos event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
385 1.1 christos {
386 1.1 christos int r;
387 1.1 christos if (!base) {
388 1.1 christos base = current_base;
389 1.1 christos if (!current_base)
390 1.1 christos return evutil_gettimeofday(tv, NULL);
391 1.1 christos }
392 1.1 christos
393 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
394 1.1 christos if (base->tv_cache.tv_sec == 0) {
395 1.1 christos r = evutil_gettimeofday(tv, NULL);
396 1.1 christos } else {
397 1.1 christos evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
398 1.1 christos r = 0;
399 1.1 christos }
400 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
401 1.1 christos return r;
402 1.1 christos }
403 1.1 christos
404 1.1 christos /** Make 'base' have no current cached time. */
405 1.1 christos static inline void
406 1.1 christos clear_time_cache(struct event_base *base)
407 1.1 christos {
408 1.1 christos base->tv_cache.tv_sec = 0;
409 1.1 christos }
410 1.1 christos
411 1.1 christos /** Replace the cached time in 'base' with the current time. */
412 1.1 christos static inline void
413 1.1 christos update_time_cache(struct event_base *base)
414 1.1 christos {
415 1.1 christos base->tv_cache.tv_sec = 0;
416 1.1 christos if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
417 1.1 christos gettime(base, &base->tv_cache);
418 1.1 christos }
419 1.1 christos
420 1.1 christos int
421 1.1 christos event_base_update_cache_time(struct event_base *base)
422 1.1 christos {
423 1.1 christos
424 1.1 christos if (!base) {
425 1.1 christos base = current_base;
426 1.1 christos if (!current_base)
427 1.1 christos return -1;
428 1.1 christos }
429 1.1 christos
430 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
431 1.2 christos if (base->running_loop)
432 1.2 christos update_time_cache(base);
433 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
434 1.1 christos return 0;
435 1.1 christos }
436 1.1 christos
437 1.1 christos static inline struct event *
438 1.1 christos event_callback_to_event(struct event_callback *evcb)
439 1.1 christos {
440 1.1 christos EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
441 1.1 christos return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
442 1.1 christos }
443 1.1 christos
444 1.1 christos static inline struct event_callback *
445 1.1 christos event_to_event_callback(struct event *ev)
446 1.1 christos {
447 1.1 christos return &ev->ev_evcallback;
448 1.1 christos }
449 1.1 christos
450 1.1 christos struct event_base *
451 1.1 christos event_init(void)
452 1.1 christos {
453 1.1 christos struct event_base *base = event_base_new_with_config(NULL);
454 1.1 christos
455 1.1 christos if (base == NULL) {
456 1.1 christos event_errx(1, "%s: Unable to construct event_base", __func__);
457 1.1 christos return NULL;
458 1.1 christos }
459 1.1 christos
460 1.1 christos current_base = base;
461 1.1 christos
462 1.1 christos return (base);
463 1.1 christos }
464 1.1 christos
465 1.1 christos struct event_base *
466 1.1 christos event_base_new(void)
467 1.1 christos {
468 1.1 christos struct event_base *base = NULL;
469 1.1 christos struct event_config *cfg = event_config_new();
470 1.1 christos if (cfg) {
471 1.1 christos base = event_base_new_with_config(cfg);
472 1.1 christos event_config_free(cfg);
473 1.1 christos }
474 1.1 christos return base;
475 1.1 christos }
476 1.1 christos
477 1.1 christos /** Return true iff 'method' is the name of a method that 'cfg' tells us to
478 1.1 christos * avoid. */
479 1.1 christos static int
480 1.1 christos event_config_is_avoided_method(const struct event_config *cfg,
481 1.1 christos const char *method)
482 1.1 christos {
483 1.1 christos struct event_config_entry *entry;
484 1.1 christos
485 1.1 christos TAILQ_FOREACH(entry, &cfg->entries, next) {
486 1.1 christos if (entry->avoid_method != NULL &&
487 1.1 christos strcmp(entry->avoid_method, method) == 0)
488 1.1 christos return (1);
489 1.1 christos }
490 1.1 christos
491 1.1 christos return (0);
492 1.1 christos }
493 1.1 christos
494 1.1 christos /** Return true iff 'method' is disabled according to the environment. */
495 1.1 christos static int
496 1.1 christos event_is_method_disabled(const char *name)
497 1.1 christos {
498 1.1 christos char environment[64];
499 1.1 christos int i;
500 1.1 christos
501 1.1 christos evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
502 1.1 christos for (i = 8; environment[i] != '\0'; ++i)
503 1.1 christos environment[i] = EVUTIL_TOUPPER_(environment[i]);
504 1.1 christos /* Note that evutil_getenv_() ignores the environment entirely if
505 1.1 christos * we're setuid */
506 1.1 christos return (evutil_getenv_(environment) != NULL);
507 1.1 christos }
508 1.1 christos
509 1.1 christos int
510 1.1 christos event_base_get_features(const struct event_base *base)
511 1.1 christos {
512 1.1 christos return base->evsel->features;
513 1.1 christos }
514 1.1 christos
515 1.1 christos void
516 1.1 christos event_enable_debug_mode(void)
517 1.1 christos {
518 1.1 christos #ifndef EVENT__DISABLE_DEBUG_MODE
519 1.1 christos if (event_debug_mode_on_)
520 1.1 christos event_errx(1, "%s was called twice!", __func__);
521 1.1 christos if (event_debug_mode_too_late)
522 1.1 christos event_errx(1, "%s must be called *before* creating any events "
523 1.1 christos "or event_bases",__func__);
524 1.1 christos
525 1.1 christos event_debug_mode_on_ = 1;
526 1.1 christos
527 1.1 christos HT_INIT(event_debug_map, &global_debug_map);
528 1.1 christos #endif
529 1.1 christos }
530 1.1 christos
#if 0
/* Compiled out: tear down debug mode, freeing every tracked entry. */
void
event_disable_debug_mode(void)
{
	struct event_debug_entry **entp, *victim;

	EVLOCK_LOCK(event_debug_map_lock_, 0);
	entp = HT_START(event_debug_map, &global_debug_map);
	while (entp) {
		victim = *entp;
		/* Advance past the entry while removing it from the map. */
		entp = HT_NEXT_RMV(event_debug_map, &global_debug_map, entp);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
}
#endif
547 1.1 christos
548 1.1 christos struct event_base *
549 1.1 christos event_base_new_with_config(const struct event_config *cfg)
550 1.1 christos {
551 1.1 christos int i;
552 1.1 christos struct event_base *base;
553 1.1 christos int should_check_environment;
554 1.1 christos
555 1.1 christos #ifndef EVENT__DISABLE_DEBUG_MODE
556 1.1 christos event_debug_mode_too_late = 1;
557 1.1 christos #endif
558 1.1 christos
559 1.1 christos if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
560 1.1 christos event_warn("%s: calloc", __func__);
561 1.1 christos return NULL;
562 1.1 christos }
563 1.1 christos
564 1.1 christos if (cfg)
565 1.1 christos base->flags = cfg->flags;
566 1.1 christos
567 1.1 christos should_check_environment =
568 1.1 christos !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
569 1.1 christos
570 1.1 christos {
571 1.1 christos struct timeval tmp;
572 1.1 christos int precise_time =
573 1.1 christos cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
574 1.1 christos int flags;
575 1.1 christos if (should_check_environment && !precise_time) {
576 1.1 christos precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
577 1.1 christos base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
578 1.1 christos }
579 1.1 christos flags = precise_time ? EV_MONOT_PRECISE : 0;
580 1.1 christos evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
581 1.1 christos
582 1.1 christos gettime(base, &tmp);
583 1.1 christos }
584 1.1 christos
585 1.1 christos min_heap_ctor_(&base->timeheap);
586 1.1 christos
587 1.1 christos base->sig.ev_signal_pair[0] = -1;
588 1.1 christos base->sig.ev_signal_pair[1] = -1;
589 1.1 christos base->th_notify_fd[0] = -1;
590 1.1 christos base->th_notify_fd[1] = -1;
591 1.1 christos
592 1.1 christos TAILQ_INIT(&base->active_later_queue);
593 1.1 christos
594 1.1 christos evmap_io_initmap_(&base->io);
595 1.1 christos evmap_signal_initmap_(&base->sigmap);
596 1.1 christos event_changelist_init_(&base->changelist);
597 1.1 christos
598 1.1 christos base->evbase = NULL;
599 1.1 christos
600 1.1 christos if (cfg) {
601 1.1 christos memcpy(&base->max_dispatch_time,
602 1.1 christos &cfg->max_dispatch_interval, sizeof(struct timeval));
603 1.1 christos base->limit_callbacks_after_prio =
604 1.1 christos cfg->limit_callbacks_after_prio;
605 1.1 christos } else {
606 1.1 christos base->max_dispatch_time.tv_sec = -1;
607 1.1 christos base->limit_callbacks_after_prio = 1;
608 1.1 christos }
609 1.1 christos if (cfg && cfg->max_dispatch_callbacks >= 0) {
610 1.1 christos base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
611 1.1 christos } else {
612 1.1 christos base->max_dispatch_callbacks = INT_MAX;
613 1.1 christos }
614 1.1 christos if (base->max_dispatch_callbacks == INT_MAX &&
615 1.1 christos base->max_dispatch_time.tv_sec == -1)
616 1.1 christos base->limit_callbacks_after_prio = INT_MAX;
617 1.1 christos
618 1.1 christos for (i = 0; eventops[i] && !base->evbase; i++) {
619 1.1 christos if (cfg != NULL) {
620 1.1 christos /* determine if this backend should be avoided */
621 1.1 christos if (event_config_is_avoided_method(cfg,
622 1.1 christos eventops[i]->name))
623 1.1 christos continue;
624 1.1 christos if ((eventops[i]->features & cfg->require_features)
625 1.1 christos != cfg->require_features)
626 1.1 christos continue;
627 1.1 christos }
628 1.1 christos
629 1.1 christos /* also obey the environment variables */
630 1.1 christos if (should_check_environment &&
631 1.1 christos event_is_method_disabled(eventops[i]->name))
632 1.1 christos continue;
633 1.1 christos
634 1.1 christos base->evsel = eventops[i];
635 1.1 christos
636 1.1 christos base->evbase = base->evsel->init(base);
637 1.1 christos }
638 1.1 christos
639 1.1 christos if (base->evbase == NULL) {
640 1.1 christos event_warnx("%s: no event mechanism available",
641 1.1 christos __func__);
642 1.1 christos base->evsel = NULL;
643 1.1 christos event_base_free(base);
644 1.1 christos return NULL;
645 1.1 christos }
646 1.1 christos
647 1.1 christos if (evutil_getenv_("EVENT_SHOW_METHOD"))
648 1.1 christos event_msgx("libevent using: %s", base->evsel->name);
649 1.1 christos
650 1.1 christos /* allocate a single active event queue */
651 1.1 christos if (event_base_priority_init(base, 1) < 0) {
652 1.1 christos event_base_free(base);
653 1.1 christos return NULL;
654 1.1 christos }
655 1.1 christos
656 1.1 christos /* prepare for threading */
657 1.1 christos
658 1.1 christos #ifndef EVENT__DISABLE_THREAD_SUPPORT
659 1.1 christos if (EVTHREAD_LOCKING_ENABLED() &&
660 1.1 christos (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
661 1.1 christos int r;
662 1.1 christos EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
663 1.1 christos EVTHREAD_ALLOC_COND(base->current_event_cond);
664 1.1 christos r = evthread_make_base_notifiable(base);
665 1.1 christos if (r<0) {
666 1.1 christos event_warnx("%s: Unable to make base notifiable.", __func__);
667 1.1 christos event_base_free(base);
668 1.1 christos return NULL;
669 1.1 christos }
670 1.1 christos }
671 1.1 christos #endif
672 1.1 christos
673 1.1 christos #ifdef _WIN32
674 1.1 christos if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
675 1.1 christos event_base_start_iocp_(base, cfg->n_cpus_hint);
676 1.1 christos #endif
677 1.1 christos
678 1.1 christos return (base);
679 1.1 christos }
680 1.1 christos
/** Start the IOCP port for 'base' with 'n_cpus' worker threads.  Returns 0
 * on success (or if already started), -1 on failure or on non-Windows
 * platforms where IOCP does not exist. */
int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
	if (base->iocp != NULL)
		return 0;
	base->iocp = event_iocp_port_launch_(n_cpus);
	if (base->iocp == NULL) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}
697 1.1 christos
/** Shut down 'base's IOCP port, if any.  No-op on non-Windows platforms or
 * when no port was started. */
void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
	int rv;

	if (base->iocp == NULL)
		return;
	rv = event_iocp_shutdown_(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}
711 1.1 christos
712 1.2 christos static int
713 1.2 christos event_base_cancel_single_callback_(struct event_base *base,
714 1.2 christos struct event_callback *evcb,
715 1.2 christos int run_finalizers)
716 1.2 christos {
717 1.2 christos int result = 0;
718 1.2 christos
719 1.2 christos if (evcb->evcb_flags & EVLIST_INIT) {
720 1.2 christos struct event *ev = event_callback_to_event(evcb);
721 1.2 christos if (!(ev->ev_flags & EVLIST_INTERNAL)) {
722 1.2 christos event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
723 1.2 christos result = 1;
724 1.2 christos }
725 1.2 christos } else {
726 1.2 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
727 1.2 christos event_callback_cancel_nolock_(base, evcb, 1);
728 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
729 1.2 christos result = 1;
730 1.2 christos }
731 1.2 christos
732 1.2 christos if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
733 1.2 christos switch (evcb->evcb_closure) {
734 1.2 christos case EV_CLOSURE_EVENT_FINALIZE:
735 1.2 christos case EV_CLOSURE_EVENT_FINALIZE_FREE: {
736 1.2 christos struct event *ev = event_callback_to_event(evcb);
737 1.2 christos ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
738 1.2 christos if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
739 1.2 christos mm_free(ev);
740 1.2 christos break;
741 1.2 christos }
742 1.2 christos case EV_CLOSURE_CB_FINALIZE:
743 1.2 christos evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
744 1.2 christos break;
745 1.2 christos default:
746 1.2 christos break;
747 1.2 christos }
748 1.2 christos }
749 1.2 christos return result;
750 1.2 christos }
751 1.2 christos
752 1.2 christos static void
753 1.2 christos event_base_free_(struct event_base *base, int run_finalizers)
754 1.1 christos {
755 1.1 christos int i, n_deleted=0;
756 1.1 christos struct event *ev;
757 1.1 christos /* XXXX grab the lock? If there is contention when one thread frees
758 1.1 christos * the base, then the contending thread will be very sad soon. */
759 1.1 christos
760 1.1 christos /* event_base_free(NULL) is how to free the current_base if we
761 1.1 christos * made it with event_init and forgot to hold a reference to it. */
762 1.1 christos if (base == NULL && current_base)
763 1.1 christos base = current_base;
764 1.1 christos /* Don't actually free NULL. */
765 1.1 christos if (base == NULL) {
766 1.1 christos event_warnx("%s: no base to free", __func__);
767 1.1 christos return;
768 1.1 christos }
769 1.1 christos /* XXX(niels) - check for internal events first */
770 1.1 christos
771 1.1 christos #ifdef _WIN32
772 1.1 christos event_base_stop_iocp_(base);
773 1.1 christos #endif
774 1.1 christos
775 1.1 christos /* threading fds if we have them */
776 1.1 christos if (base->th_notify_fd[0] != -1) {
777 1.1 christos event_del(&base->th_notify);
778 1.1 christos EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
779 1.1 christos if (base->th_notify_fd[1] != -1)
780 1.1 christos EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
781 1.1 christos base->th_notify_fd[0] = -1;
782 1.1 christos base->th_notify_fd[1] = -1;
783 1.1 christos event_debug_unassign(&base->th_notify);
784 1.1 christos }
785 1.1 christos
786 1.1 christos /* Delete all non-internal events. */
787 1.1 christos evmap_delete_all_(base);
788 1.1 christos
789 1.1 christos while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
790 1.1 christos event_del(ev);
791 1.1 christos ++n_deleted;
792 1.1 christos }
793 1.1 christos for (i = 0; i < base->n_common_timeouts; ++i) {
794 1.1 christos struct common_timeout_list *ctl =
795 1.1 christos base->common_timeout_queues[i];
796 1.1 christos event_del(&ctl->timeout_event); /* Internal; doesn't count */
797 1.1 christos event_debug_unassign(&ctl->timeout_event);
798 1.1 christos for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
799 1.1 christos struct event *next = TAILQ_NEXT(ev,
800 1.1 christos ev_timeout_pos.ev_next_with_common_timeout);
801 1.1 christos if (!(ev->ev_flags & EVLIST_INTERNAL)) {
802 1.1 christos event_del(ev);
803 1.1 christos ++n_deleted;
804 1.1 christos }
805 1.1 christos ev = next;
806 1.1 christos }
807 1.1 christos mm_free(ctl);
808 1.1 christos }
809 1.1 christos if (base->common_timeout_queues)
810 1.1 christos mm_free(base->common_timeout_queues);
811 1.1 christos
812 1.1 christos for (i = 0; i < base->nactivequeues; ++i) {
813 1.1 christos struct event_callback *evcb, *next;
814 1.1 christos for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
815 1.1 christos next = TAILQ_NEXT(evcb, evcb_active_next);
816 1.2 christos n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
817 1.1 christos evcb = next;
818 1.1 christos }
819 1.1 christos }
820 1.1 christos {
821 1.1 christos struct event_callback *evcb;
822 1.1 christos while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
823 1.2 christos n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
824 1.1 christos }
825 1.1 christos }
826 1.1 christos
827 1.1 christos
828 1.1 christos if (n_deleted)
829 1.1 christos event_debug(("%s: %d events were still set in base",
830 1.1 christos __func__, n_deleted));
831 1.1 christos
832 1.1 christos while (LIST_FIRST(&base->once_events)) {
833 1.1 christos struct event_once *eonce = LIST_FIRST(&base->once_events);
834 1.1 christos LIST_REMOVE(eonce, next_once);
835 1.1 christos mm_free(eonce);
836 1.1 christos }
837 1.1 christos
838 1.1 christos if (base->evsel != NULL && base->evsel->dealloc != NULL)
839 1.1 christos base->evsel->dealloc(base);
840 1.1 christos
841 1.1 christos for (i = 0; i < base->nactivequeues; ++i)
842 1.1 christos EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
843 1.1 christos
844 1.1 christos EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
845 1.1 christos min_heap_dtor_(&base->timeheap);
846 1.1 christos
847 1.1 christos mm_free(base->activequeues);
848 1.1 christos
849 1.1 christos evmap_io_clear_(&base->io);
850 1.1 christos evmap_signal_clear_(&base->sigmap);
851 1.1 christos event_changelist_freemem_(&base->changelist);
852 1.1 christos
853 1.1 christos EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
854 1.1 christos EVTHREAD_FREE_COND(base->current_event_cond);
855 1.1 christos
856 1.2 christos /* If we're freeing current_base, there won't be a current_base. */
857 1.2 christos if (base == current_base)
858 1.2 christos current_base = NULL;
859 1.1 christos mm_free(base);
860 1.1 christos }
861 1.1 christos
862 1.2 christos void
863 1.2 christos event_base_free_nofinalize(struct event_base *base)
864 1.2 christos {
865 1.2 christos event_base_free_(base, 0);
866 1.2 christos }
867 1.2 christos
868 1.2 christos void
869 1.2 christos event_base_free(struct event_base *base)
870 1.2 christos {
871 1.2 christos event_base_free_(base, 1);
872 1.2 christos }
873 1.2 christos
874 1.1 christos /* Fake eventop; used to disable the backend temporarily inside event_reinit
875 1.1 christos * so that we can call event_del() on an event without telling the backend.
876 1.1 christos */
877 1.1 christos static int
878 1.1 christos nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
879 1.1 christos short events, void *fdinfo)
880 1.1 christos {
881 1.1 christos return 0;
882 1.1 christos }
883 1.1 christos const struct eventop nil_eventop = {
884 1.1 christos "nil",
885 1.1 christos NULL, /* init: unused. */
886 1.1 christos NULL, /* add: unused. */
887 1.1 christos nil_backend_del, /* del: used, so needs to be killed. */
888 1.1 christos NULL, /* dispatch: unused. */
889 1.1 christos NULL, /* dealloc: unused. */
890 1.1 christos 0, 0, 0
891 1.1 christos };
892 1.1 christos
893 1.1 christos /* reinitialize the event base after a fork */
894 1.1 christos int
895 1.1 christos event_reinit(struct event_base *base)
896 1.1 christos {
897 1.1 christos const struct eventop *evsel;
898 1.1 christos int res = 0;
899 1.1 christos int was_notifiable = 0;
900 1.1 christos int had_signal_added = 0;
901 1.1 christos
902 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
903 1.1 christos
904 1.1 christos evsel = base->evsel;
905 1.1 christos
906 1.1 christos /* check if this event mechanism requires reinit on the backend */
907 1.1 christos if (evsel->need_reinit) {
908 1.1 christos /* We're going to call event_del() on our notify events (the
909 1.1 christos * ones that tell about signals and wakeup events). But we
910 1.1 christos * don't actually want to tell the backend to change its
911 1.1 christos * state, since it might still share some resource (a kqueue,
912 1.1 christos * an epoll fd) with the parent process, and we don't want to
913 1.1 christos * delete the fds from _that_ backend, we temporarily stub out
914 1.1 christos * the evsel with a replacement.
915 1.1 christos */
916 1.1 christos base->evsel = &nil_eventop;
917 1.1 christos }
918 1.1 christos
919 1.1 christos /* We need to re-create a new signal-notification fd and a new
920 1.1 christos * thread-notification fd. Otherwise, we'll still share those with
921 1.1 christos * the parent process, which would make any notification sent to them
922 1.1 christos * get received by one or both of the event loops, more or less at
923 1.1 christos * random.
924 1.1 christos */
925 1.1 christos if (base->sig.ev_signal_added) {
926 1.2 christos event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
927 1.1 christos event_debug_unassign(&base->sig.ev_signal);
928 1.1 christos memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
929 1.1 christos if (base->sig.ev_signal_pair[0] != -1)
930 1.1 christos EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
931 1.1 christos if (base->sig.ev_signal_pair[1] != -1)
932 1.1 christos EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
933 1.1 christos had_signal_added = 1;
934 1.1 christos base->sig.ev_signal_added = 0;
935 1.1 christos }
936 1.1 christos if (base->th_notify_fn != NULL) {
937 1.1 christos was_notifiable = 1;
938 1.1 christos base->th_notify_fn = NULL;
939 1.1 christos }
940 1.1 christos if (base->th_notify_fd[0] != -1) {
941 1.2 christos event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
942 1.1 christos EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
943 1.1 christos if (base->th_notify_fd[1] != -1)
944 1.1 christos EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
945 1.1 christos base->th_notify_fd[0] = -1;
946 1.1 christos base->th_notify_fd[1] = -1;
947 1.1 christos event_debug_unassign(&base->th_notify);
948 1.1 christos }
949 1.1 christos
950 1.1 christos /* Replace the original evsel. */
951 1.1 christos base->evsel = evsel;
952 1.1 christos
953 1.1 christos if (evsel->need_reinit) {
954 1.1 christos /* Reconstruct the backend through brute-force, so that we do
955 1.1 christos * not share any structures with the parent process. For some
956 1.1 christos * backends, this is necessary: epoll and kqueue, for
957 1.1 christos * instance, have events associated with a kernel
958 1.1 christos * structure. If didn't reinitialize, we'd share that
959 1.1 christos * structure with the parent process, and any changes made by
960 1.1 christos * the parent would affect our backend's behavior (and vice
961 1.1 christos * versa).
962 1.1 christos */
963 1.1 christos if (base->evsel->dealloc != NULL)
964 1.1 christos base->evsel->dealloc(base);
965 1.1 christos base->evbase = evsel->init(base);
966 1.1 christos if (base->evbase == NULL) {
967 1.1 christos event_errx(1,
968 1.1 christos "%s: could not reinitialize event mechanism",
969 1.1 christos __func__);
970 1.1 christos res = -1;
971 1.1 christos goto done;
972 1.1 christos }
973 1.1 christos
974 1.1 christos /* Empty out the changelist (if any): we are starting from a
975 1.1 christos * blank slate. */
976 1.1 christos event_changelist_freemem_(&base->changelist);
977 1.1 christos
978 1.1 christos /* Tell the event maps to re-inform the backend about all
979 1.1 christos * pending events. This will make the signal notification
980 1.1 christos * event get re-created if necessary. */
981 1.1 christos if (evmap_reinit_(base) < 0)
982 1.1 christos res = -1;
983 1.1 christos } else {
984 1.1 christos if (had_signal_added)
985 1.1 christos res = evsig_init_(base);
986 1.1 christos }
987 1.1 christos
988 1.1 christos /* If we were notifiable before, and nothing just exploded, become
989 1.1 christos * notifiable again. */
990 1.1 christos if (was_notifiable && res == 0)
991 1.1 christos res = evthread_make_base_notifiable_nolock_(base);
992 1.1 christos
993 1.1 christos done:
994 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
995 1.1 christos return (res);
996 1.1 christos }
997 1.1 christos
998 1.1 christos const char **
999 1.1 christos event_get_supported_methods(void)
1000 1.1 christos {
1001 1.1 christos static const char **methods = NULL;
1002 1.1 christos const struct eventop **method;
1003 1.1 christos const char **tmp;
1004 1.1 christos int i = 0, k;
1005 1.1 christos
1006 1.1 christos /* count all methods */
1007 1.1 christos for (method = &eventops[0]; *method != NULL; ++method) {
1008 1.1 christos ++i;
1009 1.1 christos }
1010 1.1 christos
1011 1.1 christos /* allocate one more than we need for the NULL pointer */
1012 1.1 christos tmp = mm_calloc((i + 1), sizeof(char *));
1013 1.1 christos if (tmp == NULL)
1014 1.1 christos return (NULL);
1015 1.1 christos
1016 1.1 christos /* populate the array with the supported methods */
1017 1.1 christos for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1018 1.1 christos tmp[i++] = eventops[k]->name;
1019 1.1 christos }
1020 1.1 christos tmp[i] = NULL;
1021 1.1 christos
1022 1.1 christos if (methods != NULL)
1023 1.1 christos mm_free((char**)methods);
1024 1.1 christos
1025 1.1 christos methods = tmp;
1026 1.1 christos
1027 1.1 christos return (methods);
1028 1.1 christos }
1029 1.1 christos
1030 1.1 christos struct event_config *
1031 1.1 christos event_config_new(void)
1032 1.1 christos {
1033 1.1 christos struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1034 1.1 christos
1035 1.1 christos if (cfg == NULL)
1036 1.1 christos return (NULL);
1037 1.1 christos
1038 1.1 christos TAILQ_INIT(&cfg->entries);
1039 1.1 christos cfg->max_dispatch_interval.tv_sec = -1;
1040 1.1 christos cfg->max_dispatch_callbacks = INT_MAX;
1041 1.1 christos cfg->limit_callbacks_after_prio = 1;
1042 1.1 christos
1043 1.1 christos return (cfg);
1044 1.1 christos }
1045 1.1 christos
1046 1.1 christos static void
1047 1.1 christos event_config_entry_free(struct event_config_entry *entry)
1048 1.1 christos {
1049 1.1 christos if (entry->avoid_method != NULL)
1050 1.1 christos mm_free((char *)entry->avoid_method);
1051 1.1 christos mm_free(entry);
1052 1.1 christos }
1053 1.1 christos
1054 1.1 christos void
1055 1.1 christos event_config_free(struct event_config *cfg)
1056 1.1 christos {
1057 1.1 christos struct event_config_entry *entry;
1058 1.1 christos
1059 1.1 christos while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1060 1.1 christos TAILQ_REMOVE(&cfg->entries, entry, next);
1061 1.1 christos event_config_entry_free(entry);
1062 1.1 christos }
1063 1.1 christos mm_free(cfg);
1064 1.1 christos }
1065 1.1 christos
1066 1.1 christos int
1067 1.1 christos event_config_set_flag(struct event_config *cfg, int flag)
1068 1.1 christos {
1069 1.1 christos if (!cfg)
1070 1.1 christos return -1;
1071 1.1 christos cfg->flags |= flag;
1072 1.1 christos return 0;
1073 1.1 christos }
1074 1.1 christos
1075 1.1 christos int
1076 1.1 christos event_config_avoid_method(struct event_config *cfg, const char *method)
1077 1.1 christos {
1078 1.1 christos struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1079 1.1 christos if (entry == NULL)
1080 1.1 christos return (-1);
1081 1.1 christos
1082 1.1 christos if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1083 1.1 christos mm_free(entry);
1084 1.1 christos return (-1);
1085 1.1 christos }
1086 1.1 christos
1087 1.1 christos TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1088 1.1 christos
1089 1.1 christos return (0);
1090 1.1 christos }
1091 1.1 christos
1092 1.1 christos int
1093 1.1 christos event_config_require_features(struct event_config *cfg,
1094 1.1 christos int features)
1095 1.1 christos {
1096 1.1 christos if (!cfg)
1097 1.1 christos return (-1);
1098 1.1 christos cfg->require_features = features;
1099 1.1 christos return (0);
1100 1.1 christos }
1101 1.1 christos
1102 1.1 christos int
1103 1.1 christos event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1104 1.1 christos {
1105 1.1 christos if (!cfg)
1106 1.1 christos return (-1);
1107 1.1 christos cfg->n_cpus_hint = cpus;
1108 1.1 christos return (0);
1109 1.1 christos }
1110 1.1 christos
1111 1.1 christos int
1112 1.1 christos event_config_set_max_dispatch_interval(struct event_config *cfg,
1113 1.1 christos const struct timeval *max_interval, int max_callbacks, int min_priority)
1114 1.1 christos {
1115 1.1 christos if (max_interval)
1116 1.1 christos memcpy(&cfg->max_dispatch_interval, max_interval,
1117 1.1 christos sizeof(struct timeval));
1118 1.1 christos else
1119 1.1 christos cfg->max_dispatch_interval.tv_sec = -1;
1120 1.1 christos cfg->max_dispatch_callbacks =
1121 1.1 christos max_callbacks >= 0 ? max_callbacks : INT_MAX;
1122 1.1 christos if (min_priority < 0)
1123 1.1 christos min_priority = 0;
1124 1.1 christos cfg->limit_callbacks_after_prio = min_priority;
1125 1.1 christos return (0);
1126 1.1 christos }
1127 1.1 christos
1128 1.1 christos int
1129 1.1 christos event_priority_init(int npriorities)
1130 1.1 christos {
1131 1.1 christos return event_base_priority_init(current_base, npriorities);
1132 1.1 christos }
1133 1.1 christos
1134 1.1 christos int
1135 1.1 christos event_base_priority_init(struct event_base *base, int npriorities)
1136 1.1 christos {
1137 1.1 christos int i, r;
1138 1.1 christos r = -1;
1139 1.1 christos
1140 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1141 1.1 christos
1142 1.1 christos if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1143 1.1 christos || npriorities >= EVENT_MAX_PRIORITIES)
1144 1.1 christos goto err;
1145 1.1 christos
1146 1.1 christos if (npriorities == base->nactivequeues)
1147 1.1 christos goto ok;
1148 1.1 christos
1149 1.1 christos if (base->nactivequeues) {
1150 1.1 christos mm_free(base->activequeues);
1151 1.1 christos base->nactivequeues = 0;
1152 1.1 christos }
1153 1.1 christos
1154 1.1 christos /* Allocate our priority queues */
1155 1.1 christos base->activequeues = (struct evcallback_list *)
1156 1.1 christos mm_calloc(npriorities, sizeof(struct evcallback_list));
1157 1.1 christos if (base->activequeues == NULL) {
1158 1.1 christos event_warn("%s: calloc", __func__);
1159 1.1 christos goto err;
1160 1.1 christos }
1161 1.1 christos base->nactivequeues = npriorities;
1162 1.1 christos
1163 1.1 christos for (i = 0; i < base->nactivequeues; ++i) {
1164 1.1 christos TAILQ_INIT(&base->activequeues[i]);
1165 1.1 christos }
1166 1.1 christos
1167 1.1 christos ok:
1168 1.1 christos r = 0;
1169 1.1 christos err:
1170 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1171 1.1 christos return (r);
1172 1.1 christos }
1173 1.1 christos
1174 1.1 christos int
1175 1.1 christos event_base_get_npriorities(struct event_base *base)
1176 1.1 christos {
1177 1.1 christos
1178 1.1 christos int n;
1179 1.1 christos if (base == NULL)
1180 1.1 christos base = current_base;
1181 1.1 christos
1182 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1183 1.1 christos n = base->nactivequeues;
1184 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1185 1.1 christos return (n);
1186 1.1 christos }
1187 1.1 christos
1188 1.2 christos int
1189 1.2 christos event_base_get_num_events(struct event_base *base, unsigned int type)
1190 1.2 christos {
1191 1.2 christos int r = 0;
1192 1.2 christos
1193 1.2 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1194 1.2 christos
1195 1.2 christos if (type & EVENT_BASE_COUNT_ACTIVE)
1196 1.2 christos r += base->event_count_active;
1197 1.2 christos
1198 1.2 christos if (type & EVENT_BASE_COUNT_VIRTUAL)
1199 1.2 christos r += base->virtual_event_count;
1200 1.2 christos
1201 1.2 christos if (type & EVENT_BASE_COUNT_ADDED)
1202 1.2 christos r += base->event_count;
1203 1.2 christos
1204 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1205 1.2 christos
1206 1.2 christos return r;
1207 1.2 christos }
1208 1.2 christos
1209 1.2 christos int
1210 1.2 christos event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1211 1.2 christos {
1212 1.2 christos int r = 0;
1213 1.2 christos
1214 1.2 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1215 1.2 christos
1216 1.2 christos if (type & EVENT_BASE_COUNT_ACTIVE) {
1217 1.2 christos r += base->event_count_active_max;
1218 1.2 christos if (clear)
1219 1.2 christos base->event_count_active_max = 0;
1220 1.2 christos }
1221 1.2 christos
1222 1.2 christos if (type & EVENT_BASE_COUNT_VIRTUAL) {
1223 1.2 christos r += base->virtual_event_count_max;
1224 1.2 christos if (clear)
1225 1.2 christos base->virtual_event_count_max = 0;
1226 1.2 christos }
1227 1.2 christos
1228 1.2 christos if (type & EVENT_BASE_COUNT_ADDED) {
1229 1.2 christos r += base->event_count_max;
1230 1.2 christos if (clear)
1231 1.2 christos base->event_count_max = 0;
1232 1.2 christos }
1233 1.2 christos
1234 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1235 1.2 christos
1236 1.2 christos return r;
1237 1.2 christos }
1238 1.2 christos
1239 1.1 christos /* Returns true iff we're currently watching any events. */
1240 1.1 christos static int
1241 1.1 christos event_haveevents(struct event_base *base)
1242 1.1 christos {
1243 1.1 christos /* Caller must hold th_base_lock */
1244 1.1 christos return (base->virtual_event_count > 0 || base->event_count > 0);
1245 1.1 christos }
1246 1.1 christos
1247 1.1 christos /* "closure" function called when processing active signal events */
1248 1.1 christos static inline void
1249 1.1 christos event_signal_closure(struct event_base *base, struct event *ev)
1250 1.1 christos {
1251 1.1 christos short ncalls;
1252 1.1 christos int should_break;
1253 1.1 christos
1254 1.1 christos /* Allows deletes to work */
1255 1.1 christos ncalls = ev->ev_ncalls;
1256 1.1 christos if (ncalls != 0)
1257 1.1 christos ev->ev_pncalls = &ncalls;
1258 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1259 1.1 christos while (ncalls) {
1260 1.1 christos ncalls--;
1261 1.1 christos ev->ev_ncalls = ncalls;
1262 1.1 christos if (ncalls == 0)
1263 1.1 christos ev->ev_pncalls = NULL;
1264 1.1 christos (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1265 1.1 christos
1266 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1267 1.1 christos should_break = base->event_break;
1268 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1269 1.1 christos
1270 1.1 christos if (should_break) {
1271 1.1 christos if (ncalls != 0)
1272 1.1 christos ev->ev_pncalls = NULL;
1273 1.1 christos return;
1274 1.1 christos }
1275 1.1 christos }
1276 1.1 christos }
1277 1.1 christos
1278 1.1 christos /* Common timeouts are special timeouts that are handled as queues rather than
1279 1.1 christos * in the minheap. This is more efficient than the minheap if we happen to
1280 1.1 christos * know that we're going to get several thousands of timeout events all with
1281 1.1 christos * the same timeout value.
1282 1.1 christos *
1283 1.1 christos * Since all our timeout handling code assumes timevals can be copied,
1284 1.1 christos * assigned, etc, we can't use "magic pointer" to encode these common
1285 1.1 christos * timeouts. Searching through a list to see if every timeout is common could
1286 1.1 christos * also get inefficient. Instead, we take advantage of the fact that tv_usec
1287 1.1 christos * is 32 bits long, but only uses 20 of those bits (since it can never be over
1288 1.1 christos * 999999.) We use the top bits to encode 4 bites of magic number, and 8 bits
1289 1.1 christos * of index into the event_base's aray of common timeouts.
1290 1.1 christos */
1291 1.1 christos
1292 1.1 christos #define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
1293 1.1 christos #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1294 1.1 christos #define COMMON_TIMEOUT_IDX_SHIFT 20
1295 1.1 christos #define COMMON_TIMEOUT_MASK 0xf0000000
1296 1.1 christos #define COMMON_TIMEOUT_MAGIC 0x50000000
1297 1.1 christos
1298 1.1 christos #define COMMON_TIMEOUT_IDX(tv) \
1299 1.1 christos (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
1300 1.1 christos
1301 1.1 christos /** Return true iff if 'tv' is a common timeout in 'base' */
1302 1.1 christos static inline int
1303 1.1 christos is_common_timeout(const struct timeval *tv,
1304 1.1 christos const struct event_base *base)
1305 1.1 christos {
1306 1.1 christos int idx;
1307 1.1 christos if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1308 1.1 christos return 0;
1309 1.1 christos idx = COMMON_TIMEOUT_IDX(tv);
1310 1.1 christos return idx < base->n_common_timeouts;
1311 1.1 christos }
1312 1.1 christos
1313 1.1 christos /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1314 1.1 christos * one is a common timeout. */
1315 1.1 christos static inline int
1316 1.1 christos is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1317 1.1 christos {
1318 1.1 christos return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1319 1.1 christos (tv2->tv_usec & ~MICROSECONDS_MASK);
1320 1.1 christos }
1321 1.1 christos
1322 1.1 christos /** Requires that 'tv' is a common timeout. Return the corresponding
1323 1.1 christos * common_timeout_list. */
1324 1.1 christos static inline struct common_timeout_list *
1325 1.1 christos get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1326 1.1 christos {
1327 1.1 christos return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1328 1.1 christos }
1329 1.1 christos
1330 1.1 christos #if 0
1331 1.1 christos static inline int
1332 1.1 christos common_timeout_ok(const struct timeval *tv,
1333 1.1 christos struct event_base *base)
1334 1.1 christos {
1335 1.1 christos const struct timeval *expect =
1336 1.1 christos &get_common_timeout_list(base, tv)->duration;
1337 1.1 christos return tv->tv_sec == expect->tv_sec &&
1338 1.1 christos tv->tv_usec == expect->tv_usec;
1339 1.1 christos }
1340 1.1 christos #endif
1341 1.1 christos
1342 1.1 christos /* Add the timeout for the first event in given common timeout list to the
1343 1.1 christos * event_base's minheap. */
1344 1.1 christos static void
1345 1.1 christos common_timeout_schedule(struct common_timeout_list *ctl,
1346 1.1 christos const struct timeval *now, struct event *head)
1347 1.1 christos {
1348 1.1 christos struct timeval timeout = head->ev_timeout;
1349 1.1 christos timeout.tv_usec &= MICROSECONDS_MASK;
1350 1.1 christos event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1351 1.1 christos }
1352 1.1 christos
1353 1.1 christos /* Callback: invoked when the timeout for a common timeout queue triggers.
1354 1.1 christos * This means that (at least) the first event in that queue should be run,
1355 1.1 christos * and the timeout should be rescheduled if there are more events. */
1356 1.1 christos static void
1357 1.1 christos common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1358 1.1 christos {
1359 1.1 christos struct timeval now;
1360 1.1 christos struct common_timeout_list *ctl = arg;
1361 1.1 christos struct event_base *base = ctl->base;
1362 1.1 christos struct event *ev = NULL;
1363 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1364 1.1 christos gettime(base, &now);
1365 1.1 christos while (1) {
1366 1.1 christos ev = TAILQ_FIRST(&ctl->events);
1367 1.1 christos if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1368 1.1 christos (ev->ev_timeout.tv_sec == now.tv_sec &&
1369 1.1 christos (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1370 1.1 christos break;
1371 1.2 christos event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1372 1.1 christos event_active_nolock_(ev, EV_TIMEOUT, 1);
1373 1.1 christos }
1374 1.1 christos if (ev)
1375 1.1 christos common_timeout_schedule(ctl, &now, ev);
1376 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1377 1.1 christos }
1378 1.1 christos
1379 1.1 christos #define MAX_COMMON_TIMEOUTS 256
1380 1.1 christos
1381 1.1 christos const struct timeval *
1382 1.1 christos event_base_init_common_timeout(struct event_base *base,
1383 1.1 christos const struct timeval *duration)
1384 1.1 christos {
1385 1.1 christos int i;
1386 1.1 christos struct timeval tv;
1387 1.1 christos const struct timeval *result=NULL;
1388 1.1 christos struct common_timeout_list *new_ctl;
1389 1.1 christos
1390 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1391 1.1 christos if (duration->tv_usec > 1000000) {
1392 1.1 christos memcpy(&tv, duration, sizeof(struct timeval));
1393 1.1 christos if (is_common_timeout(duration, base))
1394 1.1 christos tv.tv_usec &= MICROSECONDS_MASK;
1395 1.1 christos tv.tv_sec += tv.tv_usec / 1000000;
1396 1.1 christos tv.tv_usec %= 1000000;
1397 1.1 christos duration = &tv;
1398 1.1 christos }
1399 1.1 christos for (i = 0; i < base->n_common_timeouts; ++i) {
1400 1.1 christos const struct common_timeout_list *ctl =
1401 1.1 christos base->common_timeout_queues[i];
1402 1.1 christos if (duration->tv_sec == ctl->duration.tv_sec &&
1403 1.1 christos duration->tv_usec ==
1404 1.1 christos (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1405 1.1 christos EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1406 1.1 christos result = &ctl->duration;
1407 1.1 christos goto done;
1408 1.1 christos }
1409 1.1 christos }
1410 1.1 christos if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1411 1.1 christos event_warnx("%s: Too many common timeouts already in use; "
1412 1.1 christos "we only support %d per event_base", __func__,
1413 1.1 christos MAX_COMMON_TIMEOUTS);
1414 1.1 christos goto done;
1415 1.1 christos }
1416 1.1 christos if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1417 1.1 christos int n = base->n_common_timeouts < 16 ? 16 :
1418 1.1 christos base->n_common_timeouts*2;
1419 1.1 christos struct common_timeout_list **newqueues =
1420 1.1 christos mm_realloc(base->common_timeout_queues,
1421 1.1 christos n*sizeof(struct common_timeout_queue *));
1422 1.1 christos if (!newqueues) {
1423 1.1 christos event_warn("%s: realloc",__func__);
1424 1.1 christos goto done;
1425 1.1 christos }
1426 1.1 christos base->n_common_timeouts_allocated = n;
1427 1.1 christos base->common_timeout_queues = newqueues;
1428 1.1 christos }
1429 1.1 christos new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1430 1.1 christos if (!new_ctl) {
1431 1.1 christos event_warn("%s: calloc",__func__);
1432 1.1 christos goto done;
1433 1.1 christos }
1434 1.1 christos TAILQ_INIT(&new_ctl->events);
1435 1.1 christos new_ctl->duration.tv_sec = duration->tv_sec;
1436 1.1 christos new_ctl->duration.tv_usec =
1437 1.1 christos duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1438 1.1 christos (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1439 1.1 christos evtimer_assign(&new_ctl->timeout_event, base,
1440 1.1 christos common_timeout_callback, new_ctl);
1441 1.1 christos new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1442 1.1 christos event_priority_set(&new_ctl->timeout_event, 0);
1443 1.1 christos new_ctl->base = base;
1444 1.1 christos base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1445 1.1 christos result = &new_ctl->duration;
1446 1.1 christos
1447 1.1 christos done:
1448 1.1 christos if (result)
1449 1.1 christos EVUTIL_ASSERT(is_common_timeout(result, base));
1450 1.1 christos
1451 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1452 1.1 christos return result;
1453 1.1 christos }
1454 1.1 christos
1455 1.1 christos /* Closure function invoked when we're activating a persistent event. */
1456 1.1 christos static inline void
1457 1.1 christos event_persist_closure(struct event_base *base, struct event *ev)
1458 1.1 christos {
1459 1.2 christos
1460 1.2 christos // Define our callback, we use this to store our callback before it's executed
1461 1.2 christos void (*evcb_callback)(evutil_socket_t, short, void *);
1462 1.2 christos
1463 1.1 christos /* reschedule the persistent event if we have a timeout. */
1464 1.1 christos if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1465 1.1 christos /* If there was a timeout, we want it to run at an interval of
1466 1.1 christos * ev_io_timeout after the last time it was _scheduled_ for,
1467 1.1 christos * not ev_io_timeout after _now_. If it fired for another
1468 1.1 christos * reason, though, the timeout ought to start ticking _now_. */
1469 1.1 christos struct timeval run_at, relative_to, delay, now;
1470 1.1 christos ev_uint32_t usec_mask = 0;
1471 1.1 christos EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1472 1.1 christos &ev->ev_io_timeout));
1473 1.1 christos gettime(base, &now);
1474 1.1 christos if (is_common_timeout(&ev->ev_timeout, base)) {
1475 1.1 christos delay = ev->ev_io_timeout;
1476 1.1 christos usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1477 1.1 christos delay.tv_usec &= MICROSECONDS_MASK;
1478 1.1 christos if (ev->ev_res & EV_TIMEOUT) {
1479 1.1 christos relative_to = ev->ev_timeout;
1480 1.1 christos relative_to.tv_usec &= MICROSECONDS_MASK;
1481 1.1 christos } else {
1482 1.1 christos relative_to = now;
1483 1.1 christos }
1484 1.1 christos } else {
1485 1.1 christos delay = ev->ev_io_timeout;
1486 1.1 christos if (ev->ev_res & EV_TIMEOUT) {
1487 1.1 christos relative_to = ev->ev_timeout;
1488 1.1 christos } else {
1489 1.1 christos relative_to = now;
1490 1.1 christos }
1491 1.1 christos }
1492 1.1 christos evutil_timeradd(&relative_to, &delay, &run_at);
1493 1.1 christos if (evutil_timercmp(&run_at, &now, <)) {
1494 1.1 christos /* Looks like we missed at least one invocation due to
1495 1.1 christos * a clock jump, not running the event loop for a
1496 1.1 christos * while, really slow callbacks, or
1497 1.1 christos * something. Reschedule relative to now.
1498 1.1 christos */
1499 1.1 christos evutil_timeradd(&now, &delay, &run_at);
1500 1.1 christos }
1501 1.1 christos run_at.tv_usec |= usec_mask;
1502 1.1 christos event_add_nolock_(ev, &run_at, 1);
1503 1.1 christos }
1504 1.2 christos
1505 1.2 christos // Save our callback before we release the lock
1506 1.2 christos evcb_callback = *ev->ev_callback;
1507 1.2 christos
1508 1.2 christos // Release the lock
1509 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1510 1.2 christos
1511 1.2 christos // Execute the callback
1512 1.2 christos (evcb_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1513 1.1 christos }
1514 1.1 christos
1515 1.1 christos /*
1516 1.1 christos Helper for event_process_active to process all the events in a single queue,
1517 1.1 christos releasing the lock as we go. This function requires that the lock be held
1518 1.1 christos when it's invoked. Returns -1 if we get a signal or an event_break that
1519 1.1 christos means we should stop processing any active events now. Otherwise returns
1520 1.1 christos the number of non-internal event_callbacks that we processed.
1521 1.1 christos */
1522 1.1 christos static int
1523 1.1 christos event_process_active_single_queue(struct event_base *base,
1524 1.1 christos struct evcallback_list *activeq,
1525 1.1 christos int max_to_process, const struct timeval *endtime)
1526 1.1 christos {
1527 1.1 christos struct event_callback *evcb;
1528 1.1 christos int count = 0;
1529 1.1 christos
1530 1.1 christos EVUTIL_ASSERT(activeq != NULL);
1531 1.1 christos
1532 1.1 christos for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1533 1.1 christos struct event *ev=NULL;
1534 1.1 christos if (evcb->evcb_flags & EVLIST_INIT) {
1535 1.1 christos ev = event_callback_to_event(evcb);
1536 1.1 christos
1537 1.2 christos if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1538 1.1 christos event_queue_remove_active(base, evcb);
1539 1.1 christos else
1540 1.2 christos event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1541 1.1 christos event_debug((
1542 1.2 christos "event_process_active: event: %p, %s%s%scall %p",
1543 1.1 christos ev,
1544 1.1 christos ev->ev_res & EV_READ ? "EV_READ " : " ",
1545 1.1 christos ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1546 1.2 christos ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1547 1.1 christos ev->ev_callback));
1548 1.1 christos } else {
1549 1.1 christos event_queue_remove_active(base, evcb);
1550 1.1 christos event_debug(("event_process_active: event_callback %p, "
1551 1.1 christos "closure %d, call %p",
1552 1.1 christos evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1553 1.1 christos }
1554 1.1 christos
1555 1.1 christos if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1556 1.1 christos ++count;
1557 1.1 christos
1558 1.1 christos
1559 1.1 christos base->current_event = evcb;
1560 1.1 christos #ifndef EVENT__DISABLE_THREAD_SUPPORT
1561 1.1 christos base->current_event_waiters = 0;
1562 1.1 christos #endif
1563 1.1 christos
1564 1.1 christos switch (evcb->evcb_closure) {
1565 1.1 christos case EV_CLOSURE_EVENT_SIGNAL:
1566 1.2 christos EVUTIL_ASSERT(ev != NULL);
1567 1.1 christos event_signal_closure(base, ev);
1568 1.1 christos break;
1569 1.1 christos case EV_CLOSURE_EVENT_PERSIST:
1570 1.2 christos EVUTIL_ASSERT(ev != NULL);
1571 1.1 christos event_persist_closure(base, ev);
1572 1.1 christos break;
1573 1.2 christos case EV_CLOSURE_EVENT: {
1574 1.2 christos EVUTIL_ASSERT(ev != NULL);
1575 1.2 christos void (*evcb_callback)(evutil_socket_t, short, void *) = *ev->ev_callback;
1576 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1577 1.2 christos evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
1578 1.2 christos }
1579 1.2 christos break;
1580 1.2 christos case EV_CLOSURE_CB_SELF: {
1581 1.2 christos void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1582 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1583 1.2 christos evcb_selfcb(evcb, evcb->evcb_arg);
1584 1.2 christos }
1585 1.2 christos break;
1586 1.2 christos case EV_CLOSURE_EVENT_FINALIZE:
1587 1.2 christos case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1588 1.2 christos EVUTIL_ASSERT(ev != NULL);
1589 1.2 christos void (*evcb_evfinalize)(struct event *, void *) = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1590 1.2 christos base->current_event = NULL;
1591 1.2 christos EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1592 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1593 1.2 christos evcb_evfinalize(ev, ev->ev_arg);
1594 1.2 christos event_debug_note_teardown_(ev);
1595 1.2 christos if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1596 1.2 christos mm_free(ev);
1597 1.2 christos }
1598 1.2 christos break;
1599 1.2 christos case EV_CLOSURE_CB_FINALIZE: {
1600 1.2 christos void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1601 1.2 christos base->current_event = NULL;
1602 1.2 christos EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1603 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1604 1.2 christos evcb_cbfinalize(evcb, evcb->evcb_arg);
1605 1.2 christos }
1606 1.2 christos break;
1607 1.1 christos default:
1608 1.1 christos EVUTIL_ASSERT(0);
1609 1.1 christos }
1610 1.1 christos
1611 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1612 1.1 christos base->current_event = NULL;
1613 1.1 christos #ifndef EVENT__DISABLE_THREAD_SUPPORT
1614 1.1 christos if (base->current_event_waiters) {
1615 1.1 christos base->current_event_waiters = 0;
1616 1.1 christos EVTHREAD_COND_BROADCAST(base->current_event_cond);
1617 1.1 christos }
1618 1.1 christos #endif
1619 1.1 christos
1620 1.1 christos if (base->event_break)
1621 1.1 christos return -1;
1622 1.1 christos if (count >= max_to_process)
1623 1.1 christos return count;
1624 1.1 christos if (count && endtime) {
1625 1.1 christos struct timeval now;
1626 1.1 christos update_time_cache(base);
1627 1.1 christos gettime(base, &now);
1628 1.1 christos if (evutil_timercmp(&now, endtime, >=))
1629 1.1 christos return count;
1630 1.1 christos }
1631 1.1 christos if (base->event_continue)
1632 1.1 christos break;
1633 1.1 christos }
1634 1.1 christos return count;
1635 1.1 christos }
1636 1.1 christos
1637 1.1 christos /*
1638 1.1 christos * Active events are stored in priority queues. Lower priorities are always
1639 1.1 christos * process before higher priorities. Low priority events can starve high
1640 1.1 christos * priority ones.
1641 1.1 christos */
1642 1.1 christos
1643 1.1 christos static int
1644 1.1 christos event_process_active(struct event_base *base)
1645 1.1 christos {
1646 1.1 christos /* Caller must hold th_base_lock */
1647 1.1 christos struct evcallback_list *activeq = NULL;
1648 1.1 christos int i, c = 0;
1649 1.1 christos const struct timeval *endtime;
1650 1.1 christos struct timeval tv;
1651 1.1 christos const int maxcb = base->max_dispatch_callbacks;
1652 1.1 christos const int limit_after_prio = base->limit_callbacks_after_prio;
1653 1.1 christos if (base->max_dispatch_time.tv_sec >= 0) {
1654 1.1 christos update_time_cache(base);
1655 1.1 christos gettime(base, &tv);
1656 1.1 christos evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1657 1.1 christos endtime = &tv;
1658 1.1 christos } else {
1659 1.1 christos endtime = NULL;
1660 1.1 christos }
1661 1.1 christos
1662 1.1 christos for (i = 0; i < base->nactivequeues; ++i) {
1663 1.1 christos if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1664 1.1 christos base->event_running_priority = i;
1665 1.1 christos activeq = &base->activequeues[i];
1666 1.1 christos if (i < limit_after_prio)
1667 1.1 christos c = event_process_active_single_queue(base, activeq,
1668 1.1 christos INT_MAX, NULL);
1669 1.1 christos else
1670 1.1 christos c = event_process_active_single_queue(base, activeq,
1671 1.1 christos maxcb, endtime);
1672 1.1 christos if (c < 0) {
1673 1.1 christos goto done;
1674 1.1 christos } else if (c > 0)
1675 1.1 christos break; /* Processed a real event; do not
1676 1.1 christos * consider lower-priority events */
1677 1.1 christos /* If we get here, all of the events we processed
1678 1.1 christos * were internal. Continue. */
1679 1.1 christos }
1680 1.1 christos }
1681 1.1 christos
1682 1.1 christos done:
1683 1.1 christos base->event_running_priority = -1;
1684 1.1 christos
1685 1.1 christos return c;
1686 1.1 christos }
1687 1.1 christos
1688 1.1 christos /*
1689 1.1 christos * Wait continuously for events. We exit only if no events are left.
1690 1.1 christos */
1691 1.1 christos
1692 1.1 christos int
1693 1.1 christos event_dispatch(void)
1694 1.1 christos {
1695 1.1 christos return (event_loop(0));
1696 1.1 christos }
1697 1.1 christos
1698 1.1 christos int
1699 1.1 christos event_base_dispatch(struct event_base *event_base)
1700 1.1 christos {
1701 1.1 christos return (event_base_loop(event_base, 0));
1702 1.1 christos }
1703 1.1 christos
1704 1.1 christos const char *
1705 1.1 christos event_base_get_method(const struct event_base *base)
1706 1.1 christos {
1707 1.1 christos EVUTIL_ASSERT(base);
1708 1.1 christos return (base->evsel->name);
1709 1.1 christos }
1710 1.1 christos
1711 1.1 christos /** Callback: used to implement event_base_loopexit by telling the event_base
1712 1.1 christos * that it's time to exit its loop. */
1713 1.1 christos static void
1714 1.1 christos event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1715 1.1 christos {
1716 1.1 christos struct event_base *base = arg;
1717 1.1 christos base->event_gotterm = 1;
1718 1.1 christos }
1719 1.1 christos
1720 1.1 christos int
1721 1.1 christos event_loopexit(const struct timeval *tv)
1722 1.1 christos {
1723 1.1 christos return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1724 1.1 christos current_base, tv));
1725 1.1 christos }
1726 1.1 christos
1727 1.1 christos int
1728 1.1 christos event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1729 1.1 christos {
1730 1.1 christos return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1731 1.1 christos event_base, tv));
1732 1.1 christos }
1733 1.1 christos
1734 1.1 christos int
1735 1.1 christos event_loopbreak(void)
1736 1.1 christos {
1737 1.1 christos return (event_base_loopbreak(current_base));
1738 1.1 christos }
1739 1.1 christos
1740 1.1 christos int
1741 1.1 christos event_base_loopbreak(struct event_base *event_base)
1742 1.1 christos {
1743 1.1 christos int r = 0;
1744 1.1 christos if (event_base == NULL)
1745 1.1 christos return (-1);
1746 1.1 christos
1747 1.1 christos EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1748 1.1 christos event_base->event_break = 1;
1749 1.1 christos
1750 1.1 christos if (EVBASE_NEED_NOTIFY(event_base)) {
1751 1.1 christos r = evthread_notify_base(event_base);
1752 1.1 christos } else {
1753 1.1 christos r = (0);
1754 1.1 christos }
1755 1.1 christos EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1756 1.1 christos return r;
1757 1.1 christos }
1758 1.1 christos
1759 1.1 christos int
1760 1.1 christos event_base_loopcontinue(struct event_base *event_base)
1761 1.1 christos {
1762 1.1 christos int r = 0;
1763 1.1 christos if (event_base == NULL)
1764 1.1 christos return (-1);
1765 1.1 christos
1766 1.1 christos EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1767 1.1 christos event_base->event_continue = 1;
1768 1.1 christos
1769 1.1 christos if (EVBASE_NEED_NOTIFY(event_base)) {
1770 1.1 christos r = evthread_notify_base(event_base);
1771 1.1 christos } else {
1772 1.1 christos r = (0);
1773 1.1 christos }
1774 1.1 christos EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1775 1.1 christos return r;
1776 1.1 christos }
1777 1.1 christos
1778 1.1 christos int
1779 1.1 christos event_base_got_break(struct event_base *event_base)
1780 1.1 christos {
1781 1.1 christos int res;
1782 1.1 christos EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1783 1.1 christos res = event_base->event_break;
1784 1.1 christos EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1785 1.1 christos return res;
1786 1.1 christos }
1787 1.1 christos
1788 1.1 christos int
1789 1.1 christos event_base_got_exit(struct event_base *event_base)
1790 1.1 christos {
1791 1.1 christos int res;
1792 1.1 christos EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1793 1.1 christos res = event_base->event_gotterm;
1794 1.1 christos EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1795 1.1 christos return res;
1796 1.1 christos }
1797 1.1 christos
1798 1.1 christos /* not thread safe */
1799 1.1 christos
1800 1.1 christos int
1801 1.1 christos event_loop(int flags)
1802 1.1 christos {
1803 1.1 christos return event_base_loop(current_base, flags);
1804 1.1 christos }
1805 1.1 christos
1806 1.1 christos int
1807 1.1 christos event_base_loop(struct event_base *base, int flags)
1808 1.1 christos {
1809 1.1 christos const struct eventop *evsel = base->evsel;
1810 1.1 christos struct timeval tv;
1811 1.1 christos struct timeval *tv_p;
1812 1.1 christos int res, done, retval = 0;
1813 1.1 christos
1814 1.1 christos /* Grab the lock. We will release it inside evsel.dispatch, and again
1815 1.1 christos * as we invoke user callbacks. */
1816 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1817 1.1 christos
1818 1.1 christos if (base->running_loop) {
1819 1.1 christos event_warnx("%s: reentrant invocation. Only one event_base_loop"
1820 1.1 christos " can run on each event_base at once.", __func__);
1821 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1822 1.1 christos return -1;
1823 1.1 christos }
1824 1.1 christos
1825 1.1 christos base->running_loop = 1;
1826 1.1 christos
1827 1.1 christos clear_time_cache(base);
1828 1.1 christos
1829 1.1 christos if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1830 1.1 christos evsig_set_base_(base);
1831 1.1 christos
1832 1.1 christos done = 0;
1833 1.1 christos
1834 1.1 christos #ifndef EVENT__DISABLE_THREAD_SUPPORT
1835 1.1 christos base->th_owner_id = EVTHREAD_GET_ID();
1836 1.1 christos #endif
1837 1.1 christos
1838 1.1 christos base->event_gotterm = base->event_break = 0;
1839 1.1 christos
1840 1.1 christos while (!done) {
1841 1.1 christos base->event_continue = 0;
1842 1.1 christos base->n_deferreds_queued = 0;
1843 1.1 christos
1844 1.1 christos /* Terminate the loop if we have been asked to */
1845 1.1 christos if (base->event_gotterm) {
1846 1.1 christos break;
1847 1.1 christos }
1848 1.1 christos
1849 1.1 christos if (base->event_break) {
1850 1.1 christos break;
1851 1.1 christos }
1852 1.1 christos
1853 1.1 christos tv_p = &tv;
1854 1.1 christos if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1855 1.1 christos timeout_next(base, &tv_p);
1856 1.1 christos } else {
1857 1.1 christos /*
1858 1.1 christos * if we have active events, we just poll new events
1859 1.1 christos * without waiting.
1860 1.1 christos */
1861 1.1 christos evutil_timerclear(&tv);
1862 1.1 christos }
1863 1.1 christos
1864 1.1 christos /* If we have no events, we just exit */
1865 1.1 christos if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
1866 1.1 christos !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1867 1.1 christos event_debug(("%s: no events registered.", __func__));
1868 1.1 christos retval = 1;
1869 1.1 christos goto done;
1870 1.1 christos }
1871 1.1 christos
1872 1.1 christos event_queue_make_later_events_active(base);
1873 1.1 christos
1874 1.1 christos clear_time_cache(base);
1875 1.1 christos
1876 1.1 christos res = evsel->dispatch(base, tv_p);
1877 1.1 christos
1878 1.1 christos if (res == -1) {
1879 1.1 christos event_debug(("%s: dispatch returned unsuccessfully.",
1880 1.1 christos __func__));
1881 1.1 christos retval = -1;
1882 1.1 christos goto done;
1883 1.1 christos }
1884 1.1 christos
1885 1.1 christos update_time_cache(base);
1886 1.1 christos
1887 1.1 christos timeout_process(base);
1888 1.1 christos
1889 1.1 christos if (N_ACTIVE_CALLBACKS(base)) {
1890 1.1 christos int n = event_process_active(base);
1891 1.1 christos if ((flags & EVLOOP_ONCE)
1892 1.1 christos && N_ACTIVE_CALLBACKS(base) == 0
1893 1.1 christos && n != 0)
1894 1.1 christos done = 1;
1895 1.1 christos } else if (flags & EVLOOP_NONBLOCK)
1896 1.1 christos done = 1;
1897 1.1 christos }
1898 1.1 christos event_debug(("%s: asked to terminate loop.", __func__));
1899 1.1 christos
1900 1.1 christos done:
1901 1.1 christos clear_time_cache(base);
1902 1.1 christos base->running_loop = 0;
1903 1.1 christos
1904 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1905 1.1 christos
1906 1.1 christos return (retval);
1907 1.1 christos }
1908 1.1 christos
1909 1.1 christos /* One-time callback to implement event_base_once: invokes the user callback,
1910 1.1 christos * then deletes the allocated storage */
1911 1.1 christos static void
1912 1.1 christos event_once_cb(evutil_socket_t fd, short events, void *arg)
1913 1.1 christos {
1914 1.1 christos struct event_once *eonce = arg;
1915 1.1 christos
1916 1.1 christos (*eonce->cb)(fd, events, eonce->arg);
1917 1.1 christos EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
1918 1.1 christos LIST_REMOVE(eonce, next_once);
1919 1.1 christos EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
1920 1.1 christos event_debug_unassign(&eonce->ev);
1921 1.1 christos mm_free(eonce);
1922 1.1 christos }
1923 1.1 christos
1924 1.1 christos /* not threadsafe, event scheduled once. */
1925 1.1 christos int
1926 1.1 christos event_once(evutil_socket_t fd, short events,
1927 1.1 christos void (*callback)(evutil_socket_t, short, void *),
1928 1.1 christos void *arg, const struct timeval *tv)
1929 1.1 christos {
1930 1.1 christos return event_base_once(current_base, fd, events, callback, arg, tv);
1931 1.1 christos }
1932 1.1 christos
1933 1.1 christos /* Schedules an event once */
1934 1.1 christos int
1935 1.1 christos event_base_once(struct event_base *base, evutil_socket_t fd, short events,
1936 1.1 christos void (*callback)(evutil_socket_t, short, void *),
1937 1.1 christos void *arg, const struct timeval *tv)
1938 1.1 christos {
1939 1.1 christos struct event_once *eonce;
1940 1.1 christos int res = 0;
1941 1.1 christos int activate = 0;
1942 1.1 christos
1943 1.1 christos /* We cannot support signals that just fire once, or persistent
1944 1.1 christos * events. */
1945 1.1 christos if (events & (EV_SIGNAL|EV_PERSIST))
1946 1.1 christos return (-1);
1947 1.1 christos
1948 1.1 christos if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
1949 1.1 christos return (-1);
1950 1.1 christos
1951 1.1 christos eonce->cb = callback;
1952 1.1 christos eonce->arg = arg;
1953 1.1 christos
1954 1.2 christos if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
1955 1.1 christos evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
1956 1.1 christos
1957 1.1 christos if (tv == NULL || ! evutil_timerisset(tv)) {
1958 1.1 christos /* If the event is going to become active immediately,
1959 1.1 christos * don't put it on the timeout queue. This is one
1960 1.1 christos * idiom for scheduling a callback, so let's make
1961 1.1 christos * it fast (and order-preserving). */
1962 1.1 christos activate = 1;
1963 1.1 christos }
1964 1.2 christos } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
1965 1.2 christos events &= EV_READ|EV_WRITE|EV_CLOSED;
1966 1.1 christos
1967 1.1 christos event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
1968 1.1 christos } else {
1969 1.1 christos /* Bad event combination */
1970 1.1 christos mm_free(eonce);
1971 1.1 christos return (-1);
1972 1.1 christos }
1973 1.1 christos
1974 1.1 christos if (res == 0) {
1975 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1976 1.1 christos if (activate)
1977 1.1 christos event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
1978 1.1 christos else
1979 1.1 christos res = event_add_nolock_(&eonce->ev, tv, 0);
1980 1.1 christos
1981 1.1 christos if (res != 0) {
1982 1.1 christos mm_free(eonce);
1983 1.1 christos return (res);
1984 1.1 christos } else {
1985 1.1 christos LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
1986 1.1 christos }
1987 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
1988 1.1 christos }
1989 1.1 christos
1990 1.1 christos return (0);
1991 1.1 christos }
1992 1.1 christos
1993 1.1 christos int
1994 1.1 christos event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
1995 1.1 christos {
1996 1.1 christos if (!base)
1997 1.1 christos base = current_base;
1998 1.1 christos if (arg == &event_self_cbarg_ptr_)
1999 1.1 christos arg = ev;
2000 1.1 christos
2001 1.1 christos event_debug_assert_not_added_(ev);
2002 1.1 christos
2003 1.1 christos ev->ev_base = base;
2004 1.1 christos
2005 1.1 christos ev->ev_callback = callback;
2006 1.1 christos ev->ev_arg = arg;
2007 1.1 christos ev->ev_fd = fd;
2008 1.1 christos ev->ev_events = events;
2009 1.1 christos ev->ev_res = 0;
2010 1.1 christos ev->ev_flags = EVLIST_INIT;
2011 1.1 christos ev->ev_ncalls = 0;
2012 1.1 christos ev->ev_pncalls = NULL;
2013 1.1 christos
2014 1.1 christos if (events & EV_SIGNAL) {
2015 1.2 christos if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2016 1.1 christos event_warnx("%s: EV_SIGNAL is not compatible with "
2017 1.2 christos "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2018 1.1 christos return -1;
2019 1.1 christos }
2020 1.1 christos ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2021 1.1 christos } else {
2022 1.1 christos if (events & EV_PERSIST) {
2023 1.1 christos evutil_timerclear(&ev->ev_io_timeout);
2024 1.1 christos ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2025 1.1 christos } else {
2026 1.1 christos ev->ev_closure = EV_CLOSURE_EVENT;
2027 1.1 christos }
2028 1.1 christos }
2029 1.1 christos
2030 1.1 christos min_heap_elem_init_(ev);
2031 1.1 christos
2032 1.1 christos if (base != NULL) {
2033 1.1 christos /* by default, we put new events into the middle priority */
2034 1.1 christos ev->ev_pri = base->nactivequeues / 2;
2035 1.1 christos }
2036 1.1 christos
2037 1.1 christos event_debug_note_setup_(ev);
2038 1.1 christos
2039 1.1 christos return 0;
2040 1.1 christos }
2041 1.1 christos
2042 1.1 christos int
2043 1.1 christos event_base_set(struct event_base *base, struct event *ev)
2044 1.1 christos {
2045 1.1 christos /* Only innocent events may be assigned to a different base */
2046 1.1 christos if (ev->ev_flags != EVLIST_INIT)
2047 1.1 christos return (-1);
2048 1.1 christos
2049 1.1 christos event_debug_assert_is_setup_(ev);
2050 1.1 christos
2051 1.1 christos ev->ev_base = base;
2052 1.1 christos ev->ev_pri = base->nactivequeues/2;
2053 1.1 christos
2054 1.1 christos return (0);
2055 1.1 christos }
2056 1.1 christos
2057 1.1 christos void
2058 1.1 christos event_set(struct event *ev, evutil_socket_t fd, short events,
2059 1.1 christos void (*callback)(evutil_socket_t, short, void *), void *arg)
2060 1.1 christos {
2061 1.1 christos int r;
2062 1.1 christos r = event_assign(ev, current_base, fd, events, callback, arg);
2063 1.1 christos EVUTIL_ASSERT(r == 0);
2064 1.1 christos }
2065 1.1 christos
2066 1.1 christos void *
2067 1.1 christos event_self_cbarg(void)
2068 1.1 christos {
2069 1.1 christos return &event_self_cbarg_ptr_;
2070 1.1 christos }
2071 1.1 christos
2072 1.1 christos struct event *
2073 1.1 christos event_base_get_running_event(struct event_base *base)
2074 1.1 christos {
2075 1.1 christos struct event *ev = NULL;
2076 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2077 1.1 christos if (EVBASE_IN_THREAD(base)) {
2078 1.1 christos struct event_callback *evcb = base->current_event;
2079 1.1 christos if (evcb->evcb_flags & EVLIST_INIT)
2080 1.1 christos ev = event_callback_to_event(evcb);
2081 1.1 christos }
2082 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
2083 1.1 christos return ev;
2084 1.1 christos }
2085 1.1 christos
2086 1.1 christos struct event *
2087 1.1 christos event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2088 1.1 christos {
2089 1.1 christos struct event *ev;
2090 1.1 christos ev = mm_malloc(sizeof(struct event));
2091 1.1 christos if (ev == NULL)
2092 1.1 christos return (NULL);
2093 1.1 christos if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2094 1.1 christos mm_free(ev);
2095 1.1 christos return (NULL);
2096 1.1 christos }
2097 1.1 christos
2098 1.1 christos return (ev);
2099 1.1 christos }
2100 1.1 christos
2101 1.1 christos void
2102 1.1 christos event_free(struct event *ev)
2103 1.1 christos {
2104 1.2 christos /* This is disabled, so that events which have been finalized be a
2105 1.2 christos * valid target for event_free(). That's */
2106 1.2 christos // event_debug_assert_is_setup_(ev);
2107 1.1 christos
2108 1.1 christos /* make sure that this event won't be coming back to haunt us. */
2109 1.1 christos event_del(ev);
2110 1.1 christos event_debug_note_teardown_(ev);
2111 1.1 christos mm_free(ev);
2112 1.1 christos
2113 1.1 christos }
2114 1.1 christos
2115 1.1 christos void
2116 1.1 christos event_debug_unassign(struct event *ev)
2117 1.1 christos {
2118 1.1 christos event_debug_assert_not_added_(ev);
2119 1.1 christos event_debug_note_teardown_(ev);
2120 1.1 christos
2121 1.1 christos ev->ev_flags &= ~EVLIST_INIT;
2122 1.1 christos }
2123 1.1 christos
2124 1.2 christos #define EVENT_FINALIZE_FREE_ 0x10000
2125 1.2 christos static int
2126 1.2 christos event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2127 1.2 christos {
2128 1.2 christos ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2129 1.2 christos EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2130 1.2 christos
2131 1.2 christos event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2132 1.2 christos ev->ev_closure = closure;
2133 1.2 christos ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2134 1.2 christos event_active_nolock_(ev, EV_FINALIZE, 1);
2135 1.2 christos ev->ev_flags |= EVLIST_FINALIZING;
2136 1.2 christos return 0;
2137 1.2 christos }
2138 1.2 christos
2139 1.2 christos static int
2140 1.2 christos event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2141 1.2 christos {
2142 1.2 christos int r;
2143 1.2 christos struct event_base *base = ev->ev_base;
2144 1.2 christos if (EVUTIL_FAILURE_CHECK(!base)) {
2145 1.2 christos event_warnx("%s: event has no event_base set.", __func__);
2146 1.2 christos return -1;
2147 1.2 christos }
2148 1.2 christos
2149 1.2 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2150 1.2 christos r = event_finalize_nolock_(base, flags, ev, cb);
2151 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
2152 1.2 christos return r;
2153 1.2 christos }
2154 1.2 christos
2155 1.2 christos int
2156 1.2 christos event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2157 1.2 christos {
2158 1.2 christos return event_finalize_impl_(flags, ev, cb);
2159 1.2 christos }
2160 1.2 christos
2161 1.2 christos int
2162 1.2 christos event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2163 1.2 christos {
2164 1.2 christos return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2165 1.2 christos }
2166 1.2 christos
2167 1.2 christos void
2168 1.2 christos event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2169 1.2 christos {
2170 1.2 christos struct event *ev = NULL;
2171 1.2 christos if (evcb->evcb_flags & EVLIST_INIT) {
2172 1.2 christos ev = event_callback_to_event(evcb);
2173 1.2 christos event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2174 1.2 christos } else {
2175 1.2 christos event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2176 1.2 christos }
2177 1.2 christos
2178 1.2 christos evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2179 1.2 christos evcb->evcb_cb_union.evcb_cbfinalize = cb;
2180 1.2 christos event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2181 1.2 christos evcb->evcb_flags |= EVLIST_FINALIZING;
2182 1.2 christos }
2183 1.2 christos
2184 1.2 christos void
2185 1.2 christos event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2186 1.2 christos {
2187 1.2 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2188 1.2 christos event_callback_finalize_nolock_(base, flags, evcb, cb);
2189 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
2190 1.2 christos }
2191 1.2 christos
2192 1.2 christos /** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided
2193 1.2 christos * callback will be invoked on *one of them*, after they have *all* been
2194 1.2 christos * finalized. */
2195 1.2 christos int
2196 1.2 christos event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2197 1.2 christos {
2198 1.2 christos int n_pending = 0, i;
2199 1.2 christos
2200 1.2 christos if (base == NULL)
2201 1.2 christos base = current_base;
2202 1.2 christos
2203 1.2 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2204 1.2 christos
2205 1.2 christos event_debug(("%s: %d events finalizing", __func__, n_cbs));
2206 1.2 christos
2207 1.2 christos /* At most one can be currently executing; the rest we just
2208 1.2 christos * cancel... But we always make sure that the finalize callback
2209 1.2 christos * runs. */
2210 1.2 christos for (i = 0; i < n_cbs; ++i) {
2211 1.2 christos struct event_callback *evcb = evcbs[i];
2212 1.2 christos if (evcb == base->current_event) {
2213 1.2 christos event_callback_finalize_nolock_(base, 0, evcb, cb);
2214 1.2 christos ++n_pending;
2215 1.2 christos } else {
2216 1.2 christos event_callback_cancel_nolock_(base, evcb, 0);
2217 1.2 christos }
2218 1.2 christos }
2219 1.2 christos
2220 1.2 christos if (n_pending == 0) {
2221 1.2 christos /* Just do the first one. */
2222 1.2 christos event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2223 1.2 christos }
2224 1.2 christos
2225 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
2226 1.2 christos return 0;
2227 1.2 christos }
2228 1.2 christos
2229 1.1 christos /*
2230 1.1 christos * Set's the priority of an event - if an event is already scheduled
2231 1.1 christos * changing the priority is going to fail.
2232 1.1 christos */
2233 1.1 christos
2234 1.1 christos int
2235 1.1 christos event_priority_set(struct event *ev, int pri)
2236 1.1 christos {
2237 1.1 christos event_debug_assert_is_setup_(ev);
2238 1.1 christos
2239 1.1 christos if (ev->ev_flags & EVLIST_ACTIVE)
2240 1.1 christos return (-1);
2241 1.1 christos if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2242 1.1 christos return (-1);
2243 1.1 christos
2244 1.1 christos ev->ev_pri = pri;
2245 1.1 christos
2246 1.1 christos return (0);
2247 1.1 christos }
2248 1.1 christos
2249 1.1 christos /*
2250 1.1 christos * Checks if a specific event is pending or scheduled.
2251 1.1 christos */
2252 1.1 christos
2253 1.1 christos int
2254 1.1 christos event_pending(const struct event *ev, short event, struct timeval *tv)
2255 1.1 christos {
2256 1.1 christos int flags = 0;
2257 1.1 christos
2258 1.1 christos if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2259 1.1 christos event_warnx("%s: event has no event_base set.", __func__);
2260 1.1 christos return 0;
2261 1.1 christos }
2262 1.1 christos
2263 1.1 christos EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2264 1.1 christos event_debug_assert_is_setup_(ev);
2265 1.1 christos
2266 1.1 christos if (ev->ev_flags & EVLIST_INSERTED)
2267 1.2 christos flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2268 1.1 christos if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2269 1.1 christos flags |= ev->ev_res;
2270 1.1 christos if (ev->ev_flags & EVLIST_TIMEOUT)
2271 1.1 christos flags |= EV_TIMEOUT;
2272 1.1 christos
2273 1.2 christos event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2274 1.1 christos
2275 1.1 christos /* See if there is a timeout that we should report */
2276 1.1 christos if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2277 1.1 christos struct timeval tmp = ev->ev_timeout;
2278 1.1 christos tmp.tv_usec &= MICROSECONDS_MASK;
2279 1.1 christos /* correctly remamp to real time */
2280 1.1 christos evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2281 1.1 christos }
2282 1.1 christos
2283 1.1 christos EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2284 1.1 christos
2285 1.1 christos return (flags & event);
2286 1.1 christos }
2287 1.1 christos
2288 1.1 christos int
2289 1.1 christos event_initialized(const struct event *ev)
2290 1.1 christos {
2291 1.1 christos if (!(ev->ev_flags & EVLIST_INIT))
2292 1.1 christos return 0;
2293 1.1 christos
2294 1.1 christos return 1;
2295 1.1 christos }
2296 1.1 christos
/* Copy out any subset of an event's configuration (base, fd, event mask,
 * callback, callback argument).  Each out-pointer may be NULL to skip
 * that field. */
void
event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
{
	event_debug_assert_is_setup_(event);

	if (base_out)
		*base_out = event->ev_base;
	if (fd_out)
		*fd_out = event->ev_fd;
	if (events_out)
		*events_out = event->ev_events;
	if (callback_out)
		*callback_out = event->ev_callback;
	if (arg_out)
		*arg_out = event->ev_arg;
}
2313 1.1 christos
/* Return sizeof(struct event) so callers built against different header
 * versions can allocate the correct amount of storage for an event. */
size_t
event_get_struct_event_size(void)
{
	return sizeof(struct event);
}
2319 1.1 christos
/* Accessor: the fd (or, for EV_SIGNAL events, the signal number) that
 * this event watches. */
evutil_socket_t
event_get_fd(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_fd;
}
2326 1.1 christos
/* Accessor: the event_base this event is attached to. */
struct event_base *
event_get_base(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_base;
}
2333 1.1 christos
/* Accessor: the EV_* event mask this event was configured with. */
short
event_get_events(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_events;
}
2340 1.1 christos
/* Accessor: the callback function invoked when the event fires. */
event_callback_fn
event_get_callback(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_callback;
}
2347 1.1 christos
/* Accessor: the opaque argument passed to the event's callback. */
void *
event_get_callback_arg(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_arg;
}
2354 1.1 christos
/* Accessor: the event's priority (lower value runs first; see
 * event_priority_set()). */
int
event_get_priority(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_pri;
}
2361 1.1 christos
/* Public entry point for adding an event: validates that the event has
 * a base, then runs event_add_nolock_() under the base lock.  tv is a
 * relative interval here (tv_is_absolute == 0). */
int
event_add(struct event *ev, const struct timeval *tv)
{
	int res;

	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return -1;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);

	res = event_add_nolock_(ev, tv, 0);

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (res);
}
2380 1.1 christos
/* Helper callback: wake an event_base from another thread.  This version
 * works by writing a byte to one end of a socketpair, so that the event_base
 * listening on the other end will wake up as the corresponding event
 * triggers */
static int
evthread_notify_base_default(struct event_base *base)
{
	char buf[1];
	int r;
	buf[0] = (char) 0;
#ifdef _WIN32
	/* On Windows the notify fds are sockets, so use send(). */
	r = send(base->th_notify_fd[1], buf, 1, 0);
#else
	r = write(base->th_notify_fd[1], buf, 1);
#endif
	/* EAGAIN means the buffer is already full, so a wakeup is already
	 * pending: treat that as success rather than an error. */
	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
}
2398 1.1 christos
#ifdef EVENT__HAVE_EVENTFD
/* Helper callback: wake an event_base from another thread.  This version
 * assumes that you have a working eventfd() implementation. */
static int
evthread_notify_base_eventfd(struct event_base *base)
{
	/* Writing a nonzero 64-bit value wakes the eventfd reader; retry
	 * on EAGAIN (counter transiently at its maximum). */
	ev_uint64_t msg = 1;
	int r;
	do {
		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
	} while (r < 0 && errno == EAGAIN);

	return (r < 0) ? -1 : 0;
}
#endif
2414 1.1 christos
2415 1.1 christos
2416 1.1 christos /** Tell the thread currently running the event_loop for base (if any) that it
2417 1.1 christos * needs to stop waiting in its dispatch function (if it is) and process all
2418 1.1 christos * active callbacks. */
2419 1.1 christos static int
2420 1.1 christos evthread_notify_base(struct event_base *base)
2421 1.1 christos {
2422 1.1 christos EVENT_BASE_ASSERT_LOCKED(base);
2423 1.1 christos if (!base->th_notify_fn)
2424 1.1 christos return -1;
2425 1.1 christos if (base->is_notify_pending)
2426 1.1 christos return 0;
2427 1.1 christos base->is_notify_pending = 1;
2428 1.1 christos return base->th_notify_fn(base);
2429 1.1 christos }
2430 1.1 christos
/* Implementation function to remove a timeout on a currently pending event.
 */
int
event_remove_timer_nolock_(struct event *ev)
{
	struct event_base *base = ev->ev_base;

	EVENT_BASE_ASSERT_LOCKED(base);
	event_debug_assert_is_setup_(ev);

	event_debug(("event_remove_timer_nolock: event: %p", ev));

	/* If it's not pending on a timeout, we don't need to do anything. */
	if (ev->ev_flags & EVLIST_TIMEOUT) {
		event_queue_remove_timeout(base, ev);
		/* Also clear the stored timeout interval.  NOTE(review):
		 * this appears to be the same storage event_add_nolock_
		 * writes through ev_io_timeout for persistent events, so a
		 * persistent event will not be rescheduled -- confirm. */
		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
	}

	return (0);
}
2451 1.1 christos
2452 1.1 christos int
2453 1.1 christos event_remove_timer(struct event *ev)
2454 1.1 christos {
2455 1.1 christos int res;
2456 1.1 christos
2457 1.1 christos if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2458 1.1 christos event_warnx("%s: event has no event_base set.", __func__);
2459 1.1 christos return -1;
2460 1.1 christos }
2461 1.1 christos
2462 1.1 christos EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2463 1.1 christos
2464 1.1 christos res = event_remove_timer_nolock_(ev);
2465 1.1 christos
2466 1.1 christos EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2467 1.1 christos
2468 1.1 christos return (res);
2469 1.1 christos }
2470 1.1 christos
/* Implementation function to add an event.  Works just like event_add,
 * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
 * we treat tv as an absolute time, not as an interval to add to the current
 * time */
int
event_add_nolock_(struct event *ev, const struct timeval *tv,
    int tv_is_absolute)
{
	struct event_base *base = ev->ev_base;
	int res = 0;
	int notify = 0;	/* set when the running loop must be woken up */

	EVENT_BASE_ASSERT_LOCKED(base);
	event_debug_assert_is_setup_(ev);

	event_debug((
		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
		 ev,
		 EV_SOCK_ARG(ev->ev_fd),
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	/* An event that is being finalized may not be re-added. */
	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return (-1);
	}

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve_(&base->timeheap,
			1 + min_heap_size_(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	/* If the main thread is currently executing a signal event's
	 * callback, and we are not the main thread, then we want to wait
	 * until the callback is done before we mess with the event, or else
	 * we can race on ev_ncalls and ev_pncalls below. */
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (base->current_event == event_to_event_callback(ev) &&
	    (ev->ev_events & EV_SIGNAL)
	    && !EVBASE_IN_THREAD(base)) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	/* Register the fd or signal with the backend, but only if the
	 * event is not already inserted or active. */
	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_add_(base, ev->ev_fd, ev);
		else if (ev->ev_events & EV_SIGNAL)
			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
		if (res != -1)
			event_queue_insert_inserted(base, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;
		int common_timeout;
#ifdef USE_REINSERT_TIMEOUT
		int was_common;
		int old_timeout_idx;
#endif

		/*
		 * for persistent timeout events, we remember the
		 * timeout value and re-add the event.
		 *
		 * If tv_is_absolute, this was already set.
		 */
		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
			ev->ev_io_timeout = *tv;

#ifndef USE_REINSERT_TIMEOUT
		/* Re-adding replaces any previously scheduled timeout. */
		if (ev->ev_flags & EVLIST_TIMEOUT) {
			event_queue_remove_timeout(base, ev);
		}
#endif

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			if (ev->ev_events & EV_SIGNAL) {
				/* See if we are just active executing
				 * this event in a loop
				 */
				if (ev->ev_ncalls && ev->ev_pncalls) {
					/* Abort loop */
					*ev->ev_pncalls = 0;
				}
			}

			event_queue_remove_active(base, event_to_event_callback(ev));
		}

		gettime(base, &now);

		common_timeout = is_common_timeout(tv, base);
#ifdef USE_REINSERT_TIMEOUT
		was_common = is_common_timeout(&ev->ev_timeout, base);
		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
#endif

		/* Compute the absolute expiry time.  For common timeouts
		 * the magic index bits above MICROSECONDS_MASK are carried
		 * over into tv_usec. */
		if (tv_is_absolute) {
			ev->ev_timeout = *tv;
		} else if (common_timeout) {
			struct timeval tmp = *tv;
			tmp.tv_usec &= MICROSECONDS_MASK;
			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
			ev->ev_timeout.tv_usec |=
			    (tv->tv_usec & ~MICROSECONDS_MASK);
		} else {
			evutil_timeradd(&now, tv, &ev->ev_timeout);
		}

		event_debug((
			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));

#ifdef USE_REINSERT_TIMEOUT
		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
#else
		event_queue_insert_timeout(base, ev);
#endif

		if (common_timeout) {
			struct common_timeout_list *ctl =
			    get_common_timeout_list(base, &ev->ev_timeout);
			if (ev == TAILQ_FIRST(&ctl->events)) {
				/* This event is now the earliest in its
				 * common-timeout class; (re)schedule the
				 * class's representative timer. */
				common_timeout_schedule(ctl, &now, ev);
			}
		} else {
			struct event* top = NULL;
			/* See if the earliest timeout is now earlier than it
			 * was before: if so, we will need to tell the main
			 * thread to wake up earlier than it would otherwise.
			 * We double check the timeout of the top element to
			 * handle time distortions due to system suspension.
			 */
			if (min_heap_elt_is_top_(ev))
				notify = 1;
			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
				 evutil_timercmp(&top->ev_timeout, &now, <))
				notify = 1;
		}
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_add_(ev);

	return (res);
}
2646 1.1 christos
/* Shared implementation for the event_del*() family: validate the base,
 * then run event_del_nolock_() under the base lock with the requested
 * blocking policy. */
static int
event_del_(struct event *ev, int blocking)
{
	int res;

	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return -1;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);

	res = event_del_nolock_(ev, blocking);

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (res);
}
2665 1.1 christos
/* Default deletion policy: wait for a concurrently running callback
 * unless the event was set up with EV_FINALIZE (see event_del_nolock_'s
 * AUTOBLOCK handling). */
int
event_del(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
}
2671 1.2 christos
/* Delete, always waiting for a concurrently running callback to finish. */
int
event_del_block(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_BLOCK);
}
2677 1.2 christos
/* Delete without ever waiting, even if the callback is running in
 * another thread. */
int
event_del_noblock(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_NOBLOCK);
}
2683 1.2 christos
/** Helper for event_del: always called with th_base_lock held.
 *
 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
 * EVEN_IF_FINALIZING} values. See those for more information.
 */
int
event_del_nolock_(struct event *ev, int blocking)
{
	struct event_base *base;
	int res = 0, notify = 0;

	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);

	/* Events being finalized are left alone unless the caller
	 * explicitly asked to delete them anyway. */
	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
		if (ev->ev_flags & EVLIST_FINALIZING) {
			/* XXXX Debug */
			return 0;
		}
	}

	/* If the main thread is currently executing this event's callback,
	 * and we are not the main thread, then we want to wait until the
	 * callback is done before we start removing the event.  That way,
	 * when this function returns, it will be safe to free the
	 * user-supplied argument. */
	base = ev->ev_base;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	/* BLOCK always waits; NOBLOCK never does; AUTOBLOCK waits unless
	 * the event opted out via EV_FINALIZE. */
	if (blocking != EVENT_DEL_NOBLOCK &&
	    base->current_event == event_to_event_callback(ev) &&
	    !EVBASE_IN_THREAD(base) &&
	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_events & EV_SIGNAL) {
		if (ev->ev_ncalls && ev->ev_pncalls) {
			/* Abort loop */
			*ev->ev_pncalls = 0;
		}
	}

	if (ev->ev_flags & EVLIST_TIMEOUT) {
		/* NOTE: We never need to notify the main thread because of a
		 * deleted timeout event: all that could happen if we don't is
		 * that the dispatch loop might wake up too early.  But the
		 * point of notifying the main thread _is_ to wake up the
		 * dispatch loop early anyway, so we wouldn't gain anything by
		 * doing it.
		 */
		event_queue_remove_timeout(base, ev);
	}

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove_active(base, event_to_event_callback(ev));
	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
		event_queue_remove_active_later(base, event_to_event_callback(ev));

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove_inserted(base, ev);
		/* Unregister from the backend: fd events and signal events
		 * live in separate maps. */
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_del_(base, ev->ev_fd, ev);
		else
			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_del_(ev);

	return (res);
}
2774 1.1 christos
/* Public entry point: make an event active "by hand" with result flags
 * res, running its callback up to ncalls times (for signal-style
 * events).  Takes the base lock around the _nolock_ worker. */
void
event_active(struct event *ev, int res, short ncalls)
{
	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);

	event_debug_assert_is_setup_(ev);

	event_active_nolock_(ev, res, ncalls);

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
}
2791 1.1 christos
2792 1.1 christos
/* Locked worker for event_active(): merge res into the event's result
 * flags and place its callback on the active queue. */
void
event_active_nolock_(struct event *ev, int res, short ncalls)
{
	struct event_base *base;

	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));

	base = ev->ev_base;
	EVENT_BASE_ASSERT_LOCKED(base);

	/* Events being finalized may not be made active again. */
	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return;
	}

	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		/* Being on both queues at once is an invalid state. */
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* We get different kinds of events, add them together */
		ev->ev_res |= res;
		return;
	case EVLIST_ACTIVE_LATER:
		/* Already queued for the next pass; just merge flags. */
		ev->ev_res |= res;
		break;
	case 0:
		ev->ev_res = res;
		break;
	}

	/* Make the running loop re-check queues if this event outranks
	 * (is numerically lower than) the priority currently running. */
	if (ev->ev_pri < base->event_running_priority)
		base->event_continue = 1;

	if (ev->ev_events & EV_SIGNAL) {
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		/* Wait for a concurrently running callback of this event to
		 * finish before touching ev_ncalls/ev_pncalls. */
		if (base->current_event == event_to_event_callback(ev) &&
		    !EVBASE_IN_THREAD(base)) {
			++base->current_event_waiters;
			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
		}
#endif
		ev->ev_ncalls = ncalls;
		ev->ev_pncalls = NULL;
	}

	event_callback_activate_nolock_(base, event_to_event_callback(ev));
}
2843 1.1 christos
/* Locked wrapper around event_active_later_nolock_(). */
void
event_active_later_(struct event *ev, int res)
{
	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
	event_active_later_nolock_(ev, res);
	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
}
2851 1.1 christos
/* Schedule an event to become active on the next loop iteration rather
 * than immediately. */
void
event_active_later_nolock_(struct event *ev, int res)
{
	struct event_base *base = ev->ev_base;
	EVENT_BASE_ASSERT_LOCKED(base);

	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
		/* We get different kinds of events, add them together */
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;

	event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
}
2868 1.1 christos
2869 1.1 christos int
2870 1.1 christos event_callback_activate_(struct event_base *base,
2871 1.1 christos struct event_callback *evcb)
2872 1.1 christos {
2873 1.1 christos int r;
2874 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2875 1.1 christos r = event_callback_activate_nolock_(base, evcb);
2876 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
2877 1.1 christos return r;
2878 1.1 christos }
2879 1.1 christos
/* Put a callback on the active queue, waking the loop if needed.
 * Returns 1 if newly activated, 0 if it was already scheduled (active,
 * active-later, or finalizing). */
int
event_callback_activate_nolock_(struct event_base *base,
    struct event_callback *evcb)
{
	int r = 1;

	/* Finalizing callbacks may not be activated. */
	if (evcb->evcb_flags & EVLIST_FINALIZING)
		return 0;

	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
	default:
		EVUTIL_ASSERT(0);
	case EVLIST_ACTIVE_LATER:
		/* Promote from the active-later queue to the active queue;
		 * report 0 since it was already scheduled. */
		event_queue_remove_active_later(base, evcb);
		r = 0;
		break;
	case EVLIST_ACTIVE:
		/* Already active: nothing to do. */
		return 0;
	case 0:
		break;
	}

	event_queue_insert_active(base, evcb);

	/* Wake the loop if it is waiting in another thread. */
	if (EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	return r;
}
2909 1.1 christos
/* Queue a callback to run on the *next* loop iteration; a no-op if it
 * is already scheduled (now or later). */
void
event_callback_activate_later_nolock_(struct event_base *base,
    struct event_callback *evcb)
{
	if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
		return;

	event_queue_insert_active_later(base, evcb);
	if (EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
}
2921 1.1 christos
/* Zero-initialize a bare event_callback and give it the last (largest
 * numeric, i.e. least urgent) priority of the base by default. */
void
event_callback_init_(struct event_base *base,
    struct event_callback *cb)
{
	memset(cb, 0, sizeof(*cb));
	cb->evcb_pri = base->nactivequeues - 1;
}
2929 1.1 christos
/* Locked wrapper around event_callback_cancel_nolock_(); the final 0
 * means finalizing callbacks are left alone. */
int
event_callback_cancel_(struct event_base *base,
    struct event_callback *evcb)
{
	int r;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = event_callback_cancel_nolock_(base, evcb, 0);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
2940 1.1 christos
/* Remove a callback from whichever queue it is on.  If it is backed by
 * a full struct event (EVLIST_INIT), route through event_del_nolock_()
 * so fd/signal/timeout state is cleaned up too. */
int
event_callback_cancel_nolock_(struct event_base *base,
    struct event_callback *evcb, int even_if_finalizing)
{
	/* Leave finalizing callbacks alone unless explicitly overridden. */
	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
		return 0;

	if (evcb->evcb_flags & EVLIST_INIT)
		return event_del_nolock_(event_callback_to_event(evcb),
		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);

	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		/* Being on both queues at once is an invalid state. */
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* We get different kinds of events, add them together */
		event_queue_remove_active(base, evcb);
		return 0;
	case EVLIST_ACTIVE_LATER:
		event_queue_remove_active_later(base, evcb);
		break;
	case 0:
		break;
	}

	return 0;
}
2970 1.1 christos
/* Initialize a "deferred" callback: a bare event_callback that invokes
 * fn(arg) via the CB_SELF closure at the given priority. */
void
event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
{
	memset(cb, 0, sizeof(*cb));
	cb->evcb_cb_union.evcb_selfcb = fn;
	cb->evcb_arg = arg;
	cb->evcb_pri = priority;
	cb->evcb_closure = EV_CLOSURE_CB_SELF;
}
2980 1.1 christos
/* Change the priority of a deferred callback. */
void
event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
{
	cb->evcb_pri = priority;
}
2986 1.1 christos
/* Cancel a pending deferred callback; a NULL base falls back to the
 * process-wide current_base. */
void
event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
{
	if (!base)
		base = current_base;
	event_callback_cancel_(base, cb);
}
2994 1.1 christos
/* Cap on deferreds activated for the current loop pass; beyond this,
 * further deferreds are pushed to the next pass so other events still
 * get a chance to run. */
#define MAX_DEFERREDS_QUEUED 32
int
event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
{
	int r = 1;
	/* NULL base falls back to the process-wide current_base. */
	if (!base)
		base = current_base;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
		event_callback_activate_later_nolock_(base, cb);
	} else {
		++base->n_deferreds_queued;
		r = event_callback_activate_nolock_(base, cb);
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
3012 1.1 christos
/* Compute how long the dispatch backend may sleep: the interval until
 * the earliest scheduled timeout, NULL (*tv_p) for "wait forever" when
 * no timeouts are pending, or a zero interval if one is already due.
 * Returns 0 on success, -1 if the clock could not be read. */
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	/* Caller must hold th_base_lock */
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;
	int res = 0;

	ev = min_heap_top_(&base->timeheap);

	if (ev == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		goto out;
	}

	if (gettime(base, &now) == -1) {
		res = -1;
		goto out;
	}

	/* Earliest timeout is already due: ask for an immediate poll. */
	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		goto out;
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	EVUTIL_ASSERT(tv->tv_sec >= 0);
	EVUTIL_ASSERT(tv->tv_usec >= 0);
	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));

out:
	return (res);
}
3049 1.1 christos
3050 1.1 christos /* Activate every event whose timeout has elapsed. */
3051 1.1 christos static void
3052 1.1 christos timeout_process(struct event_base *base)
3053 1.1 christos {
3054 1.1 christos /* Caller must hold lock. */
3055 1.1 christos struct timeval now;
3056 1.1 christos struct event *ev;
3057 1.1 christos
3058 1.1 christos if (min_heap_empty_(&base->timeheap)) {
3059 1.1 christos return;
3060 1.1 christos }
3061 1.1 christos
3062 1.1 christos gettime(base, &now);
3063 1.1 christos
3064 1.1 christos while ((ev = min_heap_top_(&base->timeheap))) {
3065 1.1 christos if (evutil_timercmp(&ev->ev_timeout, &now, >))
3066 1.1 christos break;
3067 1.1 christos
3068 1.1 christos /* delete this event from the I/O queues */
3069 1.2 christos event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3070 1.1 christos
3071 1.1 christos event_debug(("timeout_process: event: %p, call %p",
3072 1.1 christos ev, ev->ev_callback));
3073 1.1 christos event_active_nolock_(ev, EV_TIMEOUT, 1);
3074 1.1 christos }
3075 1.1 christos }
3076 1.1 christos
/* The DECR/INCR macros below rely on EVLIST_INTERNAL living in bit 4. */
#if (EVLIST_INTERNAL >> 4) != 1
#error "Mismatch for value of EVLIST_INTERNAL"
#endif

#ifndef MAX
/* NOTE: evaluates both arguments more than once; only use with
 * side-effect-free expressions. */
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif

/* Track the high-water mark of a counter: var = max(var, v). */
#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)

/* These are a fancy way to spell
   if (!(flags & EVLIST_INTERNAL))
       base->event_count--/++;
   i.e. internal events (bit 4 set) are excluded from the public count.
*/
#define DECR_EVENT_COUNT(base,flags) \
	((base)->event_count -= (~((flags) >> 4) & 1))
#define INCR_EVENT_COUNT(base,flags) do {				\
	((base)->event_count += (~((flags) >> 4) & 1));			\
	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);	\
} while (0)
3097 1.1 christos
/* Remove 'ev' from the base's set of added events.  Caller must hold the
 * base lock.  Reports an error via event_errx if 'ev' was not actually
 * marked EVLIST_INSERTED. */
static void
event_queue_remove_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
		return;
	}
	/* Decrement before clearing: the macro only inspects EVLIST_INTERNAL,
	 * which this does not touch. */
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_INSERTED;
}
/* Remove 'evcb' from its priority's active queue and clear EVLIST_ACTIVE.
 * Caller must hold the base lock.  Reports an error if 'evcb' was not
 * active. */
static void
event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
		    evcb, EVLIST_ACTIVE);
		return;
	}
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE;
	base->event_count_active--;

	/* Unlink from the per-priority active list. */
	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}
/* Remove 'evcb' from the "activate next iteration" queue and clear
 * EVLIST_ACTIVE_LATER.  Caller must hold the base lock.  Reports an error
 * if 'evcb' was not on that queue. */
static void
event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
		    evcb, EVLIST_ACTIVE_LATER);
		return;
	}
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
	/* ACTIVE_LATER callbacks count toward event_count_active too. */
	base->event_count_active--;

	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
}
/* Remove 'ev' from whichever timeout structure it is in: either its
 * common-timeout list or the min-heap.  Caller must hold the base lock.
 * Reports an error if 'ev' was not marked EVLIST_TIMEOUT. */
static void
event_queue_remove_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
		return;
	}
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_TIMEOUT;

	/* The magic bits in ev_timeout tell us which structure holds it. */
	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
	} else {
		min_heap_erase_(&base->timeheap, ev);
	}
}
3162 1.1 christos
#ifdef USE_REINSERT_TIMEOUT
/* Remove and reinsert 'ev' into the timeout queue.
 * 'was_common'/'is_common' say whether the old/new timeout uses a
 * common-timeout list; 'old_timeout_idx' identifies the old list.
 * The four cases below move 'ev' between the common-timeout lists and
 * the min-heap as needed. */
static void
event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
    int was_common, int is_common, int old_timeout_idx)
{
	struct common_timeout_list *ctl;
	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
		/* Not currently scheduled: a plain insert suffices. */
		event_queue_insert_timeout(base, ev);
		return;
	}

	switch ((was_common<<1) | is_common) {
	case 3: /* Changing from one common timeout to another */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 2: /* Was common; is no longer common */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		min_heap_push_(&base->timeheap, ev);
		break;
	case 1: /* Wasn't common; has become common. */
		min_heap_erase_(&base->timeheap, ev);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 0: /* was in heap; is still on heap. */
		min_heap_adjust_(&base->timeheap, ev);
		break;
	default:
		EVUTIL_ASSERT(0); /* unreachable */
		break;
	}
}
#endif
3203 1.1 christos
/* Add 'ev' to the common timeout list 'ctl', keeping the list sorted by
 * expiry time. */
static void
insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev)
{
	struct event *e;
	/* By all logic, we should just be able to append 'ev' to the end of
	 * ctl->events, since the timeout on each 'ev' is set to {the common
	 * timeout} + {the time when we add the event}, and so the events
	 * should arrive in order of their timeouts.  But just in case
	 * there's some wacky threading issue going on, we do a search from
	 * the end of 'ev' to find the right insertion point.
	 */
	TAILQ_FOREACH_REVERSE(e, &ctl->events,
	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
		/* This timercmp is a little sneaky, since both ev and e have
		 * magic values in tv_usec.  Fortunately, they ought to have
		 * the _same_ magic values in tv_usec.  Let's assert for that.
		 */
		EVUTIL_ASSERT(
		    is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			return;
		}
	}
	/* Everything on the list expires later (or the list is empty). */
	TAILQ_INSERT_HEAD(&ctl->events, ev,
	    ev_timeout_pos.ev_next_with_common_timeout);
}
3234 1.1 christos
/* Mark 'ev' as added on 'base' and count it.  Caller must hold the base
 * lock.  Reports an error if 'ev' is already inserted. */
static void
event_queue_insert_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_INSERTED;
}
3250 1.1 christos
/* Append 'evcb' to the active queue for its priority.  Caller must hold
 * the base lock.  Inserting an already-active callback is a no-op. */
static void
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (evcb->evcb_flags & EVLIST_ACTIVE) {
		/* Double insertion is possible for active events */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);

	evcb->evcb_flags |= EVLIST_ACTIVE;

	base->event_count_active++;
	/* Maintain the high-water mark for active callbacks. */
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}
3271 1.1 christos
/* Append 'evcb' to the queue of callbacks to be run on the next loop
 * iteration.  Caller must hold the base lock.  A callback that is already
 * active or already deferred is left alone. */
static void
event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
		/* Double insertion is possible */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
	/* ACTIVE_LATER counts toward event_count_active as well. */
	base->event_count_active++;
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
}
3288 1.1 christos
/* Schedule 'ev' on the appropriate timeout structure: a common-timeout
 * list when its timeval carries the common-timeout magic, else the
 * min-heap.  Caller must hold the base lock.  Reports an error if 'ev'
 * already has a pending timeout. */
static void
event_queue_insert_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
	} else {
		min_heap_push_(&base->timeheap, ev);
	}
}
3312 1.1 christos
/* Move every callback on the active_later queue onto the regular active
 * queues, flipping EVLIST_ACTIVE_LATER to EVLIST_ACTIVE.  Caller must
 * hold the base lock. */
static void
event_queue_make_later_events_active(struct event_base *base)
{
	struct event_callback *evcb;
	EVENT_BASE_ASSERT_LOCKED(base);

	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
		/* Self-closure callbacks count against the deferred cap. */
		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
	}
}
3327 1.1 christos
3328 1.1 christos /* Functions for debugging */
3329 1.1 christos
/* Return the libevent version as a human-readable string. */
const char *
event_get_version(void)
{
	return (EVENT__VERSION);
}
3335 1.1 christos
/* Return the libevent version as a packed numeric value, suitable for
 * comparisons. */
ev_uint32_t
event_get_version_number(void)
{
	return (EVENT__NUMERIC_VERSION);
}
3341 1.1 christos
/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

/* Return the name of the backend used by the current (global) base.
 * NOTE(review): dereferences current_base without a NULL check — callers
 * presumably must have initialized libevent first; confirm. */
const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}
3352 1.1 christos
#ifndef EVENT__DISABLE_MM_REPLACEMENT
/* User-replaceable allocation hooks, installed via
 * event_set_mem_functions().  NULL means "use the C library default". */
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
static void (*mm_free_fn_)(void *p) = NULL;
3357 1.1 christos
3358 1.1 christos void *
3359 1.1 christos event_mm_malloc_(size_t sz)
3360 1.1 christos {
3361 1.1 christos if (sz == 0)
3362 1.1 christos return NULL;
3363 1.1 christos
3364 1.1 christos if (mm_malloc_fn_)
3365 1.1 christos return mm_malloc_fn_(sz);
3366 1.1 christos else
3367 1.1 christos return malloc(sz);
3368 1.1 christos }
3369 1.1 christos
/* Allocate a zeroed array of 'count' elements of 'size' bytes each,
 * through the user malloc hook if installed, else through calloc().
 * Returns NULL (with errno set to ENOMEM on overflow/failure paths)
 * when either argument is 0 or allocation fails. */
void *
event_mm_calloc_(size_t count, size_t size)
{
	if (count == 0 || size == 0)
		return NULL;

	if (mm_malloc_fn_) {
		size_t sz = count * size;
		void *p = NULL;
		/* Reject count*size overflow before using the product. */
		if (count > EV_SIZE_MAX / size)
			goto error;
		p = mm_malloc_fn_(sz);
		if (p)
			return memset(p, 0, sz);
	} else {
		void *p = calloc(count, size);
#ifdef _WIN32
		/* Windows calloc doesn't reliably set ENOMEM */
		if (p == NULL)
			goto error;
#endif
		return p;
	}

error:
	errno = ENOMEM;
	return NULL;
}
3398 1.1 christos
/* Duplicate the NUL-terminated string 'str', using the user malloc hook
 * if installed, else the platform strdup.  Returns NULL with errno set
 * to EINVAL for a NULL input, or ENOMEM on allocation failure. */
char *
event_mm_strdup_(const char *str)
{
	if (!str) {
		errno = EINVAL;
		return NULL;
	}

	if (mm_malloc_fn_) {
		size_t ln = strlen(str);
		void *p = NULL;
		/* ln+1 below must not wrap around. */
		if (ln == EV_SIZE_MAX)
			goto error;
		p = mm_malloc_fn_(ln+1);
		if (p)
			return memcpy(p, str, ln+1);
	} else
#ifdef _WIN32
		return _strdup(str);
#else
		return strdup(str);
#endif

error:
	errno = ENOMEM;
	return NULL;
}
3426 1.1 christos
3427 1.1 christos void *
3428 1.1 christos event_mm_realloc_(void *ptr, size_t sz)
3429 1.1 christos {
3430 1.1 christos if (mm_realloc_fn_)
3431 1.1 christos return mm_realloc_fn_(ptr, sz);
3432 1.1 christos else
3433 1.1 christos return realloc(ptr, sz);
3434 1.1 christos }
3435 1.1 christos
3436 1.1 christos void
3437 1.1 christos event_mm_free_(void *ptr)
3438 1.1 christos {
3439 1.1 christos if (mm_free_fn_)
3440 1.1 christos mm_free_fn_(ptr);
3441 1.1 christos else
3442 1.1 christos free(ptr);
3443 1.1 christos }
3444 1.1 christos
/* Install replacement malloc/realloc/free hooks used by all event_mm_*
 * allocation helpers.  Passing NULL for a hook restores the C library
 * default for that operation. */
void
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
			void *(*realloc_fn)(void *ptr, size_t sz),
			void (*free_fn)(void *ptr))
{
	mm_malloc_fn_ = malloc_fn;
	mm_realloc_fn_ = realloc_fn;
	mm_free_fn_ = free_fn;
}
#endif
3455 1.1 christos
#ifdef EVENT__HAVE_EVENTFD
/* Read callback for the eventfd-based wakeup mechanism: drain the 8-byte
 * counter and clear the pending-notification flag.  An EAGAIN read error
 * is expected (the fd is nonblocking) and ignored. */
static void
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
{
	ev_uint64_t msg;
	ev_ssize_t r;
	struct event_base *base = arg;

	r = read(fd, (void*) &msg, sizeof(msg));
	if (r<0 && errno != EAGAIN) {
		event_sock_warn(fd, "Error reading from eventfd");
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#endif
3473 1.1 christos
/* Read callback for the pipe/socketpair wakeup mechanism: drain every
 * pending wakeup byte, then clear the pending-notification flag. */
static void
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
{
	unsigned char buf[1024];
	struct event_base *base = arg;
#ifdef _WIN32
	/* On Windows the notify fd is a socket, so use recv(). */
	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
		;
#else
	while (read(fd, (char*)buf, sizeof(buf)) > 0)
		;
#endif

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
3491 1.1 christos
3492 1.1 christos int
3493 1.1 christos evthread_make_base_notifiable(struct event_base *base)
3494 1.1 christos {
3495 1.1 christos int r;
3496 1.1 christos if (!base)
3497 1.1 christos return -1;
3498 1.1 christos
3499 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3500 1.1 christos r = evthread_make_base_notifiable_nolock_(base);
3501 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
3502 1.1 christos return r;
3503 1.1 christos }
3504 1.1 christos
/* Set up the cross-thread wakeup machinery for 'base' (lock already held).
 * Tries, in order: the kqueue backend's own wakeup, an eventfd, and
 * finally an internal pipe/socketpair; installs a matching drain callback
 * as an internal, top-priority read event.  Returns 0 on success or if
 * already notifiable, -1 on failure. */
static int
evthread_make_base_notifiable_nolock_(struct event_base *base)
{
	void (*cb)(evutil_socket_t, short, void *);
	int (*notify)(struct event_base *);

	if (base->th_notify_fn != NULL) {
		/* The base is already notifiable: we're doing fine. */
		return 0;
	}

#if defined(EVENT__HAVE_WORKING_KQUEUE)
	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
		base->th_notify_fn = event_kq_notify_base_;
		/* No need to add an event here; the backend can wake
		 * itself up just fine. */
		return 0;
	}
#endif

#ifdef EVENT__HAVE_EVENTFD
	base->th_notify_fd[0] = evutil_eventfd_(0,
	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
	if (base->th_notify_fd[0] >= 0) {
		/* eventfd needs only one fd; mark the second slot unused. */
		base->th_notify_fd[1] = -1;
		notify = evthread_notify_base_eventfd;
		cb = evthread_notify_drain_eventfd;
	} else
#endif
	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
		notify = evthread_notify_base_default;
		cb = evthread_notify_drain_default;
	} else {
		return -1;
	}

	base->th_notify_fn = notify;

	/* prepare an event that we can use for wakeup */
	event_assign(&base->th_notify, base, base->th_notify_fd[0],
				 EV_READ|EV_PERSIST, cb, base);

	/* we need to mark this as internal event */
	base->th_notify.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&base->th_notify, 0);

	return event_add_nolock_(&base->th_notify, NULL, 0);
}
3553 1.1 christos
/* Invoke 'fn(base, ev, arg)' once for every event known to 'base'
 * (lock already held): first the inserted events via the evmap, then
 * timeout-only events in the min-heap and the common-timeout lists, and
 * finally active events that were not reached above.  Stops early and
 * returns fn's value the first time it returns nonzero; returns 0 after
 * a full traversal. */
int
event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	int r, i;
	unsigned u;
	struct event *ev;

	/* Start out with all the EVLIST_INSERTED events. */
	if ((r = evmap_foreach_event_(base, fn, arg)))
		return r;

	/* Okay, now we deal with those events that have timeouts and are in
	 * the min-heap. */
	for (u = 0; u < base->timeheap.n; ++u) {
		ev = base->timeheap.p[u];
		if (ev->ev_flags & EVLIST_INSERTED) {
			/* we already processed this one */
			continue;
		}
		if ((r = fn(base, ev, arg)))
			return r;
	}

	/* Now for the events in one of the timeout queues.
	 * the min-heap. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		TAILQ_FOREACH(ev, &ctl->events,
		    ev_timeout_pos.ev_next_with_common_timeout) {
			if (ev->ev_flags & EVLIST_INSERTED) {
				/* we already processed this one */
				continue;
			}
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	/* Finally, we deal with all the active events that we haven't
	 * touched yet. */
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
				/* This isn't an event (evlist_init clear), or
				 * we already processed it. (inserted or
				 * timeout set */
				continue;
			}
			ev = event_callback_to_event(evcb);
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	return 0;
}
3613 1.1 christos
/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the inserted events.  'arg' is the FILE* to write to.
 * Always returns 0 so the traversal continues. */
static int
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
		return 0;

	fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
	    (e->ev_events&EV_READ)?" Read":"",
	    (e->ev_events&EV_WRITE)?" Write":"",
	    (e->ev_events&EV_CLOSED)?" EOF":"",
	    (e->ev_events&EV_SIGNAL)?" Signal":"",
	    (e->ev_events&EV_PERSIST)?" Persist":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
	if (e->ev_flags & EVLIST_TIMEOUT) {
		struct timeval tv;
		tv.tv_sec = e->ev_timeout.tv_sec;
		/* Strip the common-timeout magic bits before printing. */
		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
		fprintf(output, " Timeout=%ld.%06d",
		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
	}
	fputc('\n', output);

	return 0;
}
3646 1.1 christos
/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the active events.  'arg' is the FILE* to write to.
 * Always returns 0 so the traversal continues. */
static int
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
		return 0;

	fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
	    (e->ev_res&EV_READ)?" Read":"",
	    (e->ev_res&EV_WRITE)?" Write":"",
	    (e->ev_res&EV_CLOSED)?" EOF":"",
	    (e->ev_res&EV_SIGNAL)?" Signal":"",
	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");

	return 0;
}
3671 1.1 christos
3672 1.1 christos int
3673 1.1 christos event_base_foreach_event(struct event_base *base,
3674 1.1 christos event_base_foreach_event_cb fn, void *arg)
3675 1.1 christos {
3676 1.1 christos int r;
3677 1.1 christos if ((!fn) || (!base)) {
3678 1.1 christos return -1;
3679 1.1 christos }
3680 1.1 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3681 1.1 christos r = event_base_foreach_event_nolock_(base, fn, arg);
3682 1.1 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
3683 1.1 christos return r;
3684 1.1 christos }
3685 1.1 christos
3686 1.1 christos
/* Write a human-readable listing of every inserted and every active event
 * on 'base' to the stream 'output'.  Intended for debugging. */
void
event_base_dump_events(struct event_base *base, FILE *output)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	fprintf(output, "Inserted events:\n");
	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);

	fprintf(output, "Active events:\n");
	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
3698 1.1 christos
3699 1.1 christos void
3700 1.2 christos event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3701 1.2 christos {
3702 1.2 christos EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3703 1.2 christos evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3704 1.2 christos EVBASE_RELEASE_LOCK(base, th_base_lock);
3705 1.2 christos }
3706 1.2 christos
/* Activate every event on 'base' that is watching signal 'sig', with a
 * call count of 1. */
void
event_base_active_by_signal(struct event_base *base, int sig)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_signal_active_(base, sig, 1);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
3714 1.2 christos
3715 1.2 christos
/* Increment the count of "virtual" events on 'base' — placeholders that
 * keep the loop running without a real fd/signal/timeout — and track the
 * high-water mark. */
void
event_base_add_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->virtual_event_count++;
	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
3724 1.1 christos
/* Decrement the count of "virtual" events on 'base'.  When the count hits
 * zero, wake the loop (if another thread is waiting in it) so it can
 * notice it may now have nothing left to wait for. */
void
event_base_del_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	EVUTIL_ASSERT(base->virtual_event_count > 0);
	base->virtual_event_count--;
	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
3735 1.1 christos
3736 1.1 christos static void
3737 1.1 christos event_free_debug_globals_locks(void)
3738 1.1 christos {
3739 1.1 christos #ifndef EVENT__DISABLE_THREAD_SUPPORT
3740 1.1 christos #ifndef EVENT__DISABLE_DEBUG_MODE
3741 1.1 christos if (event_debug_map_lock_ != NULL) {
3742 1.1 christos EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3743 1.1 christos event_debug_map_lock_ = NULL;
3744 1.1 christos }
3745 1.1 christos #endif /* EVENT__DISABLE_DEBUG_MODE */
3746 1.1 christos #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3747 1.1 christos return;
3748 1.1 christos }
3749 1.1 christos
/* Tear down all debug-mode global state. */
static void
event_free_debug_globals(void)
{
	event_free_debug_globals_locks();
}
3755 1.1 christos
/* Tear down the signal-handling module's global state. */
static void
event_free_evsig_globals(void)
{
	evsig_free_globals_();
}
3761 1.1 christos
/* Tear down the evutil module's global state. */
static void
event_free_evutil_globals(void)
{
	evutil_free_globals_();
}
3767 1.1 christos
/* Tear down every module's global state, in dependency order. */
static void
event_free_globals(void)
{
	event_free_debug_globals();
	event_free_evsig_globals();
	event_free_evutil_globals();
}
3775 1.1 christos
/* Public entry point: release all global resources held by libevent.
 * Call only after every event_base has been freed. */
void
libevent_global_shutdown(void)
{
	event_free_globals();
}
3781 1.1 christos
3782 1.1 christos #ifndef EVENT__DISABLE_THREAD_SUPPORT
3783 1.1 christos int
3784 1.1 christos event_global_setup_locks_(const int enable_locks)
3785 1.1 christos {
3786 1.1 christos #ifndef EVENT__DISABLE_DEBUG_MODE
3787 1.1 christos EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3788 1.1 christos #endif
3789 1.1 christos if (evsig_global_setup_locks_(enable_locks) < 0)
3790 1.1 christos return -1;
3791 1.1 christos if (evutil_global_setup_locks_(enable_locks) < 0)
3792 1.1 christos return -1;
3793 1.1 christos if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3794 1.1 christos return -1;
3795 1.1 christos return 0;
3796 1.1 christos }
3797 1.1 christos #endif
3798 1.1 christos
/* Locked wrapper around event_base_assert_ok_nolock_(): acquires
 * th_base_lock, runs the integrity checks, and releases the lock. */
void
event_base_assert_ok_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_base_assert_ok_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
3806 1.1 christos
3807 1.1 christos void
3808 1.1 christos event_base_assert_ok_nolock_(struct event_base *base)
3809 1.1 christos {
3810 1.1 christos int i;
3811 1.1 christos int count;
3812 1.1 christos
3813 1.1 christos /* First do checks on the per-fd and per-signal lists */
3814 1.1 christos evmap_check_integrity_(base);
3815 1.1 christos
3816 1.1 christos /* Check the heap property */
3817 1.1 christos for (i = 1; i < (int)base->timeheap.n; ++i) {
3818 1.1 christos int parent = (i - 1) / 2;
3819 1.1 christos struct event *ev, *p_ev;
3820 1.1 christos ev = base->timeheap.p[i];
3821 1.1 christos p_ev = base->timeheap.p[parent];
3822 1.1 christos EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3823 1.1 christos EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3824 1.1 christos EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
3825 1.1 christos }
3826 1.1 christos
3827 1.1 christos /* Check that the common timeouts are fine */
3828 1.1 christos for (i = 0; i < base->n_common_timeouts; ++i) {
3829 1.1 christos struct common_timeout_list *ctl = base->common_timeout_queues[i];
3830 1.1 christos struct event *last=NULL, *ev;
3831 1.1 christos
3832 1.1 christos EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3833 1.1 christos
3834 1.1 christos TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3835 1.1 christos if (last)
3836 1.1 christos EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3837 1.1 christos EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3838 1.1 christos EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3839 1.1 christos EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
3840 1.1 christos last = ev;
3841 1.1 christos }
3842 1.1 christos }
3843 1.1 christos
3844 1.1 christos /* Check the active queues. */
3845 1.1 christos count = 0;
3846 1.1 christos for (i = 0; i < base->nactivequeues; ++i) {
3847 1.1 christos struct event_callback *evcb;
3848 1.1 christos EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
3849 1.1 christos TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3850 1.1 christos EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
3851 1.1 christos EVUTIL_ASSERT(evcb->evcb_pri == i);
3852 1.1 christos ++count;
3853 1.1 christos }
3854 1.1 christos }
3855 1.1 christos
3856 1.1 christos {
3857 1.1 christos struct event_callback *evcb;
3858 1.1 christos TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
3859 1.1 christos EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
3860 1.1 christos ++count;
3861 1.1 christos }
3862 1.1 christos }
3863 1.1 christos EVUTIL_ASSERT(count == base->event_count_active);
3864 1.1 christos }
3865