2 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "event2/event-config.h"
31 #define WIN32_LEAN_AND_MEAN
33 #undef WIN32_LEAN_AND_MEAN
35 #include <sys/types.h>
36 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
39 #include <sys/queue.h>
40 #ifdef _EVENT_HAVE_SYS_SOCKET_H
41 #include <sys/socket.h>
45 #ifdef _EVENT_HAVE_UNISTD_H
48 #ifdef _EVENT_HAVE_SYS_EVENTFD_H
49 #include <sys/eventfd.h>
57 #include "event2/event.h"
58 #include "event2/event_struct.h"
59 #include "event2/event_compat.h"
60 #include "event-internal.h"
61 #include "defer-internal.h"
62 #include "evthread-internal.h"
63 #include "event2/thread.h"
64 #include "event2/util.h"
65 #include "log-internal.h"
66 #include "evmap-internal.h"
67 #include "iocp-internal.h"
68 #include "changelist-internal.h"
69 #include "ht-internal.h"
70 #include "util-internal.h"
/* NOTE(review): this extract has its original line numbers baked into each
 * line and is missing many lines (#endif's, array entries, braces).  The
 * code below is preserved byte-for-byte; only commentary is added. */
/* One dispatcher vtable per compiled-in backend; each extern is defined in
 * its backend's own translation unit. */
72 #ifdef _EVENT_HAVE_EVENT_PORTS
73 extern const struct eventop evportops;
75 #ifdef _EVENT_HAVE_SELECT
76 extern const struct eventop selectops;
78 #ifdef _EVENT_HAVE_POLL
79 extern const struct eventop pollops;
81 #ifdef _EVENT_HAVE_EPOLL
82 extern const struct eventop epollops;
84 #ifdef _EVENT_HAVE_WORKING_KQUEUE
85 extern const struct eventop kqops;
87 #ifdef _EVENT_HAVE_DEVPOLL
88 extern const struct eventop devpollops;
91 extern const struct eventop win32ops;
94 /* Array of backends in order of preference. */
95 static const struct eventop *eventops[] = {
96 #ifdef _EVENT_HAVE_EVENT_PORTS
99 #ifdef _EVENT_HAVE_WORKING_KQUEUE
102 #ifdef _EVENT_HAVE_EPOLL
105 #ifdef _EVENT_HAVE_DEVPOLL
108 #ifdef _EVENT_HAVE_POLL
111 #ifdef _EVENT_HAVE_SELECT
120 /* Global state; deprecated */
121 struct event_base *event_global_current_base_ = NULL;
122 #define current_base event_global_current_base_
/* Nonzero once detect_monotonic() has found a usable monotonic clock. */
126 static int use_monotonic;
/* Forward declarations for the file-local helpers defined further below. */
129 static inline int event_add_internal(struct event *ev,
130 const struct timeval *tv, int tv_is_absolute);
131 static inline int event_del_internal(struct event *ev);
133 static void event_queue_insert(struct event_base *, struct event *, int);
134 static void event_queue_remove(struct event_base *, struct event *, int);
135 static int event_haveevents(struct event_base *);
137 static int event_process_active(struct event_base *);
139 static int timeout_next(struct event_base *, struct timeval **);
140 static void timeout_process(struct event_base *);
141 static void timeout_correct(struct event_base *, struct timeval *);
143 static inline void event_signal_closure(struct event_base *, struct event *ev);
144 static inline void event_persist_closure(struct event_base *, struct event *ev);
146 static int evthread_notify_base(struct event_base *base);
148 #ifndef _EVENT_DISABLE_DEBUG_MODE
149 /* These functions implement a hashtable of which 'struct event *' structures
150 * have been setup or added. We don't want to trust the content of the struct
151 * event itself, since we're trying to work through cases where an event gets
152 * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
/* One record per tracked event: 'node' is the hashtable linkage, 'ptr' is
 * the event being tracked.  NOTE(review): upstream also has 'added'/'setup'
 * bitfields here; they appear to be among the missing lines — confirm. */
155 struct event_debug_entry {
156 HT_ENTRY(event_debug_entry) node;
157 const struct event *ptr;
161 static inline unsigned
162 hash_debug_entry(const struct event_debug_entry *e)
164 /* We need to do this silliness to convince compilers that we
165 * honestly mean to cast e->ptr to an integer, and discard any
166 * part of it that doesn't fit in an unsigned.
168 unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
169 /* Our hashtable implementation is pretty sensitive to low bits,
170 * and every struct event is over 64 bytes in size, so we can
176 eq_debug_entry(const struct event_debug_entry *a,
177 const struct event_debug_entry *b)
179 return a->ptr == b->ptr;
/* Debug-mode state: a global flag, a "too late to enable" latch, and a
 * lock-protected hashtable of known events.  NOTE(review): this extract is
 * missing lines inside the macros below (tests, braces, "} while (0)"
 * terminators); bytes are preserved as found, commentary added only here,
 * outside any backslash-continued macro body. */
182 int _event_debug_mode_on = 0;
183 /* Set if it's too late to enable event_debug_mode. */
184 static int event_debug_mode_too_late = 0;
185 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
186 static void *_event_debug_map_lock = NULL;
188 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
191 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
193 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
194 eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
196 /* Macro: record that ev is now setup (that is, ready for an add) */
197 #define _event_debug_note_setup(ev) do { \
198 if (_event_debug_mode_on) { \
199 struct event_debug_entry *dent,find; \
201 EVLOCK_LOCK(_event_debug_map_lock, 0); \
202 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
206 dent = mm_malloc(sizeof(*dent)); \
209 "Out of memory in debugging code"); \
212 HT_INSERT(event_debug_map, &global_debug_map, dent); \
214 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
216 event_debug_mode_too_late = 1; \
218 /* Macro: record that ev is no longer setup */
219 #define _event_debug_note_teardown(ev) do { \
220 if (_event_debug_mode_on) { \
221 struct event_debug_entry *dent,find; \
223 EVLOCK_LOCK(_event_debug_map_lock, 0); \
224 dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
227 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
229 event_debug_mode_too_late = 1; \
231 /* Macro: record that ev is now added */
232 #define _event_debug_note_add(ev) do { \
233 if (_event_debug_mode_on) { \
234 struct event_debug_entry *dent,find; \
236 EVLOCK_LOCK(_event_debug_map_lock, 0); \
237 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
241 event_errx(_EVENT_ERR_ABORT, \
242 "%s: noting an add on a non-setup event %p" \
243 " (events: 0x%x, fd: %d, flags: 0x%x)", \
244 __func__, (ev), (ev)->ev_events, \
245 (ev)->ev_fd, (ev)->ev_flags); \
247 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
249 event_debug_mode_too_late = 1; \
251 /* Macro: record that ev is no longer added */
252 #define _event_debug_note_del(ev) do { \
253 if (_event_debug_mode_on) { \
254 struct event_debug_entry *dent,find; \
256 EVLOCK_LOCK(_event_debug_map_lock, 0); \
257 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
261 event_errx(_EVENT_ERR_ABORT, \
262 "%s: noting a del on a non-setup event %p" \
263 " (events: 0x%x, fd: %d, flags: 0x%x)", \
264 __func__, (ev), (ev)->ev_events, \
265 (ev)->ev_fd, (ev)->ev_flags); \
267 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
269 event_debug_mode_too_late = 1; \
271 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
272 #define _event_debug_assert_is_setup(ev) do { \
273 if (_event_debug_mode_on) { \
274 struct event_debug_entry *dent,find; \
276 EVLOCK_LOCK(_event_debug_map_lock, 0); \
277 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
279 event_errx(_EVENT_ERR_ABORT, \
280 "%s called on a non-initialized event %p" \
281 " (events: 0x%x, fd: %d, flags: 0x%x)", \
282 __func__, (ev), (ev)->ev_events, \
283 (ev)->ev_fd, (ev)->ev_flags); \
285 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
288 /* Macro: assert that ev is not added (i.e., okay to tear down or set
290 #define _event_debug_assert_not_added(ev) do { \
291 if (_event_debug_mode_on) { \
292 struct event_debug_entry *dent,find; \
294 EVLOCK_LOCK(_event_debug_map_lock, 0); \
295 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
296 if (dent && dent->added) { \
297 event_errx(_EVENT_ERR_ABORT, \
298 "%s called on an already added event %p" \
299 " (events: 0x%x, fd: %d, flags: 0x%x)", \
300 __func__, (ev), (ev)->ev_events, \
301 (ev)->ev_fd, (ev)->ev_flags); \
303 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
307 #define _event_debug_note_setup(ev) \
309 #define _event_debug_note_teardown(ev) \
311 #define _event_debug_note_add(ev) \
313 #define _event_debug_note_del(ev) \
315 #define _event_debug_assert_is_setup(ev) \
317 #define _event_debug_assert_not_added(ev) \
321 #define EVENT_BASE_ASSERT_LOCKED(base) \
322 EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
/* The first time this function is called, it sets use_monotonic to 1
 * if we have a clock function that supports monotonic time */
static void
detect_monotonic(void)
{
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec ts;
	static int use_monotonic_initialized = 0;

	if (use_monotonic_initialized)
		return;

	/* Probe once; on success every later gettime() call prefers the
	 * monotonic clock over the settable wall clock. */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;

	use_monotonic_initialized = 1;
#endif
}
343 /* How often (in seconds) do we check for changes in wall clock time relative
344 * to monotonic time? Set this to -1 for 'never.' */
345 #define CLOCK_SYNC_INTERVAL -1
347 /** Set 'tp' to the current time according to 'base'. We must hold the lock
348 * on 'base'. If there is a cached time, return it. Otherwise, use
349 * clock_gettime or gettimeofday as appropriate to find out the right time.
350 * Return 0 on success, -1 on failure.
353 gettime(struct event_base *base, struct timeval *tp)
355 EVENT_BASE_ASSERT_LOCKED(base);
357 if (base->tv_cache.tv_sec) {
358 *tp = base->tv_cache;
362 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
366 if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
369 tp->tv_sec = ts.tv_sec;
370 tp->tv_usec = ts.tv_nsec / 1000;
371 if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
374 evutil_gettimeofday(&tv,NULL);
375 evutil_timersub(&tv, tp, &base->tv_clock_diff);
376 base->last_updated_clock_diff = ts.tv_sec;
383 return (evutil_gettimeofday(tp, NULL));
387 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
393 return evutil_gettimeofday(tv, NULL);
396 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
397 if (base->tv_cache.tv_sec == 0) {
398 r = evutil_gettimeofday(tv, NULL);
400 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
401 evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
403 *tv = base->tv_cache;
407 EVBASE_RELEASE_LOCK(base, th_base_lock);
411 /** Make 'base' have no current cached time. */
413 clear_time_cache(struct event_base *base)
415 base->tv_cache.tv_sec = 0;
418 /** Replace the cached time in 'base' with the current time. */
420 update_time_cache(struct event_base *base)
422 base->tv_cache.tv_sec = 0;
423 if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
424 gettime(base, &base->tv_cache);
430 struct event_base *base = event_base_new_with_config(NULL);
433 event_errx(1, "%s: Unable to construct event_base", __func__);
445 struct event_base *base = NULL;
446 struct event_config *cfg = event_config_new();
448 base = event_base_new_with_config(cfg);
449 event_config_free(cfg);
454 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
457 event_config_is_avoided_method(const struct event_config *cfg,
460 struct event_config_entry *entry;
462 TAILQ_FOREACH(entry, &cfg->entries, next) {
463 if (entry->avoid_method != NULL &&
464 strcmp(entry->avoid_method, method) == 0)
471 /** Return true iff 'method' is disabled according to the environment. */
473 event_is_method_disabled(const char *name)
475 char environment[64];
478 evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
479 for (i = 8; environment[i] != '\0'; ++i)
480 environment[i] = EVUTIL_TOUPPER(environment[i]);
481 /* Note that evutil_getenv() ignores the environment entirely if
483 return (evutil_getenv(environment) != NULL);
487 event_base_get_features(const struct event_base *base)
489 return base->evsel->features;
493 event_deferred_cb_queue_init(struct deferred_cb_queue *cb)
495 memset(cb, 0, sizeof(struct deferred_cb_queue));
496 TAILQ_INIT(&cb->deferred_cb_list);
/** Helper for the deferred_cb queue: wake up the event base. */
static void
notify_base_cbq_callback(struct deferred_cb_queue *cb, void *baseptr)
{
	struct event_base *base = baseptr;
	/* Only poke the base when another thread is actually blocked in it. */
	if (EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
}
508 struct deferred_cb_queue *
509 event_base_get_deferred_cb_queue(struct event_base *base)
511 return base ? &base->defer_queue : NULL;
515 event_enable_debug_mode(void)
517 #ifndef _EVENT_DISABLE_DEBUG_MODE
518 if (_event_debug_mode_on)
519 event_errx(1, "%s was called twice!", __func__);
520 if (event_debug_mode_too_late)
521 event_errx(1, "%s must be called *before* creating any events "
522 "or event_bases",__func__);
524 _event_debug_mode_on = 1;
526 HT_INIT(event_debug_map, &global_debug_map);
532 event_disable_debug_mode(void)
534 struct event_debug_entry **ent, *victim;
536 EVLOCK_LOCK(_event_debug_map_lock, 0);
537 for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
539 ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
542 HT_CLEAR(event_debug_map, &global_debug_map);
543 EVLOCK_UNLOCK(_event_debug_map_lock , 0);
/*
 * Construct a new event_base honoring 'cfg' (may be NULL for defaults):
 * allocate, initialize queues/maps, pick the first backend in 'eventops'
 * not ruled out by cfg or the environment, set up priorities and
 * (optionally) threading and IOCP.
 * NOTE(review): this extract is missing many of the original lines
 * (function header, braces, returns); bytes preserved as found.
 */
548 event_base_new_with_config(const struct event_config *cfg)
551 struct event_base *base;
552 int should_check_environment;
554 #ifndef _EVENT_DISABLE_DEBUG_MODE
555 event_debug_mode_too_late = 1;
558 if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
559 event_warn("%s: calloc", __func__);
563 gettime(base, &base->event_tv);
565 min_heap_ctor(&base->timeheap);
566 TAILQ_INIT(&base->eventqueue);
/* -1 marks "no socket yet" for the signal and notify socketpairs. */
567 base->sig.ev_signal_pair[0] = -1;
568 base->sig.ev_signal_pair[1] = -1;
569 base->th_notify_fd[0] = -1;
570 base->th_notify_fd[1] = -1;
572 event_deferred_cb_queue_init(&base->defer_queue);
573 base->defer_queue.notify_fn = notify_base_cbq_callback;
574 base->defer_queue.notify_arg = base;
576 base->flags = cfg->flags;
578 evmap_io_initmap(&base->io);
579 evmap_signal_initmap(&base->sigmap);
580 event_changelist_init(&base->changelist);
584 should_check_environment =
585 !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
/* Try each backend in preference order until one initializes. */
587 for (i = 0; eventops[i] && !base->evbase; i++) {
589 /* determine if this backend should be avoided */
590 if (event_config_is_avoided_method(cfg,
593 if ((eventops[i]->features & cfg->require_features)
594 != cfg->require_features)
598 /* also obey the environment variables */
599 if (should_check_environment &&
600 event_is_method_disabled(eventops[i]->name))
603 base->evsel = eventops[i];
605 base->evbase = base->evsel->init(base);
608 if (base->evbase == NULL) {
609 event_warnx("%s: no event mechanism available",
612 event_base_free(base);
616 if (evutil_getenv("EVENT_SHOW_METHOD"))
617 event_msgx("libevent using: %s", base->evsel->name);
619 /* allocate a single active event queue */
620 if (event_base_priority_init(base, 1) < 0) {
621 event_base_free(base);
625 /* prepare for threading */
627 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
628 if (EVTHREAD_LOCKING_ENABLED() &&
629 (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
631 EVTHREAD_ALLOC_LOCK(base->th_base_lock,
632 EVTHREAD_LOCKTYPE_RECURSIVE);
/* The defer queue shares the base lock rather than owning its own. */
633 base->defer_queue.lock = base->th_base_lock;
634 EVTHREAD_ALLOC_COND(base->current_event_cond);
635 r = evthread_make_base_notifiable(base);
637 event_warnx("%s: Unable to make base notifiable.", __func__);
638 event_base_free(base);
645 if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
646 event_base_start_iocp(base, cfg->n_cpus_hint);
/** Launch the IOCP port for 'base' with 'n_cpus' worker threads.
 * Returns 0 on success (or if already started), -1 on failure or on
 * platforms without IOCP support. */
int
event_base_start_iocp(struct event_base *base, int n_cpus)
{
#ifdef WIN32
	if (base->iocp)
		return 0;
	base->iocp = event_iocp_port_launch(n_cpus);
	if (!base->iocp) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}
/** Shut down the IOCP port for 'base', waiting indefinitely (-1) for
 * outstanding operations; no-op when IOCP is absent or unused. */
void
event_base_stop_iocp(struct event_base *base)
{
#ifdef WIN32
	int rv;

	if (!base->iocp)
		return;
	rv = event_iocp_shutdown(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}
/*
 * Destroy an event_base: delete every non-internal event still attached
 * (event list, timeout minheap, common-timeout queues, active queues),
 * tear down the backend, and release all owned memory/locks.
 * NOTE(review): this extract is missing many original lines (braces,
 * deletions, frees); bytes preserved as found, commentary only added.
 */
684 event_base_free(struct event_base *base)
688 /* XXXX grab the lock? If there is contention when one thread frees
689 * the base, then the contending thread will be very sad soon. */
691 /* event_base_free(NULL) is how to free the current_base if we
692 * made it with event_init and forgot to hold a reference to it. */
693 if (base == NULL && current_base)
695 /* If we're freeing current_base, there won't be a current_base. */
696 if (base == current_base)
698 /* Don't actually free NULL. */
700 event_warnx("%s: no base to free", __func__);
703 /* XXX(niels) - check for internal events first */
706 event_base_stop_iocp(base);
709 /* threading fds if we have them */
710 if (base->th_notify_fd[0] != -1) {
711 event_del(&base->th_notify);
712 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
713 if (base->th_notify_fd[1] != -1)
714 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
715 base->th_notify_fd[0] = -1;
716 base->th_notify_fd[1] = -1;
717 event_debug_unassign(&base->th_notify);
720 /* Delete all non-internal events. */
721 for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
722 struct event *next = TAILQ_NEXT(ev, ev_next);
723 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
729 while ((ev = min_heap_top(&base->timeheap)) != NULL) {
733 for (i = 0; i < base->n_common_timeouts; ++i) {
734 struct common_timeout_list *ctl =
735 base->common_timeout_queues[i];
736 event_del(&ctl->timeout_event); /* Internal; doesn't count */
737 event_debug_unassign(&ctl->timeout_event);
738 for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
739 struct event *next = TAILQ_NEXT(ev,
740 ev_timeout_pos.ev_next_with_common_timeout);
741 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
749 if (base->common_timeout_queues)
750 mm_free(base->common_timeout_queues);
752 for (i = 0; i < base->nactivequeues; ++i) {
753 for (ev = TAILQ_FIRST(&base->activequeues[i]); ev; ) {
754 struct event *next = TAILQ_NEXT(ev, ev_active_next);
755 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
764 event_debug(("%s: %d events were still set in base",
765 __func__, n_deleted));
/* Backend teardown must happen before freeing the queues it may touch. */
767 if (base->evsel != NULL && base->evsel->dealloc != NULL)
768 base->evsel->dealloc(base);
770 for (i = 0; i < base->nactivequeues; ++i)
771 EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
773 EVUTIL_ASSERT(min_heap_empty(&base->timeheap));
774 min_heap_dtor(&base->timeheap);
776 mm_free(base->activequeues);
778 EVUTIL_ASSERT(TAILQ_EMPTY(&base->eventqueue));
780 evmap_io_clear(&base->io);
781 evmap_signal_clear(&base->sigmap);
782 event_changelist_freemem(&base->changelist);
784 EVTHREAD_FREE_LOCK(base->th_base_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
785 EVTHREAD_FREE_COND(base->current_event_cond);
790 /* reinitialize the event base after a fork */
/*
 * Rebuild the backend state of 'base' in the child process after fork():
 * drop the pre-fork signal socketpair and thread-notify fds, re-run the
 * backend's init, then re-register every inserted I/O and signal event.
 * NOTE(review): this extract is missing many original lines (braces,
 * returns, 'was_notifiable = 1'); bytes preserved as found.
 */
792 event_reinit(struct event_base *base)
794 const struct eventop *evsel;
797 int was_notifiable = 0;
799 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
804 /* Right now, reinit always takes effect, since even if the
805 backend doesn't require it, the signal socketpair code does.
809 /* check if this event mechanism requires reinit */
810 if (!evsel->need_reinit)
814 /* prevent internal delete */
815 if (base->sig.ev_signal_added) {
816 /* we cannot call event_del here because the base has
817 * not been reinitialized yet. */
818 event_queue_remove(base, &base->sig.ev_signal,
820 if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
821 event_queue_remove(base, &base->sig.ev_signal,
823 if (base->sig.ev_signal_pair[0] != -1)
824 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
825 if (base->sig.ev_signal_pair[1] != -1)
826 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
827 base->sig.ev_signal_added = 0;
829 if (base->th_notify_fd[0] != -1) {
830 /* we cannot call event_del here because the base has
831 * not been reinitialized yet. */
833 event_queue_remove(base, &base->th_notify,
835 if (base->th_notify.ev_flags & EVLIST_ACTIVE)
836 event_queue_remove(base, &base->th_notify,
/* NOTE(review): resetting sig.ev_signal_added inside the th_notify
 * branch looks suspicious (already cleared above) — confirm against
 * upstream. */
838 base->sig.ev_signal_added = 0;
839 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
840 if (base->th_notify_fd[1] != -1)
841 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
842 base->th_notify_fd[0] = -1;
843 base->th_notify_fd[1] = -1;
844 event_debug_unassign(&base->th_notify);
847 if (base->evsel->dealloc != NULL)
848 base->evsel->dealloc(base);
849 base->evbase = evsel->init(base);
850 if (base->evbase == NULL) {
851 event_errx(1, "%s: could not reinitialize event mechanism",
857 event_changelist_freemem(&base->changelist); /* XXX */
858 evmap_io_clear(&base->io);
859 evmap_signal_clear(&base->sigmap);
/* Re-register every still-inserted event with the fresh backend. */
861 TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
862 if (ev->ev_events & (EV_READ|EV_WRITE)) {
863 if (ev == &base->sig.ev_signal) {
864 /* If we run into the ev_signal event, it's only
865 * in eventqueue because some signal event was
866 * added, which made evsig_add re-add ev_signal.
867 * So don't double-add it. */
870 if (evmap_io_add(base, ev->ev_fd, ev) == -1)
872 } else if (ev->ev_events & EV_SIGNAL) {
873 if (evmap_signal_add(base, (int)ev->ev_fd, ev) == -1)
878 if (was_notifiable && res == 0)
879 res = evthread_make_base_notifiable(base);
882 EVBASE_RELEASE_LOCK(base, th_base_lock);
887 event_get_supported_methods(void)
889 static const char **methods = NULL;
890 const struct eventop **method;
894 /* count all methods */
895 for (method = &eventops[0]; *method != NULL; ++method) {
899 /* allocate one more than we need for the NULL pointer */
900 tmp = mm_calloc((i + 1), sizeof(char *));
904 /* populate the array with the supported methods */
905 for (k = 0, i = 0; eventops[k] != NULL; ++k) {
906 tmp[i++] = eventops[k]->name;
911 mm_free((char**)methods);
918 struct event_config *
919 event_config_new(void)
921 struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
926 TAILQ_INIT(&cfg->entries);
932 event_config_entry_free(struct event_config_entry *entry)
934 if (entry->avoid_method != NULL)
935 mm_free((char *)entry->avoid_method);
940 event_config_free(struct event_config *cfg)
942 struct event_config_entry *entry;
944 while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
945 TAILQ_REMOVE(&cfg->entries, entry, next);
946 event_config_entry_free(entry);
952 event_config_set_flag(struct event_config *cfg, int flag)
961 event_config_avoid_method(struct event_config *cfg, const char *method)
963 struct event_config_entry *entry = mm_malloc(sizeof(*entry));
967 if ((entry->avoid_method = mm_strdup(method)) == NULL) {
972 TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
978 event_config_require_features(struct event_config *cfg,
983 cfg->require_features = features;
988 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
992 cfg->n_cpus_hint = cpus;
997 event_priority_init(int npriorities)
999 return event_base_priority_init(current_base, npriorities);
1003 event_base_priority_init(struct event_base *base, int npriorities)
1007 if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1008 || npriorities >= EVENT_MAX_PRIORITIES)
1011 if (npriorities == base->nactivequeues)
1014 if (base->nactivequeues) {
1015 mm_free(base->activequeues);
1016 base->nactivequeues = 0;
1019 /* Allocate our priority queues */
1020 base->activequeues = (struct event_list *)
1021 mm_calloc(npriorities, sizeof(struct event_list));
1022 if (base->activequeues == NULL) {
1023 event_warn("%s: calloc", __func__);
1026 base->nactivequeues = npriorities;
1028 for (i = 0; i < base->nactivequeues; ++i) {
1029 TAILQ_INIT(&base->activequeues[i]);
1035 /* Returns true iff we're currently watching any events. */
1037 event_haveevents(struct event_base *base)
1039 /* Caller must hold th_base_lock */
1040 return (base->virtual_event_count > 0 || base->event_count > 0);
1043 /* "closure" function called when processing active signal events */
1045 event_signal_closure(struct event_base *base, struct event *ev)
1050 /* Allows deletes to work */
1051 ncalls = ev->ev_ncalls;
1053 ev->ev_pncalls = &ncalls;
1054 EVBASE_RELEASE_LOCK(base, th_base_lock);
1057 ev->ev_ncalls = ncalls;
1059 ev->ev_pncalls = NULL;
1060 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1062 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1063 should_break = base->event_break;
1064 EVBASE_RELEASE_LOCK(base, th_base_lock);
1068 ev->ev_pncalls = NULL;
1074 /* Common timeouts are special timeouts that are handled as queues rather than
1075 * in the minheap. This is more efficient than the minheap if we happen to
1076 * know that we're going to get several thousands of timeout events all with
1077 * the same timeout value.
1079 * Since all our timeout handling code assumes timevals can be copied,
1080 * assigned, etc, we can't use "magic pointer" to encode these common
1081 * timeouts. Searching through a list to see if every timeout is common could
1082 * also get inefficient. Instead, we take advantage of the fact that tv_usec
1083 * is 32 bits long, but only uses 20 of those bits (since it can never be over
1084 * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
1085 * of index into the event_base's array of common timeouts.
/* Layout of a common-timeout tv_usec: low 20 bits hold the real
 * microseconds, bits 20-27 hold the index into the common-timeout array,
 * and the top nibble holds the 0x5 magic marker. */
1088 #define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
1089 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1090 #define COMMON_TIMEOUT_IDX_SHIFT 20
1091 #define COMMON_TIMEOUT_MASK 0xf0000000
1092 #define COMMON_TIMEOUT_MAGIC 0x50000000
/* Extract the common-timeout array index encoded in tv->tv_usec. */
1094 #define COMMON_TIMEOUT_IDX(tv) \
1095 (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
1097 /** Return true iff if 'tv' is a common timeout in 'base' */
1099 is_common_timeout(const struct timeval *tv,
1100 const struct event_base *base)
1103 if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1105 idx = COMMON_TIMEOUT_IDX(tv);
1106 return idx < base->n_common_timeouts;
1109 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1110 * one is a common timeout. */
1112 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1114 return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1115 (tv2->tv_usec & ~MICROSECONDS_MASK);
1118 /** Requires that 'tv' is a common timeout. Return the corresponding
1119 * common_timeout_list. */
1120 static inline struct common_timeout_list *
1121 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1123 return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1128 common_timeout_ok(const struct timeval *tv,
1129 struct event_base *base)
1131 const struct timeval *expect =
1132 &get_common_timeout_list(base, tv)->duration;
1133 return tv->tv_sec == expect->tv_sec &&
1134 tv->tv_usec == expect->tv_usec;
1138 /* Add the timeout for the first event in given common timeout list to the
1139 * event_base's minheap. */
1141 common_timeout_schedule(struct common_timeout_list *ctl,
1142 const struct timeval *now, struct event *head)
1144 struct timeval timeout = head->ev_timeout;
1145 timeout.tv_usec &= MICROSECONDS_MASK;
1146 event_add_internal(&ctl->timeout_event, &timeout, 1);
1149 /* Callback: invoked when the timeout for a common timeout queue triggers.
1150 * This means that (at least) the first event in that queue should be run,
1151 * and the timeout should be rescheduled if there are more events. */
1153 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1156 struct common_timeout_list *ctl = arg;
1157 struct event_base *base = ctl->base;
1158 struct event *ev = NULL;
1159 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1160 gettime(base, &now);
1162 ev = TAILQ_FIRST(&ctl->events);
1163 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1164 (ev->ev_timeout.tv_sec == now.tv_sec &&
1165 (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1167 event_del_internal(ev);
1168 event_active_nolock(ev, EV_TIMEOUT, 1);
1171 common_timeout_schedule(ctl, &now, ev);
1172 EVBASE_RELEASE_LOCK(base, th_base_lock);
1175 #define MAX_COMMON_TIMEOUTS 256
1177 const struct timeval *
1178 event_base_init_common_timeout(struct event_base *base,
1179 const struct timeval *duration)
1183 const struct timeval *result=NULL;
1184 struct common_timeout_list *new_ctl;
1186 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1187 if (duration->tv_usec > 1000000) {
1188 memcpy(&tv, duration, sizeof(struct timeval));
1189 if (is_common_timeout(duration, base))
1190 tv.tv_usec &= MICROSECONDS_MASK;
1191 tv.tv_sec += tv.tv_usec / 1000000;
1192 tv.tv_usec %= 1000000;
1195 for (i = 0; i < base->n_common_timeouts; ++i) {
1196 const struct common_timeout_list *ctl =
1197 base->common_timeout_queues[i];
1198 if (duration->tv_sec == ctl->duration.tv_sec &&
1199 duration->tv_usec ==
1200 (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1201 EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1202 result = &ctl->duration;
1206 if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1207 event_warnx("%s: Too many common timeouts already in use; "
1208 "we only support %d per event_base", __func__,
1209 MAX_COMMON_TIMEOUTS);
1212 if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1213 int n = base->n_common_timeouts < 16 ? 16 :
1214 base->n_common_timeouts*2;
1215 struct common_timeout_list **newqueues =
1216 mm_realloc(base->common_timeout_queues,
1217 n*sizeof(struct common_timeout_queue *));
1219 event_warn("%s: realloc",__func__);
1222 base->n_common_timeouts_allocated = n;
1223 base->common_timeout_queues = newqueues;
1225 new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1227 event_warn("%s: calloc",__func__);
1230 TAILQ_INIT(&new_ctl->events);
1231 new_ctl->duration.tv_sec = duration->tv_sec;
1232 new_ctl->duration.tv_usec =
1233 duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1234 (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1235 evtimer_assign(&new_ctl->timeout_event, base,
1236 common_timeout_callback, new_ctl);
1237 new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1238 event_priority_set(&new_ctl->timeout_event, 0);
1239 new_ctl->base = base;
1240 base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1241 result = &new_ctl->duration;
1245 EVUTIL_ASSERT(is_common_timeout(result, base));
1247 EVBASE_RELEASE_LOCK(base, th_base_lock);
1251 /* Closure function invoked when we're activating a persistent event. */
1253 event_persist_closure(struct event_base *base, struct event *ev)
1255 /* reschedule the persistent event if we have a timeout. */
1256 if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1257 /* If there was a timeout, we want it to run at an interval of
1258 * ev_io_timeout after the last time it was _scheduled_ for,
1259 * not ev_io_timeout after _now_. If it fired for another
1260 * reason, though, the timeout ought to start ticking _now_. */
1261 struct timeval run_at;
1262 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1263 &ev->ev_io_timeout));
1264 if (is_common_timeout(&ev->ev_timeout, base)) {
1265 ev_uint32_t usec_mask;
1266 struct timeval delay, relative_to;
1267 delay = ev->ev_io_timeout;
1268 usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1269 delay.tv_usec &= MICROSECONDS_MASK;
1270 if (ev->ev_res & EV_TIMEOUT) {
1271 relative_to = ev->ev_timeout;
1272 relative_to.tv_usec &= MICROSECONDS_MASK;
1274 gettime(base, &relative_to);
1276 evutil_timeradd(&relative_to, &delay, &run_at);
1277 run_at.tv_usec |= usec_mask;
1279 struct timeval relative_to;
1280 if (ev->ev_res & EV_TIMEOUT) {
1281 relative_to = ev->ev_timeout;
1283 gettime(base, &relative_to);
1285 evutil_timeradd(&ev->ev_io_timeout, &relative_to,
1288 event_add_internal(ev, &run_at, 1);
1290 EVBASE_RELEASE_LOCK(base, th_base_lock);
1291 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1295 Helper for event_process_active to process all the events in a single queue,
1296 releasing the lock as we go. This function requires that the lock be held
1297 when it's invoked. Returns -1 if we get a signal or an event_break that
1298 means we should stop processing any active events now. Otherwise returns
1299 the number of non-internal events that we processed.
/* NOTE(review): this extract is missing many original lines (braces,
 * 'break' statements, the event_debug argument list); bytes preserved
 * as found, commentary only added. */
1302 event_process_active_single_queue(struct event_base *base,
1303 struct event_list *activeq)
1308 EVUTIL_ASSERT(activeq != NULL);
/* Re-read the queue head each pass: callbacks may add/remove events. */
1310 for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
1311 if (ev->ev_events & EV_PERSIST)
1312 event_queue_remove(base, ev, EVLIST_ACTIVE);
1314 event_del_internal(ev);
1315 if (!(ev->ev_flags & EVLIST_INTERNAL))
1319 "event_process_active: event: %p, %s%scall %p",
1321 ev->ev_res & EV_READ ? "EV_READ " : " ",
1322 ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1325 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
/* Publish which event is running so concurrent del/free can wait. */
1326 base->current_event = ev;
1327 base->current_event_waiters = 0;
/* Dispatch to the closure matching the event's kind; each closure
 * releases the base lock around the user callback. */
1330 switch (ev->ev_closure) {
1331 case EV_CLOSURE_SIGNAL:
1332 event_signal_closure(base, ev);
1334 case EV_CLOSURE_PERSIST:
1335 event_persist_closure(base, ev);
1338 case EV_CLOSURE_NONE:
1339 EVBASE_RELEASE_LOCK(base, th_base_lock);
1341 ev->ev_fd, ev->ev_res, ev->ev_arg);
1345 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1346 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1347 base->current_event = NULL;
1348 if (base->current_event_waiters) {
1349 base->current_event_waiters = 0;
1350 EVTHREAD_COND_BROADCAST(base->current_event_cond);
1354 if (base->event_break)
1361 Process up to MAX_DEFERRED of the deferred_cb entries in 'queue'. If
1362 *breakptr becomes set to 1, stop. Requires that we start out holding
1363 the lock on 'queue'; releases the lock around 'queue' for each deferred_cb
/* NOTE(review): fragmentary excerpt. The MAX_DEFERRED cap bounds how long
 * one pass can monopolize the loop when callbacks keep re-scheduling. */
1367 event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
1370 struct deferred_cb *cb;
1372 #define MAX_DEFERRED 16
1373 while ((cb = TAILQ_FIRST(&queue->deferred_cb_list))) {
1375 TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
1376 --queue->active_count;
/* Run the callback unlocked; it may re-schedule itself. */
1377 UNLOCK_DEFERRED_QUEUE(queue);
1379 cb->cb(cb, cb->arg);
1381 LOCK_DEFERRED_QUEUE(queue);
1384 if (++count == MAX_DEFERRED)
1392 * Active events are stored in priority queues. Lower priorities are always
1393 * processed before higher priorities. Low priority events can starve high
/* Caller must hold th_base_lock; NOTE(review): fragmentary excerpt. */
1398 event_process_active(struct event_base *base)
1400 /* Caller must hold th_base_lock */
1401 struct event_list *activeq = NULL;
/* Scan queues in priority order; run only the first non-empty one. */
1404 for (i = 0; i < base->nactivequeues; ++i) {
1405 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1406 activeq = &base->activequeues[i];
1407 c = event_process_active_single_queue(base, activeq);
1411 break; /* Processed a real event; do not
1412 * consider lower-priority events */
1413 /* If we get here, all of the events we processed
1414 * were internal. Continue. */
1418 event_process_deferred_callbacks(&base->defer_queue,&base->event_break);
1423 * Wait continuously for events. We exit only if no events are left.
1427 event_dispatch(void)
1429 return (event_loop(0));
1433 event_base_dispatch(struct event_base *event_base)
1435 return (event_base_loop(event_base, 0));
1439 event_base_get_method(const struct event_base *base)
1441 EVUTIL_ASSERT(base);
1442 return (base->evsel->name);
1445 /** Callback: used to implement event_base_loopexit by telling the event_base
1446 * that it's time to exit its loop. */
1448 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1450 struct event_base *base = arg;
1451 base->event_gotterm = 1;
1455 event_loopexit(const struct timeval *tv)
1457 return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1462 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1464 return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1469 event_loopbreak(void)
1471 return (event_base_loopbreak(current_base));
1475 event_base_loopbreak(struct event_base *event_base)
1478 if (event_base == NULL)
1481 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1482 event_base->event_break = 1;
1484 if (EVBASE_NEED_NOTIFY(event_base)) {
1485 r = evthread_notify_base(event_base);
1489 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1494 event_base_got_break(struct event_base *event_base)
1497 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1498 res = event_base->event_break;
1499 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1504 event_base_got_exit(struct event_base *event_base)
1507 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1508 res = event_base->event_gotterm;
1509 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1513 /* not thread safe */
1516 event_loop(int flags)
1518 return event_base_loop(current_base, flags);
/* Core dispatch loop: waits for events (honoring EVLOOP_ONCE/EVLOOP_NONBLOCK),
 * processes timeouts and active callbacks until asked to stop or no events
 * remain. NOTE(review): fragmentary excerpt; several lines are elided. */
1522 event_base_loop(struct event_base *base, int flags)
1524 const struct eventop *evsel = base->evsel;
1526 struct timeval *tv_p;
1527 int res, done, retval = 0;
1529 /* Grab the lock. We will release it inside evsel.dispatch, and again
1530 * as we invoke user callbacks. */
1531 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
/* Guard against nested/concurrent loops on the same base. */
1533 if (base->running_loop) {
1534 event_warnx("%s: reentrant invocation. Only one event_base_loop"
1535 " can run on each event_base at once.", __func__);
1536 EVBASE_RELEASE_LOCK(base, th_base_lock);
1540 base->running_loop = 1;
1542 clear_time_cache(base);
1544 if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1545 evsig_set_base(base);
1549 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
/* Record which thread owns the loop for EVBASE_IN_THREAD checks. */
1550 base->th_owner_id = EVTHREAD_GET_ID();
1553 base->event_gotterm = base->event_break = 0;
1556 /* Terminate the loop if we have been asked to */
1557 if (base->event_gotterm) {
1561 if (base->event_break) {
/* Compensate if the wall clock jumped backwards. */
1565 timeout_correct(base, &tv);
1568 if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1569 timeout_next(base, &tv_p);
1572 * if we have active events, we just poll new events
1575 evutil_timerclear(&tv);
1578 /* If we have no events, we just exit */
1579 if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1580 event_debug(("%s: no events registered.", __func__));
1585 /* update last old time */
1586 gettime(base, &base->event_tv);
1588 clear_time_cache(base);
/* Backend wait (epoll/kqueue/select/...); releases the lock internally. */
1590 res = evsel->dispatch(base, tv_p);
1593 event_debug(("%s: dispatch returned unsuccessfully.",
1599 update_time_cache(base);
1601 timeout_process(base);
1603 if (N_ACTIVE_CALLBACKS(base)) {
1604 int n = event_process_active(base);
1605 if ((flags & EVLOOP_ONCE)
1606 && N_ACTIVE_CALLBACKS(base) == 0
1609 } else if (flags & EVLOOP_NONBLOCK)
1612 event_debug(("%s: asked to terminate loop.", __func__));
1615 clear_time_cache(base);
1616 base->running_loop = 0;
1618 EVBASE_RELEASE_LOCK(base, th_base_lock);
1623 /* Sets up an event for processing once */
1627 void (*cb)(evutil_socket_t, short, void *);
1631 /* One-time callback to implement event_base_once: invokes the user callback,
1632 * then deletes the allocated storage */
1634 event_once_cb(evutil_socket_t fd, short events, void *arg)
1636 struct event_once *eonce = arg;
1638 (*eonce->cb)(fd, events, eonce->arg);
1639 event_debug_unassign(&eonce->ev);
1643 /* not threadsafe, event scheduled once. */
1645 event_once(evutil_socket_t fd, short events,
1646 void (*callback)(evutil_socket_t, short, void *),
1647 void *arg, const struct timeval *tv)
1649 return event_base_once(current_base, fd, events, callback, arg, tv);
1652 /* Schedules an event once */
1654 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
1655 void (*callback)(evutil_socket_t, short, void *),
1656 void *arg, const struct timeval *tv)
1658 struct event_once *eonce;
1662 /* We cannot support signals that just fire once, or persistent
1664 if (events & (EV_SIGNAL|EV_PERSIST))
1667 if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
1670 eonce->cb = callback;
1673 if (events == EV_TIMEOUT) {
1675 evutil_timerclear(&etv);
1679 evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
1680 } else if (events & (EV_READ|EV_WRITE)) {
1681 events &= EV_READ|EV_WRITE;
1683 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
1685 /* Bad event combination */
1691 res = event_add(&eonce->ev, tv);
/* Initialize an already-allocated struct event with fd, event mask, callback
 * and argument. Returns 0 on success, -1 on bad flag combinations.
 * NOTE(review): fragmentary excerpt; some lines are elided. */
1701 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
1704 base = current_base;
1706 _event_debug_assert_not_added(ev);
1710 ev->ev_callback = callback;
1713 ev->ev_events = events;
1715 ev->ev_flags = EVLIST_INIT;
1717 ev->ev_pncalls = NULL;
1719 if (events & EV_SIGNAL) {
/* Signal events are exclusive of I/O events on the same struct. */
1720 if ((events & (EV_READ|EV_WRITE)) != 0) {
1721 event_warnx("%s: EV_SIGNAL is not compatible with "
1722 "EV_READ or EV_WRITE", __func__);
1725 ev->ev_closure = EV_CLOSURE_SIGNAL;
1727 if (events & EV_PERSIST) {
1728 evutil_timerclear(&ev->ev_io_timeout);
1729 ev->ev_closure = EV_CLOSURE_PERSIST;
1731 ev->ev_closure = EV_CLOSURE_NONE;
1735 min_heap_elem_init(ev);
1738 /* by default, we put new events into the middle priority */
1739 ev->ev_pri = base->nactivequeues / 2;
1742 _event_debug_note_setup(ev);
1748 event_base_set(struct event_base *base, struct event *ev)
1750 /* Only innocent events may be assigned to a different base */
1751 if (ev->ev_flags != EVLIST_INIT)
1754 _event_debug_assert_is_setup(ev);
1757 ev->ev_pri = base->nactivequeues/2;
1763 event_set(struct event *ev, evutil_socket_t fd, short events,
1764 void (*callback)(evutil_socket_t, short, void *), void *arg)
1767 r = event_assign(ev, current_base, fd, events, callback, arg);
1768 EVUTIL_ASSERT(r == 0);
1772 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
1775 ev = mm_malloc(sizeof(struct event));
1778 if (event_assign(ev, base, fd, events, cb, arg) < 0) {
1787 event_free(struct event *ev)
1789 _event_debug_assert_is_setup(ev);
1791 /* make sure that this event won't be coming back to haunt us. */
1793 _event_debug_note_teardown(ev);
1799 event_debug_unassign(struct event *ev)
1801 _event_debug_assert_not_added(ev);
1802 _event_debug_note_teardown(ev);
1804 ev->ev_flags &= ~EVLIST_INIT;
1808 * Set's the priority of an event - if an event is already scheduled
1809 * changing the priority is going to fail.
1813 event_priority_set(struct event *ev, int pri)
1815 _event_debug_assert_is_setup(ev);
1817 if (ev->ev_flags & EVLIST_ACTIVE)
1819 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
1828 * Checks if a specific event is pending or scheduled.
/* Returns the subset of 'event' flags for which 'ev' is pending/active; if
 * tv is non-NULL and a timeout is pending, writes the expiry time to *tv.
 * NOTE(review): fragmentary excerpt. */
1832 event_pending(const struct event *ev, short event, struct timeval *tv)
1836 _event_debug_assert_is_setup(ev);
1838 if (ev->ev_flags & EVLIST_INSERTED)
1839 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
1840 if (ev->ev_flags & EVLIST_ACTIVE)
1841 flags |= ev->ev_res;
1842 if (ev->ev_flags & EVLIST_TIMEOUT)
1843 flags |= EV_TIMEOUT;
1845 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
1847 /* See if there is a timeout that we should report */
1848 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
1849 struct timeval tmp = ev->ev_timeout;
/* Strip common-timeout magic bits before reporting. */
1850 tmp.tv_usec &= MICROSECONDS_MASK;
1851 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1852 /* correctly remap to real time */
1853 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
1859 return (flags & event);
1863 event_initialized(const struct event *ev)
1865 if (!(ev->ev_flags & EVLIST_INIT))
1872 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
1874 _event_debug_assert_is_setup(event);
1877 *base_out = event->ev_base;
1879 *fd_out = event->ev_fd;
1881 *events_out = event->ev_events;
1883 *callback_out = event->ev_callback;
1885 *arg_out = event->ev_arg;
1889 event_get_struct_event_size(void)
1891 return sizeof(struct event);
1895 event_get_fd(const struct event *ev)
1897 _event_debug_assert_is_setup(ev);
1902 event_get_base(const struct event *ev)
1904 _event_debug_assert_is_setup(ev);
1909 event_get_events(const struct event *ev)
1911 _event_debug_assert_is_setup(ev);
1912 return ev->ev_events;
1916 event_get_callback(const struct event *ev)
1918 _event_debug_assert_is_setup(ev);
1919 return ev->ev_callback;
1923 event_get_callback_arg(const struct event *ev)
1925 _event_debug_assert_is_setup(ev);
1930 event_add(struct event *ev, const struct timeval *tv)
1934 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
1935 event_warnx("%s: event has no event_base set.", __func__);
1939 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
1941 res = event_add_internal(ev, tv, 0);
1943 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
1948 /* Helper callback: wake an event_base from another thread. This version
1949 * works by writing a byte to one end of a socketpair, so that the event_base
1950 * listening on the other end will wake up as the corresponding event
1953 evthread_notify_base_default(struct event_base *base)
1959 r = send(base->th_notify_fd[1], buf, 1, 0);
1961 r = write(base->th_notify_fd[1], buf, 1);
1963 return (r < 0 && errno != EAGAIN) ? -1 : 0;
1966 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
1967 /* Helper callback: wake an event_base from another thread. This version
1968 * assumes that you have a working eventfd() implementation. */
1970 evthread_notify_base_eventfd(struct event_base *base)
1972 ev_uint64_t msg = 1;
1975 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
1976 } while (r < 0 && errno == EAGAIN);
1978 return (r < 0) ? -1 : 0;
1982 /** Tell the thread currently running the event_loop for base (if any) that it
1983 * needs to stop waiting in its dispatch function (if it is) and process all
1984 * active events and deferred callbacks (if there are any). */
1986 evthread_notify_base(struct event_base *base)
1988 EVENT_BASE_ASSERT_LOCKED(base);
1989 if (!base->th_notify_fn)
1991 if (base->is_notify_pending)
1993 base->is_notify_pending = 1;
1994 return base->th_notify_fn(base);
1997 /* Implementation function to add an event. Works just like event_add,
1998 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
1999 * we treat tv as an absolute time, not as an interval to add to the current
/* NOTE(review): fragmentary excerpt; several lines are elided. */
2002 event_add_internal(struct event *ev, const struct timeval *tv,
2005 struct event_base *base = ev->ev_base;
2009 EVENT_BASE_ASSERT_LOCKED(base);
2010 _event_debug_assert_is_setup(ev);
2013 "event_add: event: %p (fd %d), %s%s%scall %p",
2016 ev->ev_events & EV_READ ? "EV_READ " : " ",
2017 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2018 tv ? "EV_TIMEOUT " : " ",
2021 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
/* Reserve heap space up front so the timeout insertion below cannot fail
 * after we have already changed other state. */
2024 * prepare for timeout insertion further below, if we get a
2025 * failure on any step, we should not change any state.
2027 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2028 if (min_heap_reserve(&base->timeheap,
2029 1 + min_heap_size(&base->timeheap)) == -1)
2030 return (-1); /* ENOMEM == errno */
2033 /* If the main thread is currently executing a signal event's
2034 * callback, and we are not the main thread, then we want to wait
2035 * until the callback is done before we mess with the event, or else
2036 * we can race on ev_ncalls and ev_pncalls below. */
2037 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2038 if (base->current_event == ev && (ev->ev_events & EV_SIGNAL)
2039 && !EVBASE_IN_THREAD(base)) {
2040 ++base->current_event_waiters;
2041 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2045 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
2046 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
2047 if (ev->ev_events & (EV_READ|EV_WRITE))
2048 res = evmap_io_add(base, ev->ev_fd, ev);
2049 else if (ev->ev_events & EV_SIGNAL)
2050 res = evmap_signal_add(base, (int)ev->ev_fd, ev);
2052 event_queue_insert(base, ev, EVLIST_INSERTED);
2054 /* evmap says we need to notify the main thread. */
2061 * we should change the timeout state only if the previous event
2062 * addition succeeded.
2064 if (res != -1 && tv != NULL) {
2069 * for persistent timeout events, we remember the
2070 * timeout value and re-add the event.
2072 * If tv_is_absolute, this was already set.
2074 if (ev->ev_closure == EV_CLOSURE_PERSIST && !tv_is_absolute)
2075 ev->ev_io_timeout = *tv;
2078 * we already reserved memory above for the case where we
2079 * are not replacing an existing timeout.
2081 if (ev->ev_flags & EVLIST_TIMEOUT) {
2082 /* XXX I believe this is needless. */
2083 if (min_heap_elt_is_top(ev))
2085 event_queue_remove(base, ev, EVLIST_TIMEOUT);
2088 /* Check if it is active due to a timeout. Rescheduling
2089 * this timeout before the callback can be executed
2090 * removes it from the active list. */
2091 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2092 (ev->ev_res & EV_TIMEOUT)) {
2093 if (ev->ev_events & EV_SIGNAL) {
2094 /* See if we are just active executing
2095 * this event in a loop
2097 if (ev->ev_ncalls && ev->ev_pncalls) {
/* Abort the signal callback loop by zeroing its shared counter. */
2099 *ev->ev_pncalls = 0;
2103 event_queue_remove(base, ev, EVLIST_ACTIVE);
2106 gettime(base, &now);
2108 common_timeout = is_common_timeout(tv, base);
2109 if (tv_is_absolute) {
2110 ev->ev_timeout = *tv;
2111 } else if (common_timeout) {
/* Strip magic bits, add the interval, then re-apply the magic. */
2112 struct timeval tmp = *tv;
2113 tmp.tv_usec &= MICROSECONDS_MASK;
2114 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2115 ev->ev_timeout.tv_usec |=
2116 (tv->tv_usec & ~MICROSECONDS_MASK);
2118 evutil_timeradd(&now, tv, &ev->ev_timeout);
2122 "event_add: timeout in %d seconds, call %p",
2123 (int)tv->tv_sec, ev->ev_callback));
2125 event_queue_insert(base, ev, EVLIST_TIMEOUT);
2126 if (common_timeout) {
2127 struct common_timeout_list *ctl =
2128 get_common_timeout_list(base, &ev->ev_timeout);
2129 if (ev == TAILQ_FIRST(&ctl->events)) {
2130 common_timeout_schedule(ctl, &now, ev);
2133 /* See if the earliest timeout is now earlier than it
2134 * was before: if so, we will need to tell the main
2135 * thread to wake up earlier than it would
2137 if (min_heap_elt_is_top(ev))
2142 /* if we are not in the right thread, we need to wake up the loop */
2143 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2144 evthread_notify_base(base);
2146 _event_debug_note_add(ev);
2152 event_del(struct event *ev)
2156 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2157 event_warnx("%s: event has no event_base set.", __func__);
2161 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2163 res = event_del_internal(ev);
2165 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2170 /* Helper for event_del: always called with th_base_lock held. */
/* NOTE(review): fragmentary excerpt. The line assigning base = ev->ev_base
 * is not visible here (it sits in an elided line after the NULL check). */
2172 event_del_internal(struct event *ev)
2174 struct event_base *base;
2175 int res = 0, notify = 0;
2177 event_debug(("event_del: %p (fd %d), callback %p",
2178 ev, (int)ev->ev_fd, ev->ev_callback));
2180 /* An event without a base has not been added */
2181 if (ev->ev_base == NULL)
2184 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2186 /* If the main thread is currently executing this event's callback,
2187 * and we are not the main thread, then we want to wait until the
2188 * callback is done before we start removing the event. That way,
2189 * when this function returns, it will be safe to free the
2190 * user-supplied argument. */
2192 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2193 if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
2194 ++base->current_event_waiters;
2195 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2199 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2201 /* See if we are just active executing this event in a loop */
2202 if (ev->ev_events & EV_SIGNAL) {
2203 if (ev->ev_ncalls && ev->ev_pncalls) {
/* Abort the in-progress signal callback loop. */
2205 *ev->ev_pncalls = 0;
2209 if (ev->ev_flags & EVLIST_TIMEOUT) {
2210 /* NOTE: We never need to notify the main thread because of a
2211 * deleted timeout event: all that could happen if we don't is
2212 * that the dispatch loop might wake up too early. But the
2213 * point of notifying the main thread _is_ to wake up the
2214 * dispatch loop early anyway, so we wouldn't gain anything by
2217 event_queue_remove(base, ev, EVLIST_TIMEOUT);
2220 if (ev->ev_flags & EVLIST_ACTIVE)
2221 event_queue_remove(base, ev, EVLIST_ACTIVE);
2223 if (ev->ev_flags & EVLIST_INSERTED) {
2224 event_queue_remove(base, ev, EVLIST_INSERTED);
2225 if (ev->ev_events & (EV_READ|EV_WRITE))
2226 res = evmap_io_del(base, ev->ev_fd, ev);
2228 res = evmap_signal_del(base, (int)ev->ev_fd, ev);
2230 /* evmap says we need to notify the main thread. */
2236 /* if we are not in the right thread, we need to wake up the loop */
2237 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2238 evthread_notify_base(base);
2240 _event_debug_note_del(ev);
2246 event_active(struct event *ev, int res, short ncalls)
2248 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2249 event_warnx("%s: event has no event_base set.", __func__);
2253 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2255 _event_debug_assert_is_setup(ev);
2257 event_active_nolock(ev, res, ncalls);
2259 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2264 event_active_nolock(struct event *ev, int res, short ncalls)
2266 struct event_base *base;
2268 event_debug(("event_active: %p (fd %d), res %d, callback %p",
2269 ev, (int)ev->ev_fd, (int)res, ev->ev_callback));
2272 /* We get different kinds of events, add them together */
2273 if (ev->ev_flags & EVLIST_ACTIVE) {
2280 EVENT_BASE_ASSERT_LOCKED(base);
2284 if (ev->ev_events & EV_SIGNAL) {
2285 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2286 if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
2287 ++base->current_event_waiters;
2288 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2291 ev->ev_ncalls = ncalls;
2292 ev->ev_pncalls = NULL;
2295 event_queue_insert(base, ev, EVLIST_ACTIVE);
2297 if (EVBASE_NEED_NOTIFY(base))
2298 evthread_notify_base(base);
2302 event_deferred_cb_init(struct deferred_cb *cb, deferred_cb_fn fn, void *arg)
2304 memset(cb, 0, sizeof(struct deferred_cb));
2310 event_deferred_cb_cancel(struct deferred_cb_queue *queue,
2311 struct deferred_cb *cb)
2315 queue = ¤t_base->defer_queue;
2320 LOCK_DEFERRED_QUEUE(queue);
2322 TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
2323 --queue->active_count;
2326 UNLOCK_DEFERRED_QUEUE(queue);
2330 event_deferred_cb_schedule(struct deferred_cb_queue *queue,
2331 struct deferred_cb *cb)
2335 queue = ¤t_base->defer_queue;
2340 LOCK_DEFERRED_QUEUE(queue);
2343 TAILQ_INSERT_TAIL(&queue->deferred_cb_list, cb, cb_next);
2344 ++queue->active_count;
2345 if (queue->notify_fn)
2346 queue->notify_fn(queue, queue->notify_arg);
2348 UNLOCK_DEFERRED_QUEUE(queue);
2352 timeout_next(struct event_base *base, struct timeval **tv_p)
2354 /* Caller must hold th_base_lock */
2357 struct timeval *tv = *tv_p;
2360 ev = min_heap_top(&base->timeheap);
2363 /* if no time-based events are active wait for I/O */
2368 if (gettime(base, &now) == -1) {
2373 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
2374 evutil_timerclear(tv);
2378 evutil_timersub(&ev->ev_timeout, &now, tv);
2380 EVUTIL_ASSERT(tv->tv_sec >= 0);
2381 EVUTIL_ASSERT(tv->tv_usec >= 0);
2382 event_debug(("timeout_next: in %d seconds", (int)tv->tv_sec));
2389 * Determines if the time is running backwards by comparing the current time
2390 * against the last time we checked. Not needed when using clock monotonic.
2391 * If time is running backwards, we adjust the firing time of every event by
2392 * the amount that time seems to have jumped.
/* NOTE(review): fragmentary excerpt; caller supplies 'tv' = current time. */
2395 timeout_correct(struct event_base *base, struct timeval *tv)
2397 /* Caller must hold th_base_lock. */
2406 /* Check if time is running backwards */
2409 if (evutil_timercmp(tv, &base->event_tv, >=)) {
/* Time moved forward (the normal case): just record it and return. */
2410 base->event_tv = *tv;
2414 event_debug(("%s: time is running backwards, corrected",
2416 evutil_timersub(&base->event_tv, tv, &off);
2419 * We can modify the key element of the node without destroying
2420 * the minheap property, because we change every element.
2422 pev = base->timeheap.p;
2423 size = base->timeheap.n;
2424 for (; size-- > 0; ++pev) {
2425 struct timeval *ev_tv = &(**pev).ev_timeout;
2426 evutil_timersub(ev_tv, &off, ev_tv);
/* Common-timeout queues store magic bits in tv_usec: strip them,
 * shift the deadline, then re-apply the magic for queue 'i'. */
2428 for (i=0; i<base->n_common_timeouts; ++i) {
2430 struct common_timeout_list *ctl =
2431 base->common_timeout_queues[i];
2432 TAILQ_FOREACH(ev, &ctl->events,
2433 ev_timeout_pos.ev_next_with_common_timeout) {
2434 struct timeval *ev_tv = &ev->ev_timeout;
2435 ev_tv->tv_usec &= MICROSECONDS_MASK;
2436 evutil_timersub(ev_tv, &off, ev_tv);
2437 ev_tv->tv_usec |= COMMON_TIMEOUT_MAGIC |
2438 (i<<COMMON_TIMEOUT_IDX_SHIFT);
2442 /* Now remember what the new time turned out to be. */
2443 base->event_tv = *tv;
2446 /* Activate every event whose timeout has elapsed. */
2448 timeout_process(struct event_base *base)
2450 /* Caller must hold lock. */
2454 if (min_heap_empty(&base->timeheap)) {
2458 gettime(base, &now);
2460 while ((ev = min_heap_top(&base->timeheap))) {
2461 if (evutil_timercmp(&ev->ev_timeout, &now, >))
2464 /* delete this event from the I/O queues */
2465 event_del_internal(ev);
2467 event_debug(("timeout_process: call %p",
2469 event_active_nolock(ev, EV_TIMEOUT, 1);
2473 /* Remove 'ev' from 'queue' (EVLIST_...) in base. */
2475 event_queue_remove(struct event_base *base, struct event *ev, int queue)
2477 EVENT_BASE_ASSERT_LOCKED(base);
2479 if (!(ev->ev_flags & queue)) {
2480 event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
2481 ev, ev->ev_fd, queue);
2485 if (~ev->ev_flags & EVLIST_INTERNAL)
2486 base->event_count--;
2488 ev->ev_flags &= ~queue;
2490 case EVLIST_INSERTED:
2491 TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
2494 base->event_count_active--;
2495 TAILQ_REMOVE(&base->activequeues[ev->ev_pri],
2496 ev, ev_active_next);
2498 case EVLIST_TIMEOUT:
2499 if (is_common_timeout(&ev->ev_timeout, base)) {
2500 struct common_timeout_list *ctl =
2501 get_common_timeout_list(base, &ev->ev_timeout);
2502 TAILQ_REMOVE(&ctl->events, ev,
2503 ev_timeout_pos.ev_next_with_common_timeout);
2505 min_heap_erase(&base->timeheap, ev);
2509 event_errx(1, "%s: unknown queue %x", __func__, queue);
2513 /* Add 'ev' to the common timeout list in 'ctl'. */
/* NOTE(review): fragmentary excerpt; insertion keeps ctl->events sorted by
 * deadline, searching backwards from the tail (new events are usually last). */
2515 insert_common_timeout_inorder(struct common_timeout_list *ctl,
2519 /* By all logic, we should just be able to append 'ev' to the end of
2520 * ctl->events, since the timeout on each 'ev' is set to {the common
2521 * timeout} + {the time when we add the event}, and so the events
2522 * should arrive in order of their timeouts. But just in case
2523 * there's some wacky threading issue going on, we do a search from
2524 * the end of 'ev' to find the right insertion point.
2526 TAILQ_FOREACH_REVERSE(e, &ctl->events,
2527 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
2528 /* This timercmp is a little sneaky, since both ev and e have
2529 * magic values in tv_usec. Fortunately, they ought to have
2530 * the _same_ magic values in tv_usec. Let's assert for that.
2533 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
2534 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
2535 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
2536 ev_timeout_pos.ev_next_with_common_timeout);
/* Fell off the front: 'ev' is earlier than everything in the list. */
2540 TAILQ_INSERT_HEAD(&ctl->events, ev,
2541 ev_timeout_pos.ev_next_with_common_timeout);
2545 event_queue_insert(struct event_base *base, struct event *ev, int queue)
2547 EVENT_BASE_ASSERT_LOCKED(base);
2549 if (ev->ev_flags & queue) {
2550 /* Double insertion is possible for active events */
2551 if (queue & EVLIST_ACTIVE)
2554 event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
2555 ev, ev->ev_fd, queue);
2559 if (~ev->ev_flags & EVLIST_INTERNAL)
2560 base->event_count++;
2562 ev->ev_flags |= queue;
2564 case EVLIST_INSERTED:
2565 TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
2568 base->event_count_active++;
2569 TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
2572 case EVLIST_TIMEOUT: {
2573 if (is_common_timeout(&ev->ev_timeout, base)) {
2574 struct common_timeout_list *ctl =
2575 get_common_timeout_list(base, &ev->ev_timeout);
2576 insert_common_timeout_inorder(ctl, ev);
2578 min_heap_push(&base->timeheap, ev);
2582 event_errx(1, "%s: unknown queue %x", __func__, queue);
2586 /* Functions for debugging */
2589 event_get_version(void)
2591 return (_EVENT_VERSION);
2595 event_get_version_number(void)
2597 return (_EVENT_NUMERIC_VERSION);
2601 * No thread-safe interface needed - the information should be the same
2606 event_get_method(void)
2608 return (current_base->evsel->name);
2611 #ifndef _EVENT_DISABLE_MM_REPLACEMENT
2612 static void *(*_mm_malloc_fn)(size_t sz) = NULL;
2613 static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
2614 static void (*_mm_free_fn)(void *p) = NULL;
2617 event_mm_malloc_(size_t sz)
2620 return _mm_malloc_fn(sz);
2626 event_mm_calloc_(size_t count, size_t size)
2628 if (_mm_malloc_fn) {
2629 size_t sz = count * size;
2630 void *p = _mm_malloc_fn(sz);
2635 return calloc(count, size);
2639 event_mm_strdup_(const char *str)
2641 if (_mm_malloc_fn) {
2642 size_t ln = strlen(str);
2643 void *p = _mm_malloc_fn(ln+1);
2645 memcpy(p, str, ln+1);
2649 return _strdup(str);
2656 event_mm_realloc_(void *ptr, size_t sz)
2659 return _mm_realloc_fn(ptr, sz);
2661 return realloc(ptr, sz);
2665 event_mm_free_(void *ptr)
2674 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
2675 void *(*realloc_fn)(void *ptr, size_t sz),
2676 void (*free_fn)(void *ptr))
2678 _mm_malloc_fn = malloc_fn;
2679 _mm_realloc_fn = realloc_fn;
2680 _mm_free_fn = free_fn;
2684 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
2686 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
2690 struct event_base *base = arg;
2692 r = read(fd, (void*) &msg, sizeof(msg));
2693 if (r<0 && errno != EAGAIN) {
2694 event_sock_warn(fd, "Error reading from eventfd");
2696 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2697 base->is_notify_pending = 0;
2698 EVBASE_RELEASE_LOCK(base, th_base_lock);
2703 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
2705 unsigned char buf[1024];
2706 struct event_base *base = arg;
2708 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
2711 while (read(fd, (char*)buf, sizeof(buf)) > 0)
2715 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2716 base->is_notify_pending = 0;
2717 EVBASE_RELEASE_LOCK(base, th_base_lock);
/* Set up the self-pipe (eventfd > pipe > socketpair, in order of preference)
 * that lets other threads wake this base's dispatch loop.
 * NOTE(review): fragmentary excerpt; several lines are elided. */
2721 evthread_make_base_notifiable(struct event_base *base)
2723 void (*cb)(evutil_socket_t, short, void *) = evthread_notify_drain_default;
2724 int (*notify)(struct event_base *) = evthread_notify_base_default;
2726 /* XXXX grab the lock here? */
/* Already notifiable: nothing to do. */
2730 if (base->th_notify_fd[0] >= 0)
2733 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
2735 #define EFD_CLOEXEC 0
2737 base->th_notify_fd[0] = eventfd(0, EFD_CLOEXEC);
2738 if (base->th_notify_fd[0] >= 0) {
2739 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2740 notify = evthread_notify_base_eventfd;
2741 cb = evthread_notify_drain_eventfd;
2744 #if defined(_EVENT_HAVE_PIPE)
2745 if (base->th_notify_fd[0] < 0) {
/* Pipes only work if the backend supports arbitrary fds. */
2746 if ((base->evsel->features & EV_FEATURE_FDS)) {
2747 if (pipe(base->th_notify_fd) < 0) {
2748 event_warn("%s: pipe", __func__);
2750 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2751 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
2758 #define LOCAL_SOCKETPAIR_AF AF_INET
2760 #define LOCAL_SOCKETPAIR_AF AF_UNIX
/* Last resort: a socketpair (AF_INET on platforms without AF_UNIX). */
2762 if (base->th_notify_fd[0] < 0) {
2763 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0,
2764 base->th_notify_fd) == -1) {
2765 event_sock_warn(-1, "%s: socketpair", __func__);
2768 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2769 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
2773 evutil_make_socket_nonblocking(base->th_notify_fd[0]);
2775 base->th_notify_fn = notify;
2778 Making the second socket nonblocking is a bit subtle, given that we
2779 ignore any EAGAIN returns when writing to it, and you don't usually
2780 do that for a nonblocking socket. But if the kernel gives us EAGAIN,
2781 then there's no need to add any more data to the buffer, since
2782 the main thread is already either about to wake up and drain it,
2783 or woken up and in the process of draining it.
/* NOTE(review): the '> 0' test skips a legitimate fd 0; '>= 0' looks
 * intended — confirm against upstream before changing. */
2785 if (base->th_notify_fd[1] > 0)
2786 evutil_make_socket_nonblocking(base->th_notify_fd[1]);
2788 /* prepare an event that we can use for wakeup */
2789 event_assign(&base->th_notify, base, base->th_notify_fd[0],
2790 EV_READ|EV_PERSIST, cb, base);
2792 /* we need to mark this as internal event */
2793 base->th_notify.ev_flags |= EVLIST_INTERNAL;
2794 event_priority_set(&base->th_notify, 0);
2796 return event_add(&base->th_notify, NULL);
2800 event_base_dump_events(struct event_base *base, FILE *output)
2804 fprintf(output, "Inserted events:\n");
2805 TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
2806 fprintf(output, " %p [fd %ld]%s%s%s%s%s\n",
2807 (void*)e, (long)e->ev_fd,
2808 (e->ev_events&EV_READ)?" Read":"",
2809 (e->ev_events&EV_WRITE)?" Write":"",
2810 (e->ev_events&EV_SIGNAL)?" Signal":"",
2811 (e->ev_events&EV_TIMEOUT)?" Timeout":"",
2812 (e->ev_events&EV_PERSIST)?" Persist":"");
2815 for (i = 0; i < base->nactivequeues; ++i) {
2816 if (TAILQ_EMPTY(&base->activequeues[i]))
2818 fprintf(output, "Active events [priority %d]:\n", i);
2819 TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
2820 fprintf(output, " %p [fd %ld]%s%s%s%s\n",
2821 (void*)e, (long)e->ev_fd,
2822 (e->ev_res&EV_READ)?" Read active":"",
2823 (e->ev_res&EV_WRITE)?" Write active":"",
2824 (e->ev_res&EV_SIGNAL)?" Signal active":"",
2825 (e->ev_res&EV_TIMEOUT)?" Timeout active":"");
/*
 * Register one "virtual" event on 'base': a placeholder that keeps the
 * loop running without a real fd/signal/timeout.  Increments the
 * virtual-event counter under the base lock.
 */
2831 event_base_add_virtual(struct event_base *base)
2833 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2834 base->virtual_event_count++;
2835 EVBASE_RELEASE_LOCK(base, th_base_lock);
/*
 * Remove one previously added "virtual" event from 'base'.  Decrements
 * the counter under the base lock; when the count drops to zero and the
 * loop is running in another thread, wake it so it can notice that it
 * may now have nothing left to wait for.
 */
2839 event_base_del_virtual(struct event_base *base)
2841 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
/* Deleting more virtual events than were added is a caller bug. */
2842 EVUTIL_ASSERT(base->virtual_event_count > 0);
2843 base->virtual_event_count--;
2844 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
2845 evthread_notify_base(base);
2846 EVBASE_RELEASE_LOCK(base, th_base_lock);
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
/*
 * Initialize the library-wide locks used by the event core and its
 * subsystems.  Called when threading support is enabled (or, with
 * enable_locks==0, during debug setup).  Propagates failure from the
 * subsystem lock-setup helpers.
 */
2851 event_global_setup_locks_(const int enable_locks)
2853 #ifndef _EVENT_DISABLE_DEBUG_MODE
/* Lock protecting the debug-mode map of known events. */
2854 EVTHREAD_SETUP_GLOBAL_LOCK(_event_debug_map_lock, 0);
/* Signal-handling and secure-RNG subsystems have their own globals. */
2856 if (evsig_global_setup_locks_(enable_locks) < 0)
2858 if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
/*
 * Sanity-check the internal data structures of 'base', aborting (via
 * EVUTIL_ASSERT) on any inconsistency.  Checks the fd/signal maps, the
 * min-heap ordering of the timeout heap, and the sorted order and
 * flags of every common-timeout queue.  Intended for debugging/tests.
 */
2865 event_base_assert_ok(struct event_base *base)
2868 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2869 evmap_check_integrity(base);
2871 /* Check the heap property */
/* For a binary min-heap stored in an array, element i's parent is at
 * (i-1)/2 and must not have a later timeout. */
2872 for (i = 1; i < (int)base->timeheap.n; ++i) {
2873 int parent = (i - 1) / 2;
2874 struct event *ev, *p_ev;
2875 ev = base->timeheap.p[i];
2876 p_ev = base->timeheap.p[parent];
2877 EVUTIL_ASSERT(ev->ev_flags & EV_TIMEOUT);
2878 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
/* Each event caches its own heap index; it must match reality. */
2879 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
2882 /* Check that the common timeouts are fine */
2883 for (i = 0; i < base->n_common_timeouts; ++i) {
2884 struct common_timeout_list *ctl = base->common_timeout_queues[i];
/* 'last' tracks the previous event so the queue can be verified to
 * be sorted by timeout in nondecreasing order. */
2885 struct event *last=NULL, *ev;
2886 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
2888 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
2889 EVUTIL_ASSERT(ev->ev_flags & EV_TIMEOUT);
/* Every queued event must carry the magic common-timeout encoding,
 * and its encoded queue index must match the queue it sits on. */
2890 EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
2891 EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
2896 EVBASE_RELEASE_LOCK(base, th_base_lock);