2 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3 * Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "event2/event-config.h"
31 #define WIN32_LEAN_AND_MEAN
33 #undef WIN32_LEAN_AND_MEAN
35 #include <sys/types.h>
36 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
39 #include <sys/queue.h>
40 #ifdef _EVENT_HAVE_SYS_SOCKET_H
41 #include <sys/socket.h>
45 #ifdef _EVENT_HAVE_UNISTD_H
48 #ifdef _EVENT_HAVE_SYS_EVENTFD_H
49 #include <sys/eventfd.h>
57 #include "event2/event.h"
58 #include "event2/event_struct.h"
59 #include "event2/event_compat.h"
60 #include "event-internal.h"
61 #include "defer-internal.h"
62 #include "evthread-internal.h"
63 #include "event2/thread.h"
64 #include "event2/util.h"
65 #include "log-internal.h"
66 #include "evmap-internal.h"
67 #include "iocp-internal.h"
68 #include "changelist-internal.h"
69 #include "ht-internal.h"
70 #include "util-internal.h"
72 #ifdef _EVENT_HAVE_EVENT_PORTS
73 extern const struct eventop evportops;
75 #ifdef _EVENT_HAVE_SELECT
76 extern const struct eventop selectops;
78 #ifdef _EVENT_HAVE_POLL
79 extern const struct eventop pollops;
81 #ifdef _EVENT_HAVE_EPOLL
82 extern const struct eventop epollops;
84 #ifdef _EVENT_HAVE_WORKING_KQUEUE
85 extern const struct eventop kqops;
87 #ifdef _EVENT_HAVE_DEVPOLL
88 extern const struct eventop devpollops;
91 extern const struct eventop win32ops;
94 /* Array of backends in order of preference. */
95 static const struct eventop *eventops[] = {
96 #ifdef _EVENT_HAVE_EVENT_PORTS
99 #ifdef _EVENT_HAVE_WORKING_KQUEUE
102 #ifdef _EVENT_HAVE_EPOLL
105 #ifdef _EVENT_HAVE_DEVPOLL
108 #ifdef _EVENT_HAVE_POLL
111 #ifdef _EVENT_HAVE_SELECT
120 /* Global state; deprecated */
121 struct event_base *event_global_current_base_ = NULL;
122 #define current_base event_global_current_base_
126 static int use_monotonic;
129 static inline int event_add_internal(struct event *ev,
130 const struct timeval *tv, int tv_is_absolute);
131 static inline int event_del_internal(struct event *ev);
133 static void event_queue_insert(struct event_base *, struct event *, int);
134 static void event_queue_remove(struct event_base *, struct event *, int);
135 static int event_haveevents(struct event_base *);
137 static int event_process_active(struct event_base *);
139 static int timeout_next(struct event_base *, struct timeval **);
140 static void timeout_process(struct event_base *);
141 static void timeout_correct(struct event_base *, struct timeval *);
143 static inline void event_signal_closure(struct event_base *, struct event *ev);
144 static inline void event_persist_closure(struct event_base *, struct event *ev);
146 static int evthread_notify_base(struct event_base *base);
148 #ifndef _EVENT_DISABLE_DEBUG_MODE
149 /* These functions implement a hashtable of which 'struct event *' structures
150 * have been setup or added. We don't want to trust the content of the struct
151 * event itself, since we're trying to work through cases where an event gets
152 * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
155 struct event_debug_entry {
156 HT_ENTRY(event_debug_entry) node;
157 const struct event *ptr;
161 static inline unsigned
162 hash_debug_entry(const struct event_debug_entry *e)
164 /* We need to do this silliness to convince compilers that we
165 * honestly mean to cast e->ptr to an integer, and discard any
166 * part of it that doesn't fit in an unsigned.
168 unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
169 /* Our hashtable implementation is pretty sensitive to low bits,
170 * and every struct event is over 64 bytes in size, so we can
176 eq_debug_entry(const struct event_debug_entry *a,
177 const struct event_debug_entry *b)
179 return a->ptr == b->ptr;
182 int _event_debug_mode_on = 0;
183 /* Set if it's too late to enable event_debug_mode. */
184 static int event_debug_mode_too_late = 0;
185 static void *_event_debug_map_lock = NULL;
186 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
189 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
191 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
192 eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
/* NOTE(review): this region was damaged during extraction -- each line still
 * carries its original source line number, and several continuation lines of
 * these multi-line macros (find.ptr assignments, if/else arms, the closing
 * "} while (0)" of each, and the #else/#endif for the non-debug stubs) were
 * dropped.  The macros maintain global_debug_map: note_setup/note_teardown
 * insert/remove entries, note_add/note_del flip the 'added' bit, and the
 * assert_* variants abort via event_errx on misuse.  The stubs at the bottom
 * (originally under #else) compile the checks away when debug mode is
 * disabled.  Restore from canonical libevent 2.0 event.c before building. */
194 /* Macro: record that ev is now setup (that is, ready for an add) */
195 #define _event_debug_note_setup(ev) do { \
196 if (_event_debug_mode_on) { \
197 struct event_debug_entry *dent,find; \
199 EVLOCK_LOCK(_event_debug_map_lock, 0); \
200 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
204 dent = mm_malloc(sizeof(*dent)); \
207 "Out of memory in debugging code"); \
210 HT_INSERT(event_debug_map, &global_debug_map, dent); \
212 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
214 event_debug_mode_too_late = 1; \
216 /* Macro: record that ev is no longer setup */
217 #define _event_debug_note_teardown(ev) do { \
218 if (_event_debug_mode_on) { \
219 struct event_debug_entry *dent,find; \
221 EVLOCK_LOCK(_event_debug_map_lock, 0); \
222 dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
225 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
227 event_debug_mode_too_late = 1; \
229 /* Macro: record that ev is now added */
230 #define _event_debug_note_add(ev) do { \
231 if (_event_debug_mode_on) { \
232 struct event_debug_entry *dent,find; \
234 EVLOCK_LOCK(_event_debug_map_lock, 0); \
235 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
239 event_errx(_EVENT_ERR_ABORT, \
240 "%s: noting an add on a non-setup event %p", \
243 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
245 event_debug_mode_too_late = 1; \
247 /* Macro: record that ev is no longer added */
248 #define _event_debug_note_del(ev) do { \
249 if (_event_debug_mode_on) { \
250 struct event_debug_entry *dent,find; \
252 EVLOCK_LOCK(_event_debug_map_lock, 0); \
253 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
257 event_errx(_EVENT_ERR_ABORT, \
258 "%s: noting a del on a non-setup event %p", \
261 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
263 event_debug_mode_too_late = 1; \
265 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
266 #define _event_debug_assert_is_setup(ev) do { \
267 if (_event_debug_mode_on) { \
268 struct event_debug_entry *dent,find; \
270 EVLOCK_LOCK(_event_debug_map_lock, 0); \
271 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
273 event_errx(_EVENT_ERR_ABORT, \
274 "%s called on a non-initialized event %p", \
277 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
280 /* Macro: assert that ev is not added (i.e., okay to tear down or set
282 #define _event_debug_assert_not_added(ev) do { \
283 if (_event_debug_mode_on) { \
284 struct event_debug_entry *dent,find; \
286 EVLOCK_LOCK(_event_debug_map_lock, 0); \
287 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
288 if (dent && dent->added) { \
289 event_errx(_EVENT_ERR_ABORT, \
290 "%s called on an already added event %p", \
293 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
297 #define _event_debug_note_setup(ev) \
299 #define _event_debug_note_teardown(ev) \
301 #define _event_debug_note_add(ev) \
303 #define _event_debug_note_del(ev) \
305 #define _event_debug_assert_is_setup(ev) \
307 #define _event_debug_assert_not_added(ev) \
311 #define EVENT_BASE_ASSERT_LOCKED(base) \
312 EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
/* The first time this function is called, it sets use_monotonic to 1
 * if we have a clock function that supports monotonic time */
static void
detect_monotonic(void)
{
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec	ts;
	static int use_monotonic_initialized = 0;

	if (use_monotonic_initialized)
		return;

	/* Probe once: if the monotonic clock works, remember that. */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;

	use_monotonic_initialized = 1;
#endif
}
333 /* How often (in seconds) do we check for changes in wall clock time relative
334 * to monotonic time? Set this to -1 for 'never.' */
335 #define CLOCK_SYNC_INTERVAL -1
337 /** Set 'tp' to the current time according to 'base'. We must hold the lock
338 * on 'base'. If there is a cached time, return it. Otherwise, use
339 * clock_gettime or gettimeofday as appropriate to find out the right time.
340 * Return 0 on success, -1 on failure.
343 gettime(struct event_base *base, struct timeval *tp)
345 EVENT_BASE_ASSERT_LOCKED(base);
347 if (base->tv_cache.tv_sec) {
348 *tp = base->tv_cache;
352 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
356 if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
359 tp->tv_sec = ts.tv_sec;
360 tp->tv_usec = ts.tv_nsec / 1000;
361 if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
364 evutil_gettimeofday(&tv,NULL);
365 evutil_timersub(&tv, tp, &base->tv_clock_diff);
366 base->last_updated_clock_diff = ts.tv_sec;
373 return (evutil_gettimeofday(tp, NULL));
377 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
383 return evutil_gettimeofday(tv, NULL);
386 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
387 if (base->tv_cache.tv_sec == 0) {
388 r = evutil_gettimeofday(tv, NULL);
390 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
391 evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
393 *tv = base->tv_cache;
397 EVBASE_RELEASE_LOCK(base, th_base_lock);
401 /** Make 'base' have no current cached time. */
403 clear_time_cache(struct event_base *base)
405 base->tv_cache.tv_sec = 0;
408 /** Replace the cached time in 'base' with the current time. */
410 update_time_cache(struct event_base *base)
412 base->tv_cache.tv_sec = 0;
413 if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
414 gettime(base, &base->tv_cache);
420 struct event_base *base = event_base_new_with_config(NULL);
423 event_errx(1, "%s: Unable to construct event_base", __func__);
435 struct event_base *base = NULL;
436 struct event_config *cfg = event_config_new();
438 base = event_base_new_with_config(cfg);
439 event_config_free(cfg);
444 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
447 event_config_is_avoided_method(const struct event_config *cfg,
450 struct event_config_entry *entry;
452 TAILQ_FOREACH(entry, &cfg->entries, next) {
453 if (entry->avoid_method != NULL &&
454 strcmp(entry->avoid_method, method) == 0)
461 /** Return true iff 'method' is disabled according to the environment. */
463 event_is_method_disabled(const char *name)
465 char environment[64];
468 evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
469 for (i = 8; environment[i] != '\0'; ++i)
470 environment[i] = EVUTIL_TOUPPER(environment[i]);
471 /* Note that evutil_getenv() ignores the environment entirely if
473 return (evutil_getenv(environment) != NULL);
477 event_base_get_features(const struct event_base *base)
479 return base->evsel->features;
483 event_deferred_cb_queue_init(struct deferred_cb_queue *cb)
485 memset(cb, 0, sizeof(struct deferred_cb_queue));
486 TAILQ_INIT(&cb->deferred_cb_list);
/** Helper for the deferred_cb queue: wake up the event base. */
static void
notify_base_cbq_callback(struct deferred_cb_queue *cb, void *baseptr)
{
	struct event_base *base = baseptr;
	/* Only poke the loop when another thread needs to be woken. */
	if (EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
}
498 struct deferred_cb_queue *
499 event_base_get_deferred_cb_queue(struct event_base *base)
501 return base ? &base->defer_queue : NULL;
505 event_enable_debug_mode(void)
507 #ifndef _EVENT_DISABLE_DEBUG_MODE
508 if (_event_debug_mode_on)
509 event_errx(1, "%s was called twice!", __func__);
510 if (event_debug_mode_too_late)
511 event_errx(1, "%s must be called *before* creating any events "
512 "or event_bases",__func__);
514 _event_debug_mode_on = 1;
516 HT_INIT(event_debug_map, &global_debug_map);
518 EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
524 event_disable_debug_mode(void)
526 struct event_debug_entry **ent, *victim;
528 EVLOCK_LOCK(_event_debug_map_lock, 0);
529 for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
531 ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
534 HT_CLEAR(event_debug_map, &global_debug_map);
535 EVLOCK_UNLOCK(_event_debug_map_lock , 0);
/* NOTE(review): garbled extraction -- line-number residue remains and many
 * structural lines (return type, braces, 'int i; int r;' declarations,
 * 'continue;' statements in the backend-selection loop, error returns, and
 * the final 'return (base);') were dropped.  Overall flow, grounded in the
 * visible lines: allocate+zero the base, seed event_tv, init the timeout
 * minheap/event queue/signal+notify fds, set up the deferred queue, init
 * evmaps and changelist, then walk eventops[] picking the first backend not
 * avoided by cfg, cfg->require_features, or the environment; finally init
 * one priority queue, optionally allocate the base lock/cond and make the
 * base notifiable, and optionally start IOCP.  Restore missing lines from
 * canonical libevent 2.0 before building. */
540 event_base_new_with_config(const struct event_config *cfg)
543 struct event_base *base;
544 int should_check_environment;
546 #ifndef _EVENT_DISABLE_DEBUG_MODE
547 event_debug_mode_too_late = 1;
548 if (_event_debug_mode_on && !_event_debug_map_lock) {
549 EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
553 if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
554 event_warn("%s: calloc", __func__);
558 gettime(base, &base->event_tv);
560 min_heap_ctor(&base->timeheap);
561 TAILQ_INIT(&base->eventqueue);
562 base->sig.ev_signal_pair[0] = -1;
563 base->sig.ev_signal_pair[1] = -1;
564 base->th_notify_fd[0] = -1;
565 base->th_notify_fd[1] = -1;
567 event_deferred_cb_queue_init(&base->defer_queue);
568 base->defer_queue.notify_fn = notify_base_cbq_callback;
569 base->defer_queue.notify_arg = base;
/* NOTE(review): the next line dereferences cfg; the dropped line above it
 * was presumably an 'if (cfg)' guard -- confirm against upstream. */
571 base->flags = cfg->flags;
573 evmap_io_initmap(&base->io);
574 evmap_signal_initmap(&base->sigmap);
575 event_changelist_init(&base->changelist);
579 should_check_environment =
580 !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
/* Backend selection: first eventops[] entry that passes all filters and
 * whose init() succeeds wins (loop exits once base->evbase is set). */
582 for (i = 0; eventops[i] && !base->evbase; i++) {
584 /* determine if this backend should be avoided */
585 if (event_config_is_avoided_method(cfg,
588 if ((eventops[i]->features & cfg->require_features)
589 != cfg->require_features)
593 /* also obey the environment variables */
594 if (should_check_environment &&
595 event_is_method_disabled(eventops[i]->name))
598 base->evsel = eventops[i];
600 base->evbase = base->evsel->init(base);
603 if (base->evbase == NULL) {
604 event_warnx("%s: no event mechanism available",
606 event_base_free(base);
610 if (evutil_getenv("EVENT_SHOW_METHOD"))
611 event_msgx("libevent using: %s", base->evsel->name);
613 /* allocate a single active event queue */
614 if (event_base_priority_init(base, 1) < 0) {
615 event_base_free(base);
619 /* prepare for threading */
621 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
622 if (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK)) {
624 EVTHREAD_ALLOC_LOCK(base->th_base_lock,
625 EVTHREAD_LOCKTYPE_RECURSIVE);
626 base->defer_queue.lock = base->th_base_lock;
627 EVTHREAD_ALLOC_COND(base->current_event_cond);
628 r = evthread_make_base_notifiable(base);
630 event_base_free(base);
637 if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
638 event_base_start_iocp(base, cfg->n_cpus_hint);
/** Launch an IOCP port for 'base' with 'n_cpus' worker threads.
 * Returns 0 on success (or if already started), -1 on failure or on
 * platforms without IOCP support. */
int
event_base_start_iocp(struct event_base *base, int n_cpus)
{
#ifdef WIN32
	if (base->iocp)
		return 0;	/* already running: idempotent */
	base->iocp = event_iocp_port_launch(n_cpus);
	if (!base->iocp) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}
/** Shut down the IOCP port of 'base', if one is running.  No-op on
 * non-Windows platforms or when no port was launched. */
void
event_base_stop_iocp(struct event_base *base)
{
#ifdef WIN32
	int rv;

	if (!base->iocp)
		return;
	rv = event_iocp_shutdown(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}
/* NOTE(review): garbled extraction -- return type, local declarations
 * ('int i, n_deleted = 0; struct event *ev;'), several event_del() calls
 * inside the deletion loops, the 'ev = next' advance statements, closing
 * braces, and the final 'mm_free(base)' were dropped; line-number residue
 * remains.  Visible structure: optionally resolve base from current_base,
 * stop IOCP, close the threading notify fds, delete all non-internal
 * events from the registered queue / timeout minheap / each common-timeout
 * list / each active queue, free the common-timeout array, dealloc the
 * backend, assert everything is empty, clear evmaps and changelist, and
 * free the base lock/cond.  Restore from canonical libevent 2.0 source. */
676 event_base_free(struct event_base *base)
680 /* XXXX grab the lock? If there is contention when one thread frees
681 * the base, then the contending thread will be very sad soon. */
683 if (base == NULL && current_base)
685 if (base == current_base)
688 /* XXX(niels) - check for internal events first */
692 event_base_stop_iocp(base);
695 /* threading fds if we have them */
696 if (base->th_notify_fd[0] != -1) {
697 event_del(&base->th_notify);
698 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
699 if (base->th_notify_fd[1] != -1)
700 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
701 base->th_notify_fd[0] = -1;
702 base->th_notify_fd[1] = -1;
703 event_debug_unassign(&base->th_notify);
706 /* Delete all non-internal events. */
707 for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
708 struct event *next = TAILQ_NEXT(ev, ev_next);
709 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
715 while ((ev = min_heap_top(&base->timeheap)) != NULL) {
719 for (i = 0; i < base->n_common_timeouts; ++i) {
720 struct common_timeout_list *ctl =
721 base->common_timeout_queues[i];
722 event_del(&ctl->timeout_event); /* Internal; doesn't count */
723 event_debug_unassign(&ctl->timeout_event);
724 for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
725 struct event *next = TAILQ_NEXT(ev,
726 ev_timeout_pos.ev_next_with_common_timeout);
727 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
735 if (base->common_timeout_queues)
736 mm_free(base->common_timeout_queues);
738 for (i = 0; i < base->nactivequeues; ++i) {
739 for (ev = TAILQ_FIRST(&base->activequeues[i]); ev; ) {
740 struct event *next = TAILQ_NEXT(ev, ev_active_next);
741 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
750 event_debug(("%s: %d events were still set in base",
751 __func__, n_deleted));
753 if (base->evsel != NULL && base->evsel->dealloc != NULL)
754 base->evsel->dealloc(base);
756 for (i = 0; i < base->nactivequeues; ++i)
757 EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
759 EVUTIL_ASSERT(min_heap_empty(&base->timeheap));
760 min_heap_dtor(&base->timeheap);
762 mm_free(base->activequeues);
764 EVUTIL_ASSERT(TAILQ_EMPTY(&base->eventqueue));
766 evmap_io_clear(&base->io);
767 evmap_signal_clear(&base->sigmap);
768 event_changelist_freemem(&base->changelist);
770 EVTHREAD_FREE_LOCK(base->th_base_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
771 EVTHREAD_FREE_COND(base->current_event_cond);
/* NOTE(review): garbled extraction -- return type, 'int res = 0;',
 * 'struct event *ev;', the 'evsel = base->evsel;' assignment, the
 * 'was_notifiable = 1;' line in the th_notify branch, queue-type arguments
 * to event_queue_remove, 'res = -1' error paths, 'goto done' labels, and
 * closing braces were dropped; line-number residue remains.  Visible
 * structure: under the base lock, manually unhook the internal signal and
 * thread-notify events (event_del cannot run before reinit), close the
 * notify fds, dealloc and re-init the backend, clear the changelist and
 * evmaps, re-add every registered I/O and signal event to the maps, and
 * re-create the notify machinery if it existed.  Restore from canonical
 * libevent 2.0 source before building. */
776 /* reinitialize the event base after a fork */
778 event_reinit(struct event_base *base)
780 const struct eventop *evsel;
783 int was_notifiable = 0;
785 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
790 /* Right now, reinit always takes effect, since even if the
791 backend doesn't require it, the signal socketpair code does.
795 /* check if this event mechanism requires reinit */
796 if (!evsel->need_reinit)
800 /* prevent internal delete */
801 if (base->sig.ev_signal_added) {
802 /* we cannot call event_del here because the base has
803 * not been reinitialized yet. */
804 event_queue_remove(base, &base->sig.ev_signal,
806 if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
807 event_queue_remove(base, &base->sig.ev_signal,
809 base->sig.ev_signal_added = 0;
811 if (base->th_notify_fd[0] != -1) {
812 /* we cannot call event_del here because the base has
813 * not been reinitialized yet. */
815 event_queue_remove(base, &base->th_notify,
817 if (base->th_notify.ev_flags & EVLIST_ACTIVE)
818 event_queue_remove(base, &base->th_notify,
/* NOTE(review): the next line clears sig.ev_signal_added inside the
 * th_notify branch -- looks like a copy-paste of line 809; verify against
 * upstream whether th_notify state was intended here. */
820 base->sig.ev_signal_added = 0;
821 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
822 if (base->th_notify_fd[1] != -1)
823 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
824 base->th_notify_fd[0] = -1;
825 base->th_notify_fd[1] = -1;
826 event_debug_unassign(&base->th_notify);
829 if (base->evsel->dealloc != NULL)
830 base->evsel->dealloc(base);
831 base->evbase = evsel->init(base);
832 if (base->evbase == NULL) {
833 event_errx(1, "%s: could not reinitialize event mechanism",
839 event_changelist_freemem(&base->changelist); /* XXX */
840 evmap_io_clear(&base->io);
841 evmap_signal_clear(&base->sigmap);
843 TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
844 if (ev->ev_events & (EV_READ|EV_WRITE)) {
845 if (evmap_io_add(base, ev->ev_fd, ev) == -1)
847 } else if (ev->ev_events & EV_SIGNAL) {
848 if (evmap_signal_add(base, (int)ev->ev_fd, ev) == -1)
853 if (was_notifiable && res == 0)
854 res = evthread_make_base_notifiable(base);
857 EVBASE_RELEASE_LOCK(base, th_base_lock);
862 event_get_supported_methods(void)
864 static const char **methods = NULL;
865 const struct eventop **method;
869 /* count all methods */
870 for (method = &eventops[0]; *method != NULL; ++method) {
874 /* allocate one more than we need for the NULL pointer */
875 tmp = mm_calloc((i + 1), sizeof(char *));
879 /* populate the array with the supported methods */
880 for (k = 0, i = 0; eventops[k] != NULL; ++k) {
881 tmp[i++] = eventops[k]->name;
886 mm_free((char**)methods);
893 struct event_config *
894 event_config_new(void)
896 struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
901 TAILQ_INIT(&cfg->entries);
907 event_config_entry_free(struct event_config_entry *entry)
909 if (entry->avoid_method != NULL)
910 mm_free((char *)entry->avoid_method);
915 event_config_free(struct event_config *cfg)
917 struct event_config_entry *entry;
919 while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
920 TAILQ_REMOVE(&cfg->entries, entry, next);
921 event_config_entry_free(entry);
927 event_config_set_flag(struct event_config *cfg, int flag)
936 event_config_avoid_method(struct event_config *cfg, const char *method)
938 struct event_config_entry *entry = mm_malloc(sizeof(*entry));
942 if ((entry->avoid_method = mm_strdup(method)) == NULL) {
947 TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
953 event_config_require_features(struct event_config *cfg,
958 cfg->require_features = features;
963 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
967 cfg->n_cpus_hint = cpus;
972 event_priority_init(int npriorities)
974 return event_base_priority_init(current_base, npriorities);
978 event_base_priority_init(struct event_base *base, int npriorities)
982 if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
983 || npriorities >= EVENT_MAX_PRIORITIES)
986 if (npriorities == base->nactivequeues)
989 if (base->nactivequeues) {
990 mm_free(base->activequeues);
991 base->nactivequeues = 0;
994 /* Allocate our priority queues */
995 base->activequeues = (struct event_list *)
996 mm_calloc(npriorities, sizeof(struct event_list));
997 if (base->activequeues == NULL) {
998 event_warn("%s: calloc", __func__);
1001 base->nactivequeues = npriorities;
1003 for (i = 0; i < base->nactivequeues; ++i) {
1004 TAILQ_INIT(&base->activequeues[i]);
1010 /* Returns true iff we're currently watching any events. */
1012 event_haveevents(struct event_base *base)
1014 /* Caller must hold th_base_lock */
1015 return (base->virtual_event_count > 0 || base->event_count > 0);
1018 /* "closure" function called when processing active signal events */
1020 event_signal_closure(struct event_base *base, struct event *ev)
1024 /* Allows deletes to work */
1025 ncalls = ev->ev_ncalls;
1026 ev->ev_pncalls = &ncalls;
1027 EVBASE_RELEASE_LOCK(base, th_base_lock);
1030 ev->ev_ncalls = ncalls;
1032 ev->ev_pncalls = NULL;
1033 (*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
1035 /* XXXX we can't do this without a lock on the base. */
1036 if (base->event_break)
1042 /* Common timeouts are special timeouts that are handled as queues rather than
1043 * in the minheap. This is more efficient than the minheap if we happen to
1044 * know that we're going to get several thousands of timeout events all with
1045 * the same timeout value.
1047 * Since all our timeout handling code assumes timevals can be copied,
1048 * assigned, etc, we can't use "magic pointer" to encode these common
1049 * timeouts. Searching through a list to see if every timeout is common could
1050 * also get inefficient. Instead, we take advantage of the fact that tv_usec
1051 * is 32 bits long, but only uses 20 of those bits (since it can never be over
1052 * 999999.) We use the top bits to encode 4 bites of magic number, and 8 bits
1053 * of index into the event_base's aray of common timeouts.
1056 #define MICROSECONDS_MASK 0x000fffff
1057 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1058 #define COMMON_TIMEOUT_IDX_SHIFT 20
1059 #define COMMON_TIMEOUT_MASK 0xf0000000
1060 #define COMMON_TIMEOUT_MAGIC 0x50000000
1062 #define COMMON_TIMEOUT_IDX(tv) \
1063 (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
1065 /** Return true iff if 'tv' is a common timeout in 'base' */
1067 is_common_timeout(const struct timeval *tv,
1068 const struct event_base *base)
1071 if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1073 idx = COMMON_TIMEOUT_IDX(tv);
1074 return idx < base->n_common_timeouts;
1077 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1078 * one is a common timeout. */
1080 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1082 return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1083 (tv2->tv_usec & ~MICROSECONDS_MASK);
1086 /** Requires that 'tv' is a common timeout. Return the corresponding
1087 * common_timeout_list. */
1088 static inline struct common_timeout_list *
1089 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1091 return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1096 common_timeout_ok(const struct timeval *tv,
1097 struct event_base *base)
1099 const struct timeval *expect =
1100 &get_common_timeout_list(base, tv)->duration;
1101 return tv->tv_sec == expect->tv_sec &&
1102 tv->tv_usec == expect->tv_usec;
1106 /* Add the timeout for the first event in given common timeout list to the
1107 * event_base's minheap. */
1109 common_timeout_schedule(struct common_timeout_list *ctl,
1110 const struct timeval *now, struct event *head)
1112 struct timeval timeout = head->ev_timeout;
1113 timeout.tv_usec &= MICROSECONDS_MASK;
1114 event_add_internal(&ctl->timeout_event, &timeout, 1);
1117 /* Callback: invoked when the timeout for a common timeout queue triggers.
1118 * This means that (at least) the first event in that queue should be run,
1119 * and the timeout should be rescheduled if there are more events. */
1121 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1124 struct common_timeout_list *ctl = arg;
1125 struct event_base *base = ctl->base;
1126 struct event *ev = NULL;
1127 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1128 gettime(base, &now);
1130 ev = TAILQ_FIRST(&ctl->events);
1131 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1132 (ev->ev_timeout.tv_sec == now.tv_sec &&
1133 (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1135 event_del_internal(ev);
1136 event_active_nolock(ev, EV_TIMEOUT, 1);
1139 common_timeout_schedule(ctl, &now, ev);
1140 EVBASE_RELEASE_LOCK(base, th_base_lock);
1143 #define MAX_COMMON_TIMEOUTS 256
/* NOTE(review): garbled extraction -- 'int i;', 'struct timeval tv;', the
 * 'duration = &tv;' normalization, NULL checks after mm_realloc/mm_calloc,
 * 'goto done' statements, the 'done:' label, closing braces, and the final
 * 'return result;' were dropped; line-number residue remains.  Visible
 * structure, under the base lock: normalize an overflowing tv_usec, search
 * existing queues for a matching duration, otherwise grow the queue array
 * if needed, allocate a new common_timeout_list, encode magic+index bits
 * into its duration's tv_usec, wire up its internal timeout event, and
 * return a pointer to the encoded duration.  Restore from canonical
 * libevent 2.0 source before building. */
1145 const struct timeval *
1146 event_base_init_common_timeout(struct event_base *base,
1147 const struct timeval *duration)
1151 const struct timeval *result=NULL;
1152 struct common_timeout_list *new_ctl;
1154 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1155 if (duration->tv_usec > 1000000) {
1156 memcpy(&tv, duration, sizeof(struct timeval));
1157 if (is_common_timeout(duration, base))
1158 tv.tv_usec &= MICROSECONDS_MASK;
1159 tv.tv_sec += tv.tv_usec / 1000000;
1160 tv.tv_usec %= 1000000;
1163 for (i = 0; i < base->n_common_timeouts; ++i) {
1164 const struct common_timeout_list *ctl =
1165 base->common_timeout_queues[i];
1166 if (duration->tv_sec == ctl->duration.tv_sec &&
1167 duration->tv_usec ==
1168 (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1169 EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1170 result = &ctl->duration;
1174 if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1175 event_warnx("%s: Too many common timeouts already in use; "
1176 "we only support %d per event_base", __func__,
1177 MAX_COMMON_TIMEOUTS);
1180 if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1181 int n = base->n_common_timeouts < 16 ? 16 :
1182 base->n_common_timeouts*2;
1183 struct common_timeout_list **newqueues =
1184 mm_realloc(base->common_timeout_queues,
/* NOTE(review): sizeof uses 'struct common_timeout_queue *' but the array
 * holds 'struct common_timeout_list *' -- benign (all object pointers are
 * the same size here) but the type name looks wrong; verify. */
1185 n*sizeof(struct common_timeout_queue *));
1187 event_warn("%s: realloc",__func__);
1190 base->n_common_timeouts_allocated = n;
1191 base->common_timeout_queues = newqueues;
1193 new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1195 event_warn("%s: calloc",__func__);
1198 TAILQ_INIT(&new_ctl->events);
1199 new_ctl->duration.tv_sec = duration->tv_sec;
1200 new_ctl->duration.tv_usec =
1201 duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1202 (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1203 evtimer_assign(&new_ctl->timeout_event, base,
1204 common_timeout_callback, new_ctl);
1205 new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1206 event_priority_set(&new_ctl->timeout_event, 0);
1207 new_ctl->base = base;
1208 base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1209 result = &new_ctl->duration;
1213 EVUTIL_ASSERT(is_common_timeout(result, base));
1215 EVBASE_RELEASE_LOCK(base, th_base_lock);
1219 /* Closure function invoked when we're activating a persistent event. */
1221 event_persist_closure(struct event_base *base, struct event *ev)
1223 /* reschedule the persistent event if we have a timeout. */
1224 if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1225 /* If there was a timeout, we want it to run at an interval of
1226 * ev_io_timeout after the last time it was _scheduled_ for,
1227 * not ev_io_timeout after _now_. If it fired for another
1228 * reason, though, the timeout ought to start ticking _now_. */
1229 struct timeval run_at;
1230 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1231 &ev->ev_io_timeout));
1232 if (is_common_timeout(&ev->ev_timeout, base)) {
1233 ev_uint32_t usec_mask;
1234 struct timeval delay, relative_to;
1235 delay = ev->ev_io_timeout;
1236 usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1237 delay.tv_usec &= MICROSECONDS_MASK;
1238 if (ev->ev_res & EV_TIMEOUT) {
1239 relative_to = ev->ev_timeout;
1240 relative_to.tv_usec &= MICROSECONDS_MASK;
1242 gettime(base, &relative_to);
1244 evutil_timeradd(&relative_to, &delay, &run_at);
1245 run_at.tv_usec |= usec_mask;
1247 struct timeval relative_to;
1248 if (ev->ev_res & EV_TIMEOUT) {
1249 relative_to = ev->ev_timeout;
1251 gettime(base, &relative_to);
1253 evutil_timeradd(&ev->ev_io_timeout, &relative_to,
1256 event_add_internal(ev, &run_at, 1);
1258 EVBASE_RELEASE_LOCK(base, th_base_lock);
1259 (*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
/* NOTE(review): garbled extraction -- the comment opener, 'static int',
 * locals ('struct event *ev; int count = 0;'), the non-persistent
 * event_del_internal branch structure, '++count;', the event_debug(())
 * wrapper around the trace message, 'break;' statements between switch
 * cases, the EV_CLOSURE_NONE default callback invocation line, 'return -1'
 * on event_break, and the final 'return count;' were dropped; line-number
 * residue remains.  Visible structure: pop events off the active queue;
 * persistent events are merely removed from the active list while others
 * are fully deleted; publish base->current_event for cross-thread waiters;
 * dispatch per ev_closure (signal/persist/plain callback) with the lock
 * released around user code; broadcast to waiters afterwards; honor
 * event_break.  Restore from canonical libevent 2.0 source. */
1263 Helper for event_process_active to process all the events in a single queue,
1264 releasing the lock as we go. This function requires that the lock be held
1265 when it's invoked. Returns -1 if we get a signal or an event_break that
1266 means we should stop processing any active events now. Otherwise returns
1267 the number of non-internal events that we processed.
1270 event_process_active_single_queue(struct event_base *base,
1271 struct event_list *activeq)
1276 EVUTIL_ASSERT(activeq != NULL);
1278 for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
1279 if (ev->ev_events & EV_PERSIST)
1280 event_queue_remove(base, ev, EVLIST_ACTIVE);
1282 event_del_internal(ev);
1283 if (!(ev->ev_flags & EVLIST_INTERNAL))
1287 "event_process_active: event: %p, %s%scall %p",
1289 ev->ev_res & EV_READ ? "EV_READ " : " ",
1290 ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1293 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1294 base->current_event = ev;
1295 base->current_event_waiters = 0;
1298 switch (ev->ev_closure) {
1299 case EV_CLOSURE_SIGNAL:
1300 event_signal_closure(base, ev);
1302 case EV_CLOSURE_PERSIST:
1303 event_persist_closure(base, ev);
1306 case EV_CLOSURE_NONE:
1307 EVBASE_RELEASE_LOCK(base, th_base_lock);
1309 (int)ev->ev_fd, ev->ev_res, ev->ev_arg);
1313 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1314 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1315 base->current_event = NULL;
1316 if (base->current_event_waiters) {
1317 base->current_event_waiters = 0;
1318 EVTHREAD_COND_BROADCAST(base->current_event_cond);
1322 if (base->event_break)
/*
  Process up to MAX_DEFERRED of the defered_cb entries in 'queue'. If
  *breakptr becomes set to 1, stop. Requires that we start out holding
  the lock on 'queue'; releases the lock around 'queue' for each deferred_cb
  we process (so user code never runs while the queue lock is held).
*/
event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
	struct deferred_cb *cb;

/* Cap on callbacks run per invocation so deferred work cannot starve the
 * rest of the event loop. */
#define MAX_DEFERRED 16
	while ((cb = TAILQ_FIRST(&queue->deferred_cb_list))) {
		/* Unlink first, then drop the lock before running cb->cb. */
		TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
		--queue->active_count;
		UNLOCK_DEFERRED_QUEUE(queue);

		cb->cb(cb, cb->arg);

		LOCK_DEFERRED_QUEUE(queue);
		if (++count == MAX_DEFERRED)
/*
 * Active events are stored in priority queues. Lower priorities are always
 * processed before higher priorities. Low priority events can starve high
 * priority ones.
 */
event_process_active(struct event_base *base)
	/* Caller must hold th_base_lock */
	struct event_list *activeq = NULL;

	/* Scan queues from highest priority (lowest index) down; run the
	 * first non-empty one. */
	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
			activeq = &base->activequeues[i];
			c = event_process_active_single_queue(base, activeq);
				break; /* Processed a real event; do not
					* consider lower-priority events */
			/* If we get here, all of the events we processed
			 * were internal. Continue. */

	/* Deferred callbacks run after all regular active events. */
	event_process_deferred_callbacks(&base->defer_queue,&base->event_break);
/*
 * Wait continuously for events. We exit only if no events are left.
 */

/* Run the loop on the default (global) event_base until no events remain. */
int
event_dispatch(void)
{
	return (event_loop(0));
}

/* Run the loop on an explicit event_base until no events remain.
 * Returns what event_base_loop() returns: 0 on success, -1 on error,
 * 1 if no events were pending. */
int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}
1407 event_base_get_method(const struct event_base *base)
1409 EVUTIL_ASSERT(base);
1410 return (base->evsel->name);
1413 /** Callback: used to implement event_base_loopexit by telling the event_base
1414 * that it's time to exit its loop. */
1416 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1418 struct event_base *base = arg;
1419 base->event_gotterm = 1;
1423 event_loopexit(const struct timeval *tv)
1425 return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1430 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1432 return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1437 event_loopbreak(void)
1439 return (event_base_loopbreak(current_base));
1443 event_base_loopbreak(struct event_base *event_base)
1446 if (event_base == NULL)
1449 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1450 event_base->event_break = 1;
1452 if (EVBASE_NEED_NOTIFY(event_base)) {
1453 r = evthread_notify_base(event_base);
1457 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1462 event_base_got_break(struct event_base *event_base)
1465 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1466 res = event_base->event_break;
1467 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1472 event_base_got_exit(struct event_base *event_base)
1475 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1476 res = event_base->event_gotterm;
1477 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1481 /* not thread safe */
1484 event_loop(int flags)
1486 return event_base_loop(current_base, flags);
/* Main loop: repeatedly compute the next timeout, run the backend dispatch,
 * then fire timeouts and active callbacks, until asked to stop or no events
 * remain. Returns 0 on success, -1 on error, 1 if no events were pending. */
event_base_loop(struct event_base *base, int flags)
	const struct eventop *evsel = base->evsel;
	struct timeval *tv_p;
	int res, done, retval = 0;

	/* Grab the lock. We will release it inside evsel.dispatch, and again
	 * as we invoke user callbacks. */
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	/* Reject recursive/concurrent invocation on the same base. */
	if (base->running_loop) {
		event_warnx("%s: reentrant invocation. Only one event_base_loop"
		    " can run on each event_base at once.", __func__);
		EVBASE_RELEASE_LOCK(base, th_base_lock);

	base->running_loop = 1;

	clear_time_cache(base);

	/* If signal events are registered, make signal dispatch target us. */
	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
		evsig_set_base(base);

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
	base->th_owner_id = EVTHREAD_GET_ID();

	base->event_gotterm = base->event_break = 0;

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {

		if (base->event_break) {

		/* Compensate if the wall clock jumped backwards. */
		timeout_correct(base, &tv);

		/* Only block in dispatch when nothing is already runnable. */
		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
			 * if we have active events, we just poll new events
			evutil_timerclear(&tv);

		/* If we have no events, we just exit */
		if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
			event_debug(("%s: no events registered.", __func__));

		/* update last old time */
		gettime(base, &base->event_tv);

		/* Dispatch reads the clock itself; invalidate the cache. */
		clear_time_cache(base);

		res = evsel->dispatch(base, tv_p);

			event_debug(("%s: dispatch returned unsuccessfully.",

		update_time_cache(base);

		/* Fire every timeout that has elapsed ... */
		timeout_process(base);

		/* ... then run the active callbacks. */
		if (N_ACTIVE_CALLBACKS(base)) {
			int n = event_process_active(base);
			if ((flags & EVLOOP_ONCE)
			    && N_ACTIVE_CALLBACKS(base) == 0
		} else if (flags & EVLOOP_NONBLOCK)

	event_debug(("%s: asked to terminate loop.", __func__));

	clear_time_cache(base);
	base->running_loop = 0;

	EVBASE_RELEASE_LOCK(base, th_base_lock);
1591 /* Sets up an event for processing once */
1595 void (*cb)(evutil_socket_t, short, void *);
1599 /* One-time callback to implement event_base_once: invokes the user callback,
1600 * then deletes the allocated storage */
1602 event_once_cb(evutil_socket_t fd, short events, void *arg)
1604 struct event_once *eonce = arg;
1606 (*eonce->cb)(fd, events, eonce->arg);
1607 event_debug_unassign(&eonce->ev);
1611 /* not threadsafe, event scheduled once. */
1613 event_once(evutil_socket_t fd, short events,
1614 void (*callback)(evutil_socket_t, short, void *),
1615 void *arg, const struct timeval *tv)
1617 return event_base_once(current_base, fd, events, callback, arg, tv);
/* Schedules an event once */
/* Allocate a self-freeing one-shot event: fires once, then event_once_cb
 * releases the storage. EV_SIGNAL and EV_PERSIST are rejected. */
event_base_once(struct event_base *base, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
	struct event_once *eonce;

	/* We cannot support signals that just fire once, or persistent
	if (events & (EV_SIGNAL|EV_PERSIST))

	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)

	eonce->cb = callback;

	if (events == EV_TIMEOUT) {
		/* NULL tv means "fire as soon as possible": zero timeout. */
		evutil_timerclear(&etv);

		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
	/* Bad event combination */

	res = event_add(&eonce->ev, tv);
1669 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
1672 base = current_base;
1674 _event_debug_assert_not_added(ev);
1678 ev->ev_callback = callback;
1681 ev->ev_events = events;
1683 ev->ev_flags = EVLIST_INIT;
1685 ev->ev_pncalls = NULL;
1687 if (events & EV_SIGNAL) {
1688 if ((events & (EV_READ|EV_WRITE)) != 0) {
1689 event_warnx("%s: EV_SIGNAL is not compatible with "
1690 "EV_READ or EV_WRITE", __func__);
1693 ev->ev_closure = EV_CLOSURE_SIGNAL;
1695 if (events & EV_PERSIST) {
1696 evutil_timerclear(&ev->ev_io_timeout);
1697 ev->ev_closure = EV_CLOSURE_PERSIST;
1699 ev->ev_closure = EV_CLOSURE_NONE;
1703 min_heap_elem_init(ev);
1706 /* by default, we put new events into the middle priority */
1707 ev->ev_pri = base->nactivequeues / 2;
1710 _event_debug_note_setup(ev);
1716 event_base_set(struct event_base *base, struct event *ev)
1718 /* Only innocent events may be assigned to a different base */
1719 if (ev->ev_flags != EVLIST_INIT)
1722 _event_debug_assert_is_setup(ev);
1725 ev->ev_pri = base->nactivequeues/2;
1731 event_set(struct event *ev, evutil_socket_t fd, short events,
1732 void (*callback)(evutil_socket_t, short, void *), void *arg)
1735 r = event_assign(ev, current_base, fd, events, callback, arg);
1736 EVUTIL_ASSERT(r == 0);
1740 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
1743 ev = mm_malloc(sizeof(struct event));
1746 if (event_assign(ev, base, fd, events, cb, arg) < 0) {
1755 event_free(struct event *ev)
1757 _event_debug_assert_is_setup(ev);
1759 /* make sure that this event won't be coming back to haunt us. */
1761 _event_debug_note_teardown(ev);
1767 event_debug_unassign(struct event *ev)
1769 _event_debug_assert_not_added(ev);
1770 _event_debug_note_teardown(ev);
1772 ev->ev_flags &= ~EVLIST_INIT;
1776 * Set's the priority of an event - if an event is already scheduled
1777 * changing the priority is going to fail.
1781 event_priority_set(struct event *ev, int pri)
1783 _event_debug_assert_is_setup(ev);
1785 if (ev->ev_flags & EVLIST_ACTIVE)
1787 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
1796 * Checks if a specific event is pending or scheduled.
1800 event_pending(const struct event *ev, short event, struct timeval *tv)
1804 _event_debug_assert_is_setup(ev);
1806 if (ev->ev_flags & EVLIST_INSERTED)
1807 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
1808 if (ev->ev_flags & EVLIST_ACTIVE)
1809 flags |= ev->ev_res;
1810 if (ev->ev_flags & EVLIST_TIMEOUT)
1811 flags |= EV_TIMEOUT;
1813 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
1815 /* See if there is a timeout that we should report */
1816 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
1817 struct timeval tmp = ev->ev_timeout;
1818 tmp.tv_usec &= MICROSECONDS_MASK;
1819 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1820 /* correctly remamp to real time */
1821 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
1827 return (flags & event);
1831 event_initialized(const struct event *ev)
1833 if (!(ev->ev_flags & EVLIST_INIT))
1840 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
1842 _event_debug_assert_is_setup(event);
1845 *base_out = event->ev_base;
1847 *fd_out = event->ev_fd;
1849 *events_out = event->ev_events;
1851 *callback_out = event->ev_callback;
1853 *arg_out = event->ev_arg;
1857 event_get_struct_event_size(void)
1859 return sizeof(struct event);
1863 event_get_fd(const struct event *ev)
1865 _event_debug_assert_is_setup(ev);
1870 event_get_base(const struct event *ev)
1872 _event_debug_assert_is_setup(ev);
1877 event_get_events(const struct event *ev)
1879 _event_debug_assert_is_setup(ev);
1880 return ev->ev_events;
1884 event_get_callback(const struct event *ev)
1886 _event_debug_assert_is_setup(ev);
1887 return ev->ev_callback;
1891 event_get_callback_arg(const struct event *ev)
1893 _event_debug_assert_is_setup(ev);
1898 event_add(struct event *ev, const struct timeval *tv)
1902 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
1903 event_warnx("%s: event has no event_base set.", __func__);
1907 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
1909 res = event_add_internal(ev, tv, 0);
1911 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
1916 /* Helper callback: wake an event_base from another thread. This version
1917 * works by writing a byte to one end of a socketpair, so that the event_base
1918 * listening on the other end will wake up as the corresponding event
1921 evthread_notify_base_default(struct event_base *base)
1927 r = send(base->th_notify_fd[1], buf, 1, 0);
1929 r = write(base->th_notify_fd[1], buf, 1);
1931 return (r < 0 && errno != EAGAIN) ? -1 : 0;
1934 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
1935 /* Helper callback: wake an event_base from another thread. This version
1936 * assumes that you have a working eventfd() implementation. */
1938 evthread_notify_base_eventfd(struct event_base *base)
1940 ev_uint64_t msg = 1;
1943 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
1944 } while (r < 0 && errno == EAGAIN);
1946 return (r < 0) ? -1 : 0;
1950 /** Tell the thread currently running the event_loop for base (if any) that it
1951 * needs to stop waiting in its dispatch function (if it is) and process all
1952 * active events and deferred callbacks (if there are any). */
1954 evthread_notify_base(struct event_base *base)
1956 EVENT_BASE_ASSERT_LOCKED(base);
1957 if (!base->th_notify_fn)
1959 if (base->is_notify_pending)
1961 base->is_notify_pending = 1;
1962 return base->th_notify_fn(base);
/* Implementation function to add an event. Works just like event_add,
 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
 * we treat tv as an absolute time, not as an interval to add to the current
 * time. Returns 0 on success, -1 on failure with no state changed. */
event_add_internal(struct event *ev, const struct timeval *tv,
	struct event_base *base = ev->ev_base;

	EVENT_BASE_ASSERT_LOCKED(base);
	_event_debug_assert_is_setup(ev);

		"event_add: event: %p (fd %d), %s%s%scall %p",
		ev->ev_events & EV_READ ? "EV_READ " : " ",
		ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		tv ? "EV_TIMEOUT " : " ",

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
			1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */

	/* If the main thread is currently executing a signal event's
	 * callback, and we are not the main thread, then we want to wait
	 * until the callback is done before we mess with the event, or else
	 * we can race on ev_ncalls and ev_pncalls below. */
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
	if (base->current_event == ev && (ev->ev_events & EV_SIGNAL)
	    && !EVBASE_IN_THREAD(base)) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);

	/* Register with the backend only if not already inserted/active. */
	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		if (ev->ev_events & (EV_READ|EV_WRITE))
			res = evmap_io_add(base, ev->ev_fd, ev);
		else if (ev->ev_events & EV_SIGNAL)
			res = evmap_signal_add(base, (int)ev->ev_fd, ev);
			event_queue_insert(base, ev, EVLIST_INSERTED);
			/* evmap says we need to notify the main thread. */

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {

		/*
		 * for persistent timeout events, we remember the
		 * timeout value and re-add the event.
		 *
		 * If tv_is_absolute, this was already set.
		 */
		if (ev->ev_closure == EV_CLOSURE_PERSIST && !tv_is_absolute)
			ev->ev_io_timeout = *tv;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT) {
			/* XXX I believe this is needless. */
			if (min_heap_elt_is_top(ev))
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout. Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			if (ev->ev_events & EV_SIGNAL) {
				/* See if we are just active executing
				 * this event in a loop
				 */
				if (ev->ev_ncalls && ev->ev_pncalls) {
					/* Abort loop */
					*ev->ev_pncalls = 0;

			event_queue_remove(base, ev, EVLIST_ACTIVE);

		gettime(base, &now);

		/* Common timeouts encode a queue index in the high usec
		 * bits; preserve that magic while computing the deadline. */
		common_timeout = is_common_timeout(tv, base);
		if (tv_is_absolute) {
			ev->ev_timeout = *tv;
		} else if (common_timeout) {
			struct timeval tmp = *tv;
			tmp.tv_usec &= MICROSECONDS_MASK;
			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
			ev->ev_timeout.tv_usec |=
			    (tv->tv_usec & ~MICROSECONDS_MASK);
			evutil_timeradd(&now, tv, &ev->ev_timeout);

			 "event_add: timeout in %d seconds, call %p",
			 (int)tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
		if (common_timeout) {
			struct common_timeout_list *ctl =
			    get_common_timeout_list(base, &ev->ev_timeout);
			/* Only the queue head needs a heap entry. */
			if (ev == TAILQ_FIRST(&ctl->events)) {
				common_timeout_schedule(ctl, &now, ev);
			/* See if the earliest timeout is now earlier than it
			 * was before: if so, we will need to tell the main
			 * thread to wake up earlier than it would
			 */
			if (min_heap_elt_is_top(ev))

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	_event_debug_note_add(ev);
2120 event_del(struct event *ev)
2124 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2125 event_warnx("%s: event has no event_base set.", __func__);
2129 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2131 res = event_del_internal(ev);
2133 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
/* Helper for event_del: always called with th_base_lock held. */
event_del_internal(struct event *ev)
	struct event_base *base;
	int res = 0, notify = 0;

	event_debug(("event_del: %p (fd %d), callback %p",
		ev, (int)ev->ev_fd, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)

	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);

	/* If the main thread is currently executing this event's callback,
	 * and we are not the main thread, then we want to wait until the
	 * callback is done before we start removing the event. That way,
	 * when this function returns, it will be safe to free the
	 * user-supplied argument. */
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
	if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_events & EV_SIGNAL) {
		if (ev->ev_ncalls && ev->ev_pncalls) {
			/* Abort loop */
			*ev->ev_pncalls = 0;

	if (ev->ev_flags & EVLIST_TIMEOUT) {
		/* NOTE: We never need to notify the main thread because of a
		 * deleted timeout event: all that could happen if we don't is
		 * that the dispatch loop might wake up too early. But the
		 * point of notifying the main thread _is_ to wake up the
		 * dispatch loop early anyway, so we wouldn't gain anything by
		 * doing it.
		 */
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		/* Unregister from the backend as well. */
		if (ev->ev_events & (EV_READ|EV_WRITE))
			res = evmap_io_del(base, ev->ev_fd, ev);
			res = evmap_signal_del(base, (int)ev->ev_fd, ev);
			/* evmap says we need to notify the main thread. */

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	_event_debug_note_del(ev);
2214 event_active(struct event *ev, int res, short ncalls)
2216 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2217 event_warnx("%s: event has no event_base set.", __func__);
2221 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2223 _event_debug_assert_is_setup(ev);
2225 event_active_nolock(ev, res, ncalls);
2227 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
/* As event_active, but requires the base lock to be held already. */
event_active_nolock(struct event *ev, int res, short ncalls)
	struct event_base *base;

	event_debug(("event_active: %p (fd %d), res %d, callback %p",
		ev, (int)ev->ev_fd, (int)res, ev->ev_callback));

	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {

	EVENT_BASE_ASSERT_LOCKED(base);

	if (ev->ev_events & EV_SIGNAL) {
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
		/* Wait for a running callback on this event in another
		 * thread before touching ev_ncalls/ev_pncalls. */
		if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
			++base->current_event_waiters;
			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);

		ev->ev_ncalls = ncalls;
		ev->ev_pncalls = NULL;

	event_queue_insert(base, ev, EVLIST_ACTIVE);

	/* Wake the loop if it is blocked in dispatch in another thread. */
	if (EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
2270 event_deferred_cb_init(struct deferred_cb *cb, deferred_cb_fn fn, void *arg)
2272 memset(cb, 0, sizeof(struct deferred_cb));
2278 event_deferred_cb_cancel(struct deferred_cb_queue *queue,
2279 struct deferred_cb *cb)
2283 queue = ¤t_base->defer_queue;
2288 LOCK_DEFERRED_QUEUE(queue);
2290 TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
2291 --queue->active_count;
2294 UNLOCK_DEFERRED_QUEUE(queue);
2298 event_deferred_cb_schedule(struct deferred_cb_queue *queue,
2299 struct deferred_cb *cb)
2303 queue = ¤t_base->defer_queue;
2308 LOCK_DEFERRED_QUEUE(queue);
2311 TAILQ_INSERT_TAIL(&queue->deferred_cb_list, cb, cb_next);
2312 ++queue->active_count;
2313 if (queue->notify_fn)
2314 queue->notify_fn(queue, queue->notify_arg);
2316 UNLOCK_DEFERRED_QUEUE(queue);
2320 timeout_next(struct event_base *base, struct timeval **tv_p)
2322 /* Caller must hold th_base_lock */
2325 struct timeval *tv = *tv_p;
2328 ev = min_heap_top(&base->timeheap);
2331 /* if no time-based events are active wait for I/O */
2336 if (gettime(base, &now) == -1) {
2341 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
2342 evutil_timerclear(tv);
2346 evutil_timersub(&ev->ev_timeout, &now, tv);
2348 EVUTIL_ASSERT(tv->tv_sec >= 0);
2349 EVUTIL_ASSERT(tv->tv_usec >= 0);
2350 event_debug(("timeout_next: in %d seconds", (int)tv->tv_sec));
/*
 * Determines if the time is running backwards by comparing the current time
 * against the last time we checked. Not needed when using clock monotonic.
 * If time is running backwards, we adjust the firing time of every event by
 * the amount that time seems to have jumped.
 */
timeout_correct(struct event_base *base, struct timeval *tv)
	/* Caller must hold th_base_lock. */

	/* Check if time is running backwards */
	if (evutil_timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;

	event_debug(("%s: time is running backwards, corrected",
	/* 'off' is the size of the backwards jump. */
	evutil_timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of the node without destroying
	 * the minheap property, because we change every element.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		evutil_timersub(ev_tv, &off, ev_tv);

	/* Common-timeout deadlines carry magic bits in tv_usec: strip them,
	 * shift, then restore the magic and the queue index. */
	for (i=0; i<base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		TAILQ_FOREACH(ev, &ctl->events,
		    ev_timeout_pos.ev_next_with_common_timeout) {
			struct timeval *ev_tv = &ev->ev_timeout;
			ev_tv->tv_usec &= MICROSECONDS_MASK;
			evutil_timersub(ev_tv, &off, ev_tv);
			ev_tv->tv_usec |= COMMON_TIMEOUT_MAGIC |
			    (i<<COMMON_TIMEOUT_IDX_SHIFT);

	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
2414 /* Activate every event whose timeout has elapsed. */
2416 timeout_process(struct event_base *base)
2418 /* Caller must hold lock. */
2422 if (min_heap_empty(&base->timeheap)) {
2426 gettime(base, &now);
2428 while ((ev = min_heap_top(&base->timeheap))) {
2429 if (evutil_timercmp(&ev->ev_timeout, &now, >))
2432 /* delete this event from the I/O queues */
2433 event_del_internal(ev);
2435 event_debug(("timeout_process: call %p",
2437 event_active_nolock(ev, EV_TIMEOUT, 1);
/* Remove 'ev' from 'queue' (EVLIST_...) in base. */
event_queue_remove(struct event_base *base, struct event *ev, int queue)
	EVENT_BASE_ASSERT_LOCKED(base);

	/* Removing from a queue the event is not on is a hard error. */
	if (!(ev->ev_flags & queue)) {
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
			   ev, ev->ev_fd, queue);

	/* Internal events (e.g. the notify event) are excluded from the
	 * user-visible count. */
	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		base->event_count_active--;
		TAILQ_REMOVE(&base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
	case EVLIST_TIMEOUT:
		/* Common timeouts live on per-duration lists; everything
		 * else lives on the min-heap. */
		if (is_common_timeout(&ev->ev_timeout, base)) {
			struct common_timeout_list *ctl =
			    get_common_timeout_list(base, &ev->ev_timeout);
			TAILQ_REMOVE(&ctl->events, ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			min_heap_erase(&base->timeheap, ev);
		event_errx(1, "%s: unknown queue %x", __func__, queue);
/* Add 'ev' to the common timeout list 'ctl', keeping it sorted. */
insert_common_timeout_inorder(struct common_timeout_list *ctl,
	/* By all logic, we should just be able to append 'ev' to the end of
	 * ctl->events, since the timeout on each 'ev' is set to {the common
	 * timeout} + {the time when we add the event}, and so the events
	 * should arrive in order of their timeouts. But just in case
	 * there's some wacky threading issue going on, we do a search from
	 * the end of 'ev' to find the right insertion point.
	 */
	TAILQ_FOREACH_REVERSE(e, &ctl->events,
	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
		/* This timercmp is a little sneaky, since both ev and e have
		 * magic values in tv_usec. Fortunately, they ought to have
		 * the _same_ magic values in tv_usec. Let's assert for that.
		 */
		    is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
	/* Reached the front: 'ev' is the earliest entry. */
	TAILQ_INSERT_HEAD(&ctl->events, ev,
	    ev_timeout_pos.ev_next_with_common_timeout);
/* Add 'ev' to the queue named by EVLIST_* flag 'queue' on 'base'.
 * Counterpart of event_queue_remove. */
event_queue_insert(struct event_base *base, struct event *ev, int queue)
	EVENT_BASE_ASSERT_LOCKED(base);

	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
			   ev, ev->ev_fd, queue);

	/* Internal events are excluded from the user-visible count. */
	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		base->event_count_active++;
		TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
	case EVLIST_TIMEOUT: {
		/* Common timeouts go to their sorted per-duration list;
		 * others go on the min-heap. */
		if (is_common_timeout(&ev->ev_timeout, base)) {
			struct common_timeout_list *ctl =
			    get_common_timeout_list(base, &ev->ev_timeout);
			insert_common_timeout_inorder(ctl, ev);
			min_heap_push(&base->timeheap, ev);
		event_errx(1, "%s: unknown queue %x", __func__, queue);
2554 /* Functions for debugging */
2557 event_get_version(void)
2559 return (_EVENT_VERSION);
2563 event_get_version_number(void)
2565 return (_EVENT_NUMERIC_VERSION);
2569 * No thread-safe interface needed - the information should be the same
2574 event_get_method(void)
2576 return (current_base->evsel->name);
2579 #ifndef _EVENT_DISABLE_MM_REPLACEMENT
2580 static void *(*_mm_malloc_fn)(size_t sz) = NULL;
2581 static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
2582 static void (*_mm_free_fn)(void *p) = NULL;
2585 event_mm_malloc_(size_t sz)
2588 return _mm_malloc_fn(sz);
2594 event_mm_calloc_(size_t count, size_t size)
2596 if (_mm_malloc_fn) {
2597 size_t sz = count * size;
2598 void *p = _mm_malloc_fn(sz);
2603 return calloc(count, size);
2607 event_mm_strdup_(const char *str)
2609 if (_mm_malloc_fn) {
2610 size_t ln = strlen(str);
2611 void *p = _mm_malloc_fn(ln+1);
2613 memcpy(p, str, ln+1);
2617 return _strdup(str);
2624 event_mm_realloc_(void *ptr, size_t sz)
2627 return _mm_realloc_fn(ptr, sz);
2629 return realloc(ptr, sz);
2633 event_mm_free_(void *ptr)
2642 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
2643 void *(*realloc_fn)(void *ptr, size_t sz),
2644 void (*free_fn)(void *ptr))
2646 _mm_malloc_fn = malloc_fn;
2647 _mm_realloc_fn = realloc_fn;
2648 _mm_free_fn = free_fn;
2652 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
2654 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
2658 struct event_base *base = arg;
2660 r = read(fd, (void*) &msg, sizeof(msg));
2661 if (r<0 && errno != EAGAIN) {
2662 event_sock_warn(fd, "Error reading from eventfd");
2664 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2665 base->is_notify_pending = 0;
2666 EVBASE_RELEASE_LOCK(base, th_base_lock);
2671 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
2673 unsigned char buf[1024];
2674 struct event_base *base = arg;
2676 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
2679 while (read(fd, (char*)buf, sizeof(buf)) > 0)
2683 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2684 base->is_notify_pending = 0;
2685 EVBASE_RELEASE_LOCK(base, th_base_lock);
2689 evthread_make_base_notifiable(struct event_base *base)
2691 void (*cb)(evutil_socket_t, short, void *) = evthread_notify_drain_default;
2692 int (*notify)(struct event_base *) = evthread_notify_base_default;
2694 /* XXXX grab the lock here? */
2698 if (base->th_notify_fd[0] >= 0)
2701 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
2703 #define EFD_CLOEXEC 0
2705 base->th_notify_fd[0] = eventfd(0, EFD_CLOEXEC);
2706 if (base->th_notify_fd[0] >= 0) {
2707 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2708 notify = evthread_notify_base_eventfd;
2709 cb = evthread_notify_drain_eventfd;
2712 #if defined(_EVENT_HAVE_PIPE)
2713 if (base->th_notify_fd[0] < 0) {
2714 if ((base->evsel->features & EV_FEATURE_FDS)) {
2715 if (pipe(base->th_notify_fd) < 0) {
2716 event_warn("%s: pipe", __func__);
2718 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2719 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
2726 #define LOCAL_SOCKETPAIR_AF AF_INET
2728 #define LOCAL_SOCKETPAIR_AF AF_UNIX
2730 if (base->th_notify_fd[0] < 0) {
2731 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0,
2732 base->th_notify_fd) == -1) {
2733 event_sock_warn(-1, "%s: socketpair", __func__);
2736 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2737 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
2741 evutil_make_socket_nonblocking(base->th_notify_fd[0]);
2743 base->th_notify_fn = notify;
2746 Making the second socket nonblocking is a bit subtle, given that we
2747 ignore any EAGAIN returns when writing to it, and you don't usally
2748 do that for a nonblocking socket. But if the kernel gives us EAGAIN,
2749 then there's no need to add any more data to the buffer, since
2750 the main thread is already either about to wake up and drain it,
2751 or woken up and in the process of draining it.
2753 if (base->th_notify_fd[1] > 0)
2754 evutil_make_socket_nonblocking(base->th_notify_fd[1]);
2756 /* prepare an event that we can use for wakeup */
2757 event_assign(&base->th_notify, base, base->th_notify_fd[0],
2758 EV_READ|EV_PERSIST, cb, base);
2760 /* we need to mark this as internal event */
2761 base->th_notify.ev_flags |= EVLIST_INTERNAL;
2762 event_priority_set(&base->th_notify, 0);
2764 return event_add(&base->th_notify, NULL);
2768 event_base_dump_events(struct event_base *base, FILE *output)
2772 fprintf(output, "Inserted events:\n");
2773 TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
2774 fprintf(output, " %p [fd %ld]%s%s%s%s%s\n",
2775 (void*)e, (long)e->ev_fd,
2776 (e->ev_events&EV_READ)?" Read":"",
2777 (e->ev_events&EV_WRITE)?" Write":"",
2778 (e->ev_events&EV_SIGNAL)?" Signal":"",
2779 (e->ev_events&EV_TIMEOUT)?" Timeout":"",
2780 (e->ev_events&EV_PERSIST)?" Persist":"");
2783 for (i = 0; i < base->nactivequeues; ++i) {
2784 if (TAILQ_EMPTY(&base->activequeues[i]))
2786 fprintf(output, "Active events [priority %d]:\n", i);
2787 TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
2788 fprintf(output, " %p [fd %ld]%s%s%s%s\n",
2789 (void*)e, (long)e->ev_fd,
2790 (e->ev_res&EV_READ)?" Read active":"",
2791 (e->ev_res&EV_WRITE)?" Write active":"",
2792 (e->ev_res&EV_SIGNAL)?" Signal active":"",
2793 (e->ev_res&EV_TIMEOUT)?" Timeout active":"");
2799 event_base_add_virtual(struct event_base *base)
2801 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2802 base->virtual_event_count++;
2803 EVBASE_RELEASE_LOCK(base, th_base_lock);
2807 event_base_del_virtual(struct event_base *base)
2809 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2810 EVUTIL_ASSERT(base->virtual_event_count > 0);
2811 base->virtual_event_count--;
2812 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
2813 evthread_notify_base(base);
2814 EVBASE_RELEASE_LOCK(base, th_base_lock);