2 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3 * Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "event2/event-config.h"
31 #define WIN32_LEAN_AND_MEAN
33 #undef WIN32_LEAN_AND_MEAN
35 #include <sys/types.h>
36 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
39 #include <sys/queue.h>
40 #ifdef _EVENT_HAVE_SYS_SOCKET_H
41 #include <sys/socket.h>
45 #ifdef _EVENT_HAVE_UNISTD_H
48 #ifdef _EVENT_HAVE_SYS_EVENTFD_H
49 #include <sys/eventfd.h>
57 #include "event2/event.h"
58 #include "event2/event_struct.h"
59 #include "event2/event_compat.h"
60 #include "event-internal.h"
61 #include "defer-internal.h"
62 #include "evthread-internal.h"
63 #include "event2/thread.h"
64 #include "event2/util.h"
65 #include "log-internal.h"
66 #include "evmap-internal.h"
67 #include "iocp-internal.h"
68 #include "changelist-internal.h"
69 #include "ht-internal.h"
70 #include "util-internal.h"
/* NOTE(review): this listing is incomplete — the embedded line numbers skip,
 * so array initializer entries and the matching #endif lines are missing. */
/* Per-backend dispatch tables, each compiled in only when configure detected
 * the corresponding kernel facility. */
72 #ifdef _EVENT_HAVE_EVENT_PORTS
73 extern const struct eventop evportops;
75 #ifdef _EVENT_HAVE_SELECT
76 extern const struct eventop selectops;
78 #ifdef _EVENT_HAVE_POLL
79 extern const struct eventop pollops;
81 #ifdef _EVENT_HAVE_EPOLL
82 extern const struct eventop epollops;
84 #ifdef _EVENT_HAVE_WORKING_KQUEUE
85 extern const struct eventop kqops;
87 #ifdef _EVENT_HAVE_DEVPOLL
88 extern const struct eventop devpollops;
/* win32ops is unconditional here; presumably guarded by a WIN32 #ifdef on a
 * missing line — confirm against the full source. */
91 extern const struct eventop win32ops;
/* The order of the #ifdef groups below fixes backend preference:
 * evport, kqueue, epoll, /dev/poll, poll, select (entries themselves are on
 * lines missing from this listing). */
94 /* Array of backends in order of preference. */
95 static const struct eventop *eventops[] = {
96 #ifdef _EVENT_HAVE_EVENT_PORTS
99 #ifdef _EVENT_HAVE_WORKING_KQUEUE
102 #ifdef _EVENT_HAVE_EPOLL
105 #ifdef _EVENT_HAVE_DEVPOLL
108 #ifdef _EVENT_HAVE_POLL
111 #ifdef _EVENT_HAVE_SELECT
/* NOTE(review): fragmentary listing — some prototype lines are missing. */
120 /* Global state; deprecated */
/* Default base used by the legacy event_init()/event_dispatch() API. */
121 struct event_base *event_global_current_base_ = NULL;
122 #define current_base event_global_current_base_
/* Nonzero once a monotonic clock has been detected (see detect_monotonic). */
126 static int use_monotonic;
/* Forward declarations for the internal (lock-held) implementations used
 * throughout this file. */
129 static inline int event_add_internal(struct event *ev,
130     const struct timeval *tv, int tv_is_absolute);
131 static inline int event_del_internal(struct event *ev);
133 static void	event_queue_insert(struct event_base *, struct event *, int);
134 static void	event_queue_remove(struct event_base *, struct event *, int);
135 static int	event_haveevents(struct event_base *);
137 static int	event_process_active(struct event_base *);
139 static int	timeout_next(struct event_base *, struct timeval **);
140 static void	timeout_process(struct event_base *);
141 static void	timeout_correct(struct event_base *, struct timeval *);
143 static inline void	event_signal_closure(struct event_base *, struct event *ev);
144 static inline void	event_persist_closure(struct event_base *, struct event *ev);
146 static int	evthread_notify_base(struct event_base *base);
/* NOTE(review): fragmentary listing — struct/function/macro bodies below are
 * missing interior lines (braces, else-branches, #endif); code kept verbatim. */
148 #ifndef _EVENT_DISABLE_DEBUG_MODE
149 /* These functions implement a hashtable of which 'struct event *' structures
150 * have been setup or added. We don't want to trust the content of the struct
151 * event itself, since we're trying to work through cases where an event gets
152 * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
/* One hashtable entry per tracked event pointer; presumably also carries
 * 'setup' and 'added' bitfields (used by the macros below) on missing lines. */
155 struct event_debug_entry {
156 HT_ENTRY(event_debug_entry) node;
157 const struct event *ptr;
/* Hash function for the debug map: the event's address truncated to unsigned. */
161 static inline unsigned
162 hash_debug_entry(const struct event_debug_entry *e)
164 /* We need to do this silliness to convince compilers that we
165 * honestly mean to cast e->ptr to an integer, and discard any
166 * part of it that doesn't fit in an unsigned.
168 unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
169 /* Our hashtable implementation is pretty sensitive to low bits,
170 * and every struct event is over 64 bytes in size, so we can
/* Equality predicate: two entries match iff they track the same pointer. */
176 eq_debug_entry(const struct event_debug_entry *a,
177     const struct event_debug_entry *b)
179 return a->ptr == b->ptr;
/* Global debug-mode state, guarded by _event_debug_map_lock. */
182 int _event_debug_mode_on = 0;
183 /* Set if it's too late to enable event_debug_mode. */
184 static int event_debug_mode_too_late = 0;
185 static void *_event_debug_map_lock = NULL;
186 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
189 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
191 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
192     eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
194 /* Macro: record that ev is now setup (that is, ready for an add) */
195 #define _event_debug_note_setup(ev) do {				\
196 	if (_event_debug_mode_on) {					\
197 		struct event_debug_entry *dent,find;			\
199 		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
200 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
204 			dent = mm_malloc(sizeof(*dent));		\
207 				    "Out of memory in debugging code");	\
210 			HT_INSERT(event_debug_map, &global_debug_map, dent); \
212 		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
214 	event_debug_mode_too_late = 1;					\
216 /* Macro: record that ev is no longer setup */
217 #define _event_debug_note_teardown(ev) do {				\
218 	if (_event_debug_mode_on) {					\
219 		struct event_debug_entry *dent,find;			\
221 		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
222 		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
225 		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
227 	event_debug_mode_too_late = 1;					\
229 /* Macro: record that ev is now added */
230 #define _event_debug_note_add(ev) do {					\
231 	if (_event_debug_mode_on) {					\
232 		struct event_debug_entry *dent,find;			\
234 		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
235 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
239 			event_errx(_EVENT_ERR_ABORT,			\
240 			    "%s: noting an add on a non-setup event %p", \
243 		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
245 	event_debug_mode_too_late = 1;					\
247 /* Macro: record that ev is no longer added */
248 #define _event_debug_note_del(ev) do {					\
249 	if (_event_debug_mode_on) {					\
250 		struct event_debug_entry *dent,find;			\
252 		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
253 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
257 			event_errx(_EVENT_ERR_ABORT,			\
258 			    "%s: noting a del on a non-setup event %p", \
261 		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
263 	event_debug_mode_too_late = 1;					\
265 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
266 #define _event_debug_assert_is_setup(ev) do {				\
267 	if (_event_debug_mode_on) {					\
268 		struct event_debug_entry *dent,find;			\
270 		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
271 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
273 			event_errx(_EVENT_ERR_ABORT,			\
274 			    "%s called on a non-initialized event %p",	\
277 		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
280 /* Macro: assert that ev is not added (i.e., okay to tear down or set
282 #define _event_debug_assert_not_added(ev) do {				\
283 	if (_event_debug_mode_on) {					\
284 		struct event_debug_entry *dent,find;			\
286 		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
287 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
288 		if (dent && dent->added) {				\
289 			event_errx(_EVENT_ERR_ABORT,			\
290 			    "%s called on an already added event %p",	\
293 		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
/* No-op versions of the macros above for _EVENT_DISABLE_DEBUG_MODE builds
 * (the #else that selects them is on a missing line). */
297 #define _event_debug_note_setup(ev) \
299 #define _event_debug_note_teardown(ev) \
301 #define _event_debug_note_add(ev) \
303 #define _event_debug_note_del(ev) \
305 #define _event_debug_assert_is_setup(ev) \
307 #define _event_debug_assert_not_added(ev) \
/* Assert that the caller holds the base's lock; used throughout this file. */
311 #define EVENT_BASE_ASSERT_LOCKED(base)		\
312 	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
/* NOTE(review): fragmentary listing — return types, braces, and several
 * statements of the functions below are on missing lines; code kept verbatim. */
314 /* The first time this function is called, it sets use_monotonic to 1
315 * if we have a clock function that supports monotonic time */
317 detect_monotonic(void)
319 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
/* Probe only once; result is cached in use_monotonic for later gettime calls. */
321 	static int use_monotonic_initialized = 0;
323 	if (use_monotonic_initialized)
326 	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
329 	use_monotonic_initialized = 1;
333 /** Set 'tp' to the current time according to 'base'. We must hold the lock
334 * on 'base'. If there is a cached time, return it. Otherwise, use
335 * clock_gettime or gettimeofday as appropriate to find out the right time.
336 * Return 0 on success, -1 on failure.
339 gettime(struct event_base *base, struct timeval *tp)
341 	EVENT_BASE_ASSERT_LOCKED(base);
/* tv_sec == 0 marks the cache as empty (see clear_time_cache below). */
343 	if (base->tv_cache.tv_sec) {
344 		*tp = base->tv_cache;
348 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
352 		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
/* Convert the nanosecond-resolution timespec into the timeval the caller
 * expects. */
355 		tp->tv_sec = ts.tv_sec;
356 		tp->tv_usec = ts.tv_nsec / 1000;
/* Fallback path when no monotonic clock is available. */
361 	return (evutil_gettimeofday(tp, NULL));
/* Public wrapper: return the cached time under the base lock; with no base,
 * fall back to the real wall clock. */
365 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
371 		return evutil_gettimeofday(tv, NULL);
374 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
375 	r = gettime(base, tv);
376 	EVBASE_RELEASE_LOCK(base, th_base_lock);
380 /** Make 'base' have no current cached time. */
382 clear_time_cache(struct event_base *base)
384 	base->tv_cache.tv_sec = 0;
387 /** Replace the cached time in 'base' with the current time. */
389 update_time_cache(struct event_base *base)
/* Invalidate first so gettime() below fetches a fresh value rather than
 * returning the stale cache. */
391 	base->tv_cache.tv_sec = 0;
392 	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
393 	    gettime(base, &base->tv_cache);
/* NOTE(review): fragmentary listing — the enclosing function signatures for
 * the first two bodies (presumably event_init() and event_base_new()) are on
 * missing lines; code kept verbatim. */
/* Legacy constructor: build a default base and install it as current_base. */
399 	struct event_base *base = event_base_new_with_config(NULL);
402 		event_errx(1, "%s: Unable to construct event_base", __func__);
/* event_base_new(): construct with a fresh default configuration. */
414 	struct event_base *base = NULL;
415 	struct event_config *cfg = event_config_new();
417 		base = event_base_new_with_config(cfg);
418 		event_config_free(cfg);
423 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
426 event_config_is_avoided_method(const struct event_config *cfg,
429 	struct event_config_entry *entry;
431 	TAILQ_FOREACH(entry, &cfg->entries, next) {
432 		if (entry->avoid_method != NULL &&
433 		    strcmp(entry->avoid_method, method) == 0)
440 /** Return true iff 'method' is disabled according to the environment. */
442 event_is_method_disabled(const char *name)
444 	char environment[64];
/* Build "EVENT_NO<NAME>": i starts at 8 to skip the "EVENT_NO" prefix while
 * upper-casing the method name portion. */
447 	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
448 	for (i = 8; environment[i] != '\0'; ++i)
449 		environment[i] = EVUTIL_TOUPPER(environment[i]);
450 	/* Note that evutil_getenv() ignores the environment entirely if
452 	return (evutil_getenv(environment) != NULL);
/* NOTE(review): fragmentary listing — return types and braces on missing
 * lines; code kept verbatim. */
/* Return the backend feature bitmask (EV_FEATURE_*) of the selected method. */
456 event_base_get_features(const struct event_base *base)
458 	return base->evsel->features;
/* Zero-initialize a deferred-callback queue and its list head. */
462 event_deferred_cb_queue_init(struct deferred_cb_queue *cb)
464 	memset(cb, 0, sizeof(struct deferred_cb_queue));
465 	TAILQ_INIT(&cb->deferred_cb_list);
468 /** Helper for the deferred_cb queue: wake up the event base. */
470 notify_base_cbq_callback(struct deferred_cb_queue *cb, void *baseptr)
472 	struct event_base *base = baseptr;
473 	if (EVBASE_NEED_NOTIFY(base))
474 		evthread_notify_base(base);
477 struct deferred_cb_queue *
478 event_base_get_deferred_cb_queue(struct event_base *base)
480 	return base ? &base->defer_queue : NULL;
/* Turn on event-pointer debugging; must run before any event or base is
 * created (enforced via event_debug_mode_too_late). */
484 event_enable_debug_mode(void)
486 #ifndef _EVENT_DISABLE_DEBUG_MODE
487 	if (_event_debug_mode_on)
488 		event_errx(1, "%s was called twice!", __func__);
489 	if (event_debug_mode_too_late)
490 		event_errx(1, "%s must be called *before* creating any events "
491 		    "or event_bases",__func__);
493 	_event_debug_mode_on = 1;
495 	HT_INIT(event_debug_map, &global_debug_map);
497 	EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
/* Tear down the debug map: remove-and-free every entry, then clear the table. */
503 event_disable_debug_mode(void)
505 	struct event_debug_entry **ent, *victim;
507 	EVLOCK_LOCK(_event_debug_map_lock, 0);
508 	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
/* HT_NEXT_RMV advances the iterator while unlinking the current entry. */
510 		ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
513 	HT_CLEAR(event_debug_map, &global_debug_map);
514 	EVLOCK_UNLOCK(_event_debug_map_lock , 0);
/* NOTE(review): fragmentary listing — braces, several statements, and the
 * final return are on missing lines; code kept verbatim. */
/* Main constructor: allocate a base, pick the first usable backend from
 * eventops[] honoring cfg and environment restrictions, set up queues,
 * locking, and (optionally) IOCP. */
519 event_base_new_with_config(const struct event_config *cfg)
522 	struct event_base *base;
523 	int should_check_environment;
525 #ifndef _EVENT_DISABLE_DEBUG_MODE
/* Past this point debug mode may no longer be enabled. */
526 	event_debug_mode_too_late = 1;
527 	if (_event_debug_mode_on && !_event_debug_map_lock) {
528 		EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
532 	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
533 		event_warn("%s: calloc", __func__);
537 	gettime(base, &base->event_tv);
539 	min_heap_ctor(&base->timeheap);
540 	TAILQ_INIT(&base->eventqueue);
/* -1 marks the signal socketpair / notify fds as "not yet created". */
541 	base->sig.ev_signal_pair[0] = -1;
542 	base->sig.ev_signal_pair[1] = -1;
543 	base->th_notify_fd[0] = -1;
544 	base->th_notify_fd[1] = -1;
546 	event_deferred_cb_queue_init(&base->defer_queue);
547 	base->defer_queue.notify_fn = notify_base_cbq_callback;
548 	base->defer_queue.notify_arg = base;
550 		base->flags = cfg->flags;
552 	evmap_io_initmap(&base->io);
553 	evmap_signal_initmap(&base->sigmap);
554 	event_changelist_init(&base->changelist);
558 	should_check_environment =
559 	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
/* Try each backend in preference order until one initializes. */
561 	for (i = 0; eventops[i] && !base->evbase; i++) {
563 			/* determine if this backend should be avoided */
564 			if (event_config_is_avoided_method(cfg,
/* Skip backends lacking every feature the config requires. */
567 			if ((eventops[i]->features & cfg->require_features)
568 			    != cfg->require_features)
572 		/* also obey the environment variables */
573 		if (should_check_environment &&
574 		    event_is_method_disabled(eventops[i]->name))
577 		base->evsel = eventops[i];
579 		base->evbase = base->evsel->init(base);
582 	if (base->evbase == NULL) {
583 		event_warnx("%s: no event mechanism available",
585 		event_base_free(base);
589 	if (evutil_getenv("EVENT_SHOW_METHOD"))
590 		event_msgx("libevent using: %s", base->evsel->name);
592 	/* allocate a single active event queue */
593 	if (event_base_priority_init(base, 1) < 0) {
594 		event_base_free(base);
598 	/* prepare for threading */
600 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
601 	if (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK)) {
603 		EVTHREAD_ALLOC_LOCK(base->th_base_lock,
604 		    EVTHREAD_LOCKTYPE_RECURSIVE);
605 		base->defer_queue.lock = base->th_base_lock;
606 		EVTHREAD_ALLOC_COND(base->current_event_cond);
607 		r = evthread_make_base_notifiable(base);
609 			event_base_free(base);
616 	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
617 		event_base_start_iocp(base, cfg->n_cpus_hint);
/* Launch the Windows IOCP port for this base; presumably a no-op stub on
 * non-Windows builds (guards are on missing lines) — confirm in full source. */
624 event_base_start_iocp(struct event_base *base, int n_cpus)
629 	base->iocp = event_iocp_port_launch(n_cpus);
631 		event_warnx("%s: Couldn't launch IOCP", __func__);
/* Shut the IOCP port down, asserting the shutdown succeeded. */
641 event_base_stop_iocp(struct event_base *base)
648 	rv = event_iocp_shutdown(base->iocp, -1);
649 	EVUTIL_ASSERT(rv >= 0);
/* NOTE(review): fragmentary listing — deletion/free statements inside the
 * loops are on missing lines; code kept verbatim. */
/* Destructor: delete all non-internal events from every queue (event list,
 * timeout minheap, common-timeout lists, active queues), free backend state,
 * and release locks. Not thread-safe against concurrent users of the base. */
655 event_base_free(struct event_base *base)
659 	/* XXXX grab the lock? If there is contention when one thread frees
660 	 * the base, then the contending thread will be very sad soon. */
/* base == NULL means "free the global current_base" (legacy behavior). */
662 	if (base == NULL && current_base)
664 	if (base == current_base)
667 	/* XXX(niels) - check for internal events first */
671 	event_base_stop_iocp(base);
673 	/* threading fds if we have them */
675 	if (base->th_notify_fd[0] != -1) {
676 		event_del(&base->th_notify);
677 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
/* fd[1] is -1 when the notify mechanism is eventfd-based (single fd). */
678 		if (base->th_notify_fd[1] != -1)
679 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
680 		base->th_notify_fd[0] = -1;
681 		base->th_notify_fd[1] = -1;
682 		event_debug_unassign(&base->th_notify);
685 	/* Delete all non-internal events. */
686 	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
687 		struct event *next = TAILQ_NEXT(ev, ev_next);
688 		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
/* Drain the timeout minheap. */
694 	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
/* Tear down each common-timeout queue and the events parked on it. */
698 	for (i = 0; i < base->n_common_timeouts; ++i) {
699 		struct common_timeout_list *ctl =
700 		    base->common_timeout_queues[i];
701 		event_del(&ctl->timeout_event); /* Internal; doesn't count */
702 		event_debug_unassign(&ctl->timeout_event);
703 		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
704 			struct event *next = TAILQ_NEXT(ev,
705 			    ev_timeout_pos.ev_next_with_common_timeout);
706 			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
714 	if (base->common_timeout_queues)
715 		mm_free(base->common_timeout_queues);
717 	for (i = 0; i < base->nactivequeues; ++i) {
718 		for (ev = TAILQ_FIRST(&base->activequeues[i]); ev; ) {
719 			struct event *next = TAILQ_NEXT(ev, ev_active_next);
720 			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
729 	event_debug(("%s: %d events were still set in base",
730 	    __func__, n_deleted));
/* Let the backend release its private state before we free shared maps. */
732 	if (base->evsel != NULL && base->evsel->dealloc != NULL)
733 		base->evsel->dealloc(base);
735 	for (i = 0; i < base->nactivequeues; ++i)
736 		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
738 	EVUTIL_ASSERT(min_heap_empty(&base->timeheap));
739 	min_heap_dtor(&base->timeheap);
741 	mm_free(base->activequeues);
743 	EVUTIL_ASSERT(TAILQ_EMPTY(&base->eventqueue));
745 	evmap_io_clear(&base->io);
746 	evmap_signal_clear(&base->sigmap);
747 	event_changelist_freemem(&base->changelist);
749 	EVTHREAD_FREE_LOCK(base->th_base_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
750 	EVTHREAD_FREE_COND(base->current_event_cond);
/* NOTE(review): fragmentary listing — braces, 'res' declaration, and several
 * statements are on missing lines; code kept verbatim. */
755 /* reinitialize the event base after a fork */
/* Tear down backend/notify state inherited from the parent process and
 * rebuild it, then re-register every pending I/O and signal event. */
757 event_reinit(struct event_base *base)
759 	const struct eventop *evsel;
762 	int was_notifiable = 0;
764 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
769 	/* Right now, reinit always takes effect, since even if the
770 	   backend doesn't require it, the signal socketpair code does.
774 	/* check if this event mechanism requires reinit */
775 	if (!evsel->need_reinit)
779 	/* prevent internal delete */
780 	if (base->sig.ev_signal_added) {
781 		/* we cannot call event_del here because the base has
782 		 * not been reinitialized yet. */
/* Manually unlink from the queues that event_del would touch. */
783 		event_queue_remove(base, &base->sig.ev_signal,
785 		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
786 			event_queue_remove(base, &base->sig.ev_signal,
788 		base->sig.ev_signal_added = 0;
790 	if (base->th_notify_fd[0] != -1) {
791 		/* we cannot call event_del here because the base has
792 		 * not been reinitialized yet. */
794 		event_queue_remove(base, &base->th_notify,
796 		if (base->th_notify.ev_flags & EVLIST_ACTIVE)
797 			event_queue_remove(base, &base->th_notify,
/* NOTE(review): clears sig.ev_signal_added in the th_notify branch — looks
 * like it should be the notify-event flag instead; matches upstream source
 * of this vintage, so left as-is, but verify against current libevent. */
799 		base->sig.ev_signal_added = 0;
800 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
801 		if (base->th_notify_fd[1] != -1)
802 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
803 		base->th_notify_fd[0] = -1;
804 		base->th_notify_fd[1] = -1;
805 		event_debug_unassign(&base->th_notify);
/* Replace the backend's private state wholesale. */
808 	if (base->evsel->dealloc != NULL)
809 		base->evsel->dealloc(base);
810 	base->evbase = evsel->init(base);
811 	if (base->evbase == NULL) {
812 		event_errx(1, "%s: could not reinitialize event mechanism",
818 	event_changelist_freemem(&base->changelist); /* XXX */
819 	evmap_io_clear(&base->io);
820 	evmap_signal_clear(&base->sigmap);
/* Re-add every still-registered event to the fresh backend maps. */
822 	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
823 		if (ev->ev_events & (EV_READ|EV_WRITE)) {
824 			if (evmap_io_add(base, ev->ev_fd, ev) == -1)
826 		} else if (ev->ev_events & EV_SIGNAL) {
827 			if (evmap_signal_add(base, (int)ev->ev_fd, ev) == -1)
832 	if (was_notifiable && res == 0)
833 		res = evthread_make_base_notifiable(base);
836 	EVBASE_RELEASE_LOCK(base, th_base_lock);
/* NOTE(review): fragmentary listing — braces, NULL checks, and returns are
 * on missing lines; code kept verbatim. */
/* Return a NULL-terminated array of compiled-in backend names; the previous
 * result (cached in the static 'methods') is freed on each call. */
841 event_get_supported_methods(void)
843 	static const char **methods = NULL;
844 	const struct eventop **method;
848 	/* count all methods */
849 	for (method = &eventops[0]; *method != NULL; ++method) {
853 	/* allocate one more than we need for the NULL pointer */
854 	tmp = mm_calloc((i + 1), sizeof(char *));
858 	/* populate the array with the supported methods */
859 	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
860 		tmp[i++] = eventops[k]->name;
865 		mm_free((char**)methods);
/* Allocate a zeroed configuration; callers add entries/flags afterwards. */
872 struct event_config *
873 event_config_new(void)
875 	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
880 	TAILQ_INIT(&cfg->entries);
/* Free one avoid-method entry (and its strdup'd name). */
886 event_config_entry_free(struct event_config_entry *entry)
888 	if (entry->avoid_method != NULL)
889 		mm_free((char *)entry->avoid_method);
/* Free a configuration and all of its entries. */
894 event_config_free(struct event_config *cfg)
896 	struct event_config_entry *entry;
898 	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
899 		TAILQ_REMOVE(&cfg->entries, entry, next);
900 		event_config_entry_free(entry);
/* Setters below: record flags, an avoided backend name, required features,
 * and the CPU-count hint used by IOCP. */
906 event_config_set_flag(struct event_config *cfg, int flag)
915 event_config_avoid_method(struct event_config *cfg, const char *method)
917 	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
921 	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
926 	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
932 event_config_require_features(struct event_config *cfg,
937 	cfg->require_features = features;
942 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
946 	cfg->n_cpus_hint = cpus;
/* NOTE(review): fragmentary listing; code kept verbatim. */
/* Legacy wrapper operating on the global current_base. */
951 event_priority_init(int npriorities)
953 	return (event_base_priority_init(current_base, npriorities));
/* (Re)allocate the array of active-event queues, one per priority level.
 * Refuses to run while callbacks are pending. */
957 event_base_priority_init(struct event_base *base, int npriorities)
961 	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
962 	    || npriorities >= EVENT_MAX_PRIORITIES)
/* Already at the requested size: nothing to do. */
965 	if (npriorities == base->nactivequeues)
968 	if (base->nactivequeues) {
969 		mm_free(base->activequeues);
970 		base->nactivequeues = 0;
973 	/* Allocate our priority queues */
974 	base->activequeues = (struct event_list *)
975 	  mm_calloc(npriorities, sizeof(struct event_list));
976 	if (base->activequeues == NULL) {
977 		event_warn("%s: calloc", __func__);
980 	base->nactivequeues = npriorities;
982 	for (i = 0; i < base->nactivequeues; ++i) {
983 		TAILQ_INIT(&base->activequeues[i]);
989 /* Returns true iff we're currently watching any events. */
991 event_haveevents(struct event_base *base)
993 	/* Caller must hold th_base_lock */
994 	return (base->virtual_event_count > 0 || base->event_count > 0);
/* NOTE(review): fragmentary listing — the invocation loop and re-lock are on
 * missing lines; code kept verbatim. */
997 /* "closure" function called when processing active signal events */
/* Runs the callback once per pending signal occurrence (ev_ncalls),
 * dropping the base lock around each user callback. ev_pncalls lets
 * event_del zero the remaining count from another context. */
999 event_signal_closure(struct event_base *base, struct event *ev)
1003 	/* Allows deletes to work */
1004 	ncalls = ev->ev_ncalls;
1005 	ev->ev_pncalls = &ncalls;
1006 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1009 		ev->ev_ncalls = ncalls;
1011 			ev->ev_pncalls = NULL;
1012 		(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
1014 		/* XXXX we can't do this without a lock on the base. */
1015 		if (base->event_break)
/* NOTE(review): fragmentary listing — return types, braces, and some
 * statements are on missing lines; code kept verbatim. */
1021 /* Common timeouts are special timeouts that are handled as queues rather than
1022 * in the minheap. This is more efficient than the minheap if we happen to
1023 * know that we're going to get several thousands of timeout events all with
1024 * the same timeout value.
1026 * Since all our timeout handling code assumes timevals can be copied,
1027 * assigned, etc, we can't use "magic pointer" to encode these common
1028 * timeouts. Searching through a list to see if every timeout is common could
1029 * also get inefficient. Instead, we take advantage of the fact that tv_usec
1030 * is 32 bits long, but only uses 20 of those bits (since it can never be over
1031 * 999999.) We use the top bits to encode 4 bites of magic number, and 8 bits
1032 * of index into the event_base's aray of common timeouts.
/* Bit layout of tv_usec for a common timeout:
 *   bits 0-19  actual microseconds, bits 20-27 queue index,
 *   bits 28-31 magic value 0x5 marking "this is a common timeout". */
1035 #define MICROSECONDS_MASK       0x000fffff
1036 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1037 #define COMMON_TIMEOUT_IDX_SHIFT 20
1038 #define COMMON_TIMEOUT_MASK     0xf0000000
1039 #define COMMON_TIMEOUT_MAGIC    0x50000000
1041 #define COMMON_TIMEOUT_IDX(tv) \
1042 	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
1044 /** Return true iff if 'tv' is a common timeout in 'base' */
1046 is_common_timeout(const struct timeval *tv,
1047     const struct event_base *base)
1050 	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
/* Magic matched; the encoded index must also refer to an existing queue. */
1052 	idx = COMMON_TIMEOUT_IDX(tv);
1053 	return idx < base->n_common_timeouts;
1056 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1057 * one is a common timeout. */
1059 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1061 	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1062 	    (tv2->tv_usec & ~MICROSECONDS_MASK);
1065 /** Requires that 'tv' is a common timeout. Return the corresponding
1066 * common_timeout_list. */
1067 static inline struct common_timeout_list *
1068 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1070 	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
/* Sanity check: the encoded timeout must equal its queue's duration. */
1075 common_timeout_ok(const struct timeval *tv,
1076     struct event_base *base)
1078 	const struct timeval *expect =
1079 	    &get_common_timeout_list(base, tv)->duration;
1080 	return tv->tv_sec == expect->tv_sec &&
1081 	    tv->tv_usec == expect->tv_usec;
1085 /* Add the timeout for the first event in given common timeout list to the
1086 * event_base's minheap. */
1088 common_timeout_schedule(struct common_timeout_list *ctl,
1089     const struct timeval *now, struct event *head)
/* Strip the magic/index bits so the minheap sees a plain absolute time. */
1091 	struct timeval timeout = head->ev_timeout;
1092 	timeout.tv_usec &= MICROSECONDS_MASK;
1093 	event_add_internal(&ctl->timeout_event, &timeout, 1);
1096 /* Callback: invoked when the timeout for a common timeout queue triggers.
1097 * This means that (at least) the first event in that queue should be run,
1098 * and the timeout should be rescheduled if there are more events. */
1100 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1103 	struct common_timeout_list *ctl = arg;
1104 	struct event_base *base = ctl->base;
1105 	struct event *ev = NULL;
1106 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1107 	gettime(base, &now);
/* Pop and activate every queued event whose deadline has passed; the loop
 * header is on a missing line. */
1109 		ev = TAILQ_FIRST(&ctl->events);
1110 		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1111 		    (ev->ev_timeout.tv_sec == now.tv_sec &&
1112 			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1114 		event_del_internal(ev);
1115 		event_active_nolock(ev, EV_TIMEOUT, 1);
/* If events remain, re-arm the queue's internal timer for the next head. */
1118 		common_timeout_schedule(ctl, &now, ev);
1119 	EVBASE_RELEASE_LOCK(base, th_base_lock);
/* NOTE(review): fragmentary listing — braces, NULL checks, and goto labels
 * are on missing lines; code kept verbatim. */
/* Index field is 8 bits wide, hence at most 256 common-timeout queues. */
1122 #define MAX_COMMON_TIMEOUTS 256
/* Return (creating on demand) the magic-encoded timeval for 'duration' that
 * callers pass to event_add to use the common-timeout fast path. */
1124 const struct timeval *
1125 event_base_init_common_timeout(struct event_base *base,
1126     const struct timeval *duration)
1130 	const struct timeval *result=NULL;
1131 	struct common_timeout_list *new_ctl;
1133 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
/* Normalize an out-of-range tv_usec (stripping any existing magic bits first)
 * so duplicate detection below compares canonical values. */
1134 	if (duration->tv_usec > 1000000) {
1135 		memcpy(&tv, duration, sizeof(struct timeval));
1136 		if (is_common_timeout(duration, base))
1137 			tv.tv_usec &= MICROSECONDS_MASK;
1138 		tv.tv_sec += tv.tv_usec / 1000000;
1139 		tv.tv_usec %= 1000000;
/* Reuse an existing queue with the same duration if one exists. */
1142 	for (i = 0; i < base->n_common_timeouts; ++i) {
1143 		const struct common_timeout_list *ctl =
1144 		    base->common_timeout_queues[i];
1145 		if (duration->tv_sec == ctl->duration.tv_sec &&
1146 		    duration->tv_usec ==
1147 		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1148 			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1149 			result = &ctl->duration;
1153 	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1154 		event_warnx("%s: Too many common timeouts already in use; "
1155 		    "we only support %d per event_base", __func__,
1156 		    MAX_COMMON_TIMEOUTS);
/* Grow the queue-pointer array geometrically (16 minimum, then doubling). */
1159 	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1160 		int n = base->n_common_timeouts < 16 ? 16 :
1161 		    base->n_common_timeouts*2;
1162 		struct common_timeout_list **newqueues =
1163 		    mm_realloc(base->common_timeout_queues,
/* NOTE(review): sizeof uses 'struct common_timeout_queue *' while the array
 * holds 'struct common_timeout_list *' — harmless only because all object
 * pointers are the same size; flag for cleanup in the full source. */
1164 			n*sizeof(struct common_timeout_queue *));
1166 			event_warn("%s: realloc",__func__);
1169 		base->n_common_timeouts_allocated = n;
1170 		base->common_timeout_queues = newqueues;
1172 	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1174 		event_warn("%s: calloc",__func__);
1177 	TAILQ_INIT(&new_ctl->events);
1178 	new_ctl->duration.tv_sec = duration->tv_sec;
/* Encode magic + this queue's index into the high bits of tv_usec. */
1179 	new_ctl->duration.tv_usec =
1180 	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1181 	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1182 	evtimer_assign(&new_ctl->timeout_event, base,
1183 	    common_timeout_callback, new_ctl);
1184 	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1185 	event_priority_set(&new_ctl->timeout_event, 0);
1186 	new_ctl->base = base;
1187 	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1188 	result = &new_ctl->duration;
1192 		EVUTIL_ASSERT(is_common_timeout(result, base));
1194 	EVBASE_RELEASE_LOCK(base, th_base_lock);
/* NOTE(review): fragmentary listing — braces and the else of the inner
 * conditional are on missing lines; code kept verbatim. */
1198 /* Closure function invoked when we're activating a persistent event. */
1200 event_persist_closure(struct event_base *base, struct event *ev)
1202 	/* reschedule the persistent event if we have a timeout. */
1203 	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1204 		/* If there was a timeout, we want it to run at an interval of
1205 		 * ev_io_timeout after the last time it was _scheduled_ for,
1206 		 * not ev_io_timeout after _now_. If it fired for another
1207 		 * reason, though, the timeout ought to start ticking _now_. */
1208 		struct timeval run_at;
1209 		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1210 			&ev->ev_io_timeout));
1211 		if (is_common_timeout(&ev->ev_timeout, base)) {
/* Common-timeout path: strip the magic/index bits for arithmetic, then
 * restore them onto the computed deadline. */
1212 			ev_uint32_t usec_mask;
1213 			struct timeval delay, relative_to;
1214 			delay = ev->ev_io_timeout;
1215 			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1216 			delay.tv_usec &= MICROSECONDS_MASK;
1217 			if (ev->ev_res & EV_TIMEOUT) {
1218 				relative_to = ev->ev_timeout;
1219 				relative_to.tv_usec &= MICROSECONDS_MASK;
1221 				gettime(base, &relative_to);
1223 			evutil_timeradd(&relative_to, &delay, &run_at);
1224 			run_at.tv_usec |= usec_mask;
/* Plain-timeout path: same scheduling rule without bit manipulation. */
1226 			struct timeval relative_to;
1227 			if (ev->ev_res & EV_TIMEOUT) {
1228 				relative_to = ev->ev_timeout;
1230 				gettime(base, &relative_to);
1232 			evutil_timeradd(&ev->ev_io_timeout, &relative_to,
1235 		event_add_internal(ev, &run_at, 1);
/* Drop the lock before running the user callback, as elsewhere in this file. */
1237 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1238 	(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
1242 Helper for event_process_active to process all the events in a single queue,
1243 releasing the lock as we go. This function requires that the lock be held
1244 when it's invoked. Returns -1 if we get a signal or an event_break that
1245 means we should stop processing any active events now. Otherwise returns
1246 the number of non-internal events that we processed.
1249 event_process_active_single_queue(struct event_base *base,
1250 struct event_list *activeq)
1255 EVUTIL_ASSERT(activeq != NULL);
1257 for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
1258 if (ev->ev_events & EV_PERSIST)
1259 event_queue_remove(base, ev, EVLIST_ACTIVE);
1261 event_del_internal(ev);
1262 if (!(ev->ev_flags & EVLIST_INTERNAL))
1266 "event_process_active: event: %p, %s%scall %p",
1268 ev->ev_res & EV_READ ? "EV_READ " : " ",
1269 ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1272 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1273 base->current_event = ev;
1274 base->current_event_waiters = 0;
1277 switch (ev->ev_closure) {
1278 case EV_CLOSURE_SIGNAL:
1279 event_signal_closure(base, ev);
1281 case EV_CLOSURE_PERSIST:
1282 event_persist_closure(base, ev);
1285 case EV_CLOSURE_NONE:
1286 EVBASE_RELEASE_LOCK(base, th_base_lock);
1288 (int)ev->ev_fd, ev->ev_res, ev->ev_arg);
1292 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1293 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1294 base->current_event = NULL;
1295 if (base->current_event_waiters) {
1296 base->current_event_waiters = 0;
1297 EVTHREAD_COND_BROADCAST(base->current_event_cond);
1301 if (base->event_break)
1308 Process up to MAX_DEFERRED of the defered_cb entries in 'queue'. If
1309 *breakptr becomes set to 1, stop. Requires that we start out holding
1310 the lock on 'queue'; releases the lock around 'queue' for each deferred_cb
1314 event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
1317 struct deferred_cb *cb;
1319 #define MAX_DEFERRED 16
1320 while ((cb = TAILQ_FIRST(&queue->deferred_cb_list))) {
/* Detach the entry before running it; the queue lock is dropped around
 * the user callback and re-taken afterwards. */
1322 TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
1323 --queue->active_count;
1324 UNLOCK_DEFERRED_QUEUE(queue);
1326 cb->cb(cb, cb->arg);
1328 LOCK_DEFERRED_QUEUE(queue);
/* Bound the work per invocation so deferred callbacks cannot starve
 * the rest of the loop. */
1331 if (++count == MAX_DEFERRED)
1339 * Active events are stored in priority queues. Lower priorities are always
1340 * processed before higher priorities. Low priority events can starve high
/* Run active events in strict priority order (queue 0 first); after the
 * first queue that yields a real (non-internal) event, stop so lower
 * priorities are not considered this pass, then run deferred callbacks. */
1345 event_process_active(struct event_base *base)
1347 /* Caller must hold th_base_lock */
1348 struct event_list *activeq = NULL;
1351 for (i = 0; i < base->nactivequeues; ++i) {
1352 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1353 activeq = &base->activequeues[i];
1354 c = event_process_active_single_queue(base, activeq);
1358 break; /* Processed a real event; do not
1359 * consider lower-priority events */
1360 /* If we get here, all of the events we processed
1361 * were internal. Continue. */
1365 event_process_deferred_callbacks(&base->defer_queue,&base->event_break);
1370 * Wait continuously for events. We exit only if no events are left.
/* Obsolete API: run the current (global) base's loop with no flags. */
1374 event_dispatch(void)
1376 return (event_loop(0));
/* Run the given base's event loop until told to stop (flags == 0). */
1380 event_base_dispatch(struct event_base *event_base)
1382 return (event_base_loop(event_base, 0));
/* Return the name of the backend (select/poll/epoll/...) used by 'base'. */
1386 event_base_get_method(const struct event_base *base)
1388 EVUTIL_ASSERT(base);
1389 return (base->evsel->name);
1392 /** Callback: used to implement event_base_loopexit by telling the event_base
1393 * that it's time to exit its loop. */
1395 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1397 struct event_base *base = arg;
/* Only sets the flag; event_base_loop notices event_gotterm and exits. */
1398 base->event_gotterm = 1;
/* Obsolete API: schedule the global base to exit its loop after 'tv'
 * (NULL means as soon as possible) via a one-shot timeout event. */
1402 event_loopexit(const struct timeval *tv)
1404 return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
/* Schedule 'event_base' to exit its loop after 'tv' via a one-shot timer. */
1409 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1411 return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
/* Obsolete API: break out of the global base's loop immediately. */
1416 event_loopbreak(void)
1418 return (event_base_loopbreak(current_base));
/* Ask the loop running on 'event_base' to stop after the current callback;
 * wakes the loop thread if it is blocked in dispatch. */
1422 event_base_loopbreak(struct event_base *event_base)
1425 if (event_base == NULL)
1428 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1429 event_base->event_break = 1;
/* If another thread is in dispatch, poke it so it sees event_break. */
1431 if (EVBASE_NEED_NOTIFY(event_base)) {
1432 r = evthread_notify_base(event_base);
1436 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
/* Report (under the base lock) whether event_base_loopbreak was requested. */
1441 event_base_got_break(struct event_base *event_base)
1444 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1445 res = event_base->event_break;
1446 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
/* Report (under the base lock) whether event_base_loopexit was requested. */
1451 event_base_got_exit(struct event_base *event_base)
1454 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1455 res = event_base->event_gotterm;
1456 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1460 /* not thread safe */
/* Obsolete API: run the global base's loop with 'flags'. Not thread safe
 * because current_base is unsynchronized global state. */
1463 event_loop(int flags)
1465 return event_base_loop(current_base, flags);
/* Main loop: repeatedly compute the next timeout, call the backend's
 * dispatch, fire expired timers, and process active events, until there is
 * no more work or loopexit/loopbreak is requested. EVLOOP_ONCE and
 * EVLOOP_NONBLOCK alter how long we block.
 * NOTE(review): the listing omits interleaved lines (loop header, goto
 * labels, 'done' handling); confirm against the full file. */
1469 event_base_loop(struct event_base *base, int flags)
1471 const struct eventop *evsel = base->evsel;
1473 struct timeval *tv_p;
1474 int res, done, retval = 0;
1476 /* Grab the lock. We will release it inside evsel.dispatch, and again
1477 * as we invoke user callbacks. */
1478 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
/* Only one event_base_loop may run per base at a time. */
1480 if (base->running_loop) {
1481 event_warnx("%s: reentrant invocation. Only one event_base_loop"
1482 " can run on each event_base at once.", __func__);
1483 EVBASE_RELEASE_LOCK(base, th_base_lock);
1487 base->running_loop = 1;
1489 clear_time_cache(base);
1491 if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1492 evsig_set_base(base);
1496 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1497 base->th_owner_id = EVTHREAD_GET_ID();
1500 base->event_gotterm = base->event_break = 0;
1503 /* Terminate the loop if we have been asked to */
1504 if (base->event_gotterm) {
1508 if (base->event_break) {
1512 timeout_correct(base, &tv);
/* Block in dispatch only when nothing is active and nonblock isn't set. */
1515 if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1516 timeout_next(base, &tv_p);
1519 * if we have active events, we just poll new events
1522 evutil_timerclear(&tv);
1525 /* If we have no events, we just exit */
1526 if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1527 event_debug(("%s: no events registered.", __func__));
1532 /* update last old time */
1533 gettime(base, &base->event_tv);
1535 clear_time_cache(base);
1537 res = evsel->dispatch(base, tv_p);
1540 event_debug(("%s: dispatch returned unsuccessfully.",
1546 update_time_cache(base);
1548 timeout_process(base);
1550 if (N_ACTIVE_CALLBACKS(base)) {
1551 int n = event_process_active(base);
1552 if ((flags & EVLOOP_ONCE)
1553 && N_ACTIVE_CALLBACKS(base) == 0
1556 } else if (flags & EVLOOP_NONBLOCK)
1559 event_debug(("%s: asked to terminate loop.", __func__));
1562 clear_time_cache(base);
1563 base->running_loop = 0;
1565 EVBASE_RELEASE_LOCK(base, th_base_lock);
1570 /* Sets up an event for processing once */
1574 void (*cb)(evutil_socket_t, short, void *);
1578 /* One-time callback to implement event_base_once: invokes the user callback,
1579 * then deletes the allocated storage */
1581 event_once_cb(evutil_socket_t fd, short events, void *arg)
1583 struct event_once *eonce = arg;
1585 (*eonce->cb)(fd, events, eonce->arg);
1586 event_debug_unassign(&eonce->ev);
1590 /* not threadsafe, event scheduled once. */
1592 event_once(evutil_socket_t fd, short events,
1593 void (*callback)(evutil_socket_t, short, void *),
1594 void *arg, const struct timeval *tv)
1596 return event_base_once(current_base, fd, events, callback, arg, tv);
1599 /* Schedules an event once */
1601 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
1602 void (*callback)(evutil_socket_t, short, void *),
1603 void *arg, const struct timeval *tv)
1605 struct event_once *eonce;
1609 /* We cannot support signals that just fire once, or persistent
1611 if (events & (EV_SIGNAL|EV_PERSIST))
1614 if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
1617 eonce->cb = callback;
1620 if (events == EV_TIMEOUT) {
1622 evutil_timerclear(&etv);
1626 evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
1627 } else if (events & (EV_READ|EV_WRITE)) {
1628 events &= EV_READ|EV_WRITE;
1630 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
1632 /* Bad event combination */
1638 res = event_add(&eonce->ev, tv);
/* Initialize caller-owned storage 'ev' for use with 'base' (NULL base falls
 * back to current_base). Chooses the closure kind from 'events' and gives
 * the event the middle priority by default. Does not add the event. */
1648 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
1651 base = current_base;
1653 _event_debug_assert_not_added(ev);
1657 ev->ev_callback = callback;
1660 ev->ev_events = events;
1662 ev->ev_flags = EVLIST_INIT;
1664 ev->ev_pncalls = NULL;
1666 if (events & EV_SIGNAL) {
/* Signals and I/O are mutually exclusive on one event. */
1667 if ((events & (EV_READ|EV_WRITE)) != 0) {
1668 event_warnx("%s: EV_SIGNAL is not compatible with "
1669 "EV_READ or EV_WRITE", __func__);
1672 ev->ev_closure = EV_CLOSURE_SIGNAL;
1674 if (events & EV_PERSIST) {
1675 evutil_timerclear(&ev->ev_io_timeout);
1676 ev->ev_closure = EV_CLOSURE_PERSIST;
1678 ev->ev_closure = EV_CLOSURE_NONE;
1682 min_heap_elem_init(ev);
1685 /* by default, we put new events into the middle priority */
1686 ev->ev_pri = base->nactivequeues / 2;
1689 _event_debug_note_setup(ev);
/* Re-home an initialized-but-never-added event onto 'base'; also resets its
 * priority to the new base's middle priority. */
1695 event_base_set(struct event_base *base, struct event *ev)
1697 /* Only innocent events may be assigned to a different base */
1698 if (ev->ev_flags != EVLIST_INIT)
1701 _event_debug_assert_is_setup(ev);
1704 ev->ev_pri = base->nactivequeues/2;
/* Obsolete API: event_assign against the global current_base. The assert
 * holds because event_assign only fails on bad event combinations. */
1710 event_set(struct event *ev, evutil_socket_t fd, short events,
1711 void (*callback)(evutil_socket_t, short, void *), void *arg)
1714 r = event_assign(ev, current_base, fd, events, callback, arg);
1715 EVUTIL_ASSERT(r == 0);
/* Heap-allocating wrapper around event_assign; caller frees with
 * event_free. Returns NULL on allocation or assignment failure. */
1719 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
1722 ev = mm_malloc(sizeof(struct event));
1725 if (event_assign(ev, base, fd, events, cb, arg) < 0) {
/* Delete (if pending) and release an event allocated by event_new. */
1734 event_free(struct event *ev)
1736 _event_debug_assert_is_setup(ev);
1738 /* make sure that this event won't be coming back to haunt us. */
1740 _event_debug_note_teardown(ev);
/* Debug-mode bookkeeping: mark a never-added event as torn down so the
 * debug tracker stops watching it; clears EVLIST_INIT. */
1746 event_debug_unassign(struct event *ev)
1748 _event_debug_assert_not_added(ev);
1749 _event_debug_note_teardown(ev);
1751 ev->ev_flags &= ~EVLIST_INIT;
1755 * Sets the priority of an event - if an event is already scheduled
1756 * changing the priority is going to fail.
1760 event_priority_set(struct event *ev, int pri)
1762 _event_debug_assert_is_setup(ev);
/* Cannot change priority while the event sits on an active queue, and the
 * priority must index an existing queue on the event's base. */
1764 if (ev->ev_flags & EVLIST_ACTIVE)
1766 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
1775 * Checks if a specific event is pending or scheduled.
/* Return which of the requested 'event' bits are pending/active on 'ev'.
 * If EV_TIMEOUT is pending and requested, optionally report the absolute
 * expiry time in *tv, remapped onto the real wall clock. */
1779 event_pending(const struct event *ev, short event, struct timeval *tv)
1781 struct timeval now, res;
1784 _event_debug_assert_is_setup(ev);
1786 if (ev->ev_flags & EVLIST_INSERTED)
1787 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
1788 if (ev->ev_flags & EVLIST_ACTIVE)
1789 flags |= ev->ev_res;
1790 if (ev->ev_flags & EVLIST_TIMEOUT)
1791 flags |= EV_TIMEOUT;
1793 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
1795 /* See if there is a timeout that we should report */
1796 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
/* Strip the common-timeout magic bits before doing arithmetic. */
1797 struct timeval tmp = ev->ev_timeout;
1798 event_base_gettimeofday_cached(ev->ev_base, &now);
1799 tmp.tv_usec &= MICROSECONDS_MASK;
1800 evutil_timersub(&tmp, &now, &res);
1801 /* correctly remap to real time */
1802 evutil_gettimeofday(&now, NULL);
1803 evutil_timeradd(&now, &res, tv);
1806 return (flags & event);
/* True iff event_assign/event_set has been called on 'ev'. */
1810 event_initialized(const struct event *ev)
1812 if (!(ev->ev_flags & EVLIST_INIT))
/* Copy out any subset of an event's configuration; each out-pointer is
 * optional (filled only when non-NULL, per the guards on omitted lines). */
1819 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
1821 _event_debug_assert_is_setup(event);
1824 *base_out = event->ev_base;
1826 *fd_out = event->ev_fd;
1828 *events_out = event->ev_events;
1830 *callback_out = event->ev_callback;
1832 *arg_out = event->ev_arg;
/* Expose sizeof(struct event) so ABI-stable callers can allocate events
 * without depending on the struct layout at compile time. */
1836 event_get_struct_event_size(void)
1838 return sizeof(struct event);
/* Accessor: the fd (or signal number) this event watches. */
1842 event_get_fd(const struct event *ev)
1844 _event_debug_assert_is_setup(ev);
/* Accessor: the event_base this event is assigned to. */
1849 event_get_base(const struct event *ev)
1851 _event_debug_assert_is_setup(ev);
/* Accessor: the EV_* flags the event was configured with. */
1856 event_get_events(const struct event *ev)
1858 _event_debug_assert_is_setup(ev);
1859 return ev->ev_events;
/* Accessor: the callback function registered on this event. */
1863 event_get_callback(const struct event *ev)
1865 _event_debug_assert_is_setup(ev)
1866 return ev->ev_callback;
/* Accessor: the user argument passed to the event's callback. */
1870 event_get_callback_arg(const struct event *ev)
1872 _event_debug_assert_is_setup(ev)
/* Public add: take the base lock and delegate to event_add_internal with a
 * relative timeout. Fails fast if the event has no base. */
1877 event_add(struct event *ev, const struct timeval *tv)
1881 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
1882 event_warnx("%s: event has no event_base set.", __func__);
1886 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
1888 res = event_add_internal(ev, tv, 0);
1890 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
1895 /* Helper callback: wake an event_base from another thread. This version
1896 * works by writing a byte to one end of a socketpair, so that the event_base
1897 * listening on the other end will wake up as the corresponding event
1900 evthread_notify_base_default(struct event_base *base)
/* Write one byte into the notify socketpair/pipe; the loop thread's read
 * end becoming readable wakes it. send() on Windows, write() elsewhere. */
1906 r = send(base->th_notify_fd[1], buf, 1, 0);
1908 r = write(base->th_notify_fd[1], buf, 1);
/* EAGAIN is fine: the buffer is full, so a wakeup is already queued. */
1910 return (r < 0 && errno != EAGAIN) ? -1 : 0;
1913 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
1914 /* Helper callback: wake an event_base from another thread. This version
1915 * assumes that you have a working eventfd() implementation. */
1917 evthread_notify_base_eventfd(struct event_base *base)
1919 ev_uint64_t msg = 1;
/* eventfd semantics: write adds to the counter; retry on EAGAIN (counter
 * saturated) until the kernel accepts the increment. */
1922 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
1923 } while (r < 0 && errno == EAGAIN);
1925 return (r < 0) ? -1 : 0;
1929 /** Tell the thread currently running the event_loop for base (if any) that it
1930 * needs to stop waiting in its dispatch function (if it is) and process all
1931 * active events and deferred callbacks (if there are any). */
1933 evthread_notify_base(struct event_base *base)
1935 EVENT_BASE_ASSERT_LOCKED(base);
/* No notify function configured: base is not notifiable. */
1936 if (!base->th_notify_fn)
/* Coalesce: if a wakeup is already in flight, don't send another. */
1938 if (base->is_notify_pending)
1940 base->is_notify_pending = 1;
1941 return base->th_notify_fn(base);
1944 /* Implementation function to add an event. Works just like event_add,
1945 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
1946 * we treat tv as an absolute time, not as an interval to add to the current
/* Core of event_add (lock held by caller). Registers I/O/signal interest
 * via evmap if not already inserted, then (re)schedules the timeout:
 * absolute, common-timeout-encoded, or now+tv. Wakes the loop thread when
 * the change affects what it is waiting on.
 * NOTE(review): this listing omits interleaved lines (declarations of
 * res/notify/now, several closing braces); verify before restructuring. */
1949 event_add_internal(struct event *ev, const struct timeval *tv,
1952 struct event_base *base = ev->ev_base;
1956 EVENT_BASE_ASSERT_LOCKED(base);
1957 _event_debug_assert_is_setup(ev);
1960 "event_add: event: %p (fd %d), %s%s%scall %p",
1963 ev->ev_events & EV_READ ? "EV_READ " : " ",
1964 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
1965 tv ? "EV_TIMEOUT " : " ",
1968 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
1971 * prepare for timeout insertion further below, if we get a
1972 * failure on any step, we should not change any state.
1974 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
1975 if (min_heap_reserve(&base->timeheap,
1976 1 + min_heap_size(&base->timeheap)) == -1)
1977 return (-1); /* ENOMEM == errno */
1980 /* If the main thread is currently executing a signal event's
1981 * callback, and we are not the main thread, then we want to wait
1982 * until the callback is done before we mess with the event, or else
1983 * we can race on ev_ncalls and ev_pncalls below. */
1984 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1985 if (base->current_event == ev && (ev->ev_events & EV_SIGNAL)
1986 && !EVBASE_IN_THREAD(base)) {
1987 ++base->current_event_waiters;
1988 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
1992 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
1993 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
1994 if (ev->ev_events & (EV_READ|EV_WRITE))
1995 res = evmap_io_add(base, ev->ev_fd, ev);
1996 else if (ev->ev_events & EV_SIGNAL)
1997 res = evmap_signal_add(base, (int)ev->ev_fd, ev);
1999 event_queue_insert(base, ev, EVLIST_INSERTED);
2001 /* evmap says we need to notify the main thread. */
2008 * we should change the timeout state only if the previous event
2009 * addition succeeded.
2011 if (res != -1 && tv != NULL) {
2016 * for persistent timeout events, we remember the
2017 * timeout value and re-add the event.
2019 * If tv_is_absolute, this was already set.
2021 if (ev->ev_closure == EV_CLOSURE_PERSIST && !tv_is_absolute)
2022 ev->ev_io_timeout = *tv;
2025 * we already reserved memory above for the case where we
2026 * are not replacing an existing timeout.
2028 if (ev->ev_flags & EVLIST_TIMEOUT) {
2029 /* XXX I believe this is needless. */
2030 if (min_heap_elt_is_top(ev))
2032 event_queue_remove(base, ev, EVLIST_TIMEOUT);
2035 /* Check if it is active due to a timeout. Rescheduling
2036 * this timeout before the callback can be executed
2037 * removes it from the active list. */
2038 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2039 (ev->ev_res & EV_TIMEOUT)) {
2040 if (ev->ev_events & EV_SIGNAL) {
2041 /* See if we are just active executing
2042 * this event in a loop
2044 if (ev->ev_ncalls && ev->ev_pncalls) {
2046 *ev->ev_pncalls = 0;
2050 event_queue_remove(base, ev, EVLIST_ACTIVE);
2053 gettime(base, &now);
2055 common_timeout = is_common_timeout(tv, base);
2056 if (tv_is_absolute) {
2057 ev->ev_timeout = *tv;
2058 } else if (common_timeout) {
/* Keep the magic common-timeout bits in tv_usec while adding the
 * duration portion to 'now'. */
2059 struct timeval tmp = *tv;
2060 tmp.tv_usec &= MICROSECONDS_MASK;
2061 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2062 ev->ev_timeout.tv_usec |=
2063 (tv->tv_usec & ~MICROSECONDS_MASK);
2065 evutil_timeradd(&now, tv, &ev->ev_timeout);
2069 "event_add: timeout in %d seconds, call %p",
2070 (int)tv->tv_sec, ev->ev_callback));
2072 event_queue_insert(base, ev, EVLIST_TIMEOUT);
2073 if (common_timeout) {
2074 struct common_timeout_list *ctl =
2075 get_common_timeout_list(base, &ev->ev_timeout);
2076 if (ev == TAILQ_FIRST(&ctl->events)) {
2077 common_timeout_schedule(ctl, &now, ev);
2080 /* See if the earliest timeout is now earlier than it
2081 * was before: if so, we will need to tell the main
2082 * thread to wake up earlier than it would
2084 if (min_heap_elt_is_top(ev))
2089 /* if we are not in the right thread, we need to wake up the loop */
2090 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2091 evthread_notify_base(base);
2093 _event_debug_note_add(ev);
/* Public delete: take the base lock and delegate to event_del_internal. */
2099 event_del(struct event *ev)
2103 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2104 event_warnx("%s: event has no event_base set.", __func__);
2108 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2110 res = event_del_internal(ev);
2112 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2117 /* Helper for event_del: always called with th_base_lock held. */
/* Remove 'ev' from every queue it sits on (timeout, active, inserted) and
 * unregister its evmap interest. If another thread is currently running
 * this event's callback, block on current_event_cond first so the caller
 * may safely free the callback argument afterwards. */
2119 event_del_internal(struct event *ev)
2121 struct event_base *base;
2122 int res = 0, notify = 0;
2124 event_debug(("event_del: %p (fd %d), callback %p",
2125 ev, (int)ev->ev_fd, ev->ev_callback));
2127 /* An event without a base has not been added */
2128 if (ev->ev_base == NULL)
2131 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2133 /* If the main thread is currently executing this event's callback,
2134 * and we are not the main thread, then we want to wait until the
2135 * callback is done before we start removing the event. That way,
2136 * when this function returns, it will be safe to free the
2137 * user-supplied argument. */
2139 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2140 if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
2141 ++base->current_event_waiters;
2142 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2146 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2148 /* See if we are just active executing this event in a loop */
2149 if (ev->ev_events & EV_SIGNAL) {
/* Zero the shared call counter so a signal closure loop stops early. */
2150 if (ev->ev_ncalls && ev->ev_pncalls) {
2152 *ev->ev_pncalls = 0;
2156 if (ev->ev_flags & EVLIST_TIMEOUT) {
2157 /* NOTE: We never need to notify the main thread because of a
2158 * deleted timeout event: all that could happen if we don't is
2159 * that the dispatch loop might wake up too early. But the
2160 * point of notifying the main thread _is_ to wake up the
2161 * dispatch loop early anyway, so we wouldn't gain anything by
2164 event_queue_remove(base, ev, EVLIST_TIMEOUT);
2167 if (ev->ev_flags & EVLIST_ACTIVE)
2168 event_queue_remove(base, ev, EVLIST_ACTIVE);
2170 if (ev->ev_flags & EVLIST_INSERTED) {
2171 event_queue_remove(base, ev, EVLIST_INSERTED);
2172 if (ev->ev_events & (EV_READ|EV_WRITE))
2173 res = evmap_io_del(base, ev->ev_fd, ev);
2175 res = evmap_signal_del(base, (int)ev->ev_fd, ev);
2177 /* evmap says we need to notify the main thread. */
2183 /* if we are not in the right thread, we need to wake up the loop */
2184 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2185 evthread_notify_base(base);
2187 _event_debug_note_del(ev);
/* Public activation: take the base lock and mark 'ev' active with result
 * flags 'res'; 'ncalls' applies to signal events' repeat count. */
2193 event_active(struct event *ev, int res, short ncalls)
2195 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2196 event_warnx("%s: event has no event_base set.", __func__);
2200 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2202 _event_debug_assert_is_setup(ev);
2204 event_active_nolock(ev, res, ncalls);
2206 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
/* Lock-held core of event_active: merge 'res' into ev_res, push 'ev' onto
 * its priority's active queue, and wake the loop thread if needed. */
2211 event_active_nolock(struct event *ev, int res, short ncalls)
2213 struct event_base *base;
2215 event_debug(("event_active: %p (fd %d), res %d, callback %p",
2216 ev, (int)ev->ev_fd, (int)res, ev->ev_callback));
2219 /* We get different kinds of events, add them together */
2220 if (ev->ev_flags & EVLIST_ACTIVE) {
2227 EVENT_BASE_ASSERT_LOCKED(base);
2231 if (ev->ev_events & EV_SIGNAL) {
2232 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
/* Don't touch ev_ncalls/ev_pncalls while another thread runs this
 * signal event's callback; wait for it to finish. */
2233 if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
2234 ++base->current_event_waiters;
2235 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2238 ev->ev_ncalls = ncalls;
2239 ev->ev_pncalls = NULL;
2242 event_queue_insert(base, ev, EVLIST_ACTIVE);
2244 if (EVBASE_NEED_NOTIFY(base))
2245 evthread_notify_base(base);
/* Zero a deferred_cb and record its function/argument (assignments to
 * cb->cb / cb->arg are on lines omitted from this listing). */
2249 event_deferred_cb_init(struct deferred_cb *cb, deferred_cb_fn fn, void *arg)
2251 memset(cb, 0, sizeof(struct deferred_cb));
/* Remove a not-yet-run deferred callback from 'queue' (NULL queue means
 * the global current_base's queue). Safe no-op handling of unqueued cbs
 * is on lines omitted from this listing. */
2257 event_deferred_cb_cancel(struct deferred_cb_queue *queue,
2258 struct deferred_cb *cb)
/* Fixed mojibake: "&current_base" had been corrupted to "¤t_base"
 * (the "&curren" prefix was eaten by an HTML-entity substitution). */
2262 queue = &current_base->defer_queue;
2267 LOCK_DEFERRED_QUEUE(queue);
2269 TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
2270 --queue->active_count;
2273 UNLOCK_DEFERRED_QUEUE(queue);
/* Append 'cb' to 'queue' (NULL queue means the global current_base's
 * queue) and fire the queue's notify hook so the loop thread wakes up. */
2277 event_deferred_cb_schedule(struct deferred_cb_queue *queue,
2278 struct deferred_cb *cb)
/* Fixed mojibake: "&current_base" had been corrupted to "¤t_base"
 * (the "&curren" prefix was eaten by an HTML-entity substitution). */
2282 queue = &current_base->defer_queue;
2287 LOCK_DEFERRED_QUEUE(queue);
2290 TAILQ_INSERT_TAIL(&queue->deferred_cb_list, cb, cb_next);
2291 ++queue->active_count;
2292 if (queue->notify_fn)
2293 queue->notify_fn(queue, queue->notify_arg);
2295 UNLOCK_DEFERRED_QUEUE(queue);
2299 timeout_next(struct event_base *base, struct timeval **tv_p)
2301 /* Caller must hold th_base_lock */
2304 struct timeval *tv = *tv_p;
/* Peek the earliest pending timeout; empty heap means block indefinitely
 * (NULL tv_p handling is on lines omitted from this listing). */
2307 ev = min_heap_top(&base->timeheap);
2310 /* if no time-based events are active wait for I/O */
2315 if (gettime(base, &now) == -1) {
/* Already expired: zero wait so dispatch returns immediately. */
2320 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
2321 evutil_timerclear(tv);
2325 evutil_timersub(&ev->ev_timeout, &now, tv);
2327 EVUTIL_ASSERT(tv->tv_sec >= 0);
2328 EVUTIL_ASSERT(tv->tv_usec >= 0);
2329 event_debug(("timeout_next: in %d seconds", (int)tv->tv_sec));
2336 * Determines if the time is running backwards by comparing the current time
2337 * against the last time we checked. Not needed when using clock monotonic.
2338 * If time is running backwards, we adjust the firing time of every event by
2339 * the amount that time seems to have jumped.
/* Compensate for wall-clock jumps backwards (see comment above): when the
 * current time is before the last recorded event_tv, shift every pending
 * timeout (heap and common-timeout lists) back by the observed offset. */
2342 timeout_correct(struct event_base *base, struct timeval *tv)
2344 /* Caller must hold th_base_lock. */
2353 /* Check if time is running backwards */
2356 if (evutil_timercmp(tv, &base->event_tv, >=)) {
2357 base->event_tv = *tv;
2361 event_debug(("%s: time is running backwards, corrected",
2363 evutil_timersub(&base->event_tv, tv, &off);
2366 * We can modify the key element of the node without destroying
2367 * the minheap property, because we change every element.
2369 pev = base->timeheap.p
2370 size = base->timeheap.n;
2371 for (; size-- > 0; ++pev) {
2372 struct timeval *ev_tv = &(**pev).ev_timeout;
2373 evutil_timersub(ev_tv, &off, ev_tv);
2375 for (i=0; i<base->n_common_timeouts; ++i) {
2377 struct common_timeout_list *ctl =
2378 base->common_timeout_queues[i];
2379 TAILQ_FOREACH(ev, &ctl->events,
2380 ev_timeout_pos.ev_next_with_common_timeout) {
/* Strip the magic bits, adjust, then re-stamp the common-timeout
 * encoding in tv_usec. */
2381 struct timeval *ev_tv = &ev->ev_timeout;
2382 ev_tv->tv_usec &= MICROSECONDS_MASK;
2383 evutil_timersub(ev_tv, &off, ev_tv);
2384 ev_tv->tv_usec |= COMMON_TIMEOUT_MAGIC |
2385 (i<<COMMON_TIMEOUT_IDX_SHIFT);
2389 /* Now remember what the new time turned out to be. */
2390 base->event_tv = *tv;
2393 /* Activate every event whose timeout has elapsed. */
2395 timeout_process(struct event_base *base)
2397 /* Caller must hold lock. */
2401 if (min_heap_empty(&base->timeheap)) {
2405 gettime(base, &now);
/* Pop every heap entry whose deadline has passed and activate it with
 * EV_TIMEOUT; event_del_internal removes it from the heap and queues. */
2407 while ((ev = min_heap_top(&base->timeheap))) {
2408 if (evutil_timercmp(&ev->ev_timeout, &now, >))
2411 /* delete this event from the I/O queues */
2412 event_del_internal(ev);
2414 event_debug(("timeout_process: call %p",
2416 event_active_nolock(ev, EV_TIMEOUT, 1);
2420 /* Remove 'ev' from 'queue' (EVLIST_...) in base. */
2422 event_queue_remove(struct event_base *base, struct event *ev, int queue)
2424 EVENT_BASE_ASSERT_LOCKED(base);
2426 if (!(ev->ev_flags & queue)) {
2427 event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
2428 ev, ev->ev_fd, queue);
/* Internal events (e.g. the notify event) are not counted. */
2432 if (~ev->ev_flags & EVLIST_INTERNAL)
2433 base->event_count--;
2435 ev->ev_flags &= ~queue;
2437 case EVLIST_INSERTED:
2438 TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
2441 base->event_count_active--;
2442 TAILQ_REMOVE(&base->activequeues[ev->ev_pri],
2443 ev, ev_active_next);
2445 case EVLIST_TIMEOUT:
/* Common timeouts live on per-duration lists; others on the min-heap. */
2446 if (is_common_timeout(&ev->ev_timeout, base)) {
2447 struct common_timeout_list *ctl =
2448 get_common_timeout_list(base, &ev->ev_timeout);
2449 TAILQ_REMOVE(&ctl->events, ev,
2450 ev_timeout_pos.ev_next_with_common_timeout);
2452 min_heap_erase(&base->timeheap, ev);
2456 event_errx(1, "%s: unknown queue %x", __func__, queue);
2460 /* Add 'ev' to the common timeout list in 'ctl'. */
2462 insert_common_timeout_inorder(struct common_timeout_list *ctl,
2466 /* By all logic, we should just be able to append 'ev' to the end of
2467 * ctl->events, since the timeout on each 'ev' is set to {the common
2468 * timeout} + {the time when we add the event}, and so the events
2469 * should arrive in order of their timeouts. But just in case
2470 * there's some wacky threading issue going on, we do a search from
2471 * the end of 'ev' to find the right insertion point.
2473 TAILQ_FOREACH_REVERSE(e, &ctl->events,
2474 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
2475 /* This timercmp is a little sneaky, since both ev and e have
2476 * magic values in tv_usec. Fortunately, they ought to have
2477 * the _same_ magic values in tv_usec. Let's assert for that.
2480 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
2481 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
2482 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
2483 ev_timeout_pos.ev_next_with_common_timeout);
/* Nothing sorts before 'ev': it becomes the new list head. */
2487 TAILQ_INSERT_HEAD(&ctl->events, ev,
2488 ev_timeout_pos.ev_next_with_common_timeout);
/* Add 'ev' to one EVLIST_* queue on 'base'; mirror of event_queue_remove. */
2492 event_queue_insert(struct event_base *base, struct event *ev, int queue)
2494 EVENT_BASE_ASSERT_LOCKED(base);
2496 if (ev->ev_flags & queue) {
2497 /* Double insertion is possible for active events */
2498 if (queue & EVLIST_ACTIVE)
2501 event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
2502 ev, ev->ev_fd, queue);
/* Internal events (e.g. the notify event) are not counted. */
2506 if (~ev->ev_flags & EVLIST_INTERNAL)
2507 base->event_count++;
2509 ev->ev_flags |= queue;
2511 case EVLIST_INSERTED:
2512 TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
2515 base->event_count_active++;
2516 TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
2519 case EVLIST_TIMEOUT: {
/* Common timeouts go on per-duration lists; others on the min-heap. */
2520 if (is_common_timeout(&ev->ev_timeout, base)) {
2521 struct common_timeout_list *ctl =
2522 get_common_timeout_list(base, &ev->ev_timeout);
2523 insert_common_timeout_inorder(ctl, ev);
2525 min_heap_push(&base->timeheap, ev);
2529 event_errx(1, "%s: unknown queue %x", __func__, queue);
2533 /* Functions for debugging */
/* Library version as a string (compile-time constant). */
2536 event_get_version(void)
2538 return (_EVENT_VERSION);
/* Library version as a packed numeric constant for comparisons. */
2542 event_get_version_number(void)
2544 return (_EVENT_NUMERIC_VERSION);
2548 * No thread-safe interface needed - the information should be the same
/* Obsolete API: backend name of the global current_base. */
2553 event_get_method(void)
2555 return (current_base->evsel->name);
/* User-replaceable allocator hooks (see event_set_mem_functions); NULL
 * means fall through to the C library allocator. */
2558 #ifndef _EVENT_DISABLE_MM_REPLACEMENT
2559 static void *(*_mm_malloc_fn)(size_t sz) = NULL;
2560 static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
2561 static void (*_mm_free_fn)(void *p) = NULL;
/* malloc through the replacement hook when one is installed. */
2564 event_mm_malloc_(size_t sz)
2567 return _mm_malloc_fn(sz);
/* calloc through the replacement hook: emulated via malloc (zeroing is on
 * lines omitted from this listing), else the real calloc.
 * NOTE(review): the visible `count * size` has no overflow check --
 * confirm against the full file. */
2573 event_mm_calloc_(size_t count, size_t size)
2575 if (_mm_malloc_fn) {
2576 size_t sz = count * size;
2577 void *p = _mm_malloc_fn(sz);
2582 return calloc(count, size);
/* strdup through the replacement hook; Windows builds fall back to
 * _strdup (the POSIX-strdup branch is on lines omitted here). */
2586 event_mm_strdup_(const char *str)
2588 if (_mm_malloc_fn) {
2589 size_t ln = strlen(str);
2590 void *p = _mm_malloc_fn(ln+1);
2592 memcpy(p, str, ln+1);
2596 return _strdup(str);
/* realloc through the replacement hook when one is installed. */
2603 event_mm_realloc_(void *ptr, size_t sz)
2606 return _mm_realloc_fn(ptr, sz);
2608 return realloc(ptr, sz);
2612 event_mm_free_(void *ptr)
/* Install replacement malloc/realloc/free used by all mm_* allocations.
 * NOTE(review): callers are expected to do this before any allocation;
 * confirm threading expectations against the full documentation. */
2621 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
2622 void *(*realloc_fn)(void *ptr, size_t sz),
2623 void (*free_fn)(void *ptr))
2625 _mm_malloc_fn = malloc_fn;
2626 _mm_realloc_fn = realloc_fn;
2627 _mm_free_fn = free_fn;
2631 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
/* Read-side of the eventfd wakeup: consume the counter, then clear the
 * pending-notification flag under the base lock. */
2633 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
2637 struct event_base *base = arg;
2639 r = read(fd, (void*) &msg, sizeof(msg));
2640 if (r<0 && errno != EAGAIN) {
2641 event_sock_warn(fd, "Error reading from eventfd");
2643 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2644 base->is_notify_pending = 0;
2645 EVBASE_RELEASE_LOCK(base, th_base_lock);
/* Read-side of the socketpair/pipe wakeup: drain all pending bytes
 * (recv on Windows, read elsewhere), then clear the pending flag. */
2650 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
2652 unsigned char buf[1024];
2653 struct event_base *base = arg;
2655 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
2658 while (read(fd, (char*)buf, sizeof(buf)) > 0)
2662 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2663 base->is_notify_pending = 0;
2664 EVBASE_RELEASE_LOCK(base, th_base_lock);
/* Give 'base' a cross-thread wakeup channel, preferring eventfd, then
 * pipe (only if the backend supports plain fds), then a socketpair.
 * Registers an internal EV_READ|EV_PERSIST drain event at priority 0. */
2668 evthread_make_base_notifiable(struct event_base *base)
2670 void (*cb)(evutil_socket_t, short, void *) = evthread_notify_drain_default;
2671 int (*notify)(struct event_base *) = evthread_notify_base_default;
2673 /* XXXX grab the lock here? */
/* Already notifiable: nothing to do. */
2677 if (base->th_notify_fd[0] >= 0)
2680 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
2682 #define EFD_CLOEXEC 0
2684 base->th_notify_fd[0] = eventfd(0, EFD_CLOEXEC);
2685 if (base->th_notify_fd[0] >= 0) {
2686 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2687 notify = evthread_notify_base_eventfd;
2688 cb = evthread_notify_drain_eventfd;
2691 #if defined(_EVENT_HAVE_PIPE)
2692 if (base->th_notify_fd[0] < 0) {
2693 if ((base->evsel->features & EV_FEATURE_FDS)) {
2694 if (pipe(base->th_notify_fd) < 0) {
2695 event_warn("%s: pipe", __func__);
2697 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2698 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
2705 #define LOCAL_SOCKETPAIR_AF AF_INET
2707 #define LOCAL_SOCKETPAIR_AF AF_UNIX
2709 if (base->th_notify_fd[0] < 0) {
2710 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0,
2711 base->th_notify_fd) == -1) {
2712 event_sock_warn(-1, "%s: socketpair", __func__);
2715 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2716 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
2720 evutil_make_socket_nonblocking(base->th_notify_fd[0]);
2722 base->th_notify_fn = notify;
2725 Making the second socket nonblocking is a bit subtle, given that we
2726 ignore any EAGAIN returns when writing to it, and you don't usually
2727 do that for a nonblocking socket. But if the kernel gives us EAGAIN,
2728 then there's no need to add any more data to the buffer, since
2729 the main thread is already either about to wake up and drain it,
2730 or woken up and in the process of draining it.
2732 if (base->th_notify_fd[1] > 0)
2733 evutil_make_socket_nonblocking(base->th_notify_fd[1]);
2735 /* prepare an event that we can use for wakeup */
2736 event_assign(&base->th_notify, base, base->th_notify_fd[0],
2737 EV_READ|EV_PERSIST, cb, base);
2739 /* we need to mark this as internal event */
2740 base->th_notify.ev_flags |= EVLIST_INTERNAL;
2741 event_priority_set(&base->th_notify, 0);
2743 return event_add(&base->th_notify, NULL);
/* Debug helper: print every inserted event, then every active event by
 * priority, to 'output'. */
2747 event_base_dump_events(struct event_base *base, FILE *output)
2751 fprintf(output, "Inserted events:\n");
2752 TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
2753 fprintf(output, "  %p [fd %ld]%s%s%s%s%s\n",
2754 (void*)e, (long)e->ev_fd,
2755 (e->ev_events&EV_READ)?" Read":"",
2756 (e->ev_events&EV_WRITE)?" Write":"",
2757 (e->ev_events&EV_SIGNAL)?" Signal":"",
2758 (e->ev_events&EV_TIMEOUT)?" Timeout":"",
2759 (e->ev_events&EV_PERSIST)?" Persist":"");
2762 for (i = 0; i < base->nactivequeues; ++i) {
2763 if (TAILQ_EMPTY(&base->activequeues[i]))
2765 fprintf(output, "Active events [priority %d]:\n", i);
/* BUGFIX: this loop previously walked &base->eventqueue with ev_next,
 * dumping every inserted event once per non-empty priority instead of
 * the events actually on activequeues[i]. Walk the active queue via
 * its ev_active_next link (the link used elsewhere for activequeues). */
2766 TAILQ_FOREACH(e, &base->activequeues[i], ev_active_next) {
2767 fprintf(output, "  %p [fd %ld]%s%s%s%s\n",
2768 (void*)e, (long)e->ev_fd,
2769 (e->ev_res&EV_READ)?" Read active":"",
2770 (e->ev_res&EV_WRITE)?" Write active":"",
2771 (e->ev_res&EV_SIGNAL)?" Signal active":"",
2772 (e->ev_res&EV_TIMEOUT)?" Timeout active":"");
/* Count one more "virtual" event so the loop keeps running even with no
 * real events added (used by components that add events later). */
2778 event_base_add_virtual(struct event_base *base)
2780 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2781 base->virtual_event_count++;
2782 EVBASE_RELEASE_LOCK(base, th_base_lock);
/* Drop one virtual event; when the count reaches zero, wake the loop so
 * it can notice it may now have nothing left to wait for. */
2786 event_base_del_virtual(struct event_base *base)
2788 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2789 EVUTIL_ASSERT(base->virtual_event_count > 0);
2790 base->virtual_event_count--;
2791 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
2792 evthread_notify_base(base);
2793 EVBASE_RELEASE_LOCK(base, th_base_lock);