2 * Copyright (c) 2009-2010 Niels Provos and Nick Mathewson
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include "event2/event-config.h"
31 #ifdef _EVENT_HAVE_SYS_TIME_H
39 #ifdef _EVENT_HAVE_STDARG_H
42 #ifdef _EVENT_HAVE_UNISTD_H
51 #include <sys/queue.h>
53 #include "event2/util.h"
54 #include "event2/bufferevent.h"
55 #include "event2/buffer.h"
56 #include "event2/bufferevent_struct.h"
57 #include "event2/event.h"
58 #include "event2/util.h"
59 #include "event-internal.h"
60 #include "log-internal.h"
61 #include "mm-internal.h"
62 #include "bufferevent-internal.h"
63 #include "util-internal.h"
64 #include "iocp-internal.h"
66 #ifndef SO_UPDATE_CONNECT_CONTEXT
67 /* Mingw is sometimes missing this */
68 #define SO_UPDATE_CONNECT_CONTEXT 0x7010
/* Forward declarations for the async (IOCP-backed) bufferevent method
 * table; the definitions appear later in this file. */
72 static int be_async_enable(struct bufferevent *, short);
73 static int be_async_disable(struct bufferevent *, short);
74 static void be_async_destruct(struct bufferevent *);
75 static int be_async_flush(struct bufferevent *, short, enum bufferevent_flush_mode);
76 static int be_async_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
/* State for an IOCP-backed ("async") bufferevent.  The generic
 * bufferevent_private is the first member so a bufferevent_async can be
 * recovered from &bev.bev with EVUTIL_UPCAST (see upcast() below).
 * NOTE(review): an 'ok' bitfield referenced elsewhere in this file is
 * declared on a line elided from this extraction. */
78 struct bufferevent_async {
79 struct bufferevent_private bev;
/* One overlapped context per kind of outstanding operation; the IOCP
 * completion hands these back and the upcast_* helpers map them to us. */
80 struct event_overlapped connect_overlapped;
81 struct event_overlapped read_overlapped;
82 struct event_overlapped write_overlapped;
/* Set while an overlapped read/write has been launched but has not yet
 * completed; prevents launching a second concurrent operation. */
83 unsigned read_in_progress : 1;
84 unsigned write_in_progress : 1;
/* Set while a virtual event is registered with the base for the
 * pending read/write (keeps the event loop alive). */
86 unsigned read_added : 1;
87 unsigned write_added : 1;
/* Method table routing generic bufferevent calls to the async backend.
 * NOTE(review): most initializer lines (type name and the be_async_*
 * entries declared above) are elided from this extraction. */
90 const struct bufferevent_ops bufferevent_ops_async = {
92 evutil_offsetof(struct bufferevent_async, bev.bev),
96 _bufferevent_generic_adj_timeouts,
/* Downcast a generic bufferevent to its bufferevent_async container.
 * Only valid when bev uses bufferevent_ops_async; the guard below
 * rejects other backends (the bail-out return is on an elided line). */
101 static inline struct bufferevent_async *
102 upcast(struct bufferevent *bev)
104 struct bufferevent_async *bev_a;
105 if (bev->be_ops != &bufferevent_ops_async)
107 bev_a = EVUTIL_UPCAST(bev, struct bufferevent_async, bev.bev);
/* Recover the bufferevent_async that owns a connect_overlapped; used by
 * the IOCP completion callback for ConnectEx. */
111 static inline struct bufferevent_async *
112 upcast_connect(struct event_overlapped *eo)
114 struct bufferevent_async *bev_a;
115 bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, connect_overlapped);
116 EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
/* Recover the bufferevent_async that owns a read_overlapped; used by
 * the IOCP completion callback for overlapped reads. */
120 static inline struct bufferevent_async *
121 upcast_read(struct event_overlapped *eo)
123 struct bufferevent_async *bev_a;
124 bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, read_overlapped);
125 EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
/* Recover the bufferevent_async that owns a write_overlapped; used by
 * the IOCP completion callback for overlapped writes. */
129 static inline struct bufferevent_async *
130 upcast_write(struct event_overlapped *eo)
132 struct bufferevent_async *bev_a;
133 bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, write_overlapped);
134 EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
/* Drop the virtual event registered for a pending write.  The virtual
 * event exists only to keep the event base's loop running while an
 * overlapped write is outstanding; idempotent via the write_added bit. */
139 bev_async_del_write(struct bufferevent_async *beva)
141 struct bufferevent *bev = &beva->bev.bev;
143 if (beva->write_added) {
144 beva->write_added = 0;
145 event_base_del_virtual(bev->ev_base);
/* Drop the virtual event registered for a pending read; mirror of
 * bev_async_del_write, guarded by the read_added bit. */
150 bev_async_del_read(struct bufferevent_async *beva)
152 struct bufferevent *bev = &beva->bev.bev;
154 if (beva->read_added) {
155 beva->read_added = 0;
156 event_base_del_virtual(bev->ev_base);
/* Register a virtual event so the base's loop stays alive while an
 * overlapped write is in flight; idempotent via the write_added bit. */
161 bev_async_add_write(struct bufferevent_async *beva)
163 struct bufferevent *bev = &beva->bev.bev;
165 if (!beva->write_added) {
166 beva->write_added = 1;
167 event_base_add_virtual(bev->ev_base);
/* Register a virtual event so the base's loop stays alive while an
 * overlapped read is in flight; idempotent via the read_added bit. */
172 bev_async_add_read(struct bufferevent_async *beva)
174 struct bufferevent *bev = &beva->bev.bev;
176 if (!beva->read_added) {
177 beva->read_added = 1;
178 event_base_add_virtual(bev->ev_base);
/* Launch an overlapped write if conditions allow: no write already in
 * progress, the bufferevent is ok and write-enabled, there is pending
 * output, and writing is not suspended by rate limiting.  On a launch
 * failure, drops the extra reference and reports BEV_EVENT_ERROR. */
183 bev_async_consider_writing(struct bufferevent_async *beva)
187 struct bufferevent *bev = &beva->bev.bev;
189 /* Don't write if there's a write in progress, or we do not
190 * want to write, or when there's nothing left to write. */
191 if (beva->write_in_progress)
193 if (!beva->ok || !(bev->enabled&EV_WRITE) ||
194 !evbuffer_get_length(bev->output)) {
195 bev_async_del_write(beva);
199 at_most = evbuffer_get_length(bev->output);
/* Clamp the amount we launch to the rate-limit budget. */
201 /* XXXX This over-commits. */
202 /* This is safe so long as bufferevent_get_write_max never returns
203 * more than INT_MAX. That's true for now. XXXX */
204 limit = (int)_bufferevent_get_write_max(&beva->bev);
205 if (at_most >= (size_t)limit && limit >= 0)
208 if (beva->bev.write_suspended) {
209 bev_async_del_write(beva);
213 /* XXXX doesn't respect low-water mark very well. */
/* Hold a reference across the overlapped launch; it is released on
 * failure here, or when write_complete() fires. */
214 bufferevent_incref(bev);
215 if (evbuffer_launch_write(bev->output, at_most,
216 &beva->write_overlapped)) {
217 bufferevent_decref(bev);
219 _bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
221 beva->write_in_progress = 1;
222 bev_async_add_write(beva);
/* Launch an overlapped read if conditions allow: no read already in
 * progress, the bufferevent is ok and read-enabled, the input buffer is
 * below its high watermark, and reading is not rate-limit suspended.
 * The read size is capped by the watermark headroom (or a 16K default)
 * and by the rate-limit budget.  On a launch failure, drops the extra
 * reference and reports BEV_EVENT_ERROR. */
227 bev_async_consider_reading(struct bufferevent_async *beva)
233 struct bufferevent *bev = &beva->bev.bev;
235 /* Don't read if there is a read in progress, or we do not
237 if (beva->read_in_progress)
239 if (!beva->ok || !(bev->enabled&EV_READ)) {
240 bev_async_del_read(beva);
244 /* Don't read if we're full */
245 cur_size = evbuffer_get_length(bev->input);
246 read_high = bev->wm_read.high;
248 if (cur_size >= read_high) {
249 bev_async_del_read(beva);
252 at_most = read_high - cur_size;
254 at_most = 16384; /* FIXME totally magic. */
257 /* XXXX This over-commits. */
258 * XXXX see also note above on cast on _bufferevent_get_write_max() */
259 limit = (int)_bufferevent_get_read_max(&beva->bev);
260 if (at_most >= (size_t)limit && limit >= 0)
263 if (beva->bev.read_suspended) {
264 bev_async_del_read(beva);
/* Hold a reference across the overlapped launch; it is released on
 * failure here, or when read_complete() fires. */
268 bufferevent_incref(bev);
269 if (evbuffer_launch_read(bev->input, at_most, &beva->read_overlapped)) {
271 bufferevent_decref(bev);
272 _bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
274 beva->read_in_progress = 1;
275 bev_async_add_read(beva);
/* evbuffer callback on the output buffer: when data is appended and no
 * write is currently outstanding, try to launch one.  Takes the
 * bufferevent lock (and a reference) for the duration. */
282 be_async_outbuf_callback(struct evbuffer *buf,
283 const struct evbuffer_cb_info *cbinfo,
286 struct bufferevent *bev = arg;
287 struct bufferevent_async *bev_async = upcast(bev);
289 /* If we added data to the outbuf and were not writing before,
290 * we may want to write now. */
292 _bufferevent_incref_and_lock(bev);
295 bev_async_consider_writing(bev_async);
297 _bufferevent_decref_and_unlock(bev);
/* evbuffer callback on the input buffer: when data is drained (freeing
 * watermark headroom) and no read is outstanding, try to launch one.
 * Takes the bufferevent lock (and a reference) for the duration. */
301 be_async_inbuf_callback(struct evbuffer *buf,
302 const struct evbuffer_cb_info *cbinfo,
305 struct bufferevent *bev = arg;
306 struct bufferevent_async *bev_async = upcast(bev);
308 /* If we drained data from the inbuf and were not reading before,
309 * we may want to read now */
311 _bufferevent_incref_and_lock(bev);
313 if (cbinfo->n_deleted)
314 bev_async_consider_reading(bev_async);
316 _bufferevent_decref_and_unlock(bev);
/* Backend 'enable' hook: 'what' is a mask of EV_READ/EV_WRITE.  Resets
 * the generic timeouts for the newly enabled directions and, if the
 * direction was idle, considers launching an overlapped operation. */
320 be_async_enable(struct bufferevent *buf, short what)
322 struct bufferevent_async *bev_async = upcast(buf);
327 /* NOTE: This interferes with non-blocking connect */
329 BEV_RESET_GENERIC_READ_TIMEOUT(buf);
331 BEV_RESET_GENERIC_WRITE_TIMEOUT(buf);
333 /* If we newly enable reading or writing, and we aren't reading or
334 writing already, consider launching a new read or write. */
337 bev_async_consider_reading(bev_async);
339 bev_async_consider_writing(bev_async);
/* Backend 'disable' hook: clears the generic timeout and the virtual
 * event for each direction in 'what'.  Any already-launched overlapped
 * operation is NOT canceled (see the XXXX note below). */
344 be_async_disable(struct bufferevent *bev, short what)
346 struct bufferevent_async *bev_async = upcast(bev);
347 /* XXXX If we disable reading or writing, we may want to consider
348 * canceling any in-progress read or write operation, though it might
351 if (what & EV_READ) {
352 BEV_DEL_GENERIC_READ_TIMEOUT(bev);
353 bev_async_del_read(bev_async);
355 if (what & EV_WRITE) {
356 BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
357 bev_async_del_write(bev_async);
/* Backend destructor.  Requires that no overlapped read/write is still
 * in flight (asserted), tears down the virtual events, closes the
 * socket when BEV_OPT_CLOSE_ON_FREE was set, and removes the ev_write
 * event that a non-blocking connect may have installed. */
364 be_async_destruct(struct bufferevent *bev)
366 struct bufferevent_async *bev_async = upcast(bev);
367 struct bufferevent_private *bev_p = BEV_UPCAST(bev);
370 EVUTIL_ASSERT(!upcast(bev)->write_in_progress &&
371 !upcast(bev)->read_in_progress);
373 bev_async_del_read(bev_async);
374 bev_async_del_write(bev_async);
/* The fd lives inside the overlapped evbuffer, not in bev itself. */
376 fd = _evbuffer_overlapped_get_fd(bev->input);
377 if (bev_p->options & BEV_OPT_CLOSE_ON_FREE)
378 evutil_closesocket(fd);
379 /* delete this in case non-blocking connect was used */
380 if (event_initialized(&bev->ev_write)) {
381 event_del(&bev->ev_write);
382 _bufferevent_del_generic_timeout_cbs(bev);
386 /* GetQueuedCompletionStatus doesn't reliably yield WSA error codes, so
387 * we use WSAGetOverlappedResult to translate. */
/* NOTE(review): the useful effect here is the side effect —
 * WSAGetOverlappedResult sets the thread's last WSA error; the byte
 * count and flags outputs are discarded by callers. */
389 bev_async_set_wsa_error(struct bufferevent *bev, struct event_overlapped *eo)
394 fd = _evbuffer_overlapped_get_fd(bev->input);
395 WSAGetOverlappedResult(fd, &eo->overlapped, &bytes, FALSE, &flags);
/* Backend 'flush' hook.  NOTE(review): the body is elided from this
 * extraction; flushing appears to be a no-op for the async backend. */
399 be_async_flush(struct bufferevent *bev, short what,
400 enum bufferevent_flush_mode mode)
/* IOCP completion callback for ConnectEx.  Clears the 'connecting'
 * flag, updates the socket's connect context so shutdown/getpeername
 * work, then reports BEV_EVENT_CONNECTED or (after translating the WSA
 * error) BEV_EVENT_ERROR.  Releases the virtual event and the reference
 * taken when the connect was launched. */
406 connect_complete(struct event_overlapped *eo, ev_uintptr_t key,
407 ev_ssize_t nbytes, int ok)
409 struct bufferevent_async *bev_a = upcast_connect(eo);
410 struct bufferevent *bev = &bev_a->bev.bev;
411 evutil_socket_t sock;
415 EVUTIL_ASSERT(bev_a->bev.connecting);
416 bev_a->bev.connecting = 0;
417 sock = _evbuffer_overlapped_get_fd(bev_a->bev.bev.input);
418 /* XXXX Handle error? */
419 setsockopt(sock, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0);
422 bufferevent_async_set_connected(bev);
424 bev_async_set_wsa_error(bev, eo);
426 _bufferevent_run_eventcb(bev,
427 ok? BEV_EVENT_CONNECTED : BEV_EVENT_ERROR);
429 event_base_del_virtual(bev->ev_base);
431 _bufferevent_decref_and_unlock(bev);
/* IOCP completion callback for an overlapped read.  Commits the bytes
 * into the input evbuffer, charges the rate-limit read bucket, invokes
 * the user read callback when the low watermark is met, and considers
 * launching the next read.  On failure/EOF reports the event callback
 * with BEV_EVENT_READING | (ERROR or EOF).  Releases the reference
 * taken when the read was launched. */
435 read_complete(struct event_overlapped *eo, ev_uintptr_t key,
436 ev_ssize_t nbytes, int ok)
438 struct bufferevent_async *bev_a = upcast_read(eo);
439 struct bufferevent *bev = &bev_a->bev.bev;
440 short what = BEV_EVENT_READING;
443 EVUTIL_ASSERT(bev_a->read_in_progress);
445 evbuffer_commit_read(bev->input, nbytes);
446 bev_a->read_in_progress = 0;
/* Translate the failure into a WSA error code before reporting. */
449 bev_async_set_wsa_error(bev, eo);
453 BEV_RESET_GENERIC_READ_TIMEOUT(bev);
454 _bufferevent_decrement_read_buckets(&bev_a->bev,
456 if (evbuffer_get_length(bev->input) >= bev->wm_read.low)
457 _bufferevent_run_readcb(bev);
458 bev_async_consider_reading(bev_a);
460 what |= BEV_EVENT_ERROR;
462 _bufferevent_run_eventcb(bev, what);
463 } else if (!nbytes) {
464 what |= BEV_EVENT_EOF;
466 _bufferevent_run_eventcb(bev, what);
470 _bufferevent_decref_and_unlock(bev);
/* IOCP completion callback for an overlapped write.  Commits (drains)
 * the written bytes from the output evbuffer, charges the rate-limit
 * write bucket, invokes the user write callback when output falls to
 * the low watermark, and considers launching the next write.  On
 * failure/zero-byte completion reports BEV_EVENT_WRITING | (ERROR or
 * EOF).  Releases the reference taken when the write was launched. */
474 write_complete(struct event_overlapped *eo, ev_uintptr_t key,
475 ev_ssize_t nbytes, int ok)
477 struct bufferevent_async *bev_a = upcast_write(eo);
478 struct bufferevent *bev = &bev_a->bev.bev;
479 short what = BEV_EVENT_WRITING;
482 EVUTIL_ASSERT(bev_a->write_in_progress);
483 evbuffer_commit_write(bev->output, nbytes);
484 bev_a->write_in_progress = 0;
/* Translate the failure into a WSA error code before reporting. */
487 bev_async_set_wsa_error(bev, eo);
491 BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
492 _bufferevent_decrement_write_buckets(&bev_a->bev,
494 if (evbuffer_get_length(bev->output) <=
496 _bufferevent_run_writecb(bev);
497 bev_async_consider_writing(bev_a);
499 what |= BEV_EVENT_ERROR;
501 _bufferevent_run_eventcb(bev, what);
502 } else if (!nbytes) {
503 what |= BEV_EVENT_EOF;
505 _bufferevent_run_eventcb(bev, what);
509 _bufferevent_decref_and_unlock(bev);
/* Construct an IOCP-backed bufferevent on 'fd' (which may be -1 and
 * set later via BEV_CTRL_SET_FD).  Forces BEV_OPT_THREADSAFE, requires
 * the base to have an IOCP port, associates the socket with that port,
 * builds overlapped input/output evbuffers, and wires up the buffer
 * callbacks and the three completion routines.  Returns NULL on any
 * failure (cleanup paths are partially elided from this extraction). */
513 bufferevent_async_new(struct event_base *base,
514 evutil_socket_t fd, int options)
516 struct bufferevent_async *bev_a;
517 struct bufferevent *bev;
518 struct event_iocp_port *iocp;
/* IOCP completions arrive on arbitrary threads, so locking is not
 * optional for this backend. */
520 options |= BEV_OPT_THREADSAFE;
522 if (!(iocp = event_base_get_iocp(base)))
525 if (fd >= 0 && event_iocp_port_associate(iocp, fd, 1)<0) {
526 int err = GetLastError();
527 /* We may have already associated this fd with a port.
528 * Let's hope it's this port, and that the error code
529 * for doing this never changes. */
530 if (err != ERROR_INVALID_PARAMETER)
534 if (!(bev_a = mm_calloc(1, sizeof(struct bufferevent_async))))
537 bev = &bev_a->bev.bev;
538 if (!(bev->input = evbuffer_overlapped_new(fd))) {
542 if (!(bev->output = evbuffer_overlapped_new(fd))) {
543 evbuffer_free(bev->input);
548 if (bufferevent_init_common(&bev_a->bev, base, &bufferevent_ops_async,
552 evbuffer_add_cb(bev->input, be_async_inbuf_callback, bev);
553 evbuffer_add_cb(bev->output, be_async_outbuf_callback, bev);
555 event_overlapped_init(&bev_a->connect_overlapped, connect_complete);
556 event_overlapped_init(&bev_a->read_overlapped, read_complete);
557 event_overlapped_init(&bev_a->write_overlapped, write_complete);
561 _bufferevent_init_generic_timeout_cbs(bev);
/* Error path: free the partially built bufferevent. */
565 bufferevent_free(&bev_a->bev.bev);
/* Mark an async bufferevent as connected: (re)install the generic
 * timeout callbacks and re-run enable so pending reads/writes launch. */
570 bufferevent_async_set_connected(struct bufferevent *bev)
572 struct bufferevent_async *bev_async = upcast(bev);
574 _bufferevent_init_generic_timeout_cbs(bev);
575 /* Now's a good time to consider reading/writing */
576 be_async_enable(bev, bev->enabled);
/* Report whether 'bev' can use ConnectEx-based async connect: it must
 * be an async bufferevent, its base must have an IOCP port, and the
 * ConnectEx extension function must have been loaded. */
580 bufferevent_async_can_connect(struct bufferevent *bev)
582 const struct win32_extension_fns *ext =
583 event_get_win32_extension_fns();
585 if (BEV_IS_ASYNC(bev) &&
586 event_base_get_iocp(bev->ev_base) &&
587 ext && ext->ConnectEx)
/* Start an asynchronous connect on 'fd' to 'sa' using ConnectEx.
 * Binds the socket to a wildcard local address first (ConnectEx
 * requires a bound socket), takes a virtual event plus a reference for
 * the pending operation, and launches ConnectEx with the
 * connect_overlapped context; connect_complete() finishes the job.
 * On immediate failure the virtual event and reference are released. */
594 bufferevent_async_connect(struct bufferevent *bev, evutil_socket_t fd,
595 const struct sockaddr *sa, int socklen)
598 struct bufferevent_async *bev_async = upcast(bev);
599 struct sockaddr_storage ss;
600 const struct win32_extension_fns *ext =
601 event_get_win32_extension_fns();
603 EVUTIL_ASSERT(ext && ext->ConnectEx && fd >= 0 && sa != NULL);
605 /* ConnectEx() requires that the socket be bound to an address
606 * with bind() before using, otherwise it will fail. We attempt
607 * to issue a bind() here, taking into account that the error
608 * code is set to WSAEINVAL when the socket is already bound. */
609 memset(&ss, 0, sizeof(ss));
610 if (sa->sa_family == AF_INET) {
611 struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
612 sin->sin_family = AF_INET;
613 sin->sin_addr.s_addr = INADDR_ANY;
614 } else if (sa->sa_family == AF_INET6) {
615 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
616 sin6->sin6_family = AF_INET6;
617 sin6->sin6_addr = in6addr_any;
619 /* Well, the user will have to bind() */
622 if (bind(fd, (struct sockaddr *)&ss, sizeof(ss)) < 0 &&
623 WSAGetLastError() != WSAEINVAL)
/* Keep the loop alive and the bufferevent pinned until the
 * completion callback runs. */
626 event_base_add_virtual(bev->ev_base);
627 bufferevent_incref(bev);
628 rc = ext->ConnectEx(fd, sa, socklen, NULL, 0, NULL,
629 &bev_async->connect_overlapped.overlapped);
630 if (rc || WSAGetLastError() == ERROR_IO_PENDING)
/* Launch failed synchronously: undo the virtual event and ref. */
633 event_base_del_virtual(bev->ev_base);
634 bufferevent_decref(bev);
640 be_async_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
641 union bufferevent_ctrl_data *data)
644 case BEV_CTRL_GET_FD:
645 data->fd = _evbuffer_overlapped_get_fd(bev->input);
647 case BEV_CTRL_SET_FD: {
648 struct event_iocp_port *iocp;
650 if (data->fd == _evbuffer_overlapped_get_fd(bev->input))
652 if (!(iocp = event_base_get_iocp(bev->ev_base)))
654 if (event_iocp_port_associate(iocp, data->fd, 1) < 0)
656 _evbuffer_overlapped_set_fd(bev->input, data->fd);
657 _evbuffer_overlapped_set_fd(bev->output, data->fd);
660 case BEV_CTRL_GET_UNDERLYING: