+#if defined(IO_USE_EPOLL) || defined(IO_USE_KQUEUE) || defined(IO_USE_DEVPOLL)
+ if (io_masterfd >= 0)
+ close(io_masterfd);
+ io_masterfd = -1;
+#endif
+#ifdef IO_USE_KQUEUE
+ array_free(&io_evcache);
+#endif
+ library_initialized = false;
+}
+
+
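+/* Set the callback function that gets invoked whenever one of the monitored
+ * event types occurs on "fd". Returns false if no io_event structure has
+ * been allocated for "fd" yet. */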
+bool
+io_event_setcb(int fd, void (*cbfunc) (int, short))
+{
+ io_event *i = io_event_get(fd);
+ if (!i)
+ return false;
+
+ i->callback = cbfunc;
+ return true;
+}
+
+
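+/* Register the event types in "what" for "fd" with the I/O backend selected
+ * at compile time. The select() branch only takes effect when io_masterfd
+ * is negative, i.e. when a primary backend such as epoll could not be
+ * initialized and select() serves as the runtime fallback. */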
+static bool
+backend_create_ev(int fd, short what)
+{
+ bool ret = false; /* stays false if no backend sets it */
+#ifdef IO_USE_DEVPOLL
+ ret = io_event_change_devpoll(fd, what);
+#endif
+#ifdef IO_USE_POLL
+ ret = io_event_change_poll(fd, what);
+#endif
+#ifdef IO_USE_EPOLL
+ ret = io_event_change_epoll(fd, what, EPOLL_CTL_ADD);
+#endif
+#ifdef IO_USE_KQUEUE
+ ret = io_event_change_kqueue(fd, what, EV_ADD|EV_ENABLE);
+#endif
+#ifdef IO_USE_SELECT
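+ /* no working backend instance: fall back to plain select() */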
+ if (io_masterfd < 0)
+ ret = io_event_add(fd, what);
+#endif
+ return ret;
+}
+
+
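+/* Allocate an io_event structure for "fd", store "cbfunc" as its callback,
+ * and register the event types in "what" with the backend. A typical call
+ * looks like this (the callback name "cb_client" is purely illustrative):
+ *
+ *	if (!io_event_create(sock, IO_WANTREAD, cb_client))
+ *		Log(LOG_ERR, "Could not register fd %d!", sock);
+ */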
+bool
+io_event_create(int fd, short what, void (*cbfunc) (int, short))
+{
+ bool ret;
+ io_event *i;
+
+ assert(fd >= 0);
+#if defined(IO_USE_SELECT) && defined(FD_SETSIZE)
+ if (io_masterfd < 0 && fd >= FD_SETSIZE) {
+ Log(LOG_ERR,
+     "fd %d exceeds FD_SETSIZE (%u): select() cannot handle this many file descriptors",
+     fd, FD_SETSIZE);
+ return false;
+ }
+#endif
+ i = (io_event *) array_alloc(&io_events, sizeof(io_event), (size_t) fd);
+ if (!i) {
+ Log(LOG_WARNING,
+     "array_alloc() failed: could not allocate space for the io_event structure of fd %d",
+     fd);
+ return false;
+ }
+
+ i->callback = cbfunc;
+ i->what = 0;
+ ret = backend_create_ev(fd, what);
+ if (ret)
+ i->what = what;
+ return ret;
+}
+
+
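+/* Add the event types in "what" to those already monitored for "fd".
+ * Returns true if the events are (or already were) registered. */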
+bool
+io_event_add(int fd, short what)
+{
+ io_event *i = io_event_get(fd);
+
+ if (!i) return false;
+
+ if ((i->what & what) == what) /* event type is already registered */
+ return true;
+
+ io_debug("io_event_add: fd, what", fd, what);
+
+ i->what |= what;
+#ifdef IO_USE_EPOLL
+ if (io_masterfd >= 0)
+ return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD);
+#endif
+#ifdef IO_USE_KQUEUE
+ return io_event_change_kqueue(fd, what, EV_ADD | EV_ENABLE);
+#endif
+#ifdef IO_USE_DEVPOLL
+ return io_event_change_devpoll(fd, i->what);
+#endif
+#ifdef IO_USE_POLL
+ return io_event_change_poll(fd, i->what);
+#endif
+#ifdef IO_USE_SELECT
+ if (fd > select_maxfd)
+ select_maxfd = fd;
+
+ if (what & IO_WANTREAD)
+ FD_SET(fd, &readers);
+ if (what & IO_WANTWRITE)
+ FD_SET(fd, &writers);
+
+ return true;
+#endif
+ return false;
+}
+
+
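+/* Switch "fd" to non-blocking mode via fcntl(); on old systems that lack
+ * O_NONBLOCK, the equivalent O_NDELAY flag is used instead. */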
+bool
+io_setnonblock(int fd)
+{
+ int flags = fcntl(fd, F_GETFL);
+ if (flags == -1)
+ return false;
+#ifndef O_NONBLOCK
+#define O_NONBLOCK O_NDELAY
+#endif
+ flags |= O_NONBLOCK;
+
+ return fcntl(fd, F_SETFL, flags) == 0;
+}
+
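+/* Set the close-on-exec flag on "fd" so it is not leaked to child
+ * processes. On systems without FD_CLOEXEC this is a no-op that
+ * returns true. */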
+bool
+io_setcloexec(int fd)
+{
+ int flags = fcntl(fd, F_GETFD);
+ if (flags == -1)
+ return false;
+#ifdef FD_CLOEXEC
+ flags |= FD_CLOEXEC;
+#endif
+
+ return fcntl(fd, F_SETFD, flags) == 0;
+}
+
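+/* Unregister "fd" from the backend and close it. The explicit removal is
+ * required because the kernel only drops the registration automatically on
+ * the last close of the descriptor, see the comment below. */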
+bool
+io_close(int fd)
+{
+ io_event *i;
+
+ i = io_event_get(fd);
+#ifdef IO_USE_KQUEUE
+ if (array_length(&io_evcache, sizeof (struct kevent))) /* pending changes in the cache? */
+ io_event_kqueue_commit_cache();
+
+ /* Both kqueue and epoll remove an fd from all event sets automatically,
+ * but only on the last close of the descriptor. Since we do not know
+ * whether this is the last close, the fd has to be removed explicitly. */
+ if (i) {
+ io_event_change_kqueue(fd, i->what, EV_DELETE);
+ io_event_kqueue_commit_cache();
+ }
+#endif
+ io_close_devpoll(fd);
+ io_close_poll(fd);
+ io_close_select(fd);
+#ifdef IO_USE_EPOLL
+ io_event_change_epoll(fd, 0, EPOLL_CTL_DEL);
+#endif
+ if (i) {
+ i->callback = NULL;
+ i->what = 0;
+ }
+ return close(fd) == 0;
+}
+
+
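+/* Remove the event types in "what" from the set monitored for "fd"; the
+ * io_event structure and its callback stay registered for any remaining
+ * event types. */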
+bool
+io_event_del(int fd, short what)
+{
+ io_event *i = io_event_get(fd);
+
+ io_debug("io_event_del: trying to delete event type; fd, what", fd, what);
+ if (!i) return false;
+
+ if (!(i->what & what)) /* event is already disabled */
+ return true;
+
+ i->what &= ~what;
+#ifdef IO_USE_DEVPOLL
+ return io_event_change_devpoll(fd, i->what);
+#endif
+#ifdef IO_USE_POLL
+ return io_event_change_poll(fd, i->what);
+#endif
+#ifdef IO_USE_EPOLL
+ if (io_masterfd >= 0)
+ return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD);
+#endif
+#ifdef IO_USE_KQUEUE
+ return io_event_change_kqueue(fd, what, EV_DISABLE);
+#endif
+#ifdef IO_USE_SELECT
+ if (what & IO_WANTWRITE)
+ FD_CLR(fd, &writers);
+
+ if (what & IO_WANTREAD)
+ FD_CLR(fd, &readers);
+ return true;
+#endif
+ return false;
+}