+ Conf_MaxConnections = FD_SETSIZE - 1;
+ }
+#else
+ Log(LOG_WARNING,
+ "FD_SETSIZE undefined, don't know how many descriptors select() can handle on your platform ...");
+#endif /* FD_SETSIZE */
+ library_initialized = true;
+}
+
+/* select() backend: stop tracking file descriptor "fd".
+ * Removes fd from both interest sets and, when fd was the current
+ * maximum, scans downward for the next-largest fd that still has a
+ * registered callback so select()'s nfds argument stays tight. */
+static void
+io_close_select(int fd)
+{
+ io_event *i;
+
+ if (io_masterfd >= 0) /* Are we using epoll()? */
+ return;
+
+ FD_CLR(fd, &writers);
+ FD_CLR(fd, &readers);
+
+ i = io_event_get(fd);
+ if (!i) return; /* fd was never registered: nothing more to do */
+
+ if (fd == select_maxfd) {
+ while (select_maxfd>0) {
+ --select_maxfd; /* find largest fd */
+ i = io_event_get(select_maxfd);
+ if (i && i->callback) break;
+ }
+ }
+}
+#else
+/* select() support compiled out: empty stubs so callers need no #ifdefs */
+static inline void
+io_library_init_select(int UNUSED x)
+{ /* NOTHING */ }
+static inline void
+io_close_select(int UNUSED x)
+{ /* NOTHING */ }
+#endif /* SELECT */
+
+
+#ifdef IO_USE_EPOLL
+/* Register, modify or delete the epoll interest for "fd".
+ * "what" is a bitmask of IO_WANTREAD/IO_WANTWRITE; "action" is the
+ * epoll_ctl() op (EPOLL_CTL_ADD/MOD/DEL). Returns true on success. */
+static bool
+io_event_change_epoll(int fd, short what, const int action)
+{
+ struct epoll_event ev = { 0, {0} };
+ ev.data.fd = fd;
+
+ if (what & IO_WANTREAD)
+ ev.events = EPOLLIN | EPOLLPRI;
+ if (what & IO_WANTWRITE)
+ ev.events |= EPOLLOUT;
+
+ return epoll_ctl(io_masterfd, action, fd, &ev) == 0;
+}
+
+/* Wait up to *tv for events on the epoll set and dispatch callbacks.
+ * Returns the number of ready descriptors (or -1 on epoll_wait error). */
+static int
+io_dispatch_epoll(struct timeval *tv)
+{
+ /* epoll_wait() takes its timeout in milliseconds; tv_usec holds
+ * MICROseconds, so it must be scaled down. (The previous code added
+ * tv_usec unscaled, inflating the timeout by up to a factor of 1000.) */
+ time_t sec = tv->tv_sec * 1000;
+ int i, ret, timeout = tv->tv_usec / 1000 + sec;
+ struct epoll_event epoll_ev[MAX_EVENTS];
+ short type;
+
+ if (timeout < 0) /* overflow or bogus timeval: fall back to 1s */
+ timeout = 1000;
+
+ ret = epoll_wait(io_masterfd, epoll_ev, MAX_EVENTS, timeout);
+
+ for (i = 0; i < ret; i++) {
+ type = 0;
+ if (epoll_ev[i].events & (EPOLLERR | EPOLLHUP))
+ type = IO_ERROR;
+
+ if (epoll_ev[i].events & (EPOLLIN | EPOLLPRI))
+ type |= IO_WANTREAD;
+
+ if (epoll_ev[i].events & EPOLLOUT)
+ type |= IO_WANTWRITE;
+
+ io_docallback(epoll_ev[i].data.fd, type);
+ }
+
+ return ret;
+}
+
+/* Try to initialize the epoll backend. "eventsize" is passed to
+ * epoll_create() as a size hint (clamped to 128 if non-positive).
+ * On success sets io_masterfd and library_initialized; on failure
+ * returns without setting them so a select() fallback can run. */
+static void
+io_library_init_epoll(unsigned int eventsize)
+{
+ int ecreate_hint = (int)eventsize;
+ if (ecreate_hint <= 0)
+ ecreate_hint = 128;
+ io_masterfd = epoll_create(ecreate_hint);
+ if (io_masterfd >= 0) {
+ library_initialized = true;
+ Log(LOG_INFO,
+ "IO subsystem: epoll (hint size %d, initial maxfd %u, masterfd %d).",
+ ecreate_hint, eventsize, io_masterfd);
+ return;
+ }
+#ifdef IO_USE_SELECT
+ Log(LOG_INFO, "Can't initialize epoll() IO interface, falling back to select() ...");
+#endif
+}
+#else
+/* epoll support compiled out: empty stub keeps callers #ifdef-free */
+static inline void
+io_library_init_epoll(unsigned int UNUSED ev)
+{ /* NOTHING */ }
+#endif /* IO_USE_EPOLL */
+
+
+#ifdef IO_USE_KQUEUE
+/* Flush all kevent changes cached in io_evcache to the kernel in one
+ * kevent() call. Returns true if there was nothing to do or the commit
+ * succeeded (the cache is then emptied); false on error. */
+static bool
+io_event_kqueue_commit_cache(void)
+{
+ struct kevent *events;
+ bool ret;
+ int len = (int) array_length(&io_evcache, sizeof (struct kevent));
+
+ if (!len) /* nothing to do */
+ return true;
+
+ assert(len>0);
+
+ /* defensive: len < 0 would mean a corrupt cache; drop it entirely
+ * (only reachable when asserts are compiled out) */
+ if (len < 0) {
+ array_free(&io_evcache);
+ return false;
+ }
+
+ events = array_start(&io_evcache);
+
+ assert(events != NULL);
+
+ ret = kevent(io_masterfd, events, len, NULL, 0, NULL) == 0;
+ if (ret)
+ array_trunc(&io_evcache); /* committed: clear the cache */
+ return ret;
+}
+
+/* Queue a kqueue filter change for "fd". "what" is a bitmask of
+ * IO_WANTREAD/IO_WANTWRITE; "action" is the EV_SET flags (e.g. EV_ADD,
+ * EV_DELETE). Changes are appended to the io_evcache batch; if caching
+ * fails, the change is submitted to the kernel directly. The cache is
+ * flushed once it reaches 100 entries. Returns true on success. */
+static bool
+io_event_change_kqueue(int fd, short what, const int action)
+{
+ struct kevent kev;
+ bool ret = true;
+
+ if (what & IO_WANTREAD) {
+ EV_SET(&kev, fd, EVFILT_READ, action, 0, 0, 0);
+ ret = array_catb(&io_evcache, (char*) &kev, sizeof (kev));
+ if (!ret) /* caching failed: apply the change immediately */
+ ret = kevent(io_masterfd, &kev,1, NULL, 0, NULL) == 0;
+ }
+
+ if (ret && (what & IO_WANTWRITE)) {
+ EV_SET(&kev, fd, EVFILT_WRITE, action, 0, 0, 0);
+ ret = array_catb(&io_evcache, (char*) &kev, sizeof (kev));
+ if (!ret) /* caching failed: apply the change immediately */
+ ret = kevent(io_masterfd, &kev, 1, NULL, 0, NULL) == 0;
+ }
+
+ if (array_length(&io_evcache, sizeof kev) >= 100)
+ io_event_kqueue_commit_cache();
+ return ret;
+}
+
+static int
+io_dispatch_kqueue(struct timeval *tv)
+{
+ int i, ret;
+ struct kevent kev[MAX_EVENTS];
+ struct kevent *newevents;
+ struct timespec ts;
+ int newevents_len;
+ ts.tv_sec = tv->tv_sec;
+ ts.tv_nsec = tv->tv_usec * 1000;
+
+ newevents_len = (int) array_length(&io_evcache, sizeof (struct kevent));
+ newevents = (newevents_len > 0) ? array_start(&io_evcache) : NULL;
+ assert(newevents_len >= 0);
+
+ ret = kevent(io_masterfd, newevents, newevents_len, kev, MAX_EVENTS, &ts);
+ if (newevents && ret != -1)
+ array_trunc(&io_evcache);
+
+ for (i = 0; i < ret; i++) {
+ io_debug("dispatch_kqueue: fd, kev.flags", (int)kev[i].ident, kev[i].flags);
+ if (kev[i].flags & (EV_EOF|EV_ERROR)) {
+ if (kev[i].flags & EV_ERROR)
+ Log(LOG_ERR, "kevent fd %d: EV_ERROR (%s)",
+ (int)kev[i].ident, strerror((int)kev[i].data));
+ io_docallback((int)kev[i].ident, IO_ERROR);
+ continue;
+ }
+
+ switch (kev[i].filter) {
+ case EVFILT_READ:
+ io_docallback((int)kev[i].ident, IO_WANTREAD);
+ break;
+ case EVFILT_WRITE:
+ io_docallback((int)kev[i].ident, IO_WANTWRITE);
+ break;
+ default:
+ LogDebug("Unknown kev.filter number %d for fd %d",
+ kev[i].filter, kev[i].ident);
+ /* Fall through */
+ case EV_ERROR:
+ io_docallback((int)kev[i].ident, IO_ERROR);
+ break;
+ }