X-Git-Url: https://arthur.barton.de/cgi-bin/gitweb.cgi?p=ngircd-alex.git;a=blobdiff_plain;f=src%2Fngircd%2Fio.c;h=e331464647d5c6b9e8141febb591fa426de8311d;hp=96672da691a1b858f70eb2723a44898f2991cc44;hb=a2f5a05ff86402bb7fb09094c52d607743fc9f49;hpb=30c11b23133e98e95303423d44193e4835a3c676 diff --git a/src/ngircd/io.c b/src/ngircd/io.c index 96672da6..e3314646 100644 --- a/src/ngircd/io.c +++ b/src/ngircd/io.c @@ -12,7 +12,7 @@ #include "portab.h" -static char UNUSED id[] = "$Id: io.c,v 1.10 2005/08/30 13:38:16 fw Exp $"; +static char UNUSED id[] = "$Id: io.c,v 1.21 2006/09/19 18:21:30 fw Exp $"; #include #include @@ -26,10 +26,11 @@ static char UNUSED id[] = "$Id: io.c,v 1.10 2005/08/30 13:38:16 fw Exp $"; #include "io.h" #include "log.h" +/* Enables extra debug messages in event add/delete/callback code. */ +/* #define DEBUG_IO */ typedef struct { void (*callback)(int, short); - int fd; short what; } io_event; @@ -42,9 +43,17 @@ typedef struct { # ifdef HAVE_KQUEUE #define IO_USE_KQUEUE 1 # else +# ifdef HAVE_SYS_DEVPOLL_H +#define IO_USE_DEVPOLL 1 +# else +# ifdef HAVE_POLL +#define IO_USE_POLL 1 +# else #define IO_USE_SELECT 1 -#endif -#endif +# endif /* HAVE_POLL */ +# endif /* HAVE_SYS_DEVPOLL_H */ +# endif /* HAVE_KQUEUE */ +#endif /* HAVE_EPOLL_CREATE */ static bool library_initialized; @@ -52,8 +61,7 @@ static bool library_initialized; #include static int io_masterfd; -static bool io_event_new_epoll(int fd, short what); -static bool io_event_change_epoll(int fd, short what); +static bool io_event_change_epoll(int fd, short what, const int action); static int io_dispatch_epoll(struct timeval *tv); #endif @@ -64,7 +72,23 @@ static array io_evcache; static int io_masterfd; static int io_dispatch_kqueue(struct timeval *tv); -static bool io_event_add_kqueue(int, short); +static bool io_event_change_kqueue(int, short, const int action); +#endif + +#ifdef IO_USE_POLL +#include + +static array pollfds; +static int poll_maxfd; + +static bool io_event_change_poll(int fd, short what); +#endif + +#ifdef IO_USE_DEVPOLL +#include +static int io_masterfd; + +static bool io_event_change_devpoll(int fd, short what); #endif #ifdef IO_USE_SELECT @@ -87,77 +111,138 @@ static io_event * io_event_get(int fd) { io_event *i; + assert(fd >= 0); - i = (io_event *) array_get(&io_events, sizeof(io_event), fd); - assert(i); + + i = (io_event *) array_get(&io_events, sizeof(io_event), (size_t) fd); + + assert(i != NULL); return i; } -bool -io_library_init(unsigned int eventsize) +#ifdef IO_USE_DEVPOLL +static void +io_library_init_devpoll(unsigned int eventsize) { - bool ret; -#ifdef IO_USE_EPOLL - int ecreate_hint = (int)eventsize; - if (ecreate_hint <= 0) - ecreate_hint = 128; + io_masterfd = open("/dev/poll", O_RDWR); + if (io_masterfd >= 0) + library_initialized = true; + Log(LOG_INFO, "IO subsystem: /dev/poll (initial maxfd %u, masterfd %d).", + eventsize, io_masterfd); +} #endif - if (library_initialized) - return true; -#ifdef IO_USE_SELECT -#ifdef FD_SETSIZE - if (eventsize >= FD_SETSIZE) - eventsize = FD_SETSIZE - 1; -#endif +#ifdef IO_USE_POLL +static void +io_library_init_poll(unsigned int eventsize) +{ + struct pollfd *p; + array_init(&pollfds); + poll_maxfd = 0; + Log(LOG_INFO, "IO subsystem: poll (initial maxfd %u).", + eventsize); + p = array_alloc(&pollfds, sizeof(struct pollfd), eventsize); + if (p) { + unsigned i; + p = array_start(&pollfds); + for (i = 0; i < eventsize; i++) + p[i].fd = -1; + + library_initialized = true; + } +} #endif - if (eventsize && !array_alloc(&io_events, 
sizeof(io_event), eventsize)) - eventsize = 0; -#ifdef IO_USE_EPOLL - io_masterfd = epoll_create(ecreate_hint); - Log(LOG_INFO, - "IO subsystem: epoll (hint size %d, initial maxfd %u, masterfd %d).", - ecreate_hint, eventsize, io_masterfd); - ret = io_masterfd >= 0; - if (ret) library_initialized = true; - return ret; -#endif #ifdef IO_USE_SELECT +static void +io_library_init_select(unsigned int eventsize) +{ Log(LOG_INFO, "IO subsystem: select (initial maxfd %u).", eventsize); FD_ZERO(&readers); FD_ZERO(&writers); #ifdef FD_SETSIZE - if (Conf_MaxConnections >= FD_SETSIZE) { + if (Conf_MaxConnections >= (int)FD_SETSIZE) { Log(LOG_WARNING, "MaxConnections (%d) exceeds limit (%u), changed MaxConnections to %u.", Conf_MaxConnections, FD_SETSIZE, FD_SETSIZE - 1); Conf_MaxConnections = FD_SETSIZE - 1; } -#else - Log(LOG_WARNING, - "FD_SETSIZE undefined, don't know how many descriptors select() can handle on your platform ..."); #endif /* FD_SETSIZE */ library_initialized = true; - return true; +} #endif /* SELECT */ + + +#ifdef IO_USE_EPOLL +static void +io_library_init_epoll(unsigned int eventsize) +{ + int ecreate_hint = (int)eventsize; + if (ecreate_hint <= 0) + ecreate_hint = 128; + io_masterfd = epoll_create(ecreate_hint); + Log(LOG_INFO, + "IO subsystem: epoll (hint size %d, initial maxfd %u, masterfd %d).", + ecreate_hint, eventsize, io_masterfd); + if (io_masterfd >= 0) + library_initialized = true; +} +#endif + + #ifdef IO_USE_KQUEUE +static void +io_library_init_kqueue(unsigned int eventsize) +{ io_masterfd = kqueue(); Log(LOG_INFO, "IO subsystem: kqueue (initial maxfd %u, masterfd %d)", eventsize, io_masterfd); - ret = io_masterfd >= 0; - if (ret) library_initialized = true; + if (io_masterfd >= 0) + library_initialized = true; +} +#endif - return ret; + +bool +io_library_init(unsigned int eventsize) +{ + if (library_initialized) + return true; +#ifdef IO_USE_SELECT +#ifndef FD_SETSIZE + Log(LOG_WARNING, + "FD_SETSIZE undefined, don't know how many descriptors select() can handle on your platform ..."); +#else + if (eventsize >= FD_SETSIZE) + eventsize = FD_SETSIZE - 1; +#endif /* FD_SETSIZE */ +#endif /* IO_USE_SELECT */ + if ((eventsize > 0) && !array_alloc(&io_events, sizeof(io_event), (size_t)eventsize)) + eventsize = 0; +#ifdef IO_USE_EPOLL + io_library_init_epoll(eventsize); #endif +#ifdef IO_USE_KQUEUE + io_library_init_kqueue(eventsize); +#endif +#ifdef IO_USE_DEVPOLL + io_library_init_devpoll(eventsize); +#endif +#ifdef IO_USE_POLL + io_library_init_poll(eventsize); +#endif +#ifdef IO_USE_SELECT + io_library_init_select(eventsize); +#endif + return library_initialized; } @@ -167,11 +252,14 @@ io_library_shutdown(void) #ifdef IO_USE_SELECT FD_ZERO(&readers); FD_ZERO(&writers); -#else - close(io_masterfd); /* kqueue, epoll */ +#endif +#ifdef IO_USE_EPOLL + close(io_masterfd); io_masterfd = -1; #endif #ifdef IO_USE_KQUEUE + close(io_masterfd); + io_masterfd = -1; array_free(&io_evcache); #endif library_initialized = false; @@ -193,22 +281,19 @@ io_event_setcb(int fd, void (*cbfunc) (int, short)) bool io_event_create(int fd, short what, void (*cbfunc) (int, short)) { + bool ret; io_event *i; assert(fd >= 0); - -#ifdef IO_USE_SELECT -#ifdef FD_SETSIZE +#if defined(IO_USE_SELECT) || defined(FD_SETSIZE) if (fd >= FD_SETSIZE) { Log(LOG_ERR, "fd %d exceeds FD_SETSIZE (%u) (select can't handle more file descriptors)", fd, FD_SETSIZE); return false; } -#endif /* FD_SETSIZE */ -#endif /* IO_USE_SELECT */ - - i = (io_event *) array_alloc(&io_events, sizeof(io_event), fd); +#endif + i = 
(io_event *) array_alloc(&io_events, sizeof(io_event), (size_t) fd); if (!i) { Log(LOG_WARNING, "array_alloc failed: could not allocate space for %d io_event structures", @@ -216,41 +301,74 @@ io_event_create(int fd, short what, void (*cbfunc) (int, short)) return false; } - i->fd = fd; i->callback = cbfunc; + i->what = 0; +#ifdef IO_USE_DEVPOLL + ret = io_event_change_devpoll(fd, what); +#endif +#ifdef IO_USE_POLL + ret = io_event_change_poll(fd, what); +#endif #ifdef IO_USE_EPOLL - i->what = what; - return io_event_new_epoll(fd, what); + ret = io_event_change_epoll(fd, what, EPOLL_CTL_ADD); #endif #ifdef IO_USE_KQUEUE - i->what = what; - return io_event_add_kqueue(fd, what); + ret = io_event_change_kqueue(fd, what, EV_ADD|EV_ENABLE); #endif #ifdef IO_USE_SELECT - i->what = 0; - return io_event_add(fd, what); + ret = io_event_add(fd, what); #endif + if (ret) i->what = what; + return ret; } -#ifdef IO_USE_EPOLL +#ifdef IO_USE_DEVPOLL static bool -io_event_new_epoll(int fd, short what) +io_event_change_devpoll(int fd, short what) { - struct epoll_event ev = { 0, {0} }; - ev.data.fd = fd; + struct pollfd p; + + p.events = 0; if (what & IO_WANTREAD) - ev.events = EPOLLIN | EPOLLPRI; + p.events = POLLIN | POLLPRI; if (what & IO_WANTWRITE) - ev.events |= EPOLLOUT; + p.events |= POLLOUT; - return epoll_ctl(io_masterfd, EPOLL_CTL_ADD, fd, &ev) == 0; + p.fd = fd; + return write(io_masterfd, &p, sizeof p) == (ssize_t)sizeof p; } +#endif + + +#ifdef IO_USE_POLL +static bool +io_event_change_poll(int fd, short what) +{ + struct pollfd *p; + short events = 0; + + if (what & IO_WANTREAD) + events = POLLIN | POLLPRI; + if (what & IO_WANTWRITE) + events |= POLLOUT; + + p = array_alloc(&pollfds, sizeof *p, fd); + if (p) { + p->events = events; + p->fd = fd; + if (fd > poll_maxfd) + poll_maxfd = fd; + } + return p != NULL; +} +#endif +#ifdef IO_USE_EPOLL static bool -io_event_change_epoll(int fd, short what) +io_event_change_epoll(int fd, short what, const int action) { struct epoll_event ev = { 0, {0} }; ev.data.fd = fd; @@ -260,7 +378,7 @@ io_event_change_epoll(int fd, short what) if (what & IO_WANTWRITE) ev.events |= EPOLLOUT; - return epoll_ctl(io_masterfd, EPOLL_CTL_MOD, fd, &ev) == 0; + return epoll_ctl(io_masterfd, action, fd, &ev) == 0; } #endif @@ -271,7 +389,7 @@ io_event_kqueue_commit_cache(void) struct kevent *events; bool ret; int len = (int) array_length(&io_evcache, sizeof (struct kevent)); - + if (!len) /* nothing to do */ return true; @@ -284,7 +402,7 @@ io_event_kqueue_commit_cache(void) events = array_start(&io_evcache); - assert(events); + assert(events != NULL); ret = kevent(io_masterfd, events, len, NULL, 0, NULL) == 0; if (ret) @@ -294,24 +412,28 @@ io_event_kqueue_commit_cache(void) static bool -io_event_add_kqueue(int fd, short what) +io_event_change_kqueue(int fd, short what, const int action) { struct kevent kev; - short filter = 0; - size_t len = array_length(&io_evcache, sizeof kev); - - if (what & IO_WANTREAD) - filter = EVFILT_READ; + bool ret = true; - if (what & IO_WANTWRITE) - filter |= EVFILT_WRITE; + if (what & IO_WANTREAD) { + EV_SET(&kev, fd, EVFILT_READ, action, 0, 0, 0); + ret = array_catb(&io_evcache, (char*) &kev, sizeof (kev)); + if (!ret) + ret = kevent(io_masterfd, &kev,1, NULL, 0, NULL) == 0; + } - if (len >= 100) { - (void)io_event_kqueue_commit_cache(); + if (ret && (what & IO_WANTWRITE)) { + EV_SET(&kev, fd, EVFILT_WRITE, action, 0, 0, 0); + ret = array_catb(&io_evcache, (char*) &kev, sizeof (kev)); + if (!ret) + ret = kevent(io_masterfd, &kev, 1, NULL, 0, 
NULL) == 0; } - EV_SET(&kev, fd, filter, EV_ADD | EV_ENABLE, 0, 0, NULL); - return array_catb(&io_evcache, (char*) &kev, sizeof (kev)); + if (array_length(&io_evcache, sizeof kev) >= 100) + io_event_kqueue_commit_cache(); + return ret; } #endif @@ -321,26 +443,25 @@ io_event_add(int fd, short what) { io_event *i = io_event_get(fd); - assert(i); - - if (!i) - return false; - if (i->what == what) - return true; -#ifdef DEBUG + if (!i) return false; + if (i->what == what) return true; +#ifdef DEBUG_IO Log(LOG_DEBUG, "io_event_add(): fd %d (arg: %d), what %d.", i->fd, fd, what); #endif - i->what |= what; - #ifdef IO_USE_EPOLL - return io_event_change_epoll(fd, i->what); + return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD); #endif #ifdef IO_USE_KQUEUE - return io_event_add_kqueue(fd, what); + return io_event_change_kqueue(fd, what, EV_ADD | EV_ENABLE); +#endif +#ifdef IO_USE_DEVPOLL + return io_event_change_devpoll(fd, i->what); +#endif +#ifdef IO_USE_POLL + return io_event_change_poll(fd, i->what); #endif - #ifdef IO_USE_SELECT if (fd > select_maxfd) select_maxfd = fd; @@ -371,67 +492,132 @@ io_setnonblock(int fd) } -bool -io_close(int fd) +#ifdef IO_USE_DEVPOLL +static void +io_close_devpoll(int fd) { - io_event *i = io_event_get(fd); - if (i) { - memset(i, 0, sizeof(io_event)); - i->fd = -1; + struct pollfd p; + p.events = POLLREMOVE; + p.fd = fd; + write(io_masterfd, &p, sizeof p); +} +#else +static inline void io_close_devpoll(int UNUSED x) { /* NOTHING */ } +#endif + + + +#ifdef IO_USE_POLL +static void +io_close_poll(int fd) +{ + struct pollfd *p; + p = array_get(&pollfds, sizeof *p, fd); + if (!p) return; + + p->fd = -1; + if (fd == poll_maxfd) { + while (poll_maxfd > 0) { + --poll_maxfd; + p = array_get(&pollfds, sizeof *p, poll_maxfd); + if (p && p->fd >= 0) + break; + } } +} +#else +static inline void io_close_poll(int UNUSED x) { /* NOTHING */ } +#endif + + #ifdef IO_USE_SELECT +static void +io_close_select(int fd) +{ + io_event *i; FD_CLR(fd, &writers); FD_CLR(fd, &readers); - if (fd == select_maxfd) - select_maxfd--; + i = io_event_get(fd); + if (!i) return; + + if (fd == select_maxfd) { + while (select_maxfd>0) { + --select_maxfd; /* find largest fd */ + i = io_event_get(select_maxfd); + if (i && i->callback) break; + } + } +} +#else +static inline void io_close_select(int UNUSED x) { /* NOTHING */ } #endif + + +bool +io_close(int fd) +{ + io_event *i; + + i = io_event_get(fd); #ifdef IO_USE_KQUEUE if (array_length(&io_evcache, sizeof (struct kevent))) /* pending data in cache? */ io_event_kqueue_commit_cache(); + + /* both kqueue and epoll remove fd from all sets automatically on the last close + * of the descriptor. since we don't know if this is the last close we'll have + * to remove the set explicitly. 
*/ + if (i) { + io_event_change_kqueue(fd, i->what, EV_DELETE); + io_event_kqueue_commit_cache(); + } +#endif + + io_close_devpoll(fd); + io_close_poll(fd); + io_close_select(fd); + +#ifdef IO_USE_EPOLL + io_event_change_epoll(fd, 0, EPOLL_CTL_DEL); #endif - return close(fd) == 0; /* both epoll an kqueue will remove fd from all sets automatically */ + if (i) { + i->callback = NULL; + i->what = 0; + } + return close(fd) == 0; } bool io_event_del(int fd, short what) { -#ifdef IO_USE_KQUEUE - struct kevent kev; - short filter = 0; -#endif io_event *i = io_event_get(fd); -#ifdef DEBUG +#ifdef DEBUG_IO Log(LOG_DEBUG, "io_event_del(): trying to delete eventtype %d on fd %d", what, fd); #endif - assert(i); - if (!i) - return false; + if (!i) return false; i->what &= ~what; +#ifdef IO_USE_DEVPOLL + return io_event_change_devpoll(fd, i->what); +#endif +#ifdef IO_USE_POLL + return io_event_change_poll(fd, i->what); +#endif #ifdef IO_USE_EPOLL - return io_event_change_epoll(fd, i->what); + return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD); #endif #ifdef IO_USE_KQUEUE - if (what & IO_WANTREAD) - filter = EVFILT_READ; - - if (what & IO_WANTWRITE) - filter |= EVFILT_WRITE; - - EV_SET(&kev, fd, filter, EV_DELETE, 0, 0, NULL); - return kevent(io_masterfd, &kev, 1, NULL, 0, NULL) == 0; + return io_event_change_kqueue(fd, what, EV_DISABLE); #endif - #ifdef IO_USE_SELECT if (what & IO_WANTWRITE) - FD_CLR(i->fd, &writers); + FD_CLR(fd, &writers); if (what & IO_WANTREAD) - FD_CLR(i->fd, &readers); + FD_CLR(fd, &readers); return true; #endif @@ -475,6 +661,92 @@ io_dispatch_select(struct timeval *tv) #endif +#ifdef IO_USE_DEVPOLL +static int +io_dispatch_devpoll(struct timeval *tv) +{ + struct dvpoll dvp; + time_t sec = tv->tv_sec * 1000; + int i, total, ret, timeout = tv->tv_usec + sec; + short what; + struct pollfd p[100]; + + if (timeout < 0) + timeout = 1000; + + total = 0; + do { + dvp.dp_timeout = timeout; + dvp.dp_nfds = 100; + dvp.dp_fds = p; + ret = ioctl(io_masterfd, DP_POLL, &dvp); + total += ret; + if (ret <= 0) + return total; + for (i=0; i < ret ; i++) { + what = 0; + if (p[i].revents & (POLLIN|POLLPRI)) + what = IO_WANTREAD; + + if (p[i].revents & POLLOUT) + what |= IO_WANTWRITE; + + if (p[i].revents && !what) { + /* other flag is set, probably POLLERR */ + what = IO_ERROR; + } + io_docallback(p[i].fd, what); + } + } while (ret == 100); + + return total; +} +#endif + + +#ifdef IO_USE_POLL +static int +io_dispatch_poll(struct timeval *tv) +{ + time_t sec = tv->tv_sec * 1000; + int i, ret, timeout = tv->tv_usec + sec; + int fds_ready; + short what; + struct pollfd *p = array_start(&pollfds); + + if (timeout < 0) + timeout = 1000; + + ret = poll(p, poll_maxfd + 1, timeout); + if (ret <= 0) + return ret; + + fds_ready = ret; + for (i=0; i <= poll_maxfd; i++) { + what = 0; + if (p[i].revents & (POLLIN|POLLPRI)) + what = IO_WANTREAD; + + if (p[i].revents & POLLOUT) + what |= IO_WANTWRITE; + + if (p[i].revents && !what) { + /* other flag is set, probably POLLERR */ + what = IO_ERROR; + } + if (what) { + fds_ready--; + io_docallback(i, what); + } + if (fds_ready <= 0) + break; + } + + return ret; +} +#endif + + #ifdef IO_USE_EPOLL static int io_dispatch_epoll(struct timeval *tv) @@ -524,10 +796,9 @@ io_dispatch_kqueue(struct timeval *tv) struct kevent *newevents; struct timespec ts; int newevents_len; - short type; ts.tv_sec = tv->tv_sec; ts.tv_nsec = tv->tv_usec * 1000; - + do { newevents_len = (int) array_length(&io_evcache, sizeof (struct kevent)); newevents = (newevents_len > 0) ? 
array_start(&io_evcache) : NULL; @@ -536,9 +807,8 @@ io_dispatch_kqueue(struct timeval *tv) newevents_len = 0; #ifdef DEBUG if (newevents_len) - assert(newevents); + assert(newevents != NULL); #endif - ret = kevent(io_masterfd, newevents, newevents_len, kev, 100, &ts); if ((newevents_len>0) && ret != -1) @@ -549,25 +819,34 @@ io_dispatch_kqueue(struct timeval *tv) return total; for (i = 0; i < ret; i++) { - type = 0; - if (kev[i].flags & EV_EOF) - type = IO_ERROR; - - if (kev[i].filter & EV_ERROR) - type = IO_ERROR; - - if (kev[i].filter & EVFILT_READ) - type |= IO_WANTREAD; - - if (kev[i].filter & EVFILT_WRITE) - type |= IO_WANTWRITE; - - io_docallback(kev[i].ident, type); + if (kev[i].flags & EV_EOF) { +#ifdef DEBUG + LogDebug("kev.flag has EV_EOF set, setting IO_ERROR", + kev[i].filter, kev[i].ident); +#endif + io_docallback((int)kev[i].ident, IO_ERROR); + continue; + } + + switch (kev[i].filter) { + case EVFILT_READ: + io_docallback((int)kev[i].ident, IO_WANTREAD); + break; + case EVFILT_WRITE: + io_docallback((int)kev[i].ident, IO_WANTWRITE); + break; + default: +#ifdef DEBUG + LogDebug("Unknown kev.filter number %d for fd %d", + kev[i].filter, kev[i].ident); /* Fall through */ +#endif + case EV_ERROR: + io_docallback((int)kev[i].ident, IO_ERROR); + break; + } } - ts.tv_sec = 0; ts.tv_nsec = 0; - } while (ret == 100); return total; @@ -584,6 +863,12 @@ io_dispatch(struct timeval *tv) #ifdef IO_USE_KQUEUE return io_dispatch_kqueue(tv); #endif +#ifdef IO_USE_DEVPOLL + return io_dispatch_devpoll(tv); +#endif +#ifdef IO_USE_POLL + return io_dispatch_poll(tv); +#endif #ifdef IO_USE_EPOLL return io_dispatch_epoll(tv); #endif @@ -595,13 +880,14 @@ static void io_docallback(int fd, short what) { io_event *i; -#ifdef DEBUG +#ifdef DEBUG_IO Log(LOG_DEBUG, "doing callback for fd %d, what %d", fd, what); #endif i = io_event_get(fd); - assert(i); - if (i->callback) /* callback might be 0 if previous callback function called io_close on this fd */ + if (i->callback) { /* callback might be NULL if a previous callback function + called io_close on this fd */ i->callback(fd, (what & IO_ERROR) ? i->what : what); - /* if error indicator is set, we return the event(s) the app asked for */ + } + /* if error indicator is set, we return the event(s) that were registered */ }
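
The public interface touched by this patch is small, so a usage sketch may help when reading the hunks above. The following stand-alone program is illustrative only: the function names, flags and signatures (io_library_init, io_event_create, io_dispatch, io_close, io_library_shutdown, IO_WANTREAD) are taken from the diff itself, while the stdin example, the buffer handling and the one-second timeout are assumptions and not part of ngIRCd.

/*
 * Minimal sketch, not part of ngIRCd: register stdin with the io layer
 * patched above and run a dispatch loop.  Assumes "portab.h" and "io.h"
 * declare the types and prototypes exactly as they appear in the diff
 * (bool, io_library_init(), io_event_create(), io_dispatch(), ...).
 */
#include "portab.h"

#include <sys/time.h>
#include <unistd.h>

#include "io.h"

static void
cb_stdin(int fd, short what)
{
	char buf[128];

	if (what & IO_WANTREAD) {
		/* On errors the dispatcher passes the registered events
		 * (see io_docallback above), so failures surface here as
		 * a failing read(). */
		if (read(fd, buf, sizeof(buf)) <= 0)
			io_close(fd);	/* EOF or error: drop the fd */
	}
}

int
main(void)
{
	struct timeval tv;

	if (!io_library_init(100))	/* initial size hint only */
		return 1;
	if (!io_event_create(STDIN_FILENO, IO_WANTREAD, cb_stdin))
		return 1;

	for (;;) {
		tv.tv_sec = 1;		/* one-second dispatch timeout */
		tv.tv_usec = 0;
		if (io_dispatch(&tv) == -1)
			break;		/* dispatcher reported an error */
	}
	io_library_shutdown();
	return 0;
}

Which backend io_dispatch() actually drives (epoll, kqueue, /dev/poll, poll or select) is fixed at compile time by the IO_USE_* macros chosen at the top of the patched file; calling code like the sketch above stays the same in every case.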