/*
 * netio.c -- network I/O support.
 *
 * Copyright (c) 2001-2011, NLnet Labs. All rights reserved.
 *
 * See LICENSE for the license.
 *
 */
#include "config.h"

#include <assert.h>
#include <errno.h>
#include <sys/time.h>
#include <string.h>
#include <stdlib.h>

#include "netio.h"
#include "util.h"


#ifndef HAVE_PSELECT
int pselect(int n, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
	    const struct timespec *timeout, const sigset_t *sigmask);
#else
#include <sys/select.h>
#endif


struct netio_handler_list
{
	netio_handler_list_type *next;
	netio_handler_type *handler;
};

netio_type *
netio_create(region_type *region)
{
	netio_type *result;

	assert(region);

	result = (netio_type *) region_alloc(region, sizeof(netio_type));
	result->region = region;
	result->handlers = NULL;
	result->deallocated = NULL;
	result->dispatch_next = NULL;
	return result;
}

void
netio_add_handler(netio_type *netio, netio_handler_type *handler)
{
	netio_handler_list_type *elt;

	assert(netio);
	assert(handler);

	if (netio->deallocated) {
		/*
		 * If we have deallocated handler list elements, reuse
		 * the first one.
		 */
		elt = netio->deallocated;
		netio->deallocated = elt->next;
	} else {
		/*
		 * Allocate a new one.
		 */
		elt = (netio_handler_list_type *) region_alloc(
			netio->region, sizeof(netio_handler_list_type));
	}

	elt->next = netio->handlers;
	elt->handler = handler;
	netio->handlers = elt;
}

void
netio_remove_handler(netio_type *netio, netio_handler_type *handler)
{
	netio_handler_list_type **elt_ptr;

	assert(netio);
	assert(handler);

	for (elt_ptr = &netio->handlers; *elt_ptr; elt_ptr = &(*elt_ptr)->next) {
		if ((*elt_ptr)->handler == handler) {
			netio_handler_list_type *next = (*elt_ptr)->next;
			/*
			 * If the dispatch loop would visit this element
			 * next, advance it past the element being removed.
			 */
			if ((*elt_ptr) == netio->dispatch_next)
				netio->dispatch_next = next;
			/* Recycle the list element on the deallocated list. */
			(*elt_ptr)->handler = NULL;
			(*elt_ptr)->next = netio->deallocated;
			netio->deallocated = *elt_ptr;
			*elt_ptr = next;
			break;
		}
	}
}

const struct timespec *
netio_current_time(netio_type *netio)
{
	assert(netio);

	if (!netio->have_current_time) {
		struct timeval current_timeval;
		if (gettimeofday(&current_timeval, NULL) == -1) {
			log_msg(LOG_CRIT, "gettimeofday: %s, aborting.", strerror(errno));
			abort();
		}
		timeval_to_timespec(&netio->cached_current_time, &current_timeval);
		netio->have_current_time = 1;
	}

	return &netio->cached_current_time;
}

int
netio_dispatch(netio_type *netio, const struct timespec *timeout, const sigset_t *sigmask)
{
	fd_set readfds, writefds, exceptfds;
	int max_fd;
	int have_timeout = 0;
	struct timespec minimum_timeout;
	netio_handler_type *timeout_handler = NULL;
	netio_handler_list_type *elt;
	int rc;
	int result = 0;

	assert(netio);

	/*
	 * Clear the cached current time.
	 */
	netio->have_current_time = 0;

	/*
	 * Initialize the minimum timeout with the timeout parameter.
	 */
	if (timeout) {
		have_timeout = 1;
		memcpy(&minimum_timeout, timeout, sizeof(struct timespec));
	}

	/*
	 * Initialize the fd_sets and timeout based on the handler
	 * information.
	 */
	max_fd = -1;
	FD_ZERO(&readfds);
	FD_ZERO(&writefds);
	FD_ZERO(&exceptfds);

	for (elt = netio->handlers; elt; elt = elt->next) {
		netio_handler_type *handler = elt->handler;
		if (handler->fd != -1 && handler->fd < (int)FD_SETSIZE) {
			if (handler->fd > max_fd) {
				max_fd = handler->fd;
			}
			if (handler->event_types & NETIO_EVENT_READ) {
				extern int slowaccept;
				extern struct timespec slowaccept_timeout;

				if ((handler->event_types & NETIO_EVENT_ACCEPT) && slowaccept) {
					if (timespec_compare(&slowaccept_timeout, netio_current_time(netio)) < 0) {
						slowaccept = 0;
					}
					if (slowaccept) {
						/** Timeout after slowaccept timeout. */
						struct timespec relative;
						relative.tv_sec = slowaccept_timeout.tv_sec;
						relative.tv_nsec = slowaccept_timeout.tv_nsec;
						timespec_subtract(&relative, netio_current_time(netio));
						if (!have_timeout ||
						    timespec_compare(&relative, &minimum_timeout) < 0) {
							have_timeout = 1;
							minimum_timeout.tv_sec = relative.tv_sec;
							minimum_timeout.tv_nsec = relative.tv_nsec;
						}
					} else {
						FD_SET(handler->fd, &readfds);
					}
				} else {
					/* Not accept event or not slow accept */
					FD_SET(handler->fd, &readfds);
				}
			}
			if (handler->event_types & NETIO_EVENT_WRITE) {
				FD_SET(handler->fd, &writefds);
			}
			if (handler->event_types & NETIO_EVENT_EXCEPT) {
				FD_SET(handler->fd, &exceptfds);
			}
		}
		if (handler->timeout && (handler->event_types & NETIO_EVENT_TIMEOUT)) {
			struct timespec relative;

			relative.tv_sec = handler->timeout->tv_sec;
			relative.tv_nsec = handler->timeout->tv_nsec;
			timespec_subtract(&relative, netio_current_time(netio));

			if (!have_timeout ||
			    timespec_compare(&relative, &minimum_timeout) < 0)
			{
				have_timeout = 1;
				minimum_timeout.tv_sec = relative.tv_sec;
				minimum_timeout.tv_nsec = relative.tv_nsec;
				timeout_handler = handler;
			}
		}
	}

	if (have_timeout && minimum_timeout.tv_sec < 0) {
		/*
		 * On negative timeout for a handler, immediately
		 * dispatch the timeout event without checking for
		 * other events.
		 */
		if (timeout_handler && (timeout_handler->event_types & NETIO_EVENT_TIMEOUT)) {
			timeout_handler->event_handler(netio, timeout_handler, NETIO_EVENT_TIMEOUT);
		}
		return result;
	}

	/* Check for events. */
	rc = pselect(max_fd + 1, &readfds, &writefds, &exceptfds,
		     have_timeout ? &minimum_timeout : NULL,
		     sigmask);
	if (rc == -1) {
		if (errno == EINVAL || errno == EACCES || errno == EBADF) {
			log_msg(LOG_ERR, "fatal error pselect: %s.",
				strerror(errno));
			exit(1);
		}
		return -1;
	}

	/*
	 * Clear the cached current_time (pselect(2) may block for
	 * some time so the cached value is likely to be old).
	 */
	netio->have_current_time = 0;

	if (rc == 0) {
		/*
		 * No events before the minimum timeout expired.
		 * Dispatch to handler if interested.
		 */
		if (timeout_handler && (timeout_handler->event_types & NETIO_EVENT_TIMEOUT)) {
			timeout_handler->event_handler(netio, timeout_handler, NETIO_EVENT_TIMEOUT);
		}
	} else {
		/*
		 * Dispatch all the events to interested handlers
		 * based on the fd_sets. Note that a handler might
		 * deinstall itself, so store the next handler before
		 * calling the current handler!
		 */
		assert(netio->dispatch_next == NULL);
		for (elt = netio->handlers; elt && rc; ) {
			netio_handler_type *handler = elt->handler;
			netio->dispatch_next = elt->next;
			if (handler->fd != -1 && handler->fd < (int)FD_SETSIZE) {
				netio_event_types_type event_types = NETIO_EVENT_NONE;
				if (FD_ISSET(handler->fd, &readfds)) {
					event_types |= NETIO_EVENT_READ;
					FD_CLR(handler->fd, &readfds);
					rc--;
				}
				if (FD_ISSET(handler->fd, &writefds)) {
					event_types |= NETIO_EVENT_WRITE;
					FD_CLR(handler->fd, &writefds);
					rc--;
				}
				if (FD_ISSET(handler->fd, &exceptfds)) {
					event_types |= NETIO_EVENT_EXCEPT;
					FD_CLR(handler->fd, &exceptfds);
					rc--;
				}

				if (event_types & handler->event_types) {
					handler->event_handler(netio, handler, event_types & handler->event_types);
					++result;
				}
			}
			elt = netio->dispatch_next;
		}
		netio->dispatch_next = NULL;
	}

	return result;
}
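
/*
 * Illustrative usage sketch (an addition, not part of the original NSD
 * source): one way a caller might register a read handler and drive the
 * dispatch loop.  The names example_read_handler, example_event_loop and
 * server_fd are hypothetical; only netio_create(), netio_add_handler(),
 * netio_dispatch() and the handler fields used above (fd, timeout,
 * event_types, event_handler) are taken from this file.  Guarded with
 * #if 0 so it never affects the build.
 */
#if 0
static void
example_read_handler(netio_type *netio, netio_handler_type *handler,
	netio_event_types_type event_types)
{
	(void) netio;
	if (event_types & NETIO_EVENT_READ) {
		/* The descriptor is readable; a real handler would
		 * accept or read here. */
		log_msg(LOG_INFO, "fd %d is readable", handler->fd);
	}
}

static void
example_event_loop(region_type *region, int server_fd)
{
	netio_type *netio = netio_create(region);
	netio_handler_type handler;

	/* Zero the handler first so any fields not set below stay empty. */
	memset(&handler, 0, sizeof(handler));
	handler.fd = server_fd;
	handler.timeout = NULL;			/* no per-handler timeout */
	handler.event_types = NETIO_EVENT_READ;
	handler.event_handler = example_read_handler;
	netio_add_handler(netio, &handler);

	/* Keep dispatching; netio_dispatch returns -1 on pselect errors
	 * such as EINTR, which are not treated as fatal here. */
	for (;;) {
		if (netio_dispatch(netio, NULL, NULL) == -1 && errno != EINTR)
			break;
	}
}
#endif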