/* Copyright libuv project contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

/* POSIX defines poll() as a portable way to wait on file descriptors.
 * Here we maintain a dynamically sized array of file descriptors and
 * events to pass as the first argument to poll().
 */

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <errno.h>
#include <unistd.h>

int uv__platform_loop_init(uv_loop_t* loop) {
  loop->poll_fds = NULL;
  loop->poll_fds_used = 0;
  loop->poll_fds_size = 0;
  loop->poll_fds_iterating = 0;
  return 0;
}

void uv__platform_loop_delete(uv_loop_t* loop) {
  uv__free(loop->poll_fds);
  loop->poll_fds = NULL;
}

int uv__io_fork(uv_loop_t* loop) {
  uv__platform_loop_delete(loop);
  return uv__platform_loop_init(loop);
}

/* Allocate or dynamically resize our poll fds array. */
static void uv__pollfds_maybe_resize(uv_loop_t* loop) {
  size_t i;
  size_t n;
  struct pollfd* p;

  if (loop->poll_fds_used < loop->poll_fds_size)
    return;

  n = loop->poll_fds_size ? loop->poll_fds_size * 2 : 64;
  p = uv__reallocf(loop->poll_fds, n * sizeof(*loop->poll_fds));
  if (p == NULL)
    abort();

  loop->poll_fds = p;
  for (i = loop->poll_fds_size; i < n; i++) {
    loop->poll_fds[i].fd = -1;
    loop->poll_fds[i].events = 0;
    loop->poll_fds[i].revents = 0;
  }
  loop->poll_fds_size = n;
}

/* Primitive swap operation on poll fds array elements. */
static void uv__pollfds_swap(uv_loop_t* loop, size_t l, size_t r) {
  struct pollfd pfd;
  pfd = loop->poll_fds[l];
  loop->poll_fds[l] = loop->poll_fds[r];
  loop->poll_fds[r] = pfd;
}

/* Add a watcher's fd to our poll fds array with its pending events. */
static void uv__pollfds_add(uv_loop_t* loop, uv__io_t* w) {
  size_t i;
  struct pollfd* pe;

  /* If the fd is already in the set just update its events. */
  assert(!loop->poll_fds_iterating);
  for (i = 0; i < loop->poll_fds_used; ++i) {
    if (loop->poll_fds[i].fd == w->fd) {
      loop->poll_fds[i].events = w->pevents;
      return;
    }
  }

  /* Otherwise, allocate a new slot in the set for the fd. */
  uv__pollfds_maybe_resize(loop);
  pe = &loop->poll_fds[loop->poll_fds_used++];
  pe->fd = w->fd;
  pe->events = w->pevents;
}
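
/* Note: uv__pollfds_add() and uv__pollfds_del() both assert that uv__io_poll()
 * is not currently iterating over the array.  While iteration is in progress,
 * uv__platform_invalidate_fd() only marks a slot with fd = -1; the slot is
 * reclaimed afterwards by calling uv__pollfds_del(loop, -1).
 */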

/* Remove a watcher's fd from our poll fds array. */
static void uv__pollfds_del(uv_loop_t* loop, int fd) {
  size_t i;
  assert(!loop->poll_fds_iterating);
  for (i = 0; i < loop->poll_fds_used;) {
    if (loop->poll_fds[i].fd == fd) {
      /* swap to last position and remove */
      --loop->poll_fds_used;
      uv__pollfds_swap(loop, i, loop->poll_fds_used);
      loop->poll_fds[loop->poll_fds_used].fd = -1;
      loop->poll_fds[loop->poll_fds_used].events = 0;
      loop->poll_fds[loop->poll_fds_used].revents = 0;
      /* This method is called with an fd of -1 to purge the invalidated fds,
       * so we may possibly have multiples to remove.
       */
      if (-1 != fd)
        return;
    } else {
      /* We must only increment the loop counter when the fds do not match.
       * Otherwise, when we are purging an invalidated fd, the value just
       * swapped here from the previous end of the array will be skipped.
       */
      ++i;
    }
  }
}


void uv__io_poll(uv_loop_t* loop, int timeout) {
  sigset_t* pset;
  sigset_t set;
  uint64_t time_base;
  uint64_t time_diff;
  QUEUE* q;
  uv__io_t* w;
  size_t i;
  unsigned int nevents;
  int nfds;
  int have_signals;
  struct pollfd* pe;
  int fd;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  /* Take queued watchers and add their fds to our poll fds array. */
  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    uv__pollfds_add(loop, w);

    w->events = w->pevents;
  }

  /* Prepare a set of signals to block around poll(), if any. */
  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);
  time_base = loop->time;

  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  /* Loop calls to poll() and processing of results.  If we get some
   * results from poll() but they turn out not to be interesting to
   * our caller then we need to loop around and poll() again.
   */
  for (;;) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    if (pset != NULL)
      if (pthread_sigmask(SIG_BLOCK, pset, NULL))
        abort();
    nfds = poll(loop->poll_fds, (nfds_t)loop->poll_fds_used, timeout);
    if (pset != NULL)
      if (pthread_sigmask(SIG_UNBLOCK, pset, NULL))
        abort();
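
    /* poll() returns the number of entries with events in revents, 0 when it
     * timed out, or -1 with errno set (EINTR when interrupted by a signal);
     * the three cases are handled below.
     */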

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
        if (timeout == -1)
          continue;
        if (timeout > 0)
          goto update_timeout;
      }

      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    /* Tell uv__platform_invalidate_fd not to manipulate our array
     * while we are iterating over it.
     */
    loop->poll_fds_iterating = 1;

    /* Initialize a count of events that we care about. */
    nevents = 0;
    have_signals = 0;

    /* Loop over the entire poll fds array looking for returned events. */
    for (i = 0; i < loop->poll_fds_used; i++) {
      pe = loop->poll_fds + i;
      fd = pe->fd;

      /* Skip invalidated events, see uv__platform_invalidate_fd. */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, ignore. */
        uv__platform_invalidate_fd(loop, fd);
        continue;
      }

      /* Filter out events that user has not requested us to watch
       * (e.g. POLLNVAL).
       */
      pe->revents &= w->pevents | POLLERR | POLLHUP;

      if (pe->revents != 0) {
        /* Run signal watchers last. */
        if (w == &loop->signal_io_watcher) {
          have_signals = 1;
        } else {
          uv__metrics_update_idle_time(loop);
          w->cb(loop, w, pe->revents);
        }

        nevents++;
      }
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->poll_fds_iterating = 0;

    /* Purge invalidated fds from our poll fds array. */
    uv__pollfds_del(loop, -1);

    if (have_signals != 0)
      return;  /* Event loop should cycle now so don't poll again. */

    if (nevents != 0)
      return;

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    time_diff = loop->time - time_base;
    if (time_diff >= (uint64_t) timeout)
      return;

    timeout -= time_diff;
  }
}

/* Remove the given fd from our poll fds array because no one
 * is interested in its events anymore.
 */
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  size_t i;

  assert(fd >= 0);

  if (loop->poll_fds_iterating) {
    /* uv__io_poll is currently iterating.  Just invalidate fd. */
    for (i = 0; i < loop->poll_fds_used; i++)
      if (loop->poll_fds[i].fd == fd) {
        loop->poll_fds[i].fd = -1;
        loop->poll_fds[i].events = 0;
        loop->poll_fds[i].revents = 0;
      }
  } else {
    /* uv__io_poll is not iterating. Delete fd from the set. */
    uv__pollfds_del(loop, fd);
  }
}
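
/* Callers such as uv_poll_init() use the check below to reject, before a
 * watcher is ever started, file descriptors that poll() cannot watch;
 * poll() reports POLLNVAL in revents for an fd it considers invalid.
 */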

/* Check whether the given fd is supported by poll(). */
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct pollfd p[1];
  int rv;

  p[0].fd = fd;
  p[0].events = POLLIN;

  do
    rv = poll(p, 1, 0);
  while (rv == -1 && (errno == EINTR || errno == EAGAIN));

  if (rv == -1)
    return UV__ERR(errno);

  if (p[0].revents & POLLNVAL)
    return UV_EINVAL;

  return 0;
}