/* Copyright libuv project contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

/* POSIX defines poll() as a portable way to wait on file descriptors.
 * Here we maintain a dynamically sized array of file descriptors and
 * events to pass as the first argument to poll().
 */

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <errno.h>
#include <unistd.h>

int uv__platform_loop_init(uv_loop_t* loop) {
  loop->poll_fds = NULL;
  loop->poll_fds_used = 0;
  loop->poll_fds_size = 0;
  loop->poll_fds_iterating = 0;
  return 0;
}

void uv__platform_loop_delete(uv_loop_t* loop) {
  uv__free(loop->poll_fds);
  loop->poll_fds = NULL;
}

int uv__io_fork(uv_loop_t* loop) {
  uv__platform_loop_delete(loop);
  return uv__platform_loop_init(loop);
}

/* Allocate or dynamically resize our poll fds array. */
static void uv__pollfds_maybe_resize(uv_loop_t* loop) {
  size_t i;
  size_t n;
  struct pollfd* p;

  if (loop->poll_fds_used < loop->poll_fds_size)
    return;

  n = loop->poll_fds_size ? loop->poll_fds_size * 2 : 64;
  p = uv__reallocf(loop->poll_fds, n * sizeof(*loop->poll_fds));
  if (p == NULL)
    abort();

  loop->poll_fds = p;
  for (i = loop->poll_fds_size; i < n; i++) {
    loop->poll_fds[i].fd = -1;
    loop->poll_fds[i].events = 0;
    loop->poll_fds[i].revents = 0;
  }
  loop->poll_fds_size = n;
}

/* Primitive swap operation on poll fds array elements. */
static void uv__pollfds_swap(uv_loop_t* loop, size_t l, size_t r) {
  struct pollfd pfd;
  pfd = loop->poll_fds[l];
  loop->poll_fds[l] = loop->poll_fds[r];
  loop->poll_fds[r] = pfd;
}

/* Add a watcher's fd to our poll fds array with its pending events. */
static void uv__pollfds_add(uv_loop_t* loop, uv__io_t* w) {
  size_t i;
  struct pollfd* pe;

  /* If the fd is already in the set just update its events. */
  assert(!loop->poll_fds_iterating);
  for (i = 0; i < loop->poll_fds_used; ++i) {
    if (loop->poll_fds[i].fd == w->fd) {
      loop->poll_fds[i].events = w->pevents;
      return;
    }
  }

  /* Otherwise, allocate a new slot in the set for the fd. */
  uv__pollfds_maybe_resize(loop);
  pe = &loop->poll_fds[loop->poll_fds_used++];
  pe->fd = w->fd;
  pe->events = w->pevents;
}

/* Remove a watcher's fd from our poll fds array. */
static void uv__pollfds_del(uv_loop_t* loop, int fd) {
  size_t i;
  assert(!loop->poll_fds_iterating);
  for (i = 0; i < loop->poll_fds_used;) {
    if (loop->poll_fds[i].fd == fd) {
      /* swap to last position and remove */
      --loop->poll_fds_used;
      uv__pollfds_swap(loop, i, loop->poll_fds_used);
      loop->poll_fds[loop->poll_fds_used].fd = -1;
      loop->poll_fds[loop->poll_fds_used].events = 0;
      loop->poll_fds[loop->poll_fds_used].revents = 0;
      /* This method is called with an fd of -1 to purge the invalidated fds,
       * so we may possibly have multiples to remove.
       */
      if (-1 != fd)
        return;
    } else {
      /* We must only increment the loop counter when the fds do not match.
       * Otherwise, when we are purging an invalidated fd, the value just
       * swapped here from the previous end of the array will be skipped.
       */
      ++i;
    }
  }
}


void uv__io_poll(uv_loop_t* loop, int timeout) {
  sigset_t* pset;
  sigset_t set;
  uint64_t time_base;
  uint64_t time_diff;
  QUEUE* q;
  uv__io_t* w;
  size_t i;
  unsigned int nevents;
  int nfds;
  int have_signals;
  struct pollfd* pe;
  int fd;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  /* Take queued watchers and add their fds to our poll fds array. */
  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    uv__pollfds_add(loop, w);

    w->events = w->pevents;
  }

  /* Prepare a set of signals to block around poll(), if any. */
  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);
  time_base = loop->time;

  /* Loop calls to poll() and processing of results. If we get some
   * results from poll() but they turn out not to be interesting to
   * our caller then we need to loop around and poll() again.
   */
  for (;;) {
    if (pset != NULL)
      if (pthread_sigmask(SIG_BLOCK, pset, NULL))
        abort();
    nfds = poll(loop->poll_fds, (nfds_t)loop->poll_fds_used, timeout);
    if (pset != NULL)
      if (pthread_sigmask(SIG_UNBLOCK, pset, NULL))
        abort();

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    /* Tell uv__platform_invalidate_fd not to manipulate our array
     * while we are iterating over it.
     */
    loop->poll_fds_iterating = 1;

    /* Initialize a count of events that we care about. */
    nevents = 0;
    have_signals = 0;

    /* Loop over the entire poll fds array looking for returned events. */
    for (i = 0; i < loop->poll_fds_used; i++) {
      pe = loop->poll_fds + i;
      fd = pe->fd;

      /* Skip invalidated events, see uv__platform_invalidate_fd. */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, ignore. */
        uv__platform_invalidate_fd(loop, fd);
        continue;
      }

      /* Filter out events that user has not requested us to watch
       * (e.g. POLLNVAL).
       */
      pe->revents &= w->pevents | POLLERR | POLLHUP;

      if (pe->revents != 0) {
        /* Run signal watchers last. */
        if (w == &loop->signal_io_watcher) {
          have_signals = 1;
        } else {
          w->cb(loop, w, pe->revents);
        }

        nevents++;
      }
    }

    if (have_signals != 0)
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);

    loop->poll_fds_iterating = 0;

    /* Purge invalidated fds from our poll fds array. */
    uv__pollfds_del(loop, -1);

    if (have_signals != 0)
      return;  /* Event loop should cycle now so don't poll again. */

    if (nevents != 0)
      return;

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    time_diff = loop->time - time_base;
    if (time_diff >= (uint64_t) timeout)
      return;

    timeout -= time_diff;
  }
}

/* Remove the given fd from our poll fds array because no one
 * is interested in its events anymore.
 */
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  size_t i;

  assert(fd >= 0);

  if (loop->poll_fds_iterating) {
    /* uv__io_poll is currently iterating. Just invalidate fd. */
    for (i = 0; i < loop->poll_fds_used; i++)
      if (loop->poll_fds[i].fd == fd) {
        loop->poll_fds[i].fd = -1;
        loop->poll_fds[i].events = 0;
        loop->poll_fds[i].revents = 0;
      }
  } else {
    /* uv__io_poll is not iterating. Delete fd from the set. */
    uv__pollfds_del(loop, fd);
  }
}

/* Check whether the given fd is supported by poll(). */
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct pollfd p[1];
  int rv;

  p[0].fd = fd;
  p[0].events = POLLIN;

  do
    rv = poll(p, 1, 0);
  while (rv == -1 && (errno == EINTR || errno == EAGAIN));

  if (rv == -1)
    return UV__ERR(errno);

  if (p[0].revents & POLLNVAL)
    return UV_EINVAL;

  return 0;
}