1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 2 * Permission is hereby granted, free of charge, to any person obtaining a copy 3 * of this software and associated documentation files (the "Software"), to 4 * deal in the Software without restriction, including without limitation the 5 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 6 * sell copies of the Software, and to permit persons to whom the Software is 7 * furnished to do so, subject to the following conditions: 8 * 9 * The above copyright notice and this permission notice shall be included in 10 * all copies or substantial portions of the Software. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 15 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 16 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 17 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 18 * IN THE SOFTWARE. 
*/

#include "uv.h"
#include "internal.h"
#include "strtok.h"

#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
#include <stdlib.h>
#include <string.h> /* strerror */
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>  /* O_CLOEXEC */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
#include <sys/uio.h> /* writev */
#include <sys/resource.h> /* getrusage */
#include <pwd.h>
#include <sys/utsname.h>
#include <sys/time.h>

#ifdef __sun
# include <sys/filio.h>
# include <sys/types.h>
# include <sys/wait.h>
#endif

#if defined(__APPLE__)
# include <sys/filio.h>
# endif /* defined(__APPLE__) */


#if defined(__APPLE__) && !TARGET_OS_IPHONE
# include <crt_externs.h>
# include <mach-o/dyld.h> /* _NSGetExecutablePath */
# define environ (*_NSGetEnviron())
#else /* defined(__APPLE__) && !TARGET_OS_IPHONE */
extern char** environ;
#endif /* !(defined(__APPLE__) && !TARGET_OS_IPHONE) */


#if defined(__DragonFly__)      || \
    defined(__FreeBSD__)        || \
    defined(__FreeBSD_kernel__) || \
    defined(__NetBSD__)         || \
    defined(__OpenBSD__)
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/wait.h>
# if defined(__FreeBSD__)
#  define uv__accept4 accept4
# endif
# if defined(__NetBSD__)
#  define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
# endif
#endif

#if defined(__MVS__)
# include <sys/ioctl.h>
# include "zos-sys-info.h"
#endif

#if defined(__linux__)
# include <sched.h>
# include <sys/syscall.h>
# define uv__accept4 accept4
#endif

/* TSan's syscall hooks let close() be intercepted when made via syscall(). */
#if defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
# include
<sanitizer/linux_syscall_hooks.h>
#endif

static void uv__run_pending(uv_loop_t* loop);

/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
              sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
              sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));


/* Current high-resolution timestamp from the platform's precise clock. */
uint64_t uv_hrtime(void) {
  return uv__hrtime(UV_CLOCK_PRECISE);
}


/* Start closing `handle`: mark it closing, record the callback, run the
 * type-specific close routine, and (for most types) queue the handle so the
 * close callback runs on a later loop iteration. */
void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!uv__is_closing(handle));

  handle->flags |= UV_HANDLE_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE:
    uv__pipe_close((uv_pipe_t*)handle);
    break;

  case UV_TTY:
    uv__stream_close((uv_stream_t*)handle);
    break;

  case UV_TCP:
    uv__tcp_close((uv_tcp_t*)handle);
    break;

  case UV_UDP:
    uv__udp_close((uv_udp_t*)handle);
    break;

  case UV_PREPARE:
    uv__prepare_close((uv_prepare_t*)handle);
    break;

  case UV_CHECK:
    uv__check_close((uv_check_t*)handle);
    break;

  case UV_IDLE:
    uv__idle_close((uv_idle_t*)handle);
    break;

  case UV_ASYNC:
    uv__async_close((uv_async_t*)handle);
    break;

  case UV_TIMER:
    uv__timer_close((uv_timer_t*)handle);
    break;

  case UV_PROCESS:
    uv__process_close((uv_process_t*)handle);
    break;

  case UV_FS_EVENT:
    uv__fs_event_close((uv_fs_event_t*)handle);
#if defined(__sun) || defined(__MVS__)
    /*
     * On Solaris, illumos, and z/OS we will not be able to dissociate the
     * watcher for an event which is pending delivery, so we cannot always call
     * uv__make_close_pending() straight away.
The backend will call the
     * function once the event has cleared.
     */
    return;
#endif
    break;

  case UV_POLL:
    uv__poll_close((uv_poll_t*)handle);
    break;

  case UV_FS_POLL:
    uv__fs_poll_close((uv_fs_poll_t*)handle);
    /* Poll handles use file system requests, and one of them may still be
     * running. The poll code will call uv__make_close_pending() for us. */
    return;

  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    break;

  default:
    assert(0);
  }

  uv__make_close_pending(handle);
}

/* Get (*value == 0) or set (*value != 0) a SOL_SOCKET-level option on the
 * handle's file descriptor. Only TCP, pipe, and UDP handles are supported. */
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  int r;
  int fd;
  socklen_t len;

  if (handle == NULL || value == NULL)
    return UV_EINVAL;

  if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
    fd = uv__stream_fd((uv_stream_t*) handle);
  else if (handle->type == UV_UDP)
    fd = ((uv_udp_t *) handle)->io_watcher.fd;
  else
    return UV_ENOTSUP;

  len = sizeof(*value);

  if (*value == 0)
    r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
  else
    r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);

  if (r < 0)
    return UV__ERR(errno);

  return 0;
}

/* Push a closing handle onto the loop's singly-linked closing_handles list;
 * uv__run_closing_handles() will finish the close on a later iteration. */
void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}

/* Maximum number of iovecs accepted by writev()/readv() on this platform. */
int uv__getiovmax(void) {
#if defined(IOV_MAX)
  return IOV_MAX;
#elif defined(_SC_IOV_MAX)
  static int iovmax_cached = -1;
  int iovmax;

  iovmax = uv__load_relaxed(&iovmax_cached);
  if (iovmax != -1)
    return iovmax;

  /* On some embedded devices (arm-linux-uclibc based ip camera),
   * sysconf(_SC_IOV_MAX) can not get the correct value. The return
   * value is -1 and the errno is EINPROGRESS. Degrade the value to 1.
 */
  iovmax = sysconf(_SC_IOV_MAX);
  if (iovmax == -1)
    iovmax = 1;

  uv__store_relaxed(&iovmax_cached, iovmax);

  return iovmax;
#else
  return 1024;
#endif
}


/* Complete the close of `handle`: run type-specific teardown, unref it,
 * remove it from the loop's handle queue, and invoke close_cb if set. */
static void uv__finish_close(uv_handle_t* handle) {
  uv_signal_t* sh;

  /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still
   * possible for it to be active in the sense that uv__is_active() returns
   * true.
   *
   * A good example is when the user calls uv_shutdown(), immediately followed
   * by uv_close(). The handle is considered active at this point because the
   * completion of the shutdown req is still pending.
   */
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->flags |= UV_HANDLE_CLOSED;

  switch (handle->type) {
    case UV_PREPARE:
    case UV_CHECK:
    case UV_IDLE:
    case UV_ASYNC:
    case UV_TIMER:
    case UV_PROCESS:
    case UV_FS_EVENT:
    case UV_FS_POLL:
    case UV_POLL:
      break;

    case UV_SIGNAL:
      /* If there are any caught signals "trapped" in the signal pipe,
       * we can't call the close callback yet. Reinserting the handle
       * into the closing queue makes the event loop spin but that's
       * okay because we only need to deliver the pending events.
       */
      sh = (uv_signal_t*) handle;
      if (sh->caught_signals > sh->dispatched_signals) {
        handle->flags ^= UV_HANDLE_CLOSED;
        uv__make_close_pending(handle);  /* Back into the queue.
 */
        return;
      }
      break;

    case UV_NAMED_PIPE:
    case UV_TCP:
    case UV_TTY:
      uv__stream_destroy((uv_stream_t*)handle);
      break;

    case UV_UDP:
      uv__udp_finish_close((uv_udp_t*)handle);
      break;

    default:
      assert(0);
      break;
  }

  uv__handle_unref(handle);
  QUEUE_REMOVE(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}


/* Drain the loop's closing_handles list, finishing each close. New handles
 * queued while draining are picked up on the next loop iteration. */
static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* p;
  uv_handle_t* q;

  p = loop->closing_handles;
  loop->closing_handles = NULL;

  while (p) {
    q = p->next_closing;
    uv__finish_close(p);
    p = q;
  }
}


int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}


int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}


/* The loop is alive while it has active handles or requests, pending
 * callbacks, or handles still waiting to finish closing. */
static int uv__loop_alive(const uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         !QUEUE_EMPTY(&loop->pending_queue) ||
         loop->closing_handles != NULL;
}


/* Poll timeout in ms: the time until the next timer when it is safe to
 * sleep, 0 when there is work that must run immediately. */
static int uv__backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag == 0 &&
      /* uv__loop_alive(loop) && */
      (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
      QUEUE_EMPTY(&loop->pending_queue) &&
      QUEUE_EMPTY(&loop->idle_handles) &&
      loop->closing_handles == NULL)
    return uv__next_timeout(loop);
  return 0;
}


int uv_backend_timeout(const uv_loop_t* loop) {
  if (QUEUE_EMPTY(&loop->watcher_queue))
    return uv__backend_timeout(loop);
  /* Need to call uv_run to update the backend fd state.
 */
  return 0;
}


int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}


/* Run the event loop in the given mode. Returns non-zero if the loop is
 * still alive (more callbacks are expected) when it returns. */
int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;
  int can_sleep;

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  while (r != 0 && loop->stop_flag == 0) {
    uv__update_time(loop);
    uv__run_timers(loop);

    /* Sampled before uv__run_pending/uv__run_idle so callbacks queued by
     * those phases still force a zero poll timeout below. */
    can_sleep =
        QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles);

    uv__run_pending(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
      timeout = uv__backend_timeout(loop);

    uv__io_poll(loop, timeout);

    /* Process immediate callbacks (e.g. write_cb) a small fixed number of
     * times to avoid loop starvation.*/
    for (r = 0; r < 8 && !QUEUE_EMPTY(&loop->pending_queue); r++)
      uv__run_pending(loop);

    /* Run one final update on the provider_idle_time in case uv__io_poll
     * returned because the timeout expired, but no events were received. This
     * call will be ignored if the provider_entry_time was either never set (if
     * the timeout == 0) or was already updated b/c an event was received.
     */
    uv__metrics_update_idle_time(loop);

    uv__run_check(loop);
    uv__run_closing_handles(loop);

    if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must have
       * been invoked when it returns. uv__io_poll() can return without doing
       * I/O (meaning: no callbacks) when its timeout expires - which means we
       * have pending timers that satisfy the forward progress constraint.
       *
       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
       * the check.
       */
      uv__update_time(loop);
      uv__run_timers(loop);
    }

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}


void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}


int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}


/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
  int sockfd;
  int err;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
  if (sockfd != -1)
    return sockfd;

  /* EINVAL means the flags are not understood; fall through to the
   * two-step fallback below. Any other error is final. */
  if (errno != EINVAL)
    return UV__ERR(errno);
#endif

  sockfd = socket(domain, type, protocol);
  if (sockfd == -1)
    return UV__ERR(errno);

  err = uv__nonblock(sockfd, 1);
  if (err == 0)
    err = uv__cloexec(sockfd, 1);

  if (err) {
    uv__close(sockfd);
    return err;
  }

#if defined(SO_NOSIGPIPE)
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

  return sockfd;
}

/* get a file pointer to a file in read-only and close-on-exec mode */
FILE* uv__open_file(const char* path) {
  int fd;
  FILE* fp;

  fd = uv__open_cloexec(path, O_RDONLY);
  if (fd < 0)
    return NULL;

  fp = fdopen(fd, "r");
  if (fp == NULL)
    uv__close(fd);  /* fdopen failed; don't leak the descriptor. */

  return fp;
}


/* Accept a connection on `sockfd`, returning a non-blocking close-on-exec
 * peer descriptor, or a negative libuv error code. */
int uv__accept(int sockfd) {
  int peerfd;
  int err;

  (void) &err;
  assert(sockfd >= 0);

  do
#ifdef uv__accept4
    peerfd = uv__accept4(sockfd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
#else
    peerfd = accept(sockfd, NULL, NULL);
#endif
  while (peerfd == -1 && errno == EINTR);

  if (peerfd == -1)
    return UV__ERR(errno);

#ifndef uv__accept4
  /* No atomic accept4(); set the flags after the fact. */
  err = uv__cloexec(peerfd, 1);
  if (err == 0)
    err = uv__nonblock(peerfd, 1);

  if (err != 0) {
    uv__close(peerfd);
    return err;
  }
#endif

  return peerfd;
}


/* close() on macos has the "interesting" quirk that it fails with EINTR
 * without closing the file descriptor when a thread is in the cancel state.
 * That's why libuv calls close$NOCANCEL() instead.
 *
 * glibc on linux has a similar issue: close() is a cancellation point and
 * will unwind the thread when it's in the cancel state. Work around that
 * by making the system call directly. Musl libc is unaffected.
 */
int uv__close_nocancel(int fd) {
#if defined(__APPLE__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension"
#if defined(__LP64__) || TARGET_OS_IPHONE
  extern int close$NOCANCEL(int);
  return close$NOCANCEL(fd);
#else
  extern int close$NOCANCEL$UNIX2003(int);
  return close$NOCANCEL$UNIX2003(fd);
#endif
#pragma GCC diagnostic pop
#elif defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
  long rc;
  __sanitizer_syscall_pre_close(fd);
  rc = syscall(SYS_close, fd);
  __sanitizer_syscall_post_close(rc, fd);
  return rc;
#elif defined(__linux__) && !defined(__SANITIZE_THREAD__)
  return syscall(SYS_close, fd);
#else
  return close(fd);
#endif
}


/* Close `fd` without asserting that it is not a stdio descriptor.
 * Preserves the caller's errno. */
int uv__close_nocheckstdio(int fd) {
  int saved_errno;
  int rc;

  assert(fd > -1);  /* Catch uninitialized io_watcher.fd bugs. */

  saved_errno = errno;
  rc = uv__close_nocancel(fd);
  if (rc == -1) {
    rc = UV__ERR(errno);
    if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
      rc = 0;    /* The close is in progress, not an error.
 */
    errno = saved_errno;
  }

  return rc;
}


int uv__close(int fd) {
  assert(fd > STDERR_FILENO);  /* Catch stdio close bugs. */
#if defined(__MVS__)
  SAVE_ERRNO(epoll_file_close(fd));
#endif
  return uv__close_nocheckstdio(fd);
}

#if UV__NONBLOCK_IS_IOCTL
/* Set (set != 0) or clear the non-blocking flag via FIONBIO. */
int uv__nonblock_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, FIONBIO, &set);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}
#endif


/* Set (set != 0) or clear the O_NONBLOCK flag with fcntl(). */
int uv__nonblock_fcntl(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFL);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return UV__ERR(errno);

  /* Bail out now if already set/clear. */
  if (!!(r & O_NONBLOCK) == !!set)
    return 0;

  if (set)
    flags = r | O_NONBLOCK;
  else
    flags = r & ~O_NONBLOCK;

  do
    r = fcntl(fd, F_SETFL, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


/* Set (set != 0) or clear the FD_CLOEXEC flag on `fd`. */
int uv__cloexec(int fd, int set) {
  int flags;
  int r;

  flags = 0;
  if (set)
    flags = FD_CLOEXEC;

  do
    r = fcntl(fd, F_SETFD, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


/* recvmsg() wrapper that marks any received SCM_RIGHTS descriptors
 * close-on-exec, atomically where MSG_CMSG_CLOEXEC exists. */
ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
#if defined(__ANDROID__)   || \
    defined(__DragonFly__) || \
    defined(__FreeBSD__)   || \
    defined(__NetBSD__)    || \
    defined(__OpenBSD__)   || \
    defined(__linux__)
  ssize_t rc;
  rc = recvmsg(fd, msg, flags | MSG_CMSG_CLOEXEC);
  if (rc == -1)
    return UV__ERR(errno);
  return rc;
#else
  struct cmsghdr* cmsg;
  int* pfd;
  int* end;
  ssize_t rc;
  rc = recvmsg(fd, msg, flags);
  if (rc == -1)
    return UV__ERR(errno);
  if (msg->msg_controllen == 0)
    return rc;
  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg =
CMSG_NXTHDR(msg, cmsg))
    if (cmsg->cmsg_type == SCM_RIGHTS)
      for (pfd = (int*) CMSG_DATA(cmsg),
           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
           pfd < end;
           pfd += 1)
        uv__cloexec(*pfd, 1);
  return rc;
#endif
}


/* Copy the current working directory into `buffer` (without a trailing
 * slash). On too-small buffer, *size is set to the required size and
 * UV_ENOBUFS is returned. */
int uv_cwd(char* buffer, size_t* size) {
  char scratch[1 + UV__PATH_MAX];

  if (buffer == NULL || size == NULL)
    return UV_EINVAL;

  /* Try to read directly into the user's buffer first... */
  if (getcwd(buffer, *size) != NULL)
    goto fixup;

  if (errno != ERANGE)
    return UV__ERR(errno);

  /* ...or into scratch space if the user's buffer is too small
   * so we can report how much space to provide on the next try.
   */
  if (getcwd(scratch, sizeof(scratch)) == NULL)
    return UV__ERR(errno);

  buffer = scratch;

fixup:

  *size = strlen(buffer);

  if (*size > 1 && buffer[*size - 1] == '/') {
    *size -= 1;
    buffer[*size] = '\0';
  }

  if (buffer == scratch) {
    *size += 1;
    return UV_ENOBUFS;
  }

  return 0;
}


int uv_chdir(const char* dir) {
  if (chdir(dir))
    return UV__ERR(errno);

  return 0;
}


void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
   * first 16 file descriptors. After that, bail out after the first error.
   */
  for (fd = 0; ; fd++)
    if (uv__cloexec(fd, 1) && fd > 15)
      break;
}


/* Fetch the OS file descriptor backing `handle` into *fd.
 * Returns UV_EINVAL for unsupported types, UV_EBADF when closing or no fd. */
int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  int fd_out;

  switch (handle->type) {
  case UV_TCP:
  case UV_NAMED_PIPE:
  case UV_TTY:
    fd_out = uv__stream_fd((uv_stream_t*) handle);
    break;

  case UV_UDP:
    fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
    break;

  case UV_POLL:
    fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
    break;

  default:
    return UV_EINVAL;
  }

  if (uv__is_closing(handle) || fd_out == -1)
    return UV_EBADF;

  *fd = fd_out;
  return 0;
}


/* Run the callbacks of all watchers on the pending queue with POLLOUT. */
static void uv__run_pending(uv_loop_t* loop) {
  QUEUE* q;
  QUEUE pq;
  uv__io_t* w;

  QUEUE_MOVE(&loop->pending_queue, &pq);

  while (!QUEUE_EMPTY(&pq)) {
    q = QUEUE_HEAD(&pq);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    w = QUEUE_DATA(q, uv__io_t, pending_queue);
    w->cb(loop, w, POLLOUT);
  }
}


/* Round `val` up to the next power of two (bit-smearing trick). */
static unsigned int next_power_of_two(unsigned int val) {
  val -= 1;
  val |= val >> 1;
  val |= val >> 2;
  val |= val >> 4;
  val |= val >> 8;
  val |= val >> 16;
  val += 1;
  return val;
}

/* Grow loop->watchers so it can be indexed up to len-1, preserving the two
 * fake-watcher slots stored past the end of the array. */
static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  /* Preserve fake watcher list and count at the end of the watchers */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  nwatchers = next_power_of_two(len + 2) - 2;
  watchers = uv__reallocf(loop->watchers,
                          (nwatchers + 2) * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    abort();
  for (i =
loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;
  watchers[nwatchers] = fake_watcher_list;
  watchers[nwatchers + 1] = fake_watcher_count;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
}


/* Initialize an I/O watcher with its callback and file descriptor. */
void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  assert(cb != NULL);
  assert(fd >= -1);
  QUEUE_INIT(&w->pending_queue);
  QUEUE_INIT(&w->watcher_queue);
  w->cb = cb;
  w->fd = fd;
  w->events = 0;
  w->pevents = 0;

#if defined(UV_HAVE_KQUEUE)
  w->rcount = 0;
  w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
}


/* Add `events` to the watcher's interest set and register it with the loop
 * so the backend picks it up on the next poll. */
void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents)
    return;
#endif

  if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}


/* Remove `events` from the watcher's interest set; fully deregister it when
 * no interest remains. */
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started.
   */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    QUEUE_REMOVE(&w->watcher_queue);
    QUEUE_INIT(&w->watcher_queue);
    w->events = 0;

    if (w == loop->watchers[w->fd]) {
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
    }
  }
  else if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}


/* Fully stop a watcher and drop any backend state tied to its fd. */
void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
  QUEUE_REMOVE(&w->pending_queue);

  /* Remove stale events for this file descriptor */
  if (w->fd != -1)
    uv__platform_invalidate_fd(loop, w->fd);
}


/* Queue the watcher's callback to run on the next uv__run_pending() pass. */
void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (QUEUE_EMPTY(&w->pending_queue))
    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
}


int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}


int uv__fd_exists(uv_loop_t* loop, int fd) {
  return (unsigned) fd < loop->nwatchers && loop->watchers[fd] != NULL;
}


/* Fill `rusage` from getrusage(RUSAGE_SELF); some fields are unavailable
 * on z/OS and Haiku. */
int uv_getrusage(uv_rusage_t* rusage) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage))
    return UV__ERR(errno);

  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;

  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;

#if !defined(__MVS__) && !defined(__HAIKU__)
  rusage->ru_maxrss = usage.ru_maxrss;
  rusage->ru_ixrss = usage.ru_ixrss;
  rusage->ru_idrss = usage.ru_idrss;
  rusage->ru_isrss = usage.ru_isrss;
  rusage->ru_minflt = usage.ru_minflt;
  rusage->ru_majflt = usage.ru_majflt;
  rusage->ru_nswap = usage.ru_nswap;
  rusage->ru_inblock = usage.ru_inblock;
  rusage->ru_oublock = usage.ru_oublock;
  rusage->ru_msgsnd = usage.ru_msgsnd;
  rusage->ru_msgrcv = usage.ru_msgrcv;
  rusage->ru_nsignals = usage.ru_nsignals;
  rusage->ru_nvcsw = usage.ru_nvcsw;
  rusage->ru_nivcsw = usage.ru_nivcsw;
#endif

  return 0;
}


/* open() with close-on-exec, atomically via O_CLOEXEC when available. */
int uv__open_cloexec(const char* path, int flags) {
#if defined(O_CLOEXEC)
  int fd;

  fd = open(path, flags | O_CLOEXEC);
  if (fd == -1)
    return UV__ERR(errno);

  return fd;
#else  /* O_CLOEXEC */
  int err;
  int fd;

  fd = open(path, flags);
  if (fd == -1)
    return UV__ERR(errno);

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }

  return fd;
#endif  /* O_CLOEXEC */
}


/* Read up to len-1 bytes of `filename` into `buf` and NUL-terminate it. */
int uv__slurp(const char* filename, char* buf, size_t len) {
  ssize_t n;
  int fd;

  assert(len > 0);

  fd = uv__open_cloexec(filename, O_RDONLY);
  if (fd < 0)
    return fd;

  do
    n = read(fd, buf, len - 1);
  while (n == -1 && errno == EINTR);

  if (uv__close_nocheckstdio(fd))
    abort();

  if (n < 0)
    return UV__ERR(errno);

  buf[n] = '\0';

  return 0;
}


/* dup2() with close-on-exec on the new descriptor, atomically via dup3()
 * where available. */
int uv__dup2_cloexec(int oldfd, int newfd) {
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__)
  int r;

  r = dup3(oldfd, newfd, O_CLOEXEC);
  if (r == -1)
    return UV__ERR(errno);

  return r;
#else
  int err;
  int r;

  r = dup2(oldfd, newfd);  /* Never retry.
 */
  if (r == -1)
    return UV__ERR(errno);

  err = uv__cloexec(newfd, 1);
  if (err != 0) {
    uv__close(newfd);
    return err;
  }

  return r;
#endif
}


/* Copy the user's home directory into `buffer`: $HOME when set, otherwise
 * the passwd entry's home directory. UV_ENOBUFS reports the needed size. */
int uv_os_homedir(char* buffer, size_t* size) {
  uv_passwd_t pwd;
  size_t len;
  int r;

  /* Check if the HOME environment variable is set first. The task of
     performing input validation on buffer and size is taken care of by
     uv_os_getenv(). */
  r = uv_os_getenv("HOME", buffer, size);

  if (r != UV_ENOENT)
    return r;

  /* HOME is not set, so call uv__getpwuid_r() */
  r = uv__getpwuid_r(&pwd);

  if (r != 0) {
    return r;
  }

  len = strlen(pwd.homedir);

  if (len >= *size) {
    *size = len + 1;
    uv_os_free_passwd(&pwd);
    return UV_ENOBUFS;
  }

  memcpy(buffer, pwd.homedir, len + 1);
  *size = len;
  uv_os_free_passwd(&pwd);

  return 0;
}


/* Copy the temp directory (TMPDIR/TMP/TEMP/TEMPDIR, else a platform
 * default) into `buffer`, without a trailing slash. */
int uv_os_tmpdir(char* buffer, size_t* size) {
  const char* buf;
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

#define CHECK_ENV_VAR(name)                                                   \
  do {                                                                        \
    buf = getenv(name);                                                       \
    if (buf != NULL)                                                          \
      goto return_buffer;                                                     \
  }                                                                           \
  while (0)

  /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
  CHECK_ENV_VAR("TMPDIR");
  CHECK_ENV_VAR("TMP");
  CHECK_ENV_VAR("TEMP");
  CHECK_ENV_VAR("TEMPDIR");

#undef CHECK_ENV_VAR

  /* No temp environment variables defined */
#if defined(__ANDROID__)
  buf = "/data/local/tmp";
#else
  buf = "/tmp";
#endif

return_buffer:
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  /* The returned directory should not have a trailing slash.
 */
  if (len > 1 && buf[len - 1] == '/') {
    len--;
  }

  memcpy(buffer, buf, len + 1);
  buffer[len] = '\0';
  *size = len;

  return 0;
}


/* Fill `pwd` with the effective user's passwd entry (name, home, shell,
 * uid, gid). All strings live in one allocation owned by pwd->username;
 * release with uv_os_free_passwd(). */
int uv__getpwuid_r(uv_passwd_t* pwd) {
  struct passwd pw;
  struct passwd* result;
  char* buf;
  uid_t uid;
  size_t bufsize;
  size_t name_size;
  size_t homedir_size;
  size_t shell_size;
  int r;

  if (pwd == NULL)
    return UV_EINVAL;

  uid = geteuid();

  /* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would get the suggested size, but it
   * is frequently 1024 or 4096, so we can just use that directly. The pwent
   * will not usually be large. */
  for (bufsize = 2000;; bufsize *= 2) {
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    do
      r = getpwuid_r(uid, &pw, buf, bufsize, &result);
    while (r == EINTR);

    /* The buffer is only kept when the lookup succeeded; ERANGE retries
     * with a doubled buffer. */
    if (r != 0 || result == NULL)
      uv__free(buf);

    if (r != ERANGE)
      break;
  }

  if (r != 0)
    return UV__ERR(r);

  if (result == NULL)
    return UV_ENOENT;

  /* Allocate memory for the username, shell, and home directory */
  name_size = strlen(pw.pw_name) + 1;
  homedir_size = strlen(pw.pw_dir) + 1;
  shell_size = strlen(pw.pw_shell) + 1;
  pwd->username = uv__malloc(name_size + homedir_size + shell_size);

  if (pwd->username == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the username */
  memcpy(pwd->username, pw.pw_name, name_size);

  /* Copy the home directory */
  pwd->homedir = pwd->username + name_size;
  memcpy(pwd->homedir, pw.pw_dir, homedir_size);

  /* Copy the shell */
  pwd->shell = pwd->homedir + homedir_size;
  memcpy(pwd->shell, pw.pw_shell, shell_size);

  /* Copy the uid and gid */
  pwd->uid = pw.pw_uid;
  pwd->gid = pw.pw_gid;

  uv__free(buf);

  return 0;
}


void
uv_os_free_passwd(uv_passwd_t* pwd) { 1242 if (pwd == NULL) 1243 return; 1244 1245 /* 1246 The memory for name, shell, and homedir are allocated in a single 1247 uv__malloc() call. The base of the pointer is stored in pwd->username, so 1248 that is the field that needs to be freed. 1249 */ 1250 uv__free(pwd->username); 1251 pwd->username = NULL; 1252 pwd->shell = NULL; 1253 pwd->homedir = NULL; 1254 } 1255 1256 1257 int uv_os_get_passwd(uv_passwd_t* pwd) { 1258 return uv__getpwuid_r(pwd); 1259 } 1260 1261 1262 int uv_translate_sys_error(int sys_errno) { 1263 /* If < 0 then it's already a libuv error. */ 1264 return sys_errno <= 0 ? sys_errno : -sys_errno; 1265 } 1266 1267 1268 int uv_os_environ(uv_env_item_t** envitems, int* count) { 1269 int i, j, cnt; 1270 uv_env_item_t* envitem; 1271 1272 *envitems = NULL; 1273 *count = 0; 1274 1275 for (i = 0; environ[i] != NULL; i++); 1276 1277 *envitems = uv__calloc(i, sizeof(**envitems)); 1278 1279 if (*envitems == NULL) 1280 return UV_ENOMEM; 1281 1282 for (j = 0, cnt = 0; j < i; j++) { 1283 char* buf; 1284 char* ptr; 1285 1286 if (environ[j] == NULL) 1287 break; 1288 1289 buf = uv__strdup(environ[j]); 1290 if (buf == NULL) 1291 goto fail; 1292 1293 ptr = strchr(buf, '='); 1294 if (ptr == NULL) { 1295 uv__free(buf); 1296 continue; 1297 } 1298 1299 *ptr = '\0'; 1300 1301 envitem = &(*envitems)[cnt]; 1302 envitem->name = buf; 1303 envitem->value = ptr + 1; 1304 1305 cnt++; 1306 } 1307 1308 *count = cnt; 1309 return 0; 1310 1311 fail: 1312 for (i = 0; i < cnt; i++) { 1313 envitem = &(*envitems)[cnt]; 1314 uv__free(envitem->name); 1315 } 1316 uv__free(*envitems); 1317 1318 *envitems = NULL; 1319 *count = 0; 1320 return UV_ENOMEM; 1321 } 1322 1323 1324 int uv_os_getenv(const char* name, char* buffer, size_t* size) { 1325 char* var; 1326 size_t len; 1327 1328 if (name == NULL || buffer == NULL || size == NULL || *size == 0) 1329 return UV_EINVAL; 1330 1331 var = getenv(name); 1332 1333 if (var == NULL) 1334 return UV_ENOENT; 1335 
1336 len = strlen(var); 1337 1338 if (len >= *size) { 1339 *size = len + 1; 1340 return UV_ENOBUFS; 1341 } 1342 1343 memcpy(buffer, var, len + 1); 1344 *size = len; 1345 1346 return 0; 1347 } 1348 1349 1350 int uv_os_setenv(const char* name, const char* value) { 1351 if (name == NULL || value == NULL) 1352 return UV_EINVAL; 1353 1354 if (setenv(name, value, 1) != 0) 1355 return UV__ERR(errno); 1356 1357 return 0; 1358 } 1359 1360 1361 int uv_os_unsetenv(const char* name) { 1362 if (name == NULL) 1363 return UV_EINVAL; 1364 1365 if (unsetenv(name) != 0) 1366 return UV__ERR(errno); 1367 1368 return 0; 1369 } 1370 1371 1372 int uv_os_gethostname(char* buffer, size_t* size) { 1373 /* 1374 On some platforms, if the input buffer is not large enough, gethostname() 1375 succeeds, but truncates the result. libuv can detect this and return ENOBUFS 1376 instead by creating a large enough buffer and comparing the hostname length 1377 to the size input. 1378 */ 1379 char buf[UV_MAXHOSTNAMESIZE]; 1380 size_t len; 1381 1382 if (buffer == NULL || size == NULL || *size == 0) 1383 return UV_EINVAL; 1384 1385 if (gethostname(buf, sizeof(buf)) != 0) 1386 return UV__ERR(errno); 1387 1388 buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. 
*/ 1389 len = strlen(buf); 1390 1391 if (len >= *size) { 1392 *size = len + 1; 1393 return UV_ENOBUFS; 1394 } 1395 1396 memcpy(buffer, buf, len + 1); 1397 *size = len; 1398 return 0; 1399 } 1400 1401 1402 uv_os_fd_t uv_get_osfhandle(int fd) { 1403 return fd; 1404 } 1405 1406 int uv_open_osfhandle(uv_os_fd_t os_fd) { 1407 return os_fd; 1408 } 1409 1410 uv_pid_t uv_os_getpid(void) { 1411 return getpid(); 1412 } 1413 1414 1415 uv_pid_t uv_os_getppid(void) { 1416 return getppid(); 1417 } 1418 1419 1420 int uv_os_getpriority(uv_pid_t pid, int* priority) { 1421 int r; 1422 1423 if (priority == NULL) 1424 return UV_EINVAL; 1425 1426 errno = 0; 1427 r = getpriority(PRIO_PROCESS, (int) pid); 1428 1429 if (r == -1 && errno != 0) 1430 return UV__ERR(errno); 1431 1432 *priority = r; 1433 return 0; 1434 } 1435 1436 1437 int uv_os_setpriority(uv_pid_t pid, int priority) { 1438 if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW) 1439 return UV_EINVAL; 1440 1441 if (setpriority(PRIO_PROCESS, (int) pid, priority) != 0) 1442 return UV__ERR(errno); 1443 1444 return 0; 1445 } 1446 1447 1448 int uv_os_uname(uv_utsname_t* buffer) { 1449 struct utsname buf; 1450 int r; 1451 1452 if (buffer == NULL) 1453 return UV_EINVAL; 1454 1455 if (uname(&buf) == -1) { 1456 r = UV__ERR(errno); 1457 goto error; 1458 } 1459 1460 r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname)); 1461 if (r == UV_E2BIG) 1462 goto error; 1463 1464 #ifdef _AIX 1465 r = snprintf(buffer->release, 1466 sizeof(buffer->release), 1467 "%s.%s", 1468 buf.version, 1469 buf.release); 1470 if (r >= sizeof(buffer->release)) { 1471 r = UV_E2BIG; 1472 goto error; 1473 } 1474 #else 1475 r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release)); 1476 if (r == UV_E2BIG) 1477 goto error; 1478 #endif 1479 1480 r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version)); 1481 if (r == UV_E2BIG) 1482 goto error; 1483 1484 #if defined(_AIX) || defined(__PASE__) 1485 r = 
uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine)); 1486 #else 1487 r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine)); 1488 #endif 1489 1490 if (r == UV_E2BIG) 1491 goto error; 1492 1493 return 0; 1494 1495 error: 1496 buffer->sysname[0] = '\0'; 1497 buffer->release[0] = '\0'; 1498 buffer->version[0] = '\0'; 1499 buffer->machine[0] = '\0'; 1500 return r; 1501 } 1502 1503 int uv__getsockpeername(const uv_handle_t* handle, 1504 uv__peersockfunc func, 1505 struct sockaddr* name, 1506 int* namelen) { 1507 socklen_t socklen; 1508 uv_os_fd_t fd; 1509 int r; 1510 1511 r = uv_fileno(handle, &fd); 1512 if (r < 0) 1513 return r; 1514 1515 /* sizeof(socklen_t) != sizeof(int) on some systems. */ 1516 socklen = (socklen_t) *namelen; 1517 1518 if (func(fd, name, &socklen)) 1519 return UV__ERR(errno); 1520 1521 *namelen = (int) socklen; 1522 return 0; 1523 } 1524 1525 int uv_gettimeofday(uv_timeval64_t* tv) { 1526 struct timeval time; 1527 1528 if (tv == NULL) 1529 return UV_EINVAL; 1530 1531 if (gettimeofday(&time, NULL) != 0) 1532 return UV__ERR(errno); 1533 1534 tv->tv_sec = (int64_t) time.tv_sec; 1535 tv->tv_usec = (int32_t) time.tv_usec; 1536 return 0; 1537 } 1538 1539 void uv_sleep(unsigned int msec) { 1540 struct timespec timeout; 1541 int rc; 1542 1543 timeout.tv_sec = msec / 1000; 1544 timeout.tv_nsec = (msec % 1000) * 1000 * 1000; 1545 1546 do 1547 rc = nanosleep(&timeout, &timeout); 1548 while (rc == -1 && errno == EINTR); 1549 1550 assert(rc == 0); 1551 } 1552 1553 int uv__search_path(const char* prog, char* buf, size_t* buflen) { 1554 char abspath[UV__PATH_MAX]; 1555 size_t abspath_size; 1556 char trypath[UV__PATH_MAX]; 1557 char* cloned_path; 1558 char* path_env; 1559 char* token; 1560 char* itr; 1561 1562 if (buf == NULL || buflen == NULL || *buflen == 0) 1563 return UV_EINVAL; 1564 1565 /* 1566 * Possibilities for prog: 1567 * i) an absolute path such as: /home/user/myprojects/nodejs/node 1568 * ii) a relative path such as: ./node 
or ../myprojects/nodejs/node 1569 * iii) a bare filename such as "node", after exporting PATH variable 1570 * to its location. 1571 */ 1572 1573 /* Case i) and ii) absolute or relative paths */ 1574 if (strchr(prog, '/') != NULL) { 1575 if (realpath(prog, abspath) != abspath) 1576 return UV__ERR(errno); 1577 1578 abspath_size = strlen(abspath); 1579 1580 *buflen -= 1; 1581 if (*buflen > abspath_size) 1582 *buflen = abspath_size; 1583 1584 memcpy(buf, abspath, *buflen); 1585 buf[*buflen] = '\0'; 1586 1587 return 0; 1588 } 1589 1590 /* Case iii). Search PATH environment variable */ 1591 cloned_path = NULL; 1592 token = NULL; 1593 path_env = getenv("PATH"); 1594 1595 if (path_env == NULL) 1596 return UV_EINVAL; 1597 1598 cloned_path = uv__strdup(path_env); 1599 if (cloned_path == NULL) 1600 return UV_ENOMEM; 1601 1602 token = uv__strtok(cloned_path, ":", &itr); 1603 while (token != NULL) { 1604 snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, prog); 1605 if (realpath(trypath, abspath) == abspath) { 1606 /* Check the match is executable */ 1607 if (access(abspath, X_OK) == 0) { 1608 abspath_size = strlen(abspath); 1609 1610 *buflen -= 1; 1611 if (*buflen > abspath_size) 1612 *buflen = abspath_size; 1613 1614 memcpy(buf, abspath, *buflen); 1615 buf[*buflen] = '\0'; 1616 1617 uv__free(cloned_path); 1618 return 0; 1619 } 1620 } 1621 token = uv__strtok(NULL, ":", &itr); 1622 } 1623 uv__free(cloned_path); 1624 1625 /* Out of tokens (path entries), and no match found */ 1626 return UV_EINVAL; 1627 } 1628 1629 1630 unsigned int uv_available_parallelism(void) { 1631 #ifdef __linux__ 1632 cpu_set_t set; 1633 long rc; 1634 1635 memset(&set, 0, sizeof(set)); 1636 1637 /* sysconf(_SC_NPROCESSORS_ONLN) in musl calls sched_getaffinity() but in 1638 * glibc it's... complicated... so for consistency try sched_getaffinity() 1639 * before falling back to sysconf(_SC_NPROCESSORS_ONLN). 
1640 */ 1641 if (0 == sched_getaffinity(0, sizeof(set), &set)) 1642 rc = CPU_COUNT(&set); 1643 else 1644 rc = sysconf(_SC_NPROCESSORS_ONLN); 1645 1646 if (rc < 1) 1647 rc = 1; 1648 1649 return (unsigned) rc; 1650 #elif defined(__MVS__) 1651 int rc; 1652 1653 rc = __get_num_online_cpus(); 1654 if (rc < 1) 1655 rc = 1; 1656 1657 return (unsigned) rc; 1658 #else /* __linux__ */ 1659 long rc; 1660 1661 rc = sysconf(_SC_NPROCESSORS_ONLN); 1662 if (rc < 1) 1663 rc = 1; 1664 1665 return (unsigned) rc; 1666 #endif /* __linux__ */ 1667 } 1668