/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#ifndef SUNOS_NO_IFADDRS
# include <ifaddrs.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_arp.h>
#include <sys/sockio.h>

#include <sys/loadavg.h>
#include <sys/time.h>
#include <unistd.h>
#include <kstat.h>
#include <fcntl.h>

#include <sys/port.h>
#include <port.h>

#define PORT_FIRED 0x69
#define PORT_UNUSED 0x0
#define PORT_LOADED 0x99
#define PORT_DELETED -1

#if (!defined(_LP64)) && (_FILE_OFFSET_BITS - 0 == 64)
#define PROCFS_FILE_OFFSET_BITS_HACK 1
#undef _FILE_OFFSET_BITS
#else
#define PROCFS_FILE_OFFSET_BITS_HACK 0
#endif

#include <procfs.h>

#if (PROCFS_FILE_OFFSET_BITS_HACK - 0 == 1)
#define _FILE_OFFSET_BITS 64
#endif


int uv__platform_loop_init(uv_loop_t* loop) {
  int err;
  int fd;

  loop->fs_fd = -1;
  loop->backend_fd = -1;

  fd = port_create();
  if (fd == -1)
    return UV__ERR(errno);

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }
  loop->backend_fd = fd;

  return 0;
}


void uv__platform_loop_delete(uv_loop_t* loop) {
  if (loop->fs_fd != -1) {
    uv__close(loop->fs_fd);
    loop->fs_fd = -1;
  }

  if (loop->backend_fd != -1) {
    uv__close(loop->backend_fd);
    loop->backend_fd = -1;
  }
}


int uv__io_fork(uv_loop_t* loop) {
#if defined(PORT_SOURCE_FILE)
  if (loop->fs_fd != -1) {
    /* stop the watcher before we blow away its fileno */
    uv__io_stop(loop, &loop->fs_event_watcher, POLLIN);
  }
#endif
  uv__platform_loop_delete(loop);
  return uv__platform_loop_init(loop);
}


void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct port_event* events;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct port_event*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events == NULL)
    return;

  /* Invalidate events with same file descriptor */
  for (i = 0; i < nfds; i++)
    if ((int) events[i].portev_object == fd)
      events[i].portev_object = -1;
}


int uv__io_check_fd(uv_loop_t* loop, int fd) {
  if (port_associate(loop->backend_fd, PORT_SOURCE_FD, fd, POLLIN, 0))
    return UV__ERR(errno);

  if (port_dissociate(loop->backend_fd, PORT_SOURCE_FD, fd)) {
    perror("(libuv) port_dissociate()");
    abort();
  }

  return 0;
}


void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct port_event events[1024];
  struct port_event* pe;
  struct timespec spec;
  QUEUE* q;
  uv__io_t* w;
  sigset_t* pset;
  sigset_t set;
  uint64_t base;
  uint64_t diff;
  unsigned int nfds;
  unsigned int i;
  int saved_errno;
  int have_signals;
  int nevents;
  int count;
  int err;
  int fd;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);

    if (port_associate(loop->backend_fd,
                       PORT_SOURCE_FD,
                       w->fd,
                       w->pevents,
                       0)) {
      perror("(libuv) port_associate()");
      abort();
    }

    w->events = w->pevents;
  }

  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  for (;;) {
    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }

    /* Work around a kernel bug where nfds is not updated. */
    events[0].portev_source = 0;

    nfds = 1;
    saved_errno = 0;

    if (pset != NULL)
      pthread_sigmask(SIG_BLOCK, pset, NULL);

    err = port_getn(loop->backend_fd,
                    events,
                    ARRAY_SIZE(events),
                    &nfds,
                    timeout == -1 ? NULL : &spec);

    if (pset != NULL)
      pthread_sigmask(SIG_UNBLOCK, pset, NULL);

    if (err) {
      /* Work around another kernel bug: port_getn() may return events even
       * on error.
       */
      if (errno == EINTR || errno == ETIME) {
        saved_errno = errno;
      } else {
        perror("(libuv) port_getn()");
        abort();
      }
    }

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (events[0].portev_source == 0) {
      if (timeout == 0)
        return;

      if (timeout == -1)
        continue;

      goto update_timeout;
    }

    if (nfds == 0) {
      assert(timeout != -1);
      return;
    }

    have_signals = 0;
    nevents = 0;

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->portev_object;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      /* File descriptor that we've stopped watching, ignore. */
      if (w == NULL)
        continue;

      /* Run signal watchers last. This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher)
        have_signals = 1;
      else
        w->cb(loop, w, pe->portev_events);

      nevents++;

      if (w != loop->watchers[fd])
        continue;  /* Disabled by callback. */

      /* Event ports operate in oneshot mode; rearm the watcher on the next
       * run.
       */
      if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue))
        QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
    }

    if (have_signals != 0)
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return;  /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (saved_errno == ETIME) {
      assert(timeout != -1);
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}


uint64_t uv__hrtime(uv_clocktype_t type) {
  return gethrtime();
}


/*
 * We could use a static buffer for the path manipulations that we need outside
 * of the function, but this function could be called by multiple consumers and
 * we don't want to potentially create a race condition in the use of snprintf.
 */
int uv_exepath(char* buffer, size_t* size) {
  ssize_t res;
  char buf[128];

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  snprintf(buf, sizeof(buf), "/proc/%lu/path/a.out", (unsigned long) getpid());

  res = *size - 1;
  if (res > 0)
    res = readlink(buf, buffer, res);

  if (res == -1)
    return UV__ERR(errno);

  buffer[res] = '\0';
  *size = res;
  return 0;
}


uint64_t uv_get_free_memory(void) {
  return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
}


uint64_t uv_get_total_memory(void) {
  return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES);
}


uint64_t uv_get_constrained_memory(void) {
  return 0;  /* Memory constraints are unknown. */
}


void uv_loadavg(double avg[3]) {
  (void) getloadavg(avg, 3);
}


#if defined(PORT_SOURCE_FILE)

static int uv__fs_event_rearm(uv_fs_event_t *handle) {
  if (handle->fd == -1)
    return UV_EBADF;

  if (port_associate(handle->loop->fs_fd,
                     PORT_SOURCE_FILE,
                     (uintptr_t) &handle->fo,
                     FILE_ATTRIB | FILE_MODIFIED,
                     handle) == -1) {
    return UV__ERR(errno);
  }
  handle->fd = PORT_LOADED;

  return 0;
}


static void uv__fs_event_read(uv_loop_t* loop,
                              uv__io_t* w,
                              unsigned int revents) {
  uv_fs_event_t *handle = NULL;
  timespec_t timeout;
  port_event_t pe;
  int events;
  int r;

  (void) w;
  (void) revents;

  do {
    uint_t n = 1;

    /*
     * Note that our use of port_getn() here (and not port_get()) is deliberate:
     * there is a bug in event ports (Sun bug 6456558) whereby a zeroed timeout
     * causes port_get() to return success instead of ETIME when there aren't
     * actually any events (!); by using port_getn() in lieu of port_get(),
     * we can at least work around the bug by checking for zero returned events
     * and treating it as we would ETIME.
     */
    do {
      memset(&timeout, 0, sizeof timeout);
      r = port_getn(loop->fs_fd, &pe, 1, &n, &timeout);
    }
    while (r == -1 && errno == EINTR);

    if ((r == -1 && errno == ETIME) || n == 0)
      break;

    handle = (uv_fs_event_t*) pe.portev_user;
    assert((r == 0) && "unexpected port_get() error");

    events = 0;
    if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED))
      events |= UV_CHANGE;
    if (pe.portev_events & ~(FILE_ATTRIB | FILE_MODIFIED))
      events |= UV_RENAME;
    assert(events != 0);
    handle->fd = PORT_FIRED;
    handle->cb(handle, NULL, events, 0);

    if (handle->fd != PORT_DELETED) {
      r = uv__fs_event_rearm(handle);
      if (r != 0)
        handle->cb(handle, NULL, 0, r);
    }
  }
  while (handle->fd != PORT_DELETED);
}


int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}


int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  int portfd;
  int first_run;
  int err;

  if (uv__is_active(handle))
    return UV_EINVAL;

  first_run = 0;
  if (handle->loop->fs_fd == -1) {
    portfd = port_create();
    if (portfd == -1)
      return UV__ERR(errno);
    handle->loop->fs_fd = portfd;
    first_run = 1;
  }

  uv__handle_start(handle);
  handle->path = uv__strdup(path);
  handle->fd = PORT_UNUSED;
  handle->cb = cb;

  memset(&handle->fo, 0, sizeof handle->fo);
  handle->fo.fo_name = handle->path;
  err = uv__fs_event_rearm(handle);
  if (err != 0) {
    uv_fs_event_stop(handle);
    return err;
  }

  if (first_run) {
    uv__io_init(&handle->loop->fs_event_watcher, uv__fs_event_read, portfd);
    uv__io_start(handle->loop, &handle->loop->fs_event_watcher, POLLIN);
  }

  return 0;
}


int uv_fs_event_stop(uv_fs_event_t* handle) {
  if (!uv__is_active(handle))
    return 0;

  if (handle->fd == PORT_FIRED || handle->fd == PORT_LOADED) {
    port_dissociate(handle->loop->fs_fd,
                    PORT_SOURCE_FILE,
                    (uintptr_t) &handle->fo);
  }

  handle->fd = PORT_DELETED;
  uv__free(handle->path);
  handle->path = NULL;
  handle->fo.fo_name = NULL;
  uv__handle_stop(handle);

  return 0;
}
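
/* Closing the handle only needs to stop it; the per-loop event port
 * (loop->fs_fd) is shared by all fs_event handles and is closed in
 * uv__platform_loop_delete().
 */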
void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}

#else /* !defined(PORT_SOURCE_FILE) */

int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  return UV_ENOSYS;
}


int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* filename,
                      unsigned int flags) {
  return UV_ENOSYS;
}


int uv_fs_event_stop(uv_fs_event_t* handle) {
  return UV_ENOSYS;
}


void uv__fs_event_close(uv_fs_event_t* handle) {
  UNREACHABLE();
}

#endif /* defined(PORT_SOURCE_FILE) */


int uv_resident_set_memory(size_t* rss) {
  psinfo_t psinfo;
  int err;
  int fd;

  fd = open("/proc/self/psinfo", O_RDONLY);
  if (fd == -1)
    return UV__ERR(errno);

  /* FIXME(bnoordhuis) Handle EINTR. */
  err = UV_EINVAL;
  if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) {
    *rss = (size_t)psinfo.pr_rssize * 1024;
    err = 0;
  }
  uv__close(fd);

  return err;
}


int uv_uptime(double* uptime) {
  kstat_ctl_t *kc;
  kstat_t *ksp;
  kstat_named_t *knp;

  long hz = sysconf(_SC_CLK_TCK);

  kc = kstat_open();
  if (kc == NULL)
    return UV_EPERM;

  ksp = kstat_lookup(kc, (char*) "unix", 0, (char*) "system_misc");
  if (kstat_read(kc, ksp, NULL) == -1) {
    *uptime = -1;
  } else {
    knp = (kstat_named_t*) kstat_data_lookup(ksp, (char*) "clk_intr");
    *uptime = knp->value.ul / hz;
  }
  kstat_close(kc);

  return 0;
}


int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  int lookup_instance;
  kstat_ctl_t *kc;
  kstat_t *ksp;
  kstat_named_t *knp;
  uv_cpu_info_t* cpu_info;

  kc = kstat_open();
  if (kc == NULL)
    return UV_EPERM;

  /* Get count of cpus */
  lookup_instance = 0;
  while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
    lookup_instance++;
  }

  *cpu_infos = uv__malloc(lookup_instance * sizeof(**cpu_infos));
  if (!(*cpu_infos)) {
    kstat_close(kc);
    return UV_ENOMEM;
  }

  *count = lookup_instance;

  cpu_info = *cpu_infos;
  lookup_instance = 0;
  while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
    if (kstat_read(kc, ksp, NULL) == -1) {
      cpu_info->speed = 0;
      cpu_info->model = NULL;
    } else {
      knp = kstat_data_lookup(ksp, (char*) "clock_MHz");
      assert(knp->data_type == KSTAT_DATA_INT32 ||
             knp->data_type == KSTAT_DATA_INT64);
      cpu_info->speed = (knp->data_type == KSTAT_DATA_INT32) ? knp->value.i32
                                                             : knp->value.i64;

      knp = kstat_data_lookup(ksp, (char*) "brand");
      assert(knp->data_type == KSTAT_DATA_STRING);
      cpu_info->model = uv__strdup(KSTAT_NAMED_STR_PTR(knp));
    }

    lookup_instance++;
    cpu_info++;
  }

  cpu_info = *cpu_infos;
  lookup_instance = 0;
  for (;;) {
    ksp = kstat_lookup(kc, (char*) "cpu", lookup_instance, (char*) "sys");

    if (ksp == NULL)
      break;

    if (kstat_read(kc, ksp, NULL) == -1) {
      cpu_info->cpu_times.user = 0;
      cpu_info->cpu_times.nice = 0;
      cpu_info->cpu_times.sys = 0;
      cpu_info->cpu_times.idle = 0;
      cpu_info->cpu_times.irq = 0;
    } else {
      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_user");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.user = knp->value.ui64;

      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_kernel");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.sys = knp->value.ui64;

      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_idle");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.idle = knp->value.ui64;

      knp = kstat_data_lookup(ksp, (char*) "intr");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.irq = knp->value.ui64;
      cpu_info->cpu_times.nice = 0;
    }

    lookup_instance++;
    cpu_info++;
  }

  kstat_close(kc);

  return 0;
}


#ifdef SUNOS_NO_IFADDRS
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  *count = 0;
  *addresses = NULL;
  return UV_ENOSYS;
}
#else /* SUNOS_NO_IFADDRS */
/*
 * Inspired by:
 * https://blogs.oracle.com/paulie/entry/retrieving_mac_address_in_solaris
 * http://www.pauliesworld.org/project/getmac.c
 */
static int uv__set_phys_addr(uv_interface_address_t* address,
                             struct ifaddrs* ent) {

  struct sockaddr_dl* sa_addr;
  int sockfd;
  size_t i;
  struct arpreq arpreq;

  /* This appears to only work as root */
  sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
  memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
  for (i = 0; i < sizeof(address->phys_addr); i++) {
    /* Check that all bytes of phys_addr are zero. */
    if (address->phys_addr[i] != 0)
      return 0;
  }
  memset(&arpreq, 0, sizeof(arpreq));
  if (address->address.address4.sin_family == AF_INET) {
    struct sockaddr_in* sin = ((struct sockaddr_in*)&arpreq.arp_pa);
    sin->sin_addr.s_addr = address->address.address4.sin_addr.s_addr;
  } else if (address->address.address4.sin_family == AF_INET6) {
    struct sockaddr_in6* sin = ((struct sockaddr_in6*)&arpreq.arp_pa);
    memcpy(sin->sin6_addr.s6_addr,
           address->address.address6.sin6_addr.s6_addr,
           sizeof(address->address.address6.sin6_addr.s6_addr));
  } else {
    return 0;
  }

  sockfd = socket(AF_INET, SOCK_DGRAM, 0);
  if (sockfd < 0)
    return UV__ERR(errno);

  if (ioctl(sockfd, SIOCGARP, (char*)&arpreq) == -1) {
    uv__close(sockfd);
    return UV__ERR(errno);
  }
  memcpy(address->phys_addr, arpreq.arp_ha.sa_data, sizeof(address->phys_addr));
  uv__close(sockfd);
  return 0;
}


static int uv__ifaddr_exclude(struct ifaddrs *ent) {
  if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
    return 1;
  if (ent->ifa_addr == NULL)
    return 1;
  if (ent->ifa_addr->sa_family != AF_INET &&
      ent->ifa_addr->sa_family != AF_INET6)
    return 1;
  return 0;
}

int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  uv_interface_address_t* address;
  struct ifaddrs* addrs;
  struct ifaddrs* ent;

  *count = 0;
  *addresses = NULL;

  if (getifaddrs(&addrs))
    return UV__ERR(errno);

  /* Count the number of interfaces */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent))
      continue;
    (*count)++;
  }

  if (*count == 0) {
    freeifaddrs(addrs);
    return 0;
  }

  *addresses = uv__malloc(*count * sizeof(**addresses));
  if (!(*addresses)) {
    freeifaddrs(addrs);
    return UV_ENOMEM;
  }

  address = *addresses;

  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent))
      continue;

    address->name = uv__strdup(ent->ifa_name);

    if (ent->ifa_addr->sa_family == AF_INET6) {
      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
    } else {
      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
    }

    if (ent->ifa_netmask->sa_family == AF_INET6) {
      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
    } else {
      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
    }

    address->is_internal = !!((ent->ifa_flags & IFF_PRIVATE) ||
                              (ent->ifa_flags & IFF_LOOPBACK));

    uv__set_phys_addr(address, ent);
    address++;
  }

  freeifaddrs(addrs);

  return 0;
}
#endif /* SUNOS_NO_IFADDRS */

void uv_free_interface_addresses(uv_interface_address_t* addresses,
                                 int count) {
  int i;

  for (i = 0; i < count; i++) {
    uv__free(addresses[i].name);
  }

  uv__free(addresses);
}