/* Copyright libuv project contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "internal.h"
#include <sys/ioctl.h>
#include <net/if.h>
#include <utmpx.h>
#include <unistd.h>
#include <sys/ps.h>
#include <builtins.h>
#include <termios.h>
#include <sys/msg.h>
#if defined(__clang__)
#include "csrsic.h"
#else
#include "/'SYS1.SAMPLIB(CSRSIC)'"
#endif

#define CVT_PTR 0x10
#define PSA_PTR 0x00
#define CSD_OFFSET 0x294

/*
  Long-term average CPU service used by this logical partition,
  in millions of service units per hour. If this value is above
  the partition's defined capacity, the partition will be capped.
  It is calculated using the physical CPU adjustment factor
  (RCTPCPUA) so it may not match other measures of service which
  are based on the logical CPU adjustment factor. It is available
  if the hardware supports LPAR cluster.
*/
#define RCTLACS_OFFSET 0xC4

/* 32-bit count of alive CPUs. This includes both CPs and IFAs */
#define CSD_NUMBER_ONLINE_CPUS 0xD4

/* Address of system resources manager (SRM) control table */
#define CVTOPCTP_OFFSET 0x25C

/* Address of the RCT table */
#define RMCTRCT_OFFSET 0xE4

/* Address of the rsm control and enumeration area. */
#define CVTRCEP_OFFSET 0x490

/*
  Number of frames currently available to the system.
  Excluded are frames backing perm storage, frames offline, and bad frames.
*/
#define RCEPOOL_OFFSET 0x004

/* Total number of frames currently on all available frame queues. */
#define RCEAFC_OFFSET 0x088

/* CPC model length from the CSRSI Service. */
#define CPCMODEL_LENGTH 16

/* Pointer to the home (current) ASCB. */
#define PSAAOLD 0x224

/* Pointer to rsm address space block extension. */
#define ASCBRSME 0x16C

/*
  Number of frames currently in use by this address space.
  It does not include 2G frames.
*/
#define RAXFMCT 0x2C
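
/*
 * The offsets above name fields in MVS control blocks, reached by chained
 * dereferences starting from fixed low storage: the PSA sits at address 0
 * and the CVT pointer lives at offset 0x10. For example, the number of
 * online CPUs read in uv_cpu_info() below is obtained, in effect, like
 * this (illustrative sketch only; the real code goes through the
 * data_area_ptr union to cope with 31/64-bit addressing):
 *
 *   char* cvt = *(char* __ptr32 *) CVT_PTR;
 *   char* csd = *(char* __ptr32 *) (cvt + CSD_OFFSET);
 *   int ncpus = *(int*) (csd + CSD_NUMBER_ONLINE_CPUS);
 */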

/* Thread Entry constants */
#define PGTH_CURRENT 1
#define PGTH_LEN 26
#define PGTHAPATH 0x20
#pragma linkage(BPX4GTH, OS)
#pragma linkage(BPX1GTH, OS)

/* TOD clock units per nanosecond (the TOD clock ticks 4096 times per
 * microsecond).
 */
#define TOD_RES 4.096

typedef unsigned data_area_ptr_assign_type;

typedef union {
  struct {
#if defined(_LP64)
    data_area_ptr_assign_type lower;
#endif
    data_area_ptr_assign_type assign;
  };
  char* deref;
} data_area_ptr;


void uv_loadavg(double avg[3]) {
  /* TODO: implement the following */
  avg[0] = 0;
  avg[1] = 0;
  avg[2] = 0;
}


int uv__platform_loop_init(uv_loop_t* loop) {
  uv__os390_epoll* ep;

  ep = epoll_create1(0);
  loop->ep = ep;
  if (ep == NULL)
    return UV__ERR(errno);

  return 0;
}


void uv__platform_loop_delete(uv_loop_t* loop) {
  if (loop->ep != NULL) {
    epoll_queue_close(loop->ep);
    loop->ep = NULL;
  }
}


uint64_t uv__hrtime(uv_clocktype_t type) {
  unsigned long long timestamp;
  __stckf(&timestamp);
  /* Convert to nanoseconds */
  return timestamp / TOD_RES;
}
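
/*
 * Why dividing by TOD_RES converts to nanoseconds: bit 51 of the
 * z/Architecture TOD clock ticks once per microsecond, so the low-order
 * bit of the 64-bit value stored by STCKF represents 2^-12 microseconds.
 * One TOD unit is therefore 1000/4096 ns, and:
 *
 *   ns = timestamp * (1000 / 4096) = timestamp / 4.096
 */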

/*
  Get the exe path using the thread entry information
  in the address space.
*/
static int getexe(const int pid, char* buf, size_t len) {
  struct {
    int pid;
    int thid[2];
    char accesspid;
    char accessthid;
    char asid[2];
    char loginname[8];
    char flag;
    char len;
  } Input_data;

  union {
    struct {
      char gthb[4];
      int pid;
      int thid[2];
      char accesspid;
      char accessthid[3];
      int lenused;
      int offsetProcess;
      int offsetConTTY;
      int offsetPath;
      int offsetCommand;
      int offsetFileData;
      int offsetThread;
    } Output_data;
    char buf[2048];
  } Output_buf;

  struct Output_path_type {
    char gthe[4];
    short int len;
    char path[1024];
  };

  int Input_length;
  int Output_length;
  void* Input_address;
  void* Output_address;
  struct Output_path_type* Output_path;
  int rv;
  int rc;
  int rsn;

  Input_length = PGTH_LEN;
  Output_length = sizeof(Output_buf);
  Output_address = &Output_buf;
  Input_address = &Input_data;
  memset(&Input_data, 0, sizeof Input_data);
  Input_data.flag |= PGTHAPATH;
  Input_data.pid = pid;
  Input_data.accesspid = PGTH_CURRENT;

#ifdef _LP64
  BPX4GTH(&Input_length,
          &Input_address,
          &Output_length,
          &Output_address,
          &rv,
          &rc,
          &rsn);
#else
  BPX1GTH(&Input_length,
          &Input_address,
          &Output_length,
          &Output_address,
          &rv,
          &rc,
          &rsn);
#endif

  if (rv == -1) {
    errno = rc;
    return -1;
  }

  /* Check the highest byte to ensure data availability */
  assert(((Output_buf.Output_data.offsetPath >> 24) & 0xFF) == 'A');

  /* Get the offset from the lowest 3 bytes */
  Output_path = (struct Output_path_type*) ((char*) (&Output_buf) +
      (Output_buf.Output_data.offsetPath & 0x00FFFFFF));

  if (Output_path->len >= len) {
    errno = ENOBUFS;
    return -1;
  }

  uv__strscpy(buf, Output_path->path, len);

  return 0;
}


/*
 * We could use a static buffer for the path manipulations that we need outside
 * of the function, but this function could be called by multiple consumers and
 * we don't want to potentially create a race condition in the use of snprintf.
 * There is no direct way of getting the exe path on z/OS, either through
 * /proc or through libc APIs. The approach below is to parse argv[0]'s
 * pattern and use it in conjunction with the PATH environment variable to
 * craft one.
 */
int uv_exepath(char* buffer, size_t* size) {
  int res;
  char args[PATH_MAX];
  char abspath[PATH_MAX];
  size_t abspath_size;
  int pid;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  pid = getpid();
  res = getexe(pid, args, sizeof(args));
  if (res < 0)
    return UV_EINVAL;

  /*
   * Possibilities for args:
   * i) an absolute path such as: /home/user/myprojects/nodejs/node
   * ii) a relative path such as: ./node or ../myprojects/nodejs/node
   * iii) a bare filename such as "node", after exporting PATH variable
   *      to its location.
   */

  /* Case i) and ii) absolute or relative paths */
  if (strchr(args, '/') != NULL) {
    if (realpath(args, abspath) != abspath)
      return UV__ERR(errno);

    abspath_size = strlen(abspath);

    *size -= 1;
    if (*size > abspath_size)
      *size = abspath_size;

    memcpy(buffer, abspath, *size);
    buffer[*size] = '\0';

    return 0;
  } else {
    /* Case iii). Search PATH environment variable */
    char trypath[PATH_MAX];
    char* clonedpath = NULL;
    char* token = NULL;
    char* path = getenv("PATH");

    if (path == NULL)
      return UV_EINVAL;

    clonedpath = uv__strdup(path);
    if (clonedpath == NULL)
      return UV_ENOMEM;

    token = strtok(clonedpath, ":");
    while (token != NULL) {
      snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, args);
      if (realpath(trypath, abspath) == abspath) {
        /* Check the match is executable */
        if (access(abspath, X_OK) == 0) {
          abspath_size = strlen(abspath);

          *size -= 1;
          if (*size > abspath_size)
            *size = abspath_size;

          memcpy(buffer, abspath, *size);
          buffer[*size] = '\0';

          uv__free(clonedpath);
          return 0;
        }
      }
      token = strtok(NULL, ":");
    }
    uv__free(clonedpath);

    /* Out of tokens (path entries), and no match found */
    return UV_EINVAL;
  }
}
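
/*
 * uv_exepath() follows the usual libuv in/out size convention: *size holds
 * the buffer capacity on entry and the string length (excluding the NUL)
 * on return, truncating if the buffer is too small. A minimal sketch of a
 * caller:
 *
 *   char buf[PATH_MAX];
 *   size_t n = sizeof(buf);
 *   if (uv_exepath(buf, &n) == 0)
 *     printf("%.*s\n", (int) n, buf);
 */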

uint64_t uv_get_free_memory(void) {
  uint64_t freeram;

  data_area_ptr cvt = {0};
  data_area_ptr rcep = {0};
  cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
  rcep.assign = *(data_area_ptr_assign_type*)(cvt.deref + CVTRCEP_OFFSET);
  /* RCEAFC is a fullword frame count; frames are 4 KiB, so scale to bytes. */
  freeram = (uint64_t) *((unsigned int*)(rcep.deref + RCEAFC_OFFSET)) * 4096;
  return freeram;
}


uint64_t uv_get_total_memory(void) {
  uint64_t totalram;

  data_area_ptr cvt = {0};
  data_area_ptr rcep = {0};
  cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
  rcep.assign = *(data_area_ptr_assign_type*)(cvt.deref + CVTRCEP_OFFSET);
  /* RCEPOOL is a fullword frame count; frames are 4 KiB, so scale to bytes. */
  totalram = (uint64_t) *((unsigned int*)(rcep.deref + RCEPOOL_OFFSET)) * 4096;
  return totalram;
}


uint64_t uv_get_constrained_memory(void) {
  return 0;  /* Memory constraints are unknown. */
}


int uv_resident_set_memory(size_t* rss) {
  char* ascb;
  char* rax;
  size_t nframes;

  ascb = *(char* __ptr32 *)(PSA_PTR + PSAAOLD);
  rax = *(char* __ptr32 *)(ascb + ASCBRSME);
  nframes = *(unsigned int*)(rax + RAXFMCT);

  *rss = nframes * sysconf(_SC_PAGESIZE);
  return 0;
}


int uv_uptime(double* uptime) {
  struct utmpx u;
  struct utmpx* v;
  time64_t t;

  u.ut_type = BOOT_TIME;
  v = getutxid(&u);
  if (v == NULL)
    return -1;
  *uptime = difftime64(time64(&t), v->ut_tv.tv_sec);
  return 0;
}


int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  uv_cpu_info_t* cpu_info;
  int idx;
  siv1v2 info;
  data_area_ptr cvt = {0};
  data_area_ptr csd = {0};
  data_area_ptr rmctrct = {0};
  data_area_ptr cvtopctp = {0};
  int cpu_usage_avg;

  cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);

  csd.assign = *((data_area_ptr_assign_type*) (cvt.deref + CSD_OFFSET));
  cvtopctp.assign = *((data_area_ptr_assign_type*) (cvt.deref + CVTOPCTP_OFFSET));
  rmctrct.assign = *((data_area_ptr_assign_type*) (cvtopctp.deref + RMCTRCT_OFFSET));

  *count = *((int*) (csd.deref + CSD_NUMBER_ONLINE_CPUS));
  cpu_usage_avg = *((unsigned short int*) (rmctrct.deref + RCTLACS_OFFSET));

  *cpu_infos = uv__malloc(*count * sizeof(uv_cpu_info_t));
  if (!*cpu_infos)
    return UV_ENOMEM;

  cpu_info = *cpu_infos;
  idx = 0;
  /* TODO: populate 'info' via the CSRSI service (see csrsic.h) before
   * reading it below; as written, the CPU capability and CPC model
   * fields are used uninitialized.
   */
  while (idx < *count) {
    cpu_info->speed = *(int*)(info.siv1v2si22v1.si22v1cpucapability);
    cpu_info->model = uv__malloc(CPCMODEL_LENGTH + 1);
    memset(cpu_info->model, '\0', CPCMODEL_LENGTH + 1);
    memcpy(cpu_info->model, info.siv1v2si11v1.si11v1cpcmodel, CPCMODEL_LENGTH);
    cpu_info->cpu_times.user = cpu_usage_avg;
    /* TODO: implement the following */
    cpu_info->cpu_times.sys = 0;
    cpu_info->cpu_times.idle = 0;
    cpu_info->cpu_times.irq = 0;
    cpu_info->cpu_times.nice = 0;
    ++cpu_info;
    ++idx;
  }

  return 0;
}
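
/*
 * Callers release the array with uv_free_cpu_info(), the standard libuv
 * API, which frees each model string and then the array itself. A minimal
 * usage sketch:
 *
 *   uv_cpu_info_t* cpus;
 *   int ncpus;
 *   int i;
 *
 *   if (uv_cpu_info(&cpus, &ncpus) == 0) {
 *     for (i = 0; i < ncpus; i++)
 *       printf("%s %d\n", cpus[i].model, cpus[i].speed);
 *     uv_free_cpu_info(cpus, ncpus);
 *   }
 */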

static int uv__interface_addresses_v6(uv_interface_address_t** addresses,
                                      int* count) {
  uv_interface_address_t* address;
  int sockfd;
  int maxsize;
  __net_ifconf6header_t ifc;
  __net_ifconf6entry_t* ifr;
  __net_ifconf6entry_t* p;

  *count = 0;
  /* Assume maximum buffer size allowable */
  maxsize = 16384;

  if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP)))
    return UV__ERR(errno);

  ifc.__nif6h_version = 1;
  ifc.__nif6h_buflen = maxsize;
  ifc.__nif6h_buffer = uv__calloc(1, maxsize);
  if (ifc.__nif6h_buffer == NULL) {
    uv__close(sockfd);
    return UV_ENOMEM;
  }

  if (ioctl(sockfd, SIOCGIFCONF6, &ifc) == -1) {
    uv__free(ifc.__nif6h_buffer);
    uv__close(sockfd);
    return UV__ERR(errno);
  }

  ifr = (__net_ifconf6entry_t*)(ifc.__nif6h_buffer);
  while ((char*)ifr < (char*)ifc.__nif6h_buffer + ifc.__nif6h_buflen) {
    p = ifr;
    ifr = (__net_ifconf6entry_t*)((char*)ifr + ifc.__nif6h_entrylen);

    if (!(p->__nif6e_addr.sin6_family == AF_INET6 ||
          p->__nif6e_addr.sin6_family == AF_INET))
      continue;

    if (!(p->__nif6e_flags & _NIF6E_FLAGS_ON_LINK_ACTIVE))
      continue;

    ++(*count);
  }

  /* Alloc the return interface structs */
  *addresses = uv__malloc(*count * sizeof(uv_interface_address_t));
  if (!(*addresses)) {
    uv__free(ifc.__nif6h_buffer);
    uv__close(sockfd);
    return UV_ENOMEM;
  }
  address = *addresses;

  ifr = (__net_ifconf6entry_t*)(ifc.__nif6h_buffer);
  while ((char*)ifr < (char*)ifc.__nif6h_buffer + ifc.__nif6h_buflen) {
    p = ifr;
    ifr = (__net_ifconf6entry_t*)((char*)ifr + ifc.__nif6h_entrylen);

    if (!(p->__nif6e_addr.sin6_family == AF_INET6 ||
          p->__nif6e_addr.sin6_family == AF_INET))
      continue;

    if (!(p->__nif6e_flags & _NIF6E_FLAGS_ON_LINK_ACTIVE))
      continue;

    /* All conditions above must match count loop */

    address->name = uv__strdup(p->__nif6e_name);

    if (p->__nif6e_addr.sin6_family == AF_INET6)
      address->address.address6 = *((struct sockaddr_in6*) &p->__nif6e_addr);
    else
      address->address.address4 = *((struct sockaddr_in*) &p->__nif6e_addr);

    /* TODO: Retrieve netmask using SIOCGIFNETMASK ioctl */

    address->is_internal = p->__nif6e_flags & _NIF6E_FLAGS_LOOPBACK ? 1 : 0;
    memset(address->phys_addr, 0, sizeof(address->phys_addr));
    address++;
  }

  uv__free(ifc.__nif6h_buffer);
  uv__close(sockfd);
  return 0;
}
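
/*
 * Both loops above walk the same SIOCGIFCONF6 buffer: the first pass only
 * counts entries that pass the family and link-active filters, the second
 * pass fills in one uv_interface_address_t per surviving entry. Any new
 * filter must be added to both passes, or the fill loop will run past the
 * array sized by the count loop.
 */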

int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  uv_interface_address_t* address;
  uv_interface_address_t* addresses_v6;
  int sockfd;
  int maxsize;
  struct ifconf ifc;
  struct ifreq flg;
  struct ifreq* ifr;
  struct ifreq* p;
  int count_v6;
  int rc;

  *count = 0;
  *addresses = NULL;

  /* get the ipv6 addresses first */
  rc = uv__interface_addresses_v6(&addresses_v6, &count_v6);
  if (rc != 0)
    return rc;

  /* now get the ipv4 addresses */

  /* Assume maximum buffer size allowable */
  maxsize = 16384;

  sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
  if (0 > sockfd) {
    uv_free_interface_addresses(addresses_v6, count_v6);
    return UV__ERR(errno);
  }

  ifc.ifc_req = uv__calloc(1, maxsize);
  ifc.ifc_len = maxsize;
  if (ifc.ifc_req == NULL) {
    uv_free_interface_addresses(addresses_v6, count_v6);
    uv__close(sockfd);
    return UV_ENOMEM;
  }

  if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) {
    uv_free_interface_addresses(addresses_v6, count_v6);
    uv__free(ifc.ifc_req);
    uv__close(sockfd);
    return UV__ERR(errno);
  }

#define MAX(a,b) (((a)>(b))?(a):(b))
#define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p))

  /* Count all up and running ipv4/ipv6 addresses */
  ifr = ifc.ifc_req;
  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
    p = ifr;
    ifr = (struct ifreq*)
        ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));

    if (!(p->ifr_addr.sa_family == AF_INET6 ||
          p->ifr_addr.sa_family == AF_INET))
      continue;

    memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
    if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
      uv_free_interface_addresses(addresses_v6, count_v6);
      uv__free(ifc.ifc_req);
      uv__close(sockfd);
      return UV__ERR(errno);
    }

    if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
      continue;

    (*count)++;
  }

  if (*count == 0 && count_v6 == 0) {
    uv__free(addresses_v6);
    uv__free(ifc.ifc_req);
    uv__close(sockfd);
    return 0;
  }

  /* Alloc the return interface structs */
  *addresses = uv__malloc((*count + count_v6) *
                          sizeof(uv_interface_address_t));
  if (!(*addresses)) {
    uv_free_interface_addresses(addresses_v6, count_v6);
    uv__free(ifc.ifc_req);
    uv__close(sockfd);
    return UV_ENOMEM;
  }
  address = *addresses;

  /* Copy over the ipv6 addresses. Ownership of the name strings moves to
   * *addresses, so only the old array itself is freed.
   */
  memcpy(address, addresses_v6, count_v6 * sizeof(uv_interface_address_t));
  address += count_v6;
  *count += count_v6;
  uv__free(addresses_v6);

  ifr = ifc.ifc_req;
  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
    p = ifr;
    ifr = (struct ifreq*)
        ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));

    if (!(p->ifr_addr.sa_family == AF_INET6 ||
          p->ifr_addr.sa_family == AF_INET))
      continue;

    memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
    if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
      uv__free(ifc.ifc_req);
      uv__close(sockfd);
      return UV__ERR(errno);
    }

    if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
      continue;

    /* All conditions above must match count loop */

    address->name = uv__strdup(p->ifr_name);

    if (p->ifr_addr.sa_family == AF_INET6) {
      address->address.address6 = *((struct sockaddr_in6*) &p->ifr_addr);
    } else {
      address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr);
    }

    address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;
    memset(address->phys_addr, 0, sizeof(address->phys_addr));
    address++;
  }

#undef ADDR_SIZE
#undef MAX

  uv__free(ifc.ifc_req);
  uv__close(sockfd);
  return 0;
}


void uv_free_interface_addresses(uv_interface_address_t* addresses,
                                 int count) {
  int i;
  for (i = 0; i < count; ++i)
    uv__free(addresses[i].name);
  uv__free(addresses);
}
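
/*
 * A minimal sketch of the expected call pattern; uv_interface_addresses()
 * and uv_free_interface_addresses() are the public libuv API:
 *
 *   uv_interface_address_t* ifaces;
 *   int n;
 *   int i;
 *
 *   if (uv_interface_addresses(&ifaces, &n) == 0) {
 *     for (i = 0; i < n; i++)
 *       printf("%s%s\n", ifaces[i].name,
 *              ifaces[i].is_internal ? " (internal)" : "");
 *     uv_free_interface_addresses(ifaces, n);
 *   }
 */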

void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct epoll_event* events;
  struct epoll_event dummy;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct epoll_event*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events != NULL)
    /* Invalidate events with same file descriptor */
    for (i = 0; i < nfds; i++)
      if ((int) events[i].fd == fd)
        events[i].fd = -1;

  /* Remove the file descriptor from the epoll. */
  if (loop->ep != NULL)
    epoll_ctl(loop->ep, EPOLL_CTL_DEL, fd, &dummy);
}


int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct pollfd p[1];
  int rv;

  p[0].fd = fd;
  p[0].events = POLLIN;

  do
    rv = poll(p, 1, 0);
  while (rv == -1 && errno == EINTR);

  if (rv == -1)
    abort();

  if (p[0].revents & POLLNVAL)
    return -1;

  return 0;
}


void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}


int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}


int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb,
                      const char* filename, unsigned int flags) {
  uv__os390_epoll* ep;
  _RFIS reg_struct;
  char* path;
  int rc;

  if (uv__is_active(handle))
    return UV_EINVAL;

  ep = handle->loop->ep;
  assert(ep->msg_queue != -1);

  reg_struct.__rfis_cmd = _RFIS_REG;
  reg_struct.__rfis_qid = ep->msg_queue;
  reg_struct.__rfis_type = 1;
  memcpy(reg_struct.__rfis_utok, &handle, sizeof(handle));

  path = uv__strdup(filename);
  if (path == NULL)
    return UV_ENOMEM;

  rc = __w_pioctl(path, _IOCC_REGFILEINT, sizeof(reg_struct), &reg_struct);
  if (rc != 0) {
    uv__free(path);
    return UV__ERR(errno);
  }

  uv__handle_start(handle);
  handle->path = path;
  handle->cb = cb;
  memcpy(handle->rfis_rftok, reg_struct.__rfis_rftok,
         sizeof(handle->rfis_rftok));

  return 0;
}


int uv_fs_event_stop(uv_fs_event_t* handle) {
  uv__os390_epoll* ep;
  _RFIS reg_struct;
  int rc;

  if (!uv__is_active(handle))
    return 0;

  ep = handle->loop->ep;
  assert(ep->msg_queue != -1);

  reg_struct.__rfis_cmd = _RFIS_UNREG;
  reg_struct.__rfis_qid = ep->msg_queue;
  reg_struct.__rfis_type = 1;
  memcpy(reg_struct.__rfis_rftok, handle->rfis_rftok,
         sizeof(handle->rfis_rftok));

  /*
   * This call will take "/" as the path argument in case we
   * don't care to supply the correct path. The system will simply
   * ignore it.
   */
  rc = __w_pioctl("/", _IOCC_REGFILEINT, sizeof(reg_struct), &reg_struct);
  if (rc != 0 && errno != EALREADY && errno != ENOENT)
    abort();

  uv__handle_stop(handle);

  return 0;
}


static int os390_message_queue_handler(uv__os390_epoll* ep) {
  uv_fs_event_t* handle;
  int msglen;
  int events;
  _RFIM msg;

  if (ep->msg_queue == -1)
    return 0;

  msglen = msgrcv(ep->msg_queue, &msg, sizeof(msg), 0, IPC_NOWAIT);

  if (msglen == -1 && errno == ENOMSG)
    return 0;

  if (msglen == -1)
    abort();

  events = 0;
  if (msg.__rfim_event == _RFIM_ATTR || msg.__rfim_event == _RFIM_WRITE)
    events = UV_CHANGE;
  else if (msg.__rfim_event == _RFIM_RENAME)
    events = UV_RENAME;
  else
    /* Some event that we are not interested in. */
    return 0;

  handle = *(uv_fs_event_t**)(msg.__rfim_utok);
  handle->cb(handle, uv__basename_r(handle->path), events, 0);
  return 1;
}
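
/*
 * Design note: z/OS delivers file interest (_IOCC_REGFILEINT) notifications
 * through a System V message queue rather than a file descriptor. The
 * uv__os390_epoll shim therefore watches ep->msg_queue alongside regular
 * fds and flags its events with is_msg, which is why uv__io_poll() below
 * routes those events to os390_message_queue_handler() instead of a
 * watcher callback.
 */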

void uv__io_poll(uv_loop_t* loop, int timeout) {
  static const int max_safe_timeout = 1789569;
  struct epoll_event events[1024];
  struct epoll_event* pe;
  struct epoll_event e;
  uv__os390_epoll* ep;
  int real_timeout;
  QUEUE* q;
  uv__io_t* w;
  uint64_t base;
  int count;
  int nfds;
  int fd;
  int op;
  int i;
  int nevents;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    uv_stream_t* stream;

    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    w = QUEUE_DATA(q, uv__io_t, watcher_queue);

    assert(w->pevents != 0);
    assert(w->fd >= 0);

    stream = container_of(w, uv_stream_t, io_watcher);

    assert(w->fd < (int) loop->nwatchers);

    e.events = w->pevents;
    e.fd = w->fd;

    if (w->events == 0)
      op = EPOLL_CTL_ADD;
    else
      op = EPOLL_CTL_MOD;

    /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
     * events, skip the syscall and squelch the events after epoll_wait().
     */
    if (epoll_ctl(loop->ep, op, w->fd, &e)) {
      if (errno != EEXIST)
        abort();

      assert(op == EPOLL_CTL_ADD);

      /* We've reactivated a file descriptor that's been watched before. */
      if (epoll_ctl(loop->ep, EPOLL_CTL_MOD, w->fd, &e))
        abort();
    }

    w->events = w->pevents;
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */
  real_timeout = timeout;
  nevents = 0;

  nfds = 0;
  for (;;) {
    /* Clamp the timeout so it cannot overflow the kernel's internal
     * representation on 32-bit systems (about 30 minutes).
     */
    if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
      timeout = max_safe_timeout;

    nfds = epoll_wait(loop->ep, events,
                      ARRAY_SIZE(events), timeout);

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    base = loop->time;
    SAVE_ERRNO(uv__update_time(loop));
    if (nfds == 0) {
      assert(timeout != -1);

      if (timeout > 0) {
        timeout = real_timeout - timeout;
        continue;
      }

      return;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->fd;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      ep = loop->ep;
      if (pe->is_msg) {
        os390_message_queue_handler(ep);
        continue;
      }

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         *
         * Ignore all errors because we may be racing with another thread
         * when the file descriptor is closed.
         */
        epoll_ctl(loop->ep, EPOLL_CTL_DEL, fd, pe);
        continue;
      }

      /* Give users only events they're interested in. Prevents spurious
       * callbacks when previous callback invocation in this loop has stopped
       * the current watcher. Also, filters out events that the user has not
       * requested us to watch.
       */
      pe->events &= w->pevents | POLLERR | POLLHUP;

      if (pe->events == POLLERR || pe->events == POLLHUP)
        pe->events |= w->pevents & (POLLIN | POLLOUT);

      if (pe->events != 0) {
        w->cb(loop, w, pe->events);
        nevents++;
      }
    }
    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    real_timeout -= (loop->time - base);
    if (real_timeout <= 0)
      return;

    timeout = real_timeout;
  }
}


void uv__set_process_title(const char* title) {
  /* do nothing */
}


int uv__io_fork(uv_loop_t* loop) {
  /*
    Nullify the msg queue but don't close it because
    it is still being used by the parent.
  */
  loop->ep = NULL;

  uv__platform_loop_delete(loop);
  return uv__platform_loop_init(loop);
}