1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 2 * 3 * Permission is hereby granted, free of charge, to any person obtaining a copy 4 * of this software and associated documentation files (the "Software"), to 5 * deal in the Software without restriction, including without limitation the 6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 7 * sell copies of the Software, and to permit persons to whom the Software is 8 * furnished to do so, subject to the following conditions: 9 * 10 * The above copyright notice and this permission notice shall be included in 11 * all copies or substantial portions of the Software. 12 * 13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 19 * IN THE SOFTWARE. 20 */ 21 22 /* Caveat emptor: this file deviates from the libuv convention of returning 23 * negated errno codes. Most uv_fs_*() functions map directly to the system 24 * call of the same name. For more complex wrappers, it's easier to just 25 * return -1 with errno set. The dispatcher in uv__fs_work() takes care of 26 * getting the errno to the right place (req->result or as the return value.) 
 */

#include "uv.h"
#include "internal.h"

#include <errno.h>
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h> /* PATH_MAX */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <pthread.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>

/* Platforms with a native preadv()/pwritev(); gates the vectored
 * positional I/O paths in uv__fs_read() and uv__fs_write() below.
 */
#if defined(__DragonFly__)      ||                                            \
    defined(__FreeBSD__)        ||                                            \
    defined(__FreeBSD_kernel__) ||                                            \
    defined(__OpenBSD__)        ||                                            \
    defined(__NetBSD__)
# define HAVE_PREADV 1
#else
# define HAVE_PREADV 0
#endif

#if defined(__linux__) || defined(__sun)
# include <sys/sendfile.h>
#endif

#if defined(__APPLE__)
# include <sys/sysctl.h>
#elif defined(__linux__) && !defined(FICLONE)
/* Old kernel headers may lack the clone ioctl; define it ourselves. */
# include <sys/ioctl.h>
# define FICLONE _IOW(0x94, 9, int)
#endif

#if defined(_AIX) && !defined(_AIX71)
# include <utime.h>
#endif

#if defined(__APPLE__)          ||                                            \
    defined(__DragonFly__)      ||                                            \
    defined(__FreeBSD__)        ||                                            \
    defined(__FreeBSD_kernel__) ||                                            \
    defined(__OpenBSD__)        ||                                            \
    defined(__NetBSD__)
# include <sys/param.h>
# include <sys/mount.h>
#elif defined(__sun) || defined(__MVS__) || defined(__NetBSD__) || defined(__HAIKU__)
# include <sys/statvfs.h>
#else
# include <sys/statfs.h>
#endif

#if defined(_AIX) && _XOPEN_SOURCE <= 600
extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
#endif

/* Common prologue for every uv_fs_*() entry point: validate the request,
 * stamp it with its operation type and reset all result/ownership fields.
 */
#define INIT(subtype)                                                         \
  do {                                                                        \
    if (req == NULL)                                                          \
      return UV_EINVAL;                                                       \
    UV_REQ_INIT(req, UV_FS);                                                  \
    req->fs_type = UV_FS_ ## subtype;                                         \
    req->result = 0;                                                          \
    req->ptr = NULL;                                                          \
    req->loop = loop;                                                         \
    req->path = NULL;                                                         \
    req->new_path = NULL;                                                     \
    req->bufs = NULL;                                                         \
    req->cb = cb;                                                             \
  }                                                                           \
  while (0)

/* Store the path on the request. Async calls (cb != NULL) must copy the
 * string because the caller's buffer may not outlive the request; sync
 * calls borrow it for the duration of the call.
 */
#define PATH                                                                  \
  do {                                                                        \
    assert(path != NULL);                                                     \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
    } else {                                                                  \
      req->path = uv__strdup(path);                                           \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)

/* Like PATH but for two-path operations (rename, link, copyfile, ...).
 * For async calls both strings live in one allocation; uv_fs_req_cleanup()
 * frees req->path only, which releases both.
 */
#define PATH2                                                                 \
  do {                                                                        \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
      req->new_path = new_path;                                               \
    } else {                                                                  \
      size_t path_len;                                                        \
      size_t new_path_len;                                                    \
      path_len = strlen(path) + 1;                                            \
      new_path_len = strlen(new_path) + 1;                                    \
      req->path = uv__malloc(path_len + new_path_len);                        \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
      req->new_path = req->path + path_len;                                   \
      memcpy((void*) req->path, path, path_len);                              \
      memcpy((void*) req->new_path, new_path, new_path_len);                  \
    }                                                                         \
  }                                                                           \
  while (0)

/* Dispatch the prepared request: async requests are queued on the thread
 * pool and complete via uv__fs_done(); sync requests run the work function
 * inline and return its result immediately.
 */
#define POST                                                                  \
  do {                                                                        \
    if (cb != NULL) {                                                         \
      uv__req_register(loop, req);                                            \
      uv__work_submit(loop,                                                   \
                      &req->work_req,                                         \
                      UV__WORK_FAST_IO,                                       \
                      uv__fs_work,                                            \
                      uv__fs_done);                                           \
      return 0;                                                               \
    }                                                                         \
    else {                                                                    \
      uv__fs_work(&req->work_req);                                            \
      return req->result;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)


/* close() wrapper that treats EINTR/EINPROGRESS as success: the descriptor
 * is released by the kernel either way, so retrying could close a fd that
 * another thread has already reused.
 */
static int uv__fs_close(int fd) {
  int rc;

  rc = uv__close_nocancel(fd);
  if (rc == -1)
    if (errno == EINTR || errno == EINPROGRESS)
      rc = 0;  /* The close is in progress, not an error. */

  return rc;
}


static ssize_t uv__fs_fsync(uv_fs_t* req) {
#if defined(__APPLE__)
  /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
   * to the drive platters. This is in contrast to Linux's fdatasync and fsync
   * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
   * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
   * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
   * This is the same approach taken by sqlite, except sqlite does not issue
   * an F_BARRIERFSYNC call.
   */
  int r;

  r = fcntl(req->file, F_FULLFSYNC);
  if (r != 0)
    r = fcntl(req->file, 85 /* F_BARRIERFSYNC */);  /* fsync + barrier */
  if (r != 0)
    r = fsync(req->file);
  return r;
#else
  return fsync(req->file);
#endif
}


/* fdatasync() where the platform provides it, otherwise the closest
 * available equivalent.
 */
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
  return fdatasync(req->file);
#elif defined(__APPLE__)
  /* See the comment in uv__fs_fsync. */
  return uv__fs_fsync(req);
#else
  return fsync(req->file);
#endif
}


/* Convert seconds-since-epoch (double) to a timespec, truncating to
 * microsecond resolution for cross-platform consistency.
 */
UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
  struct timespec ts;
  ts.tv_sec = time;
  ts.tv_nsec = (uint64_t)(time * 1000000) % 1000000 * 1000;
  return ts;
}

/* Convert seconds-since-epoch (double) to a timeval (microseconds). */
UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
  struct timeval tv;
  tv.tv_sec = time;
  tv.tv_usec = (uint64_t)(time * 1000000) % 1000000;
  return tv;
}

/* Set atime/mtime on the open descriptor req->file. */
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__HAIKU__)
  /* utimesat() has nanosecond resolution but we stick to microseconds
   * for the sake of consistency with other platforms.
   */
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
  return utimensat(req->file, NULL, ts, 0);
#else
  return futimens(req->file, ts);
#endif
#elif defined(__APPLE__)                                                      \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__FreeBSD_kernel__)                                            \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)                                                   \
    || defined(__sun)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
# if defined(__sun)
  return futimesat(req->file, NULL, tv);
# else
  return futimes(req->file, tv);
# endif
#elif defined(__MVS__)
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __fchattr(req->file, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}


static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
  return mkdtemp((char*) req->path) ? 0 : -1;
}


/* Resolved lazily via dlsym(); mkostemp() is not available everywhere. */
static int (*uv__mkostemp)(char*, int);


static void uv__mkostemp_initonce(void) {
  /* z/os doesn't have RTLD_DEFAULT but that's okay
   * because it doesn't have mkostemp(O_CLOEXEC) either.
   */
#ifdef RTLD_DEFAULT
  uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");

  /* We don't care about errors, but we do want to clean them up.
   * If there has been no error, then dlerror() will just return
   * NULL.
   */
  dlerror();
#endif  /* RTLD_DEFAULT */
}


/* Create a unique temporary file from the "...XXXXXX" template in
 * req->path. Prefers mkostemp(O_CLOEXEC) when available; otherwise falls
 * back to mkstemp() under the loop's cloexec lock so a concurrent
 * fork+exec cannot leak the descriptor.
 */
static int uv__fs_mkstemp(uv_fs_t* req) {
  static uv_once_t once = UV_ONCE_INIT;
  int r;
#ifdef O_CLOEXEC
  static int no_cloexec_support;
#endif
  static const char pattern[] = "XXXXXX";
  static const size_t pattern_size = sizeof(pattern) - 1;
  char* path;
  size_t path_length;

  path = (char*) req->path;
  path_length = strlen(path);

  /* EINVAL can be returned for 2 reasons:
      1. The template's last 6 characters were not XXXXXX
      2. open() didn't support O_CLOEXEC
     We want to avoid going to the fallback path in case
     of 1, so it's manually checked before. */
  if (path_length < pattern_size ||
      strcmp(path + path_length - pattern_size, pattern)) {
    errno = EINVAL;
    return -1;
  }

  uv_once(&once, uv__mkostemp_initonce);

#ifdef O_CLOEXEC
  if (no_cloexec_support == 0 && uv__mkostemp != NULL) {
    r = uv__mkostemp(path, O_CLOEXEC);

    if (r >= 0)
      return r;

    /* If mkostemp() returns EINVAL, it means the kernel doesn't
       support O_CLOEXEC, so we just fallback to mkstemp() below. */
    if (errno != EINVAL)
      return r;

    /* We set the static variable so that next calls don't even
       try to use mkostemp. */
    no_cloexec_support = 1;
  }
#endif  /* O_CLOEXEC */

  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = mkstemp(path);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
}


/* open() with O_CLOEXEC; when the platform lacks O_CLOEXEC, open and then
 * set the flag with fcntl() under the loop's cloexec lock.
 */
static ssize_t uv__fs_open(uv_fs_t* req) {
#ifdef O_CLOEXEC
  return open(req->path, req->flags | O_CLOEXEC, req->mode);
#else  /* O_CLOEXEC */
  int r;

  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = open(req->path, req->flags, req->mode);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
#endif  /* O_CLOEXEC */
}


#if !HAVE_PREADV
/* Emulate preadv() with successive pread() calls. Returns bytes read, or
 * a negated errno if the very first read fails; a partial result is
 * returned as-is when a later read errors out.
 */
static ssize_t uv__fs_preadv(uv_file fd,
                             uv_buf_t* bufs,
                             unsigned int nbufs,
                             off_t off) {
  uv_buf_t* buf;
  uv_buf_t* end;
  ssize_t result;
  ssize_t rc;
  size_t pos;

  assert(nbufs > 0);

  result = 0;
  pos = 0;
  buf = bufs + 0;
  end = bufs + nbufs;

  for (;;) {
    do
      rc = pread(fd, buf->base + pos, buf->len - pos, off + result);
    while (rc == -1 && errno == EINTR);

    if (rc == 0)
      break;

    if (rc == -1 && result == 0)
      return UV__ERR(errno);

    if (rc == -1)
      break;  /* We read some data so return that, ignore the error.
               */

    pos += rc;
    result += rc;

    if (pos < buf->len)
      continue;

    /* Current buffer is full; advance to the next one. */
    pos = 0;
    buf += 1;

    if (buf == end)
      break;
  }

  return result;
}
#endif


/* Read into req->bufs, positionally when req->off >= 0. On Linux the
 * native preadv syscall is tried first and permanently disabled (static
 * no_preadv) if the kernel reports ENOSYS.
 */
static ssize_t uv__fs_read(uv_fs_t* req) {
#if defined(__linux__)
  static int no_preadv;
#endif
  unsigned int iovmax;
  ssize_t result;

  /* Clamp the buffer count to the system's IOV_MAX. */
  iovmax = uv__getiovmax();
  if (req->nbufs > iovmax)
    req->nbufs = iovmax;

  if (req->off < 0) {
    if (req->nbufs == 1)
      result = read(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      result = readv(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }

#if HAVE_PREADV
    result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
# if defined(__linux__)
    if (no_preadv) retry:
# endif
    {
      result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
    }
# if defined(__linux__)
    else {
      result = uv__preadv(req->file,
                          (struct iovec*)req->bufs,
                          req->nbufs,
                          req->off);
      if (result == -1 && errno == ENOSYS) {
        no_preadv = 1;
        goto retry;
      }
    }
# endif
#endif
  }

done:
  /* Early cleanup of bufs allocation, since we're done with it. */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);

  req->bufs = NULL;
  req->nbufs = 0;

#ifdef __PASE__
  /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
  if (result == -1 && errno == EOPNOTSUPP) {
    struct stat buf;
    ssize_t rc;
    rc = fstat(req->file, &buf);
    if (rc == 0 && S_ISDIR(buf.st_mode)) {
      errno = EISDIR;
    }
  }
#endif

  return result;
}


/* Old OS X SDKs declare the scandir callbacks without const. */
#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
#define UV_CONST_DIRENT uv__dirent_t
#else
#define UV_CONST_DIRENT const uv__dirent_t
#endif


/* scandir() filter: drop the "." and ".." pseudo-entries. */
static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
  return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
}


/* scandir() comparator: plain lexicographic order by entry name. */
static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
  return strcmp((*a)->d_name, (*b)->d_name);
}


/* List req->path; the sorted entry array is stored in req->ptr and the
 * result count is returned. req->nbufs is reused as the iteration cursor
 * for uv_fs_scandir_next().
 */
static ssize_t uv__fs_scandir(uv_fs_t* req) {
  uv__dirent_t** dents;
  int n;

  dents = NULL;
  n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);

  /* NOTE: We will use nbufs as an index field */
  req->nbufs = 0;

  if (n == 0) {
    /* OS X still needs to deallocate some memory.
     * Memory was allocated using the system allocator, so use free() here.
     */
    free(dents);
    dents = NULL;
  } else if (n == -1) {
    return n;
  }

  req->ptr = dents;

  return n;
}

/* Allocate a uv_dir_t and open the directory stream; stored in req->ptr. */
static int uv__fs_opendir(uv_fs_t* req) {
  uv_dir_t* dir;

  dir = uv__malloc(sizeof(*dir));
  if (dir == NULL)
    goto error;

  dir->dir = opendir(req->path);
  if (dir->dir == NULL)
    goto error;

  req->ptr = dir;
  return 0;

error:
  uv__free(dir);
  req->ptr = NULL;
  return -1;
}

/* Fill dir->dirents (up to dir->nentries) with strdup'ed names; returns
 * the number of entries read, 0 at end of directory, -1 on error (any
 * names allocated so far are released).
 */
static int uv__fs_readdir(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirent;
  struct dirent* res;
  unsigned int dirent_idx;
  unsigned int i;

  dir = req->ptr;
  dirent_idx = 0;

  while (dirent_idx < dir->nentries) {
    /* readdir() returns NULL on end of directory, as well as on error. errno
       is used to differentiate between the two conditions. */
    errno = 0;
    res = readdir(dir->dir);

    if (res == NULL) {
      if (errno != 0)
        goto error;
      break;
    }

    if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
      continue;

    dirent = &dir->dirents[dirent_idx];
    dirent->name = uv__strdup(res->d_name);

    if (dirent->name == NULL)
      goto error;

    dirent->type = uv__fs_get_dirent_type(res);
    ++dirent_idx;
  }

  return dirent_idx;

error:
  for (i = 0; i < dirent_idx; ++i) {
    uv__free((char*) dir->dirents[i].name);
    dir->dirents[i].name = NULL;
  }

  return -1;
}

/* Close the directory stream and free the uv_dir_t from uv__fs_opendir. */
static int uv__fs_closedir(uv_fs_t* req) {
  uv_dir_t* dir;

  dir = req->ptr;

  if (dir->dir != NULL) {
    closedir(dir->dir);
    dir->dir = NULL;
  }

  uv__free(req->ptr);
  req->ptr = NULL;
  return 0;
}

/* statfs()/statvfs() the path and copy the result into a freshly
 * allocated uv_statfs_t stored in req->ptr.
 */
static int uv__fs_statfs(uv_fs_t* req) {
  uv_statfs_t* stat_fs;
#if defined(__sun) || defined(__MVS__) || defined(__NetBSD__) || defined(__HAIKU__)
  struct statvfs buf;

  if (0 != statvfs(req->path, &buf))
#else
  struct statfs buf;

  if (0 != statfs(req->path, &buf))
#endif /* defined(__sun) */
    return -1;

  stat_fs = uv__malloc(sizeof(*stat_fs));
  if (stat_fs == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__sun) || defined(__MVS__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__HAIKU__)
  stat_fs->f_type = 0;  /* f_type is not supported. */
#else
  stat_fs->f_type = buf.f_type;
#endif
  stat_fs->f_bsize = buf.f_bsize;
  stat_fs->f_blocks = buf.f_blocks;
  stat_fs->f_bfree = buf.f_bfree;
  stat_fs->f_bavail = buf.f_bavail;
  stat_fs->f_files = buf.f_files;
  stat_fs->f_ffree = buf.f_ffree;
  req->ptr = stat_fs;
  return 0;
}

/* Best-effort maximum path length for `path`; falls back to the
 * compile-time UV__PATH_MAX when pathconf() cannot say.
 */
static ssize_t uv__fs_pathmax_size(const char* path) {
  ssize_t pathmax;

  pathmax = pathconf(path, _PC_PATH_MAX);

  if (pathmax == -1)
    pathmax = UV__PATH_MAX;

  return pathmax;
}

/* readlink() into a heap buffer stored in req->ptr (NUL-terminated). */
static ssize_t uv__fs_readlink(uv_fs_t* req) {
  ssize_t maxlen;
  ssize_t len;
  char* buf;

#if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
  maxlen = uv__fs_pathmax_size(req->path);
#else
  /* We may not have a real PATH_MAX.  Read size of link.  */
  struct stat st;
  int ret;
  ret = lstat(req->path, &st);
  if (ret != 0)
    return -1;
  if (!S_ISLNK(st.st_mode)) {
    errno = EINVAL;
    return -1;
  }

  maxlen = st.st_size;

  /* According to readlink(2) lstat can report st_size == 0
     for some symlinks, such as those in /proc or /sys.  */
  if (maxlen == 0)
    maxlen = uv__fs_pathmax_size(req->path);
#endif

  buf = uv__malloc(maxlen);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__MVS__)
  len = os390_readlink(req->path, buf, maxlen);
#else
  len = readlink(req->path, buf, maxlen);
#endif

  if (len == -1) {
    uv__free(buf);
    return -1;
  }

  /* Uncommon case: resize to make room for the trailing nul byte. */
  if (len == maxlen) {
    buf = uv__reallocf(buf, len + 1);

    if (buf == NULL)
      return -1;
  }

  buf[len] = '\0';
  req->ptr = buf;

  return 0;
}

/* realpath() into a heap buffer stored in req->ptr. POSIX.1-2008 hosts
 * let realpath() allocate; otherwise we size the buffer ourselves.
 */
static ssize_t uv__fs_realpath(uv_fs_t* req) {
  char* buf;

#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
  buf = realpath(req->path, NULL);
  if (buf == NULL)
    return -1;
#else
  ssize_t len;

  len = uv__fs_pathmax_size(req->path);
  buf = uv__malloc(len + 1);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

  if (realpath(req->path, buf) == NULL) {
    uv__free(buf);
    return -1;
  }
#endif

  req->ptr = buf;

  return 0;
}

/* Userspace sendfile() fallback: copy up to req->bufsml[0].len bytes from
 * the fd stashed in req->flags to req->file through an 8 KiB bounce
 * buffer. Returns bytes sent (recorded back into req->off) or -1.
 */
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
  struct pollfd pfd;
  int use_pread;
  off_t offset;
  ssize_t nsent;
  ssize_t nread;
  ssize_t nwritten;
  size_t buflen;
  size_t len;
  ssize_t n;
  int in_fd;
  int out_fd;
  char buf[8192];

  len = req->bufsml[0].len;
  in_fd = req->flags;
  out_fd = req->file;
  offset = req->off;
  use_pread = 1;

  /* Here are the rules regarding errors:
   *
   * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
   *    The user needs to know that some data has already been sent, to stop
   *    them from sending it twice.
   *
   * 2. Write errors are always reported. Write errors are bad because they
   *    mean data loss: we've read data but now we can't write it out.
   *
   * We try to use pread() and fall back to regular read() if the source fd
   * doesn't support positional reads, for example when it's a pipe fd.
   *
   * If we get EAGAIN when writing to the target fd, we poll() on it until
   * it becomes writable again.
   *
   * FIXME: If we get a write error when use_pread==1, it should be safe to
   * return the number of sent bytes instead of an error because pread()
   * is, in theory, idempotent.
   * However, special files in /dev or /proc
   * may support pread() but not necessarily return the same data on
   * successive reads.
   *
   * FIXME: There is no way now to signal that we managed to send *some* data
   * before a write error.
   */
  for (nsent = 0; (size_t) nsent < len; ) {
    buflen = len - nsent;

    if (buflen > sizeof(buf))
      buflen = sizeof(buf);

    do
      if (use_pread)
        nread = pread(in_fd, buf, buflen, offset);
      else
        nread = read(in_fd, buf, buflen);
    while (nread == -1 && errno == EINTR);

    if (nread == 0)
      goto out;  /* EOF on the source. */

    if (nread == -1) {
      /* Source fd doesn't support positional reads; retry sequentially. */
      if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
        use_pread = 0;
        continue;
      }

      if (nsent == 0)
        nsent = -1;

      goto out;
    }

    /* Write out everything that was just read, polling through EAGAIN. */
    for (nwritten = 0; nwritten < nread; ) {
      do
        n = write(out_fd, buf + nwritten, nread - nwritten);
      while (n == -1 && errno == EINTR);

      if (n != -1) {
        nwritten += n;
        continue;
      }

      if (errno != EAGAIN && errno != EWOULDBLOCK) {
        nsent = -1;
        goto out;
      }

      pfd.fd = out_fd;
      pfd.events = POLLOUT;
      pfd.revents = 0;

      do
        n = poll(&pfd, 1, -1);
      while (n == -1 && errno == EINTR);

      if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
        errno = EIO;
        nsent = -1;
        goto out;
      }
    }

    offset += nread;
    nsent += nread;
  }

out:
  if (nsent != -1)
    req->off = offset;

  return nsent;
}


/* Kernel sendfile() where available, falling back to the emulation above
 * when the syscall rejects this fd combination (EINVAL/EIO/ENOTSOCK/EXDEV).
 * Note: the source fd travels in req->flags, the target fd in req->file.
 */
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
  int in_fd;
  int out_fd;

  in_fd = req->flags;
  out_fd = req->file;

#if defined(__linux__) || defined(__sun)
  {
    off_t off;
    ssize_t r;

    off = req->off;
    r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len);

    /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
     * it still writes out data. Fortunately, we can detect it by checking if
     * the offset has been updated.
     */
    if (r != -1 || off > req->off) {
      r = off - req->off;
      req->off = off;
      return r;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#elif defined(__APPLE__)           ||                                         \
      defined(__DragonFly__)       ||                                         \
      defined(__FreeBSD__)         ||                                         \
      defined(__FreeBSD_kernel__)
  {
    off_t len;
    ssize_t r;

    /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
     * non-blocking mode and not all data could be written. If a non-zero
     * number of bytes have been sent, we don't consider it an error.
     */

#if defined(__FreeBSD__) || defined(__DragonFly__)
    len = 0;
    r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#elif defined(__FreeBSD_kernel__)
    len = 0;
    r = bsd_sendfile(in_fd,
                     out_fd,
                     req->off,
                     req->bufsml[0].len,
                     NULL,
                     &len,
                     0);
#else
    /* The darwin sendfile takes len as an input for the length to send,
     * so make sure to initialize it with the caller's value. */
    len = req->bufsml[0].len;
    r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
#endif

    /*
     * The man page for sendfile(2) on DragonFly states that `len` contains
     * a meaningful value ONLY in case of EAGAIN and EINTR.
     * Nothing is said about it's value in case of other errors, so better
     * not depend on the potential wrong assumption that is was not modified
     * by the syscall.
     */
    if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
      req->off += len;
      return (ssize_t) len;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#else
  /* Squelch compiler warnings. */
  (void) &in_fd;
  (void) &out_fd;

  return uv__fs_sendfile_emul(req);
#endif
}


/* Set atime/mtime on req->path, following symlinks. */
static ssize_t uv__fs_utime(uv_fs_t* req) {
#if defined(__linux__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__sun)                                                         \
    || defined(__HAIKU__)
  /* utimesat() has nanosecond resolution but we stick to microseconds
   * for the sake of consistency with other platforms.
   */
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, 0);
#elif defined(__APPLE__)                                                      \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__FreeBSD_kernel__)                                            \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return utimes(req->path, tv);
#elif defined(_AIX)                                                           \
    && !defined(_AIX71)
  struct utimbuf buf;
  buf.actime = req->atime;
  buf.modtime = req->mtime;
  return utime(req->path, &buf);
#elif defined(__MVS__)
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __lchattr((char*) req->path, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}


/* Set atime/mtime on req->path itself, NOT following symlinks. */
static ssize_t uv__fs_lutime(uv_fs_t* req) {
#if defined(__linux__)            ||                                          \
    defined(_AIX71)               ||                                          \
    defined(__sun)                ||                                          \
    defined(__HAIKU__)
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW);
#elif defined(__APPLE__)          ||                                          \
      defined(__DragonFly__)      ||                                          \
      defined(__FreeBSD__)        ||                                          \
      defined(__FreeBSD_kernel__) ||                                          \
      defined(__NetBSD__)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return lutimes(req->path, tv);
#else
  errno = ENOSYS;
  return -1;
#endif
}


/* Write req->bufs to req->file, positionally when req->off >= 0. Mirrors
 * uv__fs_read, including the Linux no_pwritev ENOSYS fallback.
 */
static ssize_t uv__fs_write(uv_fs_t* req) {
#if defined(__linux__)
  static int no_pwritev;
#endif
  ssize_t r;

  /* Serialize writes on OS X, concurrent write() and pwrite() calls result in
   * data loss. We can't use a per-file descriptor lock, the descriptor may be
   * a dup().
   */
#if defined(__APPLE__)
  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  if (pthread_mutex_lock(&lock))
    abort();
#endif

  if (req->off < 0) {
    if (req->nbufs == 1)
      r = write(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      r = writev(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }
#if HAVE_PREADV
    r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
# if defined(__linux__)
    if (no_pwritev) retry:
# endif
    {
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
    }
# if defined(__linux__)
    else {
      r = uv__pwritev(req->file,
                      (struct iovec*) req->bufs,
                      req->nbufs,
                      req->off);
      if (r == -1 && errno == ENOSYS) {
        no_pwritev = 1;
        goto retry;
      }
    }
# endif
#endif
  }

done:
#if defined(__APPLE__)
  if (pthread_mutex_unlock(&lock))
    abort();
#endif

  return r;
}

/* Copy req->path to req->new_path, honoring the UV_FS_COPYFILE_* flags.
 * Tries FICLONE reflinks where available, then falls back to sendfile.
 * Returns 0 on success or -1 with errno set.
 */
static ssize_t uv__fs_copyfile(uv_fs_t* req) {
  uv_fs_t fs_req;
  uv_file srcfd;
  uv_file dstfd;
  struct stat src_statsbuf;
  struct stat dst_statsbuf;
  int dst_flags;
  int result;
  int err;
  off_t bytes_to_send;
  off_t in_offset;
  off_t bytes_written;
  size_t bytes_chunk;

  dstfd = -1;
  err = 0;
  /* Open the source file. */
  srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
  uv_fs_req_cleanup(&fs_req);

  if (srcfd < 0)
    return srcfd;

  /* Get the source file's mode. */
  if (fstat(srcfd, &src_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  dst_flags = O_WRONLY | O_CREAT | O_TRUNC;

  if (req->flags & UV_FS_COPYFILE_EXCL)
    dst_flags |= O_EXCL;

  /* Open the destination file, created with the source's mode. */
  dstfd = uv_fs_open(NULL,
                     &fs_req,
                     req->new_path,
                     dst_flags,
                     src_statsbuf.st_mode,
                     NULL);
  uv_fs_req_cleanup(&fs_req);

  if (dstfd < 0) {
    err = dstfd;
    goto out;
  }

  /* Get the destination file's mode. */
  if (fstat(dstfd, &dst_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  /* Check if srcfd and dstfd refer to the same file */
  if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
      src_statsbuf.st_ino == dst_statsbuf.st_ino) {
    goto out;
  }

  /* The destination may pre-exist with a different mode; copy the mode
   * over explicitly (umask may also have masked bits at creation). */
  if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
    err = UV__ERR(errno);
#ifdef __linux__
    if (err != UV_EPERM)
      goto out;

    {
      struct statfs s;

      /* fchmod() on CIFS shares always fails with EPERM unless the share is
       * mounted with "noperm". As fchmod() is a meaningless operation on such
       * shares anyway, detect that condition and squelch the error.
       */
      if (fstatfs(dstfd, &s) == -1)
        goto out;

      if (s.f_type != /* CIFS */ 0xFF534D42u)
        goto out;
    }

    err = 0;
#else  /* !__linux__ */
    goto out;
#endif  /* !__linux__ */
  }

#ifdef FICLONE
  if (req->flags & UV_FS_COPYFILE_FICLONE ||
      req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    if (ioctl(dstfd, FICLONE, srcfd) == 0) {
      /* ioctl() with FICLONE succeeded. */
      goto out;
    }
    /* If an error occurred and force was set, return the error to the caller;
     * fall back to sendfile() when force was not set. */
    if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
      err = UV__ERR(errno);
      goto out;
    }
  }
#else
  if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    err = UV_ENOSYS;
    goto out;
  }
#endif

  /* Byte copy via uv_fs_sendfile() in SSIZE_MAX-sized chunks. */
  bytes_to_send = src_statsbuf.st_size;
  in_offset = 0;
  while (bytes_to_send != 0) {
    bytes_chunk = SSIZE_MAX;
    if (bytes_to_send < (off_t) bytes_chunk)
      bytes_chunk = bytes_to_send;
    uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL);
    bytes_written = fs_req.result;
    uv_fs_req_cleanup(&fs_req);

    if (bytes_written < 0) {
      err = bytes_written;
      break;
    }

    bytes_to_send -= bytes_written;
    in_offset += bytes_written;
  }

out:
  if (err < 0)
    result = err;
  else
    result = 0;

  /* Close the source file. */
  err = uv__close_nocheckstdio(srcfd);

  /* Don't overwrite any existing errors. */
  if (err != 0 && result == 0)
    result = err;

  /* Close the destination file if it is open. */
  if (dstfd >= 0) {
    err = uv__close_nocheckstdio(dstfd);

    /* Don't overwrite any existing errors. */
    if (err != 0 && result == 0)
      result = err;

    /* Remove the destination file if something went wrong. */
    if (result != 0) {
      uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
      /* Ignore the unlink return value, as an error already happened. */
      uv_fs_req_cleanup(&fs_req);
    }
  }

  if (result == 0)
    return 0;

  errno = UV__ERR(result);
  return -1;
}

/* Translate a platform struct stat into libuv's portable uv_stat_t;
 * fields the platform does not provide are zeroed.
 */
static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
  dst->st_dev = src->st_dev;
  dst->st_mode = src->st_mode;
  dst->st_nlink = src->st_nlink;
  dst->st_uid = src->st_uid;
  dst->st_gid = src->st_gid;
  dst->st_rdev = src->st_rdev;
  dst->st_ino = src->st_ino;
  dst->st_size = src->st_size;
  dst->st_blksize = src->st_blksize;
  dst->st_blocks = src->st_blocks;

#if defined(__APPLE__)
  dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
  dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
  dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
#elif defined(__ANDROID__)
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = src->st_atimensec;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = src->st_mtimensec;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = src->st_ctimensec;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = src->st_ctimensec;
  dst->st_flags = 0;
  dst->st_gen = 0;
#elif !defined(_AIX) && (                                                     \
    defined(__DragonFly__)   ||                                               \
    defined(__FreeBSD__)     ||                                               \
    defined(__OpenBSD__)     ||                                               \
    defined(__NetBSD__)      ||                                               \
    defined(_GNU_SOURCE)     ||                                               \
    defined(_BSD_SOURCE)     ||                                               \
    defined(_SVID_SOURCE)    ||                                               \
    defined(_XOPEN_SOURCE)   ||                                               \
    defined(_DEFAULT_SOURCE))
  dst->st_atim.tv_sec = src->st_atim.tv_sec;
  dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
# if defined(__FreeBSD__)    ||                                               \
     defined(__NetBSD__)
  dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
# else
  dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
  dst->st_flags = 0;
  dst->st_gen = 0;
# endif
#else
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = 0;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = 0;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = 0;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = 0;
  dst->st_flags = 0;
  dst->st_gen = 0;
#endif
}


/* Linux statx(2) backend shared by stat/lstat/fstat. Returns 0 on
 * success, -1 with errno on a real error, or UV_ENOSYS when statx is
 * unavailable (the static no_statx latch disables further attempts and
 * callers fall back to the classic stat family).
 */
static int uv__fs_statx(int fd,
                        const char* path,
                        int is_fstat,
                        int is_lstat,
                        uv_stat_t* buf) {
  STATIC_ASSERT(UV_ENOSYS != -1);
#ifdef __linux__
  static int no_statx;
  struct uv__statx statxbuf;
  int dirfd;
  int flags;
  int mode;
  int rc;

  if (no_statx)
    return UV_ENOSYS;

  dirfd = AT_FDCWD;
  flags = 0; /* AT_STATX_SYNC_AS_STAT */
  mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */

  if (is_fstat) {
    dirfd = fd;
    flags |= 0x1000; /* AT_EMPTY_PATH */
  }

  if (is_lstat)
    flags |= AT_SYMLINK_NOFOLLOW;

  rc = uv__statx(dirfd, path, flags, mode, &statxbuf);

  switch (rc) {
  case 0:
    break;
  case -1:
    /* EPERM happens when a seccomp filter rejects the system call.
     * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
     */
    if (errno != EINVAL && errno != EPERM && errno != ENOSYS)
      return -1;
    /* Fall through. */
  default:
    /* Normally on success, zero is returned and On error, -1 is returned.
     * Observed on S390 RHEL running in a docker container with statx not
     * implemented, rc might return 1 with 0 set as the error code in which
     * case we return ENOSYS.
     */
    no_statx = 1;
    return UV_ENOSYS;
  }

  buf->st_dev = 256 * statxbuf.stx_dev_major + statxbuf.stx_dev_minor;
  buf->st_mode = statxbuf.stx_mode;
  buf->st_nlink = statxbuf.stx_nlink;
  buf->st_uid = statxbuf.stx_uid;
  buf->st_gid = statxbuf.stx_gid;
  buf->st_rdev = statxbuf.stx_rdev_major;
  buf->st_ino = statxbuf.stx_ino;
  buf->st_size = statxbuf.stx_size;
  buf->st_blksize = statxbuf.stx_blksize;
  buf->st_blocks = statxbuf.stx_blocks;
  buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
  buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
  buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
  buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
  buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
  buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
  buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
  buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
  buf->st_flags = 0;
  buf->st_gen = 0;

  return 0;
#else
  return UV_ENOSYS;
#endif /* __linux__ */
}


/* stat() with a statx fast path on Linux; fills the portable buf. */
static int uv__fs_stat(const char *path, uv_stat_t *buf) {
  struct stat pbuf;
  int ret;

  ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
  if (ret != UV_ENOSYS)
    return ret;

  ret = stat(path, &pbuf);
  if (ret == 0)
    uv__to_stat(&pbuf, buf);

  return ret;
}


/* lstat() with a statx fast path on Linux; fills the portable buf. */
static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
  struct stat pbuf;
  int ret;

  ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
  if (ret != UV_ENOSYS)
    return ret;

  ret = lstat(path, &pbuf);
  if (ret == 0)
1449 uv__to_stat(&pbuf, buf); 1450 1451 return ret; 1452 } 1453 1454 1455 static int uv__fs_fstat(int fd, uv_stat_t *buf) { 1456 struct stat pbuf; 1457 int ret; 1458 1459 ret = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf); 1460 if (ret != UV_ENOSYS) 1461 return ret; 1462 1463 ret = fstat(fd, &pbuf); 1464 if (ret == 0) 1465 uv__to_stat(&pbuf, buf); 1466 1467 return ret; 1468 } 1469 1470 static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) { 1471 size_t offset; 1472 /* Figure out which bufs are done */ 1473 for (offset = 0; size > 0 && bufs[offset].len <= size; ++offset) 1474 size -= bufs[offset].len; 1475 1476 /* Fix a partial read/write */ 1477 if (size > 0) { 1478 bufs[offset].base += size; 1479 bufs[offset].len -= size; 1480 } 1481 return offset; 1482 } 1483 1484 static ssize_t uv__fs_write_all(uv_fs_t* req) { 1485 unsigned int iovmax; 1486 unsigned int nbufs; 1487 uv_buf_t* bufs; 1488 ssize_t total; 1489 ssize_t result; 1490 1491 iovmax = uv__getiovmax(); 1492 nbufs = req->nbufs; 1493 bufs = req->bufs; 1494 total = 0; 1495 1496 while (nbufs > 0) { 1497 req->nbufs = nbufs; 1498 if (req->nbufs > iovmax) 1499 req->nbufs = iovmax; 1500 1501 do 1502 result = uv__fs_write(req); 1503 while (result < 0 && errno == EINTR); 1504 1505 if (result <= 0) { 1506 if (total == 0) 1507 total = result; 1508 break; 1509 } 1510 1511 if (req->off >= 0) 1512 req->off += result; 1513 1514 req->nbufs = uv__fs_buf_offset(req->bufs, result); 1515 req->bufs += req->nbufs; 1516 nbufs -= req->nbufs; 1517 total += result; 1518 } 1519 1520 if (bufs != req->bufsml) 1521 uv__free(bufs); 1522 1523 req->bufs = NULL; 1524 req->nbufs = 0; 1525 1526 return total; 1527 } 1528 1529 1530 static void uv__fs_work(struct uv__work* w) { 1531 int retry_on_eintr; 1532 uv_fs_t* req; 1533 ssize_t r; 1534 1535 req = container_of(w, uv_fs_t, work_req); 1536 retry_on_eintr = !(req->fs_type == UV_FS_CLOSE || 1537 req->fs_type == UV_FS_READ); 1538 1539 do { 1540 errno = 0; 1541 1542 #define 
X(type, action) \ 1543 case UV_FS_ ## type: \ 1544 r = action; \ 1545 break; 1546 1547 switch (req->fs_type) { 1548 X(ACCESS, access(req->path, req->flags)); 1549 X(CHMOD, chmod(req->path, req->mode)); 1550 X(CHOWN, chown(req->path, req->uid, req->gid)); 1551 X(CLOSE, uv__fs_close(req->file)); 1552 X(COPYFILE, uv__fs_copyfile(req)); 1553 X(FCHMOD, fchmod(req->file, req->mode)); 1554 X(FCHOWN, fchown(req->file, req->uid, req->gid)); 1555 X(LCHOWN, lchown(req->path, req->uid, req->gid)); 1556 X(FDATASYNC, uv__fs_fdatasync(req)); 1557 X(FSTAT, uv__fs_fstat(req->file, &req->statbuf)); 1558 X(FSYNC, uv__fs_fsync(req)); 1559 X(FTRUNCATE, ftruncate(req->file, req->off)); 1560 X(FUTIME, uv__fs_futime(req)); 1561 X(LUTIME, uv__fs_lutime(req)); 1562 X(LSTAT, uv__fs_lstat(req->path, &req->statbuf)); 1563 X(LINK, link(req->path, req->new_path)); 1564 X(MKDIR, mkdir(req->path, req->mode)); 1565 X(MKDTEMP, uv__fs_mkdtemp(req)); 1566 X(MKSTEMP, uv__fs_mkstemp(req)); 1567 X(OPEN, uv__fs_open(req)); 1568 X(READ, uv__fs_read(req)); 1569 X(SCANDIR, uv__fs_scandir(req)); 1570 X(OPENDIR, uv__fs_opendir(req)); 1571 X(READDIR, uv__fs_readdir(req)); 1572 X(CLOSEDIR, uv__fs_closedir(req)); 1573 X(READLINK, uv__fs_readlink(req)); 1574 X(REALPATH, uv__fs_realpath(req)); 1575 X(RENAME, rename(req->path, req->new_path)); 1576 X(RMDIR, rmdir(req->path)); 1577 X(SENDFILE, uv__fs_sendfile(req)); 1578 X(STAT, uv__fs_stat(req->path, &req->statbuf)); 1579 X(STATFS, uv__fs_statfs(req)); 1580 X(SYMLINK, symlink(req->path, req->new_path)); 1581 X(UNLINK, unlink(req->path)); 1582 X(UTIME, uv__fs_utime(req)); 1583 X(WRITE, uv__fs_write_all(req)); 1584 default: abort(); 1585 } 1586 #undef X 1587 } while (r == -1 && errno == EINTR && retry_on_eintr); 1588 1589 if (r == -1) 1590 req->result = UV__ERR(errno); 1591 else 1592 req->result = r; 1593 1594 if (r == 0 && (req->fs_type == UV_FS_STAT || 1595 req->fs_type == UV_FS_FSTAT || 1596 req->fs_type == UV_FS_LSTAT)) { 1597 req->ptr = &req->statbuf; 1598 } 1599 
} 1600 1601 1602 static void uv__fs_done(struct uv__work* w, int status) { 1603 uv_fs_t* req; 1604 1605 req = container_of(w, uv_fs_t, work_req); 1606 uv__req_unregister(req->loop, req); 1607 1608 if (status == UV_ECANCELED) { 1609 assert(req->result == 0); 1610 req->result = UV_ECANCELED; 1611 } 1612 1613 req->cb(req); 1614 } 1615 1616 1617 int uv_fs_access(uv_loop_t* loop, 1618 uv_fs_t* req, 1619 const char* path, 1620 int flags, 1621 uv_fs_cb cb) { 1622 INIT(ACCESS); 1623 PATH; 1624 req->flags = flags; 1625 POST; 1626 } 1627 1628 1629 int uv_fs_chmod(uv_loop_t* loop, 1630 uv_fs_t* req, 1631 const char* path, 1632 int mode, 1633 uv_fs_cb cb) { 1634 INIT(CHMOD); 1635 PATH; 1636 req->mode = mode; 1637 POST; 1638 } 1639 1640 1641 int uv_fs_chown(uv_loop_t* loop, 1642 uv_fs_t* req, 1643 const char* path, 1644 uv_uid_t uid, 1645 uv_gid_t gid, 1646 uv_fs_cb cb) { 1647 INIT(CHOWN); 1648 PATH; 1649 req->uid = uid; 1650 req->gid = gid; 1651 POST; 1652 } 1653 1654 1655 int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { 1656 INIT(CLOSE); 1657 req->file = file; 1658 POST; 1659 } 1660 1661 1662 int uv_fs_fchmod(uv_loop_t* loop, 1663 uv_fs_t* req, 1664 uv_file file, 1665 int mode, 1666 uv_fs_cb cb) { 1667 INIT(FCHMOD); 1668 req->file = file; 1669 req->mode = mode; 1670 POST; 1671 } 1672 1673 1674 int uv_fs_fchown(uv_loop_t* loop, 1675 uv_fs_t* req, 1676 uv_file file, 1677 uv_uid_t uid, 1678 uv_gid_t gid, 1679 uv_fs_cb cb) { 1680 INIT(FCHOWN); 1681 req->file = file; 1682 req->uid = uid; 1683 req->gid = gid; 1684 POST; 1685 } 1686 1687 1688 int uv_fs_lchown(uv_loop_t* loop, 1689 uv_fs_t* req, 1690 const char* path, 1691 uv_uid_t uid, 1692 uv_gid_t gid, 1693 uv_fs_cb cb) { 1694 INIT(LCHOWN); 1695 PATH; 1696 req->uid = uid; 1697 req->gid = gid; 1698 POST; 1699 } 1700 1701 1702 int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { 1703 INIT(FDATASYNC); 1704 req->file = file; 1705 POST; 1706 } 1707 1708 1709 int 
uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { 1710 INIT(FSTAT); 1711 req->file = file; 1712 POST; 1713 } 1714 1715 1716 int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { 1717 INIT(FSYNC); 1718 req->file = file; 1719 POST; 1720 } 1721 1722 1723 int uv_fs_ftruncate(uv_loop_t* loop, 1724 uv_fs_t* req, 1725 uv_file file, 1726 int64_t off, 1727 uv_fs_cb cb) { 1728 INIT(FTRUNCATE); 1729 req->file = file; 1730 req->off = off; 1731 POST; 1732 } 1733 1734 1735 int uv_fs_futime(uv_loop_t* loop, 1736 uv_fs_t* req, 1737 uv_file file, 1738 double atime, 1739 double mtime, 1740 uv_fs_cb cb) { 1741 INIT(FUTIME); 1742 req->file = file; 1743 req->atime = atime; 1744 req->mtime = mtime; 1745 POST; 1746 } 1747 1748 int uv_fs_lutime(uv_loop_t* loop, 1749 uv_fs_t* req, 1750 const char* path, 1751 double atime, 1752 double mtime, 1753 uv_fs_cb cb) { 1754 INIT(LUTIME); 1755 PATH; 1756 req->atime = atime; 1757 req->mtime = mtime; 1758 POST; 1759 } 1760 1761 1762 int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { 1763 INIT(LSTAT); 1764 PATH; 1765 POST; 1766 } 1767 1768 1769 int uv_fs_link(uv_loop_t* loop, 1770 uv_fs_t* req, 1771 const char* path, 1772 const char* new_path, 1773 uv_fs_cb cb) { 1774 INIT(LINK); 1775 PATH2; 1776 POST; 1777 } 1778 1779 1780 int uv_fs_mkdir(uv_loop_t* loop, 1781 uv_fs_t* req, 1782 const char* path, 1783 int mode, 1784 uv_fs_cb cb) { 1785 INIT(MKDIR); 1786 PATH; 1787 req->mode = mode; 1788 POST; 1789 } 1790 1791 1792 int uv_fs_mkdtemp(uv_loop_t* loop, 1793 uv_fs_t* req, 1794 const char* tpl, 1795 uv_fs_cb cb) { 1796 INIT(MKDTEMP); 1797 req->path = uv__strdup(tpl); 1798 if (req->path == NULL) 1799 return UV_ENOMEM; 1800 POST; 1801 } 1802 1803 1804 int uv_fs_mkstemp(uv_loop_t* loop, 1805 uv_fs_t* req, 1806 const char* tpl, 1807 uv_fs_cb cb) { 1808 INIT(MKSTEMP); 1809 req->path = uv__strdup(tpl); 1810 if (req->path == NULL) 1811 return UV_ENOMEM; 1812 POST; 1813 } 1814 1815 1816 int 
uv_fs_open(uv_loop_t* loop, 1817 uv_fs_t* req, 1818 const char* path, 1819 int flags, 1820 int mode, 1821 uv_fs_cb cb) { 1822 INIT(OPEN); 1823 PATH; 1824 req->flags = flags; 1825 req->mode = mode; 1826 POST; 1827 } 1828 1829 1830 int uv_fs_read(uv_loop_t* loop, uv_fs_t* req, 1831 uv_file file, 1832 const uv_buf_t bufs[], 1833 unsigned int nbufs, 1834 int64_t off, 1835 uv_fs_cb cb) { 1836 INIT(READ); 1837 1838 if (bufs == NULL || nbufs == 0) 1839 return UV_EINVAL; 1840 1841 req->file = file; 1842 1843 req->nbufs = nbufs; 1844 req->bufs = req->bufsml; 1845 if (nbufs > ARRAY_SIZE(req->bufsml)) 1846 req->bufs = uv__malloc(nbufs * sizeof(*bufs)); 1847 1848 if (req->bufs == NULL) 1849 return UV_ENOMEM; 1850 1851 memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); 1852 1853 req->off = off; 1854 POST; 1855 } 1856 1857 1858 int uv_fs_scandir(uv_loop_t* loop, 1859 uv_fs_t* req, 1860 const char* path, 1861 int flags, 1862 uv_fs_cb cb) { 1863 INIT(SCANDIR); 1864 PATH; 1865 req->flags = flags; 1866 POST; 1867 } 1868 1869 int uv_fs_opendir(uv_loop_t* loop, 1870 uv_fs_t* req, 1871 const char* path, 1872 uv_fs_cb cb) { 1873 INIT(OPENDIR); 1874 PATH; 1875 POST; 1876 } 1877 1878 int uv_fs_readdir(uv_loop_t* loop, 1879 uv_fs_t* req, 1880 uv_dir_t* dir, 1881 uv_fs_cb cb) { 1882 INIT(READDIR); 1883 1884 if (dir == NULL || dir->dir == NULL || dir->dirents == NULL) 1885 return UV_EINVAL; 1886 1887 req->ptr = dir; 1888 POST; 1889 } 1890 1891 int uv_fs_closedir(uv_loop_t* loop, 1892 uv_fs_t* req, 1893 uv_dir_t* dir, 1894 uv_fs_cb cb) { 1895 INIT(CLOSEDIR); 1896 1897 if (dir == NULL) 1898 return UV_EINVAL; 1899 1900 req->ptr = dir; 1901 POST; 1902 } 1903 1904 int uv_fs_readlink(uv_loop_t* loop, 1905 uv_fs_t* req, 1906 const char* path, 1907 uv_fs_cb cb) { 1908 INIT(READLINK); 1909 PATH; 1910 POST; 1911 } 1912 1913 1914 int uv_fs_realpath(uv_loop_t* loop, 1915 uv_fs_t* req, 1916 const char * path, 1917 uv_fs_cb cb) { 1918 INIT(REALPATH); 1919 PATH; 1920 POST; 1921 } 1922 1923 1924 int 
uv_fs_rename(uv_loop_t* loop, 1925 uv_fs_t* req, 1926 const char* path, 1927 const char* new_path, 1928 uv_fs_cb cb) { 1929 INIT(RENAME); 1930 PATH2; 1931 POST; 1932 } 1933 1934 1935 int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { 1936 INIT(RMDIR); 1937 PATH; 1938 POST; 1939 } 1940 1941 1942 int uv_fs_sendfile(uv_loop_t* loop, 1943 uv_fs_t* req, 1944 uv_file out_fd, 1945 uv_file in_fd, 1946 int64_t off, 1947 size_t len, 1948 uv_fs_cb cb) { 1949 INIT(SENDFILE); 1950 req->flags = in_fd; /* hack */ 1951 req->file = out_fd; 1952 req->off = off; 1953 req->bufsml[0].len = len; 1954 POST; 1955 } 1956 1957 1958 int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { 1959 INIT(STAT); 1960 PATH; 1961 POST; 1962 } 1963 1964 1965 int uv_fs_symlink(uv_loop_t* loop, 1966 uv_fs_t* req, 1967 const char* path, 1968 const char* new_path, 1969 int flags, 1970 uv_fs_cb cb) { 1971 INIT(SYMLINK); 1972 PATH2; 1973 req->flags = flags; 1974 POST; 1975 } 1976 1977 1978 int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { 1979 INIT(UNLINK); 1980 PATH; 1981 POST; 1982 } 1983 1984 1985 int uv_fs_utime(uv_loop_t* loop, 1986 uv_fs_t* req, 1987 const char* path, 1988 double atime, 1989 double mtime, 1990 uv_fs_cb cb) { 1991 INIT(UTIME); 1992 PATH; 1993 req->atime = atime; 1994 req->mtime = mtime; 1995 POST; 1996 } 1997 1998 1999 int uv_fs_write(uv_loop_t* loop, 2000 uv_fs_t* req, 2001 uv_file file, 2002 const uv_buf_t bufs[], 2003 unsigned int nbufs, 2004 int64_t off, 2005 uv_fs_cb cb) { 2006 INIT(WRITE); 2007 2008 if (bufs == NULL || nbufs == 0) 2009 return UV_EINVAL; 2010 2011 req->file = file; 2012 2013 req->nbufs = nbufs; 2014 req->bufs = req->bufsml; 2015 if (nbufs > ARRAY_SIZE(req->bufsml)) 2016 req->bufs = uv__malloc(nbufs * sizeof(*bufs)); 2017 2018 if (req->bufs == NULL) 2019 return UV_ENOMEM; 2020 2021 memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); 2022 2023 req->off = off; 2024 POST; 2025 } 2026 2027 
/* Release all memory owned by a finished request. Safe to call on a NULL
 * pointer and idempotent: every freed field is NULLed afterwards. The order
 * matters — type-specific ptr cleanup (READDIR/SCANDIR) must run before the
 * generic uv__free(req->ptr) at the end. */
void uv_fs_req_cleanup(uv_fs_t* req) {
  if (req == NULL)
    return;

  /* Only necessary for asynchronous requests, i.e., requests with a callback.
   * Synchronous ones don't copy their arguments and have req->path and
   * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
   * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
   */
  if (req->path != NULL &&
      (req->cb != NULL ||
        req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
    uv__free((void*) req->path);  /* Memory is shared with req->new_path. */

  req->path = NULL;
  req->new_path = NULL;

  if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
    uv__fs_readdir_cleanup(req);

  if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
    uv__fs_scandir_cleanup(req);

  /* bufs is heap-allocated only when it outgrew the inline bufsml array. */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);
  req->bufs = NULL;

  /* OPENDIR's ptr is the caller-owned uv_dir_t; the stat types point ptr at
   * the embedded statbuf. Neither must be freed here. */
  if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
    uv__free(req->ptr);
  req->ptr = NULL;
}


/* Copy path to new_path. flags may combine UV_FS_COPYFILE_EXCL and the
 * FICLONE variants; any other bit is rejected with UV_EINVAL before the
 * request is initialized for dispatch. */
int uv_fs_copyfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   const char* new_path,
                   int flags,
                   uv_fs_cb cb) {
  INIT(COPYFILE);

  if (flags & ~(UV_FS_COPYFILE_EXCL |
                UV_FS_COPYFILE_FICLONE |
                UV_FS_COPYFILE_FICLONE_FORCE)) {
    return UV_EINVAL;
  }

  PATH2;
  req->flags = flags;
  POST;
}


int uv_fs_statfs(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_fs_cb cb) {
  INIT(STATFS);
  PATH;
  POST;
}

/* req->result holds a negated errno on failure (see uv__fs_work()); negate
 * it back to the positive system error number for the caller. */
int uv_fs_get_system_error(const uv_fs_t* req) {
  return -req->result;
}