/*
 * Copyright (c) 2005-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey Hsu and Matthew Dillon.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/nlookup.h>
#include <sys/stat.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/kern_syscall.h>
#include <sys/kcore.h>
#include <sys/kinfo.h>
#include <sys/un.h>
#include <sys/objcache.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/spinlock2.h>

static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
static void fdreserve_locked (struct filedesc *fdp, int fd0, int incr);
static struct file *funsetfd_locked (struct filedesc *fdp, int fd);
static void ffree(struct file *fp);

static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
		     "file desc to leader structures");
MALLOC_DEFINE(M_FILE, "file", "Open file structure");
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");

static struct krate krate_uidinfo = { .freq = 1 };

static d_open_t fdopen;
#define NUMFDESC 64

#define CDEV_MAJOR 22
static struct dev_ops fildesc_ops = {
	{ "FD", 0, 0 },
	.d_open =	fdopen,
};

/*
 * Descriptor management.
 */
#ifndef NFILELIST_HEADS
#define NFILELIST_HEADS		257	/* prime number */
#endif

struct filelist_head {
	struct spinlock		spin;
	struct filelist		list;
} __cachealign;

static struct filelist_head	filelist_heads[NFILELIST_HEADS];

static int nfiles;		/* actual number of open files */
extern int cmask;

struct lwkt_token revoke_token = LWKT_TOKEN_INITIALIZER(revoke_token);

static struct objcache		*file_objcache;

static struct objcache_malloc_args file_malloc_args = {
	.objsize	= sizeof(struct file),
	.mtype		= M_FILE
};

/*
 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
 *
 * must be called with fdp->fd_spin exclusively held
 */
static __inline
void
fdfixup_locked(struct filedesc *fdp, int fd)
{
	if (fd < fdp->fd_freefile) {
		fdp->fd_freefile = fd;
	}
	while (fdp->fd_lastfile >= 0 &&
	       fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
	       fdp->fd_files[fdp->fd_lastfile].reserved == 0
	) {
		--fdp->fd_lastfile;
	}
}

/*
 * Clear the fd thread caches for this fdnode.
 *
 * If match_fdc is NULL, all thread caches of fdn will be cleared.
 * The caller must hold fdp->fd_spin exclusively.  The threads caching
 * the descriptor do not have to be the current thread.  The (status)
 * argument is ignored.
 *
 * If match_fdc is not NULL, only the match_fdc's cache will be cleared.
 * The caller must hold fdp->fd_spin shared and match_fdc must match a
 * fdcache entry in curthread.  match_fdc has been locked by the caller
 * and had the specified (status).
 *
 * Since we are matching against a fp in the fdp (which must still be present
 * at this time), fp will have at least two refs on any match and we can
 * decrement the count trivially.
 */
static
void
fclearcache(struct fdnode *fdn, struct fdcache *match_fdc, int status)
{
	struct fdcache *fdc;
	struct file *fp;
	int i;

	/*
	 * match_fdc == NULL	We are cleaning out all tdcache entries
	 *			for the fdn and hold fdp->fd_spin exclusively.
	 *			This can race against the target threads
	 *			cleaning out specific entries.
	 *
	 * match_fdc != NULL	We are cleaning out a specific tdcache
	 *			entry on behalf of the owning thread
	 *			and hold fdp->fd_spin shared.  The thread
	 *			has already locked the entry.  This cannot
	 *			race.
	 */
	fp = fdn->fp;
	for (i = 0; i < NTDCACHEFD; ++i) {
		if ((fdc = fdn->tdcache[i]) == NULL)
			continue;

		/*
		 * If match_fdc is non-NULL we are being asked to
		 * clear a specific fdc owned by curthread.  There must
		 * be exactly one match.  The caller has already locked
		 * the cache entry and will dispose of the lock after
		 * we return.
		 *
		 * Since we also have a shared lock on fdp, we
		 * can do this without atomic ops.
		 */
		if (match_fdc) {
			if (fdc != match_fdc)
				continue;
			fdn->tdcache[i] = NULL;
			KASSERT(fp == fdc->fp,
				("fclearcache(1): fp mismatch %p/%p\n",
				fp, fdc->fp));
			fdc->fp = NULL;
			fdc->fd = -1;

			/*
			 * status can be 0 or 2.  If 2 the ref is borrowed,
			 * if 0 the ref is not borrowed and we have to drop
			 * it.
			 */
			if (status == 0)
				atomic_add_int(&fp->f_count, -1);
			fdn->isfull = 0;	/* heuristic */
			return;
		}

		/*
		 * Otherwise we hold an exclusive spin-lock and can only
		 * race thread consumers borrowing cache entries.
		 *
		 * Acquire the lock and dispose of the entry.  We have to
		 * spin until we get the lock.
		 */
		for (;;) {
			status = atomic_swap_int(&fdc->locked, 1);
			if (status == 1) {	/* foreign lock, retry */
				cpu_pause();
				continue;
			}
			fdn->tdcache[i] = NULL;
			KASSERT(fp == fdc->fp,
				("fclearcache(2): fp mismatch %p/%p\n",
				fp, fdc->fp));
			fdc->fp = NULL;
			fdc->fd = -1;
			if (status == 0)
				atomic_add_int(&fp->f_count, -1);
			fdn->isfull = 0;	/* heuristic */
			atomic_swap_int(&fdc->locked, 0);
			break;
		}
	}
	KKASSERT(match_fdc == NULL);
}

/*
 * Retrieve the fp for the specified fd given the specified file descriptor
 * table.  The fdp does not have to be owned by the current process.
 * If flags != -1, fp->f_flag must contain at least one of the flags.
 *
 * This function is not able to cache the fp.
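 *
 * A minimal usage sketch (hypothetical caller, shown for illustration
 * only): the returned fp carries an extra reference which the caller
 * releases with fdrop() when done, e.g.
 *
 *	fp = holdfp_fdp(fdp, fd, FREAD);
 *	if (fp != NULL) {
 *		... use fp ...
 *		fdrop(fp);
 *	}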
 */
struct file *
holdfp_fdp(struct filedesc *fdp, int fd, int flag)
{
	struct file *fp;

	spin_lock_shared(&fdp->fd_spin);
	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			if ((fp->f_flag & flag) == 0 && flag != -1) {
				fp = NULL;
			} else {
				fhold(fp);
			}
		}
	} else {
		fp = NULL;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return fp;
}

struct file *
holdfp_fdp_locked(struct filedesc *fdp, int fd, int flag)
{
	struct file *fp;

	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			if ((fp->f_flag & flag) == 0 && flag != -1) {
				fp = NULL;
			} else {
				fhold(fp);
			}
		}
	} else {
		fp = NULL;
	}
	return fp;
}

/*
 * Acquire the fp for the specified file descriptor, using the thread
 * cache if possible and caching it if possible.
 *
 * td must be the current thread.
 */
static
struct file *
_holdfp_cache(thread_t td, int fd)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	struct fdcache *best;
	struct fdnode *fdn;
	struct file *fp;
	int status;
	int delta;
	int i;

	/*
	 * Fast
	 */
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fd != fd || fdc->fp == NULL)
			continue;
		status = atomic_swap_int(&fdc->locked, 1);

		/*
		 * If someone else has locked our cache entry they are in
		 * the middle of clearing it, skip the entry.
		 */
		if (status == 1)
			continue;

		/*
		 * We have locked the entry, but if it no longer matches
		 * restore the previous state (0 or 2) and skip the entry.
		 */
		if (fdc->fd != fd || fdc->fp == NULL) {
			atomic_swap_int(&fdc->locked, status);
			continue;
		}

		/*
		 * We have locked a valid entry.  We can borrow the ref
		 * for a mode 0 entry.  We can get a valid fp for a mode
		 * 2 entry but not borrow the ref.
		 */
		if (status == 0) {
			fp = fdc->fp;
			fdc->lru = ++td->td_fdcache_lru;
			atomic_swap_int(&fdc->locked, 2);

			return fp;
		}
		if (status == 2) {
			fp = fdc->fp;
			fhold(fp);
			fdc->lru = ++td->td_fdcache_lru;
			atomic_swap_int(&fdc->locked, 2);

			return fp;
		}
		KKASSERT(0);
	}

	/*
	 * Lookup the descriptor the slow way.  This can contend against
	 * modifying operations in a multi-threaded environment and cause
	 * cache line ping ponging otherwise.
	 */
	fdp = td->td_proc->p_fd;
	spin_lock_shared(&fdp->fd_spin);

	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			fhold(fp);
			if (fdp->fd_files[fd].isfull == 0)
				goto enter;
		}
	} else {
		fp = NULL;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return fp;

	/*
	 * We found a valid fp and held it, fdp is still shared locked.
	 * Enter the fp into the per-thread cache.  Find the oldest entry
	 * via lru, or an empty entry.
	 *
	 * Because fdp's spinlock is held (shared is fine), no other
	 * thread should be in the middle of clearing our selected entry.
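	 *
	 * (For reference, the fdcache "locked" field effectively has three
	 * states in this file: 0 = unlocked with the cache holding its own
	 * ref, 1 = transiently locked, 2 = unlocked with the ref currently
	 * borrowed by the owning thread.)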
	 */
enter:
	best = &td->td_fdcache[0];
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fp == NULL) {
			best = fdc;
			break;
		}
		delta = fdc->lru - best->lru;
		if (delta < 0)
			best = fdc;
	}

	/*
	 * Replace best
	 *
	 * Don't enter into the cache if we cannot get the lock.
	 */
	status = atomic_swap_int(&best->locked, 1);
	if (status == 1)
		goto done;

	/*
	 * Clear the previous cache entry if present
	 */
	if (best->fp) {
		KKASSERT(best->fd >= 0);
		fclearcache(&fdp->fd_files[best->fd], best, status);
	}

	/*
	 * Create our new cache entry.  This entry is 'safe' until we tie
	 * into the fdnode.  If we cannot tie in, we will clear the entry.
	 */
	best->fd = fd;
	best->fp = fp;
	best->lru = ++td->td_fdcache_lru;
	best->locked = 2;			/* borrowed ref */

	fdn = &fdp->fd_files[fd];
	for (i = 0; i < NTDCACHEFD; ++i) {
		if (fdn->tdcache[i] == NULL &&
		    atomic_cmpset_ptr((void **)&fdn->tdcache[i], NULL, best)) {
			goto done;
		}
	}
	fdn->isfull = 1;			/* no space */
	best->fd = -1;
	best->fp = NULL;
	best->locked = 0;
done:
	spin_unlock_shared(&fdp->fd_spin);

	return fp;
}

/*
 * Drop the file pointer and return to the thread cache if possible.
 *
 * Caller must not hold fdp's spin lock.
 * td must be the current thread.
 */
void
dropfp(thread_t td, int fd, struct file *fp)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	int status;

	fdp = td->td_proc->p_fd;

	/*
	 * If our placeholder is still present we can re-cache the ref.
	 *
	 * Note that we can race an fclearcache().
	 */
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fp != fp || fdc->fd != fd)
			continue;
		status = atomic_swap_int(&fdc->locked, 1);
		switch(status) {
		case 0:
			/*
			 * Not in mode 2, fdrop fp without caching.
			 */
			atomic_swap_int(&fdc->locked, 0);
			break;
		case 1:
			/*
			 * Not in mode 2, locked by someone else.
			 * fdrop fp without caching.
			 */
			break;
		case 2:
			/*
			 * Intact borrowed ref, return to mode 0
			 * indicating that we have returned the ref.
			 *
			 * Return the borrowed ref (2->1->0)
			 */
			if (fdc->fp == fp && fdc->fd == fd) {
				atomic_swap_int(&fdc->locked, 0);
				return;
			}
			atomic_swap_int(&fdc->locked, 2);
			break;
		}
	}

	/*
	 * Failed to re-cache, drop the fp without caching.
	 */
	fdrop(fp);
}

/*
 * Clear all descriptors cached in the per-thread fd cache for
 * the specified thread.
 *
 * Caller must not hold p_fd->spin.  This function will temporarily
 * obtain a shared spin lock.
 */
void
fexitcache(thread_t td)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	int status;
	int i;

	if (td->td_proc == NULL)
		return;
	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return;

	/*
	 * A shared lock is sufficient as the caller controls td and we
	 * are only clearing td's cache.
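	 *
	 * (fclearcache() only needs the shared lock in this path because
	 * the fdc argument identifies the specific entry, owned by td,
	 * that is being cleared.)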
	 */
	spin_lock_shared(&fdp->fd_spin);
	for (i = 0; i < NFDCACHE; ++i) {
		fdc = &td->td_fdcache[i];
		if (fdc->fp) {
			status = atomic_swap_int(&fdc->locked, 1);
			if (status == 1) {
				cpu_pause();
				--i;
				continue;
			}
			if (fdc->fp) {
				KKASSERT(fdc->fd >= 0);
				fclearcache(&fdp->fd_files[fdc->fd], fdc,
					    status);
			}
			atomic_swap_int(&fdc->locked, 0);
		}
	}
	spin_unlock_shared(&fdp->fd_spin);
}

static __inline struct filelist_head *
fp2filelist(const struct file *fp)
{
	u_int i;

	i = (u_int)(uintptr_t)fp % NFILELIST_HEADS;
	return &filelist_heads[i];
}

static __inline
struct plimit *
readplimits(struct proc *p)
{
	thread_t td = curthread;
	struct plimit *limit;

	limit = td->td_limit;
	if (limit != p->p_limit) {
		spin_lock_shared(&p->p_spin);
		limit = p->p_limit;
		atomic_add_int(&limit->p_refcnt, 1);
		spin_unlock_shared(&p->p_spin);
		if (td->td_limit)
			plimit_free(td->td_limit);
		td->td_limit = limit;
	}
	return limit;
}

/*
 * System calls on descriptors.
 */
int
sys_getdtablesize(struct getdtablesize_args *uap)
{
	struct proc *p = curproc;
	struct plimit *limit = readplimits(p);
	int dtsize;

	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		dtsize = INT_MAX;
	else
		dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (dtsize > maxfilesperproc)
		dtsize = maxfilesperproc;
	if (dtsize < minfilesperproc)
		dtsize = minfilesperproc;
	if (p->p_ucred->cr_uid && dtsize > maxfilesperuser)
		dtsize = maxfilesperuser;
	uap->sysmsg_result = dtsize;
	return (0);
}

/*
 * Duplicate a file descriptor to a particular value.
 *
 * note: keep in mind that a potential race condition exists when closing
 * descriptors from a shared descriptor table (via rfork).
 */
int
sys_dup2(struct dup2_args *uap)
{
	int error;
	int fd = 0;

	error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd);
	uap->sysmsg_fds[0] = fd;

	return (error);
}

/*
 * Duplicate a file descriptor.
 */
int
sys_dup(struct dup_args *uap)
{
	int error;
	int fd = 0;

	error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd);
	uap->sysmsg_fds[0] = fd;

	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock for fp operations
 */
int
kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct vnode *vp;
	u_int newmin;
	u_int oflags;
	u_int nflags;
	int closedcounter;
	int tmp, error, flg = F_POSIX;

	KKASSERT(p);

	/*
	 * Operations on file descriptors that do not require a file pointer.
	 */
	switch (cmd) {
	case F_GETFD:
		error = fgetfdflags(p->p_fd, fd, &tmp);
		if (error == 0)
			dat->fc_cloexec = (tmp & UF_EXCLOSE) ?
					  FD_CLOEXEC : 0;
		return (error);

	case F_SETFD:
		if (dat->fc_cloexec & FD_CLOEXEC)
			error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
		else
			error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
		return (error);
	case F_DUPFD:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_VARIABLE | DUP_FCNTL, fd, newmin,
				 &dat->fc_fd);
		return (error);
	case F_DUPFD_CLOEXEC:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_VARIABLE | DUP_CLOEXEC | DUP_FCNTL,
				 fd, newmin, &dat->fc_fd);
		return (error);
	case F_DUP2FD:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_FIXED, fd, newmin, &dat->fc_fd);
		return (error);
	case F_DUP2FD_CLOEXEC:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_FIXED | DUP_CLOEXEC, fd, newmin,
				 &dat->fc_fd);
		return (error);
	default:
		break;
	}

	/*
	 * Operations on file pointers
	 */
	closedcounter = p->p_fd->fd_closedcounter;
	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);

	switch (cmd) {
	case F_GETFL:
		dat->fc_flags = OFLAGS(fp->f_flag);
		error = 0;
		break;

	case F_SETFL:
		oflags = fp->f_flag;
		nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
		nflags |= oflags & ~FCNTLFLAGS;

		error = 0;
		if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY))
			error = EINVAL;
		if (error == 0 && ((nflags ^ oflags) & FASYNC)) {
			tmp = nflags & FASYNC;
			error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp,
					 cred, NULL);
		}

		/*
		 * If no error, must be atomically set.
		 */
		while (error == 0) {
			oflags = fp->f_flag;
			cpu_ccfence();
			nflags = (oflags & ~FCNTLFLAGS) | (nflags & FCNTLFLAGS);
			if (atomic_cmpset_int(&fp->f_flag, oflags, nflags))
				break;
			cpu_pause();
		}
		break;

	case F_GETOWN:
		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner,
				 cred, NULL);
		break;

	case F_SETOWN:
		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner,
				 cred, NULL);
		break;

	case F_SETLKW:
		flg |= F_WAIT;
		/* Fall into F_SETLK */

	case F_SETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;

		/*
		 * copyin/lockop may block
		 */
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;

		switch (dat->fc_flock.l_type) {
		case F_RDLCK:
			if ((fp->f_flag & FREAD) == 0) {
				error = EBADF;
				break;
			}
			if (p->p_leader->p_advlock_flag == 0)
				p->p_leader->p_advlock_flag = 1;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
					    &dat->fc_flock, flg);
			break;
		case F_WRLCK:
			if ((fp->f_flag & FWRITE) == 0) {
				error = EBADF;
				break;
			}
			if (p->p_leader->p_advlock_flag == 0)
				p->p_leader->p_advlock_flag = 1;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
					    &dat->fc_flock, flg);
			break;
		case F_UNLCK:
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
					    &dat->fc_flock, F_POSIX);
			break;
		default:
			error = EINVAL;
			break;
		}

		/*
		 * It is possible to race a close() on the descriptor while
		 * we were blocked getting the lock.  If this occurs the
		 * close might not have caught the lock.
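		 * If the descriptor was indeed closed out from under us,
		 * the lock we just acquired is explicitly released below
		 * so it does not outlive the close().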
		 */
		if (checkfdclosed(td, p->p_fd, fd, fp, closedcounter)) {
			dat->fc_flock.l_whence = SEEK_SET;
			dat->fc_flock.l_start = 0;
			dat->fc_flock.l_len = 0;
			dat->fc_flock.l_type = F_UNLCK;
			VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
				    F_UNLCK, &dat->fc_flock, F_POSIX);
		}
		break;

	case F_GETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;
		/*
		 * copyin/lockop may block
		 */
		if (dat->fc_flock.l_type != F_RDLCK &&
		    dat->fc_flock.l_type != F_WRLCK &&
		    dat->fc_flock.l_type != F_UNLCK) {
			error = EINVAL;
			break;
		}
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;
		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
				    &dat->fc_flock, F_POSIX);
		break;
	default:
		error = EINVAL;
		break;
	}

	fdrop(fp);
	return (error);
}

/*
 * The file control system call.
 */
int
sys_fcntl(struct fcntl_args *uap)
{
	union fcntl_dat dat;
	int error;

	switch (uap->cmd) {
	case F_DUPFD:
	case F_DUP2FD:
	case F_DUPFD_CLOEXEC:
	case F_DUP2FD_CLOEXEC:
		dat.fc_fd = uap->arg;
		break;
	case F_SETFD:
		dat.fc_cloexec = uap->arg;
		break;
	case F_SETFL:
		dat.fc_flags = uap->arg;
		break;
	case F_SETOWN:
		dat.fc_owner = uap->arg;
		break;
	case F_SETLKW:
	case F_SETLK:
	case F_GETLK:
		error = copyin((caddr_t)uap->arg, &dat.fc_flock,
			       sizeof(struct flock));
		if (error)
			return (error);
		break;
	}

	error = kern_fcntl(uap->fd, uap->cmd, &dat, curthread->td_ucred);

	if (error == 0) {
		switch (uap->cmd) {
		case F_DUPFD:
		case F_DUP2FD:
		case F_DUPFD_CLOEXEC:
		case F_DUP2FD_CLOEXEC:
			uap->sysmsg_result = dat.fc_fd;
			break;
		case F_GETFD:
			uap->sysmsg_result = dat.fc_cloexec;
			break;
		case F_GETFL:
			uap->sysmsg_result = dat.fc_flags;
			break;
		case F_GETOWN:
			uap->sysmsg_result = dat.fc_owner;
			break;
		case F_GETLK:
			error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
					sizeof(struct flock));
			break;
		}
	}

	return (error);
}

/*
 * Common code for dup, dup2, and fcntl(F_DUPFD).
 *
 * There are four type flags: DUP_FCNTL, DUP_FIXED, DUP_VARIABLE, and
 * DUP_CLOEXEC.
 *
 * DUP_FCNTL is for handling EINVAL vs. EBADF differences between
 * fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC and dup2() (per POSIX).
 * The next two flags are mutually exclusive, and the fourth is optional.
 * DUP_FIXED tells kern_dup() to destructively dup over an existing file
 * descriptor if "new" is already open.  DUP_VARIABLE tells kern_dup()
 * to find the lowest unused file descriptor that is greater than or
 * equal to "new".  DUP_CLOEXEC, which works with either of the first
 * two flags, sets the close-on-exec flag on the "new" file descriptor.
 */
int
kern_dup(int flags, int old, int new, int *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct plimit *limit = readplimits(p);
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct file *delfp;
	int oldflags;
	int holdleaders;
	int dtsize;
	int error, newfd;

	/*
	 * Verify that we have a valid descriptor to dup from and
	 * possibly to dup to.  When the new descriptor is out of
	 * bounds, fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC must
	 * return EINVAL, while dup2() returns EBADF in
	 * this case.
	 *
	 * NOTE: maxfilesperuser is not applicable to dup()
	 */
retry:
	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		dtsize = INT_MAX;
	else
		dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
	if (dtsize > maxfilesperproc)
		dtsize = maxfilesperproc;
	if (dtsize < minfilesperproc)
		dtsize = minfilesperproc;

	if (new < 0 || new > dtsize)
		return (flags & DUP_FCNTL ? EINVAL : EBADF);

	spin_lock(&fdp->fd_spin);
	if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
		spin_unlock(&fdp->fd_spin);
		return (EBADF);
	}
	if ((flags & DUP_FIXED) && old == new) {
		*res = new;
		if (flags & DUP_CLOEXEC)
			fdp->fd_files[new].fileflags |= UF_EXCLOSE;
		spin_unlock(&fdp->fd_spin);
		return (0);
	}
	fp = fdp->fd_files[old].fp;
	oldflags = fdp->fd_files[old].fileflags;
	fhold(fp);

	/*
	 * Allocate a new descriptor if DUP_VARIABLE, or expand the table
	 * if the requested descriptor is beyond the current table size.
	 *
	 * This can block.  Retry if the source descriptor no longer matches
	 * or if our expectation in the expansion case races.
	 *
	 * If we are not expanding or allocating a new descriptor, then reset
	 * the target descriptor to a reserved state so we have a uniform
	 * setup for the next code block.
	 */
	if ((flags & DUP_VARIABLE) || new >= fdp->fd_nfiles) {
		spin_unlock(&fdp->fd_spin);
		error = fdalloc(p, new, &newfd);
		spin_lock(&fdp->fd_spin);
		if (error) {
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			return (error);
		}
		/*
		 * Check for ripout
		 */
		if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for expansion race
		 */
		if ((flags & DUP_VARIABLE) == 0 && new != newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for ripout, newfd reused old (this case probably
		 * can't occur).
		 */
		if (old == newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		new = newfd;
		delfp = NULL;
	} else {
		if (fdp->fd_files[new].reserved) {
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			kprintf("Warning: dup(): target descriptor %d is reserved, waiting for it to be resolved\n", new);
			tsleep(fdp, 0, "fdres", hz);
			goto retry;
		}

		/*
		 * If the target descriptor was never allocated we have
		 * to allocate it.  If it was we have to clean out the
		 * old descriptor.  delfp inherits the ref from the
		 * descriptor table.
		 */
		++fdp->fd_closedcounter;
		fclearcache(&fdp->fd_files[new], NULL, 0);
		++fdp->fd_closedcounter;
		delfp = fdp->fd_files[new].fp;
		fdp->fd_files[new].fp = NULL;
		fdp->fd_files[new].reserved = 1;
		if (delfp == NULL) {
			fdreserve_locked(fdp, new, 1);
			if (new > fdp->fd_lastfile)
				fdp->fd_lastfile = new;
		}

	}

	/*
	 * NOTE: still holding an exclusive spinlock
	 */

	/*
	 * If a descriptor is being overwritten we may have to tell
	 * fdfree() to sleep to ensure that all relevant process
	 * leaders can be traversed in closef().
	 */
	if (delfp != NULL && p->p_fdtol != NULL) {
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	} else {
		holdleaders = 0;
	}
	KASSERT(delfp == NULL || (flags & DUP_FIXED),
		("dup() picked an open file"));

	/*
	 * Duplicate the source descriptor, update lastfile.  If the new
	 * descriptor was not allocated and we aren't replacing an existing
	 * descriptor we have to mark the descriptor as being in use.
	 *
	 * The fd_files[] array inherits fp's hold reference.
	 */
	fsetfd_locked(fdp, fp, new);
	if ((flags & DUP_CLOEXEC) != 0)
		fdp->fd_files[new].fileflags = oldflags | UF_EXCLOSE;
	else
		fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
	spin_unlock(&fdp->fd_spin);
	fdrop(fp);
	*res = new;

	/*
	 * If we dup'd over a valid file, we now own the reference to it
	 * and must dispose of it using closef() semantics (as if a
	 * close() were performed on it).
	 */
	if (delfp) {
		if (SLIST_FIRST(&delfp->f_klist))
			knote_fdclose(delfp, fdp, new);
		closef(delfp, p);
		if (holdleaders) {
			spin_lock(&fdp->fd_spin);
			fdp->fd_holdleaderscount--;
			if (fdp->fd_holdleaderscount == 0 &&
			    fdp->fd_holdleaderswakeup != 0) {
				fdp->fd_holdleaderswakeup = 0;
				spin_unlock(&fdp->fd_spin);
				wakeup(&fdp->fd_holdleaderscount);
			} else {
				spin_unlock(&fdp->fd_spin);
			}
		}
	}
	return (0);
}

/*
 * If sigio is on the list associated with a process or process group,
 * disable signalling from the device, remove sigio from the list and
 * free sigio.
 */
void
funsetown(struct sigio **sigiop)
{
	struct pgrp *pgrp;
	struct proc *p;
	struct sigio *sigio;

	if ((sigio = *sigiop) != NULL) {
		lwkt_gettoken(&sigio_token);	/* protect sigio */
		KKASSERT(sigiop == sigio->sio_myref);
		sigio = *sigiop;
		*sigiop = NULL;
		lwkt_reltoken(&sigio_token);
	}
	if (sigio == NULL)
		return;

	if (sigio->sio_pgid < 0) {
		pgrp = sigio->sio_pgrp;
		sigio->sio_pgrp = NULL;
		lwkt_gettoken(&pgrp->pg_token);
		SLIST_REMOVE(&pgrp->pg_sigiolst, sigio, sigio, sio_pgsigio);
		lwkt_reltoken(&pgrp->pg_token);
		pgrel(pgrp);
	} else /* if ((*sigiop)->sio_pgid > 0) */ {
		p = sigio->sio_proc;
		sigio->sio_proc = NULL;
		PHOLD(p);
		lwkt_gettoken(&p->p_token);
		SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	crfree(sigio->sio_ucred);
	sigio->sio_ucred = NULL;
	kfree(sigio, M_SIGIO);
}

/*
 * Free a list of sigio structures.  Caller is responsible for ensuring
 * that the list is MPSAFE.
 */
void
funsetownlst(struct sigiolst *sigiolst)
{
	struct sigio *sigio;

	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
		funsetown(sigio->sio_myref);
}

/*
 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
 *
 * After permission checking, add a sigio structure to the sigio list for
 * the process or process group.
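 *
 * As with fcntl(fd, F_SETOWN, arg), a positive pgid names a single
 * process while a negative pgid names a process group, e.g. (purely
 * illustrative):
 *
 *	fsetown(pid, &sigiop);		deliver SIGIO to process "pid"
 *	fsetown(-pgid, &sigiop);	deliver SIGIO to process group "pgid"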
 */
int
fsetown(pid_t pgid, struct sigio **sigiop)
{
	struct proc *proc = NULL;
	struct pgrp *pgrp = NULL;
	struct sigio *sigio;
	int error;

	if (pgid == 0) {
		funsetown(sigiop);
		return (0);
	}

	if (pgid > 0) {
		proc = pfind(pgid);
		if (proc == NULL) {
			error = ESRCH;
			goto done;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (proc->p_session != curproc->p_session) {
			error = EPERM;
			goto done;
		}
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);
		if (pgrp == NULL) {
			error = ESRCH;
			goto done;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != curproc->p_session) {
			error = EPERM;
			goto done;
		}
	}
	sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK | M_ZERO);
	if (pgid > 0) {
		KKASSERT(pgrp == NULL);
		lwkt_gettoken(&proc->p_token);
		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
		sigio->sio_proc = proc;
		lwkt_reltoken(&proc->p_token);
	} else {
		KKASSERT(proc == NULL);
		lwkt_gettoken(&pgrp->pg_token);
		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
		sigio->sio_pgrp = pgrp;
		lwkt_reltoken(&pgrp->pg_token);
		pgrp = NULL;
	}
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(curthread->td_ucred);
	/* It would be convenient if p_ruid was in ucred. */
	sigio->sio_ruid = sigio->sio_ucred->cr_ruid;
	sigio->sio_myref = sigiop;

	lwkt_gettoken(&sigio_token);
	while (*sigiop)
		funsetown(sigiop);
	*sigiop = sigio;
	lwkt_reltoken(&sigio_token);
	error = 0;
done:
	if (pgrp)
		pgrel(pgrp);
	if (proc)
		PRELE(proc);
	return (error);
}

/*
 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
 */
pid_t
fgetown(struct sigio **sigiop)
{
	struct sigio *sigio;
	pid_t own;

	lwkt_gettoken_shared(&sigio_token);
	sigio = *sigiop;
	own = (sigio != NULL ? sigio->sio_pgid : 0);
	lwkt_reltoken(&sigio_token);

	return (own);
}

/*
 * Close many file descriptors.
 */
int
sys_closefrom(struct closefrom_args *uap)
{
	return(kern_closefrom(uap->fd));
}

/*
 * Close all file descriptors greater than or equal to fd
 */
int
kern_closefrom(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;

	KKASSERT(p);
	fdp = p->p_fd;

	if (fd < 0)
		return (EINVAL);

	/*
	 * NOTE: This function will skip unassociated descriptors and
	 *	 reserved descriptors that have not yet been assigned.
	 *	 fd_lastfile can change as a side effect of kern_close().
	 */
	spin_lock(&fdp->fd_spin);
	while (fd <= fdp->fd_lastfile) {
		if (fdp->fd_files[fd].fp != NULL) {
			spin_unlock(&fdp->fd_spin);
			/* ok if this races another close */
			if (kern_close(fd) == EINTR)
				return (EINTR);
			spin_lock(&fdp->fd_spin);
		}
		++fd;
	}
	spin_unlock(&fdp->fd_spin);
	return (0);
}

/*
 * Close a file descriptor.
 */
int
sys_close(struct close_args *uap)
{
	return(kern_close(uap->fd));
}

/*
 * close() helper
 */
int
kern_close(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	struct file *fp;
	int error;
	int holdleaders;

	KKASSERT(p);
	fdp = p->p_fd;

	/*
	 * funsetfd*() also clears the fd cache
	 */
	spin_lock(&fdp->fd_spin);
	if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
		spin_unlock(&fdp->fd_spin);
		return (EBADF);
	}
	holdleaders = 0;
	if (p->p_fdtol != NULL) {
		/*
		 * Ask fdfree() to sleep to ensure that all relevant
		 * process leaders can be traversed in closef().
		 */
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	}

	/*
	 * we now hold the fp reference that used to be owned by the
	 * descriptor array.
	 */
	spin_unlock(&fdp->fd_spin);
	if (SLIST_FIRST(&fp->f_klist))
		knote_fdclose(fp, fdp, fd);
	error = closef(fp, p);
	if (holdleaders) {
		spin_lock(&fdp->fd_spin);
		fdp->fd_holdleaderscount--;
		if (fdp->fd_holdleaderscount == 0 &&
		    fdp->fd_holdleaderswakeup != 0) {
			fdp->fd_holdleaderswakeup = 0;
			spin_unlock(&fdp->fd_spin);
			wakeup(&fdp->fd_holdleaderscount);
		} else {
			spin_unlock(&fdp->fd_spin);
		}
	}
	return (error);
}

/*
 * shutdown_args(int fd, int how)
 */
int
kern_shutdown(int fd, int how)
{
	struct thread *td = curthread;
	struct file *fp;
	int error;

	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);
	error = fo_shutdown(fp, how);
	fdrop(fp);

	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_shutdown(struct shutdown_args *uap)
{
	int error;

	error = kern_shutdown(uap->s, uap->how);

	return (error);
}

/*
 * fstat() helper
 */
int
kern_fstat(int fd, struct stat *ub)
{
	struct thread *td = curthread;
	struct file *fp;
	int error;

	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);
	error = fo_stat(fp, ub, td->td_ucred);
	fdrop(fp);

	return (error);
}

/*
 * Return status information about a file descriptor.
 */
int
sys_fstat(struct fstat_args *uap)
{
	struct stat st;
	int error;

	error = kern_fstat(uap->fd, &st);

	if (error == 0)
		error = copyout(&st, uap->sb, sizeof(st));
	return (error);
}

/*
 * Return pathconf information about a file descriptor.
 *
 * MPALMOSTSAFE
 */
int
sys_fpathconf(struct fpathconf_args *uap)
{
	struct thread *td = curthread;
	struct file *fp;
	struct vnode *vp;
	int error = 0;

	if ((fp = holdfp(td, uap->fd, -1)) == NULL)
		return (EBADF);

	switch (fp->f_type) {
	case DTYPE_PIPE:
	case DTYPE_SOCKET:
		if (uap->name != _PC_PIPE_BUF) {
			error = EINVAL;
		} else {
			uap->sysmsg_result = PIPE_BUF;
			error = 0;
		}
		break;
	case DTYPE_FIFO:
	case DTYPE_VNODE:
		vp = (struct vnode *)fp->f_data;
		error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	fdrop(fp);
	return(error);
}

/*
 * Grow the file table so it can hold through descriptor (want).
 *
 * The fdp's spinlock must be held exclusively on entry and may be held
 * exclusively on return.  The spinlock may be cycled by the routine.
 */
static void
fdgrow_locked(struct filedesc *fdp, int want)
{
	struct fdnode *newfiles;
	struct fdnode *oldfiles;
	int nf, extra;

	nf = fdp->fd_nfiles;
	do {
		/* nf has to be of the form 2^n - 1 */
		nf = 2 * nf + 1;
	} while (nf <= want);

	spin_unlock(&fdp->fd_spin);
	newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
	spin_lock(&fdp->fd_spin);

	/*
	 * We could have raced another extend while we were not holding
	 * the spinlock.
	 */
	if (fdp->fd_nfiles >= nf) {
		spin_unlock(&fdp->fd_spin);
		kfree(newfiles, M_FILEDESC);
		spin_lock(&fdp->fd_spin);
		return;
	}
	/*
	 * Copy the existing ofile and ofileflags arrays
	 * and zero the new portion of each array.
	 */
	extra = nf - fdp->fd_nfiles;
	bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
	bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));

	oldfiles = fdp->fd_files;
	fdp->fd_files = newfiles;
	fdp->fd_nfiles = nf;

	if (oldfiles != fdp->fd_builtin_files) {
		spin_unlock(&fdp->fd_spin);
		kfree(oldfiles, M_FILEDESC);
		spin_lock(&fdp->fd_spin);
	}
}

/*
 * Number of nodes in right subtree, including the root.
 */
static __inline int
right_subtree_size(int n)
{
	return (n ^ (n | (n + 1)));
}

/*
 * Bigger ancestor.
 */
static __inline int
right_ancestor(int n)
{
	return (n | (n + 1));
}

/*
 * Smaller ancestor.
 */
static __inline int
left_ancestor(int n)
{
	return ((n & (n + 1)) - 1);
}

/*
 * Traverse the in-place binary tree bottom-up adjusting the allocation
 * count so scans can determine where free descriptors are located.
 *
 * caller must be holding an exclusive spinlock on fdp
 */
static
void
fdreserve_locked(struct filedesc *fdp, int fd, int incr)
{
	while (fd >= 0) {
		fdp->fd_files[fd].allocated += incr;
		KKASSERT(fdp->fd_files[fd].allocated >= 0);
		fd = left_ancestor(fd);
	}
}

/*
 * Reserve a file descriptor for the process.  If no error occurs, the
 * caller MUST at some point call fsetfd() or assign a file pointer
 * or dispose of the reservation.
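 *
 * A minimal sketch of the expected pattern (hypothetical caller, shown
 * for illustration only):
 *
 *	error = fdalloc(p, 0, &fd);
 *	if (error == 0) {
 *		if (...setup succeeded...)
 *			fsetfd(p->p_fd, fp, fd);	(associate fp)
 *		else
 *			fsetfd(p->p_fd, NULL, fd);	(cancel the reservation)
 *	}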
 */
int
fdalloc(struct proc *p, int want, int *result)
{
	struct plimit *limit = readplimits(p);
	struct filedesc *fdp = p->p_fd;
	struct uidinfo *uip;
	int fd, rsize, rsum, node, lim;

	/*
	 * Check dtable size limit
	 */
	*result = -1;	/* avoid gcc warnings */
	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		lim = INT_MAX;
	else
		lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (lim > maxfilesperproc)
		lim = maxfilesperproc;
	if (lim < minfilesperproc)
		lim = minfilesperproc;
	if (want >= lim)
		return (EMFILE);

	/*
	 * Check that the user has not run out of descriptors (non-root only).
	 * As a safety measure the dtable is allowed to have at least
	 * minfilesperproc open fds regardless of the maxfilesperuser limit.
	 *
	 * This isn't as loose a spec as ui_posixlocks, so we use atomic
	 * ops to force synchronize and recheck if we would otherwise
	 * error.
	 */
	if (p->p_ucred->cr_uid && fdp->fd_nfiles >= minfilesperproc) {
		uip = p->p_ucred->cr_uidinfo;
		if (uip->ui_openfiles > maxfilesperuser) {
			int n;
			int count;

			count = 0;
			for (n = 0; n < ncpus; ++n) {
				count += atomic_swap_int(
					    &uip->ui_pcpu[n].pu_openfiles, 0);
			}
			atomic_add_int(&uip->ui_openfiles, count);
			if (uip->ui_openfiles > maxfilesperuser) {
				krateprintf(&krate_uidinfo,
					    "Warning: user %d pid %d (%s) "
					    "ran out of file descriptors "
					    "(%d/%d)\n",
					    p->p_ucred->cr_uid, (int)p->p_pid,
					    p->p_comm,
					    uip->ui_openfiles, maxfilesperuser);
				return(ENFILE);
			}
		}
	}

	/*
	 * Grow the dtable if necessary
	 */
	spin_lock(&fdp->fd_spin);
	if (want >= fdp->fd_nfiles)
		fdgrow_locked(fdp, want);

	/*
	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile.  If that fails, consider
	 * expanding the ofile array.
	 *
	 * NOTE! the 'allocated' field is a cumulative recursive allocation
	 *	 count.  If we happen to see a value of 0 then we can shortcut
	 *	 our search.  Otherwise we run through the tree going
	 *	 down branches we know have free descriptor(s) until we hit a
	 *	 leaf node.  The leaf node will be free but will not
	 *	 necessarily have an allocated field of 0.
	 */
retry:
	/* move up the tree looking for a subtree with a free node */
	for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
	     fd = right_ancestor(fd)) {
		if (fdp->fd_files[fd].allocated == 0)
			goto found;

		rsize = right_subtree_size(fd);
		if (fdp->fd_files[fd].allocated == rsize)
			continue;	/* right subtree full */

		/*
		 * Free fd is in the right subtree of the tree rooted at fd.
		 * Call that subtree R.  Look for the smallest (leftmost)
		 * subtree of R with an unallocated fd: continue moving
		 * down the left branch until encountering a full left
		 * subtree, then move to the right.
		 */
		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
			node = fd + rsize;
			rsum += fdp->fd_files[node].allocated;
			if (fdp->fd_files[fd].allocated == rsum + rsize) {
				fd = node;	/* move to the right */
				if (fdp->fd_files[node].allocated == 0)
					goto found;
				rsum = 0;
			}
		}
		goto found;
	}

	/*
	 * No space in current array.  Expand?
	 */
	if (fdp->fd_nfiles >= lim) {
		spin_unlock(&fdp->fd_spin);
		return (EMFILE);
	}
	fdgrow_locked(fdp, want);
	goto retry;

found:
	KKASSERT(fd < fdp->fd_nfiles);
	if (fd > fdp->fd_lastfile)
		fdp->fd_lastfile = fd;
	if (want <= fdp->fd_freefile)
		fdp->fd_freefile = fd;
	*result = fd;
	KKASSERT(fdp->fd_files[fd].fp == NULL);
	KKASSERT(fdp->fd_files[fd].reserved == 0);
	fdp->fd_files[fd].fileflags = 0;
	fdp->fd_files[fd].reserved = 1;
	fdreserve_locked(fdp, fd, 1);
	spin_unlock(&fdp->fd_spin);
	return (0);
}

/*
 * Check to see whether n user file descriptors
 * are available to the process p.
 */
int
fdavail(struct proc *p, int n)
{
	struct plimit *limit = readplimits(p);
	struct filedesc *fdp = p->p_fd;
	struct fdnode *fdnode;
	int i, lim, last;

	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		lim = INT_MAX;
	else
		lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (lim > maxfilesperproc)
		lim = maxfilesperproc;
	if (lim < minfilesperproc)
		lim = minfilesperproc;

	spin_lock(&fdp->fd_spin);
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
		spin_unlock(&fdp->fd_spin);
		return (1);
	}
	last = min(fdp->fd_nfiles, lim);
	fdnode = &fdp->fd_files[fdp->fd_freefile];
	for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
		if (fdnode->fp == NULL && --n <= 0) {
			spin_unlock(&fdp->fd_spin);
			return (1);
		}
	}
	spin_unlock(&fdp->fd_spin);
	return (0);
}

/*
 * Revoke open descriptors referencing (f_data, f_type)
 *
 * Any revoke executed within a prison is only able to
 * revoke descriptors for processes within that prison.
 *
 * Returns 0 on success or an error code.
 */
struct fdrevoke_info {
	void *data;
	short type;
	short unused;
	int found;
	struct ucred *cred;
	struct file *nfp;
};

static int fdrevoke_check_callback(struct file *fp, void *vinfo);
static int fdrevoke_proc_callback(struct proc *p, void *vinfo);

int
fdrevoke(void *f_data, short f_type, struct ucred *cred)
{
	struct fdrevoke_info info;
	int error;

	bzero(&info, sizeof(info));
	info.data = f_data;
	info.type = f_type;
	info.cred = cred;
	error = falloc(NULL, &info.nfp, NULL);
	if (error)
		return (error);

	/*
	 * Scan the file pointer table once.  dups do not dup file pointers,
	 * only descriptors, so there is no leak.  Set FREVOKED on the fps
	 * being revoked.
	 *
	 * Any fps sent over unix-domain sockets will be revoked by the
	 * socket code checking for FREVOKED when the fps are externalized.
	 * revoke_token is used to make sure that fps marked FREVOKED and
	 * externalized will be picked up by the following allproc_scan().
	 */
	lwkt_gettoken(&revoke_token);
	allfiles_scan_exclusive(fdrevoke_check_callback, &info);
	lwkt_reltoken(&revoke_token);

	/*
	 * If any fps were marked track down the related descriptors
	 * and close them.  Any dup()s at this point will notice
	 * the FREVOKED already set in the fp and do the right thing.
	 */
	if (info.found)
		allproc_scan(fdrevoke_proc_callback, &info, 0);
	fdrop(info.nfp);
	return(0);
}

/*
 * Locate matching file pointers directly.
 *
 * WARNING: allfiles_scan_exclusive() holds a spinlock through these calls!
 */
static int
fdrevoke_check_callback(struct file *fp, void *vinfo)
{
	struct fdrevoke_info *info = vinfo;

	/*
	 * File pointers already flagged for revocation are skipped.
	 */
	if (fp->f_flag & FREVOKED)
		return(0);

	/*
	 * If revoking from a prison file pointers created outside of
	 * that prison, or file pointers without creds, cannot be revoked.
	 */
	if (info->cred->cr_prison &&
	    (fp->f_cred == NULL ||
	     info->cred->cr_prison != fp->f_cred->cr_prison)) {
		return(0);
	}

	/*
	 * If the file pointer matches then mark it for revocation.  The
	 * flag is currently only used by unp_revoke_gc().
	 *
	 * info->found is a heuristic and can race in a SMP environment.
	 */
	if (info->data == fp->f_data && info->type == fp->f_type) {
		atomic_set_int(&fp->f_flag, FREVOKED);
		info->found = 1;
	}
	return(0);
}

/*
 * Locate matching file pointers via process descriptor tables.
 */
static int
fdrevoke_proc_callback(struct proc *p, void *vinfo)
{
	struct fdrevoke_info *info = vinfo;
	struct filedesc *fdp;
	struct file *fp;
	int n;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return(0);
	if (info->cred->cr_prison &&
	    info->cred->cr_prison != p->p_ucred->cr_prison) {
		return(0);
	}

	/*
	 * If the controlling terminal of the process matches the
	 * vnode being revoked we clear the controlling terminal.
	 *
	 * The normal spec_close() may not catch this because it
	 * uses curproc instead of p.
	 */
	if (p->p_session && info->type == DTYPE_VNODE &&
	    info->data == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(info->data);
	}

	/*
	 * Softref the fdp to prevent it from being destroyed
	 */
	spin_lock(&p->p_spin);
	if ((fdp = p->p_fd) == NULL) {
		spin_unlock(&p->p_spin);
		return(0);
	}
	atomic_add_int(&fdp->fd_softrefs, 1);
	spin_unlock(&p->p_spin);

	/*
	 * Locate and close any matching file descriptors, replacing
	 * them with info->nfp.
	 */
	spin_lock(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
			continue;
		if (fp->f_flag & FREVOKED) {
			++fdp->fd_closedcounter;
			fclearcache(&fdp->fd_files[n], NULL, 0);
			++fdp->fd_closedcounter;
			fhold(info->nfp);
			fdp->fd_files[n].fp = info->nfp;
			spin_unlock(&fdp->fd_spin);
			knote_fdclose(fp, fdp, n);	/* XXX */
			closef(fp, p);
			spin_lock(&fdp->fd_spin);
		}
	}
	spin_unlock(&fdp->fd_spin);
	atomic_subtract_int(&fdp->fd_softrefs, 1);
	return(0);
}

/*
 * falloc:
 *	Create a new open file structure and reserve a file descriptor
 *	for the process that refers to it.
 *
 *	Root creds are checked using lp, or assumed if lp is NULL.  If
 *	resultfd is non-NULL then lp must also be non-NULL.  No file
 *	descriptor is reserved (and no process context is needed) if
 *	resultfd is NULL.
 *
 *	A file pointer with a refcount of 1 is returned.  Note that the
 *	file pointer is NOT associated with the descriptor.  If falloc
 *	returns success, fsetfd() MUST be called to either associate the
 *	file pointer or clear the reservation.
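 *
 *	A minimal sketch of the usual pattern (hypothetical caller, shown
 *	for illustration only):
 *
 *		error = falloc(lp, &fp, &fd);
 *		if (error == 0) {
 *			... initialize fp (f_data, f_ops, f_flag) ...
 *			fsetfd(lp->lwp_proc->p_fd, fp, fd);
 *			fdrop(fp);	(release falloc's initial reference)
 *		}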
 */
int
falloc(struct lwp *lp, struct file **resultfp, int *resultfd)
{
	static struct timeval lastfail;
	static int curfail;
	struct filelist_head *head;
	struct file *fp;
	struct ucred *cred = lp ? lp->lwp_thread->td_ucred : proc0.p_ucred;
	int error;

	fp = NULL;

	/*
	 * Handle filetable full issues and root overfill.
	 */
	if (nfiles >= maxfiles - maxfilesrootres &&
	    (cred->cr_ruid != 0 || nfiles >= maxfiles)) {
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			kprintf("kern.maxfiles limit exceeded by uid %d, "
				"please see tuning(7).\n",
				cred->cr_ruid);
		}
		error = ENFILE;
		goto done;
	}

	/*
	 * Allocate a new open file structure.
	 */
	fp = objcache_get(file_objcache, M_WAITOK);
	bzero(fp, sizeof(*fp));
	spin_init(&fp->f_spin, "falloc");
	SLIST_INIT(&fp->f_klist);
	fp->f_count = 1;
	fp->f_ops = &badfileops;
	fp->f_seqcount = 1;
	fsetcred(fp, cred);
	atomic_add_int(&nfiles, 1);

	head = fp2filelist(fp);
	spin_lock(&head->spin);
	LIST_INSERT_HEAD(&head->list, fp, f_list);
	spin_unlock(&head->spin);

	if (resultfd) {
		if ((error = fdalloc(lp->lwp_proc, 0, resultfd)) != 0) {
			fdrop(fp);
			fp = NULL;
		}
	} else {
		error = 0;
	}
done:
	*resultfp = fp;
	return (error);
}

/*
 * Check for races against a file descriptor by determining that the
 * file pointer is still associated with the specified file descriptor,
 * and a close is not currently in progress.
 */
int
checkfdclosed(thread_t td, struct filedesc *fdp, int fd, struct file *fp,
	      int closedcounter)
{
	struct fdcache *fdc;
	int error;

	cpu_lfence();
	if (fdp->fd_closedcounter == closedcounter)
		return 0;

	if (td->td_proc && td->td_proc->p_fd == fdp) {
		for (fdc = &td->td_fdcache[0];
		     fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
			if (fdc->fd == fd && fdc->fp == fp)
				return 0;
		}
	}

	spin_lock_shared(&fdp->fd_spin);
	if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
		error = EBADF;
	else
		error = 0;
	spin_unlock_shared(&fdp->fd_spin);
	return (error);
}

/*
 * Associate a file pointer with a previously reserved file descriptor.
 * This function always succeeds.
 *
 * If fp is NULL, the file descriptor is returned to the pool.
 *
 * Caller must hold an exclusive spinlock on fdp->fd_spin.
 */
static void
fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
{
	KKASSERT((unsigned)fd < fdp->fd_nfiles);
	KKASSERT(fdp->fd_files[fd].reserved != 0);
	if (fp) {
		fhold(fp);
		fclearcache(&fdp->fd_files[fd], NULL, 0);
		fdp->fd_files[fd].fp = fp;
		fdp->fd_files[fd].reserved = 0;
	} else {
		fdp->fd_files[fd].reserved = 0;
		fdreserve_locked(fdp, fd, -1);
		fdfixup_locked(fdp, fd);
	}
}

/*
 * Caller must not hold fdp->fd_spin; it is acquired and released here.
 */
void
fsetfd(struct filedesc *fdp, struct file *fp, int fd)
{
	spin_lock(&fdp->fd_spin);
	fsetfd_locked(fdp, fp, fd);
	spin_unlock(&fdp->fd_spin);
}

/*
 * Caller must hold an exclusive spinlock on fdp->fd_spin.
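 *
 * Disassociate the file pointer from the descriptor slot and return it;
 * the reference formerly owned by the descriptor table is transferred
 * to the caller.  Returns NULL if the descriptor was not open.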
 */
static
struct file *
funsetfd_locked(struct filedesc *fdp, int fd)
{
	struct file *fp;

	if ((unsigned)fd >= fdp->fd_nfiles)
		return (NULL);
	if ((fp = fdp->fd_files[fd].fp) == NULL)
		return (NULL);
	++fdp->fd_closedcounter;
	fclearcache(&fdp->fd_files[fd], NULL, 0);
	fdp->fd_files[fd].fp = NULL;
	fdp->fd_files[fd].fileflags = 0;
	++fdp->fd_closedcounter;

	fdreserve_locked(fdp, fd, -1);
	fdfixup_locked(fdp, fd);

	return(fp);
}

/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
{
	int error;

	spin_lock(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		*flagsp = fdp->fd_files[fd].fileflags;
		error = 0;
	}
	spin_unlock(&fdp->fd_spin);
	return (error);
}

/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
{
	int error;

	spin_lock(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		fdp->fd_files[fd].fileflags |= add_flags;
		error = 0;
	}
	spin_unlock(&fdp->fd_spin);
	return (error);
}

/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
{
	int error;

	spin_lock(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		fdp->fd_files[fd].fileflags &= ~rem_flags;
		error = 0;
	}
	spin_unlock(&fdp->fd_spin);
	return (error);
}

/*
 * Set/Change/Clear the creds for a fp and synchronize the uidinfo.
 */
void
fsetcred(struct file *fp, struct ucred *ncr)
{
	struct ucred *ocr;
	struct uidinfo *uip;
	struct uidcount *pup;
	int cpu = mycpuid;
	int count;

	ocr = fp->f_cred;
	if (ocr == NULL || ncr == NULL || ocr->cr_uidinfo != ncr->cr_uidinfo) {
		if (ocr) {
			uip = ocr->cr_uidinfo;
			pup = &uip->ui_pcpu[cpu];
			atomic_add_int(&pup->pu_openfiles, -1);
			if (pup->pu_openfiles < -PUP_LIMIT ||
			    pup->pu_openfiles > PUP_LIMIT) {
				count = atomic_swap_int(&pup->pu_openfiles, 0);
				atomic_add_int(&uip->ui_openfiles, count);
			}
		}
		if (ncr) {
			uip = ncr->cr_uidinfo;
			pup = &uip->ui_pcpu[cpu];
			atomic_add_int(&pup->pu_openfiles, 1);
			if (pup->pu_openfiles < -PUP_LIMIT ||
			    pup->pu_openfiles > PUP_LIMIT) {
				count = atomic_swap_int(&pup->pu_openfiles, 0);
				atomic_add_int(&uip->ui_openfiles, count);
			}
		}
	}
	if (ncr)
		crhold(ncr);
	fp->f_cred = ncr;
	if (ocr)
		crfree(ocr);
}

/*
 * Free a file structure.
 */
static
void
ffree(struct file *fp)
{
	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
	fsetcred(fp, NULL);
	if (fp->f_nchandle.ncp)
		cache_drop(&fp->f_nchandle);
	objcache_put(file_objcache, fp);
}

/*
 * called from init_main, initialize filedesc0 for proc0.
2240 */ 2241 void 2242 fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask) 2243 { 2244 p0->p_fd = fdp0; 2245 p0->p_fdtol = NULL; 2246 fdp0->fd_refcnt = 1; 2247 fdp0->fd_cmask = cmask; 2248 fdp0->fd_files = fdp0->fd_builtin_files; 2249 fdp0->fd_nfiles = NDFILE; 2250 fdp0->fd_lastfile = -1; 2251 spin_init(&fdp0->fd_spin, "fdinitbootstrap"); 2252 } 2253 2254 /* 2255 * Build a new filedesc structure. 2256 */ 2257 struct filedesc * 2258 fdinit(struct proc *p) 2259 { 2260 struct filedesc *newfdp; 2261 struct filedesc *fdp = p->p_fd; 2262 2263 newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO); 2264 spin_lock(&fdp->fd_spin); 2265 if (fdp->fd_cdir) { 2266 newfdp->fd_cdir = fdp->fd_cdir; 2267 vref(newfdp->fd_cdir); 2268 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir); 2269 } 2270 2271 /* 2272 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of 2273 * proc0, but should unconditionally exist in other processes. 2274 */ 2275 if (fdp->fd_rdir) { 2276 newfdp->fd_rdir = fdp->fd_rdir; 2277 vref(newfdp->fd_rdir); 2278 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir); 2279 } 2280 if (fdp->fd_jdir) { 2281 newfdp->fd_jdir = fdp->fd_jdir; 2282 vref(newfdp->fd_jdir); 2283 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir); 2284 } 2285 spin_unlock(&fdp->fd_spin); 2286 2287 /* Create the file descriptor table. */ 2288 newfdp->fd_refcnt = 1; 2289 newfdp->fd_cmask = cmask; 2290 newfdp->fd_files = newfdp->fd_builtin_files; 2291 newfdp->fd_nfiles = NDFILE; 2292 newfdp->fd_lastfile = -1; 2293 spin_init(&newfdp->fd_spin, "fdinit"); 2294 2295 return (newfdp); 2296 } 2297 2298 /* 2299 * Share a filedesc structure. 2300 */ 2301 struct filedesc * 2302 fdshare(struct proc *p) 2303 { 2304 struct filedesc *fdp; 2305 2306 fdp = p->p_fd; 2307 spin_lock(&fdp->fd_spin); 2308 fdp->fd_refcnt++; 2309 spin_unlock(&fdp->fd_spin); 2310 return (fdp); 2311 } 2312 2313 /* 2314 * Copy a filedesc structure. 2315 */ 2316 int 2317 fdcopy(struct proc *p, struct filedesc **fpp) 2318 { 2319 struct filedesc *fdp = p->p_fd; 2320 struct filedesc *newfdp; 2321 struct fdnode *fdnode; 2322 int i; 2323 int ni; 2324 2325 /* 2326 * Certain daemons might not have file descriptors. 2327 */ 2328 if (fdp == NULL) 2329 return (0); 2330 2331 /* 2332 * Allocate the new filedesc and fd_files[] array. This can race 2333 * with operations by other threads on the fdp so we have to be 2334 * careful. 2335 */ 2336 newfdp = kmalloc(sizeof(struct filedesc), 2337 M_FILEDESC, M_WAITOK | M_ZERO | M_NULLOK); 2338 if (newfdp == NULL) { 2339 *fpp = NULL; 2340 return (-1); 2341 } 2342 again: 2343 spin_lock(&fdp->fd_spin); 2344 if (fdp->fd_lastfile < NDFILE) { 2345 newfdp->fd_files = newfdp->fd_builtin_files; 2346 i = NDFILE; 2347 } else { 2348 /* 2349 * We have to allocate (N^2-1) entries for our in-place 2350 * binary tree. Allow the table to shrink. 2351 */ 2352 i = fdp->fd_nfiles; 2353 ni = (i - 1) / 2; 2354 while (ni > fdp->fd_lastfile && ni > NDFILE) { 2355 i = ni; 2356 ni = (i - 1) / 2; 2357 } 2358 spin_unlock(&fdp->fd_spin); 2359 newfdp->fd_files = kmalloc(i * sizeof(struct fdnode), 2360 M_FILEDESC, M_WAITOK | M_ZERO); 2361 2362 /* 2363 * Check for race, retry 2364 */ 2365 spin_lock(&fdp->fd_spin); 2366 if (i <= fdp->fd_lastfile) { 2367 spin_unlock(&fdp->fd_spin); 2368 kfree(newfdp->fd_files, M_FILEDESC); 2369 goto again; 2370 } 2371 } 2372 2373 /* 2374 * Dup the remaining fields. vref() and cache_hold() can be 2375 * safely called while holding the read spinlock on fdp. 2376 * 2377 * The read spinlock on fdp is still being held. 
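 *
 * (Worked example for the sizing loop above, assuming NDFILE is below
 * 63: an old table of 1023 fdnodes whose highest open descriptor is 40
 * shrinks 1023 -> 511 -> 255 -> 127 -> 63 and then stops, because the
 * next candidate size, 31, would no longer cover fd_lastfile.)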
2378 *
2379 * NOTE: vref and cache_hold calls for the case where the vnode
2380 * or cache entry already has at least one ref may be called
2381 * while holding spin locks.
2382 */
2383 if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
2384 vref(newfdp->fd_cdir);
2385 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
2386 }
2387 /*
2388 * We must check for fd_rdir here, at least for now because
2389 * the init process is created before we have access to the
2390 * rootvnode to take a reference to it.
2391 */
2392 if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
2393 vref(newfdp->fd_rdir);
2394 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
2395 }
2396 if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
2397 vref(newfdp->fd_jdir);
2398 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
2399 }
2400 newfdp->fd_refcnt = 1;
2401 newfdp->fd_nfiles = i;
2402 newfdp->fd_lastfile = fdp->fd_lastfile;
2403 newfdp->fd_freefile = fdp->fd_freefile;
2404 newfdp->fd_cmask = fdp->fd_cmask;
2405 spin_init(&newfdp->fd_spin, "fdcopy");
2406
2407 /*
2408 * Copy the descriptor table through (i). This also copies the
2409 * allocation state. Then go through and ref the file pointers
2410 * and clean up any KQ descriptors.
2411 *
2412 * kq descriptors cannot be copied. Since we haven't ref'd the
2413 * copied files yet we can ignore the return value from funsetfd_locked().
2414 *
2415 * The read spinlock on fdp is still being held.
2416 *
2417 * Be sure to clean out fdnode->tdcache, otherwise bad things will
2418 * happen.
2419 */
2420 bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
2421 for (i = 0 ; i < newfdp->fd_nfiles; ++i) {
2422 fdnode = &newfdp->fd_files[i];
2423 if (fdnode->reserved) {
2424 fdreserve_locked(newfdp, i, -1);
2425 fdnode->reserved = 0;
2426 fdfixup_locked(newfdp, i);
2427 } else if (fdnode->fp) {
2428 bzero(&fdnode->tdcache, sizeof(fdnode->tdcache));
2429 if (fdnode->fp->f_type == DTYPE_KQUEUE) {
2430 (void)funsetfd_locked(newfdp, i);
2431 } else {
2432 fhold(fdnode->fp);
2433 }
2434 }
2435 }
2436 spin_unlock(&fdp->fd_spin);
2437 *fpp = newfdp;
2438 return (0);
2439 }
2440
2441 /*
2442 * Release a filedesc structure.
2443 *
2444 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
2445 */
2446 void
2447 fdfree(struct proc *p, struct filedesc *repl)
2448 {
2449 struct filedesc *fdp;
2450 struct fdnode *fdnode;
2451 int i;
2452 struct filedesc_to_leader *fdtol;
2453 struct file *fp;
2454 struct vnode *vp;
2455 struct flock lf;
2456
2457 /*
2458 * Before destroying or replacing p->p_fd we must be sure to
2459 * clean out the cache of the last thread, which should be
2460 * curthread.
2461 */
2462 fexitcache(curthread);
2463
2464 /*
2465 * Certain daemons might not have file descriptors.
2466 */
2467 fdp = p->p_fd;
2468 if (fdp == NULL) {
2469 p->p_fd = repl;
2470 return;
2471 }
2472
2473 /*
2474 * Severe messing around to follow.
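 *
 * (Hedged usage note: the process exit path is expected to simply call
 * fdfree(p, NULL), while a caller that wants to unshare rather than
 * destroy the table would do something along the lines of
 *
 *	fdcopy(p, &newfdp);
 *	fdfree(p, newfdp);
 *
 * the exact call sites are assumed here rather than verified.)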
2475 */ 2476 spin_lock(&fdp->fd_spin); 2477 2478 /* Check for special need to clear POSIX style locks */ 2479 fdtol = p->p_fdtol; 2480 if (fdtol != NULL) { 2481 KASSERT(fdtol->fdl_refcount > 0, 2482 ("filedesc_to_refcount botch: fdl_refcount=%d", 2483 fdtol->fdl_refcount)); 2484 if (fdtol->fdl_refcount == 1 && p->p_leader->p_advlock_flag) { 2485 for (i = 0; i <= fdp->fd_lastfile; ++i) { 2486 fdnode = &fdp->fd_files[i]; 2487 if (fdnode->fp == NULL || 2488 fdnode->fp->f_type != DTYPE_VNODE) { 2489 continue; 2490 } 2491 fp = fdnode->fp; 2492 fhold(fp); 2493 spin_unlock(&fdp->fd_spin); 2494 2495 lf.l_whence = SEEK_SET; 2496 lf.l_start = 0; 2497 lf.l_len = 0; 2498 lf.l_type = F_UNLCK; 2499 vp = (struct vnode *)fp->f_data; 2500 VOP_ADVLOCK(vp, (caddr_t)p->p_leader, 2501 F_UNLCK, &lf, F_POSIX); 2502 fdrop(fp); 2503 spin_lock(&fdp->fd_spin); 2504 } 2505 } 2506 retry: 2507 if (fdtol->fdl_refcount == 1) { 2508 if (fdp->fd_holdleaderscount > 0 && 2509 p->p_leader->p_advlock_flag) { 2510 /* 2511 * close() or do_dup() has cleared a reference 2512 * in a shared file descriptor table. 2513 */ 2514 fdp->fd_holdleaderswakeup = 1; 2515 ssleep(&fdp->fd_holdleaderscount, 2516 &fdp->fd_spin, 0, "fdlhold", 0); 2517 goto retry; 2518 } 2519 if (fdtol->fdl_holdcount > 0) { 2520 /* 2521 * Ensure that fdtol->fdl_leader 2522 * remains valid in closef(). 2523 */ 2524 fdtol->fdl_wakeup = 1; 2525 ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0); 2526 goto retry; 2527 } 2528 } 2529 fdtol->fdl_refcount--; 2530 if (fdtol->fdl_refcount == 0 && 2531 fdtol->fdl_holdcount == 0) { 2532 fdtol->fdl_next->fdl_prev = fdtol->fdl_prev; 2533 fdtol->fdl_prev->fdl_next = fdtol->fdl_next; 2534 } else { 2535 fdtol = NULL; 2536 } 2537 p->p_fdtol = NULL; 2538 if (fdtol != NULL) { 2539 spin_unlock(&fdp->fd_spin); 2540 kfree(fdtol, M_FILEDESC_TO_LEADER); 2541 spin_lock(&fdp->fd_spin); 2542 } 2543 } 2544 if (--fdp->fd_refcnt > 0) { 2545 spin_unlock(&fdp->fd_spin); 2546 spin_lock(&p->p_spin); 2547 p->p_fd = repl; 2548 spin_unlock(&p->p_spin); 2549 return; 2550 } 2551 2552 /* 2553 * Even though we are the last reference to the structure allproc 2554 * scans may still reference the structure. Maintain proper 2555 * locks until we can replace p->p_fd. 2556 * 2557 * Also note that kqueue's closef still needs to reference the 2558 * fdp via p->p_fd, so we have to close the descriptors before 2559 * we replace p->p_fd. 2560 */ 2561 for (i = 0; i <= fdp->fd_lastfile; ++i) { 2562 if (fdp->fd_files[i].fp) { 2563 fp = funsetfd_locked(fdp, i); 2564 if (fp) { 2565 spin_unlock(&fdp->fd_spin); 2566 if (SLIST_FIRST(&fp->f_klist)) 2567 knote_fdclose(fp, fdp, i); 2568 closef(fp, p); 2569 spin_lock(&fdp->fd_spin); 2570 } 2571 } 2572 } 2573 spin_unlock(&fdp->fd_spin); 2574 2575 /* 2576 * Interlock against an allproc scan operations (typically frevoke). 2577 */ 2578 spin_lock(&p->p_spin); 2579 p->p_fd = repl; 2580 spin_unlock(&p->p_spin); 2581 2582 /* 2583 * Wait for any softrefs to go away. This race rarely occurs so 2584 * we can use a non-critical-path style poll/sleep loop. The 2585 * race only occurs against allproc scans. 2586 * 2587 * No new softrefs can occur with the fdp disconnected from the 2588 * process. 
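 *
 * For reference, the scanner side (see sysctl_kern_file_callback()
 * below) takes its softref roughly as follows:
 *
 *	spin_lock(&p->p_spin);
 *	if ((fdp = p->p_fd) != NULL)
 *		atomic_add_int(&fdp->fd_softrefs, 1);
 *	spin_unlock(&p->p_spin);
 *	...
 *	atomic_subtract_int(&fdp->fd_softrefs, 1);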
2589 */
2590 if (fdp->fd_softrefs) {
2591 kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid);
2592 while (fdp->fd_softrefs)
2593 tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1);
2594 }
2595
2596 if (fdp->fd_files != fdp->fd_builtin_files)
2597 kfree(fdp->fd_files, M_FILEDESC);
2598 if (fdp->fd_cdir) {
2599 cache_drop(&fdp->fd_ncdir);
2600 vrele(fdp->fd_cdir);
2601 }
2602 if (fdp->fd_rdir) {
2603 cache_drop(&fdp->fd_nrdir);
2604 vrele(fdp->fd_rdir);
2605 }
2606 if (fdp->fd_jdir) {
2607 cache_drop(&fdp->fd_njdir);
2608 vrele(fdp->fd_jdir);
2609 }
2610 kfree(fdp, M_FILEDESC);
2611 }
2612
2613 /*
2614 * Retrieve and reference the file pointer associated with a descriptor.
2615 *
2616 * td must be the current thread.
2617 */
2618 struct file *
2619 holdfp(thread_t td, int fd, int flag)
2620 {
2621 struct file *fp;
2622
2623 fp = _holdfp_cache(td, fd);
2624 if (fp) {
2625 if ((fp->f_flag & flag) == 0 && flag != -1) {
2626 fdrop(fp);
2627 fp = NULL;
2628 }
2629 }
2630 return fp;
2631 }
2632
2633 /*
2634 * holdsock() - load the struct file pointer associated
2635 * with a socket into *fpp. If an error occurs, non-zero
2636 * will be returned and *fpp will be set to NULL.
2637 *
2638 * td must be the current thread.
2639 */
2640 int
2641 holdsock(thread_t td, int fd, struct file **fpp)
2642 {
2643 struct file *fp;
2644 int error;
2645
2646 /*
2647 * Lockless shortcut
2648 */
2649 fp = _holdfp_cache(td, fd);
2650 if (fp) {
2651 if (fp->f_type != DTYPE_SOCKET) {
2652 fdrop(fp);
2653 fp = NULL;
2654 error = ENOTSOCK;
2655 } else {
2656 error = 0;
2657 }
2658 } else {
2659 error = EBADF;
2660 }
2661 *fpp = fp;
2662
2663 return (error);
2664 }
2665
2666 /*
2667 * Convert a user file descriptor to a held file pointer.
2668 *
2669 * td must be the current thread.
2670 */
2671 int
2672 holdvnode(thread_t td, int fd, struct file **fpp)
2673 {
2674 struct file *fp;
2675 int error;
2676
2677 fp = _holdfp_cache(td, fd);
2678 if (fp) {
2679 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
2680 fdrop(fp);
2681 fp = NULL;
2682 error = EINVAL;
2683 } else {
2684 error = 0;
2685 }
2686 } else {
2687 error = EBADF;
2688 }
2689 *fpp = fp;
2690
2691 return (error);
2692 }
2693
2694 /*
2695 * For setugid programs we don't want people to use that setugidness
2696 * to generate error messages which write to a file which would
2697 * otherwise be off-limits to the process.
2698 *
2699 * This is a gross hack to plug the hole. A better solution would involve
2700 * a special vop or other form of generalized access control mechanism. We
2701 * go ahead and just reject all procfs file system accesses as dangerous.
2702 *
2703 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
2704 * sufficient. We also don't check for setugidness since we know we are.
2705 */
2706 static int
2707 is_unsafe(struct file *fp)
2708 {
2709 if (fp->f_type == DTYPE_VNODE &&
2710 ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
2711 return (1);
2712 return (0);
2713 }
2714
2715 /*
2716 * Make this setugid thing safe, if at all possible.
2717 *
2718 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
2719 */
2720 void
2721 setugidsafety(struct proc *p)
2722 {
2723 struct filedesc *fdp = p->p_fd;
2724 int i;
2725
2726 /* Certain daemons might not have file descriptors. */
2727 if (fdp == NULL)
2728 return;
2729
2730 /*
2731 * note: fdp->fd_files may be reallocated out from under us while
2732 * we are blocked in a close. Be careful!
2733 */ 2734 for (i = 0; i <= fdp->fd_lastfile; i++) { 2735 if (i > 2) 2736 break; 2737 if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) { 2738 struct file *fp; 2739 2740 /* 2741 * NULL-out descriptor prior to close to avoid 2742 * a race while close blocks. 2743 */ 2744 if ((fp = funsetfd_locked(fdp, i)) != NULL) { 2745 knote_fdclose(fp, fdp, i); 2746 closef(fp, p); 2747 } 2748 } 2749 } 2750 } 2751 2752 /* 2753 * Close all CLOEXEC files on exec. 2754 * 2755 * Only a single thread remains for the current process. 2756 * 2757 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose() 2758 */ 2759 void 2760 fdcloseexec(struct proc *p) 2761 { 2762 struct filedesc *fdp = p->p_fd; 2763 int i; 2764 2765 /* Certain daemons might not have file descriptors. */ 2766 if (fdp == NULL) 2767 return; 2768 2769 /* 2770 * We cannot cache fd_files since operations may block and rip 2771 * them out from under us. 2772 */ 2773 for (i = 0; i <= fdp->fd_lastfile; i++) { 2774 if (fdp->fd_files[i].fp != NULL && 2775 (fdp->fd_files[i].fileflags & UF_EXCLOSE)) { 2776 struct file *fp; 2777 2778 /* 2779 * NULL-out descriptor prior to close to avoid 2780 * a race while close blocks. 2781 * 2782 * (funsetfd*() also clears the fd cache) 2783 */ 2784 if ((fp = funsetfd_locked(fdp, i)) != NULL) { 2785 knote_fdclose(fp, fdp, i); 2786 closef(fp, p); 2787 } 2788 } 2789 } 2790 } 2791 2792 /* 2793 * It is unsafe for set[ug]id processes to be started with file 2794 * descriptors 0..2 closed, as these descriptors are given implicit 2795 * significance in the Standard C library. fdcheckstd() will create a 2796 * descriptor referencing /dev/null for each of stdin, stdout, and 2797 * stderr that is not already open. 2798 * 2799 * NOT MPSAFE - calls falloc, vn_open, etc 2800 */ 2801 int 2802 fdcheckstd(struct lwp *lp) 2803 { 2804 struct nlookupdata nd; 2805 struct filedesc *fdp; 2806 struct file *fp; 2807 int retval; 2808 int i, error, flags, devnull; 2809 2810 fdp = lp->lwp_proc->p_fd; 2811 if (fdp == NULL) 2812 return (0); 2813 devnull = -1; 2814 error = 0; 2815 for (i = 0; i < 3; i++) { 2816 if (fdp->fd_files[i].fp != NULL) 2817 continue; 2818 if (devnull < 0) { 2819 if ((error = falloc(lp, &fp, &devnull)) != 0) 2820 break; 2821 2822 error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE, 2823 NLC_FOLLOW|NLC_LOCKVP); 2824 flags = FREAD | FWRITE; 2825 if (error == 0) 2826 error = vn_open(&nd, fp, flags, 0); 2827 if (error == 0) 2828 fsetfd(fdp, fp, devnull); 2829 else 2830 fsetfd(fdp, NULL, devnull); 2831 fdrop(fp); 2832 nlookup_done(&nd); 2833 if (error) 2834 break; 2835 KKASSERT(i == devnull); 2836 } else { 2837 error = kern_dup(DUP_FIXED, devnull, i, &retval); 2838 if (error != 0) 2839 break; 2840 } 2841 } 2842 return (error); 2843 } 2844 2845 /* 2846 * Internal form of close. 2847 * Decrement reference count on file structure. 2848 * Note: td and/or p may be NULL when closing a file 2849 * that was being passed in a message. 2850 * 2851 * MPALMOSTSAFE - acquires mplock for VOP operations 2852 */ 2853 int 2854 closef(struct file *fp, struct proc *p) 2855 { 2856 struct vnode *vp; 2857 struct flock lf; 2858 struct filedesc_to_leader *fdtol; 2859 2860 if (fp == NULL) 2861 return (0); 2862 2863 /* 2864 * POSIX record locking dictates that any close releases ALL 2865 * locks owned by this process. This is handled by setting 2866 * a flag in the unlock to free ONLY locks obeying POSIX 2867 * semantics, and not to free BSD-style file locks. 
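 *
 * (Hedged userland illustration of the distinction: a record lock taken
 * with
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	fcntl(fd, F_SETLK, &fl);
 *
 * is released by ANY close of the file by this process, whereas an
 * flock(fd, LOCK_EX) style lock follows the open file object and is only
 * released when the last reference is dropped, see fdrop().)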
2868 * If the descriptor was in a message, POSIX-style locks 2869 * aren't passed with the descriptor. 2870 */ 2871 if (p != NULL && fp->f_type == DTYPE_VNODE && 2872 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS) 2873 ) { 2874 if (p->p_leader->p_advlock_flag) { 2875 lf.l_whence = SEEK_SET; 2876 lf.l_start = 0; 2877 lf.l_len = 0; 2878 lf.l_type = F_UNLCK; 2879 vp = (struct vnode *)fp->f_data; 2880 VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK, 2881 &lf, F_POSIX); 2882 } 2883 fdtol = p->p_fdtol; 2884 if (fdtol != NULL) { 2885 lwkt_gettoken(&p->p_token); 2886 2887 /* 2888 * Handle special case where file descriptor table 2889 * is shared between multiple process leaders. 2890 */ 2891 for (fdtol = fdtol->fdl_next; 2892 fdtol != p->p_fdtol; 2893 fdtol = fdtol->fdl_next) { 2894 if (fdtol->fdl_leader->p_advlock_flag == 0) 2895 continue; 2896 fdtol->fdl_holdcount++; 2897 lf.l_whence = SEEK_SET; 2898 lf.l_start = 0; 2899 lf.l_len = 0; 2900 lf.l_type = F_UNLCK; 2901 vp = (struct vnode *)fp->f_data; 2902 VOP_ADVLOCK(vp, (caddr_t)fdtol->fdl_leader, 2903 F_UNLCK, &lf, F_POSIX); 2904 fdtol->fdl_holdcount--; 2905 if (fdtol->fdl_holdcount == 0 && 2906 fdtol->fdl_wakeup != 0) { 2907 fdtol->fdl_wakeup = 0; 2908 wakeup(fdtol); 2909 } 2910 } 2911 lwkt_reltoken(&p->p_token); 2912 } 2913 } 2914 return (fdrop(fp)); 2915 } 2916 2917 /* 2918 * fhold() can only be called if f_count is already at least 1 (i.e. the 2919 * caller of fhold() already has a reference to the file pointer in some 2920 * manner or other). 2921 * 2922 * Atomic ops are used for incrementing and decrementing f_count before 2923 * the 1->0 transition. f_count 1->0 transition is special, see the 2924 * comment in fdrop(). 2925 */ 2926 void 2927 fhold(struct file *fp) 2928 { 2929 /* 0->1 transition will never work */ 2930 KASSERT(fp->f_count > 0, ("fhold: invalid f_count %d", fp->f_count)); 2931 atomic_add_int(&fp->f_count, 1); 2932 } 2933 2934 /* 2935 * fdrop() - drop a reference to a descriptor 2936 */ 2937 int 2938 fdrop(struct file *fp) 2939 { 2940 struct flock lf; 2941 struct vnode *vp; 2942 int error, do_free = 0; 2943 2944 /* 2945 * NOTE: 2946 * Simple atomic_fetchadd_int(f_count, -1) here will cause use- 2947 * after-free or double free (due to f_count 0->1 transition), if 2948 * fhold() is called on the fps found through filehead iteration. 2949 */ 2950 for (;;) { 2951 int count = fp->f_count; 2952 2953 cpu_ccfence(); 2954 KASSERT(count > 0, ("fdrop: invalid f_count %d", count)); 2955 if (count == 1) { 2956 struct filelist_head *head = fp2filelist(fp); 2957 2958 /* 2959 * About to drop the last reference, hold the 2960 * filehead spin lock and drop it, so that no 2961 * one could see this fp through filehead anymore, 2962 * let alone fhold() this fp. 2963 */ 2964 spin_lock(&head->spin); 2965 if (atomic_cmpset_int(&fp->f_count, count, 0)) { 2966 LIST_REMOVE(fp, f_list); 2967 spin_unlock(&head->spin); 2968 atomic_subtract_int(&nfiles, 1); 2969 do_free = 1; /* free this fp */ 2970 break; 2971 } 2972 spin_unlock(&head->spin); 2973 /* retry */ 2974 } else if (atomic_cmpset_int(&fp->f_count, count, count - 1)) { 2975 break; 2976 } 2977 /* retry */ 2978 } 2979 if (!do_free) 2980 return (0); 2981 2982 KKASSERT(SLIST_FIRST(&fp->f_klist) == NULL); 2983 2984 /* 2985 * The last reference has gone away, we own the fp structure free 2986 * and clear. 
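 *
 * (Typical pairing, hedged sketch of a syscall-style consumer:
 *
 *	fp = holdfp(curthread, fd, FREAD);
 *	if (fp == NULL)
 *		return (EBADF);
 *	... use fp ...
 *	fdrop(fp);
 *
 * the fdrop() of the final reference is what lands here.)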
2987 */ 2988 if (fp->f_count < 0) 2989 panic("fdrop: count < 0"); 2990 if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE && 2991 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS) 2992 ) { 2993 lf.l_whence = SEEK_SET; 2994 lf.l_start = 0; 2995 lf.l_len = 0; 2996 lf.l_type = F_UNLCK; 2997 vp = (struct vnode *)fp->f_data; 2998 VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0); 2999 } 3000 if (fp->f_ops != &badfileops) 3001 error = fo_close(fp); 3002 else 3003 error = 0; 3004 ffree(fp); 3005 return (error); 3006 } 3007 3008 /* 3009 * Apply an advisory lock on a file descriptor. 3010 * 3011 * Just attempt to get a record lock of the requested type on 3012 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0). 3013 * 3014 * MPALMOSTSAFE 3015 */ 3016 int 3017 sys_flock(struct flock_args *uap) 3018 { 3019 thread_t td = curthread; 3020 struct file *fp; 3021 struct vnode *vp; 3022 struct flock lf; 3023 int error; 3024 3025 if ((fp = holdfp(td, uap->fd, -1)) == NULL) 3026 return (EBADF); 3027 if (fp->f_type != DTYPE_VNODE) { 3028 error = EOPNOTSUPP; 3029 goto done; 3030 } 3031 vp = (struct vnode *)fp->f_data; 3032 lf.l_whence = SEEK_SET; 3033 lf.l_start = 0; 3034 lf.l_len = 0; 3035 if (uap->how & LOCK_UN) { 3036 lf.l_type = F_UNLCK; 3037 atomic_clear_int(&fp->f_flag, FHASLOCK); /* race ok */ 3038 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0); 3039 goto done; 3040 } 3041 if (uap->how & LOCK_EX) 3042 lf.l_type = F_WRLCK; 3043 else if (uap->how & LOCK_SH) 3044 lf.l_type = F_RDLCK; 3045 else { 3046 error = EBADF; 3047 goto done; 3048 } 3049 if (uap->how & LOCK_NB) 3050 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0); 3051 else 3052 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT); 3053 atomic_set_int(&fp->f_flag, FHASLOCK); /* race ok */ 3054 done: 3055 fdrop(fp); 3056 return (error); 3057 } 3058 3059 /* 3060 * File Descriptor pseudo-device driver (/dev/fd/). 3061 * 3062 * Opening minor device N dup()s the file (if any) connected to file 3063 * descriptor N belonging to the calling process. Note that this driver 3064 * consists of only the ``open()'' routine, because all subsequent 3065 * references to this file will be direct to the other driver. 3066 */ 3067 static int 3068 fdopen(struct dev_open_args *ap) 3069 { 3070 thread_t td = curthread; 3071 3072 KKASSERT(td->td_lwp != NULL); 3073 3074 /* 3075 * XXX Kludge: set curlwp->lwp_dupfd to contain the value of the 3076 * the file descriptor being sought for duplication. The error 3077 * return ensures that the vnode for this device will be released 3078 * by vn_open. Open will detect this special error and take the 3079 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN 3080 * will simply report the error. 3081 */ 3082 td->td_lwp->lwp_dupfd = minor(ap->a_head.a_dev); 3083 return (ENODEV); 3084 } 3085 3086 /* 3087 * The caller has reserved the file descriptor dfd for us. On success we 3088 * must fsetfd() it. On failure the caller will clean it up. 3089 */ 3090 int 3091 dupfdopen(thread_t td, int dfd, int sfd, int mode, int error) 3092 { 3093 struct filedesc *fdp; 3094 struct file *wfp; 3095 struct file *xfp; 3096 int werror; 3097 3098 if ((wfp = holdfp(td, sfd, -1)) == NULL) 3099 return (EBADF); 3100 3101 /* 3102 * Close a revoke/dup race. Duping a descriptor marked as revoked 3103 * will dup a dummy descriptor instead of the real one. 
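 *
 * (As a reminder of how we normally get here, hedged userland view:
 *
 *	fd = open("/dev/fd/3", O_RDONLY);
 *
 * fdopen() above stashes 3 in lwp_dupfd and fails with ENODEV, after
 * which the open path calls dupfdopen() with the reserved descriptor.)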
3104 */
3105 if (wfp->f_flag & FREVOKED) {
3106 kprintf("Warning: attempt to dup() a revoked descriptor\n");
3107 fdrop(wfp);
3108 wfp = NULL;
3109 werror = falloc(NULL, &wfp, NULL);
3110 if (werror)
3111 return (werror);
3112 }
3113
3114 fdp = td->td_proc->p_fd;
3115
3116 /*
3117 * There are two cases of interest here.
3118 *
3119 * For ENODEV simply dup sfd to file descriptor dfd and return.
3120 *
3121 * For ENXIO steal away the file structure from sfd and store it in
3122 * dfd. sfd is effectively closed by this operation.
3123 *
3124 * Any other error code is just returned.
3125 */
3126 switch (error) {
3127 case ENODEV:
3128 /*
3129 * Check that the mode the file is being opened for is a
3130 * subset of the mode of the existing descriptor.
3131 */
3132 if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
3133 error = EACCES;
3134 break;
3135 }
3136 spin_lock(&fdp->fd_spin);
3137 fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
3138 fsetfd_locked(fdp, wfp, dfd);
3139 spin_unlock(&fdp->fd_spin);
3140 error = 0;
3141 break;
3142 case ENXIO:
3143 /*
3144 * Steal away the file pointer from sfd, and stuff it into dfd.
3145 */
3146 spin_lock(&fdp->fd_spin);
3147 fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
3148 fsetfd_locked(fdp, wfp, dfd);
3149 if ((xfp = funsetfd_locked(fdp, sfd)) != NULL) {
3150 spin_unlock(&fdp->fd_spin);
3151 fdrop(xfp);
3152 } else {
3153 spin_unlock(&fdp->fd_spin);
3154 }
3155 error = 0;
3156 break;
3157 default:
3158 break;
3159 }
3160 fdrop(wfp);
3161 return (error);
3162 }
3163
3164 /*
3165 * NOT MPSAFE - I think these refer to a common file descriptor table
3166 * and we need to spinlock that to link fdtol in.
3167 */
3168 struct filedesc_to_leader *
3169 filedesc_to_leader_alloc(struct filedesc_to_leader *old,
3170 struct proc *leader)
3171 {
3172 struct filedesc_to_leader *fdtol;
3173
3174 fdtol = kmalloc(sizeof(struct filedesc_to_leader),
3175 M_FILEDESC_TO_LEADER, M_WAITOK | M_ZERO);
3176 fdtol->fdl_refcount = 1;
3177 fdtol->fdl_holdcount = 0;
3178 fdtol->fdl_wakeup = 0;
3179 fdtol->fdl_leader = leader;
3180 if (old != NULL) {
3181 fdtol->fdl_next = old->fdl_next;
3182 fdtol->fdl_prev = old;
3183 old->fdl_next = fdtol;
3184 fdtol->fdl_next->fdl_prev = fdtol;
3185 } else {
3186 fdtol->fdl_next = fdtol;
3187 fdtol->fdl_prev = fdtol;
3188 }
3189 return fdtol;
3190 }
3191
3192 /*
3193 * Scan all file pointers in the system. The callback is made with
3194 * the per-bucket list spinlock held exclusively.
3195 */
3196 void
3197 allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
3198 {
3199 int i;
3200
3201 for (i = 0; i < NFILELIST_HEADS; ++i) {
3202 struct filelist_head *head = &filelist_heads[i];
3203 struct file *fp;
3204
3205 spin_lock(&head->spin);
3206 LIST_FOREACH(fp, &head->list, f_list) {
3207 int res;
3208
3209 res = callback(fp, data);
3210 if (res < 0)
3211 break;
3212 }
3213 spin_unlock(&head->spin);
3214 }
3215 }
3216
3217 /*
3218 * Get file structures.
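 *
 * (Hedged sketch of an allfiles_scan_exclusive() consumer; the callback
 * runs with a file list spinlock held and therefore must not block:
 *
 *	static int
 *	count_sockets_callback(struct file *fp, void *data)
 *	{
 *		if (fp->f_type == DTYPE_SOCKET)
 *			++*(int *)data;
 *		return (0);
 *	}
 *	...
 *	allfiles_scan_exclusive(count_sockets_callback, &count);
 *
 * returning a negative value from the callback stops the scan of the
 * current bucket early.)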
3219 * 3220 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe) 3221 */ 3222 3223 struct sysctl_kern_file_info { 3224 int count; 3225 int error; 3226 struct sysctl_req *req; 3227 }; 3228 3229 static int sysctl_kern_file_callback(struct proc *p, void *data); 3230 3231 static int 3232 sysctl_kern_file(SYSCTL_HANDLER_ARGS) 3233 { 3234 struct sysctl_kern_file_info info; 3235 3236 /* 3237 * Note: because the number of file descriptors is calculated 3238 * in different ways for sizing vs returning the data, 3239 * there is information leakage from the first loop. However, 3240 * it is of a similar order of magnitude to the leakage from 3241 * global system statistics such as kern.openfiles. 3242 * 3243 * When just doing a count, note that we cannot just count 3244 * the elements and add f_count via the filehead list because 3245 * threaded processes share their descriptor table and f_count might 3246 * still be '1' in that case. 3247 * 3248 * Since the SYSCTL op can block, we must hold the process to 3249 * prevent it being ripped out from under us either in the 3250 * file descriptor loop or in the greater LIST_FOREACH. The 3251 * process may be in varying states of disrepair. If the process 3252 * is in SZOMB we may have caught it just as it is being removed 3253 * from the allproc list, we must skip it in that case to maintain 3254 * an unbroken chain through the allproc list. 3255 */ 3256 info.count = 0; 3257 info.error = 0; 3258 info.req = req; 3259 allproc_scan(sysctl_kern_file_callback, &info, 0); 3260 3261 /* 3262 * When just calculating the size, overestimate a bit to try to 3263 * prevent system activity from causing the buffer-fill call 3264 * to fail later on. 3265 */ 3266 if (req->oldptr == NULL) { 3267 info.count = (info.count + 16) + (info.count / 10); 3268 info.error = SYSCTL_OUT(req, NULL, 3269 info.count * sizeof(struct kinfo_file)); 3270 } 3271 return (info.error); 3272 } 3273 3274 static int 3275 sysctl_kern_file_callback(struct proc *p, void *data) 3276 { 3277 struct sysctl_kern_file_info *info = data; 3278 struct kinfo_file kf; 3279 struct filedesc *fdp; 3280 struct file *fp; 3281 uid_t uid; 3282 int n; 3283 3284 if (p->p_stat == SIDL || p->p_stat == SZOMB) 3285 return(0); 3286 if (!(PRISON_CHECK(info->req->td->td_ucred, p->p_ucred) != 0)) 3287 return(0); 3288 3289 /* 3290 * Softref the fdp to prevent it from being destroyed 3291 */ 3292 spin_lock(&p->p_spin); 3293 if ((fdp = p->p_fd) == NULL) { 3294 spin_unlock(&p->p_spin); 3295 return(0); 3296 } 3297 atomic_add_int(&fdp->fd_softrefs, 1); 3298 spin_unlock(&p->p_spin); 3299 3300 /* 3301 * The fdp's own spinlock prevents the contents from being 3302 * modified. 3303 */ 3304 spin_lock_shared(&fdp->fd_spin); 3305 for (n = 0; n < fdp->fd_nfiles; ++n) { 3306 if ((fp = fdp->fd_files[n].fp) == NULL) 3307 continue; 3308 if (info->req->oldptr == NULL) { 3309 ++info->count; 3310 } else { 3311 uid = p->p_ucred ? 
p->p_ucred->cr_uid : -1; 3312 kcore_make_file(&kf, fp, p->p_pid, uid, n); 3313 spin_unlock_shared(&fdp->fd_spin); 3314 info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf)); 3315 spin_lock_shared(&fdp->fd_spin); 3316 if (info->error) 3317 break; 3318 } 3319 } 3320 spin_unlock_shared(&fdp->fd_spin); 3321 atomic_subtract_int(&fdp->fd_softrefs, 1); 3322 if (info->error) 3323 return(-1); 3324 return(0); 3325 } 3326 3327 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD, 3328 0, 0, sysctl_kern_file, "S,file", "Entire file table"); 3329 3330 SYSCTL_INT(_kern, OID_AUTO, minfilesperproc, CTLFLAG_RW, 3331 &minfilesperproc, 0, "Minimum files allowed open per process"); 3332 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW, 3333 &maxfilesperproc, 0, "Maximum files allowed open per process"); 3334 SYSCTL_INT(_kern, OID_AUTO, maxfilesperuser, CTLFLAG_RW, 3335 &maxfilesperuser, 0, "Maximum files allowed open per user"); 3336 3337 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW, 3338 &maxfiles, 0, "Maximum number of files"); 3339 3340 SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW, 3341 &maxfilesrootres, 0, "Descriptors reserved for root use"); 3342 3343 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD, 3344 &nfiles, 0, "System-wide number of open files"); 3345 3346 static void 3347 fildesc_drvinit(void *unused) 3348 { 3349 int fd; 3350 3351 for (fd = 0; fd < NUMFDESC; fd++) { 3352 make_dev(&fildesc_ops, fd, 3353 UID_BIN, GID_BIN, 0666, "fd/%d", fd); 3354 } 3355 3356 make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin"); 3357 make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout"); 3358 make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr"); 3359 } 3360 3361 struct fileops badfileops = { 3362 .fo_read = badfo_readwrite, 3363 .fo_write = badfo_readwrite, 3364 .fo_ioctl = badfo_ioctl, 3365 .fo_kqfilter = badfo_kqfilter, 3366 .fo_stat = badfo_stat, 3367 .fo_close = badfo_close, 3368 .fo_shutdown = badfo_shutdown 3369 }; 3370 3371 int 3372 badfo_readwrite( 3373 struct file *fp, 3374 struct uio *uio, 3375 struct ucred *cred, 3376 int flags 3377 ) { 3378 return (EBADF); 3379 } 3380 3381 int 3382 badfo_ioctl(struct file *fp, u_long com, caddr_t data, 3383 struct ucred *cred, struct sysmsg *msgv) 3384 { 3385 return (EBADF); 3386 } 3387 3388 /* 3389 * Must return an error to prevent registration, typically 3390 * due to a revoked descriptor (file_filtops assigned). 
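 *
 * (For context, hedged: falloc() starts every fp out with f_ops set to
 * &badfileops, and a revoked descriptor typically ends up routed back to
 * these defaults, so a consumer doing
 *
 *	error = fo_kqfilter(fp, kn);
 *
 * on such a file gets EOPNOTSUPP here instead of reaching a real
 * implementation.)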
3391 */ 3392 int 3393 badfo_kqfilter(struct file *fp, struct knote *kn) 3394 { 3395 return (EOPNOTSUPP); 3396 } 3397 3398 int 3399 badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred) 3400 { 3401 return (EBADF); 3402 } 3403 3404 int 3405 badfo_close(struct file *fp) 3406 { 3407 return (EBADF); 3408 } 3409 3410 int 3411 badfo_shutdown(struct file *fp, int how) 3412 { 3413 return (EBADF); 3414 } 3415 3416 int 3417 nofo_shutdown(struct file *fp, int how) 3418 { 3419 return (EOPNOTSUPP); 3420 } 3421 3422 SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, 3423 fildesc_drvinit,NULL); 3424 3425 static void 3426 filelist_heads_init(void *arg __unused) 3427 { 3428 int i; 3429 3430 for (i = 0; i < NFILELIST_HEADS; ++i) { 3431 struct filelist_head *head = &filelist_heads[i]; 3432 3433 spin_init(&head->spin, "filehead_spin"); 3434 LIST_INIT(&head->list); 3435 } 3436 } 3437 3438 SYSINIT(filelistheads, SI_BOOT1_LOCK, SI_ORDER_ANY, 3439 filelist_heads_init, NULL); 3440 3441 static void 3442 file_objcache_init(void *dummy __unused) 3443 { 3444 file_objcache = objcache_create("file", maxfiles, maxfiles / 8, 3445 NULL, NULL, NULL, /* TODO: ctor/dtor */ 3446 objcache_malloc_alloc, objcache_malloc_free, &file_malloc_args); 3447 } 3448 SYSINIT(fpobjcache, SI_BOOT2_POST_SMP, SI_ORDER_ANY, file_objcache_init, NULL); 3449