1 /* 2 * Copyright (c) 2005-2018 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Jeffrey Hsu and Matthew Dillon. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * 35 * Copyright (c) 1982, 1986, 1989, 1991, 1993 36 * The Regents of the University of California. All rights reserved. 37 * (c) UNIX System Laboratories, Inc. 38 * All or some portions of this file are derived from material licensed 39 * to the University of California by American Telephone and Telegraph 40 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 41 * the permission of UNIX System Laboratories, Inc. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. Neither the name of the University nor the names of its contributors 52 * may be used to endorse or promote products derived from this software 53 * without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 58 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 65 * SUCH DAMAGE. 66 * 67 * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94 68 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $ 69 */ 70 71 #include <sys/param.h> 72 #include <sys/systm.h> 73 #include <sys/malloc.h> 74 #include <sys/sysmsg.h> 75 #include <sys/conf.h> 76 #include <sys/device.h> 77 #include <sys/file.h> 78 #include <sys/filedesc.h> 79 #include <sys/kernel.h> 80 #include <sys/sysctl.h> 81 #include <sys/vnode.h> 82 #include <sys/proc.h> 83 #include <sys/nlookup.h> 84 #include <sys/stat.h> 85 #include <sys/filio.h> 86 #include <sys/fcntl.h> 87 #include <sys/unistd.h> 88 #include <sys/resourcevar.h> 89 #include <sys/event.h> 90 #include <sys/kern_syscall.h> 91 #include <sys/kcore.h> 92 #include <sys/kinfo.h> 93 #include <sys/un.h> 94 #include <sys/objcache.h> 95 96 #include <vm/vm.h> 97 #include <vm/vm_extern.h> 98 99 #include <sys/file2.h> 100 #include <sys/spinlock2.h> 101 102 static int fdalloc_locked(struct proc *p, struct filedesc *fdp, 103 int want, int *result); 104 static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd); 105 static void fdreserve_locked (struct filedesc *fdp, int fd0, int incr); 106 static struct file *funsetfd_locked (struct filedesc *fdp, int fd); 107 static void ffree(struct file *fp); 108 109 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table"); 110 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader", 111 "file desc to leader structures"); 112 static MALLOC_DEFINE_OBJ(M_FILE, sizeof(struct file), 113 "file", "Open file structure"); 114 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures"); 115 116 static struct krate krate_uidinfo = { .freq = 1 }; 117 118 static d_open_t fdopen; 119 #define NUMFDESC 64 120 121 #define CDEV_MAJOR 22 122 static struct dev_ops fildesc_ops = { 123 { "FD", 0, 0 }, 124 .d_open = fdopen, 125 }; 126 127 /* 128 * Descriptor management. 129 */ 130 #ifndef NFILELIST_HEADS 131 #define NFILELIST_HEADS 257 /* primary number */ 132 #endif 133 134 struct filelist_head { 135 struct spinlock spin; 136 struct filelist list; 137 } __cachealign; 138 139 static struct filelist_head filelist_heads[NFILELIST_HEADS]; 140 141 static int nfiles; /* actual number of open files */ 142 extern int cmask; 143 144 struct lwkt_token revoke_token = LWKT_TOKEN_INITIALIZER(revoke_token); 145 146 /* 147 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared. 148 * 149 * must be called with fdp->fd_spin exclusively held 150 */ 151 static __inline 152 void 153 fdfixup_locked(struct filedesc *fdp, int fd) 154 { 155 if (fd < fdp->fd_freefile) { 156 fdp->fd_freefile = fd; 157 } 158 while (fdp->fd_lastfile >= 0 && 159 fdp->fd_files[fdp->fd_lastfile].fp == NULL && 160 fdp->fd_files[fdp->fd_lastfile].reserved == 0 161 ) { 162 --fdp->fd_lastfile; 163 } 164 } 165 166 /* 167 * Clear the fd thread caches for this fdnode. 168 * 169 * If match_fdc is NULL, all thread caches of fdn will be cleared. 170 * The caller must hold fdp->fd_spin exclusively. 
The threads caching 171 * the descriptor do not have to be the current thread. The (status) 172 * argument is ignored. 173 * 174 * If match_fdc is not NULL, only the match_fdc's cache will be cleared. 175 * The caller must hold fdp->fd_spin shared and match_fdc must match a 176 * fdcache entry in curthread. match_fdc has been locked by the caller 177 * and had the specified (status). 178 * 179 * Since we are matching against a fp in the fdp (which must still be present 180 * at this time), fp will have at least two refs on any match and we can 181 * decrement the count trivially. 182 */ 183 static 184 void 185 fclearcache(struct fdnode *fdn, struct fdcache *match_fdc, int status) 186 { 187 struct fdcache *fdc; 188 struct file *fp; 189 int i; 190 191 /* 192 * match_fdc == NULL We are cleaning out all tdcache entries 193 * for the fdn and hold fdp->fd_spin exclusively. 194 * This can race against the target threads 195 * cleaning out specific entries. 196 * 197 * match_fdc != NULL We are cleaning out a specific tdcache 198 * entry on behalf of the owning thread 199 * and hold fdp->fd_spin shared. The thread 200 * has already locked the entry. This cannot 201 * race. 202 */ 203 fp = fdn->fp; 204 for (i = 0; i < NTDCACHEFD; ++i) { 205 if ((fdc = fdn->tdcache[i]) == NULL) 206 continue; 207 208 /* 209 * If match_fdc is non-NULL we are being asked to 210 * clear a specific fdc owned by curthread. There must 211 * be exactly one match. The caller has already locked 212 * the cache entry and will dispose of the lock after 213 * we return. 214 * 215 * Since we also have a shared lock on fdp, we 216 * can do this without atomic ops. 217 */ 218 if (match_fdc) { 219 if (fdc != match_fdc) 220 continue; 221 fdn->tdcache[i] = NULL; 222 KASSERT(fp == fdc->fp, 223 ("fclearcache(1): fp mismatch %p/%p\n", 224 fp, fdc->fp)); 225 fdc->fp = NULL; 226 fdc->fd = -1; 227 228 /* 229 * status can be 0 or 2. If 2 the ref is borrowed, 230 * if 0 the ref is not borrowed and we have to drop 231 * it. 232 */ 233 if (status == 0) 234 atomic_add_int(&fp->f_count, -1); 235 fdn->isfull = 0; /* heuristic */ 236 return; 237 } 238 239 /* 240 * Otherwise we hold an exclusive spin-lock and can only 241 * race thread consumers borrowing cache entries. 242 * 243 * Acquire the lock and dispose of the entry. We have to 244 * spin until we get the lock. 245 */ 246 for (;;) { 247 status = atomic_swap_int(&fdc->locked, 1); 248 if (status == 1) { /* foreign lock, retry */ 249 cpu_pause(); 250 continue; 251 } 252 fdn->tdcache[i] = NULL; 253 KASSERT(fp == fdc->fp, 254 ("fclearcache(2): fp mismatch %p/%p\n", 255 fp, fdc->fp)); 256 fdc->fp = NULL; 257 fdc->fd = -1; 258 if (status == 0) 259 atomic_add_int(&fp->f_count, -1); 260 fdn->isfull = 0; /* heuristic */ 261 atomic_swap_int(&fdc->locked, 0); 262 break; 263 } 264 } 265 KKASSERT(match_fdc == NULL); 266 } 267 268 /* 269 * Retrieve the fp for the specified fd given the specified file descriptor 270 * table. The fdp does not have to be owned by the current process. 271 * If flags != -1, fp->f_flag must contain at least one of the flags. 272 * 273 * This function is not able to cache the fp. 
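 *
 * Illustrative use (a sketch, not code from this file): a caller that
 * needs a readable descriptor out of an arbitrary fdp would do roughly
 *
 *	fp = holdfp_fdp(fdp, fd, FREAD);
 *	if (fp) {
 *		...use fp...
 *		fdrop(fp);
 *	}
 *
 * Passing -1 as the flag argument skips the f_flag check entirely.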
274 */ 275 struct file * 276 holdfp_fdp(struct filedesc *fdp, int fd, int flag) 277 { 278 struct file *fp; 279 280 spin_lock_shared(&fdp->fd_spin); 281 if (((u_int)fd) < fdp->fd_nfiles) { 282 fp = fdp->fd_files[fd].fp; /* can be NULL */ 283 if (fp) { 284 if ((fp->f_flag & flag) == 0 && flag != -1) { 285 fp = NULL; 286 } else { 287 fhold(fp); 288 } 289 } 290 } else { 291 fp = NULL; 292 } 293 spin_unlock_shared(&fdp->fd_spin); 294 295 return fp; 296 } 297 298 struct file * 299 holdfp_fdp_locked(struct filedesc *fdp, int fd, int flag) 300 { 301 struct file *fp; 302 303 if (((u_int)fd) < fdp->fd_nfiles) { 304 fp = fdp->fd_files[fd].fp; /* can be NULL */ 305 if (fp) { 306 if ((fp->f_flag & flag) == 0 && flag != -1) { 307 fp = NULL; 308 } else { 309 fhold(fp); 310 } 311 } 312 } else { 313 fp = NULL; 314 } 315 return fp; 316 } 317 318 /* 319 * Acquire the fp for the specified file descriptor, using the thread 320 * cache if possible and caching it if possible. 321 * 322 * td must be the curren thread. 323 */ 324 static 325 struct file * 326 _holdfp_cache(thread_t td, int fd) 327 { 328 struct filedesc *fdp; 329 struct fdcache *fdc; 330 struct fdcache *best; 331 struct fdnode *fdn; 332 struct file *fp; 333 int status; 334 int delta; 335 int i; 336 337 /* 338 * Fast 339 */ 340 for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) { 341 if (fdc->fd != fd || fdc->fp == NULL) 342 continue; 343 status = atomic_swap_int(&fdc->locked, 1); 344 345 /* 346 * If someone else has locked our cache entry they are in 347 * the middle of clearing it, skip the entry. 348 */ 349 if (status == 1) 350 continue; 351 352 /* 353 * We have locked the entry, but if it no longer matches 354 * restore the previous state (0 or 2) and skip the entry. 355 */ 356 if (fdc->fd != fd || fdc->fp == NULL) { 357 atomic_swap_int(&fdc->locked, status); 358 continue; 359 } 360 361 /* 362 * We have locked a valid entry. We can borrow the ref 363 * for a mode 0 entry. We can get a valid fp for a mode 364 * 2 entry but not borrow the ref. 365 */ 366 if (status == 0) { 367 fp = fdc->fp; 368 fdc->lru = ++td->td_fdcache_lru; 369 atomic_swap_int(&fdc->locked, 2); 370 371 return fp; 372 } 373 if (status == 2) { 374 fp = fdc->fp; 375 fhold(fp); 376 fdc->lru = ++td->td_fdcache_lru; 377 atomic_swap_int(&fdc->locked, 2); 378 379 return fp; 380 } 381 KKASSERT(0); 382 } 383 384 /* 385 * Lookup the descriptor the slow way. This can contend against 386 * modifying operations in a multi-threaded environment and cause 387 * cache line ping ponging otherwise. 388 */ 389 fdp = td->td_proc->p_fd; 390 spin_lock_shared(&fdp->fd_spin); 391 392 if (((u_int)fd) < fdp->fd_nfiles) { 393 fp = fdp->fd_files[fd].fp; /* can be NULL */ 394 if (fp) { 395 fhold(fp); 396 if (fdp->fd_files[fd].isfull == 0) 397 goto enter; 398 } 399 } else { 400 fp = NULL; 401 } 402 spin_unlock_shared(&fdp->fd_spin); 403 404 return fp; 405 406 /* 407 * We found a valid fp and held it, fdp is still shared locked. 408 * Enter the fp into the per-thread cache. Find the oldest entry 409 * via lru, or an empty entry. 410 * 411 * Because fdp's spinlock is held (shared is fine), no other 412 * thread should be in the middle of clearing our selected entry. 
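	 *
	 * For reference, fdc->locked encodes three states used throughout
	 * this file: 0 (unlocked, the cached ref is owned by the cache
	 * entry itself), 1 (transiently locked by a thread inspecting or
	 * clearing the entry), and 2 (unlocked, the cached ref is currently
	 * lent out to the owning thread).  dropfp() returns a borrowed ref
	 * by flipping the entry from 2 back to 0.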
413 */ 414 enter: 415 best = &td->td_fdcache[0]; 416 for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) { 417 if (fdc->fp == NULL) { 418 best = fdc; 419 break; 420 } 421 delta = fdc->lru - best->lru; 422 if (delta < 0) 423 best = fdc; 424 } 425 426 /* 427 * Replace best 428 * 429 * Don't enter into the cache if we cannot get the lock. 430 */ 431 status = atomic_swap_int(&best->locked, 1); 432 if (status == 1) 433 goto done; 434 435 /* 436 * Clear the previous cache entry if present 437 */ 438 if (best->fp) { 439 KKASSERT(best->fd >= 0); 440 fclearcache(&fdp->fd_files[best->fd], best, status); 441 } 442 443 /* 444 * Create our new cache entry. This entry is 'safe' until we tie 445 * into the fdnode. If we cannot tie in, we will clear the entry. 446 */ 447 best->fd = fd; 448 best->fp = fp; 449 best->lru = ++td->td_fdcache_lru; 450 best->locked = 2; /* borrowed ref */ 451 452 fdn = &fdp->fd_files[fd]; 453 for (i = 0; i < NTDCACHEFD; ++i) { 454 if (fdn->tdcache[i] == NULL && 455 atomic_cmpset_ptr((void **)&fdn->tdcache[i], NULL, best)) { 456 goto done; 457 } 458 } 459 fdn->isfull = 1; /* no space */ 460 best->fd = -1; 461 best->fp = NULL; 462 best->locked = 0; 463 done: 464 spin_unlock_shared(&fdp->fd_spin); 465 466 return fp; 467 } 468 469 /* 470 * holdfp(), bypassing the cache in order to also be able to return 471 * the descriptor flags. A bit of a hack. 472 */ 473 static 474 struct file * 475 _holdfp2(thread_t td, int fd, char *fflagsp) 476 { 477 struct filedesc *fdp; 478 struct file *fp; 479 480 /* 481 * Lookup the descriptor the slow way. This can contend against 482 * modifying operations in a multi-threaded environment and cause 483 * cache line ping ponging otherwise. 484 */ 485 fdp = td->td_proc->p_fd; 486 spin_lock_shared(&fdp->fd_spin); 487 488 if (((u_int)fd) < fdp->fd_nfiles) { 489 fp = fdp->fd_files[fd].fp; /* can be NULL */ 490 if (fp) { 491 *fflagsp = fdp->fd_files[fd].fileflags; 492 fhold(fp); 493 } 494 } else { 495 fp = NULL; 496 } 497 spin_unlock_shared(&fdp->fd_spin); 498 499 return fp; 500 } 501 502 503 /* 504 * Drop the file pointer and return to the thread cache if possible. 505 * 506 * Caller must not hold fdp's spin lock. 507 * td must be the current thread. 508 */ 509 void 510 dropfp(thread_t td, int fd, struct file *fp) 511 { 512 struct filedesc *fdp; 513 struct fdcache *fdc; 514 int status; 515 516 fdp = td->td_proc->p_fd; 517 518 /* 519 * If our placeholder is still present we can re-cache the ref. 520 * 521 * Note that we can race an fclearcache(). 522 */ 523 for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) { 524 if (fdc->fp != fp || fdc->fd != fd) 525 continue; 526 status = atomic_swap_int(&fdc->locked, 1); 527 switch(status) { 528 case 0: 529 /* 530 * Not in mode 2, fdrop fp without caching. 531 */ 532 atomic_swap_int(&fdc->locked, 0); 533 break; 534 case 1: 535 /* 536 * Not in mode 2, locked by someone else. 537 * fdrop fp without caching. 538 */ 539 break; 540 case 2: 541 /* 542 * Intact borrowed ref, return to mode 0 543 * indicating that we have returned the ref. 544 * 545 * Return the borrowed ref (2->1->0) 546 */ 547 if (fdc->fp == fp && fdc->fd == fd) { 548 atomic_swap_int(&fdc->locked, 0); 549 return; 550 } 551 atomic_swap_int(&fdc->locked, 2); 552 break; 553 } 554 } 555 556 /* 557 * Failed to re-cache, drop the fp without caching. 558 */ 559 fdrop(fp); 560 } 561 562 /* 563 * Clear all descriptors cached in the per-thread fd cache for 564 * the specified thread. 565 * 566 * Caller must not hold p_fd->spin. 
This function will temporarily 567 * obtain a shared spin lock. 568 */ 569 void 570 fexitcache(thread_t td) 571 { 572 struct filedesc *fdp; 573 struct fdcache *fdc; 574 int status; 575 int i; 576 577 if (td->td_proc == NULL) 578 return; 579 fdp = td->td_proc->p_fd; 580 if (fdp == NULL) 581 return; 582 583 /* 584 * A shared lock is sufficient as the caller controls td and we 585 * are only clearing td's cache. 586 */ 587 spin_lock_shared(&fdp->fd_spin); 588 for (i = 0; i < NFDCACHE; ++i) { 589 fdc = &td->td_fdcache[i]; 590 if (fdc->fp) { 591 status = atomic_swap_int(&fdc->locked, 1); 592 if (status == 1) { 593 cpu_pause(); 594 --i; 595 continue; 596 } 597 if (fdc->fp) { 598 KKASSERT(fdc->fd >= 0); 599 fclearcache(&fdp->fd_files[fdc->fd], fdc, 600 status); 601 } 602 atomic_swap_int(&fdc->locked, 0); 603 } 604 } 605 spin_unlock_shared(&fdp->fd_spin); 606 } 607 608 static __inline struct filelist_head * 609 fp2filelist(const struct file *fp) 610 { 611 u_int i; 612 613 i = (u_int)(uintptr_t)fp % NFILELIST_HEADS; 614 return &filelist_heads[i]; 615 } 616 617 static __inline 618 struct plimit * 619 readplimits(struct proc *p) 620 { 621 thread_t td = curthread; 622 struct plimit *limit; 623 624 limit = td->td_limit; 625 if (limit != p->p_limit) { 626 spin_lock_shared(&p->p_spin); 627 limit = p->p_limit; 628 atomic_add_int(&limit->p_refcnt, 1); 629 spin_unlock_shared(&p->p_spin); 630 if (td->td_limit) 631 plimit_free(td->td_limit); 632 td->td_limit = limit; 633 } 634 return limit; 635 } 636 637 /* 638 * System calls on descriptors. 639 */ 640 int 641 sys_getdtablesize(struct sysmsg *sysmsg, const struct getdtablesize_args *uap) 642 { 643 struct proc *p = curproc; 644 struct plimit *limit = readplimits(p); 645 int dtsize; 646 647 if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX) 648 dtsize = INT_MAX; 649 else 650 dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur; 651 652 if (dtsize > maxfilesperproc) 653 dtsize = maxfilesperproc; 654 if (dtsize < minfilesperproc) 655 dtsize = minfilesperproc; 656 if (p->p_ucred->cr_uid && dtsize > maxfilesperuser) 657 dtsize = maxfilesperuser; 658 sysmsg->sysmsg_result = dtsize; 659 return (0); 660 } 661 662 /* 663 * Duplicate a file descriptor to a particular value. 664 * 665 * note: keep in mind that a potential race condition exists when closing 666 * descriptors from a shared descriptor table (via rfork). 667 */ 668 int 669 sys_dup2(struct sysmsg *sysmsg, const struct dup2_args *uap) 670 { 671 int error; 672 int fd = 0; 673 674 error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd); 675 sysmsg->sysmsg_fds[0] = fd; 676 677 return (error); 678 } 679 680 /* 681 * Duplicate a file descriptor. 682 */ 683 int 684 sys_dup(struct sysmsg *sysmsg, const struct dup_args *uap) 685 { 686 int error; 687 int fd = 0; 688 689 error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd); 690 sysmsg->sysmsg_fds[0] = fd; 691 692 return (error); 693 } 694 695 /* 696 * MPALMOSTSAFE - acquires mplock for fp operations 697 */ 698 int 699 kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred) 700 { 701 struct thread *td = curthread; 702 struct proc *p = td->td_proc; 703 struct file *fp; 704 struct vnode *vp; 705 u_int newmin; 706 u_int oflags; 707 u_int nflags; 708 int closedcounter; 709 int tmp, error, flg = F_POSIX; 710 711 KKASSERT(p); 712 713 /* 714 * Operations on file descriptors that do not require a file pointer. 715 */ 716 switch (cmd) { 717 case F_GETFD: 718 error = fgetfdflags(p->p_fd, fd, &tmp); 719 if (error == 0) 720 dat->fc_cloexec = (tmp & UF_EXCLOSE) ? 
FD_CLOEXEC : 0; 721 return (error); 722 723 case F_SETFD: 724 if (dat->fc_cloexec & FD_CLOEXEC) 725 error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE); 726 else 727 error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE); 728 return (error); 729 case F_DUPFD: 730 newmin = dat->fc_fd; 731 error = kern_dup(DUP_VARIABLE | DUP_FCNTL, fd, newmin, 732 &dat->fc_fd); 733 return (error); 734 case F_DUPFD_CLOEXEC: 735 newmin = dat->fc_fd; 736 error = kern_dup(DUP_VARIABLE | DUP_CLOEXEC | DUP_FCNTL, 737 fd, newmin, &dat->fc_fd); 738 return (error); 739 case F_DUP2FD: 740 newmin = dat->fc_fd; 741 error = kern_dup(DUP_FIXED, fd, newmin, &dat->fc_fd); 742 return (error); 743 case F_DUP2FD_CLOEXEC: 744 newmin = dat->fc_fd; 745 error = kern_dup(DUP_FIXED | DUP_CLOEXEC, fd, newmin, 746 &dat->fc_fd); 747 return (error); 748 default: 749 break; 750 } 751 752 /* 753 * Operations on file pointers 754 */ 755 closedcounter = p->p_fd->fd_closedcounter; 756 if ((fp = holdfp(td, fd, -1)) == NULL) 757 return (EBADF); 758 759 switch (cmd) { 760 case F_GETFL: 761 dat->fc_flags = OFLAGS(fp->f_flag); 762 error = 0; 763 break; 764 765 case F_SETFL: 766 oflags = fp->f_flag; 767 nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS; 768 nflags |= oflags & ~FCNTLFLAGS; 769 770 error = 0; 771 if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY)) 772 error = EINVAL; 773 if (error == 0 && ((nflags ^ oflags) & FASYNC)) { 774 tmp = nflags & FASYNC; 775 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, 776 cred, NULL); 777 } 778 779 /* 780 * If no error, must be atomically set. 781 */ 782 while (error == 0) { 783 oflags = fp->f_flag; 784 cpu_ccfence(); 785 nflags = (oflags & ~FCNTLFLAGS) | (nflags & FCNTLFLAGS); 786 if (atomic_cmpset_int(&fp->f_flag, oflags, nflags)) 787 break; 788 cpu_pause(); 789 } 790 break; 791 792 case F_GETOWN: 793 error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner, 794 cred, NULL); 795 break; 796 797 case F_SETOWN: 798 error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner, 799 cred, NULL); 800 break; 801 802 case F_SETLKW: 803 flg |= F_WAIT; 804 /* Fall into F_SETLK */ 805 806 case F_SETLK: 807 if (fp->f_type != DTYPE_VNODE) { 808 error = EBADF; 809 break; 810 } 811 vp = (struct vnode *)fp->f_data; 812 813 /* 814 * copyin/lockop may block 815 */ 816 if (dat->fc_flock.l_whence == SEEK_CUR) 817 dat->fc_flock.l_start += fp->f_offset; 818 819 switch (dat->fc_flock.l_type) { 820 case F_RDLCK: 821 if ((fp->f_flag & FREAD) == 0) { 822 error = EBADF; 823 break; 824 } 825 if (p->p_leader->p_advlock_flag == 0) 826 p->p_leader->p_advlock_flag = 1; 827 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, 828 &dat->fc_flock, flg); 829 break; 830 case F_WRLCK: 831 if ((fp->f_flag & FWRITE) == 0) { 832 error = EBADF; 833 break; 834 } 835 if (p->p_leader->p_advlock_flag == 0) 836 p->p_leader->p_advlock_flag = 1; 837 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, 838 &dat->fc_flock, flg); 839 break; 840 case F_UNLCK: 841 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK, 842 &dat->fc_flock, F_POSIX); 843 break; 844 default: 845 error = EINVAL; 846 break; 847 } 848 849 /* 850 * It is possible to race a close() on the descriptor while 851 * we were blocked getting the lock. If this occurs the 852 * close might not have caught the lock. 
853 */ 854 if (checkfdclosed(td, p->p_fd, fd, fp, closedcounter)) { 855 dat->fc_flock.l_whence = SEEK_SET; 856 dat->fc_flock.l_start = 0; 857 dat->fc_flock.l_len = 0; 858 dat->fc_flock.l_type = F_UNLCK; 859 VOP_ADVLOCK(vp, (caddr_t)p->p_leader, 860 F_UNLCK, &dat->fc_flock, F_POSIX); 861 } 862 break; 863 864 case F_GETLK: 865 if (fp->f_type != DTYPE_VNODE) { 866 error = EBADF; 867 break; 868 } 869 vp = (struct vnode *)fp->f_data; 870 /* 871 * copyin/lockop may block 872 */ 873 if (dat->fc_flock.l_type != F_RDLCK && 874 dat->fc_flock.l_type != F_WRLCK && 875 dat->fc_flock.l_type != F_UNLCK) { 876 error = EINVAL; 877 break; 878 } 879 if (dat->fc_flock.l_whence == SEEK_CUR) 880 dat->fc_flock.l_start += fp->f_offset; 881 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, 882 &dat->fc_flock, F_POSIX); 883 break; 884 885 case F_GETPATH: 886 if (fp->f_type != DTYPE_VNODE) { 887 error = EBADF; 888 break; 889 } 890 error = cache_fullpath(p, &fp->f_nchandle, NULL, &dat->fc_path.ptr, 891 &dat->fc_path.buf, 1); 892 break; 893 894 default: 895 error = EINVAL; 896 break; 897 } 898 899 fdrop(fp); 900 return (error); 901 } 902 903 /* 904 * The file control system call. 905 */ 906 int 907 sys_fcntl(struct sysmsg *sysmsg, const struct fcntl_args *uap) 908 { 909 union fcntl_dat dat; 910 int error; 911 912 switch (uap->cmd) { 913 case F_DUPFD: 914 case F_DUP2FD: 915 case F_DUPFD_CLOEXEC: 916 case F_DUP2FD_CLOEXEC: 917 dat.fc_fd = uap->arg; 918 break; 919 case F_SETFD: 920 dat.fc_cloexec = uap->arg; 921 break; 922 case F_SETFL: 923 dat.fc_flags = uap->arg; 924 break; 925 case F_SETOWN: 926 dat.fc_owner = uap->arg; 927 break; 928 case F_SETLKW: 929 case F_SETLK: 930 case F_GETLK: 931 error = copyin((caddr_t)uap->arg, &dat.fc_flock, 932 sizeof(struct flock)); 933 if (error) 934 return (error); 935 break; 936 } 937 938 error = kern_fcntl(uap->fd, uap->cmd, &dat, curthread->td_ucred); 939 940 if (error == 0) { 941 switch (uap->cmd) { 942 case F_DUPFD: 943 case F_DUP2FD: 944 case F_DUPFD_CLOEXEC: 945 case F_DUP2FD_CLOEXEC: 946 sysmsg->sysmsg_result = dat.fc_fd; 947 break; 948 case F_GETFD: 949 sysmsg->sysmsg_result = dat.fc_cloexec; 950 break; 951 case F_GETFL: 952 sysmsg->sysmsg_result = dat.fc_flags; 953 break; 954 case F_GETOWN: 955 sysmsg->sysmsg_result = dat.fc_owner; 956 break; 957 case F_GETLK: 958 error = copyout(&dat.fc_flock, (caddr_t)uap->arg, 959 sizeof(struct flock)); 960 break; 961 case F_GETPATH: 962 error = copyout(dat.fc_path.ptr, (caddr_t)uap->arg, 963 strlen(dat.fc_path.ptr) + 1); 964 kfree(dat.fc_path.buf, M_TEMP); 965 break; 966 } 967 } 968 969 return (error); 970 } 971 972 /* 973 * Common code for dup, dup2, and fcntl(F_DUPFD). 974 * 975 * There are four type flags: DUP_FCNTL, DUP_FIXED, DUP_VARIABLE, and 976 * DUP_CLOEXEC. 977 * 978 * DUP_FCNTL is for handling EINVAL vs. EBADF differences between 979 * fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC and dup2() (per POSIX). 980 * The next two flags are mutually exclusive, and the fourth is optional. 981 * DUP_FIXED tells kern_dup() to destructively dup over an existing file 982 * descriptor if "new" is already open. DUP_VARIABLE tells kern_dup() 983 * to find the lowest unused file descriptor that is greater than or 984 * equal to "new". DUP_CLOEXEC, which works with either of the first 985 * two flags, sets the close-on-exec flag on the "new" file descriptor. 
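 *
 * For reference, the callers in this file map onto these flags as
 * follows: dup() passes DUP_VARIABLE, dup2() passes DUP_FIXED,
 * fcntl(F_DUPFD) passes DUP_VARIABLE | DUP_FCNTL, F_DUPFD_CLOEXEC adds
 * DUP_CLOEXEC to that, and F_DUP2FD / F_DUP2FD_CLOEXEC pass DUP_FIXED
 * without and with DUP_CLOEXEC respectively.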
986 */ 987 int 988 kern_dup(int flags, int old, int new, int *res) 989 { 990 struct thread *td = curthread; 991 struct proc *p = td->td_proc; 992 struct plimit *limit = readplimits(p); 993 struct filedesc *fdp = p->p_fd; 994 struct file *fp; 995 struct file *delfp; 996 int oldflags; 997 int holdleaders; 998 int dtsize; 999 int error, newfd; 1000 1001 /* 1002 * Verify that we have a valid descriptor to dup from and 1003 * possibly to dup to. When the new descriptor is out of 1004 * bounds, fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC must 1005 * return EINVAL, while dup2() returns EBADF in 1006 * this case. 1007 * 1008 * NOTE: maxfilesperuser is not applicable to dup() 1009 */ 1010 retry: 1011 if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX) 1012 dtsize = INT_MAX; 1013 else 1014 dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur; 1015 if (dtsize > maxfilesperproc) 1016 dtsize = maxfilesperproc; 1017 if (dtsize < minfilesperproc) 1018 dtsize = minfilesperproc; 1019 1020 if (new < 0 || new >= dtsize) 1021 return (flags & DUP_FCNTL ? EINVAL : EBADF); 1022 1023 spin_lock(&fdp->fd_spin); 1024 if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) { 1025 spin_unlock(&fdp->fd_spin); 1026 return (EBADF); 1027 } 1028 if ((flags & DUP_FIXED) && old == new) { 1029 *res = new; 1030 if (flags & DUP_CLOEXEC) 1031 fdp->fd_files[new].fileflags |= UF_EXCLOSE; 1032 spin_unlock(&fdp->fd_spin); 1033 return (0); 1034 } 1035 fp = fdp->fd_files[old].fp; 1036 oldflags = fdp->fd_files[old].fileflags; 1037 fhold(fp); 1038 1039 /* 1040 * Allocate a new descriptor if DUP_VARIABLE, or expand the table 1041 * if the requested descriptor is beyond the current table size. 1042 * 1043 * This can block. Retry if the source descriptor no longer matches 1044 * or if our expectation in the expansion case races. 1045 * 1046 * If we are not expanding or allocating a new decriptor, then reset 1047 * the target descriptor to a reserved state so we have a uniform 1048 * setup for the next code block. 1049 */ 1050 if ((flags & DUP_VARIABLE) || new >= fdp->fd_nfiles) { 1051 error = fdalloc_locked(p, fdp, new, &newfd); 1052 if (error) { 1053 spin_unlock(&fdp->fd_spin); 1054 fdrop(fp); 1055 return (error); 1056 } 1057 /* 1058 * Check for ripout 1059 */ 1060 if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) { 1061 fsetfd_locked(fdp, NULL, newfd); 1062 spin_unlock(&fdp->fd_spin); 1063 fdrop(fp); 1064 goto retry; 1065 } 1066 /* 1067 * Check for expansion race 1068 */ 1069 if ((flags & DUP_VARIABLE) == 0 && new != newfd) { 1070 fsetfd_locked(fdp, NULL, newfd); 1071 spin_unlock(&fdp->fd_spin); 1072 fdrop(fp); 1073 goto retry; 1074 } 1075 /* 1076 * Check for ripout, newfd reused old (this case probably 1077 * can't occur). 1078 */ 1079 if (old == newfd) { 1080 fsetfd_locked(fdp, NULL, newfd); 1081 spin_unlock(&fdp->fd_spin); 1082 fdrop(fp); 1083 goto retry; 1084 } 1085 new = newfd; 1086 delfp = NULL; 1087 } else { 1088 if (fdp->fd_files[new].reserved) { 1089 spin_unlock(&fdp->fd_spin); 1090 fdrop(fp); 1091 kprintf("Warning: dup(): target descriptor %d is " 1092 "reserved, waiting for it to be resolved\n", 1093 new); 1094 tsleep(fdp, 0, "fdres", hz); 1095 goto retry; 1096 } 1097 1098 /* 1099 * If the target descriptor was never allocated we have 1100 * to allocate it. If it was we have to clean out the 1101 * old descriptor. delfp inherits the ref from the 1102 * descriptor table. 
1103 */ 1104 ++fdp->fd_closedcounter; 1105 fclearcache(&fdp->fd_files[new], NULL, 0); 1106 ++fdp->fd_closedcounter; 1107 delfp = fdp->fd_files[new].fp; 1108 fdp->fd_files[new].fp = NULL; 1109 fdp->fd_files[new].reserved = 1; 1110 if (delfp == NULL) { 1111 fdreserve_locked(fdp, new, 1); 1112 if (new > fdp->fd_lastfile) 1113 fdp->fd_lastfile = new; 1114 } 1115 1116 } 1117 1118 /* 1119 * NOTE: still holding an exclusive spinlock 1120 */ 1121 1122 /* 1123 * If a descriptor is being overwritten we may hve to tell 1124 * fdfree() to sleep to ensure that all relevant process 1125 * leaders can be traversed in closef(). 1126 */ 1127 if (delfp != NULL && p->p_fdtol != NULL) { 1128 fdp->fd_holdleaderscount++; 1129 holdleaders = 1; 1130 } else { 1131 holdleaders = 0; 1132 } 1133 KASSERT(delfp == NULL || (flags & DUP_FIXED), 1134 ("dup() picked an open file")); 1135 1136 /* 1137 * Duplicate the source descriptor, update lastfile. If the new 1138 * descriptor was not allocated and we aren't replacing an existing 1139 * descriptor we have to mark the descriptor as being in use. 1140 * 1141 * The fd_files[] array inherits fp's hold reference. 1142 */ 1143 fsetfd_locked(fdp, fp, new); 1144 if ((flags & DUP_CLOEXEC) != 0) 1145 fdp->fd_files[new].fileflags = oldflags | UF_EXCLOSE; 1146 else 1147 fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE; 1148 spin_unlock(&fdp->fd_spin); 1149 fdrop(fp); 1150 *res = new; 1151 1152 /* 1153 * If we dup'd over a valid file, we now own the reference to it 1154 * and must dispose of it using closef() semantics (as if a 1155 * close() were performed on it). 1156 */ 1157 if (delfp) { 1158 if (SLIST_FIRST(&delfp->f_klist)) 1159 knote_fdclose(delfp, fdp, new); 1160 closef(delfp, p); 1161 if (holdleaders) { 1162 spin_lock(&fdp->fd_spin); 1163 fdp->fd_holdleaderscount--; 1164 if (fdp->fd_holdleaderscount == 0 && 1165 fdp->fd_holdleaderswakeup != 0) { 1166 fdp->fd_holdleaderswakeup = 0; 1167 spin_unlock(&fdp->fd_spin); 1168 wakeup(&fdp->fd_holdleaderscount); 1169 } else { 1170 spin_unlock(&fdp->fd_spin); 1171 } 1172 } 1173 } 1174 return (0); 1175 } 1176 1177 /* 1178 * If sigio is on the list associated with a process or process group, 1179 * disable signalling from the device, remove sigio from the list and 1180 * free sigio. 1181 */ 1182 void 1183 funsetown(struct sigio **sigiop) 1184 { 1185 struct pgrp *pgrp; 1186 struct proc *p; 1187 struct sigio *sigio; 1188 1189 if ((sigio = *sigiop) != NULL) { 1190 lwkt_gettoken(&sigio_token); /* protect sigio */ 1191 KKASSERT(sigiop == sigio->sio_myref); 1192 sigio = *sigiop; 1193 *sigiop = NULL; 1194 lwkt_reltoken(&sigio_token); 1195 } 1196 if (sigio == NULL) 1197 return; 1198 1199 if (sigio->sio_pgid < 0) { 1200 pgrp = sigio->sio_pgrp; 1201 sigio->sio_pgrp = NULL; 1202 lwkt_gettoken(&pgrp->pg_token); 1203 SLIST_REMOVE(&pgrp->pg_sigiolst, sigio, sigio, sio_pgsigio); 1204 lwkt_reltoken(&pgrp->pg_token); 1205 pgrel(pgrp); 1206 } else /* if ((*sigiop)->sio_pgid > 0) */ { 1207 p = sigio->sio_proc; 1208 sigio->sio_proc = NULL; 1209 PHOLD(p); 1210 lwkt_gettoken(&p->p_token); 1211 SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio); 1212 lwkt_reltoken(&p->p_token); 1213 PRELE(p); 1214 } 1215 crfree(sigio->sio_ucred); 1216 sigio->sio_ucred = NULL; 1217 kfree(sigio, M_SIGIO); 1218 } 1219 1220 /* 1221 * Free a list of sigio structures. Caller is responsible for ensuring 1222 * that the list is MPSAFE. 
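 *
 * For context, a sketch of how the fsetown()/fgetown() pair is
 * typically wired into a driver ioctl switch (the sc_sigio field name
 * here is hypothetical, not something defined in this file):
 *
 *	case FIOSETOWN:
 *		return (fsetown(*(int *)data, &sc->sc_sigio));
 *	case FIOGETOWN:
 *		*(int *)data = fgetown(&sc->sc_sigio);
 *		return (0);
 *
 * with funsetown(&sc->sc_sigio) on last close, and funsetownlst() used
 * to drain an entire p_sigiolst/pg_sigiolst at once.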
1223 */ 1224 void 1225 funsetownlst(struct sigiolst *sigiolst) 1226 { 1227 struct sigio *sigio; 1228 1229 while ((sigio = SLIST_FIRST(sigiolst)) != NULL) 1230 funsetown(sigio->sio_myref); 1231 } 1232 1233 /* 1234 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg). 1235 * 1236 * After permission checking, add a sigio structure to the sigio list for 1237 * the process or process group. 1238 */ 1239 int 1240 fsetown(pid_t pgid, struct sigio **sigiop) 1241 { 1242 struct proc *proc = NULL; 1243 struct pgrp *pgrp = NULL; 1244 struct sigio *sigio; 1245 int error; 1246 1247 if (pgid == 0) { 1248 funsetown(sigiop); 1249 return (0); 1250 } 1251 1252 if (pgid > 0) { 1253 proc = pfind(pgid); 1254 if (proc == NULL) { 1255 error = ESRCH; 1256 goto done; 1257 } 1258 1259 /* 1260 * Policy - Don't allow a process to FSETOWN a process 1261 * in another session. 1262 * 1263 * Remove this test to allow maximum flexibility or 1264 * restrict FSETOWN to the current process or process 1265 * group for maximum safety. 1266 */ 1267 if (proc->p_session != curproc->p_session) { 1268 error = EPERM; 1269 goto done; 1270 } 1271 } else /* if (pgid < 0) */ { 1272 pgrp = pgfind(-pgid); 1273 if (pgrp == NULL) { 1274 error = ESRCH; 1275 goto done; 1276 } 1277 1278 /* 1279 * Policy - Don't allow a process to FSETOWN a process 1280 * in another session. 1281 * 1282 * Remove this test to allow maximum flexibility or 1283 * restrict FSETOWN to the current process or process 1284 * group for maximum safety. 1285 */ 1286 if (pgrp->pg_session != curproc->p_session) { 1287 error = EPERM; 1288 goto done; 1289 } 1290 } 1291 sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK | M_ZERO); 1292 if (pgid > 0) { 1293 KKASSERT(pgrp == NULL); 1294 lwkt_gettoken(&proc->p_token); 1295 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio); 1296 sigio->sio_proc = proc; 1297 lwkt_reltoken(&proc->p_token); 1298 } else { 1299 KKASSERT(proc == NULL); 1300 lwkt_gettoken(&pgrp->pg_token); 1301 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio); 1302 sigio->sio_pgrp = pgrp; 1303 lwkt_reltoken(&pgrp->pg_token); 1304 pgrp = NULL; 1305 } 1306 sigio->sio_pgid = pgid; 1307 sigio->sio_ucred = crhold(curthread->td_ucred); 1308 /* It would be convenient if p_ruid was in ucred. */ 1309 sigio->sio_ruid = sigio->sio_ucred->cr_ruid; 1310 sigio->sio_myref = sigiop; 1311 1312 lwkt_gettoken(&sigio_token); 1313 while (*sigiop) 1314 funsetown(sigiop); 1315 *sigiop = sigio; 1316 lwkt_reltoken(&sigio_token); 1317 error = 0; 1318 done: 1319 if (pgrp) 1320 pgrel(pgrp); 1321 if (proc) 1322 PRELE(proc); 1323 return (error); 1324 } 1325 1326 /* 1327 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg). 1328 */ 1329 pid_t 1330 fgetown(struct sigio **sigiop) 1331 { 1332 struct sigio *sigio; 1333 pid_t own; 1334 1335 lwkt_gettoken_shared(&sigio_token); 1336 sigio = *sigiop; 1337 own = (sigio != NULL ? sigio->sio_pgid : 0); 1338 lwkt_reltoken(&sigio_token); 1339 1340 return (own); 1341 } 1342 1343 /* 1344 * Close many file descriptors. 
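 *
 * Illustrative use (sketch): a freshly forked daemon can call
 * closefrom() with fd 3 to drop everything above stderr in one pass
 * instead of looping close() up to the resource limit.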
 */
int
sys_closefrom(struct sysmsg *sysmsg, const struct closefrom_args *uap)
{
	return(kern_closefrom(uap->fd));
}

/*
 * Close all file descriptors greater than or equal to fd.
 */
int
kern_closefrom(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	int error;
	int e2;

	KKASSERT(p);
	fdp = p->p_fd;

	if (fd < 0)
		return (EINVAL);

	/*
	 * NOTE: This function will skip unassociated descriptors and
	 *	 reserved descriptors that have not yet been assigned.
	 *	 fd_lastfile can change as a side effect of kern_close().
	 *
	 * NOTE: We accumulate EINTR errors and return EINTR if any
	 *	 close() returned EINTR.  However, the descriptor is
	 *	 still closed and we do not break out of the loop.
	 */
	error = 0;
	spin_lock(&fdp->fd_spin);
	while (fd <= fdp->fd_lastfile) {
		if (fdp->fd_files[fd].fp != NULL) {
			spin_unlock(&fdp->fd_spin);
			/* ok if this races another close */
			e2 = kern_close(fd);
			if (e2 == EINTR)
				error = EINTR;
			spin_lock(&fdp->fd_spin);
		}
		++fd;
	}
	spin_unlock(&fdp->fd_spin);

	return error;
}

/*
 * Close a file descriptor.
 */
int
sys_close(struct sysmsg *sysmsg, const struct close_args *uap)
{
	return(kern_close(uap->fd));
}

/*
 * close() helper
 */
int
kern_close(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	struct file *fp;
	int error;
	int holdleaders;

	KKASSERT(p);
	fdp = p->p_fd;

	/*
	 * funsetfd*() also clears the fd cache
	 */
	spin_lock(&fdp->fd_spin);
	if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
		spin_unlock(&fdp->fd_spin);
		return (EBADF);
	}
	holdleaders = 0;
	if (p->p_fdtol != NULL) {
		/*
		 * Ask fdfree() to sleep to ensure that all relevant
		 * process leaders can be traversed in closef().
		 */
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	}

	/*
	 * we now hold the fp reference that used to be owned by the
	 * descriptor array.
1443 */ 1444 spin_unlock(&fdp->fd_spin); 1445 if (SLIST_FIRST(&fp->f_klist)) 1446 knote_fdclose(fp, fdp, fd); 1447 error = closef(fp, p); 1448 if (holdleaders) { 1449 spin_lock(&fdp->fd_spin); 1450 fdp->fd_holdleaderscount--; 1451 if (fdp->fd_holdleaderscount == 0 && 1452 fdp->fd_holdleaderswakeup != 0) { 1453 fdp->fd_holdleaderswakeup = 0; 1454 spin_unlock(&fdp->fd_spin); 1455 wakeup(&fdp->fd_holdleaderscount); 1456 } else { 1457 spin_unlock(&fdp->fd_spin); 1458 } 1459 } 1460 return (error); 1461 } 1462 1463 /* 1464 * shutdown_args(int fd, int how) 1465 */ 1466 int 1467 kern_shutdown(int fd, int how) 1468 { 1469 struct thread *td = curthread; 1470 struct file *fp; 1471 int error; 1472 1473 if ((fp = holdfp(td, fd, -1)) == NULL) 1474 return (EBADF); 1475 error = fo_shutdown(fp, how); 1476 fdrop(fp); 1477 1478 return (error); 1479 } 1480 1481 /* 1482 * MPALMOSTSAFE 1483 */ 1484 int 1485 sys_shutdown(struct sysmsg *sysmsg, const struct shutdown_args *uap) 1486 { 1487 int error; 1488 1489 error = kern_shutdown(uap->s, uap->how); 1490 1491 return (error); 1492 } 1493 1494 /* 1495 * fstat() helper 1496 */ 1497 int 1498 kern_fstat(int fd, struct stat *ub) 1499 { 1500 struct thread *td = curthread; 1501 struct file *fp; 1502 int error; 1503 1504 if ((fp = holdfp(td, fd, -1)) == NULL) 1505 return (EBADF); 1506 error = fo_stat(fp, ub, td->td_ucred); 1507 fdrop(fp); 1508 1509 return (error); 1510 } 1511 1512 /* 1513 * Return status information about a file descriptor. 1514 */ 1515 int 1516 sys_fstat(struct sysmsg *sysmsg, const struct fstat_args *uap) 1517 { 1518 struct stat st; 1519 int error; 1520 1521 error = kern_fstat(uap->fd, &st); 1522 1523 if (error == 0) 1524 error = copyout(&st, uap->sb, sizeof(st)); 1525 return (error); 1526 } 1527 1528 /* 1529 * Return pathconf information about a file descriptor. 1530 * 1531 * MPALMOSTSAFE 1532 */ 1533 int 1534 sys_fpathconf(struct sysmsg *sysmsg, const struct fpathconf_args *uap) 1535 { 1536 struct thread *td = curthread; 1537 struct file *fp; 1538 struct vnode *vp; 1539 int error = 0; 1540 1541 if ((fp = holdfp(td, uap->fd, -1)) == NULL) 1542 return (EBADF); 1543 1544 switch (fp->f_type) { 1545 case DTYPE_PIPE: 1546 case DTYPE_SOCKET: 1547 if (uap->name != _PC_PIPE_BUF) { 1548 error = EINVAL; 1549 } else { 1550 sysmsg->sysmsg_result = PIPE_BUF; 1551 error = 0; 1552 } 1553 break; 1554 case DTYPE_FIFO: 1555 case DTYPE_VNODE: 1556 vp = (struct vnode *)fp->f_data; 1557 error = VOP_PATHCONF(vp, uap->name, &sysmsg->sysmsg_reg); 1558 break; 1559 default: 1560 error = EOPNOTSUPP; 1561 break; 1562 } 1563 fdrop(fp); 1564 return(error); 1565 } 1566 1567 /* 1568 * Grow the file table so it can hold through descriptor (want). 1569 * 1570 * The fdp's spinlock must be held exclusively on entry and may be held 1571 * exclusively on return. The spinlock may be cycled by the routine. 1572 */ 1573 static void 1574 fdgrow_locked(struct filedesc *fdp, int want) 1575 { 1576 struct fdnode *newfiles; 1577 struct fdnode *oldfiles; 1578 int nf, extra; 1579 1580 nf = fdp->fd_nfiles; 1581 do { 1582 /* nf has to be of the form 2^n - 1 */ 1583 nf = 2 * nf + 1; 1584 } while (nf <= want); 1585 1586 spin_unlock(&fdp->fd_spin); 1587 newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK); 1588 spin_lock(&fdp->fd_spin); 1589 1590 /* 1591 * We could have raced another extend while we were not holding 1592 * the spinlock. 
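	 *
	 * Worked example (illustrative numbers): with fd_nfiles == 63 and
	 * want == 100 the sizing loop above yields nf = 2*63+1 = 127,
	 * keeping the size of the form 2^n - 1 as the in-place binary tree
	 * requires.  If another thread already grew the table to at least
	 * that size while the spinlock was dropped, the fresh array is
	 * simply freed below.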
	 */
	if (fdp->fd_nfiles >= nf) {
		spin_unlock(&fdp->fd_spin);
		kfree(newfiles, M_FILEDESC);
		spin_lock(&fdp->fd_spin);
		return;
	}
	/*
	 * Copy the existing ofile and ofileflags arrays
	 * and zero the new portion of each array.
	 */
	extra = nf - fdp->fd_nfiles;
	bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
	bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));

	oldfiles = fdp->fd_files;
	fdp->fd_files = newfiles;
	fdp->fd_nfiles = nf;

	if (oldfiles != fdp->fd_builtin_files) {
		spin_unlock(&fdp->fd_spin);
		kfree(oldfiles, M_FILEDESC);
		spin_lock(&fdp->fd_spin);
	}
}

/*
 * Number of nodes in right subtree, including the root.
 */
static __inline int
right_subtree_size(int n)
{
	return (n ^ (n | (n + 1)));
}

/*
 * Bigger ancestor.
 */
static __inline int
right_ancestor(int n)
{
	return (n | (n + 1));
}

/*
 * Smaller ancestor.
 */
static __inline int
left_ancestor(int n)
{
	return ((n & (n + 1)) - 1);
}

/*
 * Traverse the in-place binary tree bottom-up adjusting the allocation
 * count so scans can determine where free descriptors are located.
 *
 * caller must be holding an exclusive spinlock on fdp
 */
static
void
fdreserve_locked(struct filedesc *fdp, int fd, int incr)
{
	while (fd >= 0) {
		fdp->fd_files[fd].allocated += incr;
		KKASSERT(fdp->fd_files[fd].allocated >= 0);
		fd = left_ancestor(fd);
	}
}

/*
 * Reserve a file descriptor for the process.  If no error occurs, the
 * caller MUST at some point call fsetfd() or assign a file pointer
 * or dispose of the reservation.
 */
static
int
fdalloc_locked(struct proc *p, struct filedesc *fdp, int want, int *result)
{
	struct plimit *limit = readplimits(p);
	struct uidinfo *uip;
	int fd, rsize, rsum, node, lim;

	/*
	 * Check dtable size limit
	 */
	*result = -1;	/* avoid gcc warnings */
	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		lim = INT_MAX;
	else
		lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (lim > maxfilesperproc)
		lim = maxfilesperproc;
	if (lim < minfilesperproc)
		lim = minfilesperproc;
	if (want >= lim)
		return (EINVAL);

	/*
	 * Check that the user has not run out of descriptors (non-root only).
	 * As a safety measure the dtable is allowed to have at least
	 * minfilesperproc open fds regardless of the maxfilesperuser limit.
	 *
	 * This isn't as loose a spec as ui_posixlocks, so we use atomic
	 * ops to force synchronize and recheck if we would otherwise
	 * error.
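	 *
	 * For reference, a worked example of the tree helpers used by the
	 * search below, for a 7-slot table (indices 0..6, root at 3):
	 *
	 *	n                      0   1   2   3   4   5   6
	 *	right_ancestor(n)      1   3   3   7   5   7   7
	 *	left_ancestor(n)      -1  -1   1  -1   3   3   5
	 *	right_subtree_size(n)  1   2   1   4   1   2   1
	 *
	 * e.g. reserving fd 6 walks 6 -> 5 -> 3 via left_ancestor(),
	 * bumping 'allocated' at each node, so a later scan starting at
	 * the root can tell that its right subtree is only partially full.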
	 */
	if (p->p_ucred->cr_uid && fdp->fd_nfiles >= minfilesperproc) {
		uip = p->p_ucred->cr_uidinfo;
		if (uip->ui_openfiles > maxfilesperuser) {
			int n;
			int count;

			count = 0;
			for (n = 0; n < ncpus; ++n) {
				count += atomic_swap_int(
					    &uip->ui_pcpu[n].pu_openfiles, 0);
			}
			atomic_add_int(&uip->ui_openfiles, count);
			if (uip->ui_openfiles > maxfilesperuser) {
				krateprintf(&krate_uidinfo,
					    "Warning: user %d pid %d (%s) "
					    "ran out of file descriptors "
					    "(%d/%d)\n",
					    p->p_ucred->cr_uid, (int)p->p_pid,
					    p->p_comm,
					    uip->ui_openfiles, maxfilesperuser);
				return(ENFILE);
			}
		}
	}

	/*
	 * Grow the dtable if necessary
	 */
	if (want >= fdp->fd_nfiles)
		fdgrow_locked(fdp, want);

	/*
	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile.  If that fails, consider
	 * expanding the ofile array.
	 *
	 * NOTE! the 'allocated' field is a cumulative recursive allocation
	 *	 count.  If we happen to see a value of 0 then we can shortcut
	 *	 our search.  Otherwise we run through the tree going
	 *	 down branches we know have free descriptor(s) until we hit a
	 *	 leaf node.  The leaf node will be free but will not necessarily
	 *	 have an allocated field of 0.
	 */
retry:
	/* move up the tree looking for a subtree with a free node */
	for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
	     fd = right_ancestor(fd)) {
		if (fdp->fd_files[fd].allocated == 0)
			goto found;

		rsize = right_subtree_size(fd);
		if (fdp->fd_files[fd].allocated == rsize)
			continue;	/* right subtree full */

		/*
		 * Free fd is in the right subtree of the tree rooted at fd.
		 * Call that subtree R.  Look for the smallest (leftmost)
		 * subtree of R with an unallocated fd: continue moving
		 * down the left branch until encountering a full left
		 * subtree, then move to the right.
		 */
		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
			node = fd + rsize;
			rsum += fdp->fd_files[node].allocated;
			if (fdp->fd_files[fd].allocated == rsum + rsize) {
				fd = node;	/* move to the right */
				if (fdp->fd_files[node].allocated == 0)
					goto found;
				rsum = 0;
			}
		}
		goto found;
	}

	/*
	 * No space in current array.  Expand?
	 */
	if (fdp->fd_nfiles >= lim) {
		return (EMFILE);
	}
	fdgrow_locked(fdp, want);
	goto retry;

found:
	KKASSERT(fd < fdp->fd_nfiles);
	if (fd > fdp->fd_lastfile)
		fdp->fd_lastfile = fd;
	if (want <= fdp->fd_freefile)
		fdp->fd_freefile = fd;
	*result = fd;
	KKASSERT(fdp->fd_files[fd].fp == NULL);
	KKASSERT(fdp->fd_files[fd].reserved == 0);
	fdp->fd_files[fd].fileflags = 0;
	fdp->fd_files[fd].reserved = 1;
	fdreserve_locked(fdp, fd, 1);

	return (0);
}

int
fdalloc(struct proc *p, int want, int *result)
{
	struct filedesc *fdp = p->p_fd;
	int error;

	spin_lock(&fdp->fd_spin);
	error = fdalloc_locked(p, fdp, want, result);
	spin_unlock(&fdp->fd_spin);

	return error;
}

/*
 * Check to see whether n user file descriptors
 * are available to the process p.
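 *
 * Returns 1 if at least n more descriptors can be allocated without
 * hitting the resource limit, 0 otherwise; e.g. fdavail(p, 2) != 0
 * means two more fds could still be opened.  The answer is advisory
 * only, since other threads may consume descriptors as soon as the
 * spinlock is released.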
1816 */ 1817 int 1818 fdavail(struct proc *p, int n) 1819 { 1820 struct plimit *limit = readplimits(p); 1821 struct filedesc *fdp = p->p_fd; 1822 struct fdnode *fdnode; 1823 int i, lim, last; 1824 1825 if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX) 1826 lim = INT_MAX; 1827 else 1828 lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur; 1829 1830 if (lim > maxfilesperproc) 1831 lim = maxfilesperproc; 1832 if (lim < minfilesperproc) 1833 lim = minfilesperproc; 1834 1835 spin_lock(&fdp->fd_spin); 1836 if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) { 1837 spin_unlock(&fdp->fd_spin); 1838 return (1); 1839 } 1840 last = min(fdp->fd_nfiles, lim); 1841 fdnode = &fdp->fd_files[fdp->fd_freefile]; 1842 for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) { 1843 if (fdnode->fp == NULL && --n <= 0) { 1844 spin_unlock(&fdp->fd_spin); 1845 return (1); 1846 } 1847 } 1848 spin_unlock(&fdp->fd_spin); 1849 return (0); 1850 } 1851 1852 /* 1853 * Revoke open descriptors referencing (f_data, f_type) 1854 * 1855 * Any revoke executed within a prison is only able to 1856 * revoke descriptors for processes within that prison. 1857 * 1858 * Returns 0 on success or an error code. 1859 */ 1860 struct fdrevoke_info { 1861 void *data; 1862 short type; 1863 short unused; 1864 int found; 1865 struct ucred *cred; 1866 struct file *nfp; 1867 }; 1868 1869 static int fdrevoke_check_callback(struct file *fp, void *vinfo); 1870 static int fdrevoke_proc_callback(struct proc *p, void *vinfo); 1871 1872 int 1873 fdrevoke(void *f_data, short f_type, struct ucred *cred) 1874 { 1875 struct fdrevoke_info info; 1876 int error; 1877 1878 bzero(&info, sizeof(info)); 1879 info.data = f_data; 1880 info.type = f_type; 1881 info.cred = cred; 1882 error = falloc(NULL, &info.nfp, NULL); 1883 if (error) 1884 return (error); 1885 1886 /* 1887 * Scan the file pointer table once. dups do not dup file pointers, 1888 * only descriptors, so there is no leak. Set FREVOKED on the fps 1889 * being revoked. 1890 * 1891 * Any fps sent over unix-domain sockets will be revoked by the 1892 * socket code checking for FREVOKED when the fps are externialized. 1893 * revoke_token is used to make sure that fps marked FREVOKED and 1894 * externalized will be picked up by the following allproc_scan(). 1895 */ 1896 lwkt_gettoken(&revoke_token); 1897 allfiles_scan_exclusive(fdrevoke_check_callback, &info); 1898 lwkt_reltoken(&revoke_token); 1899 1900 /* 1901 * If any fps were marked track down the related descriptors 1902 * and close them. Any dup()s at this point will notice 1903 * the FREVOKED already set in the fp and do the right thing. 1904 */ 1905 if (info.found) 1906 allproc_scan(fdrevoke_proc_callback, &info, 0); 1907 fdrop(info.nfp); 1908 return(0); 1909 } 1910 1911 /* 1912 * Locate matching file pointers directly. 1913 * 1914 * WARNING: allfiles_scan_exclusive() holds a spinlock through these calls! 1915 */ 1916 static int 1917 fdrevoke_check_callback(struct file *fp, void *vinfo) 1918 { 1919 struct fdrevoke_info *info = vinfo; 1920 1921 /* 1922 * File pointers already flagged for revokation are skipped. 1923 */ 1924 if (fp->f_flag & FREVOKED) 1925 return(0); 1926 1927 /* 1928 * If revoking from a prison file pointers created outside of 1929 * that prison, or file pointers without creds, cannot be revoked. 1930 */ 1931 if (info->cred->cr_prison && 1932 (fp->f_cred == NULL || 1933 info->cred->cr_prison != fp->f_cred->cr_prison)) { 1934 return(0); 1935 } 1936 1937 /* 1938 * If the file pointer matches then mark it for revocation. 
The 1939 * flag is currently only used by unp_revoke_gc(). 1940 * 1941 * info->found is a heuristic and can race in a SMP environment. 1942 */ 1943 if (info->data == fp->f_data && info->type == fp->f_type) { 1944 atomic_set_int(&fp->f_flag, FREVOKED); 1945 info->found = 1; 1946 } 1947 return(0); 1948 } 1949 1950 /* 1951 * Locate matching file pointers via process descriptor tables. 1952 */ 1953 static int 1954 fdrevoke_proc_callback(struct proc *p, void *vinfo) 1955 { 1956 struct fdrevoke_info *info = vinfo; 1957 struct filedesc *fdp; 1958 struct file *fp; 1959 int n; 1960 1961 if (p->p_stat == SIDL || p->p_stat == SZOMB) 1962 return(0); 1963 if (info->cred->cr_prison && 1964 info->cred->cr_prison != p->p_ucred->cr_prison) { 1965 return(0); 1966 } 1967 1968 /* 1969 * If the controlling terminal of the process matches the 1970 * vnode being revoked we clear the controlling terminal. 1971 * 1972 * The normal spec_close() may not catch this because it 1973 * uses curproc instead of p. 1974 */ 1975 if (p->p_session && info->type == DTYPE_VNODE && 1976 info->data == p->p_session->s_ttyvp) { 1977 p->p_session->s_ttyvp = NULL; 1978 vrele(info->data); 1979 } 1980 1981 /* 1982 * Softref the fdp to prevent it from being destroyed 1983 */ 1984 spin_lock(&p->p_spin); 1985 if ((fdp = p->p_fd) == NULL) { 1986 spin_unlock(&p->p_spin); 1987 return(0); 1988 } 1989 atomic_add_int(&fdp->fd_softrefs, 1); 1990 spin_unlock(&p->p_spin); 1991 1992 /* 1993 * Locate and close any matching file descriptors, replacing 1994 * them with info->nfp. 1995 */ 1996 spin_lock(&fdp->fd_spin); 1997 for (n = 0; n < fdp->fd_nfiles; ++n) { 1998 if ((fp = fdp->fd_files[n].fp) == NULL) 1999 continue; 2000 if (fp->f_flag & FREVOKED) { 2001 ++fdp->fd_closedcounter; 2002 fclearcache(&fdp->fd_files[n], NULL, 0); 2003 ++fdp->fd_closedcounter; 2004 fhold(info->nfp); 2005 fdp->fd_files[n].fp = info->nfp; 2006 spin_unlock(&fdp->fd_spin); 2007 knote_fdclose(fp, fdp, n); /* XXX */ 2008 closef(fp, p); 2009 spin_lock(&fdp->fd_spin); 2010 } 2011 } 2012 spin_unlock(&fdp->fd_spin); 2013 atomic_subtract_int(&fdp->fd_softrefs, 1); 2014 return(0); 2015 } 2016 2017 /* 2018 * falloc: 2019 * Create a new open file structure and reserve a file decriptor 2020 * for the process that refers to it. 2021 * 2022 * Root creds are checked using lp, or assumed if lp is NULL. If 2023 * resultfd is non-NULL then lp must also be non-NULL. No file 2024 * descriptor is reserved (and no process context is needed) if 2025 * resultfd is NULL. 2026 * 2027 * A file pointer with a refcount of 1 is returned. Note that the 2028 * file pointer is NOT associated with the descriptor. If falloc 2029 * returns success, fsetfd() MUST be called to either associate the 2030 * file pointer or clear the reservation. 2031 */ 2032 int 2033 falloc(struct lwp *lp, struct file **resultfp, int *resultfd) 2034 { 2035 static struct timeval lastfail; 2036 static int curfail; 2037 struct filelist_head *head; 2038 struct file *fp; 2039 struct ucred *cred = lp ? lp->lwp_thread->td_ucred : proc0.p_ucred; 2040 int error; 2041 2042 fp = NULL; 2043 2044 /* 2045 * Handle filetable full issues and root overfill. 2046 */ 2047 if (nfiles >= maxfiles - maxfilesrootres && 2048 (cred->cr_ruid != 0 || nfiles >= maxfiles)) { 2049 if (ppsratecheck(&lastfail, &curfail, 1)) { 2050 kprintf("kern.maxfiles limit exceeded by uid %d, " 2051 "please see tuning(7).\n", 2052 cred->cr_ruid); 2053 } 2054 error = ENFILE; 2055 goto done; 2056 } 2057 2058 /* 2059 * Allocate a new file descriptor. 
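	 *
	 * The new fp starts with f_count == 1 and f_ops pointing at
	 * badfileops; the caller installs real fileops afterwards.  A rough
	 * caller sketch (not code from this file):
	 *
	 *	error = falloc(lp, &fp, &fd);
	 *	if (error == 0) {
	 *		...install fileops and f_data...
	 *		fsetfd(fdp, fp, fd);
	 *		fdrop(fp);
	 *	}
	 *
	 * or fsetfd(fdp, NULL, fd) to back the reservation out on failure.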
2060 */ 2061 fp = kmalloc_obj(sizeof(*fp), M_FILE, M_WAITOK|M_ZERO); 2062 spin_init(&fp->f_spin, "falloc"); 2063 SLIST_INIT(&fp->f_klist); 2064 fp->f_count = 1; 2065 fp->f_ops = &badfileops; 2066 fp->f_seqcount = 1; 2067 fsetcred(fp, cred); 2068 atomic_add_int(&nfiles, 1); 2069 2070 head = fp2filelist(fp); 2071 spin_lock(&head->spin); 2072 LIST_INSERT_HEAD(&head->list, fp, f_list); 2073 spin_unlock(&head->spin); 2074 2075 if (resultfd) { 2076 if ((error = fdalloc(lp->lwp_proc, 0, resultfd)) != 0) { 2077 fdrop(fp); 2078 fp = NULL; 2079 } 2080 } else { 2081 error = 0; 2082 } 2083 done: 2084 *resultfp = fp; 2085 return (error); 2086 } 2087 2088 /* 2089 * Check for races against a file descriptor by determining that the 2090 * file pointer is still associated with the specified file descriptor, 2091 * and a close is not currently in progress. 2092 */ 2093 int 2094 checkfdclosed(thread_t td, struct filedesc *fdp, int fd, struct file *fp, 2095 int closedcounter) 2096 { 2097 struct fdcache *fdc; 2098 int error; 2099 2100 cpu_lfence(); 2101 if (fdp->fd_closedcounter == closedcounter) 2102 return 0; 2103 2104 if (td->td_proc && td->td_proc->p_fd == fdp) { 2105 for (fdc = &td->td_fdcache[0]; 2106 fdc < &td->td_fdcache[NFDCACHE]; ++fdc) { 2107 if (fdc->fd == fd && fdc->fp == fp) 2108 return 0; 2109 } 2110 } 2111 2112 spin_lock_shared(&fdp->fd_spin); 2113 if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp) 2114 error = EBADF; 2115 else 2116 error = 0; 2117 spin_unlock_shared(&fdp->fd_spin); 2118 return (error); 2119 } 2120 2121 /* 2122 * Associate a file pointer with a previously reserved file descriptor. 2123 * This function always succeeds. 2124 * 2125 * If fp is NULL, the file descriptor is returned to the pool. 2126 * 2127 * Caller must hold an exclusive spinlock on fdp->fd_spin. 2128 */ 2129 static void 2130 fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd) 2131 { 2132 KKASSERT((unsigned)fd < fdp->fd_nfiles); 2133 KKASSERT(fdp->fd_files[fd].reserved != 0); 2134 if (fp) { 2135 fhold(fp); 2136 /* fclearcache(&fdp->fd_files[fd], NULL, 0); */ 2137 fdp->fd_files[fd].fp = fp; 2138 fdp->fd_files[fd].reserved = 0; 2139 } else { 2140 fdp->fd_files[fd].reserved = 0; 2141 fdreserve_locked(fdp, fd, -1); 2142 fdfixup_locked(fdp, fd); 2143 } 2144 } 2145 2146 /* 2147 * Caller must hold an exclusive spinlock on fdp->fd_spin. 2148 */ 2149 void 2150 fsetfd(struct filedesc *fdp, struct file *fp, int fd) 2151 { 2152 spin_lock(&fdp->fd_spin); 2153 fsetfd_locked(fdp, fp, fd); 2154 spin_unlock(&fdp->fd_spin); 2155 } 2156 2157 /* 2158 * Caller must hold an exclusive spinlock on fdp->fd_spin. 2159 */ 2160 static 2161 struct file * 2162 funsetfd_locked(struct filedesc *fdp, int fd) 2163 { 2164 struct file *fp; 2165 2166 if ((unsigned)fd >= fdp->fd_nfiles) 2167 return (NULL); 2168 if ((fp = fdp->fd_files[fd].fp) == NULL) 2169 return (NULL); 2170 ++fdp->fd_closedcounter; 2171 fclearcache(&fdp->fd_files[fd], NULL, 0); 2172 fdp->fd_files[fd].fp = NULL; 2173 fdp->fd_files[fd].fileflags = 0; 2174 ++fdp->fd_closedcounter; 2175 2176 fdreserve_locked(fdp, fd, -1); 2177 fdfixup_locked(fdp, fd); 2178 2179 return(fp); 2180 } 2181 2182 /* 2183 * WARNING: May not be called before initial fsetfd(). 
2184 */ 2185 int 2186 fgetfdflags(struct filedesc *fdp, int fd, int *flagsp) 2187 { 2188 int error; 2189 2190 spin_lock_shared(&fdp->fd_spin); 2191 if (((u_int)fd) >= fdp->fd_nfiles) { 2192 error = EBADF; 2193 } else if (fdp->fd_files[fd].fp == NULL) { 2194 error = EBADF; 2195 } else { 2196 *flagsp = fdp->fd_files[fd].fileflags; 2197 error = 0; 2198 } 2199 spin_unlock_shared(&fdp->fd_spin); 2200 2201 return (error); 2202 } 2203 2204 /* 2205 * WARNING: May not be called before initial fsetfd(). 2206 */ 2207 int 2208 fsetfdflags(struct filedesc *fdp, int fd, int add_flags) 2209 { 2210 int error; 2211 2212 spin_lock(&fdp->fd_spin); 2213 if (((u_int)fd) >= fdp->fd_nfiles) { 2214 error = EBADF; 2215 } else if (fdp->fd_files[fd].fp == NULL) { 2216 error = EBADF; 2217 } else { 2218 fdp->fd_files[fd].fileflags |= add_flags; 2219 error = 0; 2220 } 2221 spin_unlock(&fdp->fd_spin); 2222 2223 return (error); 2224 } 2225 2226 /* 2227 * WARNING: May not be called before initial fsetfd(). 2228 */ 2229 int 2230 fclrfdflags(struct filedesc *fdp, int fd, int rem_flags) 2231 { 2232 int error; 2233 2234 spin_lock(&fdp->fd_spin); 2235 if (((u_int)fd) >= fdp->fd_nfiles) { 2236 error = EBADF; 2237 } else if (fdp->fd_files[fd].fp == NULL) { 2238 error = EBADF; 2239 } else { 2240 fdp->fd_files[fd].fileflags &= ~rem_flags; 2241 error = 0; 2242 } 2243 spin_unlock(&fdp->fd_spin); 2244 2245 return (error); 2246 } 2247 2248 /* 2249 * Set/Change/Clear the creds for a fp and synchronize the uidinfo. 2250 */ 2251 void 2252 fsetcred(struct file *fp, struct ucred *ncr) 2253 { 2254 struct ucred *ocr; 2255 struct uidinfo *uip; 2256 struct uidcount *pup; 2257 int cpu = mycpuid; 2258 int count; 2259 2260 ocr = fp->f_cred; 2261 if (ocr == NULL || ncr == NULL || ocr->cr_uidinfo != ncr->cr_uidinfo) { 2262 if (ocr) { 2263 uip = ocr->cr_uidinfo; 2264 pup = &uip->ui_pcpu[cpu]; 2265 atomic_add_int(&pup->pu_openfiles, -1); 2266 if (pup->pu_openfiles < -PUP_LIMIT || 2267 pup->pu_openfiles > PUP_LIMIT) { 2268 count = atomic_swap_int(&pup->pu_openfiles, 0); 2269 atomic_add_int(&uip->ui_openfiles, count); 2270 } 2271 } 2272 if (ncr) { 2273 uip = ncr->cr_uidinfo; 2274 pup = &uip->ui_pcpu[cpu]; 2275 atomic_add_int(&pup->pu_openfiles, 1); 2276 if (pup->pu_openfiles < -PUP_LIMIT || 2277 pup->pu_openfiles > PUP_LIMIT) { 2278 count = atomic_swap_int(&pup->pu_openfiles, 0); 2279 atomic_add_int(&uip->ui_openfiles, count); 2280 } 2281 } 2282 } 2283 if (ncr) 2284 crhold(ncr); 2285 fp->f_cred = ncr; 2286 if (ocr) 2287 crfree(ocr); 2288 } 2289 2290 /* 2291 * Free a file descriptor. 2292 */ 2293 static 2294 void 2295 ffree(struct file *fp) 2296 { 2297 KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!")); 2298 fsetcred(fp, NULL); 2299 if (fp->f_nchandle.ncp) 2300 cache_drop(&fp->f_nchandle); 2301 kfree_obj(fp, M_FILE); 2302 } 2303 2304 /* 2305 * called from init_main, initialize filedesc0 for proc0. 2306 */ 2307 void 2308 fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask) 2309 { 2310 p0->p_fd = fdp0; 2311 p0->p_fdtol = NULL; 2312 fdp0->fd_refcnt = 1; 2313 fdp0->fd_cmask = cmask; 2314 fdp0->fd_files = fdp0->fd_builtin_files; 2315 fdp0->fd_nfiles = NDFILE; 2316 fdp0->fd_lastfile = -1; 2317 spin_init(&fdp0->fd_spin, "fdinitbootstrap"); 2318 } 2319 2320 /* 2321 * Build a new filedesc structure. 
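 *
 * The new table starts out empty (only the builtin fdnode array) but
 * inherits the current, root and jail directories from p's existing
 * table.  Contrast with fdshare(), which merely gains a reference on
 * the existing table, and fdcopy(), which duplicates the descriptor
 * entries as well.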
2322 */ 2323 struct filedesc * 2324 fdinit(struct proc *p) 2325 { 2326 struct filedesc *newfdp; 2327 struct filedesc *fdp = p->p_fd; 2328 2329 newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO); 2330 spin_lock(&fdp->fd_spin); 2331 if (fdp->fd_cdir) { 2332 newfdp->fd_cdir = fdp->fd_cdir; 2333 vref(newfdp->fd_cdir); 2334 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir); 2335 } 2336 2337 /* 2338 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of 2339 * proc0, but should unconditionally exist in other processes. 2340 */ 2341 if (fdp->fd_rdir) { 2342 newfdp->fd_rdir = fdp->fd_rdir; 2343 vref(newfdp->fd_rdir); 2344 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir); 2345 } 2346 if (fdp->fd_jdir) { 2347 newfdp->fd_jdir = fdp->fd_jdir; 2348 vref(newfdp->fd_jdir); 2349 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir); 2350 } 2351 spin_unlock(&fdp->fd_spin); 2352 2353 /* Create the file descriptor table. */ 2354 newfdp->fd_refcnt = 1; 2355 newfdp->fd_cmask = cmask; 2356 newfdp->fd_files = newfdp->fd_builtin_files; 2357 newfdp->fd_nfiles = NDFILE; 2358 newfdp->fd_lastfile = -1; 2359 spin_init(&newfdp->fd_spin, "fdinit"); 2360 2361 return (newfdp); 2362 } 2363 2364 /* 2365 * Share a filedesc structure. 2366 */ 2367 struct filedesc * 2368 fdshare(struct proc *p) 2369 { 2370 struct filedesc *fdp; 2371 2372 fdp = p->p_fd; 2373 spin_lock(&fdp->fd_spin); 2374 fdp->fd_refcnt++; 2375 spin_unlock(&fdp->fd_spin); 2376 return (fdp); 2377 } 2378 2379 /* 2380 * Copy a filedesc structure. 2381 */ 2382 int 2383 fdcopy(struct proc *p, struct filedesc **fpp) 2384 { 2385 struct filedesc *fdp = p->p_fd; 2386 struct filedesc *newfdp; 2387 struct fdnode *fdnode; 2388 int i; 2389 int ni; 2390 2391 /* 2392 * Certain daemons might not have file descriptors. 2393 */ 2394 if (fdp == NULL) 2395 return (0); 2396 2397 /* 2398 * Allocate the new filedesc and fd_files[] array. This can race 2399 * with operations by other threads on the fdp so we have to be 2400 * careful. 2401 */ 2402 newfdp = kmalloc(sizeof(struct filedesc), 2403 M_FILEDESC, M_WAITOK | M_ZERO | M_NULLOK); 2404 if (newfdp == NULL) { 2405 *fpp = NULL; 2406 return (-1); 2407 } 2408 again: 2409 spin_lock(&fdp->fd_spin); 2410 if (fdp->fd_lastfile < NDFILE) { 2411 newfdp->fd_files = newfdp->fd_builtin_files; 2412 i = NDFILE; 2413 } else { 2414 /* 2415 * We have to allocate (N^2-1) entries for our in-place 2416 * binary tree. Allow the table to shrink. 2417 */ 2418 i = fdp->fd_nfiles; 2419 ni = (i - 1) / 2; 2420 while (ni > fdp->fd_lastfile && ni > NDFILE) { 2421 i = ni; 2422 ni = (i - 1) / 2; 2423 } 2424 spin_unlock(&fdp->fd_spin); 2425 newfdp->fd_files = kmalloc(i * sizeof(struct fdnode), 2426 M_FILEDESC, M_WAITOK | M_ZERO); 2427 2428 /* 2429 * Check for race, retry 2430 */ 2431 spin_lock(&fdp->fd_spin); 2432 if (i <= fdp->fd_lastfile) { 2433 spin_unlock(&fdp->fd_spin); 2434 kfree(newfdp->fd_files, M_FILEDESC); 2435 goto again; 2436 } 2437 } 2438 2439 /* 2440 * Dup the remaining fields. vref() and cache_hold() can be 2441 * safely called while holding the read spinlock on fdp. 2442 * 2443 * The read spinlock on fdp is still being held. 2444 * 2445 * NOTE: vref and cache_hold calls for the case where the vnode 2446 * or cache entry already has at least one ref may be called 2447 * while holding spin locks. 
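 *	 (in that case they amount to a reference-count bump and do
 *	 not block, which is what makes holding fd_spin across them
 *	 acceptable here)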
2448 */ 2449 if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) { 2450 vref(newfdp->fd_cdir); 2451 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir); 2452 } 2453 /* 2454 * We must check for fd_rdir here, at least for now because 2455 * the init process is created before we have access to the 2456 * rootvnode to take a reference to it. 2457 */ 2458 if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) { 2459 vref(newfdp->fd_rdir); 2460 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir); 2461 } 2462 if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) { 2463 vref(newfdp->fd_jdir); 2464 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir); 2465 } 2466 newfdp->fd_refcnt = 1; 2467 newfdp->fd_nfiles = i; 2468 newfdp->fd_lastfile = fdp->fd_lastfile; 2469 newfdp->fd_freefile = fdp->fd_freefile; 2470 newfdp->fd_cmask = fdp->fd_cmask; 2471 spin_init(&newfdp->fd_spin, "fdcopy"); 2472 2473 /* 2474 * Copy the descriptor table through (i). This also copies the 2475 * allocation state. Then go through and ref the file pointers 2476 * and clean up any KQ descriptors. 2477 * 2478 * kq descriptors cannot be copied. Since we haven't ref'd the 2479 * copied files yet we can ignore the return value from funsetfd(). 2480 * 2481 * The read spinlock on fdp is still being held. 2482 * 2483 * Be sure to clean out fdnode->tdcache, otherwise bad things will 2484 * happen. 2485 */ 2486 bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode)); 2487 for (i = 0 ; i < newfdp->fd_nfiles; ++i) { 2488 fdnode = &newfdp->fd_files[i]; 2489 if (fdnode->reserved) { 2490 fdreserve_locked(newfdp, i, -1); 2491 fdnode->reserved = 0; 2492 fdfixup_locked(newfdp, i); 2493 } else if (fdnode->fp) { 2494 bzero(&fdnode->tdcache, sizeof(fdnode->tdcache)); 2495 if (fdnode->fp->f_type == DTYPE_KQUEUE) { 2496 (void)funsetfd_locked(newfdp, i); 2497 } else { 2498 fhold(fdnode->fp); 2499 } 2500 } 2501 } 2502 spin_unlock(&fdp->fd_spin); 2503 *fpp = newfdp; 2504 return (0); 2505 } 2506 2507 /* 2508 * Release a filedesc structure. 2509 * 2510 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE) 2511 */ 2512 void 2513 fdfree(struct proc *p, struct filedesc *repl) 2514 { 2515 struct filedesc *fdp; 2516 struct fdnode *fdnode; 2517 int i; 2518 struct filedesc_to_leader *fdtol; 2519 struct file *fp; 2520 struct vnode *vp; 2521 struct flock lf; 2522 2523 /* 2524 * Before destroying or replacing p->p_fd we must be sure to 2525 * clean out the cache of the last thread, which should be 2526 * curthread. 2527 */ 2528 fexitcache(curthread); 2529 2530 /* 2531 * Certain daemons might not have file descriptors. 2532 */ 2533 fdp = p->p_fd; 2534 if (fdp == NULL) { 2535 p->p_fd = repl; 2536 return; 2537 } 2538 2539 /* 2540 * Severe messing around to follow.
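 *
 * In outline: drop this process's interest in the shared
 * filedesc_to_leader structure (releasing POSIX locks held on behalf
 * of the leader if we are the last user), then drop fd_refcnt.  Only
 * the final reference goes on to close every remaining descriptor,
 * replace p->p_fd with repl, wait out transient softrefs taken by
 * allproc scanners, and free the table.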
2541 */ 2542 spin_lock(&fdp->fd_spin); 2543 2544 /* Check for special need to clear POSIX style locks */ 2545 fdtol = p->p_fdtol; 2546 if (fdtol != NULL) { 2547 KASSERT(fdtol->fdl_refcount > 0, 2548 ("filedesc_to_refcount botch: fdl_refcount=%d", 2549 fdtol->fdl_refcount)); 2550 if (fdtol->fdl_refcount == 1 && p->p_leader->p_advlock_flag) { 2551 for (i = 0; i <= fdp->fd_lastfile; ++i) { 2552 fdnode = &fdp->fd_files[i]; 2553 if (fdnode->fp == NULL || 2554 fdnode->fp->f_type != DTYPE_VNODE) { 2555 continue; 2556 } 2557 fp = fdnode->fp; 2558 fhold(fp); 2559 spin_unlock(&fdp->fd_spin); 2560 2561 lf.l_whence = SEEK_SET; 2562 lf.l_start = 0; 2563 lf.l_len = 0; 2564 lf.l_type = F_UNLCK; 2565 vp = (struct vnode *)fp->f_data; 2566 VOP_ADVLOCK(vp, (caddr_t)p->p_leader, 2567 F_UNLCK, &lf, F_POSIX); 2568 fdrop(fp); 2569 spin_lock(&fdp->fd_spin); 2570 } 2571 } 2572 retry: 2573 if (fdtol->fdl_refcount == 1) { 2574 if (fdp->fd_holdleaderscount > 0 && 2575 p->p_leader->p_advlock_flag) { 2576 /* 2577 * close() or do_dup() has cleared a reference 2578 * in a shared file descriptor table. 2579 */ 2580 fdp->fd_holdleaderswakeup = 1; 2581 ssleep(&fdp->fd_holdleaderscount, 2582 &fdp->fd_spin, 0, "fdlhold", 0); 2583 goto retry; 2584 } 2585 if (fdtol->fdl_holdcount > 0) { 2586 /* 2587 * Ensure that fdtol->fdl_leader 2588 * remains valid in closef(). 2589 */ 2590 fdtol->fdl_wakeup = 1; 2591 ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0); 2592 goto retry; 2593 } 2594 } 2595 fdtol->fdl_refcount--; 2596 if (fdtol->fdl_refcount == 0 && 2597 fdtol->fdl_holdcount == 0) { 2598 fdtol->fdl_next->fdl_prev = fdtol->fdl_prev; 2599 fdtol->fdl_prev->fdl_next = fdtol->fdl_next; 2600 } else { 2601 fdtol = NULL; 2602 } 2603 p->p_fdtol = NULL; 2604 if (fdtol != NULL) { 2605 spin_unlock(&fdp->fd_spin); 2606 kfree(fdtol, M_FILEDESC_TO_LEADER); 2607 spin_lock(&fdp->fd_spin); 2608 } 2609 } 2610 if (--fdp->fd_refcnt > 0) { 2611 spin_unlock(&fdp->fd_spin); 2612 spin_lock(&p->p_spin); 2613 p->p_fd = repl; 2614 spin_unlock(&p->p_spin); 2615 return; 2616 } 2617 2618 /* 2619 * Even though we are the last reference to the structure allproc 2620 * scans may still reference the structure. Maintain proper 2621 * locks until we can replace p->p_fd. 2622 * 2623 * Also note that kqueue's closef still needs to reference the 2624 * fdp via p->p_fd, so we have to close the descriptors before 2625 * we replace p->p_fd. 2626 */ 2627 for (i = 0; i <= fdp->fd_lastfile; ++i) { 2628 if (fdp->fd_files[i].fp) { 2629 fp = funsetfd_locked(fdp, i); 2630 if (fp) { 2631 spin_unlock(&fdp->fd_spin); 2632 if (SLIST_FIRST(&fp->f_klist)) 2633 knote_fdclose(fp, fdp, i); 2634 closef(fp, p); 2635 spin_lock(&fdp->fd_spin); 2636 } 2637 } 2638 } 2639 spin_unlock(&fdp->fd_spin); 2640 2641 /* 2642 * Interlock against an allproc scan operations (typically frevoke). 2643 */ 2644 spin_lock(&p->p_spin); 2645 p->p_fd = repl; 2646 spin_unlock(&p->p_spin); 2647 2648 /* 2649 * Wait for any softrefs to go away. This race rarely occurs so 2650 * we can use a non-critical-path style poll/sleep loop. The 2651 * race only occurs against allproc scans. 2652 * 2653 * No new softrefs can occur with the fdp disconnected from the 2654 * process. 
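 *
 * The softref holders visible in this file drop fd_softrefs without
 * issuing a wakeup, so the loop below simply polls with a one-tick
 * tsleep() until the count reaches zero.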
2655 */ 2656 if (fdp->fd_softrefs) { 2657 kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid); 2658 while (fdp->fd_softrefs) 2659 tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1); 2660 } 2661 2662 if (fdp->fd_files != fdp->fd_builtin_files) 2663 kfree(fdp->fd_files, M_FILEDESC); 2664 if (fdp->fd_cdir) { 2665 cache_drop(&fdp->fd_ncdir); 2666 vrele(fdp->fd_cdir); 2667 } 2668 if (fdp->fd_rdir) { 2669 cache_drop(&fdp->fd_nrdir); 2670 vrele(fdp->fd_rdir); 2671 } 2672 if (fdp->fd_jdir) { 2673 cache_drop(&fdp->fd_njdir); 2674 vrele(fdp->fd_jdir); 2675 } 2676 kfree(fdp, M_FILEDESC); 2677 } 2678 2679 /* 2680 * Retrieve and reference the file pointer associated with a descriptor. 2681 * 2682 * td must be the current thread. 2683 */ 2684 struct file * 2685 holdfp(thread_t td, int fd, int flag) 2686 { 2687 struct file *fp; 2688 2689 fp = _holdfp_cache(td, fd); 2690 if (fp) { 2691 if ((fp->f_flag & flag) == 0 && flag != -1) { 2692 fdrop(fp); 2693 fp = NULL; 2694 } 2695 } 2696 return fp; 2697 } 2698 2699 /* 2700 * holdsock() - load the struct file pointer associated 2701 * with a socket into *fpp. If an error occurs, non-zero 2702 * will be returned and *fpp will be set to NULL. 2703 * 2704 * td must be the current thread. 2705 */ 2706 int 2707 holdsock(thread_t td, int fd, struct file **fpp) 2708 { 2709 struct file *fp; 2710 int error; 2711 2712 /* 2713 * Lockless shortcut 2714 */ 2715 fp = _holdfp_cache(td, fd); 2716 if (fp) { 2717 if (fp->f_type != DTYPE_SOCKET) { 2718 fdrop(fp); 2719 fp = NULL; 2720 error = ENOTSOCK; 2721 } else { 2722 error = 0; 2723 } 2724 } else { 2725 error = EBADF; 2726 } 2727 *fpp = fp; 2728 2729 return (error); 2730 } 2731 2732 /* 2733 * Convert a user file descriptor to a held file pointer. 2734 * 2735 * td must be the current thread. 2736 */ 2737 int 2738 holdvnode(thread_t td, int fd, struct file **fpp) 2739 { 2740 struct file *fp; 2741 int error; 2742 2743 fp = _holdfp_cache(td, fd); 2744 if (fp) { 2745 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) { 2746 fdrop(fp); 2747 fp = NULL; 2748 error = EINVAL; 2749 } else { 2750 error = 0; 2751 } 2752 } else { 2753 error = EBADF; 2754 } 2755 *fpp = fp; 2756 2757 return (error); 2758 } 2759 2760 /* 2761 * Convert a user file descriptor to a held file pointer, also returning the descriptor's fileflags in *fflagsp. 2762 * 2763 * td must be the current thread. 2764 */ 2765 int 2766 holdvnode2(thread_t td, int fd, struct file **fpp, char *fflagsp) 2767 { 2768 struct file *fp; 2769 int error; 2770 2771 fp = _holdfp2(td, fd, fflagsp); 2772 if (fp) { 2773 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) { 2774 fdrop(fp); 2775 fp = NULL; 2776 error = EINVAL; 2777 } else { 2778 error = 0; 2779 } 2780 } else { 2781 error = EBADF; 2782 } 2783 *fpp = fp; 2784 2785 return (error); 2786 } 2787 2788 /* 2789 * For setugid programs, we don't want people to use that setugidness 2790 * to generate error messages which write to a file which would 2791 * otherwise be off-limits to the process. 2792 * 2793 * This is a gross hack to plug the hole. A better solution would involve 2794 * a special vop or other form of generalized access control mechanism. We 2795 * go ahead and just reject all procfs file system accesses as dangerous. 2796 * 2797 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is 2798 * sufficient. We also don't check for setugidness since we know we are.
2799 */ 2800 static int 2801 is_unsafe(struct file *fp) 2802 { 2803 if (fp->f_type == DTYPE_VNODE && 2804 ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS) 2805 return (1); 2806 return (0); 2807 } 2808 2809 /* 2810 * Make this setguid thing safe, if at all possible. 2811 * 2812 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose() 2813 */ 2814 void 2815 setugidsafety(struct proc *p) 2816 { 2817 struct filedesc *fdp = p->p_fd; 2818 int i; 2819 2820 /* Certain daemons might not have file descriptors. */ 2821 if (fdp == NULL) 2822 return; 2823 2824 /* 2825 * note: fdp->fd_files may be reallocated out from under us while 2826 * we are blocked in a close. Be careful! 2827 */ 2828 for (i = 0; i <= fdp->fd_lastfile; i++) { 2829 if (i > 2) 2830 break; 2831 if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) { 2832 struct file *fp; 2833 2834 /* 2835 * NULL-out descriptor prior to close to avoid 2836 * a race while close blocks. 2837 */ 2838 if ((fp = funsetfd_locked(fdp, i)) != NULL) { 2839 knote_fdclose(fp, fdp, i); 2840 closef(fp, p); 2841 } 2842 } 2843 } 2844 } 2845 2846 /* 2847 * Close all CLOEXEC files on exec. 2848 * 2849 * Only a single thread remains for the current process. 2850 * 2851 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose() 2852 */ 2853 void 2854 fdcloseexec(struct proc *p) 2855 { 2856 struct filedesc *fdp = p->p_fd; 2857 int i; 2858 2859 /* Certain daemons might not have file descriptors. */ 2860 if (fdp == NULL) 2861 return; 2862 2863 /* 2864 * We cannot cache fd_files since operations may block and rip 2865 * them out from under us. 2866 */ 2867 for (i = 0; i <= fdp->fd_lastfile; i++) { 2868 if (fdp->fd_files[i].fp != NULL && 2869 (fdp->fd_files[i].fileflags & UF_EXCLOSE)) { 2870 struct file *fp; 2871 2872 /* 2873 * NULL-out descriptor prior to close to avoid 2874 * a race while close blocks. 2875 * 2876 * (funsetfd*() also clears the fd cache) 2877 */ 2878 if ((fp = funsetfd_locked(fdp, i)) != NULL) { 2879 knote_fdclose(fp, fdp, i); 2880 closef(fp, p); 2881 } 2882 } 2883 } 2884 } 2885 2886 /* 2887 * It is unsafe for set[ug]id processes to be started with file 2888 * descriptors 0..2 closed, as these descriptors are given implicit 2889 * significance in the Standard C library. fdcheckstd() will create a 2890 * descriptor referencing /dev/null for each of stdin, stdout, and 2891 * stderr that is not already open. 2892 * 2893 * NOT MPSAFE - calls falloc, vn_open, etc 2894 */ 2895 int 2896 fdcheckstd(struct lwp *lp) 2897 { 2898 struct nlookupdata nd; 2899 struct filedesc *fdp; 2900 struct file *fp; 2901 int retval; 2902 int i, error, flags, devnull; 2903 2904 fdp = lp->lwp_proc->p_fd; 2905 if (fdp == NULL) 2906 return (0); 2907 devnull = -1; 2908 error = 0; 2909 for (i = 0; i < 3; i++) { 2910 if (fdp->fd_files[i].fp != NULL) 2911 continue; 2912 if (devnull < 0) { 2913 if ((error = falloc(lp, &fp, &devnull)) != 0) 2914 break; 2915 2916 error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE, 2917 NLC_FOLLOW|NLC_LOCKVP); 2918 flags = FREAD | FWRITE; 2919 if (error == 0) 2920 error = vn_open(&nd, &fp, flags, 0); 2921 if (error == 0) 2922 fsetfd(fdp, fp, devnull); 2923 else 2924 fsetfd(fdp, NULL, devnull); 2925 fdrop(fp); 2926 nlookup_done(&nd); 2927 if (error) 2928 break; 2929 KKASSERT(i == devnull); 2930 } else { 2931 error = kern_dup(DUP_FIXED, devnull, i, &retval); 2932 if (error != 0) 2933 break; 2934 } 2935 } 2936 return (error); 2937 } 2938 2939 /* 2940 * Internal form of close. 2941 * Decrement reference count on file structure. 
2942 * Note: td and/or p may be NULL when closing a file 2943 * that was being passed in a message. 2944 * 2945 * MPALMOSTSAFE - acquires mplock for VOP operations 2946 */ 2947 int 2948 closef(struct file *fp, struct proc *p) 2949 { 2950 struct vnode *vp; 2951 struct flock lf; 2952 struct filedesc_to_leader *fdtol; 2953 2954 if (fp == NULL) 2955 return (0); 2956 2957 /* 2958 * POSIX record locking dictates that any close releases ALL 2959 * locks owned by this process. This is handled by setting 2960 * a flag in the unlock to free ONLY locks obeying POSIX 2961 * semantics, and not to free BSD-style file locks. 2962 * If the descriptor was in a message, POSIX-style locks 2963 * aren't passed with the descriptor. 2964 */ 2965 if (p != NULL && fp->f_type == DTYPE_VNODE && 2966 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS) 2967 ) { 2968 if (p->p_leader->p_advlock_flag) { 2969 lf.l_whence = SEEK_SET; 2970 lf.l_start = 0; 2971 lf.l_len = 0; 2972 lf.l_type = F_UNLCK; 2973 vp = (struct vnode *)fp->f_data; 2974 VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK, 2975 &lf, F_POSIX); 2976 } 2977 fdtol = p->p_fdtol; 2978 if (fdtol != NULL) { 2979 lwkt_gettoken(&p->p_token); 2980 2981 /* 2982 * Handle special case where file descriptor table 2983 * is shared between multiple process leaders. 2984 */ 2985 for (fdtol = fdtol->fdl_next; 2986 fdtol != p->p_fdtol; 2987 fdtol = fdtol->fdl_next) { 2988 if (fdtol->fdl_leader->p_advlock_flag == 0) 2989 continue; 2990 fdtol->fdl_holdcount++; 2991 lf.l_whence = SEEK_SET; 2992 lf.l_start = 0; 2993 lf.l_len = 0; 2994 lf.l_type = F_UNLCK; 2995 vp = (struct vnode *)fp->f_data; 2996 VOP_ADVLOCK(vp, (caddr_t)fdtol->fdl_leader, 2997 F_UNLCK, &lf, F_POSIX); 2998 fdtol->fdl_holdcount--; 2999 if (fdtol->fdl_holdcount == 0 && 3000 fdtol->fdl_wakeup != 0) { 3001 fdtol->fdl_wakeup = 0; 3002 wakeup(fdtol); 3003 } 3004 } 3005 lwkt_reltoken(&p->p_token); 3006 } 3007 } 3008 return (fdrop(fp)); 3009 } 3010 3011 /* 3012 * fhold() can only be called if f_count is already at least 1 (i.e. the 3013 * caller of fhold() already has a reference to the file pointer in some 3014 * manner or other). 3015 * 3016 * Atomic ops are used for incrementing and decrementing f_count before 3017 * the 1->0 transition. f_count 1->0 transition is special, see the 3018 * comment in fdrop(). 3019 */ 3020 void 3021 fhold(struct file *fp) 3022 { 3023 /* 0->1 transition will never work */ 3024 KASSERT(fp->f_count > 0, ("fhold: invalid f_count %d", fp->f_count)); 3025 atomic_add_int(&fp->f_count, 1); 3026 } 3027 3028 /* 3029 * fdrop() - drop a reference to a descriptor 3030 */ 3031 int 3032 fdrop(struct file *fp) 3033 { 3034 struct flock lf; 3035 struct vnode *vp; 3036 int error, do_free = 0; 3037 3038 /* 3039 * NOTE: 3040 * Simple atomic_fetchadd_int(f_count, -1) here will cause use- 3041 * after-free or double free (due to f_count 0->1 transition), if 3042 * fhold() is called on the fps found through filehead iteration. 3043 */ 3044 for (;;) { 3045 int count = fp->f_count; 3046 3047 cpu_ccfence(); 3048 KASSERT(count > 0, ("fdrop: invalid f_count %d", count)); 3049 if (count == 1) { 3050 struct filelist_head *head = fp2filelist(fp); 3051 3052 /* 3053 * About to drop the last reference, hold the 3054 * filehead spin lock and drop it, so that no 3055 * one could see this fp through filehead anymore, 3056 * let alone fhold() this fp. 
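 *		 allfiles_scan_exclusive() takes the same per-head
 *		 spinlock around its callbacks, so once the cmpset
 *		 below succeeds and the fp is unlinked, no scanner can
 *		 find it and fhold() it back up from zero.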
3057 */ 3058 spin_lock(&head->spin); 3059 if (atomic_cmpset_int(&fp->f_count, count, 0)) { 3060 LIST_REMOVE(fp, f_list); 3061 spin_unlock(&head->spin); 3062 atomic_subtract_int(&nfiles, 1); 3063 do_free = 1; /* free this fp */ 3064 break; 3065 } 3066 spin_unlock(&head->spin); 3067 /* retry */ 3068 } else if (atomic_cmpset_int(&fp->f_count, count, count - 1)) { 3069 break; 3070 } 3071 /* retry */ 3072 } 3073 if (!do_free) 3074 return (0); 3075 3076 KKASSERT(SLIST_FIRST(&fp->f_klist) == NULL); 3077 3078 /* 3079 * The last reference has gone away, we own the fp structure free 3080 * and clear. 3081 */ 3082 if (fp->f_count < 0) 3083 panic("fdrop: count < 0"); 3084 if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE && 3085 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS) 3086 ) { 3087 lf.l_whence = SEEK_SET; 3088 lf.l_start = 0; 3089 lf.l_len = 0; 3090 lf.l_type = F_UNLCK; 3091 vp = (struct vnode *)fp->f_data; 3092 VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0); 3093 } 3094 if (fp->f_ops != &badfileops) 3095 error = fo_close(fp); 3096 else 3097 error = 0; 3098 ffree(fp); 3099 return (error); 3100 } 3101 3102 /* 3103 * Apply an advisory lock on a file descriptor. 3104 * 3105 * Just attempt to get a record lock of the requested type on 3106 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0). 3107 * 3108 * MPALMOSTSAFE 3109 */ 3110 int 3111 sys_flock(struct sysmsg *sysmsg, const struct flock_args *uap) 3112 { 3113 thread_t td = curthread; 3114 struct file *fp; 3115 struct vnode *vp; 3116 struct flock lf; 3117 int error; 3118 3119 if ((fp = holdfp(td, uap->fd, -1)) == NULL) 3120 return (EBADF); 3121 if (fp->f_type != DTYPE_VNODE) { 3122 error = EOPNOTSUPP; 3123 goto done; 3124 } 3125 vp = (struct vnode *)fp->f_data; 3126 lf.l_whence = SEEK_SET; 3127 lf.l_start = 0; 3128 lf.l_len = 0; 3129 if (uap->how & LOCK_UN) { 3130 lf.l_type = F_UNLCK; 3131 atomic_clear_int(&fp->f_flag, FHASLOCK); /* race ok */ 3132 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0); 3133 goto done; 3134 } 3135 if (uap->how & LOCK_EX) 3136 lf.l_type = F_WRLCK; 3137 else if (uap->how & LOCK_SH) 3138 lf.l_type = F_RDLCK; 3139 else { 3140 error = EBADF; 3141 goto done; 3142 } 3143 if (uap->how & LOCK_NB) 3144 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0); 3145 else 3146 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT); 3147 atomic_set_int(&fp->f_flag, FHASLOCK); /* race ok */ 3148 done: 3149 fdrop(fp); 3150 return (error); 3151 } 3152 3153 /* 3154 * File Descriptor pseudo-device driver ( /dev/fd/N ). 3155 * 3156 * This interface is now a bit more linux-compatible and attempts to not 3157 * share seek positions by not sharing the fp of the descriptor when 3158 * possible. 3159 * 3160 * Probably a good idea anyhow, but now particularly important for 3161 * fexecve() which uses /dev/fd/N. 3162 * 3163 * The original interface effectively dup()d the descriptor. 3164 */ 3165 static int 3166 fdopen(struct dev_open_args *ap) 3167 { 3168 struct file *wfp; 3169 thread_t td; 3170 int error; 3171 int sfd; 3172 3173 td = curthread; 3174 KKASSERT(td->td_lwp != NULL); 3175 3176 /* 3177 * Get the fp for /dev/fd/N 3178 */ 3179 sfd = minor(ap->a_head.a_dev); 3180 if ((wfp = holdfp(td, sfd, -1)) == NULL) 3181 return (EBADF); 3182 3183 /* 3184 * Close a revoke/dup race. Duping a descriptor marked as revoked 3185 * will dup a dummy descriptor instead of the real one. 
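 * The replacement fp obtained from falloc() still carries badfileops
 * and no vnode, so it falls through to the share-directly case below
 * and subsequent operations on the new descriptor simply fail with
 * EBADF instead of reaching the revoked object.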
3186 */ 3187 if (wfp->f_flag & FREVOKED) { 3188 kprintf("Warning: attempt to dup() a revoked descriptor\n"); 3189 fdrop(wfp); 3190 wfp = NULL; 3191 error = falloc(NULL, &wfp, NULL); 3192 if (error) 3193 return (error); 3194 } 3195 3196 /* 3197 * Check that the mode the file is being opened for is a 3198 * subset of the mode of the existing descriptor. 3199 */ 3200 if (ap->a_fpp == NULL) { 3201 fdrop(wfp); 3202 return EINVAL; 3203 } 3204 if (((ap->a_oflags & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) { 3205 fdrop(wfp); 3206 return EACCES; 3207 } 3208 if (wfp->f_type == DTYPE_VNODE && wfp->f_data) { 3209 /* 3210 * If wfp is a vnode create a new fp so things like the 3211 * seek position (etc) are not shared with the original. 3212 * 3213 * Don't try to call VOP_OPEN(). Adjust the open-count 3214 * ourselves. 3215 */ 3216 struct vnode *vp; 3217 struct file *fp; 3218 3219 vp = wfp->f_data; 3220 fp = *ap->a_fpp; 3221 3222 /* 3223 * Yah... this wouldn't be good. 3224 */ 3225 if ((ap->a_oflags & (FWRITE|O_TRUNC)) && vp->v_type == VDIR) { 3226 fdrop(wfp); 3227 return EISDIR; 3228 } 3229 3230 /* 3231 * Setup the new fp and simulate an open(), but for now do 3232 * not actually call VOP_OPEN() though we probably could. 3233 */ 3234 fp->f_type = DTYPE_VNODE; 3235 /* retain flags not to be copied */ 3236 fp->f_flag = (fp->f_flag & ~FMASK) | (ap->a_oflags & FMASK); 3237 fp->f_ops = &vnode_fileops; 3238 fp->f_data = vp; 3239 vref(vp); 3240 3241 if (ap->a_oflags & FWRITE) 3242 atomic_add_int(&vp->v_writecount, 1); 3243 KKASSERT(vp->v_opencount >= 0 && vp->v_opencount != INT_MAX); 3244 atomic_add_int(&vp->v_opencount, 1); 3245 fdrop(wfp); 3246 } else { 3247 /* 3248 * If wfp is not a vnode we have to share it directly. 3249 */ 3250 fdrop(*ap->a_fpp); 3251 *ap->a_fpp = wfp; /* transfer hold count */ 3252 } 3253 return EALREADY; 3254 } 3255 3256 /* 3257 * NOT MPSAFE - I think these refer to a common file descriptor table 3258 * and we need to spinlock that to link fdtol in. 3259 */ 3260 struct filedesc_to_leader * 3261 filedesc_to_leader_alloc(struct filedesc_to_leader *old, 3262 struct proc *leader) 3263 { 3264 struct filedesc_to_leader *fdtol; 3265 3266 fdtol = kmalloc(sizeof(struct filedesc_to_leader), 3267 M_FILEDESC_TO_LEADER, M_WAITOK | M_ZERO); 3268 fdtol->fdl_refcount = 1; 3269 fdtol->fdl_holdcount = 0; 3270 fdtol->fdl_wakeup = 0; 3271 fdtol->fdl_leader = leader; 3272 if (old != NULL) { 3273 fdtol->fdl_next = old->fdl_next; 3274 fdtol->fdl_prev = old; 3275 old->fdl_next = fdtol; 3276 fdtol->fdl_next->fdl_prev = fdtol; 3277 } else { 3278 fdtol->fdl_next = fdtol; 3279 fdtol->fdl_prev = fdtol; 3280 } 3281 return fdtol; 3282 } 3283 3284 /* 3285 * Scan all file pointers in the system. The callback is made with 3286 * the master list spinlock held exclusively. 3287 */ 3288 void 3289 allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data) 3290 { 3291 int i; 3292 3293 for (i = 0; i < NFILELIST_HEADS; ++i) { 3294 struct filelist_head *head = &filelist_heads[i]; 3295 struct file *fp; 3296 3297 spin_lock(&head->spin); 3298 LIST_FOREACH(fp, &head->list, f_list) { 3299 int res; 3300 3301 res = callback(fp, data); 3302 if (res < 0) 3303 break; 3304 } 3305 spin_unlock(&head->spin); 3306 } 3307 } 3308 3309 /* 3310 * Get file structures. 
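 * (the kern.file sysctl, exported as an array of struct kinfo_file).
 *
 * Illustrative userland consumer, sketch only with error checks and
 * declarations abbreviated; the handler deliberately pads the size
 * estimate on the first call to absorb churn before the second:
 *
 *	int mib[2] = { CTL_KERN, KERN_FILE };
 *	size_t len;
 *	struct kinfo_file *kf;
 *
 *	sysctl(mib, 2, NULL, &len, NULL, 0);
 *	kf = malloc(len);
 *	sysctl(mib, 2, kf, &len, NULL, 0);
 *
 * The number of records returned is len / sizeof(*kf).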
3311 * 3312 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe) 3313 */ 3314 3315 struct sysctl_kern_file_info { 3316 int count; 3317 int error; 3318 struct sysctl_req *req; 3319 }; 3320 3321 static int sysctl_kern_file_callback(struct proc *p, void *data); 3322 3323 static int 3324 sysctl_kern_file(SYSCTL_HANDLER_ARGS) 3325 { 3326 struct sysctl_kern_file_info info; 3327 3328 /* 3329 * Note: because the number of file descriptors is calculated 3330 * in different ways for sizing vs returning the data, 3331 * there is information leakage from the first loop. However, 3332 * it is of a similar order of magnitude to the leakage from 3333 * global system statistics such as kern.openfiles. 3334 * 3335 * When just doing a count, note that we cannot just count 3336 * the elements and add f_count via the filehead list because 3337 * threaded processes share their descriptor table and f_count might 3338 * still be '1' in that case. 3339 * 3340 * Since the SYSCTL op can block, we must hold the process to 3341 * prevent it being ripped out from under us either in the 3342 * file descriptor loop or in the greater LIST_FOREACH. The 3343 * process may be in varying states of disrepair. If the process 3344 * is in SZOMB we may have caught it just as it is being removed 3345 * from the allproc list, we must skip it in that case to maintain 3346 * an unbroken chain through the allproc list. 3347 */ 3348 info.count = 0; 3349 info.error = 0; 3350 info.req = req; 3351 allproc_scan(sysctl_kern_file_callback, &info, 0); 3352 3353 /* 3354 * When just calculating the size, overestimate a bit to try to 3355 * prevent system activity from causing the buffer-fill call 3356 * to fail later on. 3357 */ 3358 if (req->oldptr == NULL) { 3359 info.count = (info.count + 16) + (info.count / 10); 3360 info.error = SYSCTL_OUT(req, NULL, 3361 info.count * sizeof(struct kinfo_file)); 3362 } 3363 return (info.error); 3364 } 3365 3366 static int 3367 sysctl_kern_file_callback(struct proc *p, void *data) 3368 { 3369 struct sysctl_kern_file_info *info = data; 3370 struct kinfo_file kf; 3371 struct filedesc *fdp; 3372 struct file *fp; 3373 uid_t uid; 3374 int n; 3375 3376 if (p->p_stat == SIDL || p->p_stat == SZOMB) 3377 return(0); 3378 if (!(PRISON_CHECK(info->req->td->td_ucred, p->p_ucred) != 0)) 3379 return(0); 3380 3381 /* 3382 * Softref the fdp to prevent it from being destroyed 3383 */ 3384 spin_lock(&p->p_spin); 3385 if ((fdp = p->p_fd) == NULL) { 3386 spin_unlock(&p->p_spin); 3387 return(0); 3388 } 3389 atomic_add_int(&fdp->fd_softrefs, 1); 3390 spin_unlock(&p->p_spin); 3391 3392 /* 3393 * The fdp's own spinlock prevents the contents from being 3394 * modified. 3395 */ 3396 spin_lock_shared(&fdp->fd_spin); 3397 for (n = 0; n < fdp->fd_nfiles; ++n) { 3398 if ((fp = fdp->fd_files[n].fp) == NULL) 3399 continue; 3400 if (info->req->oldptr == NULL) { 3401 ++info->count; 3402 } else { 3403 uid = p->p_ucred ? 
p->p_ucred->cr_uid : -1; 3404 kcore_make_file(&kf, fp, p->p_pid, uid, n); 3405 spin_unlock_shared(&fdp->fd_spin); 3406 info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf)); 3407 spin_lock_shared(&fdp->fd_spin); 3408 if (info->error) 3409 break; 3410 } 3411 } 3412 spin_unlock_shared(&fdp->fd_spin); 3413 atomic_subtract_int(&fdp->fd_softrefs, 1); 3414 if (info->error) 3415 return(-1); 3416 return(0); 3417 } 3418 3419 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD, 3420 0, 0, sysctl_kern_file, "S,file", "Entire file table"); 3421 3422 SYSCTL_INT(_kern, OID_AUTO, minfilesperproc, CTLFLAG_RW, 3423 &minfilesperproc, 0, "Minimum files allowed open per process"); 3424 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW, 3425 &maxfilesperproc, 0, "Maximum files allowed open per process"); 3426 SYSCTL_INT(_kern, OID_AUTO, maxfilesperuser, CTLFLAG_RW, 3427 &maxfilesperuser, 0, "Maximum files allowed open per user"); 3428 3429 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW, 3430 &maxfiles, 0, "Maximum number of files"); 3431 3432 SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW, 3433 &maxfilesrootres, 0, "Descriptors reserved for root use"); 3434 3435 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD, 3436 &nfiles, 0, "System-wide number of open files"); 3437 3438 static void 3439 fildesc_drvinit(void *unused) 3440 { 3441 int fd; 3442 3443 for (fd = 0; fd < NUMFDESC; fd++) { 3444 make_dev(&fildesc_ops, fd, 3445 UID_BIN, GID_BIN, 0666, "fd/%d", fd); 3446 } 3447 3448 make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin"); 3449 make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout"); 3450 make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr"); 3451 } 3452 3453 struct fileops badfileops = { 3454 .fo_read = badfo_readwrite, 3455 .fo_write = badfo_readwrite, 3456 .fo_ioctl = badfo_ioctl, 3457 .fo_kqfilter = badfo_kqfilter, 3458 .fo_stat = badfo_stat, 3459 .fo_close = badfo_close, 3460 .fo_shutdown = badfo_shutdown 3461 }; 3462 3463 int 3464 badfo_readwrite( 3465 struct file *fp, 3466 struct uio *uio, 3467 struct ucred *cred, 3468 int flags 3469 ) { 3470 return (EBADF); 3471 } 3472 3473 int 3474 badfo_ioctl(struct file *fp, u_long com, caddr_t data, 3475 struct ucred *cred, struct sysmsg *msgv) 3476 { 3477 return (EBADF); 3478 } 3479 3480 /* 3481 * Must return an error to prevent registration, typically 3482 * due to a revoked descriptor (file_filtops assigned). 3483 */ 3484 int 3485 badfo_kqfilter(struct file *fp, struct knote *kn) 3486 { 3487 return (EOPNOTSUPP); 3488 } 3489 3490 int 3491 badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred) 3492 { 3493 return (EBADF); 3494 } 3495 3496 int 3497 badfo_close(struct file *fp) 3498 { 3499 return (EBADF); 3500 } 3501 3502 int 3503 badfo_shutdown(struct file *fp, int how) 3504 { 3505 return (EBADF); 3506 } 3507 3508 int 3509 nofo_shutdown(struct file *fp, int how) 3510 { 3511 return (EOPNOTSUPP); 3512 } 3513 3514 SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, 3515 fildesc_drvinit,NULL); 3516 3517 static void 3518 filelist_heads_init(void *arg __unused) 3519 { 3520 int i; 3521 3522 for (i = 0; i < NFILELIST_HEADS; ++i) { 3523 struct filelist_head *head = &filelist_heads[i]; 3524 3525 spin_init(&head->spin, "filehead_spin"); 3526 LIST_INIT(&head->list); 3527 } 3528 } 3529 3530 SYSINIT(filelistheads, SI_BOOT1_LOCK, SI_ORDER_ANY, filelist_heads_init, NULL); 3531
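/*
 * Illustrative note (not part of the driver code above): a file type
 * that wants its own semantics supplies a struct fileops in the same
 * shape as badfileops, filling in real handlers and reusing the
 * badfo_*() / nofo_*() defaults for operations it does not support.
 * The example_* names below are hypothetical:
 *
 *	static struct fileops example_fileops = {
 *		.fo_read = example_read,
 *		.fo_write = example_write,
 *		.fo_ioctl = example_ioctl,
 *		.fo_kqfilter = badfo_kqfilter,
 *		.fo_stat = badfo_stat,
 *		.fo_close = example_close,
 *		.fo_shutdown = nofo_shutdown
 *	};
 *
 * It is installed on a freshly falloc()d fp with fp->f_ops =
 * &example_fileops before the final fsetfd().
 */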