1 /* 2 * Copyright (c) 2005 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Jeffrey Hsu. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * 35 * Copyright (c) 1982, 1986, 1989, 1991, 1993 36 * The Regents of the University of California. All rights reserved. 37 * (c) UNIX System Laboratories, Inc. 38 * All or some portions of this file are derived from material licensed 39 * to the University of California by American Telephone and Telegraph 40 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 41 * the permission of UNIX System Laboratories, Inc. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. All advertising materials mentioning features or use of this software 52 * must display the following acknowledgement: 53 * This product includes software developed by the University of 54 * California, Berkeley and its contributors. 55 * 4. Neither the name of the University nor the names of its contributors 56 * may be used to endorse or promote products derived from this software 57 * without specific prior written permission. 58 * 59 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 62 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 69 * SUCH DAMAGE. 70 * 71 * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94 72 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $ 73 * $DragonFly: src/sys/kern/kern_descrip.c,v 1.79 2008/08/31 13:18:28 aggelos Exp $ 74 */ 75 76 #include "opt_compat.h" 77 #include <sys/param.h> 78 #include <sys/systm.h> 79 #include <sys/malloc.h> 80 #include <sys/sysproto.h> 81 #include <sys/conf.h> 82 #include <sys/device.h> 83 #include <sys/file.h> 84 #include <sys/filedesc.h> 85 #include <sys/kernel.h> 86 #include <sys/sysctl.h> 87 #include <sys/vnode.h> 88 #include <sys/proc.h> 89 #include <sys/nlookup.h> 90 #include <sys/file.h> 91 #include <sys/stat.h> 92 #include <sys/filio.h> 93 #include <sys/fcntl.h> 94 #include <sys/unistd.h> 95 #include <sys/resourcevar.h> 96 #include <sys/event.h> 97 #include <sys/kern_syscall.h> 98 #include <sys/kcore.h> 99 #include <sys/kinfo.h> 100 #include <sys/un.h> 101 102 #include <vm/vm.h> 103 #include <vm/vm_extern.h> 104 105 #include <sys/thread2.h> 106 #include <sys/file2.h> 107 #include <sys/spinlock2.h> 108 109 static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd); 110 static void fdreserve_locked (struct filedesc *fdp, int fd0, int incr); 111 static struct file *funsetfd_locked (struct filedesc *fdp, int fd); 112 static int checkfpclosed(struct filedesc *fdp, int fd, struct file *fp); 113 static void ffree(struct file *fp); 114 115 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table"); 116 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader", 117 "file desc to leader structures"); 118 MALLOC_DEFINE(M_FILE, "file", "Open file structure"); 119 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures"); 120 121 static d_open_t fdopen; 122 #define NUMFDESC 64 123 124 #define CDEV_MAJOR 22 125 static struct dev_ops fildesc_ops = { 126 { "FD", CDEV_MAJOR, 0 }, 127 .d_open = fdopen, 128 }; 129 130 /* 131 * Descriptor management. 132 */ 133 static struct filelist filehead = LIST_HEAD_INITIALIZER(&filehead); 134 static struct spinlock filehead_spin = SPINLOCK_INITIALIZER(&filehead_spin); 135 static int nfiles; /* actual number of open files */ 136 extern int cmask; 137 138 /* 139 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared. 140 * 141 * MPSAFE - must be called with fdp->fd_spin exclusively held 142 */ 143 static __inline 144 void 145 fdfixup_locked(struct filedesc *fdp, int fd) 146 { 147 if (fd < fdp->fd_freefile) { 148 fdp->fd_freefile = fd; 149 } 150 while (fdp->fd_lastfile >= 0 && 151 fdp->fd_files[fdp->fd_lastfile].fp == NULL && 152 fdp->fd_files[fdp->fd_lastfile].reserved == 0 153 ) { 154 --fdp->fd_lastfile; 155 } 156 } 157 158 /* 159 * System calls on descriptors. 
160 * 161 * MPSAFE 162 */ 163 int 164 sys_getdtablesize(struct getdtablesize_args *uap) 165 { 166 struct proc *p = curproc; 167 struct plimit *limit = p->p_limit; 168 169 spin_lock_rd(&limit->p_spin); 170 uap->sysmsg_result = 171 min((int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc); 172 spin_unlock_rd(&limit->p_spin); 173 return (0); 174 } 175 176 /* 177 * Duplicate a file descriptor to a particular value. 178 * 179 * note: keep in mind that a potential race condition exists when closing 180 * descriptors from a shared descriptor table (via rfork). 181 * 182 * MPSAFE 183 */ 184 int 185 sys_dup2(struct dup2_args *uap) 186 { 187 int error; 188 int fd = 0; 189 190 error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd); 191 uap->sysmsg_fds[0] = fd; 192 193 return (error); 194 } 195 196 /* 197 * Duplicate a file descriptor. 198 * 199 * MPSAFE 200 */ 201 int 202 sys_dup(struct dup_args *uap) 203 { 204 int error; 205 int fd = 0; 206 207 error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd); 208 uap->sysmsg_fds[0] = fd; 209 210 return (error); 211 } 212 213 /* 214 * MPALMOSTSAFE - acquires mplock for fp operations 215 */ 216 int 217 kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred) 218 { 219 struct thread *td = curthread; 220 struct proc *p = td->td_proc; 221 struct file *fp; 222 struct vnode *vp; 223 u_int newmin; 224 u_int oflags; 225 u_int nflags; 226 int tmp, error, flg = F_POSIX; 227 228 KKASSERT(p); 229 230 /* 231 * Operations on file descriptors that do not require a file pointer. 232 */ 233 switch (cmd) { 234 case F_GETFD: 235 error = fgetfdflags(p->p_fd, fd, &tmp); 236 if (error == 0) 237 dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0; 238 return (error); 239 240 case F_SETFD: 241 if (dat->fc_cloexec & FD_CLOEXEC) 242 error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE); 243 else 244 error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE); 245 return (error); 246 case F_DUPFD: 247 newmin = dat->fc_fd; 248 error = kern_dup(DUP_VARIABLE, fd, newmin, &dat->fc_fd); 249 return (error); 250 default: 251 break; 252 } 253 254 /* 255 * Operations on file pointers 256 */ 257 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL) 258 return (EBADF); 259 260 get_mplock(); 261 switch (cmd) { 262 case F_GETFL: 263 dat->fc_flags = OFLAGS(fp->f_flag); 264 error = 0; 265 break; 266 267 case F_SETFL: 268 oflags = fp->f_flag; 269 nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS; 270 nflags |= oflags & ~FCNTLFLAGS; 271 272 error = 0; 273 if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY)) 274 error = EINVAL; 275 if (error == 0 && ((nflags ^ oflags) & FASYNC)) { 276 tmp = nflags & FASYNC; 277 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, 278 cred, NULL); 279 } 280 if (error == 0) 281 fp->f_flag = nflags; 282 break; 283 284 case F_GETOWN: 285 error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner, 286 cred, NULL); 287 break; 288 289 case F_SETOWN: 290 error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner, 291 cred, NULL); 292 break; 293 294 case F_SETLKW: 295 flg |= F_WAIT; 296 /* Fall into F_SETLK */ 297 298 case F_SETLK: 299 if (fp->f_type != DTYPE_VNODE) { 300 error = EBADF; 301 break; 302 } 303 vp = (struct vnode *)fp->f_data; 304 305 /* 306 * copyin/lockop may block 307 */ 308 if (dat->fc_flock.l_whence == SEEK_CUR) 309 dat->fc_flock.l_start += fp->f_offset; 310 311 switch (dat->fc_flock.l_type) { 312 case F_RDLCK: 313 if ((fp->f_flag & FREAD) == 0) { 314 error = EBADF; 315 break; 316 } 317 p->p_leader->p_flag |= P_ADVLOCK; 318 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, 
F_SETLK, 319 &dat->fc_flock, flg); 320 break; 321 case F_WRLCK: 322 if ((fp->f_flag & FWRITE) == 0) { 323 error = EBADF; 324 break; 325 } 326 p->p_leader->p_flag |= P_ADVLOCK; 327 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, 328 &dat->fc_flock, flg); 329 break; 330 case F_UNLCK: 331 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK, 332 &dat->fc_flock, F_POSIX); 333 break; 334 default: 335 error = EINVAL; 336 break; 337 } 338 339 /* 340 * It is possible to race a close() on the descriptor while 341 * we were blocked getting the lock. If this occurs the 342 * close might not have caught the lock. 343 */ 344 if (checkfpclosed(p->p_fd, fd, fp)) { 345 dat->fc_flock.l_whence = SEEK_SET; 346 dat->fc_flock.l_start = 0; 347 dat->fc_flock.l_len = 0; 348 dat->fc_flock.l_type = F_UNLCK; 349 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, 350 F_UNLCK, &dat->fc_flock, F_POSIX); 351 } 352 break; 353 354 case F_GETLK: 355 if (fp->f_type != DTYPE_VNODE) { 356 error = EBADF; 357 break; 358 } 359 vp = (struct vnode *)fp->f_data; 360 /* 361 * copyin/lockop may block 362 */ 363 if (dat->fc_flock.l_type != F_RDLCK && 364 dat->fc_flock.l_type != F_WRLCK && 365 dat->fc_flock.l_type != F_UNLCK) { 366 error = EINVAL; 367 break; 368 } 369 if (dat->fc_flock.l_whence == SEEK_CUR) 370 dat->fc_flock.l_start += fp->f_offset; 371 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, 372 &dat->fc_flock, F_POSIX); 373 break; 374 default: 375 error = EINVAL; 376 break; 377 } 378 rel_mplock(); 379 380 fdrop(fp); 381 return (error); 382 } 383 384 /* 385 * The file control system call. 386 * 387 * MPSAFE 388 */ 389 int 390 sys_fcntl(struct fcntl_args *uap) 391 { 392 union fcntl_dat dat; 393 int error; 394 395 switch (uap->cmd) { 396 case F_DUPFD: 397 dat.fc_fd = uap->arg; 398 break; 399 case F_SETFD: 400 dat.fc_cloexec = uap->arg; 401 break; 402 case F_SETFL: 403 dat.fc_flags = uap->arg; 404 break; 405 case F_SETOWN: 406 dat.fc_owner = uap->arg; 407 break; 408 case F_SETLKW: 409 case F_SETLK: 410 case F_GETLK: 411 error = copyin((caddr_t)uap->arg, &dat.fc_flock, 412 sizeof(struct flock)); 413 if (error) 414 return (error); 415 break; 416 } 417 418 error = kern_fcntl(uap->fd, uap->cmd, &dat, curproc->p_ucred); 419 420 if (error == 0) { 421 switch (uap->cmd) { 422 case F_DUPFD: 423 uap->sysmsg_result = dat.fc_fd; 424 break; 425 case F_GETFD: 426 uap->sysmsg_result = dat.fc_cloexec; 427 break; 428 case F_GETFL: 429 uap->sysmsg_result = dat.fc_flags; 430 break; 431 case F_GETOWN: 432 uap->sysmsg_result = dat.fc_owner; 433 case F_GETLK: 434 error = copyout(&dat.fc_flock, (caddr_t)uap->arg, 435 sizeof(struct flock)); 436 break; 437 } 438 } 439 440 return (error); 441 } 442 443 /* 444 * Common code for dup, dup2, and fcntl(F_DUPFD). 445 * 446 * The type flag can be either DUP_FIXED or DUP_VARIABLE. DUP_FIXED tells 447 * kern_dup() to destructively dup over an existing file descriptor if new 448 * is already open. DUP_VARIABLE tells kern_dup() to find the lowest 449 * unused file descriptor that is greater than or equal to new. 450 * 451 * MPSAFE 452 */ 453 int 454 kern_dup(enum dup_type type, int old, int new, int *res) 455 { 456 struct thread *td = curthread; 457 struct proc *p = td->td_proc; 458 struct filedesc *fdp = p->p_fd; 459 struct file *fp; 460 struct file *delfp; 461 int oldflags; 462 int holdleaders; 463 int error, newfd; 464 465 /* 466 * Verify that we have a valid descriptor to dup from and 467 * possibly to dup to. 
468 */ 469 retry: 470 spin_lock_wr(&fdp->fd_spin); 471 if (new < 0 || new > p->p_rlimit[RLIMIT_NOFILE].rlim_cur || 472 new >= maxfilesperproc) { 473 spin_unlock_wr(&fdp->fd_spin); 474 return (EINVAL); 475 } 476 if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) { 477 spin_unlock_wr(&fdp->fd_spin); 478 return (EBADF); 479 } 480 if (type == DUP_FIXED && old == new) { 481 *res = new; 482 spin_unlock_wr(&fdp->fd_spin); 483 return (0); 484 } 485 fp = fdp->fd_files[old].fp; 486 oldflags = fdp->fd_files[old].fileflags; 487 fhold(fp); /* MPSAFE - can be called with a spinlock held */ 488 489 /* 490 * Allocate a new descriptor if DUP_VARIABLE, or expand the table 491 * if the requested descriptor is beyond the current table size. 492 * 493 * This can block. Retry if the source descriptor no longer matches 494 * or if our expectation in the expansion case races. 495 * 496 * If we are not expanding or allocating a new decriptor, then reset 497 * the target descriptor to a reserved state so we have a uniform 498 * setup for the next code block. 499 */ 500 if (type == DUP_VARIABLE || new >= fdp->fd_nfiles) { 501 spin_unlock_wr(&fdp->fd_spin); 502 error = fdalloc(p, new, &newfd); 503 spin_lock_wr(&fdp->fd_spin); 504 if (error) { 505 spin_unlock_wr(&fdp->fd_spin); 506 fdrop(fp); 507 return (error); 508 } 509 /* 510 * Check for ripout 511 */ 512 if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) { 513 fsetfd_locked(fdp, NULL, newfd); 514 spin_unlock_wr(&fdp->fd_spin); 515 fdrop(fp); 516 goto retry; 517 } 518 /* 519 * Check for expansion race 520 */ 521 if (type != DUP_VARIABLE && new != newfd) { 522 fsetfd_locked(fdp, NULL, newfd); 523 spin_unlock_wr(&fdp->fd_spin); 524 fdrop(fp); 525 goto retry; 526 } 527 /* 528 * Check for ripout, newfd reused old (this case probably 529 * can't occur). 530 */ 531 if (old == newfd) { 532 fsetfd_locked(fdp, NULL, newfd); 533 spin_unlock_wr(&fdp->fd_spin); 534 fdrop(fp); 535 goto retry; 536 } 537 new = newfd; 538 delfp = NULL; 539 } else { 540 if (fdp->fd_files[new].reserved) { 541 spin_unlock_wr(&fdp->fd_spin); 542 fdrop(fp); 543 kprintf("Warning: dup(): target descriptor %d is reserved, waiting for it to be resolved\n", new); 544 tsleep(fdp, 0, "fdres", hz); 545 goto retry; 546 } 547 548 /* 549 * If the target descriptor was never allocated we have 550 * to allocate it. If it was we have to clean out the 551 * old descriptor. delfp inherits the ref from the 552 * descriptor table. 553 */ 554 delfp = fdp->fd_files[new].fp; 555 fdp->fd_files[new].fp = NULL; 556 fdp->fd_files[new].reserved = 1; 557 if (delfp == NULL) { 558 fdreserve_locked(fdp, new, 1); 559 if (new > fdp->fd_lastfile) 560 fdp->fd_lastfile = new; 561 } 562 563 } 564 565 /* 566 * NOTE: still holding an exclusive spinlock 567 */ 568 569 /* 570 * If a descriptor is being overwritten we may hve to tell 571 * fdfree() to sleep to ensure that all relevant process 572 * leaders can be traversed in closef(). 573 */ 574 if (delfp != NULL && p->p_fdtol != NULL) { 575 fdp->fd_holdleaderscount++; 576 holdleaders = 1; 577 } else { 578 holdleaders = 0; 579 } 580 KASSERT(delfp == NULL || type == DUP_FIXED, 581 ("dup() picked an open file")); 582 583 /* 584 * Duplicate the source descriptor, update lastfile. If the new 585 * descriptor was not allocated and we aren't replacing an existing 586 * descriptor we have to mark the descriptor as being in use. 587 * 588 * The fd_files[] array inherits fp's hold reference. 
589 */ 590 fsetfd_locked(fdp, fp, new); 591 fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE; 592 spin_unlock_wr(&fdp->fd_spin); 593 fdrop(fp); 594 *res = new; 595 596 /* 597 * If we dup'd over a valid file, we now own the reference to it 598 * and must dispose of it using closef() semantics (as if a 599 * close() were performed on it). 600 */ 601 if (delfp) { 602 closef(delfp, p); 603 if (holdleaders) { 604 spin_lock_wr(&fdp->fd_spin); 605 fdp->fd_holdleaderscount--; 606 if (fdp->fd_holdleaderscount == 0 && 607 fdp->fd_holdleaderswakeup != 0) { 608 fdp->fd_holdleaderswakeup = 0; 609 spin_unlock_wr(&fdp->fd_spin); 610 wakeup(&fdp->fd_holdleaderscount); 611 } else { 612 spin_unlock_wr(&fdp->fd_spin); 613 } 614 } 615 } 616 return (0); 617 } 618 619 /* 620 * If sigio is on the list associated with a process or process group, 621 * disable signalling from the device, remove sigio from the list and 622 * free sigio. 623 */ 624 void 625 funsetown(struct sigio *sigio) 626 { 627 if (sigio == NULL) 628 return; 629 crit_enter(); 630 *(sigio->sio_myref) = NULL; 631 crit_exit(); 632 if (sigio->sio_pgid < 0) { 633 SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio, 634 sigio, sio_pgsigio); 635 } else /* if ((*sigiop)->sio_pgid > 0) */ { 636 SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio, 637 sigio, sio_pgsigio); 638 } 639 crfree(sigio->sio_ucred); 640 kfree(sigio, M_SIGIO); 641 } 642 643 /* Free a list of sigio structures. */ 644 void 645 funsetownlst(struct sigiolst *sigiolst) 646 { 647 struct sigio *sigio; 648 649 while ((sigio = SLIST_FIRST(sigiolst)) != NULL) 650 funsetown(sigio); 651 } 652 653 /* 654 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg). 655 * 656 * After permission checking, add a sigio structure to the sigio list for 657 * the process or process group. 658 */ 659 int 660 fsetown(pid_t pgid, struct sigio **sigiop) 661 { 662 struct proc *proc; 663 struct pgrp *pgrp; 664 struct sigio *sigio; 665 666 if (pgid == 0) { 667 funsetown(*sigiop); 668 return (0); 669 } 670 if (pgid > 0) { 671 proc = pfind(pgid); 672 if (proc == NULL) 673 return (ESRCH); 674 675 /* 676 * Policy - Don't allow a process to FSETOWN a process 677 * in another session. 678 * 679 * Remove this test to allow maximum flexibility or 680 * restrict FSETOWN to the current process or process 681 * group for maximum safety. 682 */ 683 if (proc->p_session != curproc->p_session) 684 return (EPERM); 685 686 pgrp = NULL; 687 } else /* if (pgid < 0) */ { 688 pgrp = pgfind(-pgid); 689 if (pgrp == NULL) 690 return (ESRCH); 691 692 /* 693 * Policy - Don't allow a process to FSETOWN a process 694 * in another session. 695 * 696 * Remove this test to allow maximum flexibility or 697 * restrict FSETOWN to the current process or process 698 * group for maximum safety. 699 */ 700 if (pgrp->pg_session != curproc->p_session) 701 return (EPERM); 702 703 proc = NULL; 704 } 705 funsetown(*sigiop); 706 sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK); 707 if (pgid > 0) { 708 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio); 709 sigio->sio_proc = proc; 710 } else { 711 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio); 712 sigio->sio_pgrp = pgrp; 713 } 714 sigio->sio_pgid = pgid; 715 sigio->sio_ucred = crhold(curproc->p_ucred); 716 /* It would be convenient if p_ruid was in ucred. 
*/ 717 sigio->sio_ruid = curproc->p_ucred->cr_ruid; 718 sigio->sio_myref = sigiop; 719 crit_enter(); 720 *sigiop = sigio; 721 crit_exit(); 722 return (0); 723 } 724 725 /* 726 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg). 727 */ 728 pid_t 729 fgetown(struct sigio *sigio) 730 { 731 return (sigio != NULL ? sigio->sio_pgid : 0); 732 } 733 734 /* 735 * Close many file descriptors. 736 * 737 * MPSAFE 738 */ 739 int 740 sys_closefrom(struct closefrom_args *uap) 741 { 742 return(kern_closefrom(uap->fd)); 743 } 744 745 /* 746 * Close all file descriptors greater then or equal to fd 747 * 748 * MPSAFE 749 */ 750 int 751 kern_closefrom(int fd) 752 { 753 struct thread *td = curthread; 754 struct proc *p = td->td_proc; 755 struct filedesc *fdp; 756 757 KKASSERT(p); 758 fdp = p->p_fd; 759 760 if (fd < 0) 761 return (EINVAL); 762 763 /* 764 * NOTE: This function will skip unassociated descriptors and 765 * reserved descriptors that have not yet been assigned. 766 * fd_lastfile can change as a side effect of kern_close(). 767 */ 768 spin_lock_wr(&fdp->fd_spin); 769 while (fd <= fdp->fd_lastfile) { 770 if (fdp->fd_files[fd].fp != NULL) { 771 spin_unlock_wr(&fdp->fd_spin); 772 /* ok if this races another close */ 773 if (kern_close(fd) == EINTR) 774 return (EINTR); 775 spin_lock_wr(&fdp->fd_spin); 776 } 777 ++fd; 778 } 779 spin_unlock_wr(&fdp->fd_spin); 780 return (0); 781 } 782 783 /* 784 * Close a file descriptor. 785 * 786 * MPSAFE 787 */ 788 int 789 sys_close(struct close_args *uap) 790 { 791 return(kern_close(uap->fd)); 792 } 793 794 /* 795 * MPALMOSTSAFE - acquires mplock around knote_fdclose() calls 796 */ 797 int 798 kern_close(int fd) 799 { 800 struct thread *td = curthread; 801 struct proc *p = td->td_proc; 802 struct filedesc *fdp; 803 struct file *fp; 804 int error; 805 int holdleaders; 806 807 KKASSERT(p); 808 fdp = p->p_fd; 809 810 spin_lock_wr(&fdp->fd_spin); 811 if ((fp = funsetfd_locked(fdp, fd)) == NULL) { 812 spin_unlock_wr(&fdp->fd_spin); 813 return (EBADF); 814 } 815 holdleaders = 0; 816 if (p->p_fdtol != NULL) { 817 /* 818 * Ask fdfree() to sleep to ensure that all relevant 819 * process leaders can be traversed in closef(). 820 */ 821 fdp->fd_holdleaderscount++; 822 holdleaders = 1; 823 } 824 825 /* 826 * we now hold the fp reference that used to be owned by the descriptor 827 * array. 
828 */ 829 spin_unlock_wr(&fdp->fd_spin); 830 if (fd < fdp->fd_knlistsize) { 831 get_mplock(); 832 if (fd < fdp->fd_knlistsize) 833 knote_fdclose(p, fd); 834 rel_mplock(); 835 } 836 error = closef(fp, p); 837 if (holdleaders) { 838 spin_lock_wr(&fdp->fd_spin); 839 fdp->fd_holdleaderscount--; 840 if (fdp->fd_holdleaderscount == 0 && 841 fdp->fd_holdleaderswakeup != 0) { 842 fdp->fd_holdleaderswakeup = 0; 843 spin_unlock_wr(&fdp->fd_spin); 844 wakeup(&fdp->fd_holdleaderscount); 845 } else { 846 spin_unlock_wr(&fdp->fd_spin); 847 } 848 } 849 return (error); 850 } 851 852 /* 853 * shutdown_args(int fd, int how) 854 */ 855 int 856 kern_shutdown(int fd, int how) 857 { 858 struct thread *td = curthread; 859 struct proc *p = td->td_proc; 860 struct file *fp; 861 int error; 862 863 KKASSERT(p); 864 865 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL) 866 return (EBADF); 867 error = fo_shutdown(fp, how); 868 fdrop(fp); 869 870 return (error); 871 } 872 873 int 874 sys_shutdown(struct shutdown_args *uap) 875 { 876 int error; 877 878 error = kern_shutdown(uap->s, uap->how); 879 880 return (error); 881 } 882 883 /* 884 * MPSAFE 885 */ 886 int 887 kern_fstat(int fd, struct stat *ub) 888 { 889 struct thread *td = curthread; 890 struct proc *p = td->td_proc; 891 struct file *fp; 892 int error; 893 894 KKASSERT(p); 895 896 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL) 897 return (EBADF); 898 error = fo_stat(fp, ub, p->p_ucred); 899 fdrop(fp); 900 901 return (error); 902 } 903 904 /* 905 * Return status information about a file descriptor. 906 * 907 * MPSAFE 908 */ 909 int 910 sys_fstat(struct fstat_args *uap) 911 { 912 struct stat st; 913 int error; 914 915 error = kern_fstat(uap->fd, &st); 916 917 if (error == 0) 918 error = copyout(&st, uap->sb, sizeof(st)); 919 return (error); 920 } 921 922 /* 923 * Return pathconf information about a file descriptor. 924 */ 925 /* ARGSUSED */ 926 int 927 sys_fpathconf(struct fpathconf_args *uap) 928 { 929 struct thread *td = curthread; 930 struct proc *p = td->td_proc; 931 struct file *fp; 932 struct vnode *vp; 933 int error = 0; 934 935 KKASSERT(p); 936 937 if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL) 938 return (EBADF); 939 940 switch (fp->f_type) { 941 case DTYPE_PIPE: 942 case DTYPE_SOCKET: 943 if (uap->name != _PC_PIPE_BUF) { 944 error = EINVAL; 945 } else { 946 uap->sysmsg_result = PIPE_BUF; 947 error = 0; 948 } 949 break; 950 case DTYPE_FIFO: 951 case DTYPE_VNODE: 952 vp = (struct vnode *)fp->f_data; 953 error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg); 954 break; 955 default: 956 error = EOPNOTSUPP; 957 break; 958 } 959 fdrop(fp); 960 return(error); 961 } 962 963 static int fdexpand; 964 SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0, ""); 965 966 /* 967 * Grow the file table so it can hold through descriptor (want). 968 * 969 * The fdp's spinlock must be held exclusively on entry and may be held 970 * exclusively on return. The spinlock may be cycled by the routine. 971 * 972 * MPSAFE 973 */ 974 static void 975 fdgrow_locked(struct filedesc *fdp, int want) 976 { 977 struct fdnode *newfiles; 978 struct fdnode *oldfiles; 979 int nf, extra; 980 981 nf = fdp->fd_nfiles; 982 do { 983 /* nf has to be of the form 2^n - 1 */ 984 nf = 2 * nf + 1; 985 } while (nf <= want); 986 987 spin_unlock_wr(&fdp->fd_spin); 988 newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK); 989 spin_lock_wr(&fdp->fd_spin); 990 991 /* 992 * We could have raced another extend while we were not holding 993 * the spinlock. 
994 */ 995 if (fdp->fd_nfiles >= nf) { 996 spin_unlock_wr(&fdp->fd_spin); 997 kfree(newfiles, M_FILEDESC); 998 spin_lock_wr(&fdp->fd_spin); 999 return; 1000 } 1001 /* 1002 * Copy the existing ofile and ofileflags arrays 1003 * and zero the new portion of each array. 1004 */ 1005 extra = nf - fdp->fd_nfiles; 1006 bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode)); 1007 bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode)); 1008 1009 oldfiles = fdp->fd_files; 1010 fdp->fd_files = newfiles; 1011 fdp->fd_nfiles = nf; 1012 1013 if (oldfiles != fdp->fd_builtin_files) { 1014 spin_unlock_wr(&fdp->fd_spin); 1015 kfree(oldfiles, M_FILEDESC); 1016 spin_lock_wr(&fdp->fd_spin); 1017 } 1018 fdexpand++; 1019 } 1020 1021 /* 1022 * Number of nodes in right subtree, including the root. 1023 */ 1024 static __inline int 1025 right_subtree_size(int n) 1026 { 1027 return (n ^ (n | (n + 1))); 1028 } 1029 1030 /* 1031 * Bigger ancestor. 1032 */ 1033 static __inline int 1034 right_ancestor(int n) 1035 { 1036 return (n | (n + 1)); 1037 } 1038 1039 /* 1040 * Smaller ancestor. 1041 */ 1042 static __inline int 1043 left_ancestor(int n) 1044 { 1045 return ((n & (n + 1)) - 1); 1046 } 1047 1048 /* 1049 * Traverse the in-place binary tree buttom-up adjusting the allocation 1050 * count so scans can determine where free descriptors are located. 1051 * 1052 * MPSAFE - caller must be holding an exclusive spinlock on fdp 1053 */ 1054 static 1055 void 1056 fdreserve_locked(struct filedesc *fdp, int fd, int incr) 1057 { 1058 while (fd >= 0) { 1059 fdp->fd_files[fd].allocated += incr; 1060 KKASSERT(fdp->fd_files[fd].allocated >= 0); 1061 fd = left_ancestor(fd); 1062 } 1063 } 1064 1065 /* 1066 * Reserve a file descriptor for the process. If no error occurs, the 1067 * caller MUST at some point call fsetfd() or assign a file pointer 1068 * or dispose of the reservation. 1069 * 1070 * MPSAFE 1071 */ 1072 int 1073 fdalloc(struct proc *p, int want, int *result) 1074 { 1075 struct filedesc *fdp = p->p_fd; 1076 int fd, rsize, rsum, node, lim; 1077 1078 spin_lock_rd(&p->p_limit->p_spin); 1079 lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc); 1080 spin_unlock_rd(&p->p_limit->p_spin); 1081 if (want >= lim) 1082 return (EMFILE); 1083 spin_lock_wr(&fdp->fd_spin); 1084 if (want >= fdp->fd_nfiles) 1085 fdgrow_locked(fdp, want); 1086 1087 /* 1088 * Search for a free descriptor starting at the higher 1089 * of want or fd_freefile. If that fails, consider 1090 * expanding the ofile array. 1091 * 1092 * NOTE! the 'allocated' field is a cumulative recursive allocation 1093 * count. If we happen to see a value of 0 then we can shortcut 1094 * our search. Otherwise we run through through the tree going 1095 * down branches we know have free descriptor(s) until we hit a 1096 * leaf node. The leaf node will be free but will not necessarily 1097 * have an allocated field of 0. 1098 */ 1099 retry: 1100 /* move up the tree looking for a subtree with a free node */ 1101 for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim); 1102 fd = right_ancestor(fd)) { 1103 if (fdp->fd_files[fd].allocated == 0) 1104 goto found; 1105 1106 rsize = right_subtree_size(fd); 1107 if (fdp->fd_files[fd].allocated == rsize) 1108 continue; /* right subtree full */ 1109 1110 /* 1111 * Free fd is in the right subtree of the tree rooted at fd. 1112 * Call that subtree R. 
Look for the smallest (leftmost) 1113 * subtree of R with an unallocated fd: continue moving 1114 * down the left branch until encountering a full left 1115 * subtree, then move to the right. 1116 */ 1117 for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) { 1118 node = fd + rsize; 1119 rsum += fdp->fd_files[node].allocated; 1120 if (fdp->fd_files[fd].allocated == rsum + rsize) { 1121 fd = node; /* move to the right */ 1122 if (fdp->fd_files[node].allocated == 0) 1123 goto found; 1124 rsum = 0; 1125 } 1126 } 1127 goto found; 1128 } 1129 1130 /* 1131 * No space in current array. Expand? 1132 */ 1133 if (fdp->fd_nfiles >= lim) { 1134 spin_unlock_wr(&fdp->fd_spin); 1135 return (EMFILE); 1136 } 1137 fdgrow_locked(fdp, want); 1138 goto retry; 1139 1140 found: 1141 KKASSERT(fd < fdp->fd_nfiles); 1142 if (fd > fdp->fd_lastfile) 1143 fdp->fd_lastfile = fd; 1144 if (want <= fdp->fd_freefile) 1145 fdp->fd_freefile = fd; 1146 *result = fd; 1147 KKASSERT(fdp->fd_files[fd].fp == NULL); 1148 KKASSERT(fdp->fd_files[fd].reserved == 0); 1149 fdp->fd_files[fd].fileflags = 0; 1150 fdp->fd_files[fd].reserved = 1; 1151 fdreserve_locked(fdp, fd, 1); 1152 spin_unlock_wr(&fdp->fd_spin); 1153 return (0); 1154 } 1155 1156 /* 1157 * Check to see whether n user file descriptors 1158 * are available to the process p. 1159 * 1160 * MPSAFE 1161 */ 1162 int 1163 fdavail(struct proc *p, int n) 1164 { 1165 struct filedesc *fdp = p->p_fd; 1166 struct fdnode *fdnode; 1167 int i, lim, last; 1168 1169 spin_lock_rd(&p->p_limit->p_spin); 1170 lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc); 1171 spin_unlock_rd(&p->p_limit->p_spin); 1172 1173 spin_lock_rd(&fdp->fd_spin); 1174 if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) { 1175 spin_unlock_rd(&fdp->fd_spin); 1176 return (1); 1177 } 1178 last = min(fdp->fd_nfiles, lim); 1179 fdnode = &fdp->fd_files[fdp->fd_freefile]; 1180 for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) { 1181 if (fdnode->fp == NULL && --n <= 0) { 1182 spin_unlock_rd(&fdp->fd_spin); 1183 return (1); 1184 } 1185 } 1186 spin_unlock_rd(&fdp->fd_spin); 1187 return (0); 1188 } 1189 1190 /* 1191 * Revoke open descriptors referencing (f_data, f_type) 1192 * 1193 * Any revoke executed within a prison is only able to 1194 * revoke descriptors for processes within that prison. 1195 * 1196 * Returns 0 on success or an error code. 1197 */ 1198 struct fdrevoke_info { 1199 void *data; 1200 short type; 1201 short unused; 1202 int count; 1203 int intransit; 1204 struct ucred *cred; 1205 struct file *nfp; 1206 }; 1207 1208 static int fdrevoke_check_callback(struct file *fp, void *vinfo); 1209 static int fdrevoke_proc_callback(struct proc *p, void *vinfo); 1210 1211 int 1212 fdrevoke(void *f_data, short f_type, struct ucred *cred) 1213 { 1214 struct fdrevoke_info info; 1215 int error; 1216 1217 bzero(&info, sizeof(info)); 1218 info.data = f_data; 1219 info.type = f_type; 1220 info.cred = cred; 1221 error = falloc(NULL, &info.nfp, NULL); 1222 if (error) 1223 return (error); 1224 1225 /* 1226 * Scan the file pointer table once. dups do not dup file pointers, 1227 * only descriptors, so there is no leak. Set FREVOKED on the fps 1228 * being revoked. 1229 */ 1230 allfiles_scan_exclusive(fdrevoke_check_callback, &info); 1231 1232 /* 1233 * If any fps were marked track down the related descriptors 1234 * and close them. Any dup()s at this point will notice 1235 * the FREVOKED already set in the fp and do the right thing. 
1236 * 1237 * Any fps with non-zero msgcounts (aka sent over a unix-domain 1238 * socket) bumped the intransit counter and will require a 1239 * scan. Races against fps leaving the socket are closed by 1240 * the socket code checking for FREVOKED. 1241 */ 1242 if (info.count) 1243 allproc_scan(fdrevoke_proc_callback, &info); 1244 if (info.intransit) 1245 unp_revoke_gc(info.nfp); 1246 fdrop(info.nfp); 1247 return(0); 1248 } 1249 1250 /* 1251 * Locate matching file pointers directly. 1252 */ 1253 static int 1254 fdrevoke_check_callback(struct file *fp, void *vinfo) 1255 { 1256 struct fdrevoke_info *info = vinfo; 1257 1258 /* 1259 * File pointers already flagged for revokation are skipped. 1260 */ 1261 if (fp->f_flag & FREVOKED) 1262 return(0); 1263 1264 /* 1265 * If revoking from a prison file pointers created outside of 1266 * that prison, or file pointers without creds, cannot be revoked. 1267 */ 1268 if (info->cred->cr_prison && 1269 (fp->f_cred == NULL || 1270 info->cred->cr_prison != fp->f_cred->cr_prison)) { 1271 return(0); 1272 } 1273 1274 /* 1275 * If the file pointer matches then mark it for revocation. The 1276 * flag is currently only used by unp_revoke_gc(). 1277 * 1278 * info->count is a heuristic and can race in a SMP environment. 1279 */ 1280 if (info->data == fp->f_data && info->type == fp->f_type) { 1281 atomic_set_int(&fp->f_flag, FREVOKED); 1282 info->count += fp->f_count; 1283 if (fp->f_msgcount) 1284 ++info->intransit; 1285 } 1286 return(0); 1287 } 1288 1289 /* 1290 * Locate matching file pointers via process descriptor tables. 1291 */ 1292 static int 1293 fdrevoke_proc_callback(struct proc *p, void *vinfo) 1294 { 1295 struct fdrevoke_info *info = vinfo; 1296 struct filedesc *fdp; 1297 struct file *fp; 1298 int n; 1299 1300 if (p->p_stat == SIDL || p->p_stat == SZOMB) 1301 return(0); 1302 if (info->cred->cr_prison && 1303 info->cred->cr_prison != p->p_ucred->cr_prison) { 1304 return(0); 1305 } 1306 1307 /* 1308 * If the controlling terminal of the process matches the 1309 * vnode being revoked we clear the controlling terminal. 1310 * 1311 * The normal spec_close() may not catch this because it 1312 * uses curproc instead of p. 1313 */ 1314 if (p->p_session && info->type == DTYPE_VNODE && 1315 info->data == p->p_session->s_ttyvp) { 1316 p->p_session->s_ttyvp = NULL; 1317 vrele(info->data); 1318 } 1319 1320 /* 1321 * Softref the fdp to prevent it from being destroyed 1322 */ 1323 spin_lock_wr(&p->p_spin); 1324 if ((fdp = p->p_fd) == NULL) { 1325 spin_unlock_wr(&p->p_spin); 1326 return(0); 1327 } 1328 atomic_add_int(&fdp->fd_softrefs, 1); 1329 spin_unlock_wr(&p->p_spin); 1330 1331 /* 1332 * Locate and close any matching file descriptors. 1333 */ 1334 spin_lock_wr(&fdp->fd_spin); 1335 for (n = 0; n < fdp->fd_nfiles; ++n) { 1336 if ((fp = fdp->fd_files[n].fp) == NULL) 1337 continue; 1338 if (fp->f_flag & FREVOKED) { 1339 fhold(info->nfp); 1340 fdp->fd_files[n].fp = info->nfp; 1341 spin_unlock_wr(&fdp->fd_spin); 1342 closef(fp, p); 1343 spin_lock_wr(&fdp->fd_spin); 1344 --info->count; 1345 } 1346 } 1347 spin_unlock_wr(&fdp->fd_spin); 1348 atomic_subtract_int(&fdp->fd_softrefs, 1); 1349 return(0); 1350 } 1351 1352 /* 1353 * falloc: 1354 * Create a new open file structure and reserve a file decriptor 1355 * for the process that refers to it. 1356 * 1357 * Root creds are checked using p, or assumed if p is NULL. If 1358 * resultfd is non-NULL then p must also be non-NULL. No file 1359 * descriptor is reserved if resultfd is NULL. 
1360 * 1361 * A file pointer with a refcount of 1 is returned. Note that the 1362 * file pointer is NOT associated with the descriptor. If falloc 1363 * returns success, fsetfd() MUST be called to either associate the 1364 * file pointer or clear the reservation. 1365 * 1366 * MPSAFE 1367 */ 1368 int 1369 falloc(struct proc *p, struct file **resultfp, int *resultfd) 1370 { 1371 static struct timeval lastfail; 1372 static int curfail; 1373 struct file *fp; 1374 int error; 1375 1376 fp = NULL; 1377 1378 /* 1379 * Handle filetable full issues and root overfill. 1380 */ 1381 if (nfiles >= maxfiles - maxfilesrootres && 1382 ((p && p->p_ucred->cr_ruid != 0) || nfiles >= maxfiles)) { 1383 if (ppsratecheck(&lastfail, &curfail, 1)) { 1384 kprintf("kern.maxfiles limit exceeded by uid %d, please see tuning(7).\n", 1385 (p ? p->p_ucred->cr_ruid : -1)); 1386 } 1387 error = ENFILE; 1388 goto done; 1389 } 1390 1391 /* 1392 * Allocate a new file descriptor. 1393 */ 1394 fp = kmalloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO); 1395 spin_init(&fp->f_spin); 1396 fp->f_count = 1; 1397 fp->f_ops = &badfileops; 1398 fp->f_seqcount = 1; 1399 if (p) 1400 fp->f_cred = crhold(p->p_ucred); 1401 else 1402 fp->f_cred = crhold(proc0.p_ucred); 1403 spin_lock_wr(&filehead_spin); 1404 nfiles++; 1405 LIST_INSERT_HEAD(&filehead, fp, f_list); 1406 spin_unlock_wr(&filehead_spin); 1407 if (resultfd) { 1408 if ((error = fdalloc(p, 0, resultfd)) != 0) { 1409 fdrop(fp); 1410 fp = NULL; 1411 } 1412 } else { 1413 error = 0; 1414 } 1415 done: 1416 *resultfp = fp; 1417 return (error); 1418 } 1419 1420 /* 1421 * MPSAFE 1422 */ 1423 static 1424 int 1425 checkfpclosed(struct filedesc *fdp, int fd, struct file *fp) 1426 { 1427 int error; 1428 1429 spin_lock_rd(&fdp->fd_spin); 1430 if ((unsigned) fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp) 1431 error = EBADF; 1432 else 1433 error = 0; 1434 spin_unlock_rd(&fdp->fd_spin); 1435 return (error); 1436 } 1437 1438 /* 1439 * Associate a file pointer with a previously reserved file descriptor. 1440 * This function always succeeds. 1441 * 1442 * If fp is NULL, the file descriptor is returned to the pool. 
1443 */ 1444 1445 /* 1446 * MPSAFE (exclusive spinlock must be held on call) 1447 */ 1448 static void 1449 fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd) 1450 { 1451 KKASSERT((unsigned)fd < fdp->fd_nfiles); 1452 KKASSERT(fdp->fd_files[fd].reserved != 0); 1453 if (fp) { 1454 fhold(fp); 1455 fdp->fd_files[fd].fp = fp; 1456 fdp->fd_files[fd].reserved = 0; 1457 if (fp->f_type == DTYPE_KQUEUE) { 1458 if (fdp->fd_knlistsize < 0) 1459 fdp->fd_knlistsize = 0; 1460 } 1461 } else { 1462 fdp->fd_files[fd].reserved = 0; 1463 fdreserve_locked(fdp, fd, -1); 1464 fdfixup_locked(fdp, fd); 1465 } 1466 } 1467 1468 /* 1469 * MPSAFE 1470 */ 1471 void 1472 fsetfd(struct proc *p, struct file *fp, int fd) 1473 { 1474 struct filedesc *fdp = p->p_fd; 1475 1476 spin_lock_wr(&fdp->fd_spin); 1477 fsetfd_locked(fdp, fp, fd); 1478 spin_unlock_wr(&fdp->fd_spin); 1479 } 1480 1481 /* 1482 * MPSAFE (exclusive spinlock must be held on call) 1483 */ 1484 static 1485 struct file * 1486 funsetfd_locked(struct filedesc *fdp, int fd) 1487 { 1488 struct file *fp; 1489 1490 if ((unsigned)fd >= fdp->fd_nfiles) 1491 return (NULL); 1492 if ((fp = fdp->fd_files[fd].fp) == NULL) 1493 return (NULL); 1494 fdp->fd_files[fd].fp = NULL; 1495 fdp->fd_files[fd].fileflags = 0; 1496 1497 fdreserve_locked(fdp, fd, -1); 1498 fdfixup_locked(fdp, fd); 1499 return(fp); 1500 } 1501 1502 /* 1503 * MPSAFE 1504 */ 1505 int 1506 fgetfdflags(struct filedesc *fdp, int fd, int *flagsp) 1507 { 1508 int error; 1509 1510 spin_lock_rd(&fdp->fd_spin); 1511 if (((u_int)fd) >= fdp->fd_nfiles) { 1512 error = EBADF; 1513 } else if (fdp->fd_files[fd].fp == NULL) { 1514 error = EBADF; 1515 } else { 1516 *flagsp = fdp->fd_files[fd].fileflags; 1517 error = 0; 1518 } 1519 spin_unlock_rd(&fdp->fd_spin); 1520 return (error); 1521 } 1522 1523 /* 1524 * MPSAFE 1525 */ 1526 int 1527 fsetfdflags(struct filedesc *fdp, int fd, int add_flags) 1528 { 1529 int error; 1530 1531 spin_lock_wr(&fdp->fd_spin); 1532 if (((u_int)fd) >= fdp->fd_nfiles) { 1533 error = EBADF; 1534 } else if (fdp->fd_files[fd].fp == NULL) { 1535 error = EBADF; 1536 } else { 1537 fdp->fd_files[fd].fileflags |= add_flags; 1538 error = 0; 1539 } 1540 spin_unlock_wr(&fdp->fd_spin); 1541 return (error); 1542 } 1543 1544 /* 1545 * MPSAFE 1546 */ 1547 int 1548 fclrfdflags(struct filedesc *fdp, int fd, int rem_flags) 1549 { 1550 int error; 1551 1552 spin_lock_wr(&fdp->fd_spin); 1553 if (((u_int)fd) >= fdp->fd_nfiles) { 1554 error = EBADF; 1555 } else if (fdp->fd_files[fd].fp == NULL) { 1556 error = EBADF; 1557 } else { 1558 fdp->fd_files[fd].fileflags &= ~rem_flags; 1559 error = 0; 1560 } 1561 spin_unlock_wr(&fdp->fd_spin); 1562 return (error); 1563 } 1564 1565 void 1566 fsetcred(struct file *fp, struct ucred *cr) 1567 { 1568 crhold(cr); 1569 crfree(fp->f_cred); 1570 fp->f_cred = cr; 1571 } 1572 1573 /* 1574 * Free a file descriptor. 1575 */ 1576 static 1577 void 1578 ffree(struct file *fp) 1579 { 1580 KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!")); 1581 spin_lock_wr(&filehead_spin); 1582 LIST_REMOVE(fp, f_list); 1583 nfiles--; 1584 spin_unlock_wr(&filehead_spin); 1585 crfree(fp->f_cred); 1586 if (fp->f_nchandle.ncp) 1587 cache_drop(&fp->f_nchandle); 1588 kfree(fp, M_FILE); 1589 } 1590 1591 /* 1592 * called from init_main, initialize filedesc0 for proc0. 
1593 */ 1594 void 1595 fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask) 1596 { 1597 p0->p_fd = fdp0; 1598 p0->p_fdtol = NULL; 1599 fdp0->fd_refcnt = 1; 1600 fdp0->fd_cmask = cmask; 1601 fdp0->fd_files = fdp0->fd_builtin_files; 1602 fdp0->fd_nfiles = NDFILE; 1603 fdp0->fd_lastfile = -1; 1604 spin_init(&fdp0->fd_spin); 1605 } 1606 1607 /* 1608 * Build a new filedesc structure. 1609 * 1610 * NOT MPSAFE (vref) 1611 */ 1612 struct filedesc * 1613 fdinit(struct proc *p) 1614 { 1615 struct filedesc *newfdp; 1616 struct filedesc *fdp = p->p_fd; 1617 1618 newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO); 1619 spin_lock_rd(&fdp->fd_spin); 1620 if (fdp->fd_cdir) { 1621 newfdp->fd_cdir = fdp->fd_cdir; 1622 vref(newfdp->fd_cdir); 1623 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir); 1624 } 1625 1626 /* 1627 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of 1628 * proc0, but should unconditionally exist in other processes. 1629 */ 1630 if (fdp->fd_rdir) { 1631 newfdp->fd_rdir = fdp->fd_rdir; 1632 vref(newfdp->fd_rdir); 1633 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir); 1634 } 1635 if (fdp->fd_jdir) { 1636 newfdp->fd_jdir = fdp->fd_jdir; 1637 vref(newfdp->fd_jdir); 1638 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir); 1639 } 1640 spin_unlock_rd(&fdp->fd_spin); 1641 1642 /* Create the file descriptor table. */ 1643 newfdp->fd_refcnt = 1; 1644 newfdp->fd_cmask = cmask; 1645 newfdp->fd_files = newfdp->fd_builtin_files; 1646 newfdp->fd_nfiles = NDFILE; 1647 newfdp->fd_knlistsize = -1; 1648 newfdp->fd_lastfile = -1; 1649 spin_init(&newfdp->fd_spin); 1650 1651 return (newfdp); 1652 } 1653 1654 /* 1655 * Share a filedesc structure. 1656 * 1657 * MPSAFE 1658 */ 1659 struct filedesc * 1660 fdshare(struct proc *p) 1661 { 1662 struct filedesc *fdp; 1663 1664 fdp = p->p_fd; 1665 spin_lock_wr(&fdp->fd_spin); 1666 fdp->fd_refcnt++; 1667 spin_unlock_wr(&fdp->fd_spin); 1668 return (fdp); 1669 } 1670 1671 /* 1672 * Copy a filedesc structure. 1673 * 1674 * MPSAFE 1675 */ 1676 struct filedesc * 1677 fdcopy(struct proc *p) 1678 { 1679 struct filedesc *fdp = p->p_fd; 1680 struct filedesc *newfdp; 1681 struct fdnode *fdnode; 1682 int i; 1683 int ni; 1684 1685 /* 1686 * Certain daemons might not have file descriptors. 1687 */ 1688 if (fdp == NULL) 1689 return (NULL); 1690 1691 /* 1692 * Allocate the new filedesc and fd_files[] array. This can race 1693 * with operations by other threads on the fdp so we have to be 1694 * careful. 1695 */ 1696 newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK | M_ZERO); 1697 again: 1698 spin_lock_rd(&fdp->fd_spin); 1699 if (fdp->fd_lastfile < NDFILE) { 1700 newfdp->fd_files = newfdp->fd_builtin_files; 1701 i = NDFILE; 1702 } else { 1703 /* 1704 * We have to allocate (N^2-1) entries for our in-place 1705 * binary tree. Allow the table to shrink. 1706 */ 1707 i = fdp->fd_nfiles; 1708 ni = (i - 1) / 2; 1709 while (ni > fdp->fd_lastfile && ni > NDFILE) { 1710 i = ni; 1711 ni = (i - 1) / 2; 1712 } 1713 spin_unlock_rd(&fdp->fd_spin); 1714 newfdp->fd_files = kmalloc(i * sizeof(struct fdnode), 1715 M_FILEDESC, M_WAITOK | M_ZERO); 1716 1717 /* 1718 * Check for race, retry 1719 */ 1720 spin_lock_rd(&fdp->fd_spin); 1721 if (i <= fdp->fd_lastfile) { 1722 spin_unlock_rd(&fdp->fd_spin); 1723 kfree(newfdp->fd_files, M_FILEDESC); 1724 goto again; 1725 } 1726 } 1727 1728 /* 1729 * Dup the remaining fields. vref() and cache_hold() can be 1730 * safely called while holding the read spinlock on fdp. 
1731 * 1732 * The read spinlock on fdp is still being held. 1733 * 1734 * NOTE: vref and cache_hold calls for the case where the vnode 1735 * or cache entry already has at least one ref may be called 1736 * while holding spin locks. 1737 */ 1738 if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) { 1739 vref(newfdp->fd_cdir); 1740 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir); 1741 } 1742 /* 1743 * We must check for fd_rdir here, at least for now because 1744 * the init process is created before we have access to the 1745 * rootvode to take a reference to it. 1746 */ 1747 if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) { 1748 vref(newfdp->fd_rdir); 1749 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir); 1750 } 1751 if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) { 1752 vref(newfdp->fd_jdir); 1753 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir); 1754 } 1755 newfdp->fd_refcnt = 1; 1756 newfdp->fd_nfiles = i; 1757 newfdp->fd_lastfile = fdp->fd_lastfile; 1758 newfdp->fd_freefile = fdp->fd_freefile; 1759 newfdp->fd_cmask = fdp->fd_cmask; 1760 newfdp->fd_knlist = NULL; 1761 newfdp->fd_knlistsize = -1; 1762 newfdp->fd_knhash = NULL; 1763 newfdp->fd_knhashmask = 0; 1764 spin_init(&newfdp->fd_spin); 1765 1766 /* 1767 * Copy the descriptor table through (i). This also copies the 1768 * allocation state. Then go through and ref the file pointers 1769 * and clean up any KQ descriptors. 1770 * 1771 * kq descriptors cannot be copied. Since we haven't ref'd the 1772 * copied files yet we can ignore the return value from funsetfd(). 1773 * 1774 * The read spinlock on fdp is still being held. 1775 */ 1776 bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode)); 1777 for (i = 0 ; i < newfdp->fd_nfiles; ++i) { 1778 fdnode = &newfdp->fd_files[i]; 1779 if (fdnode->reserved) { 1780 fdreserve_locked(newfdp, i, -1); 1781 fdnode->reserved = 0; 1782 fdfixup_locked(newfdp, i); 1783 } else if (fdnode->fp) { 1784 if (fdnode->fp->f_type == DTYPE_KQUEUE) { 1785 (void)funsetfd_locked(newfdp, i); 1786 } else { 1787 fhold(fdnode->fp); 1788 } 1789 } 1790 } 1791 spin_unlock_rd(&fdp->fd_spin); 1792 return (newfdp); 1793 } 1794 1795 /* 1796 * Release a filedesc structure. 1797 * 1798 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE) 1799 */ 1800 void 1801 fdfree(struct proc *p, struct filedesc *repl) 1802 { 1803 struct filedesc *fdp; 1804 struct fdnode *fdnode; 1805 int i; 1806 struct filedesc_to_leader *fdtol; 1807 struct file *fp; 1808 struct vnode *vp; 1809 struct flock lf; 1810 1811 /* 1812 * Certain daemons might not have file descriptors. 1813 */ 1814 fdp = p->p_fd; 1815 if (fdp == NULL) { 1816 p->p_fd = repl; 1817 return; 1818 } 1819 1820 /* 1821 * Severe messing around to follow. 
1822 */ 1823 spin_lock_wr(&fdp->fd_spin); 1824 1825 /* Check for special need to clear POSIX style locks */ 1826 fdtol = p->p_fdtol; 1827 if (fdtol != NULL) { 1828 KASSERT(fdtol->fdl_refcount > 0, 1829 ("filedesc_to_refcount botch: fdl_refcount=%d", 1830 fdtol->fdl_refcount)); 1831 if (fdtol->fdl_refcount == 1 && 1832 (p->p_leader->p_flag & P_ADVLOCK) != 0) { 1833 for (i = 0; i <= fdp->fd_lastfile; ++i) { 1834 fdnode = &fdp->fd_files[i]; 1835 if (fdnode->fp == NULL || 1836 fdnode->fp->f_type != DTYPE_VNODE) { 1837 continue; 1838 } 1839 fp = fdnode->fp; 1840 fhold(fp); 1841 spin_unlock_wr(&fdp->fd_spin); 1842 1843 lf.l_whence = SEEK_SET; 1844 lf.l_start = 0; 1845 lf.l_len = 0; 1846 lf.l_type = F_UNLCK; 1847 vp = (struct vnode *)fp->f_data; 1848 (void) VOP_ADVLOCK(vp, 1849 (caddr_t)p->p_leader, 1850 F_UNLCK, 1851 &lf, 1852 F_POSIX); 1853 fdrop(fp); 1854 spin_lock_wr(&fdp->fd_spin); 1855 } 1856 } 1857 retry: 1858 if (fdtol->fdl_refcount == 1) { 1859 if (fdp->fd_holdleaderscount > 0 && 1860 (p->p_leader->p_flag & P_ADVLOCK) != 0) { 1861 /* 1862 * close() or do_dup() has cleared a reference 1863 * in a shared file descriptor table. 1864 */ 1865 fdp->fd_holdleaderswakeup = 1; 1866 ssleep(&fdp->fd_holdleaderscount, 1867 &fdp->fd_spin, 0, "fdlhold", 0); 1868 goto retry; 1869 } 1870 if (fdtol->fdl_holdcount > 0) { 1871 /* 1872 * Ensure that fdtol->fdl_leader 1873 * remains valid in closef(). 1874 */ 1875 fdtol->fdl_wakeup = 1; 1876 ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0); 1877 goto retry; 1878 } 1879 } 1880 fdtol->fdl_refcount--; 1881 if (fdtol->fdl_refcount == 0 && 1882 fdtol->fdl_holdcount == 0) { 1883 fdtol->fdl_next->fdl_prev = fdtol->fdl_prev; 1884 fdtol->fdl_prev->fdl_next = fdtol->fdl_next; 1885 } else { 1886 fdtol = NULL; 1887 } 1888 p->p_fdtol = NULL; 1889 if (fdtol != NULL) { 1890 spin_unlock_wr(&fdp->fd_spin); 1891 kfree(fdtol, M_FILEDESC_TO_LEADER); 1892 spin_lock_wr(&fdp->fd_spin); 1893 } 1894 } 1895 if (--fdp->fd_refcnt > 0) { 1896 spin_unlock_wr(&fdp->fd_spin); 1897 spin_lock_wr(&p->p_spin); 1898 p->p_fd = repl; 1899 spin_unlock_wr(&p->p_spin); 1900 return; 1901 } 1902 1903 /* 1904 * Even though we are the last reference to the structure allproc 1905 * scans may still reference the structure. Maintain proper 1906 * locks until we can replace p->p_fd. 1907 * 1908 * Also note that kqueue's closef still needs to reference the 1909 * fdp via p->p_fd, so we have to close the descriptors before 1910 * we replace p->p_fd. 1911 */ 1912 for (i = 0; i <= fdp->fd_lastfile; ++i) { 1913 if (fdp->fd_files[i].fp) { 1914 fp = funsetfd_locked(fdp, i); 1915 if (fp) { 1916 spin_unlock_wr(&fdp->fd_spin); 1917 closef(fp, p); 1918 spin_lock_wr(&fdp->fd_spin); 1919 } 1920 } 1921 } 1922 spin_unlock_wr(&fdp->fd_spin); 1923 1924 /* 1925 * Interlock against an allproc scan operations (typically frevoke). 1926 */ 1927 spin_lock_wr(&p->p_spin); 1928 p->p_fd = repl; 1929 spin_unlock_wr(&p->p_spin); 1930 1931 /* 1932 * Wait for any softrefs to go away. This race rarely occurs so 1933 * we can use a non-critical-path style poll/sleep loop. The 1934 * race only occurs against allproc scans. 1935 * 1936 * No new softrefs can occur with the fdp disconnected from the 1937 * process. 
1938 */ 1939 if (fdp->fd_softrefs) { 1940 kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid); 1941 while (fdp->fd_softrefs) 1942 tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1); 1943 } 1944 1945 if (fdp->fd_files != fdp->fd_builtin_files) 1946 kfree(fdp->fd_files, M_FILEDESC); 1947 if (fdp->fd_cdir) { 1948 cache_drop(&fdp->fd_ncdir); 1949 vrele(fdp->fd_cdir); 1950 } 1951 if (fdp->fd_rdir) { 1952 cache_drop(&fdp->fd_nrdir); 1953 vrele(fdp->fd_rdir); 1954 } 1955 if (fdp->fd_jdir) { 1956 cache_drop(&fdp->fd_njdir); 1957 vrele(fdp->fd_jdir); 1958 } 1959 if (fdp->fd_knlist) 1960 kfree(fdp->fd_knlist, M_KQUEUE); 1961 if (fdp->fd_knhash) 1962 kfree(fdp->fd_knhash, M_KQUEUE); 1963 kfree(fdp, M_FILEDESC); 1964 } 1965 1966 /* 1967 * Retrieve and reference the file pointer associated with a descriptor. 1968 * 1969 * MPSAFE 1970 */ 1971 struct file * 1972 holdfp(struct filedesc *fdp, int fd, int flag) 1973 { 1974 struct file* fp; 1975 1976 spin_lock_rd(&fdp->fd_spin); 1977 if (((u_int)fd) >= fdp->fd_nfiles) { 1978 fp = NULL; 1979 goto done; 1980 } 1981 if ((fp = fdp->fd_files[fd].fp) == NULL) 1982 goto done; 1983 if ((fp->f_flag & flag) == 0 && flag != -1) { 1984 fp = NULL; 1985 goto done; 1986 } 1987 fhold(fp); 1988 done: 1989 spin_unlock_rd(&fdp->fd_spin); 1990 return (fp); 1991 } 1992 1993 /* 1994 * holdsock() - load the struct file pointer associated 1995 * with a socket into *fpp. If an error occurs, non-zero 1996 * will be returned and *fpp will be set to NULL. 1997 * 1998 * MPSAFE 1999 */ 2000 int 2001 holdsock(struct filedesc *fdp, int fd, struct file **fpp) 2002 { 2003 struct file *fp; 2004 int error; 2005 2006 spin_lock_rd(&fdp->fd_spin); 2007 if ((unsigned)fd >= fdp->fd_nfiles) { 2008 error = EBADF; 2009 fp = NULL; 2010 goto done; 2011 } 2012 if ((fp = fdp->fd_files[fd].fp) == NULL) { 2013 error = EBADF; 2014 goto done; 2015 } 2016 if (fp->f_type != DTYPE_SOCKET) { 2017 error = ENOTSOCK; 2018 goto done; 2019 } 2020 fhold(fp); 2021 error = 0; 2022 done: 2023 spin_unlock_rd(&fdp->fd_spin); 2024 *fpp = fp; 2025 return (error); 2026 } 2027 2028 /* 2029 * Convert a user file descriptor to a held file pointer. 2030 * 2031 * MPSAFE 2032 */ 2033 int 2034 holdvnode(struct filedesc *fdp, int fd, struct file **fpp) 2035 { 2036 struct file *fp; 2037 int error; 2038 2039 spin_lock_rd(&fdp->fd_spin); 2040 if ((unsigned)fd >= fdp->fd_nfiles) { 2041 error = EBADF; 2042 fp = NULL; 2043 goto done; 2044 } 2045 if ((fp = fdp->fd_files[fd].fp) == NULL) { 2046 error = EBADF; 2047 goto done; 2048 } 2049 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) { 2050 fp = NULL; 2051 error = EINVAL; 2052 goto done; 2053 } 2054 fhold(fp); 2055 error = 0; 2056 done: 2057 spin_unlock_rd(&fdp->fd_spin); 2058 *fpp = fp; 2059 return (error); 2060 } 2061 2062 /* 2063 * For setugid programs, we don't want to people to use that setugidness 2064 * to generate error messages which write to a file which otherwise would 2065 * otherwise be off-limits to the process. 2066 * 2067 * This is a gross hack to plug the hole. A better solution would involve 2068 * a special vop or other form of generalized access control mechanism. We 2069 * go ahead and just reject all procfs file systems accesses as dangerous. 2070 * 2071 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is 2072 * sufficient. We also don't for check setugidness since we know we are. 
2073 */ 2074 static int 2075 is_unsafe(struct file *fp) 2076 { 2077 if (fp->f_type == DTYPE_VNODE && 2078 ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS) 2079 return (1); 2080 return (0); 2081 } 2082 2083 /* 2084 * Make this setguid thing safe, if at all possible. 2085 * 2086 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose() 2087 */ 2088 void 2089 setugidsafety(struct proc *p) 2090 { 2091 struct filedesc *fdp = p->p_fd; 2092 int i; 2093 2094 /* Certain daemons might not have file descriptors. */ 2095 if (fdp == NULL) 2096 return; 2097 2098 /* 2099 * note: fdp->fd_files may be reallocated out from under us while 2100 * we are blocked in a close. Be careful! 2101 */ 2102 for (i = 0; i <= fdp->fd_lastfile; i++) { 2103 if (i > 2) 2104 break; 2105 if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) { 2106 struct file *fp; 2107 2108 if (i < fdp->fd_knlistsize) 2109 knote_fdclose(p, i); 2110 /* 2111 * NULL-out descriptor prior to close to avoid 2112 * a race while close blocks. 2113 */ 2114 if ((fp = funsetfd_locked(fdp, i)) != NULL) 2115 closef(fp, p); 2116 } 2117 } 2118 } 2119 2120 /* 2121 * Close any files on exec? 2122 * 2123 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose() 2124 */ 2125 void 2126 fdcloseexec(struct proc *p) 2127 { 2128 struct filedesc *fdp = p->p_fd; 2129 int i; 2130 2131 /* Certain daemons might not have file descriptors. */ 2132 if (fdp == NULL) 2133 return; 2134 2135 /* 2136 * We cannot cache fd_files since operations may block and rip 2137 * them out from under us. 2138 */ 2139 for (i = 0; i <= fdp->fd_lastfile; i++) { 2140 if (fdp->fd_files[i].fp != NULL && 2141 (fdp->fd_files[i].fileflags & UF_EXCLOSE)) { 2142 struct file *fp; 2143 2144 if (i < fdp->fd_knlistsize) 2145 knote_fdclose(p, i); 2146 /* 2147 * NULL-out descriptor prior to close to avoid 2148 * a race while close blocks. 2149 */ 2150 if ((fp = funsetfd_locked(fdp, i)) != NULL) 2151 closef(fp, p); 2152 } 2153 } 2154 } 2155 2156 /* 2157 * It is unsafe for set[ug]id processes to be started with file 2158 * descriptors 0..2 closed, as these descriptors are given implicit 2159 * significance in the Standard C library. fdcheckstd() will create a 2160 * descriptor referencing /dev/null for each of stdin, stdout, and 2161 * stderr that is not already open. 2162 * 2163 * NOT MPSAFE - calls falloc, vn_open, etc 2164 */ 2165 int 2166 fdcheckstd(struct proc *p) 2167 { 2168 struct nlookupdata nd; 2169 struct filedesc *fdp; 2170 struct file *fp; 2171 int retval; 2172 int i, error, flags, devnull; 2173 2174 fdp = p->p_fd; 2175 if (fdp == NULL) 2176 return (0); 2177 devnull = -1; 2178 error = 0; 2179 for (i = 0; i < 3; i++) { 2180 if (fdp->fd_files[i].fp != NULL) 2181 continue; 2182 if (devnull < 0) { 2183 if ((error = falloc(p, &fp, &devnull)) != 0) 2184 break; 2185 2186 error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE, 2187 NLC_FOLLOW|NLC_LOCKVP); 2188 flags = FREAD | FWRITE; 2189 if (error == 0) 2190 error = vn_open(&nd, fp, flags, 0); 2191 if (error == 0) 2192 fsetfd(p, fp, devnull); 2193 else 2194 fsetfd(p, NULL, devnull); 2195 fdrop(fp); 2196 nlookup_done(&nd); 2197 if (error) 2198 break; 2199 KKASSERT(i == devnull); 2200 } else { 2201 error = kern_dup(DUP_FIXED, devnull, i, &retval); 2202 if (error != 0) 2203 break; 2204 } 2205 } 2206 return (error); 2207 } 2208 2209 /* 2210 * Internal form of close. 2211 * Decrement reference count on file structure. 2212 * Note: td and/or p may be NULL when closing a file 2213 * that was being passed in a message. 
2214 * 2215 * MPALMOSTSAFE - acquires mplock for VOP operations 2216 */ 2217 int 2218 closef(struct file *fp, struct proc *p) 2219 { 2220 struct vnode *vp; 2221 struct flock lf; 2222 struct filedesc_to_leader *fdtol; 2223 2224 if (fp == NULL) 2225 return (0); 2226 2227 /* 2228 * POSIX record locking dictates that any close releases ALL 2229 * locks owned by this process. This is handled by setting 2230 * a flag in the unlock to free ONLY locks obeying POSIX 2231 * semantics, and not to free BSD-style file locks. 2232 * If the descriptor was in a message, POSIX-style locks 2233 * aren't passed with the descriptor. 2234 */ 2235 if (p != NULL && fp->f_type == DTYPE_VNODE && 2236 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS) 2237 ) { 2238 get_mplock(); 2239 if ((p->p_leader->p_flag & P_ADVLOCK) != 0) { 2240 lf.l_whence = SEEK_SET; 2241 lf.l_start = 0; 2242 lf.l_len = 0; 2243 lf.l_type = F_UNLCK; 2244 vp = (struct vnode *)fp->f_data; 2245 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK, 2246 &lf, F_POSIX); 2247 } 2248 fdtol = p->p_fdtol; 2249 if (fdtol != NULL) { 2250 /* 2251 * Handle special case where file descriptor table 2252 * is shared between multiple process leaders. 2253 */ 2254 for (fdtol = fdtol->fdl_next; 2255 fdtol != p->p_fdtol; 2256 fdtol = fdtol->fdl_next) { 2257 if ((fdtol->fdl_leader->p_flag & 2258 P_ADVLOCK) == 0) 2259 continue; 2260 fdtol->fdl_holdcount++; 2261 lf.l_whence = SEEK_SET; 2262 lf.l_start = 0; 2263 lf.l_len = 0; 2264 lf.l_type = F_UNLCK; 2265 vp = (struct vnode *)fp->f_data; 2266 (void) VOP_ADVLOCK(vp, 2267 (caddr_t)fdtol->fdl_leader, 2268 F_UNLCK, &lf, F_POSIX); 2269 fdtol->fdl_holdcount--; 2270 if (fdtol->fdl_holdcount == 0 && 2271 fdtol->fdl_wakeup != 0) { 2272 fdtol->fdl_wakeup = 0; 2273 wakeup(fdtol); 2274 } 2275 } 2276 } 2277 rel_mplock(); 2278 } 2279 return (fdrop(fp)); 2280 } 2281 2282 /* 2283 * MPSAFE 2284 * 2285 * fhold() can only be called if f_count is already at least 1 (i.e. the 2286 * caller of fhold() already has a reference to the file pointer in some 2287 * manner or other). 2288 * 2289 * f_count is not spin-locked. Instead, atomic ops are used for 2290 * incrementing, decrementing, and handling the 1->0 transition. 2291 */ 2292 void 2293 fhold(struct file *fp) 2294 { 2295 atomic_add_int(&fp->f_count, 1); 2296 } 2297 2298 /* 2299 * fdrop() - drop a reference to a descriptor 2300 * 2301 * MPALMOSTSAFE - acquires mplock for final close sequence 2302 */ 2303 int 2304 fdrop(struct file *fp) 2305 { 2306 struct flock lf; 2307 struct vnode *vp; 2308 int error; 2309 2310 /* 2311 * A combined fetch and subtract is needed to properly detect 2312 * 1->0 transitions, otherwise two cpus dropping from a ref 2313 * count of 2 might both try to run the 1->0 code. 2314 */ 2315 if (atomic_fetchadd_int(&fp->f_count, -1) > 1) 2316 return (0); 2317 2318 get_mplock(); 2319 2320 /* 2321 * The last reference has gone away, we own the fp structure free 2322 * and clear. 
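	 *
	 * (Illustration of why the combined fetch-and-subtract above is
	 * needed: with a plain "--fp->f_count; if (fp->f_count == 0)",
	 * two cpus each dropping a file with f_count == 2 could both
	 * observe 0 and both run this final-close path.  Because
	 * atomic_fetchadd_int() returns the pre-decrement value, exactly
	 * one caller sees 1 and falls through to here.)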
2323 */ 2324 if (fp->f_count < 0) 2325 panic("fdrop: count < 0"); 2326 if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE && 2327 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS) 2328 ) { 2329 lf.l_whence = SEEK_SET; 2330 lf.l_start = 0; 2331 lf.l_len = 0; 2332 lf.l_type = F_UNLCK; 2333 vp = (struct vnode *)fp->f_data; 2334 (void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0); 2335 } 2336 if (fp->f_ops != &badfileops) 2337 error = fo_close(fp); 2338 else 2339 error = 0; 2340 ffree(fp); 2341 rel_mplock(); 2342 return (error); 2343 } 2344 2345 /* 2346 * Apply an advisory lock on a file descriptor. 2347 * 2348 * Just attempt to get a record lock of the requested type on 2349 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0). 2350 */ 2351 int 2352 sys_flock(struct flock_args *uap) 2353 { 2354 struct proc *p = curproc; 2355 struct file *fp; 2356 struct vnode *vp; 2357 struct flock lf; 2358 int error; 2359 2360 if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL) 2361 return (EBADF); 2362 if (fp->f_type != DTYPE_VNODE) { 2363 error = EOPNOTSUPP; 2364 goto done; 2365 } 2366 vp = (struct vnode *)fp->f_data; 2367 lf.l_whence = SEEK_SET; 2368 lf.l_start = 0; 2369 lf.l_len = 0; 2370 if (uap->how & LOCK_UN) { 2371 lf.l_type = F_UNLCK; 2372 fp->f_flag &= ~FHASLOCK; 2373 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0); 2374 goto done; 2375 } 2376 if (uap->how & LOCK_EX) 2377 lf.l_type = F_WRLCK; 2378 else if (uap->how & LOCK_SH) 2379 lf.l_type = F_RDLCK; 2380 else { 2381 error = EBADF; 2382 goto done; 2383 } 2384 fp->f_flag |= FHASLOCK; 2385 if (uap->how & LOCK_NB) 2386 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0); 2387 else 2388 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT); 2389 done: 2390 fdrop(fp); 2391 return (error); 2392 } 2393 2394 /* 2395 * File Descriptor pseudo-device driver (/dev/fd/). 2396 * 2397 * Opening minor device N dup()s the file (if any) connected to file 2398 * descriptor N belonging to the calling process. Note that this driver 2399 * consists of only the ``open()'' routine, because all subsequent 2400 * references to this file will be direct to the other driver. 2401 */ 2402 /* ARGSUSED */ 2403 static int 2404 fdopen(struct dev_open_args *ap) 2405 { 2406 thread_t td = curthread; 2407 2408 KKASSERT(td->td_lwp != NULL); 2409 2410 /* 2411 * XXX Kludge: set curlwp->lwp_dupfd to contain the value of the 2412 * the file descriptor being sought for duplication. The error 2413 * return ensures that the vnode for this device will be released 2414 * by vn_open. Open will detect this special error and take the 2415 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN 2416 * will simply report the error. 2417 */ 2418 td->td_lwp->lwp_dupfd = minor(ap->a_head.a_dev); 2419 return (ENODEV); 2420 } 2421 2422 /* 2423 * The caller has reserved the file descriptor dfd for us. On success we 2424 * must fsetfd() it. On failure the caller will clean it up. 2425 * 2426 * NOT MPSAFE - isn't getting spinlocks, possibly other things 2427 */ 2428 int 2429 dupfdopen(struct proc *p, int dfd, int sfd, int mode, int error) 2430 { 2431 struct filedesc *fdp = p->p_fd; 2432 struct file *wfp; 2433 struct file *xfp; 2434 int werror; 2435 2436 if ((wfp = holdfp(fdp, sfd, -1)) == NULL) 2437 return (EBADF); 2438 2439 /* 2440 * Close a revoke/dup race. Duping a descriptor marked as revoked 2441 * will dup a dummy descriptor instead of the real one. 
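	 *
	 * (For context, the /dev/fd sequence that lands here is roughly:
	 * open("/dev/fd/N") reaches fdopen() above, which records N in
	 * lwp_dupfd and fails with ENODEV; the open/vn_open path detects
	 * that special error and calls dupfdopen() with the reserved
	 * descriptor dfd and sfd == N, making dfd a duplicate of
	 * descriptor N.)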
	 */
	if (wfp->f_flag & FREVOKED) {
		kprintf("Warning: attempt to dup() a revoked descriptor\n");
		fdrop(wfp);
		wfp = NULL;
		werror = falloc(NULL, &wfp, NULL);
		if (werror)
			return (werror);
	}

	/*
	 * There are two cases of interest here.
	 *
	 * For ENODEV simply dup sfd to file descriptor dfd and return.
	 *
	 * For ENXIO steal away the file structure from sfd and store it
	 * in dfd.  sfd is effectively closed by this operation.
	 *
	 * Any other error code is just returned.
	 */
	switch (error) {
	case ENODEV:
		/*
		 * Check that the mode the file is being opened for is a
		 * subset of the mode of the existing descriptor.
		 */
		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
			error = EACCES;
			break;
		}
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd(p, wfp, dfd);
		error = 0;
		break;
	case ENXIO:
		/*
		 * Steal away the file pointer from sfd and stuff it into
		 * dfd, then close sfd.
		 */
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd(p, wfp, dfd);
		if ((xfp = funsetfd_locked(fdp, sfd)) != NULL)
			fdrop(xfp);
		error = 0;
		break;
	default:
		break;
	}
	fdrop(wfp);
	return (error);
}

/*
 * NOT MPSAFE - I think these refer to a common file descriptor table
 * and we need to spinlock that to link fdtol in.
 */
struct filedesc_to_leader *
filedesc_to_leader_alloc(struct filedesc_to_leader *old,
			 struct proc *leader)
{
	struct filedesc_to_leader *fdtol;

	fdtol = kmalloc(sizeof(struct filedesc_to_leader),
			M_FILEDESC_TO_LEADER, M_WAITOK);
	fdtol->fdl_refcount = 1;
	fdtol->fdl_holdcount = 0;
	fdtol->fdl_wakeup = 0;
	fdtol->fdl_leader = leader;
	if (old != NULL) {
		fdtol->fdl_next = old->fdl_next;
		fdtol->fdl_prev = old;
		old->fdl_next = fdtol;
		fdtol->fdl_next->fdl_prev = fdtol;
	} else {
		fdtol->fdl_next = fdtol;
		fdtol->fdl_prev = fdtol;
	}
	return fdtol;
}

/*
 * Scan all file pointers in the system.  The callback is made with
 * the master list spinlock held exclusively.
 *
 * MPSAFE
 */
void
allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
{
	struct file *fp;
	int res;

	spin_lock_wr(&filehead_spin);
	LIST_FOREACH(fp, &filehead, f_list) {
		res = callback(fp, data);
		if (res < 0)
			break;
	}
	spin_unlock_wr(&filehead_spin);
}

/*
 * Get file structures.
 *
 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
 */

struct sysctl_kern_file_info {
	int count;
	int error;
	struct sysctl_req *req;
};

static int sysctl_kern_file_callback(struct proc *p, void *data);

static int
sysctl_kern_file(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_kern_file_info info;

	/*
	 * Note: because the number of file descriptors is calculated
	 * in different ways for sizing vs returning the data,
	 * there is information leakage from the first loop.  However,
	 * it is of a similar order of magnitude to the leakage from
	 * global system statistics such as kern.openfiles.
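	 *
	 * (For example, a sizing pass that counts 1000 open descriptors
	 * ends up reporting room for 1000 + 16 + 1000/10 = 1116
	 * kinfo_file entries; see the req->oldptr == NULL case below.)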
2567 * 2568 * When just doing a count, note that we cannot just count 2569 * the elements and add f_count via the filehead list because 2570 * threaded processes share their descriptor table and f_count might 2571 * still be '1' in that case. 2572 * 2573 * Since the SYSCTL op can block, we must hold the process to 2574 * prevent it being ripped out from under us either in the 2575 * file descriptor loop or in the greater LIST_FOREACH. The 2576 * process may be in varying states of disrepair. If the process 2577 * is in SZOMB we may have caught it just as it is being removed 2578 * from the allproc list, we must skip it in that case to maintain 2579 * an unbroken chain through the allproc list. 2580 */ 2581 info.count = 0; 2582 info.error = 0; 2583 info.req = req; 2584 allproc_scan(sysctl_kern_file_callback, &info); 2585 2586 /* 2587 * When just calculating the size, overestimate a bit to try to 2588 * prevent system activity from causing the buffer-fill call 2589 * to fail later on. 2590 */ 2591 if (req->oldptr == NULL) { 2592 info.count = (info.count + 16) + (info.count / 10); 2593 info.error = SYSCTL_OUT(req, NULL, 2594 info.count * sizeof(struct kinfo_file)); 2595 } 2596 return (info.error); 2597 } 2598 2599 static int 2600 sysctl_kern_file_callback(struct proc *p, void *data) 2601 { 2602 struct sysctl_kern_file_info *info = data; 2603 struct kinfo_file kf; 2604 struct filedesc *fdp; 2605 struct file *fp; 2606 uid_t uid; 2607 int n; 2608 2609 if (p->p_stat == SIDL || p->p_stat == SZOMB) 2610 return(0); 2611 if (!PRISON_CHECK(info->req->td->td_proc->p_ucred, p->p_ucred) != 0) 2612 return(0); 2613 2614 /* 2615 * Softref the fdp to prevent it from being destroyed 2616 */ 2617 spin_lock_wr(&p->p_spin); 2618 if ((fdp = p->p_fd) == NULL) { 2619 spin_unlock_wr(&p->p_spin); 2620 return(0); 2621 } 2622 atomic_add_int(&fdp->fd_softrefs, 1); 2623 spin_unlock_wr(&p->p_spin); 2624 2625 /* 2626 * The fdp's own spinlock prevents the contents from being 2627 * modified. 2628 */ 2629 spin_lock_rd(&fdp->fd_spin); 2630 for (n = 0; n < fdp->fd_nfiles; ++n) { 2631 if ((fp = fdp->fd_files[n].fp) == NULL) 2632 continue; 2633 if (info->req->oldptr == NULL) { 2634 ++info->count; 2635 } else { 2636 uid = p->p_ucred ? 
p->p_ucred->cr_uid : -1; 2637 kcore_make_file(&kf, fp, p->p_pid, uid, n); 2638 spin_unlock_rd(&fdp->fd_spin); 2639 info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf)); 2640 spin_lock_rd(&fdp->fd_spin); 2641 if (info->error) 2642 break; 2643 } 2644 } 2645 spin_unlock_rd(&fdp->fd_spin); 2646 atomic_subtract_int(&fdp->fd_softrefs, 1); 2647 if (info->error) 2648 return(-1); 2649 return(0); 2650 } 2651 2652 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD, 2653 0, 0, sysctl_kern_file, "S,file", "Entire file table"); 2654 2655 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW, 2656 &maxfilesperproc, 0, "Maximum files allowed open per process"); 2657 2658 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW, 2659 &maxfiles, 0, "Maximum number of files"); 2660 2661 SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW, 2662 &maxfilesrootres, 0, "Descriptors reserved for root use"); 2663 2664 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD, 2665 &nfiles, 0, "System-wide number of open files"); 2666 2667 static void 2668 fildesc_drvinit(void *unused) 2669 { 2670 int fd; 2671 2672 for (fd = 0; fd < NUMFDESC; fd++) { 2673 make_dev(&fildesc_ops, fd, 2674 UID_BIN, GID_BIN, 0666, "fd/%d", fd); 2675 } 2676 2677 make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin"); 2678 make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout"); 2679 make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr"); 2680 } 2681 2682 /* 2683 * MPSAFE 2684 */ 2685 struct fileops badfileops = { 2686 .fo_read = badfo_readwrite, 2687 .fo_write = badfo_readwrite, 2688 .fo_ioctl = badfo_ioctl, 2689 .fo_poll = badfo_poll, 2690 .fo_kqfilter = badfo_kqfilter, 2691 .fo_stat = badfo_stat, 2692 .fo_close = badfo_close, 2693 .fo_shutdown = badfo_shutdown 2694 }; 2695 2696 /* 2697 * MPSAFE 2698 */ 2699 int 2700 badfo_readwrite( 2701 struct file *fp, 2702 struct uio *uio, 2703 struct ucred *cred, 2704 int flags 2705 ) { 2706 return (EBADF); 2707 } 2708 2709 /* 2710 * MPSAFE 2711 */ 2712 int 2713 badfo_ioctl(struct file *fp, u_long com, caddr_t data, 2714 struct ucred *cred, struct sysmsg *msgv) 2715 { 2716 return (EBADF); 2717 } 2718 2719 /* 2720 * MPSAFE 2721 */ 2722 int 2723 badfo_poll(struct file *fp, int events, struct ucred *cred) 2724 { 2725 return (0); 2726 } 2727 2728 /* 2729 * MPSAFE 2730 */ 2731 int 2732 badfo_kqfilter(struct file *fp, struct knote *kn) 2733 { 2734 return (0); 2735 } 2736 2737 int 2738 badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred) 2739 { 2740 return (EBADF); 2741 } 2742 2743 /* 2744 * MPSAFE 2745 */ 2746 int 2747 badfo_close(struct file *fp) 2748 { 2749 return (EBADF); 2750 } 2751 2752 /* 2753 * MPSAFE 2754 */ 2755 int 2756 badfo_shutdown(struct file *fp, int how) 2757 { 2758 return (EBADF); 2759 } 2760 2761 /* 2762 * MPSAFE 2763 */ 2764 int 2765 nofo_shutdown(struct file *fp, int how) 2766 { 2767 return (EOPNOTSUPP); 2768 } 2769 2770 SYSINIT(fildescdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR, 2771 fildesc_drvinit,NULL) 2772
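
/*
 * Usage sketch for the badfileops table above (illustrative comment only,
 * not compiled here): a struct file whose open never completed can be left
 * pointing at badfileops so that any stray operation fails with EBADF
 * instead of calling into an uninitialized ops vector, and fdrop() above
 * deliberately skips fo_close() for such files:
 *
 *	fp->f_ops = &badfileops;	(every fo_*() now returns EBADF)
 *	...
 *	fp->f_ops = &vnode_fileops;	(real vector installed once the open
 *					 succeeds; "vnode_fileops" is an
 *					 assumed name defined outside this
 *					 file)
 */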