1 /* 2 * Copyright (c) 2005 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Jeffrey Hsu. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * 35 * Copyright (c) 1982, 1986, 1989, 1991, 1993 36 * The Regents of the University of California. All rights reserved. 37 * (c) UNIX System Laboratories, Inc. 38 * All or some portions of this file are derived from material licensed 39 * to the University of California by American Telephone and Telegraph 40 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 41 * the permission of UNIX System Laboratories, Inc. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. All advertising materials mentioning features or use of this software 52 * must display the following acknowledgement: 53 * This product includes software developed by the University of 54 * California, Berkeley and its contributors. 55 * 4. Neither the name of the University nor the names of its contributors 56 * may be used to endorse or promote products derived from this software 57 * without specific prior written permission. 58 * 59 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 62 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 69 * SUCH DAMAGE. 70 * 71 * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94 72 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $ 73 * $DragonFly: src/sys/kern/kern_descrip.c,v 1.79 2008/08/31 13:18:28 aggelos Exp $ 74 */ 75 76 #include "opt_compat.h" 77 #include <sys/param.h> 78 #include <sys/systm.h> 79 #include <sys/malloc.h> 80 #include <sys/sysproto.h> 81 #include <sys/conf.h> 82 #include <sys/device.h> 83 #include <sys/filedesc.h> 84 #include <sys/kernel.h> 85 #include <sys/sysctl.h> 86 #include <sys/vnode.h> 87 #include <sys/proc.h> 88 #include <sys/nlookup.h> 89 #include <sys/file.h> 90 #include <sys/stat.h> 91 #include <sys/filio.h> 92 #include <sys/fcntl.h> 93 #include <sys/unistd.h> 94 #include <sys/resourcevar.h> 95 #include <sys/event.h> 96 #include <sys/kern_syscall.h> 97 #include <sys/kcore.h> 98 #include <sys/kinfo.h> 99 #include <sys/un.h> 100 101 #include <vm/vm.h> 102 #include <vm/vm_extern.h> 103 104 #include <sys/thread2.h> 105 #include <sys/file2.h> 106 #include <sys/spinlock2.h> 107 108 static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd); 109 static void fdreserve_locked (struct filedesc *fdp, int fd0, int incr); 110 static struct file *funsetfd_locked (struct filedesc *fdp, int fd); 111 static int checkfpclosed(struct filedesc *fdp, int fd, struct file *fp); 112 static void ffree(struct file *fp); 113 114 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table"); 115 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader", 116 "file desc to leader structures"); 117 MALLOC_DEFINE(M_FILE, "file", "Open file structure"); 118 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures"); 119 120 static d_open_t fdopen; 121 #define NUMFDESC 64 122 123 #define CDEV_MAJOR 22 124 static struct dev_ops fildesc_ops = { 125 { "FD", CDEV_MAJOR, 0 }, 126 .d_open = fdopen, 127 }; 128 129 static int badfo_readwrite (struct file *fp, struct uio *uio, 130 struct ucred *cred, int flags); 131 static int badfo_ioctl (struct file *fp, u_long com, caddr_t data, 132 struct ucred *cred); 133 static int badfo_poll (struct file *fp, int events, struct ucred *cred); 134 static int badfo_kqfilter (struct file *fp, struct knote *kn); 135 static int badfo_stat (struct file *fp, struct stat *sb, struct ucred *cred); 136 static int badfo_close (struct file *fp); 137 static int badfo_shutdown (struct file *fp, int how); 138 139 /* 140 * Descriptor management. 141 */ 142 static struct filelist filehead = LIST_HEAD_INITIALIZER(&filehead); 143 static struct spinlock filehead_spin = SPINLOCK_INITIALIZER(&filehead_spin); 144 static int nfiles; /* actual number of open files */ 145 extern int cmask; 146 147 /* 148 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared. 
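 *
 * For example, if descriptors 0-5 are open (fd_lastfile 5, fd_freefile 6)
 * and descriptor 5 is cleared, fd_lastfile is walked back to 4 (and on past
 * any other trailing unused slots); clearing descriptor 2 afterwards pulls
 * fd_freefile down to 2 so the next allocation can reuse the hole.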
149 * 150 * MPSAFE - must be called with fdp->fd_spin exclusively held 151 */ 152 static __inline 153 void 154 fdfixup_locked(struct filedesc *fdp, int fd) 155 { 156 if (fd < fdp->fd_freefile) { 157 fdp->fd_freefile = fd; 158 } 159 while (fdp->fd_lastfile >= 0 && 160 fdp->fd_files[fdp->fd_lastfile].fp == NULL && 161 fdp->fd_files[fdp->fd_lastfile].reserved == 0 162 ) { 163 --fdp->fd_lastfile; 164 } 165 } 166 167 /* 168 * System calls on descriptors. 169 * 170 * MPSAFE 171 */ 172 int 173 sys_getdtablesize(struct getdtablesize_args *uap) 174 { 175 struct proc *p = curproc; 176 struct plimit *limit = p->p_limit; 177 178 spin_lock_rd(&limit->p_spin); 179 uap->sysmsg_result = 180 min((int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc); 181 spin_unlock_rd(&limit->p_spin); 182 return (0); 183 } 184 185 /* 186 * Duplicate a file descriptor to a particular value. 187 * 188 * note: keep in mind that a potential race condition exists when closing 189 * descriptors from a shared descriptor table (via rfork). 190 * 191 * MPSAFE 192 */ 193 int 194 sys_dup2(struct dup2_args *uap) 195 { 196 int error; 197 int fd = 0; 198 199 error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd); 200 uap->sysmsg_fds[0] = fd; 201 202 return (error); 203 } 204 205 /* 206 * Duplicate a file descriptor. 207 * 208 * MPSAFE 209 */ 210 int 211 sys_dup(struct dup_args *uap) 212 { 213 int error; 214 int fd = 0; 215 216 error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd); 217 uap->sysmsg_fds[0] = fd; 218 219 return (error); 220 } 221 222 /* 223 * MPALMOSTSAFE - acquires mplock for fp operations 224 */ 225 int 226 kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred) 227 { 228 struct thread *td = curthread; 229 struct proc *p = td->td_proc; 230 struct file *fp; 231 struct vnode *vp; 232 u_int newmin; 233 u_int oflags; 234 u_int nflags; 235 int tmp, error, flg = F_POSIX; 236 237 KKASSERT(p); 238 239 /* 240 * Operations on file descriptors that do not require a file pointer. 241 */ 242 switch (cmd) { 243 case F_GETFD: 244 error = fgetfdflags(p->p_fd, fd, &tmp); 245 if (error == 0) 246 dat->fc_cloexec = (tmp & UF_EXCLOSE) ? 
FD_CLOEXEC : 0; 247 return (error); 248 249 case F_SETFD: 250 if (dat->fc_cloexec & FD_CLOEXEC) 251 error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE); 252 else 253 error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE); 254 return (error); 255 case F_DUPFD: 256 newmin = dat->fc_fd; 257 error = kern_dup(DUP_VARIABLE, fd, newmin, &dat->fc_fd); 258 return (error); 259 default: 260 break; 261 } 262 263 /* 264 * Operations on file pointers 265 */ 266 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL) 267 return (EBADF); 268 269 get_mplock(); 270 switch (cmd) { 271 case F_GETFL: 272 dat->fc_flags = OFLAGS(fp->f_flag); 273 error = 0; 274 break; 275 276 case F_SETFL: 277 oflags = fp->f_flag; 278 nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS; 279 nflags |= oflags & ~FCNTLFLAGS; 280 281 error = 0; 282 if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY)) 283 error = EINVAL; 284 if (error == 0 && ((nflags ^ oflags) & FASYNC)) { 285 tmp = nflags & FASYNC; 286 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, cred); 287 } 288 if (error == 0) 289 fp->f_flag = nflags; 290 break; 291 292 case F_GETOWN: 293 error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner, cred); 294 break; 295 296 case F_SETOWN: 297 error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner, cred); 298 break; 299 300 case F_SETLKW: 301 flg |= F_WAIT; 302 /* Fall into F_SETLK */ 303 304 case F_SETLK: 305 if (fp->f_type != DTYPE_VNODE) { 306 error = EBADF; 307 break; 308 } 309 vp = (struct vnode *)fp->f_data; 310 311 /* 312 * copyin/lockop may block 313 */ 314 if (dat->fc_flock.l_whence == SEEK_CUR) 315 dat->fc_flock.l_start += fp->f_offset; 316 317 switch (dat->fc_flock.l_type) { 318 case F_RDLCK: 319 if ((fp->f_flag & FREAD) == 0) { 320 error = EBADF; 321 break; 322 } 323 p->p_leader->p_flag |= P_ADVLOCK; 324 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, 325 &dat->fc_flock, flg); 326 break; 327 case F_WRLCK: 328 if ((fp->f_flag & FWRITE) == 0) { 329 error = EBADF; 330 break; 331 } 332 p->p_leader->p_flag |= P_ADVLOCK; 333 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, 334 &dat->fc_flock, flg); 335 break; 336 case F_UNLCK: 337 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK, 338 &dat->fc_flock, F_POSIX); 339 break; 340 default: 341 error = EINVAL; 342 break; 343 } 344 345 /* 346 * It is possible to race a close() on the descriptor while 347 * we were blocked getting the lock. If this occurs the 348 * close might not have caught the lock. 349 */ 350 if (checkfpclosed(p->p_fd, fd, fp)) { 351 dat->fc_flock.l_whence = SEEK_SET; 352 dat->fc_flock.l_start = 0; 353 dat->fc_flock.l_len = 0; 354 dat->fc_flock.l_type = F_UNLCK; 355 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, 356 F_UNLCK, &dat->fc_flock, F_POSIX); 357 } 358 break; 359 360 case F_GETLK: 361 if (fp->f_type != DTYPE_VNODE) { 362 error = EBADF; 363 break; 364 } 365 vp = (struct vnode *)fp->f_data; 366 /* 367 * copyin/lockop may block 368 */ 369 if (dat->fc_flock.l_type != F_RDLCK && 370 dat->fc_flock.l_type != F_WRLCK && 371 dat->fc_flock.l_type != F_UNLCK) { 372 error = EINVAL; 373 break; 374 } 375 if (dat->fc_flock.l_whence == SEEK_CUR) 376 dat->fc_flock.l_start += fp->f_offset; 377 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, 378 &dat->fc_flock, F_POSIX); 379 break; 380 default: 381 error = EINVAL; 382 break; 383 } 384 rel_mplock(); 385 386 fdrop(fp); 387 return (error); 388 } 389 390 /* 391 * The file control system call. 
 *
 * MPSAFE
 */
int
sys_fcntl(struct fcntl_args *uap)
{
	union fcntl_dat dat;
	int error;

	switch (uap->cmd) {
	case F_DUPFD:
		dat.fc_fd = uap->arg;
		break;
	case F_SETFD:
		dat.fc_cloexec = uap->arg;
		break;
	case F_SETFL:
		dat.fc_flags = uap->arg;
		break;
	case F_SETOWN:
		dat.fc_owner = uap->arg;
		break;
	case F_SETLKW:
	case F_SETLK:
	case F_GETLK:
		error = copyin((caddr_t)uap->arg, &dat.fc_flock,
			       sizeof(struct flock));
		if (error)
			return (error);
		break;
	}

	error = kern_fcntl(uap->fd, uap->cmd, &dat, curproc->p_ucred);

	if (error == 0) {
		switch (uap->cmd) {
		case F_DUPFD:
			uap->sysmsg_result = dat.fc_fd;
			break;
		case F_GETFD:
			uap->sysmsg_result = dat.fc_cloexec;
			break;
		case F_GETFL:
			uap->sysmsg_result = dat.fc_flags;
			break;
		case F_GETOWN:
			uap->sysmsg_result = dat.fc_owner;
			break;
		case F_GETLK:
			error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
					sizeof(struct flock));
			break;
		}
	}

	return (error);
}

/*
 * Common code for dup, dup2, and fcntl(F_DUPFD).
 *
 * The type flag can be either DUP_FIXED or DUP_VARIABLE.  DUP_FIXED tells
 * kern_dup() to destructively dup over an existing file descriptor if new
 * is already open.  DUP_VARIABLE tells kern_dup() to find the lowest
 * unused file descriptor that is greater than or equal to new.
 *
 * MPSAFE
 */
int
kern_dup(enum dup_type type, int old, int new, int *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct file *delfp;
	int oldflags;
	int holdleaders;
	int error, newfd;

	/*
	 * Verify that we have a valid descriptor to dup from and
	 * possibly to dup to.
	 */
retry:
	spin_lock_wr(&fdp->fd_spin);
	if (new < 0 || new > p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
	    new >= maxfilesperproc) {
		spin_unlock_wr(&fdp->fd_spin);
		return (EINVAL);
	}
	if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
		spin_unlock_wr(&fdp->fd_spin);
		return (EBADF);
	}
	if (type == DUP_FIXED && old == new) {
		*res = new;
		spin_unlock_wr(&fdp->fd_spin);
		return (0);
	}
	fp = fdp->fd_files[old].fp;
	oldflags = fdp->fd_files[old].fileflags;
	fhold(fp);	/* MPSAFE - can be called with a spinlock held */

	/*
	 * Allocate a new descriptor if DUP_VARIABLE, or expand the table
	 * if the requested descriptor is beyond the current table size.
	 *
	 * This can block.  Retry if the source descriptor no longer matches
	 * or if our expectation in the expansion case races.
	 *
	 * If we are not expanding or allocating a new descriptor, then reset
	 * the target descriptor to a reserved state so we have a uniform
	 * setup for the next code block.
	 */
	if (type == DUP_VARIABLE || new >= fdp->fd_nfiles) {
		spin_unlock_wr(&fdp->fd_spin);
		error = fdalloc(p, new, &newfd);
		spin_lock_wr(&fdp->fd_spin);
		if (error) {
			spin_unlock_wr(&fdp->fd_spin);
			fdrop(fp);
			return (error);
		}
		/*
		 * Check for ripout
		 */
		if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock_wr(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for expansion race
		 */
		if (type != DUP_VARIABLE && new != newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock_wr(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for ripout, newfd reused old (this case probably
		 * can't occur).
		 */
		if (old == newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock_wr(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		new = newfd;
		delfp = NULL;
	} else {
		if (fdp->fd_files[new].reserved) {
			spin_unlock_wr(&fdp->fd_spin);
			fdrop(fp);
			kprintf("Warning: dup(): target descriptor %d is reserved, waiting for it to be resolved\n", new);
			tsleep(fdp, 0, "fdres", hz);
			goto retry;
		}

		/*
		 * If the target descriptor was never allocated we have
		 * to allocate it.  If it was we have to clean out the
		 * old descriptor.  delfp inherits the ref from the
		 * descriptor table.
		 */
		delfp = fdp->fd_files[new].fp;
		fdp->fd_files[new].fp = NULL;
		fdp->fd_files[new].reserved = 1;
		if (delfp == NULL) {
			fdreserve_locked(fdp, new, 1);
			if (new > fdp->fd_lastfile)
				fdp->fd_lastfile = new;
		}
	}

	/*
	 * NOTE: still holding an exclusive spinlock
	 */

	/*
	 * If a descriptor is being overwritten we may have to tell
	 * fdfree() to sleep to ensure that all relevant process
	 * leaders can be traversed in closef().
	 */
	if (delfp != NULL && p->p_fdtol != NULL) {
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	} else {
		holdleaders = 0;
	}
	KASSERT(delfp == NULL || type == DUP_FIXED,
		("dup() picked an open file"));

	/*
	 * Duplicate the source descriptor, update lastfile.  If the new
	 * descriptor was not allocated and we aren't replacing an existing
	 * descriptor we have to mark the descriptor as being in use.
	 *
	 * The fd_files[] array inherits fp's hold reference.
	 */
	fsetfd_locked(fdp, fp, new);
	fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
	spin_unlock_wr(&fdp->fd_spin);
	fdrop(fp);
	*res = new;

	/*
	 * If we dup'd over a valid file, we now own the reference to it
	 * and must dispose of it using closef() semantics (as if a
	 * close() were performed on it).
	 */
	if (delfp) {
		closef(delfp, p);
		if (holdleaders) {
			spin_lock_wr(&fdp->fd_spin);
			fdp->fd_holdleaderscount--;
			if (fdp->fd_holdleaderscount == 0 &&
			    fdp->fd_holdleaderswakeup != 0) {
				fdp->fd_holdleaderswakeup = 0;
				spin_unlock_wr(&fdp->fd_spin);
				wakeup(&fdp->fd_holdleaderscount);
			} else {
				spin_unlock_wr(&fdp->fd_spin);
			}
		}
	}
	return (0);
}

/*
 * If sigio is on the list associated with a process or process group,
 * disable signalling from the device, remove sigio from the list and
 * free sigio.
 */
void
funsetown(struct sigio *sigio)
{
	if (sigio == NULL)
		return;
	crit_enter();
	*(sigio->sio_myref) = NULL;
	crit_exit();
	if (sigio->sio_pgid < 0) {
		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
			     sigio, sio_pgsigio);
	} else /* if ((*sigiop)->sio_pgid > 0) */ {
		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
			     sigio, sio_pgsigio);
	}
	crfree(sigio->sio_ucred);
	kfree(sigio, M_SIGIO);
}

/* Free a list of sigio structures. */
void
funsetownlst(struct sigiolst *sigiolst)
{
	struct sigio *sigio;

	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
		funsetown(sigio);
}

/*
 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
 *
 * After permission checking, add a sigio structure to the sigio list for
 * the process or process group.
 */
int
fsetown(pid_t pgid, struct sigio **sigiop)
{
	struct proc *proc;
	struct pgrp *pgrp;
	struct sigio *sigio;

	if (pgid == 0) {
		funsetown(*sigiop);
		return (0);
	}
	if (pgid > 0) {
		proc = pfind(pgid);
		if (proc == NULL)
			return (ESRCH);

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (proc->p_session != curproc->p_session)
			return (EPERM);

		pgrp = NULL;
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);
		if (pgrp == NULL)
			return (ESRCH);

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != curproc->p_session)
			return (EPERM);

		proc = NULL;
	}
	funsetown(*sigiop);
	sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
	if (pgid > 0) {
		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
		sigio->sio_proc = proc;
	} else {
		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
		sigio->sio_pgrp = pgrp;
	}
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(curproc->p_ucred);
	/* It would be convenient if p_ruid was in ucred. */
	sigio->sio_ruid = curproc->p_ucred->cr_ruid;
	sigio->sio_myref = sigiop;
	crit_enter();
	*sigiop = sigio;
	crit_exit();
	return (0);
}

/*
 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
 */
pid_t
fgetown(struct sigio *sigio)
{
	return (sigio != NULL ? sigio->sio_pgid : 0);
}

/*
 * Close many file descriptors.
 *
 * MPSAFE
 */
int
sys_closefrom(struct closefrom_args *uap)
{
	return(kern_closefrom(uap->fd));
}

/*
 * Close all file descriptors greater than or equal to fd
 *
 * MPSAFE
 */
int
kern_closefrom(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;

	KKASSERT(p);
	fdp = p->p_fd;

	if (fd < 0)
		return (EINVAL);

	/*
	 * NOTE: This function will skip unassociated descriptors and
	 * reserved descriptors that have not yet been assigned.
	 * fd_lastfile can change as a side effect of kern_close().
773 */ 774 spin_lock_wr(&fdp->fd_spin); 775 while (fd <= fdp->fd_lastfile) { 776 if (fdp->fd_files[fd].fp != NULL) { 777 spin_unlock_wr(&fdp->fd_spin); 778 /* ok if this races another close */ 779 if (kern_close(fd) == EINTR) 780 return (EINTR); 781 spin_lock_wr(&fdp->fd_spin); 782 } 783 ++fd; 784 } 785 spin_unlock_wr(&fdp->fd_spin); 786 return (0); 787 } 788 789 /* 790 * Close a file descriptor. 791 * 792 * MPSAFE 793 */ 794 int 795 sys_close(struct close_args *uap) 796 { 797 return(kern_close(uap->fd)); 798 } 799 800 /* 801 * MPALMOSTSAFE - acquires mplock around knote_fdclose() calls 802 */ 803 int 804 kern_close(int fd) 805 { 806 struct thread *td = curthread; 807 struct proc *p = td->td_proc; 808 struct filedesc *fdp; 809 struct file *fp; 810 int error; 811 int holdleaders; 812 813 KKASSERT(p); 814 fdp = p->p_fd; 815 816 spin_lock_wr(&fdp->fd_spin); 817 if ((fp = funsetfd_locked(fdp, fd)) == NULL) { 818 spin_unlock_wr(&fdp->fd_spin); 819 return (EBADF); 820 } 821 holdleaders = 0; 822 if (p->p_fdtol != NULL) { 823 /* 824 * Ask fdfree() to sleep to ensure that all relevant 825 * process leaders can be traversed in closef(). 826 */ 827 fdp->fd_holdleaderscount++; 828 holdleaders = 1; 829 } 830 831 /* 832 * we now hold the fp reference that used to be owned by the descriptor 833 * array. 834 */ 835 spin_unlock_wr(&fdp->fd_spin); 836 if (fd < fdp->fd_knlistsize) { 837 get_mplock(); 838 if (fd < fdp->fd_knlistsize) 839 knote_fdclose(p, fd); 840 rel_mplock(); 841 } 842 error = closef(fp, p); 843 if (holdleaders) { 844 spin_lock_wr(&fdp->fd_spin); 845 fdp->fd_holdleaderscount--; 846 if (fdp->fd_holdleaderscount == 0 && 847 fdp->fd_holdleaderswakeup != 0) { 848 fdp->fd_holdleaderswakeup = 0; 849 spin_unlock_wr(&fdp->fd_spin); 850 wakeup(&fdp->fd_holdleaderscount); 851 } else { 852 spin_unlock_wr(&fdp->fd_spin); 853 } 854 } 855 return (error); 856 } 857 858 /* 859 * shutdown_args(int fd, int how) 860 */ 861 int 862 kern_shutdown(int fd, int how) 863 { 864 struct thread *td = curthread; 865 struct proc *p = td->td_proc; 866 struct file *fp; 867 int error; 868 869 KKASSERT(p); 870 871 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL) 872 return (EBADF); 873 error = fo_shutdown(fp, how); 874 fdrop(fp); 875 876 return (error); 877 } 878 879 int 880 sys_shutdown(struct shutdown_args *uap) 881 { 882 int error; 883 884 error = kern_shutdown(uap->s, uap->how); 885 886 return (error); 887 } 888 889 /* 890 * MPSAFE 891 */ 892 int 893 kern_fstat(int fd, struct stat *ub) 894 { 895 struct thread *td = curthread; 896 struct proc *p = td->td_proc; 897 struct file *fp; 898 int error; 899 900 KKASSERT(p); 901 902 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL) 903 return (EBADF); 904 error = fo_stat(fp, ub, p->p_ucred); 905 fdrop(fp); 906 907 return (error); 908 } 909 910 /* 911 * Return status information about a file descriptor. 912 * 913 * MPSAFE 914 */ 915 int 916 sys_fstat(struct fstat_args *uap) 917 { 918 struct stat st; 919 int error; 920 921 error = kern_fstat(uap->fd, &st); 922 923 if (error == 0) 924 error = copyout(&st, uap->sb, sizeof(st)); 925 return (error); 926 } 927 928 /* 929 * Return pathconf information about a file descriptor. 
 */
/* ARGSUSED */
int
sys_fpathconf(struct fpathconf_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct vnode *vp;
	int error = 0;

	KKASSERT(p);

	if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
		return (EBADF);

	switch (fp->f_type) {
	case DTYPE_PIPE:
	case DTYPE_SOCKET:
		if (uap->name != _PC_PIPE_BUF) {
			error = EINVAL;
		} else {
			uap->sysmsg_result = PIPE_BUF;
			error = 0;
		}
		break;
	case DTYPE_FIFO:
	case DTYPE_VNODE:
		vp = (struct vnode *)fp->f_data;
		error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	fdrop(fp);
	return(error);
}

static int fdexpand;
SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0, "");

/*
 * Grow the file table so it can hold through descriptor (want).
 *
 * The fdp's spinlock must be held exclusively on entry and may be held
 * exclusively on return.  The spinlock may be cycled by the routine.
 *
 * MPSAFE
 */
static void
fdgrow_locked(struct filedesc *fdp, int want)
{
	struct fdnode *newfiles;
	struct fdnode *oldfiles;
	int nf, extra;

	nf = fdp->fd_nfiles;
	do {
		/* nf has to be of the form 2^n - 1 */
		nf = 2 * nf + 1;
	} while (nf <= want);

	spin_unlock_wr(&fdp->fd_spin);
	newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
	spin_lock_wr(&fdp->fd_spin);

	/*
	 * We could have raced another extend while we were not holding
	 * the spinlock.
	 */
	if (fdp->fd_nfiles >= nf) {
		spin_unlock_wr(&fdp->fd_spin);
		kfree(newfiles, M_FILEDESC);
		spin_lock_wr(&fdp->fd_spin);
		return;
	}
	/*
	 * Copy the existing ofile and ofileflags arrays
	 * and zero the new portion of each array.
	 */
	extra = nf - fdp->fd_nfiles;
	bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
	bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));

	oldfiles = fdp->fd_files;
	fdp->fd_files = newfiles;
	fdp->fd_nfiles = nf;

	if (oldfiles != fdp->fd_builtin_files) {
		spin_unlock_wr(&fdp->fd_spin);
		kfree(oldfiles, M_FILEDESC);
		spin_lock_wr(&fdp->fd_spin);
	}
	fdexpand++;
}

/*
 * Number of nodes in right subtree, including the root.
 */
static __inline int
right_subtree_size(int n)
{
	return (n ^ (n | (n + 1)));
}

/*
 * Bigger ancestor.
 */
static __inline int
right_ancestor(int n)
{
	return (n | (n + 1));
}

/*
 * Smaller ancestor.
 */
static __inline int
left_ancestor(int n)
{
	return ((n & (n + 1)) - 1);
}

/*
 * Traverse the in-place binary tree bottom-up adjusting the allocation
 * count so scans can determine where free descriptors are located.
 *
 * MPSAFE - caller must be holding an exclusive spinlock on fdp
 */
static
void
fdreserve_locked(struct filedesc *fdp, int fd, int incr)
{
	while (fd >= 0) {
		fdp->fd_files[fd].allocated += incr;
		KKASSERT(fdp->fd_files[fd].allocated >= 0);
		fd = left_ancestor(fd);
	}
}

/*
 * Reserve a file descriptor for the process.  If no error occurs, the
 * caller MUST at some point call fsetfd() or assign a file pointer
 * or dispose of the reservation.
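 *
 * The fd_files[] array doubles as an in-place binary tree (see
 * right_subtree_size(), right_ancestor() and left_ancestor() above).  As
 * an illustration, a 7-entry table forms a tree rooted at descriptor 3
 * with children 1 and 5; reserving descriptor 6 runs the left-ancestor
 * chain 6 -> 5 -> 3 (left_ancestor(6) = (6 & 7) - 1 = 5), so
 * fd_files[n].allocated counts the in-use descriptors in the subtree made
 * up of n plus n's right subtree, e.g. right_subtree_size(3) = 3 ^ (3 | 4)
 * = 4, covering descriptors 3-6.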
 *
 * MPSAFE
 */
int
fdalloc(struct proc *p, int want, int *result)
{
	struct filedesc *fdp = p->p_fd;
	int fd, rsize, rsum, node, lim;

	spin_lock_rd(&p->p_limit->p_spin);
	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
	spin_unlock_rd(&p->p_limit->p_spin);
	if (want >= lim)
		return (EMFILE);
	spin_lock_wr(&fdp->fd_spin);
	if (want >= fdp->fd_nfiles)
		fdgrow_locked(fdp, want);

	/*
	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile.  If that fails, consider
	 * expanding the ofile array.
	 *
	 * NOTE! the 'allocated' field is a cumulative recursive allocation
	 * count.  If we happen to see a value of 0 then we can shortcut
	 * our search.  Otherwise we run through the tree going
	 * down branches we know have free descriptor(s) until we hit a
	 * leaf node.  The leaf node will be free but will not necessarily
	 * have an allocated field of 0.
	 */
retry:
	/* move up the tree looking for a subtree with a free node */
	for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
	     fd = right_ancestor(fd)) {
		if (fdp->fd_files[fd].allocated == 0)
			goto found;

		rsize = right_subtree_size(fd);
		if (fdp->fd_files[fd].allocated == rsize)
			continue;	/* right subtree full */

		/*
		 * Free fd is in the right subtree of the tree rooted at fd.
		 * Call that subtree R.  Look for the smallest (leftmost)
		 * subtree of R with an unallocated fd: continue moving
		 * down the left branch until encountering a full left
		 * subtree, then move to the right.
		 */
		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
			node = fd + rsize;
			rsum += fdp->fd_files[node].allocated;
			if (fdp->fd_files[fd].allocated == rsum + rsize) {
				fd = node;	/* move to the right */
				if (fdp->fd_files[node].allocated == 0)
					goto found;
				rsum = 0;
			}
		}
		goto found;
	}

	/*
	 * No space in current array.  Expand?
	 */
	if (fdp->fd_nfiles >= lim) {
		spin_unlock_wr(&fdp->fd_spin);
		return (EMFILE);
	}
	fdgrow_locked(fdp, want);
	goto retry;

found:
	KKASSERT(fd < fdp->fd_nfiles);
	if (fd > fdp->fd_lastfile)
		fdp->fd_lastfile = fd;
	if (want <= fdp->fd_freefile)
		fdp->fd_freefile = fd;
	*result = fd;
	KKASSERT(fdp->fd_files[fd].fp == NULL);
	KKASSERT(fdp->fd_files[fd].reserved == 0);
	fdp->fd_files[fd].fileflags = 0;
	fdp->fd_files[fd].reserved = 1;
	fdreserve_locked(fdp, fd, 1);
	spin_unlock_wr(&fdp->fd_spin);
	return (0);
}

/*
 * Check to see whether n user file descriptors
 * are available to the process p.
1165 * 1166 * MPSAFE 1167 */ 1168 int 1169 fdavail(struct proc *p, int n) 1170 { 1171 struct filedesc *fdp = p->p_fd; 1172 struct fdnode *fdnode; 1173 int i, lim, last; 1174 1175 spin_lock_rd(&p->p_limit->p_spin); 1176 lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc); 1177 spin_unlock_rd(&p->p_limit->p_spin); 1178 1179 spin_lock_rd(&fdp->fd_spin); 1180 if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) { 1181 spin_unlock_rd(&fdp->fd_spin); 1182 return (1); 1183 } 1184 last = min(fdp->fd_nfiles, lim); 1185 fdnode = &fdp->fd_files[fdp->fd_freefile]; 1186 for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) { 1187 if (fdnode->fp == NULL && --n <= 0) { 1188 spin_unlock_rd(&fdp->fd_spin); 1189 return (1); 1190 } 1191 } 1192 spin_unlock_rd(&fdp->fd_spin); 1193 return (0); 1194 } 1195 1196 /* 1197 * Revoke open descriptors referencing (f_data, f_type) 1198 * 1199 * Any revoke executed within a prison is only able to 1200 * revoke descriptors for processes within that prison. 1201 * 1202 * Returns 0 on success or an error code. 1203 */ 1204 struct fdrevoke_info { 1205 void *data; 1206 short type; 1207 short unused; 1208 int count; 1209 int intransit; 1210 struct ucred *cred; 1211 struct file *nfp; 1212 }; 1213 1214 static int fdrevoke_check_callback(struct file *fp, void *vinfo); 1215 static int fdrevoke_proc_callback(struct proc *p, void *vinfo); 1216 1217 int 1218 fdrevoke(void *f_data, short f_type, struct ucred *cred) 1219 { 1220 struct fdrevoke_info info; 1221 int error; 1222 1223 bzero(&info, sizeof(info)); 1224 info.data = f_data; 1225 info.type = f_type; 1226 info.cred = cred; 1227 error = falloc(NULL, &info.nfp, NULL); 1228 if (error) 1229 return (error); 1230 1231 /* 1232 * Scan the file pointer table once. dups do not dup file pointers, 1233 * only descriptors, so there is no leak. Set FREVOKED on the fps 1234 * being revoked. 1235 */ 1236 allfiles_scan_exclusive(fdrevoke_check_callback, &info); 1237 1238 /* 1239 * If any fps were marked track down the related descriptors 1240 * and close them. Any dup()s at this point will notice 1241 * the FREVOKED already set in the fp and do the right thing. 1242 * 1243 * Any fps with non-zero msgcounts (aka sent over a unix-domain 1244 * socket) bumped the intransit counter and will require a 1245 * scan. Races against fps leaving the socket are closed by 1246 * the socket code checking for FREVOKED. 1247 */ 1248 if (info.count) 1249 allproc_scan(fdrevoke_proc_callback, &info); 1250 if (info.intransit) 1251 unp_revoke_gc(info.nfp); 1252 fdrop(info.nfp); 1253 return(0); 1254 } 1255 1256 /* 1257 * Locate matching file pointers directly. 1258 */ 1259 static int 1260 fdrevoke_check_callback(struct file *fp, void *vinfo) 1261 { 1262 struct fdrevoke_info *info = vinfo; 1263 1264 /* 1265 * File pointers already flagged for revokation are skipped. 1266 */ 1267 if (fp->f_flag & FREVOKED) 1268 return(0); 1269 1270 /* 1271 * If revoking from a prison file pointers created outside of 1272 * that prison, or file pointers without creds, cannot be revoked. 1273 */ 1274 if (info->cred->cr_prison && 1275 (fp->f_cred == NULL || 1276 info->cred->cr_prison != fp->f_cred->cr_prison)) { 1277 return(0); 1278 } 1279 1280 /* 1281 * If the file pointer matches then mark it for revocation. The 1282 * flag is currently only used by unp_revoke_gc(). 1283 * 1284 * info->count is a heuristic and can race in a SMP environment. 
1285 */ 1286 if (info->data == fp->f_data && info->type == fp->f_type) { 1287 atomic_set_int(&fp->f_flag, FREVOKED); 1288 info->count += fp->f_count; 1289 if (fp->f_msgcount) 1290 ++info->intransit; 1291 } 1292 return(0); 1293 } 1294 1295 /* 1296 * Locate matching file pointers via process descriptor tables. 1297 */ 1298 static int 1299 fdrevoke_proc_callback(struct proc *p, void *vinfo) 1300 { 1301 struct fdrevoke_info *info = vinfo; 1302 struct filedesc *fdp; 1303 struct file *fp; 1304 int n; 1305 1306 if (p->p_stat == SIDL || p->p_stat == SZOMB) 1307 return(0); 1308 if (info->cred->cr_prison && 1309 info->cred->cr_prison != p->p_ucred->cr_prison) { 1310 return(0); 1311 } 1312 1313 /* 1314 * If the controlling terminal of the process matches the 1315 * vnode being revoked we clear the controlling terminal. 1316 * 1317 * The normal spec_close() may not catch this because it 1318 * uses curproc instead of p. 1319 */ 1320 if (p->p_session && info->type == DTYPE_VNODE && 1321 info->data == p->p_session->s_ttyvp) { 1322 p->p_session->s_ttyvp = NULL; 1323 vrele(info->data); 1324 } 1325 1326 /* 1327 * Softref the fdp to prevent it from being destroyed 1328 */ 1329 spin_lock_wr(&p->p_spin); 1330 if ((fdp = p->p_fd) == NULL) { 1331 spin_unlock_wr(&p->p_spin); 1332 return(0); 1333 } 1334 atomic_add_int(&fdp->fd_softrefs, 1); 1335 spin_unlock_wr(&p->p_spin); 1336 1337 /* 1338 * Locate and close any matching file descriptors. 1339 */ 1340 spin_lock_wr(&fdp->fd_spin); 1341 for (n = 0; n < fdp->fd_nfiles; ++n) { 1342 if ((fp = fdp->fd_files[n].fp) == NULL) 1343 continue; 1344 if (fp->f_flag & FREVOKED) { 1345 fhold(info->nfp); 1346 fdp->fd_files[n].fp = info->nfp; 1347 spin_unlock_wr(&fdp->fd_spin); 1348 closef(fp, p); 1349 spin_lock_wr(&fdp->fd_spin); 1350 --info->count; 1351 } 1352 } 1353 spin_unlock_wr(&fdp->fd_spin); 1354 atomic_subtract_int(&fdp->fd_softrefs, 1); 1355 return(0); 1356 } 1357 1358 /* 1359 * falloc: 1360 * Create a new open file structure and reserve a file decriptor 1361 * for the process that refers to it. 1362 * 1363 * Root creds are checked using p, or assumed if p is NULL. If 1364 * resultfd is non-NULL then p must also be non-NULL. No file 1365 * descriptor is reserved if resultfd is NULL. 1366 * 1367 * A file pointer with a refcount of 1 is returned. Note that the 1368 * file pointer is NOT associated with the descriptor. If falloc 1369 * returns success, fsetfd() MUST be called to either associate the 1370 * file pointer or clear the reservation. 1371 * 1372 * MPSAFE 1373 */ 1374 int 1375 falloc(struct proc *p, struct file **resultfp, int *resultfd) 1376 { 1377 static struct timeval lastfail; 1378 static int curfail; 1379 struct file *fp; 1380 int error; 1381 1382 fp = NULL; 1383 1384 /* 1385 * Handle filetable full issues and root overfill. 1386 */ 1387 if (nfiles >= maxfiles - maxfilesrootres && 1388 ((p && p->p_ucred->cr_ruid != 0) || nfiles >= maxfiles)) { 1389 if (ppsratecheck(&lastfail, &curfail, 1)) { 1390 kprintf("kern.maxfiles limit exceeded by uid %d, please see tuning(7).\n", 1391 (p ? p->p_ucred->cr_ruid : -1)); 1392 } 1393 error = ENFILE; 1394 goto done; 1395 } 1396 1397 /* 1398 * Allocate a new file descriptor. 
1399 */ 1400 fp = kmalloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO); 1401 spin_init(&fp->f_spin); 1402 fp->f_count = 1; 1403 fp->f_ops = &badfileops; 1404 fp->f_seqcount = 1; 1405 if (p) 1406 fp->f_cred = crhold(p->p_ucred); 1407 else 1408 fp->f_cred = crhold(proc0.p_ucred); 1409 spin_lock_wr(&filehead_spin); 1410 nfiles++; 1411 LIST_INSERT_HEAD(&filehead, fp, f_list); 1412 spin_unlock_wr(&filehead_spin); 1413 if (resultfd) { 1414 if ((error = fdalloc(p, 0, resultfd)) != 0) { 1415 fdrop(fp); 1416 fp = NULL; 1417 } 1418 } else { 1419 error = 0; 1420 } 1421 done: 1422 *resultfp = fp; 1423 return (error); 1424 } 1425 1426 /* 1427 * MPSAFE 1428 */ 1429 static 1430 int 1431 checkfpclosed(struct filedesc *fdp, int fd, struct file *fp) 1432 { 1433 int error; 1434 1435 spin_lock_rd(&fdp->fd_spin); 1436 if ((unsigned) fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp) 1437 error = EBADF; 1438 else 1439 error = 0; 1440 spin_unlock_rd(&fdp->fd_spin); 1441 return (error); 1442 } 1443 1444 /* 1445 * Associate a file pointer with a previously reserved file descriptor. 1446 * This function always succeeds. 1447 * 1448 * If fp is NULL, the file descriptor is returned to the pool. 1449 */ 1450 1451 /* 1452 * MPSAFE (exclusive spinlock must be held on call) 1453 */ 1454 static void 1455 fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd) 1456 { 1457 KKASSERT((unsigned)fd < fdp->fd_nfiles); 1458 KKASSERT(fdp->fd_files[fd].reserved != 0); 1459 if (fp) { 1460 fhold(fp); 1461 fdp->fd_files[fd].fp = fp; 1462 fdp->fd_files[fd].reserved = 0; 1463 if (fp->f_type == DTYPE_KQUEUE) { 1464 if (fdp->fd_knlistsize < 0) 1465 fdp->fd_knlistsize = 0; 1466 } 1467 } else { 1468 fdp->fd_files[fd].reserved = 0; 1469 fdreserve_locked(fdp, fd, -1); 1470 fdfixup_locked(fdp, fd); 1471 } 1472 } 1473 1474 /* 1475 * MPSAFE 1476 */ 1477 void 1478 fsetfd(struct proc *p, struct file *fp, int fd) 1479 { 1480 struct filedesc *fdp = p->p_fd; 1481 1482 spin_lock_wr(&fdp->fd_spin); 1483 fsetfd_locked(fdp, fp, fd); 1484 spin_unlock_wr(&fdp->fd_spin); 1485 } 1486 1487 /* 1488 * MPSAFE (exclusive spinlock must be held on call) 1489 */ 1490 static 1491 struct file * 1492 funsetfd_locked(struct filedesc *fdp, int fd) 1493 { 1494 struct file *fp; 1495 1496 if ((unsigned)fd >= fdp->fd_nfiles) 1497 return (NULL); 1498 if ((fp = fdp->fd_files[fd].fp) == NULL) 1499 return (NULL); 1500 fdp->fd_files[fd].fp = NULL; 1501 fdp->fd_files[fd].fileflags = 0; 1502 1503 fdreserve_locked(fdp, fd, -1); 1504 fdfixup_locked(fdp, fd); 1505 return(fp); 1506 } 1507 1508 /* 1509 * MPSAFE 1510 */ 1511 int 1512 fgetfdflags(struct filedesc *fdp, int fd, int *flagsp) 1513 { 1514 int error; 1515 1516 spin_lock_rd(&fdp->fd_spin); 1517 if (((u_int)fd) >= fdp->fd_nfiles) { 1518 error = EBADF; 1519 } else if (fdp->fd_files[fd].fp == NULL) { 1520 error = EBADF; 1521 } else { 1522 *flagsp = fdp->fd_files[fd].fileflags; 1523 error = 0; 1524 } 1525 spin_unlock_rd(&fdp->fd_spin); 1526 return (error); 1527 } 1528 1529 /* 1530 * MPSAFE 1531 */ 1532 int 1533 fsetfdflags(struct filedesc *fdp, int fd, int add_flags) 1534 { 1535 int error; 1536 1537 spin_lock_wr(&fdp->fd_spin); 1538 if (((u_int)fd) >= fdp->fd_nfiles) { 1539 error = EBADF; 1540 } else if (fdp->fd_files[fd].fp == NULL) { 1541 error = EBADF; 1542 } else { 1543 fdp->fd_files[fd].fileflags |= add_flags; 1544 error = 0; 1545 } 1546 spin_unlock_wr(&fdp->fd_spin); 1547 return (error); 1548 } 1549 1550 /* 1551 * MPSAFE 1552 */ 1553 int 1554 fclrfdflags(struct filedesc *fdp, int fd, int rem_flags) 1555 { 1556 int 
error; 1557 1558 spin_lock_wr(&fdp->fd_spin); 1559 if (((u_int)fd) >= fdp->fd_nfiles) { 1560 error = EBADF; 1561 } else if (fdp->fd_files[fd].fp == NULL) { 1562 error = EBADF; 1563 } else { 1564 fdp->fd_files[fd].fileflags &= ~rem_flags; 1565 error = 0; 1566 } 1567 spin_unlock_wr(&fdp->fd_spin); 1568 return (error); 1569 } 1570 1571 void 1572 fsetcred(struct file *fp, struct ucred *cr) 1573 { 1574 crhold(cr); 1575 crfree(fp->f_cred); 1576 fp->f_cred = cr; 1577 } 1578 1579 /* 1580 * Free a file descriptor. 1581 */ 1582 static 1583 void 1584 ffree(struct file *fp) 1585 { 1586 KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!")); 1587 spin_lock_wr(&filehead_spin); 1588 LIST_REMOVE(fp, f_list); 1589 nfiles--; 1590 spin_unlock_wr(&filehead_spin); 1591 crfree(fp->f_cred); 1592 if (fp->f_nchandle.ncp) 1593 cache_drop(&fp->f_nchandle); 1594 kfree(fp, M_FILE); 1595 } 1596 1597 /* 1598 * called from init_main, initialize filedesc0 for proc0. 1599 */ 1600 void 1601 fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask) 1602 { 1603 p0->p_fd = fdp0; 1604 p0->p_fdtol = NULL; 1605 fdp0->fd_refcnt = 1; 1606 fdp0->fd_cmask = cmask; 1607 fdp0->fd_files = fdp0->fd_builtin_files; 1608 fdp0->fd_nfiles = NDFILE; 1609 fdp0->fd_lastfile = -1; 1610 spin_init(&fdp0->fd_spin); 1611 } 1612 1613 /* 1614 * Build a new filedesc structure. 1615 * 1616 * NOT MPSAFE (vref) 1617 */ 1618 struct filedesc * 1619 fdinit(struct proc *p) 1620 { 1621 struct filedesc *newfdp; 1622 struct filedesc *fdp = p->p_fd; 1623 1624 newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO); 1625 spin_lock_rd(&fdp->fd_spin); 1626 if (fdp->fd_cdir) { 1627 newfdp->fd_cdir = fdp->fd_cdir; 1628 vref(newfdp->fd_cdir); 1629 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir); 1630 } 1631 1632 /* 1633 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of 1634 * proc0, but should unconditionally exist in other processes. 1635 */ 1636 if (fdp->fd_rdir) { 1637 newfdp->fd_rdir = fdp->fd_rdir; 1638 vref(newfdp->fd_rdir); 1639 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir); 1640 } 1641 if (fdp->fd_jdir) { 1642 newfdp->fd_jdir = fdp->fd_jdir; 1643 vref(newfdp->fd_jdir); 1644 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir); 1645 } 1646 spin_unlock_rd(&fdp->fd_spin); 1647 1648 /* Create the file descriptor table. */ 1649 newfdp->fd_refcnt = 1; 1650 newfdp->fd_cmask = cmask; 1651 newfdp->fd_files = newfdp->fd_builtin_files; 1652 newfdp->fd_nfiles = NDFILE; 1653 newfdp->fd_knlistsize = -1; 1654 newfdp->fd_lastfile = -1; 1655 spin_init(&newfdp->fd_spin); 1656 1657 return (newfdp); 1658 } 1659 1660 /* 1661 * Share a filedesc structure. 1662 * 1663 * MPSAFE 1664 */ 1665 struct filedesc * 1666 fdshare(struct proc *p) 1667 { 1668 struct filedesc *fdp; 1669 1670 fdp = p->p_fd; 1671 spin_lock_wr(&fdp->fd_spin); 1672 fdp->fd_refcnt++; 1673 spin_unlock_wr(&fdp->fd_spin); 1674 return (fdp); 1675 } 1676 1677 /* 1678 * Copy a filedesc structure. 1679 * 1680 * MPSAFE 1681 */ 1682 struct filedesc * 1683 fdcopy(struct proc *p) 1684 { 1685 struct filedesc *fdp = p->p_fd; 1686 struct filedesc *newfdp; 1687 struct fdnode *fdnode; 1688 int i; 1689 int ni; 1690 1691 /* 1692 * Certain daemons might not have file descriptors. 1693 */ 1694 if (fdp == NULL) 1695 return (NULL); 1696 1697 /* 1698 * Allocate the new filedesc and fd_files[] array. This can race 1699 * with operations by other threads on the fdp so we have to be 1700 * careful. 
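	 *
	 * The copy is sized like the source table: a 2^N-1 entry array just
	 * big enough to cover fd_lastfile.  For instance, copying a table
	 * whose fd_nfiles is 255 but whose highest open descriptor is 20
	 * shrinks the new fd_files[] array to 31 entries.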
	 */
	newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK | M_ZERO);
again:
	spin_lock_rd(&fdp->fd_spin);
	if (fdp->fd_lastfile < NDFILE) {
		newfdp->fd_files = newfdp->fd_builtin_files;
		i = NDFILE;
	} else {
		/*
		 * We have to allocate (2^N-1) entries for our in-place
		 * binary tree.  Allow the table to shrink.
		 */
		i = fdp->fd_nfiles;
		ni = (i - 1) / 2;
		while (ni > fdp->fd_lastfile && ni > NDFILE) {
			i = ni;
			ni = (i - 1) / 2;
		}
		spin_unlock_rd(&fdp->fd_spin);
		newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
					   M_FILEDESC, M_WAITOK | M_ZERO);

		/*
		 * Check for race, retry
		 */
		spin_lock_rd(&fdp->fd_spin);
		if (i <= fdp->fd_lastfile) {
			spin_unlock_rd(&fdp->fd_spin);
			kfree(newfdp->fd_files, M_FILEDESC);
			goto again;
		}
	}

	/*
	 * Dup the remaining fields.  vref() and cache_hold() can be
	 * safely called while holding the read spinlock on fdp.
	 *
	 * The read spinlock on fdp is still being held.
	 *
	 * NOTE: vref and cache_hold calls for the case where the vnode
	 * or cache entry already has at least one ref may be called
	 * while holding spin locks.
	 */
	if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}
	/*
	 * We must check for fd_rdir here, at least for now because
	 * the init process is created before we have access to the
	 * rootvnode to take a reference to it.
	 */
	if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	newfdp->fd_refcnt = 1;
	newfdp->fd_nfiles = i;
	newfdp->fd_lastfile = fdp->fd_lastfile;
	newfdp->fd_freefile = fdp->fd_freefile;
	newfdp->fd_cmask = fdp->fd_cmask;
	newfdp->fd_knlist = NULL;
	newfdp->fd_knlistsize = -1;
	newfdp->fd_knhash = NULL;
	newfdp->fd_knhashmask = 0;
	spin_init(&newfdp->fd_spin);

	/*
	 * Copy the descriptor table through (i).  This also copies the
	 * allocation state.  Then go through and ref the file pointers
	 * and clean up any KQ descriptors.
	 *
	 * kq descriptors cannot be copied.  Since we haven't ref'd the
	 * copied files yet we can ignore the return value from funsetfd().
	 *
	 * The read spinlock on fdp is still being held.
	 */
	bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
	for (i = 0; i < newfdp->fd_nfiles; ++i) {
		fdnode = &newfdp->fd_files[i];
		if (fdnode->reserved) {
			fdreserve_locked(newfdp, i, -1);
			fdnode->reserved = 0;
			fdfixup_locked(newfdp, i);
		} else if (fdnode->fp) {
			if (fdnode->fp->f_type == DTYPE_KQUEUE) {
				(void)funsetfd_locked(newfdp, i);
			} else {
				fhold(fdnode->fp);
			}
		}
	}
	spin_unlock_rd(&fdp->fd_spin);
	return (newfdp);
}

/*
 * Release a filedesc structure.
1803 * 1804 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE) 1805 */ 1806 void 1807 fdfree(struct proc *p, struct filedesc *repl) 1808 { 1809 struct filedesc *fdp; 1810 struct fdnode *fdnode; 1811 int i; 1812 struct filedesc_to_leader *fdtol; 1813 struct file *fp; 1814 struct vnode *vp; 1815 struct flock lf; 1816 1817 /* 1818 * Certain daemons might not have file descriptors. 1819 */ 1820 fdp = p->p_fd; 1821 if (fdp == NULL) { 1822 p->p_fd = repl; 1823 return; 1824 } 1825 1826 /* 1827 * Severe messing around to follow. 1828 */ 1829 spin_lock_wr(&fdp->fd_spin); 1830 1831 /* Check for special need to clear POSIX style locks */ 1832 fdtol = p->p_fdtol; 1833 if (fdtol != NULL) { 1834 KASSERT(fdtol->fdl_refcount > 0, 1835 ("filedesc_to_refcount botch: fdl_refcount=%d", 1836 fdtol->fdl_refcount)); 1837 if (fdtol->fdl_refcount == 1 && 1838 (p->p_leader->p_flag & P_ADVLOCK) != 0) { 1839 for (i = 0; i <= fdp->fd_lastfile; ++i) { 1840 fdnode = &fdp->fd_files[i]; 1841 if (fdnode->fp == NULL || 1842 fdnode->fp->f_type != DTYPE_VNODE) { 1843 continue; 1844 } 1845 fp = fdnode->fp; 1846 fhold(fp); 1847 spin_unlock_wr(&fdp->fd_spin); 1848 1849 lf.l_whence = SEEK_SET; 1850 lf.l_start = 0; 1851 lf.l_len = 0; 1852 lf.l_type = F_UNLCK; 1853 vp = (struct vnode *)fp->f_data; 1854 (void) VOP_ADVLOCK(vp, 1855 (caddr_t)p->p_leader, 1856 F_UNLCK, 1857 &lf, 1858 F_POSIX); 1859 fdrop(fp); 1860 spin_lock_wr(&fdp->fd_spin); 1861 } 1862 } 1863 retry: 1864 if (fdtol->fdl_refcount == 1) { 1865 if (fdp->fd_holdleaderscount > 0 && 1866 (p->p_leader->p_flag & P_ADVLOCK) != 0) { 1867 /* 1868 * close() or do_dup() has cleared a reference 1869 * in a shared file descriptor table. 1870 */ 1871 fdp->fd_holdleaderswakeup = 1; 1872 ssleep(&fdp->fd_holdleaderscount, 1873 &fdp->fd_spin, 0, "fdlhold", 0); 1874 goto retry; 1875 } 1876 if (fdtol->fdl_holdcount > 0) { 1877 /* 1878 * Ensure that fdtol->fdl_leader 1879 * remains valid in closef(). 1880 */ 1881 fdtol->fdl_wakeup = 1; 1882 ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0); 1883 goto retry; 1884 } 1885 } 1886 fdtol->fdl_refcount--; 1887 if (fdtol->fdl_refcount == 0 && 1888 fdtol->fdl_holdcount == 0) { 1889 fdtol->fdl_next->fdl_prev = fdtol->fdl_prev; 1890 fdtol->fdl_prev->fdl_next = fdtol->fdl_next; 1891 } else { 1892 fdtol = NULL; 1893 } 1894 p->p_fdtol = NULL; 1895 if (fdtol != NULL) { 1896 spin_unlock_wr(&fdp->fd_spin); 1897 kfree(fdtol, M_FILEDESC_TO_LEADER); 1898 spin_lock_wr(&fdp->fd_spin); 1899 } 1900 } 1901 if (--fdp->fd_refcnt > 0) { 1902 spin_unlock_wr(&fdp->fd_spin); 1903 spin_lock_wr(&p->p_spin); 1904 p->p_fd = repl; 1905 spin_unlock_wr(&p->p_spin); 1906 return; 1907 } 1908 1909 /* 1910 * Even though we are the last reference to the structure allproc 1911 * scans may still reference the structure. Maintain proper 1912 * locks until we can replace p->p_fd. 1913 * 1914 * Also note that kqueue's closef still needs to reference the 1915 * fdp via p->p_fd, so we have to close the descriptors before 1916 * we replace p->p_fd. 1917 */ 1918 for (i = 0; i <= fdp->fd_lastfile; ++i) { 1919 if (fdp->fd_files[i].fp) { 1920 fp = funsetfd_locked(fdp, i); 1921 if (fp) { 1922 spin_unlock_wr(&fdp->fd_spin); 1923 closef(fp, p); 1924 spin_lock_wr(&fdp->fd_spin); 1925 } 1926 } 1927 } 1928 spin_unlock_wr(&fdp->fd_spin); 1929 1930 /* 1931 * Interlock against an allproc scan operations (typically frevoke). 1932 */ 1933 spin_lock_wr(&p->p_spin); 1934 p->p_fd = repl; 1935 spin_unlock_wr(&p->p_spin); 1936 1937 /* 1938 * Wait for any softrefs to go away. 
This race rarely occurs so 1939 * we can use a non-critical-path style poll/sleep loop. The 1940 * race only occurs against allproc scans. 1941 * 1942 * No new softrefs can occur with the fdp disconnected from the 1943 * process. 1944 */ 1945 if (fdp->fd_softrefs) { 1946 kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid); 1947 while (fdp->fd_softrefs) 1948 tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1); 1949 } 1950 1951 if (fdp->fd_files != fdp->fd_builtin_files) 1952 kfree(fdp->fd_files, M_FILEDESC); 1953 if (fdp->fd_cdir) { 1954 cache_drop(&fdp->fd_ncdir); 1955 vrele(fdp->fd_cdir); 1956 } 1957 if (fdp->fd_rdir) { 1958 cache_drop(&fdp->fd_nrdir); 1959 vrele(fdp->fd_rdir); 1960 } 1961 if (fdp->fd_jdir) { 1962 cache_drop(&fdp->fd_njdir); 1963 vrele(fdp->fd_jdir); 1964 } 1965 if (fdp->fd_knlist) 1966 kfree(fdp->fd_knlist, M_KQUEUE); 1967 if (fdp->fd_knhash) 1968 kfree(fdp->fd_knhash, M_KQUEUE); 1969 kfree(fdp, M_FILEDESC); 1970 } 1971 1972 /* 1973 * Retrieve and reference the file pointer associated with a descriptor. 1974 * 1975 * MPSAFE 1976 */ 1977 struct file * 1978 holdfp(struct filedesc *fdp, int fd, int flag) 1979 { 1980 struct file* fp; 1981 1982 spin_lock_rd(&fdp->fd_spin); 1983 if (((u_int)fd) >= fdp->fd_nfiles) { 1984 fp = NULL; 1985 goto done; 1986 } 1987 if ((fp = fdp->fd_files[fd].fp) == NULL) 1988 goto done; 1989 if ((fp->f_flag & flag) == 0 && flag != -1) { 1990 fp = NULL; 1991 goto done; 1992 } 1993 fhold(fp); 1994 done: 1995 spin_unlock_rd(&fdp->fd_spin); 1996 return (fp); 1997 } 1998 1999 /* 2000 * holdsock() - load the struct file pointer associated 2001 * with a socket into *fpp. If an error occurs, non-zero 2002 * will be returned and *fpp will be set to NULL. 2003 * 2004 * MPSAFE 2005 */ 2006 int 2007 holdsock(struct filedesc *fdp, int fd, struct file **fpp) 2008 { 2009 struct file *fp; 2010 int error; 2011 2012 spin_lock_rd(&fdp->fd_spin); 2013 if ((unsigned)fd >= fdp->fd_nfiles) { 2014 error = EBADF; 2015 fp = NULL; 2016 goto done; 2017 } 2018 if ((fp = fdp->fd_files[fd].fp) == NULL) { 2019 error = EBADF; 2020 goto done; 2021 } 2022 if (fp->f_type != DTYPE_SOCKET) { 2023 error = ENOTSOCK; 2024 goto done; 2025 } 2026 fhold(fp); 2027 error = 0; 2028 done: 2029 spin_unlock_rd(&fdp->fd_spin); 2030 *fpp = fp; 2031 return (error); 2032 } 2033 2034 /* 2035 * Convert a user file descriptor to a held file pointer. 2036 * 2037 * MPSAFE 2038 */ 2039 int 2040 holdvnode(struct filedesc *fdp, int fd, struct file **fpp) 2041 { 2042 struct file *fp; 2043 int error; 2044 2045 spin_lock_rd(&fdp->fd_spin); 2046 if ((unsigned)fd >= fdp->fd_nfiles) { 2047 error = EBADF; 2048 fp = NULL; 2049 goto done; 2050 } 2051 if ((fp = fdp->fd_files[fd].fp) == NULL) { 2052 error = EBADF; 2053 goto done; 2054 } 2055 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) { 2056 fp = NULL; 2057 error = EINVAL; 2058 goto done; 2059 } 2060 fhold(fp); 2061 error = 0; 2062 done: 2063 spin_unlock_rd(&fdp->fd_spin); 2064 *fpp = fp; 2065 return (error); 2066 } 2067 2068 /* 2069 * For setugid programs, we don't want to people to use that setugidness 2070 * to generate error messages which write to a file which otherwise would 2071 * otherwise be off-limits to the process. 2072 * 2073 * This is a gross hack to plug the hole. A better solution would involve 2074 * a special vop or other form of generalized access control mechanism. We 2075 * go ahead and just reject all procfs file systems accesses as dangerous. 
 *
 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 * sufficient.  We also don't check for setugidness since we know we are.
 */
static int
is_unsafe(struct file *fp)
{
	if (fp->f_type == DTYPE_VNODE &&
	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
		return (1);
	return (0);
}

/*
 * Make this setugid thing safe, if at all possible.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
setugidsafety(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * note: fdp->fd_files may be reallocated out from under us while
	 * we are blocked in a close.  Be careful!
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (i > 2)
			break;
		if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
			struct file *fp;

			if (i < fdp->fd_knlistsize)
				knote_fdclose(p, i);
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL)
				closef(fp, p);
		}
	}
}

/*
 * Close any files on exec?
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
fdcloseexec(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * We cannot cache fd_files since operations may block and rip
	 * them out from under us.
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (fdp->fd_files[i].fp != NULL &&
		    (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
			struct file *fp;

			if (i < fdp->fd_knlistsize)
				knote_fdclose(p, i);
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL)
				closef(fp, p);
		}
	}
}

/*
 * It is unsafe for set[ug]id processes to be started with file
 * descriptors 0..2 closed, as these descriptors are given implicit
 * significance in the Standard C library.  fdcheckstd() will create a
 * descriptor referencing /dev/null for each of stdin, stdout, and
 * stderr that is not already open.
 *
 * NOT MPSAFE - calls falloc, vn_open, etc
 */
int
fdcheckstd(struct proc *p)
{
	struct nlookupdata nd;
	struct filedesc *fdp;
	struct file *fp;
	int retval;
	int i, error, flags, devnull;

	fdp = p->p_fd;
	if (fdp == NULL)
		return (0);
	devnull = -1;
	error = 0;
	for (i = 0; i < 3; i++) {
		if (fdp->fd_files[i].fp != NULL)
			continue;
		if (devnull < 0) {
			if ((error = falloc(p, &fp, &devnull)) != 0)
				break;

			error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
					     NLC_FOLLOW|NLC_LOCKVP);
			flags = FREAD | FWRITE;
			if (error == 0)
				error = vn_open(&nd, fp, flags, 0);
			if (error == 0)
				fsetfd(p, fp, devnull);
			else
				fsetfd(p, NULL, devnull);
			fdrop(fp);
			nlookup_done(&nd);
			if (error)
				break;
			KKASSERT(i == devnull);
		} else {
			error = kern_dup(DUP_FIXED, devnull, i, &retval);
			if (error != 0)
				break;
		}
	}
	return (error);
}

/*
 * Internal form of close.
/*
 * Internal form of close.
 * Decrement reference count on file structure.
 * Note: p may be NULL when closing a file that was being passed in a
 * message.
 *
 * MPALMOSTSAFE - acquires mplock for VOP operations
 */
int
closef(struct file *fp, struct proc *p)
{
	struct vnode *vp;
	struct flock lf;
	struct filedesc_to_leader *fdtol;

	if (fp == NULL)
		return (0);

	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	 */
	if (p != NULL && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		get_mplock();
		if ((p->p_leader->p_flag & P_ADVLOCK) != 0) {
			lf.l_whence = SEEK_SET;
			lf.l_start = 0;
			lf.l_len = 0;
			lf.l_type = F_UNLCK;
			vp = (struct vnode *)fp->f_data;
			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
					   &lf, F_POSIX);
		}
		fdtol = p->p_fdtol;
		if (fdtol != NULL) {
			/*
			 * Handle special case where file descriptor table
			 * is shared between multiple process leaders.
			 */
			for (fdtol = fdtol->fdl_next;
			     fdtol != p->p_fdtol;
			     fdtol = fdtol->fdl_next) {
				if ((fdtol->fdl_leader->p_flag &
				     P_ADVLOCK) == 0)
					continue;
				fdtol->fdl_holdcount++;
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				(void) VOP_ADVLOCK(vp,
						   (caddr_t)fdtol->fdl_leader,
						   F_UNLCK, &lf, F_POSIX);
				fdtol->fdl_holdcount--;
				if (fdtol->fdl_holdcount == 0 &&
				    fdtol->fdl_wakeup != 0) {
					fdtol->fdl_wakeup = 0;
					wakeup(fdtol);
				}
			}
		}
		rel_mplock();
	}
	return (fdrop(fp));
}
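/*
 * Illustrative sketch, not compiled: the POSIX semantic that the unlock
 * pass in closef() implements.  Because any close by the owning process
 * releases ALL of its POSIX record locks on the underlying file, closing
 * a second descriptor for the same file silently drops a lock taken
 * through the first one.  Path and names are hypothetical.
 */
#if 0
/* userland illustration, not kernel code */
#include <fcntl.h>
#include <unistd.h>

static void
example_posix_lock_release(void)
{
	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
	int fd1 = open("/tmp/example.lock", O_RDWR);
	int fd2 = open("/tmp/example.lock", O_RDWR);	/* same file */

	fcntl(fd1, F_SETLK, &fl);	/* whole-file lock via fd1 */
	close(fd2);			/* releases the lock taken via fd1 */
	close(fd1);
}
#endif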
/*
 * MPSAFE
 *
 * fhold() can only be called if f_count is already at least 1 (i.e. the
 * caller of fhold() already has a reference to the file pointer in some
 * manner or other).
 *
 * f_count is not spin-locked.  Instead, atomic ops are used for
 * incrementing, decrementing, and handling the 1->0 transition.
 */
void
fhold(struct file *fp)
{
	atomic_add_int(&fp->f_count, 1);
}

/*
 * fdrop() - drop a reference to a descriptor
 *
 * MPALMOSTSAFE - acquires mplock for final close sequence
 */
int
fdrop(struct file *fp)
{
	struct flock lf;
	struct vnode *vp;
	int error;

	/*
	 * A combined fetch and subtract is needed to properly detect
	 * 1->0 transitions, otherwise two cpus dropping from a ref
	 * count of 2 might both try to run the 1->0 code.
	 */
	if (atomic_fetchadd_int(&fp->f_count, -1) > 1)
		return (0);

	get_mplock();

	/*
	 * The last reference has gone away, we own the fp structure free
	 * and clear.
	 */
	if (fp->f_count < 0)
		panic("fdrop: count < 0");
	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fp->f_data;
		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
	}
	if (fp->f_ops != &badfileops)
		error = fo_close(fp);
	else
		error = 0;
	ffree(fp);
	rel_mplock();
	return (error);
}

/*
 * Apply an advisory lock on a file descriptor.
 *
 * Just attempt to get a record lock of the requested type on
 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 */
int
sys_flock(struct flock_args *uap)
{
	struct proc *p = curproc;
	struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int error;

	if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_VNODE) {
		error = EOPNOTSUPP;
		goto done;
	}
	vp = (struct vnode *)fp->f_data;
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	if (uap->how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		fp->f_flag &= ~FHASLOCK;
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
		goto done;
	}
	if (uap->how & LOCK_EX)
		lf.l_type = F_WRLCK;
	else if (uap->how & LOCK_SH)
		lf.l_type = F_RDLCK;
	else {
		error = EBADF;
		goto done;
	}
	fp->f_flag |= FHASLOCK;
	if (uap->how & LOCK_NB)
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
	else
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
done:
	fdrop(fp);
	return (error);
}

/*
 * File Descriptor pseudo-device driver (/dev/fd/).
 *
 * Opening minor device N dup()s the file (if any) connected to file
 * descriptor N belonging to the calling process.  Note that this driver
 * consists of only the ``open()'' routine, because all subsequent
 * references to this file will be direct to the other driver.
 */
/* ARGSUSED */
static int
fdopen(struct dev_open_args *ap)
{
	thread_t td = curthread;

	KKASSERT(td->td_lwp != NULL);

	/*
	 * XXX Kludge: set curlwp->lwp_dupfd to contain the value of the
	 * file descriptor being sought for duplication.  The error
	 * return ensures that the vnode for this device will be released
	 * by vn_open.  Open will detect this special error and take the
	 * actions in dupfdopen below.  Other callers of vn_open or VOP_OPEN
	 * will simply report the error.
	 */
	td->td_lwp->lwp_dupfd = minor(ap->a_head.a_dev);
	return (ENODEV);
}
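/*
 * Illustrative sketch, not compiled: what the fdopen()/dupfdopen() dance
 * looks like from userland.  Opening /dev/fd/N behaves roughly like
 * dup(2)ing descriptor N of the calling process, subject to the mode
 * check in dupfdopen() below.  The helper name is hypothetical.
 */
#if 0
/* userland illustration, not kernel code */
#include <fcntl.h>
#include <stdio.h>

static int
example_dup_via_devfd(int fd)
{
	char path[32];

	snprintf(path, sizeof(path), "/dev/fd/%d", fd);
	return (open(path, O_RDONLY));	/* roughly dup(fd), read-only mode */
}
#endif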
/*
 * The caller has reserved the file descriptor dfd for us.  On success we
 * must fsetfd() it.  On failure the caller will clean it up.
 *
 * NOT MPSAFE - isn't getting spinlocks, possibly other things
 */
int
dupfdopen(struct proc *p, int dfd, int sfd, int mode, int error)
{
	struct filedesc *fdp = p->p_fd;
	struct file *wfp;
	struct file *xfp;
	int werror;

	if ((wfp = holdfp(fdp, sfd, -1)) == NULL)
		return (EBADF);

	/*
	 * Close a revoke/dup race.  Duping a descriptor marked as revoked
	 * will dup a dummy descriptor instead of the real one.
	 */
	if (wfp->f_flag & FREVOKED) {
		kprintf("Warning: attempt to dup() a revoked descriptor\n");
		fdrop(wfp);
		wfp = NULL;
		werror = falloc(NULL, &wfp, NULL);
		if (werror)
			return (werror);
	}

	/*
	 * There are two cases of interest here.
	 *
	 * For ENODEV simply dup sfd to file descriptor dfd and return.
	 *
	 * For ENXIO steal away the file structure from sfd and store it
	 * in dfd.  sfd is effectively closed by this operation.
	 *
	 * Any other error code is just returned.
	 */
	switch (error) {
	case ENODEV:
		/*
		 * Check that the mode the file is being opened for is a
		 * subset of the mode of the existing descriptor.
		 */
		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
			error = EACCES;
			break;
		}
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd(p, wfp, dfd);
		error = 0;
		break;
	case ENXIO:
		/*
		 * Steal away the file pointer from sfd and stuff it into
		 * dfd.
		 */
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd(p, wfp, dfd);
		if ((xfp = funsetfd_locked(fdp, sfd)) != NULL)
			fdrop(xfp);
		error = 0;
		break;
	default:
		break;
	}
	fdrop(wfp);
	return (error);
}

/*
 * NOT MPSAFE - I think these refer to a common file descriptor table
 * and we need to spinlock that to link fdtol in.
 */
struct filedesc_to_leader *
filedesc_to_leader_alloc(struct filedesc_to_leader *old,
			 struct proc *leader)
{
	struct filedesc_to_leader *fdtol;

	fdtol = kmalloc(sizeof(struct filedesc_to_leader),
			M_FILEDESC_TO_LEADER, M_WAITOK);
	fdtol->fdl_refcount = 1;
	fdtol->fdl_holdcount = 0;
	fdtol->fdl_wakeup = 0;
	fdtol->fdl_leader = leader;
	if (old != NULL) {
		fdtol->fdl_next = old->fdl_next;
		fdtol->fdl_prev = old;
		old->fdl_next = fdtol;
		fdtol->fdl_next->fdl_prev = fdtol;
	} else {
		fdtol->fdl_next = fdtol;
		fdtol->fdl_prev = fdtol;
	}
	return fdtol;
}

/*
 * Scan all file pointers in the system.  The callback is made with
 * the master list spinlock held exclusively.
 *
 * MPSAFE
 */
void
allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
{
	struct file *fp;
	int res;

	spin_lock_wr(&filehead_spin);
	LIST_FOREACH(fp, &filehead, f_list) {
		res = callback(fp, data);
		if (res < 0)
			break;
	}
	spin_unlock_wr(&filehead_spin);
}
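/*
 * Illustrative sketch, not compiled: a minimal allfiles_scan_exclusive()
 * consumer.  The callback runs with the filehead spinlock held
 * exclusively, so it must not block; returning a negative value stops
 * the scan early.  The example names are hypothetical.
 */
#if 0
static int
example_count_vnodes_callback(struct file *fp, void *data)
{
	int *countp = data;

	if (fp->f_type == DTYPE_VNODE)
		++*countp;
	return (0);		/* keep scanning; < 0 would terminate */
}

static int
example_count_vnodes(void)
{
	int count = 0;

	allfiles_scan_exclusive(example_count_vnodes_callback, &count);
	return (count);
}
#endif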
/*
 * Get file structures.
 *
 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
 */

struct sysctl_kern_file_info {
	int count;
	int error;
	struct sysctl_req *req;
};

static int sysctl_kern_file_callback(struct proc *p, void *data);

static int
sysctl_kern_file(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_kern_file_info info;

	/*
	 * Note: because the number of file descriptors is calculated
	 * in different ways for sizing vs returning the data,
	 * there is information leakage from the first loop.  However,
	 * it is of a similar order of magnitude to the leakage from
	 * global system statistics such as kern.openfiles.
	 *
	 * When just doing a count, note that we cannot just count
	 * the elements and add f_count via the filehead list because
	 * threaded processes share their descriptor table and f_count might
	 * still be '1' in that case.
	 *
	 * Since the SYSCTL op can block, we must hold the process to
	 * prevent it being ripped out from under us either in the
	 * file descriptor loop or in the greater LIST_FOREACH.  The
	 * process may be in varying states of disrepair.  If the process
	 * is in SZOMB we may have caught it just as it is being removed
	 * from the allproc list; we must skip it in that case to maintain
	 * an unbroken chain through the allproc list.
	 */
	info.count = 0;
	info.error = 0;
	info.req = req;
	allproc_scan(sysctl_kern_file_callback, &info);

	/*
	 * When just calculating the size, overestimate a bit to try to
	 * prevent system activity from causing the buffer-fill call
	 * to fail later on.
	 */
	if (req->oldptr == NULL) {
		info.count = (info.count + 16) + (info.count / 10);
		info.error = SYSCTL_OUT(req, NULL,
					info.count * sizeof(struct kinfo_file));
	}
	return (info.error);
}
static int
sysctl_kern_file_callback(struct proc *p, void *data)
{
	struct sysctl_kern_file_info *info = data;
	struct kinfo_file kf;
	struct filedesc *fdp;
	struct file *fp;
	uid_t uid;
	int n;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return(0);
	if (!PRISON_CHECK(info->req->td->td_proc->p_ucred, p->p_ucred))
		return(0);

	/*
	 * Softref the fdp to prevent it from being destroyed
	 */
	spin_lock_wr(&p->p_spin);
	if ((fdp = p->p_fd) == NULL) {
		spin_unlock_wr(&p->p_spin);
		return(0);
	}
	atomic_add_int(&fdp->fd_softrefs, 1);
	spin_unlock_wr(&p->p_spin);

	/*
	 * The fdp's own spinlock prevents the contents from being
	 * modified.
	 */
	spin_lock_rd(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
			continue;
		if (info->req->oldptr == NULL) {
			++info->count;
		} else {
			uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
			kcore_make_file(&kf, fp, p->p_pid, uid, n);
			spin_unlock_rd(&fdp->fd_spin);
			info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
			spin_lock_rd(&fdp->fd_spin);
			if (info->error)
				break;
		}
	}
	spin_unlock_rd(&fdp->fd_spin);
	atomic_subtract_int(&fdp->fd_softrefs, 1);
	if (info->error)
		return(-1);
	return(0);
}

SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
	    0, 0, sysctl_kern_file, "S,file", "Entire file table");

SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
	   &maxfilesperproc, 0, "Maximum files allowed open per process");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
	   &maxfiles, 0, "Maximum number of files");

SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
	   &maxfilesrootres, 0, "Descriptors reserved for root use");

SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
	   &nfiles, 0, "System-wide number of open files");

static void
fildesc_drvinit(void *unused)
{
	int fd;

	for (fd = 0; fd < NUMFDESC; fd++) {
		make_dev(&fildesc_ops, fd,
			 UID_BIN, GID_BIN, 0666, "fd/%d", fd);
	}

	kprintf("fildesc_drvinit() building stdin, stdout, stderr: \n");

	make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
	make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
	make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
}

/*
 * MPSAFE
 */
struct fileops badfileops = {
	.fo_read = badfo_readwrite,
	.fo_write = badfo_readwrite,
	.fo_ioctl = badfo_ioctl,
	.fo_poll = badfo_poll,
	.fo_kqfilter = badfo_kqfilter,
	.fo_stat = badfo_stat,
	.fo_close = badfo_close,
	.fo_shutdown = badfo_shutdown
};

/*
 * MPSAFE
 */
static int
badfo_readwrite(
	struct file *fp,
	struct uio *uio,
	struct ucred *cred,
	int flags
) {
	return (EBADF);
}

/*
 * MPSAFE
 */
static int
badfo_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *cred)
{
	return (EBADF);
}

/*
 * MPSAFE
 */
static int
badfo_poll(struct file *fp, int events, struct ucred *cred)
{
	return (0);
}

/*
 * MPSAFE
 */
static int
badfo_kqfilter(struct file *fp, struct knote *kn)
{
	return (0);
}

static int
badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	return (EBADF);
}

/*
 * MPSAFE
 */
static int
badfo_close(struct file *fp)
{
	return (EBADF);
}

/*
 * MPSAFE
 */
static int
badfo_shutdown(struct file *fp, int how)
{
	return (EBADF);
}

/*
 * MPSAFE
 */
int
nofo_shutdown(struct file *fp, int how)
{
	return (EOPNOTSUPP);
}

SYSINIT(fildescdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,
	fildesc_drvinit,NULL)
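/*
 * Illustrative sketch, not compiled: the userland two-call pattern that
 * the size overestimate in sysctl_kern_file() exists to support.  The
 * first sysctl(3) call probes the required size; the second fills the
 * buffer with kinfo_file records.  The helper name is hypothetical.
 */
#if 0
/* userland illustration, not kernel code */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/kinfo.h>
#include <stdlib.h>

static struct kinfo_file *
example_read_kern_file(size_t *lenp)
{
	int mib[2] = { CTL_KERN, KERN_FILE };
	size_t len = 0;
	void *buf;

	if (sysctl(mib, 2, NULL, &len, NULL, 0) < 0)
		return (NULL);			/* size probe failed */
	if ((buf = malloc(len)) == NULL)
		return (NULL);
	if (sysctl(mib, 2, buf, &len, NULL, 0) < 0) {
		free(buf);
		return (NULL);
	}
	*lenp = len;
	return (buf);
}
#endif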