/*	$NetBSD: vfs_vnops.c,v 1.237 2023/03/13 18:13:18 riastradh Exp $	*/

/*-
 * Copyright (c) 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.237 2023/03/13 18:13:18 riastradh Exp $");

#include "veriexec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode_impl.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/poll.h>
#include <sys/kauth.h>
#include <sys/syslog.h>
#include <sys/fstrans.h>
#include <sys/atomic.h>
#include <sys/filedesc.h>
#include <sys/wapbl.h>
#include <sys/mman.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_device.h>

#ifdef UNION
#include <fs/union/union.h>
#endif

#ifndef COMPAT_ZERODEV
#define COMPAT_ZERODEV(dev)	(0)
#endif

int (*vn_union_readdir_hook)(struct vnode **, struct file *, struct lwp *);

#include <sys/verified_exec.h>

static int vn_read(file_t *fp, off_t *offset, struct uio *uio,
	    kauth_cred_t cred, int flags);
static int vn_write(file_t *fp, off_t *offset, struct uio *uio,
	    kauth_cred_t cred, int flags);
static int vn_closefile(file_t *fp);
static int vn_poll(file_t *fp, int events);
static int vn_fcntl(file_t *fp, u_int com, void *data);
static int vn_statfile(file_t *fp, struct stat *sb);
static int vn_ioctl(file_t *fp, u_long com, void *data);
static int vn_mmap(struct file *, off_t *, size_t, int, int *, int *,
	    struct uvm_object **, int *);
static int vn_seek(struct file *, off_t, int, off_t *, int);

const struct fileops vnops = {
	.fo_name = "vn",
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_fcntl = vn_fcntl,
	.fo_poll = vn_poll,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_kqfilter = vn_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = vn_mmap,
	.fo_seek = vn_seek,
};

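#if 0
/*
 * Illustrative sketch (not part of the original file): a vnode opened
 * through vn_open() gets fp->f_ops pointed at the vnops table above, so
 * the generic descriptor layer reaches vn_read()/vn_write() through
 * indirect calls roughly like the one below.  The helper name
 * "example_file_read" is hypothetical; error handling is elided.
 */
static int
example_file_read(file_t *fp, struct uio *uio)
{

	/* Dispatch through the fileops table; for vnodes this is vn_read(). */
	return (*fp->f_ops->fo_read)(fp, &fp->f_offset, uio,
	    fp->f_cred, FOF_UPDATE_OFFSET);
}
#endif
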
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * at_dvp is the directory for openat(), if any.
 * pb is the path.
 * nmode is additional namei flags, restricted to TRYEMULROOT and NOCHROOT.
 * fmode is the open flags, converted from O_* to F*
 * cmode is the creation file permissions.
 *
 * XXX shouldn't cmode be mode_t?
 *
 * On success produces either a locked vnode in *ret_vp, or NULL in
 * *ret_vp and a file descriptor number in *ret_fd.
 *
 * The caller may pass NULL for ret_fd (and ret_domove), in which case
 * EOPNOTSUPP will be produced in the cases that would otherwise return
 * a file descriptor.
 *
 * Note that callers that want no-follow behavior should pass
 * O_NOFOLLOW in fmode.  Neither FOLLOW nor NOFOLLOW in nmode is
 * honored.
 */
int
vn_open(struct vnode *at_dvp, struct pathbuf *pb,
    int nmode, int fmode, int cmode,
    struct vnode **ret_vp, bool *ret_domove, int *ret_fd)
{
	struct nameidata nd;
	struct vnode *vp = NULL;
	struct lwp *l = curlwp;
	kauth_cred_t cred = l->l_cred;
	struct vattr va;
	int error;
	const char *pathstring;

	KASSERT((nmode & (TRYEMULROOT | NOCHROOT)) == nmode);

	KASSERT(ret_vp != NULL);
	KASSERT((ret_domove == NULL) == (ret_fd == NULL));

	if ((fmode & (O_CREAT | O_DIRECTORY)) == (O_CREAT | O_DIRECTORY))
		return EINVAL;

	NDINIT(&nd, LOOKUP, nmode, pb);
	if (at_dvp != NULL)
		NDAT(&nd, at_dvp);

	nd.ni_cnd.cn_flags &= TRYEMULROOT | NOCHROOT;

	if (fmode & O_CREAT) {
		nd.ni_cnd.cn_nameiop = CREATE;
		nd.ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 &&
		    ((fmode & O_NOFOLLOW) == 0))
			nd.ni_cnd.cn_flags |= FOLLOW;
		if ((fmode & O_EXCL) == 0)
			nd.ni_cnd.cn_flags |= NONEXCLHACK;
	} else {
		nd.ni_cnd.cn_nameiop = LOOKUP;
		nd.ni_cnd.cn_flags |= LOCKLEAF;
		if ((fmode & O_NOFOLLOW) == 0)
			nd.ni_cnd.cn_flags |= FOLLOW;
	}

	pathstring = pathbuf_stringcopy_get(nd.ni_pathbuf);
	if (pathstring == NULL) {
		return ENOMEM;
	}

	/*
	 * When this "interface" was exposed to do_open() it used
	 * to initialize l_dupfd to -newfd-1 (thus passing in the
	 * new file handle number to use)... but nothing in the
	 * kernel uses that value.  So just send 0.
	 */
	l->l_dupfd = 0;

	error = namei(&nd);
	if (error)
		goto out;

	vp = nd.ni_vp;

#if NVERIEXEC > 0
	error = veriexec_openchk(l, nd.ni_vp, pathstring, fmode);
	if (error) {
		/* We have to release the locks ourselves */
		/*
		 * 20210604 dholland passing NONEXCLHACK means we can
		 * get ni_dvp == NULL back if ni_vp exists, and we should
		 * treat that like the non-O_CREAT case.
		 */
		if ((fmode & O_CREAT) != 0 && nd.ni_dvp != NULL) {
			if (vp == NULL) {
				vput(nd.ni_dvp);
			} else {
				VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
				if (nd.ni_dvp == nd.ni_vp)
					vrele(nd.ni_dvp);
				else
					vput(nd.ni_dvp);
				nd.ni_dvp = NULL;
				vput(vp);
				vp = NULL;
			}
		} else {
			vput(vp);
			vp = NULL;
		}
		goto out;
	}
#endif /* NVERIEXEC > 0 */

	/*
	 * 20210604 dholland ditto
	 */
	if ((fmode & O_CREAT) != 0 && nd.ni_dvp != NULL) {
		if (nd.ni_vp == NULL) {
			vattr_null(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			if (fmode & O_EXCL)
				va.va_vaflags |= VA_EXCLUSIVE;
			error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp,
			    &nd.ni_cnd, &va);
			if (error) {
				vput(nd.ni_dvp);
				goto out;
			}
			fmode &= ~O_TRUNC;
			vp = nd.ni_vp;
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			vput(nd.ni_dvp);
		} else {
			VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
			if (nd.ni_dvp == nd.ni_vp)
				vrele(nd.ni_dvp);
			else
				vput(nd.ni_dvp);
			nd.ni_dvp = NULL;
			vp = nd.ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else if ((fmode & O_CREAT) != 0) {
		/*
		 * 20210606 dholland passing NONEXCLHACK means this
		 * case exists; it is the same as the following one
		 * but also needs to do things in the second (exists)
		 * half of the following block.  (Besides handle
		 * ni_dvp, anyway.)
		 */
		vp = nd.ni_vp;
		KASSERT((fmode & O_EXCL) == 0);
		fmode &= ~O_CREAT;
	} else {
		vp = nd.ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (nd.ni_vp->v_type == VLNK) {
		error = EFTYPE;
		goto bad;
	}

	if ((fmode & O_CREAT) == 0) {
		error = vn_openchk(vp, cred, fmode);
		if (error != 0)
			goto bad;
	}

	if (fmode & O_TRUNC) {
		vattr_null(&va);
		va.va_size = 0;
		error = VOP_SETATTR(vp, &va, cred);
		if (error != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred)) != 0)
		goto bad;
	if (fmode & FWRITE) {
		mutex_enter(vp->v_interlock);
		vp->v_writecount++;
		mutex_exit(vp->v_interlock);
	}

bad:
	if (error) {
		vput(vp);
		vp = NULL;
	}
out:
	pathbuf_stringcopy_put(nd.ni_pathbuf, pathstring);

	switch (error) {
	case EDUPFD:
	case EMOVEFD:
		/* if the caller isn't prepared to handle fds, fail for them */
		if (ret_fd == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		*ret_vp = NULL;
		*ret_domove = error == EMOVEFD;
		*ret_fd = l->l_dupfd;
		error = 0;
		break;
	case 0:
		KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
		*ret_vp = vp;
		break;
	}
	l->l_dupfd = 0;
	return error;
}

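#if 0
/*
 * Illustrative sketch (not part of the original file): a typical
 * in-kernel caller of vn_open(), modeled on vn_bdev_openpath() below.
 * On success the vnode comes back locked (LK_EXCLUSIVE) and referenced;
 * passing NULL for ret_domove/ret_fd turns the EDUPFD/EMOVEFD cases
 * into EOPNOTSUPP.  The helper name "example_open_ro" is hypothetical.
 */
static int
example_open_ro(const char *path, struct vnode **vpp)
{
	struct pathbuf *pb;
	int error;

	pb = pathbuf_create(path);
	if (pb == NULL)
		return ENOMEM;
	error = vn_open(NULL, pb, 0, FREAD, 0, vpp, NULL, NULL);
	pathbuf_destroy(pb);
	if (error != 0)
		return error;
	VOP_UNLOCK(*vpp);	/* drop the lock, keep the reference */
	/* ... use *vpp; release it later with vn_close(*vpp, FREAD, cred). */
	return 0;
}
#endif
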
/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{

	/*
	 * If the vnode is in use as a process's text,
	 * we can't allow writing.
	 */
	if (vp->v_iflag & VI_TEXT)
		return ETXTBSY;
	return 0;
}

int
vn_openchk(struct vnode *vp, kauth_cred_t cred, int fflags)
{
	int permbits = 0;
	int error;

	if (vp->v_type == VNON || vp->v_type == VBAD)
		return ENXIO;

	if ((fflags & O_DIRECTORY) != 0 && vp->v_type != VDIR)
		return ENOTDIR;

	if ((fflags & O_REGULAR) != 0 && vp->v_type != VREG)
		return EFTYPE;

	if ((fflags & FREAD) != 0) {
		permbits = VREAD;
	}
	if ((fflags & FEXEC) != 0) {
		permbits |= VEXEC;
	}
	if ((fflags & (FWRITE | O_TRUNC)) != 0) {
		permbits |= VWRITE;
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		error = vn_writechk(vp);
		if (error != 0)
			goto bad;
	}
	error = VOP_ACCESS(vp, permbits, cred);
bad:
	return error;
}

/*
 * Mark a vnode as having executable mappings.
 */
void
vn_markexec(struct vnode *vp)
{

	if ((vp->v_iflag & VI_EXECMAP) != 0) {
		/* Safe unlocked, as long as caller holds a reference. */
		return;
	}

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_EXECMAP) == 0) {
		cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
		vp->v_iflag |= VI_EXECMAP;
	}
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);
}

/*
 * Mark a vnode as being the text of a process.
 * Fail if the vnode is currently writable.
 */
int
vn_marktext(struct vnode *vp)
{

	if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP)) == (VI_TEXT|VI_EXECMAP)) {
		/* Safe unlocked, as long as caller holds a reference. */
		return 0;
	}

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	mutex_enter(vp->v_interlock);
	if (vp->v_writecount != 0) {
		KASSERT((vp->v_iflag & VI_TEXT) == 0);
		mutex_exit(vp->v_interlock);
		rw_exit(vp->v_uobj.vmobjlock);
		return ETXTBSY;
	}
	if ((vp->v_iflag & VI_EXECMAP) == 0) {
		cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
	}
	vp->v_iflag |= (VI_TEXT | VI_EXECMAP);
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);
	return 0;
}

/*
 * Vnode close call
 *
 * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
 */
int
vn_close(struct vnode *vp, int flags, kauth_cred_t cred)
{
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (flags & FWRITE) {
		mutex_enter(vp->v_interlock);
		KASSERT(vp->v_writecount > 0);
		vp->v_writecount--;
		mutex_exit(vp->v_interlock);
	}
	error = VOP_CLOSE(vp, flags, cred);
	vput(vp);
	return error;
}

static int
enforce_rlimit_fsize(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct lwp *l = curlwp;
	off_t testoff;

	if (uio->uio_rw != UIO_WRITE || vp->v_type != VREG)
		return 0;

	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (ioflag & IO_APPEND)
		testoff = vp->v_size;
	else
		testoff = uio->uio_offset;

	if (testoff + uio->uio_resid >
	    l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		mutex_enter(&proc_lock);
		psignal(l->l_proc, SIGXFSZ);
		mutex_exit(&proc_lock);
		return EFBIG;
	}

	return 0;
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
    struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_READ) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
		} else /* UIO_WRITE */ {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		}
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_rw = rw;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}

	if ((error = enforce_rlimit_fsize(vp, &auio, ioflg)) != 0)
		goto out;

	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}

	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;

out:
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp);
	}
	return error;
}

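#if 0
/*
 * Illustrative sketch (not part of the original file): reading the first
 * bytes of an already-opened, unlocked vnode with vn_rdwr().  Without
 * IO_NODELOCKED in ioflg, vn_rdwr() takes and releases the vnode lock
 * itself; *aresid reports how much of the request was not transferred.
 * The helper name "example_read_header" is hypothetical.
 */
static int
example_read_header(struct vnode *vp, void *buf, int len)
{
	size_t resid;
	int error;

	error = vn_rdwr(UIO_READ, vp, buf, len, 0, UIO_SYSSPACE,
	    0, curlwp->l_cred, &resid, curlwp);
	if (error == 0 && resid != 0)
		error = EINVAL;		/* short read: file shorter than len */
	return error;
}
#endif
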
int
vn_readdir(file_t *fp, char *bf, int segflg, u_int count, int *done,
    struct lwp *l, off_t **cookies, int *ncookies)
{
	struct vnode *vp = fp->f_vnode;
	struct iovec aiov;
	struct uio auio;
	int error, eofflag;

	/* Limit the size on any kernel buffers used by VOP_READDIR */
	count = uimin(MAXBSIZE, count);

unionread:
	if (vp->v_type != VDIR)
		return EINVAL;
	aiov.iov_base = bf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		KASSERT(l == curlwp);
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}
	auio.uio_resid = count;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
	    ncookies);
	mutex_enter(&fp->f_lock);
	fp->f_offset = auio.uio_offset;
	mutex_exit(&fp->f_lock);
	VOP_UNLOCK(vp);
	if (error)
		return error;

	if (count == auio.uio_resid && vn_union_readdir_hook) {
		struct vnode *ovp = vp;

		error = (*vn_union_readdir_hook)(&vp, fp, l);
		if (error)
			return error;
		if (vp != ovp)
			goto unionread;
	}

	if (count == auio.uio_resid && (vp->v_vflag & VV_ROOT) &&
	    (vp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = vp;
		vp = vp->v_mount->mnt_vnodecovered;
		vref(vp);
		mutex_enter(&fp->f_lock);
		fp->f_vnode = vp;
		fp->f_offset = 0;
		mutex_exit(&fp->f_lock);
		vrele(tvp);
		goto unionread;
	}
	*done = count - auio.uio_resid;
	return error;
}

/*
 * File table vnode read routine.
 */
static int
vn_read(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct vnode *vp = fp->f_vnode;
	int error, ioflag, fflag;
	size_t count;

	ioflag = IO_ADV_ENCODE(fp->f_advice);
	fflag = fp->f_flag;
	if (fflag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fflag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
		ioflag |= IO_SYNC;
	if (fflag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	if (fflag & FDIRECT)
		ioflag |= IO_DIRECT;
	if (offset == &fp->f_offset && (flags & FOF_UPDATE_OFFSET) != 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	else
		vn_lock(vp, LK_SHARED | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;
	error = VOP_READ(vp, uio, ioflag, cred);
	if (flags & FOF_UPDATE_OFFSET)
		*offset += count - uio->uio_resid;
	VOP_UNLOCK(vp);
	return error;
}

/*
 * File table vnode write routine.
 */
static int
vn_write(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct vnode *vp = fp->f_vnode;
	int error, ioflag, fflag;
	size_t count;

	ioflag = IO_ADV_ENCODE(fp->f_advice) | IO_UNIT;
	fflag = fp->f_flag;
	if (vp->v_type == VREG && (fflag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fflag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fflag & FFSYNC ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	else if (fflag & FDSYNC)
		ioflag |= IO_DSYNC;
	if (fflag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	if (fflag & FDIRECT)
		ioflag |= IO_DIRECT;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;

	if ((error = enforce_rlimit_fsize(vp, uio, ioflag)) != 0)
		goto out;

	error = VOP_WRITE(vp, uio, ioflag, cred);

	if (flags & FOF_UPDATE_OFFSET) {
		if (ioflag & IO_APPEND) {
			/*
			 * SUSv3 describes the behaviour for count = 0 as follows:
			 * "Before any action ... is taken, and if nbyte is zero
			 * and the file is a regular file, the write() function
			 * ... in the absence of errors ... shall return zero
			 * and have no other results."
			 */
			if (count)
				*offset = uio->uio_offset;
		} else
			*offset += count - uio->uio_resid;
	}

out:
	VOP_UNLOCK(vp);
	return error;
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(file_t *fp, struct stat *sb)
{
	struct vnode *vp = fp->f_vnode;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = vn_stat(vp, sb);
	VOP_UNLOCK(vp);
	return error;
}

int
vn_stat(struct vnode *vp, struct stat *sb)
{
	struct vattr va;
	int error;
	mode_t mode;

	memset(&va, 0, sizeof(va));
	error = VOP_GETATTR(vp, &va, kauth_cred_get());
	if (error)
		return error;
	/*
	 * Copy from vattr table
	 */
	memset(sb, 0, sizeof(*sb));
	sb->st_dev = va.va_fsid;
	sb->st_ino = va.va_fileid;
	mode = va.va_mode;
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return EBADF;
	}
	sb->st_mode = mode;
	sb->st_nlink = va.va_nlink;
	sb->st_uid = va.va_uid;
	sb->st_gid = va.va_gid;
	sb->st_rdev = va.va_rdev;
	sb->st_size = va.va_size;
	sb->st_atimespec = va.va_atime;
	sb->st_mtimespec = va.va_mtime;
	sb->st_ctimespec = va.va_ctime;
	sb->st_birthtimespec = va.va_birthtime;
	sb->st_blksize = va.va_blocksize;
	sb->st_flags = va.va_flags;
	sb->st_gen = 0;
	sb->st_blocks = va.va_bytes / S_BLKSIZE;
	return 0;
}

/*
 * File table vnode fcntl routine.
 */
static int
vn_fcntl(file_t *fp, u_int com, void *data)
{
	struct vnode *vp = fp->f_vnode;
	int error;

	error = VOP_FCNTL(vp, com, data, fp->f_flag, kauth_cred_get());
	return error;
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(file_t *fp, u_long com, void *data)
{
	struct vnode *vp = fp->f_vnode, *ovp;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &vattr, kauth_cred_get());
			if (error == 0)
				*(int *)data = vattr.va_size - fp->f_offset;
			VOP_UNLOCK(vp);
			if (error)
				return error;
			return 0;
		}
		if ((com == FIONWRITE) || (com == FIONSPACE)) {
			/*
			 * Files don't have send queues, so there never
			 * are any bytes in them, nor is there any
			 * open space in them.
			 */
			*(int *)data = 0;
			return 0;
		}
		if (com == FIOGETBMAP) {
			daddr_t *block;

			if (*(daddr_t *)data < 0)
				return EINVAL;
			block = (daddr_t *)data;
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_BMAP(vp, *block, NULL, block, NULL);
			VOP_UNLOCK(vp);
			return error;
		}
		if (com == OFIOGETBMAP) {
			daddr_t ibn, obn;

			if (*(int32_t *)data < 0)
				return EINVAL;
			ibn = (daddr_t)*(int32_t *)data;
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
			VOP_UNLOCK(vp);
			*(int32_t *)data = (int32_t)obn;
			return error;
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return 0;			/* XXX */
		/* FALLTHROUGH */
	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag,
		    kauth_cred_get());
		if (error == 0 && com == TIOCSCTTY) {
			vref(vp);
			mutex_enter(&proc_lock);
			ovp = curproc->p_session->s_ttyvp;
			curproc->p_session->s_ttyvp = vp;
			mutex_exit(&proc_lock);
			if (ovp != NULL)
				vrele(ovp);
		}
		return error;

	default:
		return EPASSTHROUGH;
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(file_t *fp, int events)
{

	return VOP_POLL(fp->f_vnode, events);
}

/*
 * File table vnode kqfilter routine.
 */
int
vn_kqfilter(file_t *fp, struct knote *kn)
{

	return VOP_KQFILTER(fp->f_vnode, kn);
}

static int
vn_mmap(struct file *fp, off_t *offp, size_t size, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	struct uvm_object *uobj;
	struct vnode *vp;
	struct vattr va;
	struct lwp *l;
	vm_prot_t maxprot;
	off_t off;
	int error, flags;
	bool needwritemap;

	l = curlwp;

	off = *offp;
	flags = *flagsp;
	maxprot = VM_PROT_EXECUTE;

	KASSERT(size > 0);

	vp = fp->f_vnode;
	if (vp->v_type != VREG && vp->v_type != VCHR &&
	    vp->v_type != VBLK) {
		/* only REG/CHR/BLK support mmap */
		return ENODEV;
	}
	if (vp->v_type != VCHR && off < 0) {
		return EINVAL;
	}
#if SIZE_MAX > UINT32_MAX	/* XXX -Wtype-limits */
	if (vp->v_type != VCHR && size > __type_max(off_t)) {
		return EOVERFLOW;
	}
#endif
	if (vp->v_type != VCHR && off > __type_max(off_t) - size) {
		/* no offset wrapping */
		return EOVERFLOW;
	}

	/* special case: catch SunOS style /dev/zero */
	if (vp->v_type == VCHR &&
	    (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
		*uobjp = NULL;
		*maxprotp = VM_PROT_ALL;
		return 0;
	}

	/*
	 * Old programs may not select a specific sharing type, so
	 * default to an appropriate one.
	 *
	 * XXX: how does MAP_ANON fit in the picture?
	 */
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
		struct proc *p = l->l_proc;
		printf("WARNING: defaulted mmap() share type to "
		    "%s (pid %d command %s)\n", vp->v_type == VCHR ?
		    "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
		    p->p_comm);
#endif
		if (vp->v_type == VCHR)
			flags |= MAP_SHARED;	/* for a device */
		else
			flags |= MAP_PRIVATE;	/* for a file */
	}

	/*
	 * MAP_PRIVATE device mappings don't make sense (and aren't
	 * supported anyway).  However, some programs rely on this,
	 * so just change it to MAP_SHARED.
	 */
	if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
		flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
	}

	/*
	 * now check protection
	 */

	/* check read access */
	if (fp->f_flag & FREAD)
		maxprot |= VM_PROT_READ;
	else if (prot & PROT_READ) {
		return EACCES;
	}

	/* check write access, shared case first */
	if (flags & MAP_SHARED) {
		/*
		 * if the file is writable, only add PROT_WRITE to
		 * maxprot if the file is not immutable or append-only.
		 * otherwise, if we have asked for PROT_WRITE, return
		 * EPERM.
		 */
		if (fp->f_flag & FWRITE) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &va, l->l_cred);
			VOP_UNLOCK(vp);
			if (error) {
				return error;
			}
			if ((va.va_flags &
			    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
				maxprot |= VM_PROT_WRITE;
			else if (prot & PROT_WRITE) {
				return EPERM;
			}
		} else if (prot & PROT_WRITE) {
			return EACCES;
		}
	} else {
		/* MAP_PRIVATE mappings can always be written to */
		maxprot |= VM_PROT_WRITE;
	}

	/*
	 * Don't allow mmap for EXEC if the file system
	 * is mounted NOEXEC.
	 */
	if ((prot & PROT_EXEC) != 0 &&
	    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
		return EACCES;
	}

	if (vp->v_type != VCHR) {
		error = VOP_MMAP(vp, prot, curlwp->l_cred);
		if (error) {
			return error;
		}
		vref(vp);
		uobj = &vp->v_uobj;

		/*
		 * If the vnode is being mapped with PROT_EXEC,
		 * then mark it as text.
		 */
		if (prot & PROT_EXEC) {
			vn_markexec(vp);
		}
	} else {
		int i = maxprot;

		/*
		 * XXX Some devices don't like to be mapped with
		 * XXX PROT_EXEC or PROT_WRITE, but we don't really
		 * XXX have a better way of handling this, right now
		 */
		do {
			uobj = udv_attach(vp->v_rdev,
			    (flags & MAP_SHARED) ? i :
			    (i & ~VM_PROT_WRITE), off, size);
			i--;
		} while ((uobj == NULL) && (i > 0));
		if (uobj == NULL) {
			return EINVAL;
		}
		*advicep = UVM_ADV_RANDOM;
	}

	/*
	 * Set vnode flags to indicate the new kinds of mapping.
	 * We take the vnode lock in exclusive mode here to serialize
	 * with direct I/O.
	 *
	 * Safe to check for these flag values without a lock, as
	 * long as a reference to the vnode is held.
	 */
	needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
	    (flags & MAP_SHARED) != 0 &&
	    (maxprot & VM_PROT_WRITE) != 0;
	if ((vp->v_vflag & VV_MAPPED) == 0 || needwritemap) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vp->v_vflag |= VV_MAPPED;
		if (needwritemap) {
			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
			mutex_enter(vp->v_interlock);
			vp->v_iflag |= VI_WRMAP;
			mutex_exit(vp->v_interlock);
			rw_exit(vp->v_uobj.vmobjlock);
		}
		VOP_UNLOCK(vp);
	}

#if NVERIEXEC > 0

	/*
	 * Check if the file can be executed indirectly.
	 *
	 * XXX: This gives false warnings about "Incorrect access type"
	 * XXX: if the mapping is not executable.  Harmless, but will be
	 * XXX: fixed as part of other changes.
	 */
	if (veriexec_verify(l, vp, "(mmap)", VERIEXEC_INDIRECT,
	    NULL)) {

		/*
		 * Don't allow executable mappings if we can't
		 * indirectly execute the file.
		 */
		if (prot & VM_PROT_EXECUTE) {
			return EPERM;
		}

		/*
		 * Strip the executable bit from 'maxprot' to make sure
		 * it can't be made executable later.
		 */
		maxprot &= ~VM_PROT_EXECUTE;
	}
#endif /* NVERIEXEC > 0 */

	*uobjp = uobj;
	*maxprotp = maxprot;
	*flagsp = flags;

	return 0;
}

static int
vn_seek(struct file *fp, off_t delta, int whence, off_t *newoffp,
    int flags)
{
	const off_t OFF_MIN = __type_min(off_t);
	const off_t OFF_MAX = __type_max(off_t);
	kauth_cred_t cred = fp->f_cred;
	off_t oldoff, newoff;
	struct vnode *vp = fp->f_vnode;
	struct vattr vattr;
	int error;

	if (vp->v_type == VFIFO)
		return ESPIPE;

	if (flags & FOF_UPDATE_OFFSET)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	else
		vn_lock(vp, LK_SHARED | LK_RETRY);

	/* Compute the old and new offsets. */
	oldoff = fp->f_offset;
	switch (whence) {
	case SEEK_CUR:
		if (delta > 0) {
			if (oldoff > 0 && delta > OFF_MAX - oldoff) {
				newoff = OFF_MAX;
				break;
			}
		} else {
			if (oldoff < 0 && delta < OFF_MIN - oldoff) {
				newoff = OFF_MIN;
				break;
			}
		}
		newoff = oldoff + delta;
		break;
	case SEEK_END:
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		if (vattr.va_size > OFF_MAX ||
		    delta > OFF_MAX - (off_t)vattr.va_size) {
			newoff = OFF_MAX;
			break;
		}
		newoff = delta + vattr.va_size;
		break;
	case SEEK_SET:
		newoff = delta;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Pass the proposed change to the file system to audit. */
	error = VOP_SEEK(vp, oldoff, newoff, cred);
	if (error)
		goto out;

	/* Success! */
	if (newoffp)
		*newoffp = newoff;
	if (flags & FOF_UPDATE_OFFSET)
		fp->f_offset = newoff;
	error = 0;

out:	VOP_UNLOCK(vp);
	return error;
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
vn_lock(struct vnode *vp, int flags)
{
	struct lwp *l;
	int error;

	KASSERT(vrefcnt(vp) > 0);
	KASSERT((flags & ~(LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT|LK_RETRY|
	    LK_UPGRADE|LK_DOWNGRADE)) == 0);
	KASSERT((flags & LK_NOWAIT) != 0 || !mutex_owned(vp->v_interlock));

#ifdef DIAGNOSTIC
	if (wapbl_vphaswapbl(vp))
		WAPBL_JUNLOCK_ASSERT(wapbl_vptomp(vp));
#endif

	/* Get a more useful report for lockstat. */
	l = curlwp;
	KASSERT(l->l_rwcallsite == 0);
	l->l_rwcallsite = (uintptr_t)__builtin_return_address(0);

	error = VOP_LOCK(vp, flags);

	l->l_rwcallsite = 0;

	switch (flags & (LK_RETRY | LK_NOWAIT)) {
	case 0:
		KASSERT(error == 0 || error == ENOENT);
		break;
	case LK_RETRY:
		KASSERT(error == 0);
		break;
	case LK_NOWAIT:
		KASSERT(error == 0 || error == EBUSY || error == ENOENT);
		break;
	case LK_RETRY | LK_NOWAIT:
		KASSERT(error == 0 || error == EBUSY);
		break;
	}

	return error;
}

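#if 0
/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * around vn_lock().  With LK_RETRY the lock always succeeds (see the
 * KASSERTs above); without it, callers must be prepared for ENOENT when
 * the vnode is being reclaimed.  The helper name "example_getsize" is
 * hypothetical.
 */
static int
example_getsize(struct vnode *vp, off_t *sizep)
{
	struct vattr va;
	int error;

	error = vn_lock(vp, LK_SHARED);
	if (error != 0)
		return error;		/* ENOENT: dying vnode */
	error = VOP_GETATTR(vp, &va, kauth_cred_get());
	VOP_UNLOCK(vp);
	if (error == 0)
		*sizep = (off_t)va.va_size;
	return error;
}
#endif
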
/*
 * File table vnode close routine.
 */
static int
vn_closefile(file_t *fp)
{

	return vn_close(fp->f_vnode, fp->f_flag, fp->f_cred);
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing a "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t *buflen, void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_len = *buflen;
	aiov.iov_base = bf;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL,
	    NOCRED);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp);

	if (error == 0)
		*buflen = *buflen - auio.uio_resid;

	return error;
}

/*
 * XXX Failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t buflen, const void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_len = buflen;
	aiov.iov_base = __UNCONST(bf);		/* XXXUNCONST kills const */

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NOCRED);

	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp);
	}

	return error;
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct lwp *l)
{
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NOCRED);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NOCRED);

	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp);
	}

	return error;
}

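#if 0
/*
 * Illustrative sketch (not part of the original file): fetching a small
 * extended attribute with the wrapper above.  On success *buflen is
 * updated to the number of bytes actually returned.  The attribute name
 * is made up, and EXTATTR_NAMESPACE_SYSTEM is assumed to come from
 * <sys/extattr.h>.  The helper name "example_get_attr" is hypothetical.
 */
static int
example_get_attr(struct vnode *vp, void *buf, size_t *buflen)
{

	/* The vnode is not locked by the caller, so let the wrapper lock it. */
	return vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
	    "example.attribute", buflen, buf, curlwp);
}
#endif
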
int
vn_fifo_bypass(void *v)
{
	struct vop_generic_args *ap = v;

	return VOCALL(fifo_vnodeop_p, ap->a_desc->vdesc_offset, v);
}

/*
 * Open block device by device number
 */
int
vn_bdev_open(dev_t dev, struct vnode **vpp, struct lwp *l)
{
	int error;

	if ((error = bdevvp(dev, vpp)) != 0)
		return error;

	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	if ((error = VOP_OPEN(*vpp, FREAD | FWRITE, l->l_cred)) != 0) {
		vput(*vpp);
		return error;
	}
	mutex_enter((*vpp)->v_interlock);
	(*vpp)->v_writecount++;
	mutex_exit((*vpp)->v_interlock);
	VOP_UNLOCK(*vpp);

	return 0;
}

/*
 * Lookup the provided name in the filesystem.  If the file exists,
 * is a valid block device, and isn't being used by anyone else,
 * set *vpp to the file's vnode.
 */
int
vn_bdev_openpath(struct pathbuf *pb, struct vnode **vpp, struct lwp *l)
{
	struct vnode *vp;
	dev_t dev;
	enum vtype vt;
	int error;

	error = vn_open(NULL, pb, 0, FREAD | FWRITE, 0, &vp, NULL, NULL);
	if (error != 0)
		return error;

	dev = vp->v_rdev;
	vt = vp->v_type;

	VOP_UNLOCK(vp);
	(void) vn_close(vp, FREAD | FWRITE, l->l_cred);

	if (vt != VBLK)
		return ENOTBLK;

	return vn_bdev_open(dev, vpp, l);
}

static long
vn_knote_to_interest(const struct knote *kn)
{
	switch (kn->kn_filter) {
	case EVFILT_READ:
		/*
		 * Writing to the file or changing its attributes can
		 * set the file size, which impacts the readability
		 * filter.
		 *
		 * (No need to set NOTE_EXTEND here; it's only ever
		 * sent with other hints; see vnode_if.c.)
		 */
		return NOTE_WRITE | NOTE_ATTRIB;

	case EVFILT_VNODE:
		return kn->kn_sfflags;

	case EVFILT_WRITE:
	default:
		return 0;
	}
}

void
vn_knote_attach(struct vnode *vp, struct knote *kn)
{
	struct vnode_klist *vk = vp->v_klist;
	long interest = 0;

	/*
	 * In the case of layered / stacked file systems, knotes
	 * should only ever be associated with the base vnode.
	 */
	KASSERT(kn->kn_hook == vp);
	KASSERT(vp->v_klist == &VNODE_TO_VIMPL(vp)->vi_klist);

	/*
	 * We maintain a bitmask of the kevents that there is interest in,
	 * to minimize the impact of having watchers.  It's silly to have
	 * to traverse vn_klist every time a read or write happens simply
	 * because there is someone interested in knowing when the file
	 * is deleted, for example.
	 */

	mutex_enter(vp->v_interlock);
	SLIST_INSERT_HEAD(&vk->vk_klist, kn, kn_selnext);
	SLIST_FOREACH(kn, &vk->vk_klist, kn_selnext) {
		interest |= vn_knote_to_interest(kn);
	}
	vk->vk_interest = interest;
	mutex_exit(vp->v_interlock);
}

void
vn_knote_detach(struct vnode *vp, struct knote *kn)
{
	struct vnode_klist *vk = vp->v_klist;
	long interest = 0;

	/* See above. */
	KASSERT(kn->kn_hook == vp);
	KASSERT(vp->v_klist == &VNODE_TO_VIMPL(vp)->vi_klist);

	/*
	 * We special case removing the head of the list, because:
	 *
	 * 1. It's extremely likely that we're detaching the only
	 *    knote.
	 *
	 * 2. We're already traversing the whole list, so we don't
	 *    want to use the generic SLIST_REMOVE() which would
	 *    traverse it *again*.
	 */

	mutex_enter(vp->v_interlock);
	if (__predict_true(kn == SLIST_FIRST(&vk->vk_klist))) {
		SLIST_REMOVE_HEAD(&vk->vk_klist, kn_selnext);
		SLIST_FOREACH(kn, &vk->vk_klist, kn_selnext) {
			interest |= vn_knote_to_interest(kn);
		}
		vk->vk_interest = interest;
	} else {
		struct knote *thiskn, *nextkn, *prevkn = NULL;

		SLIST_FOREACH_SAFE(thiskn, &vk->vk_klist, kn_selnext, nextkn) {
			if (thiskn == kn) {
				KASSERT(kn != NULL);
				KASSERT(prevkn != NULL);
				SLIST_REMOVE_AFTER(prevkn, kn_selnext);
				kn = NULL;
			} else {
				interest |= vn_knote_to_interest(thiskn);
				prevkn = thiskn;
			}
		}
		vk->vk_interest = interest;
	}
	mutex_exit(vp->v_interlock);
}
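
#if 0
/*
 * Illustrative sketch (not part of the original file): how an event
 * source might consult the vk_interest mask maintained by
 * vn_knote_attach()/vn_knote_detach() to skip the knote traversal
 * entirely when nothing attached cares about the hint.  The real
 * delivery path lives in the VOP dispatch code (see vnode_if.c); the
 * helper name and its locking are a simplified assumption.
 */
static void
example_post_event(struct vnode *vp, long hint)
{

	if ((vp->v_klist->vk_interest & hint) == 0)
		return;			/* no watcher cares; skip the list walk */
	mutex_enter(vp->v_interlock);
	knote(&vp->v_klist->vk_klist, hint);
	mutex_exit(vp->v_interlock);
}
#endif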