/*	$NetBSD: vfs_vnops.c,v 1.105 2006/02/04 11:58:08 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.105 2006/02/04 11:58:08 yamt Exp $");

#include "opt_verified_exec.h"

#include "fs_union.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/poll.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_readahead.h>

#ifdef UNION
#include <fs/union/union.h>
#endif

#if defined(LKM) || defined(UNION)
int (*vn_union_readdir_hook)(struct vnode **, struct file *, struct lwp *);
#endif

#ifdef VERIFIED_EXEC
#include <sys/verified_exec.h>
#endif

static int vn_read(struct file *fp, off_t *offset, struct uio *uio,
	    struct ucred *cred, int flags);
static int vn_write(struct file *fp, off_t *offset, struct uio *uio,
	    struct ucred *cred, int flags);
static int vn_closefile(struct file *fp, struct lwp *l);
static int vn_poll(struct file *fp, int events, struct lwp *l);
static int vn_fcntl(struct file *fp, u_int com, void *data, struct lwp *l);
static int vn_statfile(struct file *fp, struct stat *sb, struct lwp *l);
static int vn_ioctl(struct file *fp, u_long com, void *data, struct lwp *l);

const struct fileops vnops = {
	vn_read, vn_write, vn_ioctl, vn_fcntl, vn_poll,
	vn_statfile, vn_closefile, vn_kqfilter
};

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	struct vnode *vp;
	struct mount *mp;
	struct lwp *l = ndp->ni_cnd.cn_lwp;
	struct ucred *cred = l->l_proc->p_ucred;
	struct vattr va;
	int error;
#ifdef VERIFIED_EXEC
	struct veriexec_hash_entry *vhe = NULL;
	char pathbuf[MAXPATHLEN];
	size_t pathlen;
	int (*copyfun)(const void *, void *, size_t, size_t *) =
	    ndp->ni_segflg == UIO_SYSSPACE ? copystr : copyinstr;
#endif /* VERIFIED_EXEC */

#ifdef VERIFIED_EXEC
	error = (*copyfun)(ndp->ni_dirp, pathbuf, sizeof(pathbuf), &pathlen);
	if (error) {
		if (veriexec_verbose >= 1)
			printf("veriexec: Can't copy path. (error=%d)\n",
			    error);

		return (error);
	}
#endif /* VERIFIED_EXEC */
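
	/*
	 * Note (added comment): if the file is to be created while the
	 * filesystem is being suspended, the vn_start_write(V_NOWAIT)
	 * call below fails; we then wait for the suspension to end and
	 * redo the whole lookup from the restart label.
	 */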
restart:
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 &&
		    ((fmode & O_NOFOLLOW) == 0))
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
#ifdef VERIFIED_EXEC
			/* Lockdown mode: Prevent creation of new files. */
			if (veriexec_strict >= 3) {
				VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);

				printf("Veriexec: vn_open: Preventing "
				    "new file creation in %s.\n",
				    pathbuf);

				vp = ndp->ni_dvp;
				error = EPERM;
				goto bad;
			}
#endif /* VERIFIED_EXEC */

			VATTR_NULL(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			if (fmode & O_EXCL)
				va.va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_WAIT | V_SLEEPONLY | V_PCATCH)) != 0)
					return (error);
				goto restart;
			}
			VOP_LEASE(ndp->ni_dvp, l, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
			    &ndp->ni_cnd, &va);
			vn_finished_write(mp, 0);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = LOCKLEAF;
		if ((fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (ndp->ni_vp->v_type == VLNK) {
		error = EFTYPE;
		goto bad;
	}

#ifdef VERIFIED_EXEC
	if ((error = VOP_GETATTR(vp, &va, cred, l)) != 0)
		goto bad;
#endif

	if ((fmode & O_CREAT) == 0) {
#ifdef VERIFIED_EXEC
		if ((error = veriexec_verify(l, vp, &va, pathbuf,
		    VERIEXEC_FILE, &vhe)) != 0)
			goto bad;
#endif

		if (fmode & FREAD) {
			if ((error = VOP_ACCESS(vp, VREAD, cred, l)) != 0)
				goto bad;
		}

		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) != 0 ||
			    (error = VOP_ACCESS(vp, VWRITE, cred, l)) != 0)
				goto bad;
#ifdef VERIFIED_EXEC
			if (vhe != NULL) {
				veriexec_report("Write access request.",
				    pathbuf, &va, l,
				    REPORT_NOVERBOSE,
				    REPORT_ALARM,
				    REPORT_NOPANIC);

				/* IPS mode: Deny writing to monitored files. */
				if (veriexec_strict >= 2) {
					error = EPERM;
					goto bad;
				} else {
					vhe->status = FINGERPRINT_NOTEVAL;
				}
			}
#endif
		}
	}

	if (fmode & O_TRUNC) {
		VOP_UNLOCK(vp, 0);			/* XXX */
		if ((error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0) {
			vrele(vp);
			return (error);
		}
		VOP_LEASE(vp, l, cred, LEASE_WRITE);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(&va);
		va.va_size = 0;
		error = VOP_SETATTR(vp, &va, cred, l);
		vn_finished_write(mp, 0);
		if (error != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, l)) != 0)
		goto bad;
	if (vp->v_type == VREG &&
	    uvn_attach(vp, fmode & FWRITE ? VM_PROT_WRITE : 0) == NULL) {
		error = EIO;
		goto bad;
	}
	if (fmode & FWRITE)
		vp->v_writecount++;

	return (0);
bad:
	vput(vp);
	return (error);
}
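
/*
 * Example (sketch, not part of the original code): a typical in-kernel
 * open/close sequence using the routines above.  The NDINIT() arguments
 * shown are illustrative and assume a kernel-space path string and the
 * caller's lwp; vn_open() returns the vnode locked, while vn_close()
 * expects it unlocked (see the comment at vn_close() below).
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, l);
 *	if ((error = vn_open(&nd, FREAD, 0)) == 0) {
 *		VOP_UNLOCK(nd.ni_vp, 0);
 *		... use nd.ni_vp ...
 *		error = vn_close(nd.ni_vp, FREAD, l->l_proc->p_ucred, l);
 *	}
 */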

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{

	/*
	 * If the vnode is in use as a process's text,
	 * we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}

/*
 * Mark a vnode as having executable mappings.
 */
void
vn_markexec(struct vnode *vp)
{
	if ((vp->v_flag & VEXECMAP) == 0) {
		uvmexp.filepages -= vp->v_uobj.uo_npages;
		uvmexp.execpages += vp->v_uobj.uo_npages;
	}
	vp->v_flag |= VEXECMAP;
}

/*
 * Mark a vnode as being the text of a process.
 * Fail if the vnode is currently writable.
 */
int
vn_marktext(struct vnode *vp)
{

	if (vp->v_writecount != 0) {
		KASSERT((vp->v_flag & VTEXT) == 0);
		return (ETXTBSY);
	}
	vp->v_flag |= VTEXT;
	vn_markexec(vp);
	return (0);
}

/*
 * Vnode close call
 *
 * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
 */
int
vn_close(struct vnode *vp, int flags, struct ucred *cred, struct lwp *l)
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(vp, flags, cred, l);
	vput(vp);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *cred, size_t *aresid,
    struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_READ) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
		} else /* UIO_WRITE */ {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH))
			    != 0)
				return (error);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		}
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_lwp = l;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_WRITE)
			vn_finished_write(mp, 0);
		VOP_UNLOCK(vp, 0);
	}
	return (error);
}
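
/*
 * Example (sketch, not part of the original code): reading a block of
 * data from an already-referenced vnode with vn_rdwr(); "buf", "resid"
 * and "cred" are illustrative locals of the caller.
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), (off_t)0,
 *	    UIO_SYSSPACE, 0, cred, &resid, l);
 *	if (error == 0 && resid != 0)
 *		... short read ...
 */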

/*
 * Read directory entries from a vnode open through a struct file,
 * descending into union mounts as necessary.
 */
int
vn_readdir(struct file *fp, char *bf, int segflg, u_int count, int *done,
    struct lwp *l, off_t **cookies, int *ncookies)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct iovec aiov;
	struct uio auio;
	int error, eofflag;

unionread:
	if (vp->v_type != VDIR)
		return (EINVAL);
	aiov.iov_base = bf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = segflg;
	auio.uio_lwp = l;
	auio.uio_resid = count;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
	    ncookies);
	fp->f_offset = auio.uio_offset;
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

#if defined(UNION) || defined(LKM)
	if (count == auio.uio_resid && vn_union_readdir_hook) {
		struct vnode *ovp = vp;

		error = (*vn_union_readdir_hook)(&vp, fp, l);
		if (error)
			return (error);
		if (vp != ovp)
			goto unionread;
	}
#endif /* UNION || LKM */

	if (count == auio.uio_resid && (vp->v_flag & VROOT) &&
	    (vp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = vp;
		vp = vp->v_mount->mnt_vnodecovered;
		VREF(vp);
		fp->f_data = vp;
		fp->f_offset = 0;
		vrele(tvp);
		goto unionread;
	}
	*done = count - auio.uio_resid;
	return error;
}

/*
 * File table vnode read routine.
 */
static int
vn_read(struct file *fp, off_t *offset, struct uio *uio, struct ucred *cred,
    int flags)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int count, error, ioflag;

	VOP_LEASE(vp, uio->uio_lwp, cred, LEASE_READ);
	ioflag = IO_ADV_ENCODE(fp->f_advice);
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fp->f_flag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
		ioflag |= IO_SYNC;
	if (fp->f_flag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;
	error = VOP_READ(vp, uio, ioflag, cred);
	if (flags & FOF_UPDATE_OFFSET)
		*offset += count - uio->uio_resid;
	VOP_UNLOCK(vp, 0);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(struct file *fp, off_t *offset, struct uio *uio, struct ucred *cred,
    int flags)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct mount *mp;
	int count, error, ioflag = IO_UNIT;

	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & FFSYNC ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	else if (fp->f_flag & FDSYNC)
		ioflag |= IO_DSYNC;
	if (fp->f_flag & FALTIO)
		ioflag |= IO_ALTSEMANTICS;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0)
		return (error);
	VOP_LEASE(vp, uio->uio_lwp, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	uio->uio_offset = *offset;
	count = uio->uio_resid;
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if (flags & FOF_UPDATE_OFFSET) {
		if (ioflag & IO_APPEND)
			*offset = uio->uio_offset;
		else
			*offset += count - uio->uio_resid;
	}
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp, 0);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct lwp *l)
{
	struct vnode *vp = (struct vnode *)fp->f_data;

	return vn_stat(vp, sb, l);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct lwp *l)
{
	struct vattr va;
	int error;
	mode_t mode;

	error = VOP_GETATTR(vp, &va, l->l_proc->p_ucred, l);
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	sb->st_dev = va.va_fsid;
	sb->st_ino = va.va_fileid;
	mode = va.va_mode;
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	};
	sb->st_mode = mode;
	sb->st_nlink = va.va_nlink;
	sb->st_uid = va.va_uid;
	sb->st_gid = va.va_gid;
	sb->st_rdev = va.va_rdev;
	sb->st_size = va.va_size;
	sb->st_atimespec = va.va_atime;
	sb->st_mtimespec = va.va_mtime;
	sb->st_ctimespec = va.va_ctime;
	sb->st_birthtimespec = va.va_birthtime;
	sb->st_blksize = va.va_blocksize;
	sb->st_flags = va.va_flags;
	sb->st_gen = 0;
	sb->st_blocks = va.va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode fcntl routine.
 */
static int
vn_fcntl(struct file *fp, u_int com, void *data, struct lwp *l)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FCNTL(vp, com, data, fp->f_flag, l->l_proc->p_ucred, l);
	VOP_UNLOCK(vp, 0);
	return (error);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(struct file *fp, u_long com, void *data, struct lwp *l)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct proc *p = l->l_proc;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, l->l_proc->p_ucred, l);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if ((com == FIONWRITE) || (com == FIONSPACE)) {
			/*
			 * Files don't have send queues, so there never
			 * are any bytes in them, nor is there any
			 * open space in them.
			 */
			*(int *)data = 0;
			return (0);
		}
		if (com == FIOGETBMAP) {
			daddr_t *block;

			if (*(daddr_t *)data < 0)
				return (EINVAL);
			block = (daddr_t *)data;
			return (VOP_BMAP(vp, *block, NULL, block, NULL));
		}
		if (com == OFIOGETBMAP) {
			daddr_t ibn, obn;

			if (*(int32_t *)data < 0)
				return (EINVAL);
			ibn = (daddr_t)*(int32_t *)data;
			error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
			*(int32_t *)data = (int32_t)obn;
			return error;
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */
	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag,
		    l->l_proc->p_ucred, l);
		if (error == 0 && com == TIOCSCTTY) {
			if (p->p_session->s_ttyvp)
				vrele(p->p_session->s_ttyvp);
			p->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);

	default:
		return (EPASSTHROUGH);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(struct file *fp, int events, struct lwp *l)
{

	return (VOP_POLL(((struct vnode *)fp->f_data), events, l));
}

/*
 * File table vnode kqfilter routine.
 */
int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER((struct vnode *)fp->f_data, kn));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
vn_lock(struct vnode *vp, int flags)
{
	int error;

#if 0
	KASSERT(vp->v_usecount > 0 || (flags & LK_INTERLOCK) != 0
	    || (vp->v_flag & VONWORKLST) != 0);
#endif
	KASSERT((flags &
	    ~(LK_INTERLOCK|LK_SHARED|LK_EXCLUSIVE|LK_DRAIN|LK_NOWAIT|LK_RETRY|
	    LK_SETRECURSE|LK_CANRECURSE)) == 0);

	do {
		if ((flags & LK_INTERLOCK) == 0)
			simple_lock(&vp->v_interlock);
		if (vp->v_flag & VXLOCK) {
			if (flags & LK_NOWAIT) {
				simple_unlock(&vp->v_interlock);
				return EBUSY;
			}
			vp->v_flag |= VXWANT;
			ltsleep(vp, PINOD | PNORELOCK,
			    "vn_lock", 0, &vp->v_interlock);
			error = ENOENT;
		} else {
			error = VOP_LOCK(vp,
			    (flags & ~LK_RETRY) | LK_INTERLOCK);
			if (error == 0 || error == EDEADLK || error == EBUSY)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(struct file *fp, struct lwp *l)
{

	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
	    fp->f_cred, l));
}

/*
 * Enable LK_CANRECURSE on lock. Return prior status.
 */
u_int
vn_setrecurse(struct vnode *vp)
{
	struct lock *lkp = &vp->v_lock;
	u_int retval = lkp->lk_flags & LK_CANRECURSE;

	lkp->lk_flags |= LK_CANRECURSE;
	return retval;
}

/*
 * Called when done with vn_setrecurse(); restores the prior recursion state.
 */
void
vn_restorerecurse(struct vnode *vp, u_int flags)
{
	struct lock *lkp = &vp->v_lock;

	lkp->lk_flags &= ~LK_CANRECURSE;
	lkp->lk_flags |= flags;
}
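
/*
 * Establish a copy-on-write callback on a special vnode.  (Added comment:)
 * the entry is added to the vnode's COW handler list only while no
 * copy-on-write handlers are running, using the v_spec_cow_req /
 * v_spec_cow_count handshake.
 */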
int
vn_cow_establish(struct vnode *vp,
    int (*func)(void *, struct buf *), void *cookie)
{
	int s;
	struct spec_cow_entry *e;

	MALLOC(e, struct spec_cow_entry *, sizeof(struct spec_cow_entry),
	    M_DEVBUF, M_WAITOK);
	e->ce_func = func;
	e->ce_cookie = cookie;

	SPEC_COW_LOCK(vp->v_specinfo, s);
	vp->v_spec_cow_req++;
	while (vp->v_spec_cow_count > 0)
		ltsleep(&vp->v_spec_cow_req, PRIBIO, "cowlist", 0,
		    &vp->v_spec_cow_slock);

	SLIST_INSERT_HEAD(&vp->v_spec_cow_head, e, ce_list);

	vp->v_spec_cow_req--;
	if (vp->v_spec_cow_req == 0)
		wakeup(&vp->v_spec_cow_req);
	SPEC_COW_UNLOCK(vp->v_specinfo, s);

	return 0;
}

/*
 * Remove a previously established copy-on-write callback.
 */
int
vn_cow_disestablish(struct vnode *vp,
    int (*func)(void *, struct buf *), void *cookie)
{
	int s;
	struct spec_cow_entry *e;

	SPEC_COW_LOCK(vp->v_specinfo, s);
	vp->v_spec_cow_req++;
	while (vp->v_spec_cow_count > 0)
		ltsleep(&vp->v_spec_cow_req, PRIBIO, "cowlist", 0,
		    &vp->v_spec_cow_slock);

	SLIST_FOREACH(e, &vp->v_spec_cow_head, ce_list)
		if (e->ce_func == func && e->ce_cookie == cookie) {
			SLIST_REMOVE(&vp->v_spec_cow_head, e,
			    spec_cow_entry, ce_list);
			FREE(e, M_DEVBUF);
			break;
		}

	vp->v_spec_cow_req--;
	if (vp->v_spec_cow_req == 0)
		wakeup(&vp->v_spec_cow_req);
	SPEC_COW_UNLOCK(vp->v_specinfo, s);

	return e ? 0 : EINVAL;
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * These calls pass in a NULL credential, authorizing a "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t *buflen, void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_len = *buflen;
	aiov.iov_base = bf;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_lwp = l;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    l);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);

	if (error == 0)
		*buflen = *buflen - auio.uio_resid;

	return (error);
}

/*
 * XXX Failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t buflen, const void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	int error;

	aiov.iov_len = buflen;
	aiov.iov_base = __UNCONST(bf);		/* XXXUNCONST kills const */

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_lwp = l;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, l);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp, 0);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

/*
 * Delete a named extended attribute, falling back to VOP_SETEXTATTR
 * with a NULL uio for filesystems that do not support deletion directly.
 */
int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct lwp *l)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, l);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, l);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp, 0);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}
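
/*
 * Example (sketch, not part of the original code): fetching a named
 * extended attribute from an already-locked vnode; the namespace,
 * attribute name and buffer are illustrative.
 *
 *	char buf[64];
 *	size_t len = sizeof(buf);
 *
 *	error = vn_extattr_get(vp, IO_NODELOCKED,
 *	    EXTATTR_NAMESPACE_SYSTEM, "example", &len, buf, l);
 */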

/*
 * Preparing to start a filesystem write operation. If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed. If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 * V_PCATCH	adds PCATCH to the tsleep flags.
 * V_WAIT	waits until suspension is over. Otherwise returns EWOULDBLOCK.
 * V_SLEEPONLY	wait, but do not bump the operations count.
 * V_LOWER	this is a lower level operation. No further vnodes should be
 *		locked. Otherwise it is an upper level operation. No vnodes
 *		should be locked.
 */
int
vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
{
	struct mount *mp;
	int error, mask, prio;

	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		*mpp = vp->v_mount;
	}
	if ((mp = *mpp) == NULL)
		return (0);
	mp = mp->mnt_leaf;
	/*
	 * Check on status of suspension.
	 */
	prio = PUSER - 1;
	if (flags & V_PCATCH)
		prio |= PCATCH;

	if ((flags & V_LOWER) == 0)
		mask = IMNT_SUSPEND;
	else
		mask = IMNT_SUSPENDLOW;

	while ((mp->mnt_iflag & mask) != 0) {
		if ((flags & V_WAIT) == 0)
			return (EWOULDBLOCK);
		error = tsleep(&mp->mnt_flag, prio, "suspfs", 0);
		if (error)
			return (error);
	}
	if (flags & V_SLEEPONLY)
		return (0);
	simple_lock(&mp->mnt_slock);
	if ((flags & V_LOWER) == 0)
		mp->mnt_writeopcountupper++;
	else
		mp->mnt_writeopcountlower++;
	simple_unlock(&mp->mnt_slock);
	return (0);
}

/*
 * Filesystem write operation has completed. If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(struct mount *mp, int flags)
{
	if (mp == NULL)
		return;
	mp = mp->mnt_leaf;
	simple_lock(&mp->mnt_slock);
	if ((flags & V_LOWER) == 0) {
		mp->mnt_writeopcountupper--;
		if (mp->mnt_writeopcountupper < 0)
			printf("vn_finished_write: neg cnt upper=%d\n",
			    mp->mnt_writeopcountupper);
		if ((mp->mnt_iflag & IMNT_SUSPEND) != 0 &&
		    mp->mnt_writeopcountupper <= 0)
			wakeup(&mp->mnt_writeopcountupper);
	} else {
		mp->mnt_writeopcountlower--;
		if (mp->mnt_writeopcountlower < 0)
			printf("vn_finished_write: neg cnt lower=%d\n",
			    mp->mnt_writeopcountlower);
		if ((mp->mnt_iflag & IMNT_SUSPENDLOW) != 0 &&
		    mp->mnt_writeopcountlower <= 0)
			wakeup(&mp->mnt_writeopcountlower);
	}
	simple_unlock(&mp->mnt_slock);
}

/*
 * Allocate a read-ahead context for a regular file's vnode, if one is
 * not already present.
 */
void
vn_ra_allocctx(struct vnode *vp)
{
	struct uvm_ractx *ra = NULL;

	if (vp->v_type != VREG) {
		return;
	}
	if (vp->v_ractx != NULL) {
		return;
	}
	simple_lock(&vp->v_interlock);
	if (vp->v_ractx == NULL) {
		simple_unlock(&vp->v_interlock);
		ra = uvm_ra_allocctx();
		simple_lock(&vp->v_interlock);
		if (ra != NULL && vp->v_ractx == NULL) {
			vp->v_ractx = ra;
			ra = NULL;
		}
	}
	simple_unlock(&vp->v_interlock);
	if (ra != NULL) {
		uvm_ra_freectx(ra);
	}
}