/*	$OpenBSD: vfs_vnops.c,v 1.113 2020/02/22 11:58:29 anton Exp $	*/
/*	$NetBSD: vfs_vnops.c,v 1.20 1996/02/04 02:18:41 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.5 (Berkeley) 12/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/lock.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/cdio.h>
#include <sys/poll.h>
#include <sys/filedesc.h>
#include <sys/specdev.h>
#include <sys/unistd.h>

int vn_read(struct file *, struct uio *, int);
int vn_write(struct file *, struct uio *, int);
int vn_poll(struct file *, int, struct proc *);
int vn_kqfilter(struct file *, struct knote *);
int vn_closefile(struct file *, struct proc *);
int vn_seek(struct file *, off_t *, int, struct proc *);

const struct fileops vnops = {
        .fo_read        = vn_read,
        .fo_write       = vn_write,
        .fo_ioctl       = vn_ioctl,
        .fo_poll        = vn_poll,
        .fo_kqfilter    = vn_kqfilter,
        .fo_stat        = vn_statfile,
        .fo_close       = vn_closefile,
        .fo_seek        = vn_seek,
};
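/*
 * Example (a sketch, not part of this file): open(2)-style code binds a
 * vnode to this table by pointing the open file at it, so the generic
 * file layer dispatches through the vn_* routines above:
 *
 *	fp->f_ops = &vnops;
 *	fp->f_data = vp;
 */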
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
        struct vnode *vp;
        struct proc *p = ndp->ni_cnd.cn_proc;
        struct ucred *cred = p->p_ucred;
        struct vattr va;
        struct cloneinfo *cip;
        int error;

        /*
         * The only valid flag to pass in here from NDINIT is
         * KERNELPATH.  This function overrides the nameiop based
         * on the fmode and cmode flags, so validate that our caller
         * has not set other flags or operations in the nameidata
         * structure.
         */
        KASSERT(ndp->ni_cnd.cn_flags == 0 ||
            ndp->ni_cnd.cn_flags == KERNELPATH);
        KASSERT(ndp->ni_cnd.cn_nameiop == 0);

        if ((fmode & (FREAD|FWRITE)) == 0)
                return (EINVAL);
        if ((fmode & (O_TRUNC | FWRITE)) == O_TRUNC)
                return (EINVAL);
        if (fmode & O_CREAT) {
                ndp->ni_cnd.cn_nameiop = CREATE;
                ndp->ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF;
                if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
                        ndp->ni_cnd.cn_flags |= FOLLOW;
                if ((error = namei(ndp)) != 0)
                        return (error);

                if (ndp->ni_vp == NULL) {
                        VATTR_NULL(&va);
                        va.va_type = VREG;
                        va.va_mode = cmode;
                        if (fmode & O_EXCL)
                                va.va_vaflags |= VA_EXCLUSIVE;
                        error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
                            &ndp->ni_cnd, &va);
                        vput(ndp->ni_dvp);
                        if (error)
                                return (error);
                        fmode &= ~O_TRUNC;
                        vp = ndp->ni_vp;
                } else {
                        VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
                        if (ndp->ni_dvp == ndp->ni_vp)
                                vrele(ndp->ni_dvp);
                        else
                                vput(ndp->ni_dvp);
                        ndp->ni_dvp = NULL;
                        vp = ndp->ni_vp;
                        if (fmode & O_EXCL) {
                                error = EEXIST;
                                goto bad;
                        }
                        fmode &= ~O_CREAT;
                }
        } else {
                ndp->ni_cnd.cn_nameiop = LOOKUP;
                ndp->ni_cnd.cn_flags |= ((fmode & O_NOFOLLOW) ?
                    NOFOLLOW : FOLLOW) | LOCKLEAF;
                if ((error = namei(ndp)) != 0)
                        return (error);
                vp = ndp->ni_vp;
        }
        if (vp->v_type == VSOCK) {
                error = EOPNOTSUPP;
                goto bad;
        }
        if (vp->v_type == VLNK) {
                error = ELOOP;
                goto bad;
        }
        if ((fmode & O_DIRECTORY) && vp->v_type != VDIR) {
                error = ENOTDIR;
                goto bad;
        }
        if ((fmode & O_CREAT) == 0) {
                if (fmode & FREAD) {
                        if ((error = VOP_ACCESS(vp, VREAD, cred, p)) != 0)
                                goto bad;
                }
                if (fmode & FWRITE) {
                        if (vp->v_type == VDIR) {
                                error = EISDIR;
                                goto bad;
                        }
                        if ((error = vn_writechk(vp)) != 0 ||
                            (error = VOP_ACCESS(vp, VWRITE, cred, p)) != 0)
                                goto bad;
                }
        }
        if ((fmode & O_TRUNC) && vp->v_type == VREG) {
                VATTR_NULL(&va);
                va.va_size = 0;
                if ((error = VOP_SETATTR(vp, &va, cred, p)) != 0)
                        goto bad;
        }
        if ((error = VOP_OPEN(vp, fmode, cred, p)) != 0)
                goto bad;

        if (vp->v_flag & VCLONED) {
                cip = (struct cloneinfo *)vp->v_data;

                vp->v_flag &= ~VCLONED;

                ndp->ni_vp = cip->ci_vp;        /* return cloned vnode */
                vp->v_data = cip->ci_data;      /* restore v_data */
                VOP_UNLOCK(vp);                 /* keep a reference */
                vp = ndp->ni_vp;                /* for the increment below */

                free(cip, M_TEMP, sizeof(*cip));
        }

        if (fmode & FWRITE)
                vp->v_writecount++;
        return (0);
bad:
        vput(vp);
        return (error);
}
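/*
 * Example (a sketch, not taken from this file): the usual calling
 * convention.  NDINIT() is assumed to leave cn_nameiop and cn_flags
 * zeroed, satisfying the KASSERTs above; on success the vnode is
 * returned locked and the caller must unlock it:
 *
 *	struct nameidata nd;
 *	int error;
 *
 *	NDINIT(&nd, 0, 0, UIO_SYSSPACE, path, p);
 *	if ((error = vn_open(&nd, FREAD, 0)) != 0)
 *		return (error);
 *	...use nd.ni_vp...
 *	VOP_UNLOCK(nd.ni_vp);
 *	error = vn_close(nd.ni_vp, FREAD, p->p_ucred, p);
 */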
/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{
        /*
         * Disallow write attempts on read-only file systems;
         * unless the file is a socket or a block or character
         * device resident on the file system.
         */
        if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
                switch (vp->v_type) {
                case VREG:
                case VDIR:
                case VLNK:
                        return (EROFS);
                case VNON:
                case VCHR:
                case VSOCK:
                case VFIFO:
                case VBAD:
                case VBLK:
                        break;
                }
        }
        /*
         * If there's shared text associated with
         * the vnode, try to free it up once.  If
         * we fail, we can't allow writing.
         */
        if ((vp->v_flag & VTEXT) && !uvm_vnp_uncache(vp))
                return (ETXTBSY);

        return (0);
}

/*
 * Check whether a write operation would exceed the file size rlimit
 * for the process, if one should be applied for this operation.
 * If a partial write should take place, the uio is adjusted and the
 * amount by which the request would have exceeded the limit is
 * returned via the 'overrun' argument.  For example, with a limit of
 * 1000 bytes, an offset of 900 and a resid of 200, the write is
 * clamped to 100 bytes and 100 is returned via 'overrun'.
 */
int
vn_fsizechk(struct vnode *vp, struct uio *uio, int ioflag, ssize_t *overrun)
{
        struct proc *p = uio->uio_procp;

        *overrun = 0;
        if (vp->v_type == VREG && p != NULL && !(ioflag & IO_NOLIMIT)) {
                rlim_t limit = lim_cur_proc(p, RLIMIT_FSIZE);

                /* if already at or over the limit, send the signal and fail */
                if (uio->uio_offset >= limit) {
                        psignal(p, SIGXFSZ);
                        return (EFBIG);
                }

                /* otherwise, clamp the write to stay under the limit */
                if (uio->uio_resid > limit - uio->uio_offset) {
                        *overrun = uio->uio_resid -
                            (limit - uio->uio_offset);
                        uio->uio_resid = limit - uio->uio_offset;
                }
        }

        return (0);
}

/*
 * Mark a vnode as being the text image of a running process.
 */
void
vn_marktext(struct vnode *vp)
{
        vp->v_flag |= VTEXT;
}

/*
 * Vnode close call.
 */
int
vn_close(struct vnode *vp, int flags, struct ucred *cred, struct proc *p)
{
        int error;

        if (flags & FWRITE)
                vp->v_writecount--;
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        error = VOP_CLOSE(vp, flags, cred, p);
        vput(vp);
        return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *cred, size_t *aresid,
    struct proc *p)
{
        struct uio auio;
        struct iovec aiov;
        int error;

        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        aiov.iov_base = base;
        aiov.iov_len = len;
        auio.uio_resid = len;
        auio.uio_offset = offset;
        auio.uio_segflg = segflg;
        auio.uio_rw = rw;
        auio.uio_procp = p;

        if ((ioflg & IO_NODELOCKED) == 0)
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        if (rw == UIO_READ) {
                error = VOP_READ(vp, &auio, ioflg, cred);
        } else {
                error = VOP_WRITE(vp, &auio, ioflg, cred);
        }
        if ((ioflg & IO_NODELOCKED) == 0)
                VOP_UNLOCK(vp);

        if (aresid)
                *aresid = auio.uio_resid;
        else
                if (auio.uio_resid && error == 0)
                        error = EIO;
        return (error);
}
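/*
 * Example (a sketch, not taken from this file): read the first 512
 * bytes of an unlocked vnode into a kernel buffer.  Passing a NULL
 * 'aresid' turns a short read into EIO, per the logic above:
 *
 *	char buf[512];
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
 *	    UIO_SYSSPACE, 0, p->p_ucred, NULL, p);
 */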
/*
 * File table vnode read routine.
 */
int
vn_read(struct file *fp, struct uio *uio, int fflags)
{
        struct vnode *vp = fp->f_data;
        struct ucred *cred = fp->f_cred;
        size_t count = uio->uio_resid;
        off_t offset;
        int error;

        KERNEL_LOCK();

        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

        if ((fflags & FO_POSITION) == 0)
                offset = uio->uio_offset = fp->f_offset;
        else
                offset = uio->uio_offset;

        /* no wrap around of offsets except on character devices */
        if (vp->v_type != VCHR && count > LLONG_MAX - offset) {
                error = EINVAL;
                goto done;
        }

        if (vp->v_type == VDIR) {
                error = EISDIR;
                goto done;
        }

        error = VOP_READ(vp, uio, (fp->f_flag & FNONBLOCK) ? IO_NDELAY : 0,
            cred);
        if ((fflags & FO_POSITION) == 0) {
                mtx_enter(&fp->f_mtx);
                fp->f_offset += count - uio->uio_resid;
                mtx_leave(&fp->f_mtx);
        }
done:
        VOP_UNLOCK(vp);
        KERNEL_UNLOCK();
        return (error);
}

/*
 * File table vnode write routine.
 */
int
vn_write(struct file *fp, struct uio *uio, int fflags)
{
        struct vnode *vp = fp->f_data;
        struct ucred *cred = fp->f_cred;
        int error, ioflag = IO_UNIT;
        size_t count;

        KERNEL_LOCK();

        /* note: pwrite/pwritev are unaffected by O_APPEND */
        if (vp->v_type == VREG && (fp->f_flag & O_APPEND) &&
            (fflags & FO_POSITION) == 0)
                ioflag |= IO_APPEND;
        if (fp->f_flag & FNONBLOCK)
                ioflag |= IO_NDELAY;
        if ((fp->f_flag & FFSYNC) ||
            (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
                ioflag |= IO_SYNC;
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        if ((fflags & FO_POSITION) == 0)
                uio->uio_offset = fp->f_offset;
        count = uio->uio_resid;
        error = VOP_WRITE(vp, uio, ioflag, cred);
        if ((fflags & FO_POSITION) == 0) {
                mtx_enter(&fp->f_mtx);
                if (ioflag & IO_APPEND)
                        fp->f_offset = uio->uio_offset;
                else
                        fp->f_offset += count - uio->uio_resid;
                mtx_leave(&fp->f_mtx);
        }
        VOP_UNLOCK(vp);

        KERNEL_UNLOCK();
        return (error);
}

/*
 * File table wrapper for vn_stat.
 */
int
vn_statfile(struct file *fp, struct stat *sb, struct proc *p)
{
        struct vnode *vp = fp->f_data;

        return vn_stat(vp, sb, p);
}

/*
 * vnode stat routine.
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct proc *p)
{
        struct vattr va;
        int error;
        mode_t mode;

        error = VOP_GETATTR(vp, &va, p->p_ucred, p);
        if (error)
                return (error);
        /*
         * Copy from vattr table
         */
        memset(sb, 0, sizeof(*sb));
        sb->st_dev = va.va_fsid;
        sb->st_ino = va.va_fileid;
        mode = va.va_mode;
        switch (vp->v_type) {
        case VREG:
                mode |= S_IFREG;
                break;
        case VDIR:
                mode |= S_IFDIR;
                break;
        case VBLK:
                mode |= S_IFBLK;
                break;
        case VCHR:
                mode |= S_IFCHR;
                break;
        case VLNK:
                mode |= S_IFLNK;
                break;
        case VSOCK:
                mode |= S_IFSOCK;
                break;
        case VFIFO:
                mode |= S_IFIFO;
                break;
        default:
                return (EBADF);
        }
        sb->st_mode = mode;
        sb->st_nlink = va.va_nlink;
        sb->st_uid = va.va_uid;
        sb->st_gid = va.va_gid;
        sb->st_rdev = va.va_rdev;
        sb->st_size = va.va_size;
        sb->st_atim.tv_sec = va.va_atime.tv_sec;
        sb->st_atim.tv_nsec = va.va_atime.tv_nsec;
        sb->st_mtim.tv_sec = va.va_mtime.tv_sec;
        sb->st_mtim.tv_nsec = va.va_mtime.tv_nsec;
        sb->st_ctim.tv_sec = va.va_ctime.tv_sec;
        sb->st_ctim.tv_nsec = va.va_ctime.tv_nsec;
        sb->st_blksize = va.va_blocksize;
        sb->st_flags = va.va_flags;
        sb->st_gen = va.va_gen;
        sb->st_blocks = va.va_bytes / S_BLKSIZE;
        return (0);
}

/*
 * File table vnode ioctl routine.
 */
int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
        struct vnode *vp = fp->f_data;
        struct vattr vattr;
        int error = ENOTTY;

        KERNEL_LOCK();
        switch (vp->v_type) {

        case VREG:
        case VDIR:
                if (com == FIONREAD) {
                        error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
                        if (error)
                                break;
                        *(int *)data = vattr.va_size - foffset(fp);

                } else if (com == FIONBIO || com == FIOASYNC)   /* XXX */
                        error = 0;                              /* XXX */
                break;

        case VFIFO:
        case VCHR:
        case VBLK:
                error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
                if (error == 0 && com == TIOCSCTTY) {
                        struct session *s = p->p_p->ps_session;
                        struct vnode *ovp = s->s_ttyvp;

                        s->s_ttyvp = vp;
                        vref(vp);
                        if (ovp)
                                vrele(ovp);
                }
                break;

        default:
                break;
        }
        KERNEL_UNLOCK();

        return (error);
}

/*
 * File table vnode poll routine.
 */
int
vn_poll(struct file *fp, int events, struct proc *p)
{
        return (VOP_POLL(fp->f_data, fp->f_flag, events, p));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire the requested lock.
 */
int
vn_lock(struct vnode *vp, int flags)
{
        int error;

        do {
                if (vp->v_flag & VXLOCK) {
                        vp->v_flag |= VXWANT;
                        tsleep_nsec(vp, PINOD, "vn_lock", INFSLP);
                        error = ENOENT;
                } else {
                        vp->v_lockcount++;
                        error = VOP_LOCK(vp, flags);
                        vp->v_lockcount--;
                        if (error == 0) {
                                if ((vp->v_flag & VXLOCK) == 0)
                                        return (0);

                                /*
                                 * The vnode was exclusively locked while
                                 * acquiring the requested lock.  Release it
                                 * and try again.
                                 */
                                error = ENOENT;
                                VOP_UNLOCK(vp);
                                if (vp->v_lockcount == 0)
                                        wakeup_one(&vp->v_lockcount);
                        }
                }
        } while (flags & LK_RETRY);
        return (error);
}
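/*
 * Example (a sketch, not taken from this file): callers that omit
 * LK_RETRY must be prepared for ENOENT, which indicates the vnode was
 * exclusively locked for reclaim while we waited:
 *
 *	if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
 *		...vp is being reclaimed, back out...
 *	}
 */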
/*
 * File table vnode close routine.
 */
int
vn_closefile(struct file *fp, struct proc *p)
{
        struct vnode *vp = fp->f_data;
        struct flock lf;
        int error;

        KERNEL_LOCK();
        if ((fp->f_iflags & FIF_HASLOCK)) {
                lf.l_whence = SEEK_SET;
                lf.l_start = 0;
                lf.l_len = 0;
                lf.l_type = F_UNLCK;
                (void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
        }
        error = vn_close(vp, fp->f_flag, fp->f_cred, p);
        KERNEL_UNLOCK();
        return (error);
}

int
vn_kqfilter(struct file *fp, struct knote *kn)
{
        return (VOP_KQFILTER(fp->f_data, kn));
}

int
vn_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
{
        struct ucred *cred = p->p_ucred;
        struct vnode *vp = fp->f_data;
        struct vattr vattr;
        off_t newoff;
        int error = 0;
        int special;

        if (vp->v_type == VFIFO)
                return (ESPIPE);

        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

        if (vp->v_type == VCHR)
                special = 1;
        else
                special = 0;

        switch (whence) {
        case SEEK_CUR:
                newoff = fp->f_offset + *offset;
                break;
        case SEEK_END:
                error = VOP_GETATTR(vp, &vattr, cred, p);
                if (error)
                        goto out;
                newoff = *offset + (off_t)vattr.va_size;
                break;
        case SEEK_SET:
                newoff = *offset;
                break;
        default:
                error = EINVAL;
                goto out;
        }
        if (!special && newoff < 0) {
                error = EINVAL;
                goto out;
        }
        mtx_enter(&fp->f_mtx);
        fp->f_offset = newoff;
        mtx_leave(&fp->f_mtx);
        *offset = newoff;

out:
        VOP_UNLOCK(vp);
        return (error);
}

/*
 * Common code for vnode access operations.
 */

/* Check if a directory can be found inside another in the hierarchy */
int
vn_isunder(struct vnode *lvp, struct vnode *rvp, struct proc *p)
{
        int error;

        error = vfs_getcwd_common(lvp, rvp, NULL, NULL, MAXPATHLEN/2, 0, p);

        if (!error)
                return (1);

        return (0);
}
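/*
 * Example (a sketch, not taken from this file): vn_isunder() answers
 * "is lvp at or below rvp?" by walking ".." entries toward the root,
 * so a chroot-style containment check reads:
 *
 *	if (!vn_isunder(p->p_fd->fd_cdir, p->p_fd->fd_rdir, p))
 *		...current directory lies outside the process root...
 */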