/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.58 2008/06/28 17:59:49 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;
	u_int flags;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * Split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						   LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch,
							      cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}

	/*
	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 *	 disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0.
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * Setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
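
/*
 * Illustrative caller pattern for vn_open() (a sketch only, not taken
 * verbatim from any caller; error handling simplified, and the
 * detach-before-nlookup_done() step is the assumed convention for
 * keeping the vnode reference past cleanup):
 *
 *	struct nlookupdata nd;
 *	struct vnode *vp = NULL;
 *	int error;
 *
 *	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, NULL, FREAD, 0);
 *	if (error == 0) {
 *		vp = nd.nl_open_vp;	(take over the reference)
 *		nd.nl_open_vp = NULL;
 *	}
 *	nlookup_done(&nd);		(always clean up nd)
 */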

int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return(0);
}

/*
 * Vnode close call
 *
 * MPSAFE
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
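
/*
 * Worked example of the heuristic above (illustrative; the exact
 * constants are configuration dependent): assuming a 16KB BKVASIZE,
 * a 64KB read() issued exactly at f_nextoff bumps f_seqcount by
 * (65536 + 16383) / 16384 = 4, capped at IO_SEQMAX.  The value
 * returned, f_seqcount << IO_SEQSHIFT, is merged into ioflag so
 * VOP_READ/VOP_WRITE can use it as a read-ahead/clustering hint.
 */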

/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on i386) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 *
 * MPSAFE
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}

/*
 * MPSAFE
 */
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}

/*
 * MPSAFE
 */
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__) || !defined(SMP)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}
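
/*
 * Illustrative get/set pairing (a sketch of how the I/O paths below
 * use these functions when the caller did not supply an explicit
 * offset via O_FOFFSET).  vn_get_fpf_offset() acquires FOFFSETLOCK
 * and vn_set_fpf_offset() releases it, so the two must always be
 * paired:
 *
 *	uio->uio_offset = vn_get_fpf_offset(fp);  (lock + fetch)
 *	...perform the VOP_READ or VOP_WRITE...
 *	vn_set_fpf_offset(fp, uio->uio_offset);   (store + unlock)
 */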

/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * MPSAFE
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillread()/bwillwrite() before calling vn_rdwr().  We also
 * call uio_yield() to give other processes a chance to lock the vnode
 * (either other processes core'ing the same binary, or unrelated
 * processes scanning the directory).
 *
 * MPSAFE
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
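
/*
 * Worked example of the chunking arithmetic above (illustrative; the
 * actual MAXBSIZE is configuration dependent): assuming a 64KB
 * MAXBSIZE, a 200KB (204800 byte) write starting at offset 10000 is
 * issued as a 55536-byte chunk (bringing the offset up to 65536),
 * then two full 65536-byte chunks, then a final 18192-byte chunk,
 * so only the first and last chunks are partial blocks.
 */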

/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers.
 *
 * MPSAFE
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_READ(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}
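
/*
 * NOTE: vn_read() takes the vnode lock LK_SHARED while vn_write()
 *	 takes it LK_EXCLUSIVE, so concurrent readers can proceed in
 *	 parallel but writers are serialized against both readers and
 *	 other writers.
 */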

/*
 * MPSAFE
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}

/*
 * MPSAFE
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare1 = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file".
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}
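
/*
 * Example of the st_blocks computation above (S_BLKSIZE is 512): a
 * file occupying 1MB of storage (va_bytes == 1048576) reports
 * st_blocks == 2048 regardless of the filesystem's native block size.
 */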

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			get_mplock();
			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				rel_mplock();
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
			rel_mplock();
		}
		break;
	}
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}
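
/*
 * Illustrative use of vn_lock() (a sketch): with LK_RETRY the lock
 * attempt itself is retried until it succeeds, so the only failure a
 * caller must handle is ENOENT for a reclaimed vnode:
 *
 *	if (vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) == 0) {
 *		...operate on the locked, still-valid vnode...
 *		vn_unlock(vp);
 *	}
 */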

/*
 * MPSAFE
 */
void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

/*
 * MPSAFE
 */
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean: if the
 * caller cares what the lock status is, it must check the
 * various possible return values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 *
 * MPSAFE
 */
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}

/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 *
 * MPSAFE
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
	int error;

	if (vpls == LK_EXCLUSIVE)
		error = lockmgr(&vp->v_lock, vpls);
}

/*
 * MPSAFE
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	return (error);
}
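
/*
 * Illustrative save/restore of an exclusively held vnode lock around
 * a blocking operation, using the pair of functions above (a sketch):
 *
 *	int vpls = vn_islocked_unlock(vp);
 *	...block without holding the vnode lock...
 *	vn_islocked_relock(vp, vpls);
 *
 * If the caller held the lock shared, or not at all, both calls are
 * effectively NOPs.
 */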