1 /* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed 6 * to Berkeley by John Heidemann of the UCLA Ficus project. 7 * 8 * The statvfs->statfs conversion code was contributed to the DragonFly 9 * Project by Joerg Sonnenberger <joerg@bec.de>. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project 36 * $FreeBSD: src/sys/kern/vfs_default.c,v 1.28.2.7 2003/01/10 18:23:26 bde Exp $ 37 */ 38 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/buf.h> 42 #include <sys/conf.h> 43 #include <sys/fcntl.h> 44 #include <sys/file.h> 45 #include <sys/kernel.h> 46 #include <sys/lock.h> 47 #include <sys/malloc.h> 48 #include <sys/mount.h> 49 #include <sys/unistd.h> 50 #include <sys/vnode.h> 51 #include <sys/namei.h> 52 #include <sys/mountctl.h> 53 #include <sys/vfs_quota.h> 54 #include <sys/uio.h> 55 56 #include <machine/limits.h> 57 58 #include <vm/vm.h> 59 #include <vm/vm_object.h> 60 #include <vm/vm_page.h> 61 #include <vm/vm_pager.h> 62 #include <vm/vnode_pager.h> 63 64 static int vop_nolookup (struct vop_old_lookup_args *); 65 static int vop_nostrategy (struct vop_strategy_args *); 66 67 /* 68 * This vnode table stores what we want to do if the filesystem doesn't 69 * implement a particular VOP. 70 * 71 * If there is no specific entry here, we will return EOPNOTSUPP. 
72 */ 73 struct vop_ops default_vnode_vops = { 74 .vop_default = vop_eopnotsupp, 75 .vop_advlock = (void *)vop_einval, 76 .vop_fsync = (void *)vop_null, 77 .vop_ioctl = (void *)vop_enotty, 78 .vop_mmap = (void *)vop_einval, 79 .vop_old_lookup = vop_nolookup, 80 .vop_open = vop_stdopen, 81 .vop_close = vop_stdclose, 82 .vop_getattr_lite = vop_stdgetattr_lite, 83 .vop_pathconf = vop_stdpathconf, 84 .vop_readlink = (void *)vop_einval, 85 .vop_reallocblks = (void *)vop_eopnotsupp, 86 .vop_strategy = vop_nostrategy, 87 .vop_getacl = (void *)vop_eopnotsupp, 88 .vop_setacl = (void *)vop_eopnotsupp, 89 .vop_aclcheck = (void *)vop_eopnotsupp, 90 .vop_getextattr = (void *)vop_eopnotsupp, 91 .vop_setextattr = (void *)vop_eopnotsupp, 92 .vop_markatime = vop_stdmarkatime, 93 .vop_allocate = vop_stdallocate, 94 .vop_nresolve = vop_compat_nresolve, 95 .vop_nlookupdotdot = vop_compat_nlookupdotdot, 96 .vop_ncreate = vop_compat_ncreate, 97 .vop_nmkdir = vop_compat_nmkdir, 98 .vop_nmknod = vop_compat_nmknod, 99 .vop_nlink = vop_compat_nlink, 100 .vop_nsymlink = vop_compat_nsymlink, 101 .vop_nwhiteout = vop_compat_nwhiteout, 102 .vop_nremove = vop_compat_nremove, 103 .vop_nrmdir = vop_compat_nrmdir, 104 .vop_nrename = vop_compat_nrename, 105 .vop_mountctl = vop_stdmountctl 106 }; 107 108 VNODEOP_SET(default_vnode_vops); 109 110 int 111 vop_eopnotsupp(struct vop_generic_args *ap) 112 { 113 return (EOPNOTSUPP); 114 } 115 116 int 117 vop_ebadf(struct vop_generic_args *ap) 118 { 119 return (EBADF); 120 } 121 122 int 123 vop_enotty(struct vop_generic_args *ap) 124 { 125 return (ENOTTY); 126 } 127 128 int 129 vop_einval(struct vop_generic_args *ap) 130 { 131 return (EINVAL); 132 } 133 134 int 135 vop_stdmarkatime(struct vop_markatime_args *ap) 136 { 137 return (EOPNOTSUPP); 138 } 139 140 int 141 vop_stdallocate(struct vop_allocate_args *ap) 142 { 143 struct thread *td; 144 struct vnode *vp; 145 struct vattr vattr, *vap; 146 struct uio auio; 147 struct iovec aiov; 148 uint8_t *buf; 149 
off_t offset, len, fsize; 150 size_t iosize; 151 int error; 152 153 td = curthread; 154 vap = &vattr; 155 buf = NULL; 156 157 vp = ap->a_vp; 158 offset = ap->a_offset; 159 len = ap->a_len; 160 161 error = VOP_GETATTR(vp, vap); 162 if (error != 0) 163 goto out; 164 fsize = vap->va_size; 165 iosize = vap->va_blocksize; 166 if (iosize == 0) 167 iosize = BLKDEV_IOSIZE; 168 if (iosize > vmaxiosize(vp)) 169 iosize = vmaxiosize(vp); 170 buf = kmalloc(iosize, M_TEMP, M_WAITOK); 171 172 if (offset + len > vap->va_size) { 173 /* 174 * Test offset + len against the filesystem's maxfilesize. 175 */ 176 VATTR_NULL(&vattr); 177 vap->va_size = offset + len; 178 error = VOP_SETATTR(vp, vap, td->td_ucred); 179 if (error != 0) 180 goto out; 181 VATTR_NULL(&vattr); 182 vap->va_size = fsize; 183 error = VOP_SETATTR(vp, vap, td->td_ucred); 184 if (error != 0) 185 goto out; 186 } 187 188 for (;;) { 189 /* 190 * Read and write back anything below the nominal file 191 * size. There's currently no way outside the filesystem 192 * to know whether this area is sparse or not. 
193 */ 194 off_t cur = iosize; 195 if ((offset % iosize) != 0) 196 cur -= (offset % iosize); 197 if (cur > len) 198 cur = len; 199 if (offset < fsize) { 200 aiov.iov_base = buf; 201 aiov.iov_len = cur; 202 auio.uio_iov = &aiov; 203 auio.uio_iovcnt = 1; 204 auio.uio_offset = offset; 205 auio.uio_resid = cur; 206 auio.uio_segflg = UIO_SYSSPACE; 207 auio.uio_rw = UIO_READ; 208 auio.uio_td = td; 209 error = VOP_READ(vp, &auio, 0, td->td_ucred); 210 if (error != 0) 211 break; 212 if (auio.uio_resid > 0) { 213 bzero(buf + cur - auio.uio_resid, 214 auio.uio_resid); 215 } 216 } else { 217 bzero(buf, cur); 218 } 219 220 aiov.iov_base = buf; 221 aiov.iov_len = cur; 222 auio.uio_iov = &aiov; 223 auio.uio_iovcnt = 1; 224 auio.uio_offset = offset; 225 auio.uio_resid = cur; 226 auio.uio_segflg = UIO_SYSSPACE; 227 auio.uio_rw = UIO_WRITE; 228 auio.uio_td = td; 229 230 error = VOP_WRITE(vp, &auio, 0, td->td_ucred); 231 if (error != 0) 232 break; 233 234 len -= cur; 235 offset += cur; 236 if (len == 0) 237 break; 238 /* 239 if (should_yield()) 240 break; 241 */ 242 } 243 out: 244 ap->a_offset = offset; 245 ap->a_len = len; 246 kfree(buf, M_TEMP); 247 248 return (error); 249 } 250 251 int 252 vop_null(struct vop_generic_args *ap) 253 { 254 return (0); 255 } 256 257 int 258 vop_defaultop(struct vop_generic_args *ap) 259 { 260 return (VOCALL(&default_vnode_vops, ap)); 261 } 262 263 /* 264 * vop_compat_resolve { struct nchandle *a_nch, struct vnode *dvp } 265 * XXX STOPGAP FUNCTION 266 * 267 * XXX OLD API ROUTINE! WHEN ALL VFSs HAVE BEEN CLEANED UP THIS PROCEDURE 268 * WILL BE REMOVED. This procedure exists for all VFSs which have not 269 * yet implemented VOP_NRESOLVE(). It converts VOP_NRESOLVE() into a 270 * vop_old_lookup() and does appropriate translations. 271 * 272 * Resolve a ncp for VFSs which do not support the VOP. 
Eventually all
 * VFSs will support this VOP and this routine can be removed, since
 * VOP_NRESOLVE() is far less complex then the older LOOKUP/CACHEDLOOKUP
 * API.
 *
 * A locked ncp is passed in to be resolved.  The NCP is resolved by
 * figuring out the vnode (if any) and calling cache_setvp() to attach the
 * vnode to the entry.  If the entry represents a non-existent node then
 * cache_setvp() is called with a NULL vnode to resolve the entry into a
 * negative cache entry.  No vnode locks are retained and the
 * ncp is left locked on return.
 *
 * The ncp will NEVER represent "", "." or "..", or contain any slashes.
 *
 * There is a potential directory and vnode interlock.  The lock order
 * requirement is: namecache, governing directory, resolved vnode.
 */
int
vop_compat_nresolve(struct vop_nresolve_args *ap)
{
	int error;
	struct vnode *dvp;
	struct vnode *vp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct componentname cnp;

	nch = ap->a_nch;	/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;

	/*
	 * Lock the governing directory.
	 *
	 * UFS currently stores all sorts of side effects, including a loop
	 * variable, in the directory inode.  That needs to be fixed and the
	 * other VFS's audited before we can switch to LK_SHARED.
	 */
	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/*
	 * Fake up a componentname for the old-style VOP_OLD_LOOKUP API.
	 */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_LOOKUP;
	cnp.cn_flags = 0;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = curthread; /* XXX */

	/*
	 * vop_old_lookup() always returns vp locked.  dvp may or may not be
	 * left locked depending on CNP_PDIRUNLOCK.
	 */
	error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
	if (error == 0)
		vn_unlock(vp);
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		/* was resolved by another process while we were unlocked */
		if (error == 0)
			vrele(vp);
	} else if (error == 0) {
		/* positive hit: attach the vnode to the namecache entry */
		KKASSERT(vp != NULL);
		cache_setvp(nch, vp);
		vrele(vp);
	} else if (error == ENOENT) {
		/* miss: create a negative cache entry */
		KKASSERT(vp == NULL);
		if (cnp.cn_flags & CNP_ISWHITEOUT)
			ncp->nc_flag |= NCF_WHITEOUT;
		cache_setvp(nch, NULL);
	}
	vrele(dvp);
	return (error);
}

/*
 * vop_compat_nlookupdotdot { struct vnode *a_dvp,
 *			struct vnode **a_vpp,
 *			struct ucred *a_cred }
 *
 * Lookup the vnode representing the parent directory of the specified
 * directory vnode.  a_dvp should not be locked.  If no error occurs *a_vpp
 * will contain the parent vnode, locked and refd, else *a_vpp will be NULL.
 *
 * This function is designed to aid NFS server-side operations and is
 * used by cache_fromdvp() to create a consistent, connected namecache
 * topology.
 *
 * As part of the NEW API work, VFSs will first split their CNP_ISDOTDOT
 * code out from their *_lookup() and create *_nlookupdotdot().  Then as time
 * permits VFSs will implement the remaining *_n*() calls and finally get
 * rid of their *_lookup() call.
 */
int
vop_compat_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct componentname cnp;
	int error;

	/*
	 * UFS currently stores all sorts of side effects, including a loop
	 * variable, in the directory inode.  That needs to be fixed and the
	 * other VFS's audited before we can switch to LK_SHARED.
	 */
	*ap->a_vpp = NULL;
	if ((error = vget(ap->a_dvp, LK_EXCLUSIVE)) != 0)
		return (error);
	if (ap->a_dvp->v_type != VDIR) {
		vput(ap->a_dvp);
		return (ENOTDIR);
	}

	/*
	 * Fake up a ".." lookup through the old-style API.
	 */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_LOOKUP;
	cnp.cn_flags = CNP_ISDOTDOT;
	cnp.cn_nameptr = "..";
	cnp.cn_namelen = 2;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = curthread; /* XXX */

	/*
	 * vop_old_lookup() always returns vp locked.  dvp may or may not be
	 * left locked depending on CNP_PDIRUNLOCK.
	 *
	 * (*vpp) will be returned locked if no error occurred, which is the
	 * state we want.
	 */
	error = vop_old_lookup(ap->a_head.a_ops, ap->a_dvp, ap->a_vpp, &cnp);
	if (cnp.cn_flags & CNP_PDIRUNLOCK)
		vrele(ap->a_dvp);
	else
		vput(ap->a_dvp);
	return (error);
}

/*
 * vop_compat_ncreate { struct nchandle *a_nch, 	XXX STOPGAP FUNCTION
 *			struct vnode *a_dvp,
 *			struct vnode **a_vpp,
 *			struct ucred *a_cred,
 *			struct vattr *a_vap }
 *
 * Create a file as specified by a_vap.  Compatibility requires us to issue
 * the appropriate VOP_OLD_LOOKUP before we issue VOP_OLD_CREATE in order
 * to setup the directory inode's i_offset and i_count (e.g. in UFS).
 */
int
vop_compat_ncreate(struct vop_ncreate_args *ap)
{
	struct thread *td = curthread;
	struct componentname cnp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct vnode *dvp;
	int error;

	/*
	 * Sanity checks, get a locked directory vnode.
	 */
	nch = ap->a_nch;		/* locked namecache node */
	dvp = ap->a_dvp;
	ncp = nch->ncp;

	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/*
	 * Setup the cnp for a traditional vop_old_lookup() call.  The lookup
	 * caches all information required to create the entry in the
	 * directory inode.
We expect a return code of EJUSTRETURN for
	 * the CREATE case.  The cnp must simulate a saved-name situation.
	 */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_CREATE;
	cnp.cn_flags = CNP_LOCKPARENT;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = td;
	*ap->a_vpp = NULL;

	error = vop_old_lookup(ap->a_head.a_ops, dvp, ap->a_vpp, &cnp);

	/*
	 * EJUSTRETURN should be returned for this case, which means that
	 * the VFS has setup the directory inode for the create.  The dvp we
	 * passed in is expected to remain in a locked state.
	 *
	 * If the VOP_OLD_CREATE is successful we are responsible for updating
	 * the cache state of the locked ncp that was passed to us.
	 */
	if (error == EJUSTRETURN) {
		KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
		error = VOP_OLD_CREATE(dvp, ap->a_vpp, &cnp, ap->a_vap);
		if (error == 0) {
			cache_setunresolved(nch);
			cache_setvp(nch, *ap->a_vpp);
		}
	} else {
		/* lookup succeeded: the name already exists */
		if (error == 0) {
			vput(*ap->a_vpp);
			*ap->a_vpp = NULL;
			error = EEXIST;
		}
		KKASSERT(*ap->a_vpp == NULL);
	}
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	vrele(dvp);
	return (error);
}

/*
 * vop_compat_nmkdir { struct nchandle *a_nch, 	XXX STOPGAP FUNCTION
 *			struct vnode *a_dvp,
 *			struct vnode **a_vpp,
 *			struct ucred *a_cred,
 *			struct vattr *a_vap }
 *
 * Create a directory as specified by a_vap.  Compatibility requires us to
 * issue the appropriate VOP_OLD_LOOKUP before we issue VOP_OLD_MKDIR in
 * order to setup the directory inode's i_offset and i_count (e.g. in UFS).
 */
int
vop_compat_nmkdir(struct vop_nmkdir_args *ap)
{
	struct thread *td = curthread;
	struct componentname cnp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct vnode *dvp;
	int error;

	/*
	 * Sanity checks, get a locked directory vnode.
	 */
	nch = ap->a_nch;		/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;
	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/*
	 * Setup the cnp for a traditional vop_old_lookup() call.  The lookup
	 * caches all information required to create the entry in the
	 * directory inode.  We expect a return code of EJUSTRETURN for
	 * the CREATE case.  The cnp must simulate a saved-name situation.
	 */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_CREATE;
	cnp.cn_flags = CNP_LOCKPARENT;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = td;
	*ap->a_vpp = NULL;

	error = vop_old_lookup(ap->a_head.a_ops, dvp, ap->a_vpp, &cnp);

	/*
	 * EJUSTRETURN should be returned for this case, which means that
	 * the VFS has setup the directory inode for the create.  The dvp we
	 * passed in is expected to remain in a locked state.
	 *
	 * If the VOP_OLD_MKDIR is successful we are responsible for updating
	 * the cache state of the locked ncp that was passed to us.
	 */
	if (error == EJUSTRETURN) {
		KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
		error = VOP_OLD_MKDIR(dvp, ap->a_vpp, &cnp, ap->a_vap);
		if (error == 0) {
			cache_setunresolved(nch);
			cache_setvp(nch, *ap->a_vpp);
		}
	} else {
		/* lookup succeeded: the name already exists */
		if (error == 0) {
			vput(*ap->a_vpp);
			*ap->a_vpp = NULL;
			error = EEXIST;
		}
		KKASSERT(*ap->a_vpp == NULL);
	}
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	vrele(dvp);
	return (error);
}

/*
 * vop_compat_nmknod { struct nchandle *a_nch, 	XXX STOPGAP FUNCTION
 *			struct vnode *a_dvp,
 *			struct vnode **a_vpp,
 *			struct ucred *a_cred,
 *			struct vattr *a_vap }
 *
 * Create a device or fifo node as specified by a_vap.  Compatibility requires
 * us to issue the appropriate VOP_OLD_LOOKUP before we issue VOP_OLD_MKNOD
 * in order to setup the directory inode's i_offset and i_count (e.g. in UFS).
 */
int
vop_compat_nmknod(struct vop_nmknod_args *ap)
{
	struct thread *td = curthread;
	struct componentname cnp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct vnode *dvp;
	int error;

	/*
	 * Sanity checks, get a locked directory vnode.
	 */
	nch = ap->a_nch;		/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;

	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/*
	 * Setup the cnp for a traditional vop_old_lookup() call.  The lookup
	 * caches all information required to create the entry in the
	 * directory inode.  We expect a return code of EJUSTRETURN for
	 * the CREATE case.  The cnp must simulate a saved-name situation.
	 */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_CREATE;
	cnp.cn_flags = CNP_LOCKPARENT;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = td;
	*ap->a_vpp = NULL;

	error = vop_old_lookup(ap->a_head.a_ops, dvp, ap->a_vpp, &cnp);

	/*
	 * EJUSTRETURN should be returned for this case, which means that
	 * the VFS has setup the directory inode for the create.  The dvp we
	 * passed in is expected to remain in a locked state.
	 *
	 * If the VOP_OLD_MKNOD is successful we are responsible for updating
	 * the cache state of the locked ncp that was passed to us.
	 */
	if (error == EJUSTRETURN) {
		KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
		error = VOP_OLD_MKNOD(dvp, ap->a_vpp, &cnp, ap->a_vap);
		if (error == 0) {
			cache_setunresolved(nch);
			cache_setvp(nch, *ap->a_vpp);
		}
	} else {
		/* lookup succeeded: the name already exists */
		if (error == 0) {
			vput(*ap->a_vpp);
			*ap->a_vpp = NULL;
			error = EEXIST;
		}
		KKASSERT(*ap->a_vpp == NULL);
	}
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	vrele(dvp);
	return (error);
}

/*
 * vop_compat_nlink { struct nchandle *a_nch, 	XXX STOPGAP FUNCTION
 *			struct vnode *a_dvp,
 *			struct vnode *a_vp,
 *			struct ucred *a_cred }
 *
 * The passed vp is locked and represents the source.  The passed ncp is
 * locked and represents the target to create.
 */
int
vop_compat_nlink(struct vop_nlink_args *ap)
{
	struct thread *td = curthread;
	struct componentname cnp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct vnode *dvp;
	struct vnode *tvp;
	int error;

	/*
	 * Sanity checks, get a locked directory vnode.
	 */
	nch = ap->a_nch;		/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;

	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/*
	 * Setup the cnp for a traditional vop_old_lookup() call.  The lookup
	 * caches all information required to create the entry in the
	 * directory inode.  We expect a return code of EJUSTRETURN for
	 * the CREATE case.  The cnp must simulate a saved-name situation.
	 *
	 * It should not be possible for there to be a vnode collision
	 * between the source vp and target (name lookup).  However NFS
	 * clients racing each other can cause NFS to alias the same vnode
	 * across several names without the rest of the system knowing it.
	 * Use CNP_NOTVP to avoid a panic in this situation.
	 */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_CREATE;
	cnp.cn_flags = CNP_LOCKPARENT | CNP_NOTVP;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = td;
	cnp.cn_notvp = ap->a_vp;

	tvp = NULL;
	error = vop_old_lookup(ap->a_head.a_ops, dvp, &tvp, &cnp);

	/*
	 * EJUSTRETURN should be returned for this case, which means that
	 * the VFS has setup the directory inode for the create.  The dvp we
	 * passed in is expected to remain in a locked state.
	 *
	 * If the VOP_OLD_LINK is successful we are responsible for updating
	 * the cache state of the locked ncp that was passed to us.
	 */
	if (error == EJUSTRETURN) {
		KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
		error = VOP_OLD_LINK(dvp, ap->a_vp, &cnp);
		if (error == 0) {
			cache_setunresolved(nch);
			cache_setvp(nch, ap->a_vp);
		}
	} else {
		/* lookup succeeded: the target name already exists */
		if (error == 0) {
			vput(tvp);
			error = EEXIST;
		}
	}
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	vrele(dvp);
	return (error);
}

/*
 * vop_compat_nsymlink { struct nchandle *a_nch,	XXX STOPGAP FUNCTION
 *			struct vnode *a_dvp,
 *			struct vnode **a_vpp,
 *			struct ucred *a_cred,
 *			struct vattr *a_vap,
 *			char *a_target }
 *
 * Create a symbolic link pointing at a_target with the attributes in
 * a_vap, via VOP_OLD_LOOKUP + VOP_OLD_SYMLINK.
 */
int
vop_compat_nsymlink(struct vop_nsymlink_args *ap)
{
	struct thread *td = curthread;
	struct componentname cnp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct vnode *dvp;
	struct vnode *vp;
	int error;

	/*
	 * Sanity checks, get a locked directory vnode.
	 */
	*ap->a_vpp = NULL;
	nch = ap->a_nch;		/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;

	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/*
	 * Setup the cnp for a traditional vop_old_lookup() call.  The lookup
	 * caches all information required to create the entry in the
	 * directory inode.  We expect a return code of EJUSTRETURN for
	 * the CREATE case.  The cnp must simulate a saved-name situation.
	 */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_CREATE;
	cnp.cn_flags = CNP_LOCKPARENT;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = td;

	vp = NULL;
	error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);

	/*
	 * EJUSTRETURN should be returned for this case, which means that
	 * the VFS has setup the directory inode for the create.  The dvp we
	 * passed in is expected to remain in a locked state.
	 *
	 * If the VOP_OLD_SYMLINK is successful we are responsible for updating
	 * the cache state of the locked ncp that was passed to us.
	 */
	if (error == EJUSTRETURN) {
		KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
		error = VOP_OLD_SYMLINK(dvp, &vp, &cnp, ap->a_vap, ap->a_target);
		if (error == 0) {
			cache_setunresolved(nch);
			cache_setvp(nch, vp);
			*ap->a_vpp = vp;
		}
	} else {
		/* lookup succeeded: the name already exists */
		if (error == 0) {
			vput(vp);
			vp = NULL;
			error = EEXIST;
		}
		KKASSERT(vp == NULL);
	}
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	vrele(dvp);
	return (error);
}

/*
 * vop_compat_nwhiteout { struct nchandle *a_nch, 	XXX STOPGAP FUNCTION
 *			struct vnode *a_dvp,
 *			struct ucred *a_cred,
 *			int a_flags }
 *
 * Issue a whiteout operation (create, lookup, or delete).  Compatibility
 * requires us to issue the appropriate VOP_OLD_LOOKUP before we issue
 * VOP_OLD_WHITEOUT in order to setup the directory inode's i_offset and i_count
 * (e.g. in UFS) for the NAMEI_CREATE and NAMEI_DELETE ops.  For NAMEI_LOOKUP
 * no lookup is necessary.
 */
int
vop_compat_nwhiteout(struct vop_nwhiteout_args *ap)
{
	struct thread *td = curthread;
	struct componentname cnp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct vnode *dvp;
	struct vnode *vp;
	int error;

	/*
	 * Sanity checks, get a locked directory vnode.
	 */
	nch = ap->a_nch;		/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;

	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/*
	 * Setup the cnp for a traditional vop_old_lookup() call.  The lookup
	 * caches all information required to create the entry in the
	 * directory inode.  We expect a return code of EJUSTRETURN for
	 * the CREATE case.  The cnp must simulate a saved-name situation.
	 */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = ap->a_flags;	/* NAMEI_CREATE/DELETE/LOOKUP */
	cnp.cn_flags = CNP_LOCKPARENT;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = td;

	vp = NULL;

	/*
	 * EJUSTRETURN should be returned for the CREATE or DELETE cases.
	 * The VFS has setup the directory inode for the create.  The dvp we
	 * passed in is expected to remain in a locked state.
	 *
	 * If the VOP_OLD_WHITEOUT is successful we are responsible for updating
	 * the cache state of the locked ncp that was passed to us.
	 */
	switch(ap->a_flags) {
	case NAMEI_DELETE:
		cnp.cn_flags |= CNP_DOWHITEOUT;
		/* fall through */
	case NAMEI_CREATE:
		error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
		if (error == EJUSTRETURN) {
			KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
			error = VOP_OLD_WHITEOUT(dvp, &cnp, ap->a_flags);
			if (error == 0)
				cache_setunresolved(nch);
		} else {
			/* lookup succeeded: the name already exists */
			if (error == 0) {
				vput(vp);
				vp = NULL;
				error = EEXIST;
			}
			KKASSERT(vp == NULL);
		}
		break;
	case NAMEI_LOOKUP:
		/* no directory setup needed for a whiteout lookup */
		error = VOP_OLD_WHITEOUT(dvp, NULL, ap->a_flags);
		break;
	default:
		error = EINVAL;
		break;
	}
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	vrele(dvp);
	return (error);
}


/*
 * vop_compat_nremove { struct nchandle *a_nch, 	XXX STOPGAP FUNCTION
 *			struct vnode *a_dvp,
 *			struct ucred *a_cred }
 *
 * Remove the file named by the locked ncp via VOP_OLD_LOOKUP (DELETE) +
 * VOP_OLD_REMOVE.
 */
int
vop_compat_nremove(struct vop_nremove_args *ap)
{
	struct thread *td = curthread;
	struct componentname cnp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct vnode *dvp;
	struct vnode *vp;
	int error;

	/*
	 * Sanity checks, get a locked directory vnode.
	 */
	nch = ap->a_nch;		/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;

	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/*
	 * Setup the cnp for a traditional vop_old_lookup() call.  The lookup
	 * caches all information required to delete the entry in the
	 * directory inode.  We expect a return code of 0 for the DELETE
	 * case (meaning that a vp has been found).  The cnp must simulate
	 * a saved-name situation.
	 */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_DELETE;
	cnp.cn_flags = CNP_LOCKPARENT;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = td;

	/*
	 * The vnode must not be a directory; directories are removed via
	 * nrmdir instead.
	 */
	vp = NULL;
	error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
	if (error == 0 && vp->v_type == VDIR)
		error = EPERM;
	if (error == 0) {
		KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
		error = VOP_OLD_REMOVE(dvp, vp, &cnp);
		if (error == 0)
			cache_unlink(nch);
	}
	if (vp) {
		/* avoid double-unlocking when the entry aliases dvp */
		if (dvp == vp)
			vrele(vp);
		else
			vput(vp);
	}
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	vrele(dvp);
	return (error);
}

/*
 * vop_compat_nrmdir { struct nchandle *a_nch, 	XXX STOPGAP FUNCTION
 *		       struct vnode *dvp,
 *		       struct ucred *a_cred }
 *
 * Remove the directory named by the locked ncp via VOP_OLD_LOOKUP (DELETE) +
 * VOP_OLD_RMDIR.
 */
int
vop_compat_nrmdir(struct vop_nrmdir_args *ap)
{
	struct thread *td = curthread;
	struct componentname cnp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct vnode *dvp;
	struct vnode *vp;
	int error;

	/*
	 * Sanity checks, get a locked directory vnode.
	 */
	nch = ap->a_nch;		/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;

	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/*
	 * Setup the cnp for a traditional vop_old_lookup() call.  The lookup
	 * caches all information required to delete the entry in the
	 * directory inode.  We expect a return code of 0 for the DELETE
	 * case (meaning that a vp has been found).  The cnp must simulate
	 * a saved-name situation.
	 */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_DELETE;
	cnp.cn_flags = CNP_LOCKPARENT;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = td;

	/*
	 * The vnode must be a directory and must not represent the
	 * current directory.
	 */
	vp = NULL;
	error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
	if (error == 0 && vp->v_type != VDIR)
		error = ENOTDIR;
	if (error == 0 && vp == dvp)
		error = EINVAL;
	if (error == 0 && (vp->v_flag & VROOT))
		error = EBUSY;
	if (error == 0) {
		KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
		error = VOP_OLD_RMDIR(dvp, vp, &cnp);

		/*
		 * Note that this invalidation will cause any process
		 * currently CD'd into the directory being removed to be
		 * disconnected from the topology and not be able to ".."
		 * back out.
1033 */ 1034 if (error == 0) { 1035 cache_inval(nch, CINV_DESTROY); 1036 cache_inval_vp(vp, CINV_DESTROY); 1037 } 1038 } 1039 if (vp) { 1040 if (dvp == vp) 1041 vrele(vp); 1042 else 1043 vput(vp); 1044 } 1045 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0) 1046 vn_unlock(dvp); 1047 vrele(dvp); 1048 return (error); 1049 } 1050 1051 /* 1052 * vop_compat_nrename { struct nchandle *a_fnch, XXX STOPGAP FUNCTION 1053 * struct nchandle *a_tnch, 1054 * struct ucred *a_cred } 1055 * 1056 * This is a fairly difficult procedure. The old VOP_OLD_RENAME requires that 1057 * the source directory and vnode be unlocked and the target directory and 1058 * vnode (if it exists) be locked. All arguments will be vrele'd and 1059 * the targets will also be unlocked regardless of the return code. 1060 */ 1061 int 1062 vop_compat_nrename(struct vop_nrename_args *ap) 1063 { 1064 struct thread *td = curthread; 1065 struct componentname fcnp; 1066 struct componentname tcnp; 1067 struct nchandle *fnch; 1068 struct nchandle *tnch; 1069 struct namecache *fncp; 1070 struct namecache *tncp; 1071 struct vnode *fdvp, *fvp; 1072 struct vnode *tdvp, *tvp; 1073 int error; 1074 1075 /* 1076 * Sanity checks, get referenced vnodes representing the source. 1077 */ 1078 fnch = ap->a_fnch; /* locked namecache node */ 1079 fncp = fnch->ncp; 1080 fdvp = ap->a_fdvp; 1081 1082 /* 1083 * Temporarily lock the source directory and lookup in DELETE mode to 1084 * check permissions. XXX delete permissions should have been 1085 * checked by nlookup(), we need to add NLC_DELETE for delete 1086 * checking. It is unclear whether VFS's require the directory setup 1087 * info NAMEI_DELETE causes to be stored in the fdvp's inode, but 1088 * since it isn't locked and since UFS always does a relookup of 1089 * the source, it is believed that the only side effect that matters 1090 * is the permissions check. 
1091 */ 1092 if ((error = vget(fdvp, LK_EXCLUSIVE)) != 0) { 1093 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n", 1094 fncp, fncp->nc_name); 1095 return(EAGAIN); 1096 } 1097 1098 bzero(&fcnp, sizeof(fcnp)); 1099 fcnp.cn_nameiop = NAMEI_DELETE; 1100 fcnp.cn_flags = CNP_LOCKPARENT; 1101 fcnp.cn_nameptr = fncp->nc_name; 1102 fcnp.cn_namelen = fncp->nc_nlen; 1103 fcnp.cn_cred = ap->a_cred; 1104 fcnp.cn_td = td; 1105 1106 /* 1107 * note: vop_old_lookup (i.e. VOP_OLD_LOOKUP) always returns a locked 1108 * fvp. 1109 */ 1110 fvp = NULL; 1111 error = vop_old_lookup(ap->a_head.a_ops, fdvp, &fvp, &fcnp); 1112 if (error == 0 && (fvp->v_flag & VROOT)) { 1113 vput(fvp); /* as if vop_old_lookup had failed */ 1114 error = EBUSY; 1115 } 1116 if ((fcnp.cn_flags & CNP_PDIRUNLOCK) == 0) { 1117 fcnp.cn_flags |= CNP_PDIRUNLOCK; 1118 vn_unlock(fdvp); 1119 } 1120 if (error) { 1121 vrele(fdvp); 1122 return (error); 1123 } 1124 vn_unlock(fvp); 1125 1126 /* 1127 * fdvp and fvp are now referenced and unlocked. 1128 * 1129 * Get a locked directory vnode for the target and lookup the target 1130 * in CREATE mode so it places the required information in the 1131 * directory inode. 1132 */ 1133 tnch = ap->a_tnch; /* locked namecache node */ 1134 tncp = tnch->ncp; 1135 tdvp = ap->a_tdvp; 1136 if (error) { 1137 vrele(fdvp); 1138 vrele(fvp); 1139 return (error); 1140 } 1141 if ((error = vget(tdvp, LK_EXCLUSIVE)) != 0) { 1142 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n", 1143 tncp, tncp->nc_name); 1144 vrele(fdvp); 1145 vrele(fvp); 1146 return(EAGAIN); 1147 } 1148 1149 /* 1150 * Setup the cnp for a traditional vop_old_lookup() call. The lookup 1151 * caches all information required to create the entry in the 1152 * target directory inode. 
1153 */ 1154 bzero(&tcnp, sizeof(tcnp)); 1155 tcnp.cn_nameiop = NAMEI_RENAME; 1156 tcnp.cn_flags = CNP_LOCKPARENT; 1157 tcnp.cn_nameptr = tncp->nc_name; 1158 tcnp.cn_namelen = tncp->nc_nlen; 1159 tcnp.cn_cred = ap->a_cred; 1160 tcnp.cn_td = td; 1161 1162 tvp = NULL; 1163 error = vop_old_lookup(ap->a_head.a_ops, tdvp, &tvp, &tcnp); 1164 1165 if (error == EJUSTRETURN) { 1166 /* 1167 * Target does not exist. tvp should be NULL. 1168 */ 1169 KKASSERT(tvp == NULL); 1170 KKASSERT((tcnp.cn_flags & CNP_PDIRUNLOCK) == 0); 1171 error = VOP_OLD_RENAME(fdvp, fvp, &fcnp, tdvp, tvp, &tcnp); 1172 if (error == 0) 1173 cache_rename(fnch, tnch); 1174 } else if (error == 0) { 1175 /* 1176 * Target exists. VOP_OLD_RENAME should correctly delete the 1177 * target. 1178 */ 1179 KKASSERT((tcnp.cn_flags & CNP_PDIRUNLOCK) == 0); 1180 error = VOP_OLD_RENAME(fdvp, fvp, &fcnp, tdvp, tvp, &tcnp); 1181 if (error == 0) 1182 cache_rename(fnch, tnch); 1183 } else { 1184 vrele(fdvp); 1185 vrele(fvp); 1186 if (tcnp.cn_flags & CNP_PDIRUNLOCK) 1187 vrele(tdvp); 1188 else 1189 vput(tdvp); 1190 } 1191 return (error); 1192 } 1193 1194 static int 1195 vop_nolookup(struct vop_old_lookup_args *ap) 1196 { 1197 1198 *ap->a_vpp = NULL; 1199 return (ENOTDIR); 1200 } 1201 1202 /* 1203 * vop_nostrategy: 1204 * 1205 * Strategy routine for VFS devices that have none. 1206 * 1207 * B_ERROR and B_INVAL must be cleared prior to calling any strategy 1208 * routine. Typically this is done for a BUF_CMD_READ strategy call. 1209 * Typically B_INVAL is assumed to already be clear prior to a write 1210 * and should not be cleared manually unless you just made the buffer 1211 * invalid. B_ERROR should be cleared either way. 
1212 */ 1213 1214 static int 1215 vop_nostrategy (struct vop_strategy_args *ap) 1216 { 1217 kprintf("No strategy for buffer at %p\n", ap->a_bio->bio_buf); 1218 vprint("", ap->a_vp); 1219 ap->a_bio->bio_buf->b_flags |= B_ERROR; 1220 ap->a_bio->bio_buf->b_error = EOPNOTSUPP; 1221 biodone(ap->a_bio); 1222 return (EOPNOTSUPP); 1223 } 1224 1225 int 1226 vop_stdpathconf(struct vop_pathconf_args *ap) 1227 { 1228 int error = 0; 1229 1230 switch (ap->a_name) { 1231 case _PC_CHOWN_RESTRICTED: 1232 *ap->a_retval = _POSIX_CHOWN_RESTRICTED; 1233 break; 1234 case _PC_LINK_MAX: 1235 *ap->a_retval = LINK_MAX; 1236 break; 1237 case _PC_MAX_CANON: 1238 *ap->a_retval = MAX_CANON; 1239 break; 1240 case _PC_MAX_INPUT: 1241 *ap->a_retval = MAX_INPUT; 1242 break; 1243 case _PC_NAME_MAX: 1244 *ap->a_retval = NAME_MAX; 1245 break; 1246 case _PC_NO_TRUNC: 1247 *ap->a_retval = _POSIX_NO_TRUNC; 1248 break; 1249 case _PC_PATH_MAX: 1250 *ap->a_retval = PATH_MAX; 1251 break; 1252 case _PC_PIPE_BUF: 1253 *ap->a_retval = PIPE_BUF; 1254 break; 1255 case _PC_VDISABLE: 1256 *ap->a_retval = _POSIX_VDISABLE; 1257 break; 1258 default: 1259 error = EINVAL; 1260 break; 1261 } 1262 return (error); 1263 } 1264 1265 /* 1266 * Standard open. 1267 * 1268 * (struct vnode *a_vp, int a_mode, struct ucred *a_ucred, struct file *a_fp) 1269 * 1270 * a_mode: note, 'F' modes, e.g. 
FREAD, FWRITE 1271 */ 1272 int 1273 vop_stdopen(struct vop_open_args *ap) 1274 { 1275 struct vnode *vp = ap->a_vp; 1276 struct file *fp; 1277 1278 if (ap->a_fpp) { 1279 fp = *ap->a_fpp; 1280 1281 switch(vp->v_type) { 1282 case VFIFO: 1283 fp->f_type = DTYPE_FIFO; 1284 break; 1285 default: 1286 fp->f_type = DTYPE_VNODE; 1287 break; 1288 } 1289 /* retain flags not to be copied */ 1290 fp->f_flag = (fp->f_flag & ~FMASK) | (ap->a_mode & FMASK); 1291 fp->f_ops = &vnode_fileops; 1292 fp->f_data = vp; 1293 vref(vp); 1294 } 1295 if (ap->a_mode & FWRITE) 1296 atomic_add_int(&vp->v_writecount, 1); 1297 KKASSERT(vp->v_opencount >= 0 && vp->v_opencount != INT_MAX); 1298 atomic_add_int(&vp->v_opencount, 1); 1299 return (0); 1300 } 1301 1302 /* 1303 * Standard close. 1304 * 1305 * (struct vnode *a_vp, int a_fflag) 1306 * 1307 * a_fflag: note, 'F' modes, e.g. FREAD, FWRITE. same as a_mode in stdopen? 1308 * 1309 * v_lastwrite_ts is used to record the timestamp that should be used to 1310 * set the file mtime for any asynchronously flushed pages modified via 1311 * mmap(), which can occur after the last close(). 
1312 */ 1313 int 1314 vop_stdclose(struct vop_close_args *ap) 1315 { 1316 struct vnode *vp = ap->a_vp; 1317 1318 KASSERT(vp->v_opencount > 0, 1319 ("VOP_STDCLOSE: BAD OPENCOUNT %p %d type=%d ops=%p flgs=%08x", 1320 vp, vp->v_opencount, vp->v_type, *vp->v_ops, vp->v_flag)); 1321 if (ap->a_fflag & FWRITE) { 1322 KASSERT(vp->v_writecount > 0, 1323 ("VOP_STDCLOSE: BAD WRITECOUNT %p %d", 1324 vp, vp->v_writecount)); 1325 atomic_add_int(&vp->v_writecount, -1); 1326 } 1327 atomic_add_int(&vp->v_opencount, -1); 1328 return (0); 1329 } 1330 1331 /* 1332 * Standard getattr_lite 1333 * 1334 * Just calls getattr 1335 */ 1336 int 1337 vop_stdgetattr_lite(struct vop_getattr_lite_args *ap) 1338 { 1339 struct vattr va; 1340 struct vattr_lite *lvap; 1341 int error; 1342 1343 error = VOP_GETATTR(ap->a_vp, &va); 1344 if (__predict_true(error == 0)) { 1345 lvap = ap->a_lvap; 1346 lvap->va_type = va.va_type; 1347 lvap->va_nlink = va.va_nlink; 1348 lvap->va_mode = va.va_mode; 1349 lvap->va_uid = va.va_uid; 1350 lvap->va_gid = va.va_gid; 1351 lvap->va_size = va.va_size; 1352 lvap->va_flags = va.va_flags; 1353 } 1354 return error; 1355 } 1356 1357 /* 1358 * Implement standard getpages and putpages. All filesystems must use 1359 * the buffer cache to back regular files. 
1360 */ 1361 int 1362 vop_stdgetpages(struct vop_getpages_args *ap) 1363 { 1364 struct mount *mp; 1365 int error; 1366 1367 if ((mp = ap->a_vp->v_mount) != NULL) { 1368 error = vnode_pager_generic_getpages( 1369 ap->a_vp, ap->a_m, ap->a_count, 1370 ap->a_reqpage, ap->a_seqaccess); 1371 } else { 1372 error = VM_PAGER_BAD; 1373 } 1374 return (error); 1375 } 1376 1377 int 1378 vop_stdputpages(struct vop_putpages_args *ap) 1379 { 1380 struct mount *mp; 1381 int error; 1382 1383 if ((mp = ap->a_vp->v_mount) != NULL) { 1384 error = vnode_pager_generic_putpages( 1385 ap->a_vp, ap->a_m, ap->a_count, 1386 ap->a_flags, ap->a_rtvals); 1387 } else { 1388 error = VM_PAGER_BAD; 1389 } 1390 return (error); 1391 } 1392 1393 int 1394 vop_stdnoread(struct vop_read_args *ap) 1395 { 1396 return (EINVAL); 1397 } 1398 1399 int 1400 vop_stdnowrite(struct vop_write_args *ap) 1401 { 1402 return (EINVAL); 1403 } 1404 1405 /* 1406 * vfs default ops 1407 * used to fill the vfs fucntion table to get reasonable default return values. 1408 */ 1409 int 1410 vop_stdmountctl(struct vop_mountctl_args *ap) 1411 { 1412 1413 struct mount *mp; 1414 int error = 0; 1415 1416 mp = ap->a_head.a_ops->head.vv_mount; 1417 1418 switch(ap->a_op) { 1419 case MOUNTCTL_MOUNTFLAGS: 1420 /* 1421 * Get a string buffer with all the mount flags 1422 * names comman separated. 1423 * mount(2) will use this information. 
1424 */ 1425 *ap->a_res = vfs_flagstostr(mp->mnt_flag & MNT_VISFLAGMASK, NULL, 1426 ap->a_buf, ap->a_buflen, &error); 1427 break; 1428 case MOUNTCTL_INSTALL_VFS_JOURNAL: 1429 case MOUNTCTL_RESTART_VFS_JOURNAL: 1430 case MOUNTCTL_REMOVE_VFS_JOURNAL: 1431 case MOUNTCTL_RESYNC_VFS_JOURNAL: 1432 case MOUNTCTL_STATUS_VFS_JOURNAL: 1433 error = journal_mountctl(ap); 1434 break; 1435 default: 1436 error = EOPNOTSUPP; 1437 break; 1438 } 1439 return (error); 1440 } 1441 1442 int 1443 vfs_stdroot(struct mount *mp, struct vnode **vpp) 1444 { 1445 return (EOPNOTSUPP); 1446 } 1447 1448 int 1449 vfs_stdstatfs(struct mount *mp, struct statfs *sbp, struct ucred *cred) 1450 { 1451 return (EOPNOTSUPP); 1452 } 1453 1454 /* 1455 * If the VFS does not implement statvfs, then call statfs and convert 1456 * the values. This code was taken from libc's __cvtstatvfs() function, 1457 * contributed by Joerg Sonnenberger. 1458 */ 1459 int 1460 vfs_stdstatvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred) 1461 { 1462 struct statfs *in; 1463 int error; 1464 1465 in = &mp->mnt_stat; 1466 error = VFS_STATFS(mp, in, cred); 1467 if (error == 0) { 1468 bzero(sbp, sizeof(*sbp)); 1469 1470 sbp->f_bsize = in->f_bsize; 1471 sbp->f_frsize = in->f_bsize; 1472 sbp->f_blocks = in->f_blocks; 1473 sbp->f_bfree = in->f_bfree; 1474 sbp->f_bavail = in->f_bavail; 1475 sbp->f_files = in->f_files; 1476 sbp->f_ffree = in->f_ffree; 1477 1478 /* 1479 * XXX 1480 * This field counts the number of available inodes to non-root 1481 * users, but this information is not available via statfs. 1482 * Just ignore this issue by returning the total number 1483 * instead. 1484 */ 1485 sbp->f_favail = in->f_ffree; 1486 1487 /* 1488 * XXX 1489 * This field has a different meaning for statfs and statvfs. 1490 * For the former it is the cookie exported for NFS and not 1491 * intended for normal userland use. 
1492 */ 1493 sbp->f_fsid = 0; 1494 1495 sbp->f_flag = 0; 1496 if (in->f_flags & MNT_RDONLY) 1497 sbp->f_flag |= ST_RDONLY; 1498 if (in->f_flags & MNT_NOSUID) 1499 sbp->f_flag |= ST_NOSUID; 1500 sbp->f_namemax = 0; 1501 sbp->f_owner = in->f_owner; 1502 /* 1503 * XXX 1504 * statfs contains the type as string, statvfs expects it as 1505 * enumeration. 1506 */ 1507 sbp->f_type = 0; 1508 1509 sbp->f_syncreads = in->f_syncreads; 1510 sbp->f_syncwrites = in->f_syncwrites; 1511 sbp->f_asyncreads = in->f_asyncreads; 1512 sbp->f_asyncwrites = in->f_asyncwrites; 1513 } 1514 return (error); 1515 } 1516 1517 int 1518 vfs_stdvptofh(struct vnode *vp, struct fid *fhp) 1519 { 1520 return (EOPNOTSUPP); 1521 } 1522 1523 int 1524 vfs_stdstart(struct mount *mp, int flags) 1525 { 1526 return (0); 1527 } 1528 1529 int 1530 vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid, 1531 caddr_t arg, struct ucred *cred) 1532 { 1533 return (EOPNOTSUPP); 1534 } 1535 1536 int 1537 vfs_stdsync(struct mount *mp, int waitfor) 1538 { 1539 return (0); 1540 } 1541 1542 int 1543 vfs_stdnosync(struct mount *mp, int waitfor) 1544 { 1545 return (EOPNOTSUPP); 1546 } 1547 1548 int 1549 vfs_stdvget(struct mount *mp, struct vnode *dvp, ino_t ino, struct vnode **vpp) 1550 { 1551 return (EOPNOTSUPP); 1552 } 1553 1554 int 1555 vfs_stdfhtovp(struct mount *mp, struct vnode *rootvp, 1556 struct fid *fhp, struct vnode **vpp) 1557 { 1558 return (EOPNOTSUPP); 1559 } 1560 1561 int 1562 vfs_stdcheckexp(struct mount *mp, struct sockaddr *nam, int *extflagsp, 1563 struct ucred **credanonp) 1564 { 1565 return (EOPNOTSUPP); 1566 } 1567 1568 int 1569 vfs_stdinit(struct vfsconf *vfsp) 1570 { 1571 return (0); 1572 } 1573 1574 int 1575 vfs_stduninit(struct vfsconf *vfsp) 1576 { 1577 return(0); 1578 } 1579 1580 int 1581 vfs_stdextattrctl(struct mount *mp, int cmd, struct vnode *vp, 1582 int attrnamespace, const char *attrname, 1583 struct ucred *cred) 1584 { 1585 return(EOPNOTSUPP); 1586 } 1587 1588 #define ACCOUNTING_NB_FSTYPES 
7 1589 1590 static const char *accounting_fstypes[ACCOUNTING_NB_FSTYPES] = { 1591 "ext2fs", "hammer", "mfs", "ntfs", "null", "tmpfs", "ufs" }; 1592 1593 int 1594 vfs_stdac_init(struct mount *mp) 1595 { 1596 const char* fs_type; 1597 int i, fstype_ok = 0; 1598 1599 /* is mounted fs type one we want to do some accounting for ? */ 1600 for (i=0; i<ACCOUNTING_NB_FSTYPES; i++) { 1601 fs_type = accounting_fstypes[i]; 1602 if (strncmp(mp->mnt_stat.f_fstypename, fs_type, 1603 sizeof(mp->mnt_stat)) == 0) { 1604 fstype_ok = 1; 1605 break; 1606 } 1607 } 1608 if (fstype_ok == 0) 1609 return (0); 1610 1611 vq_init(mp); 1612 return (0); 1613 } 1614 1615 void 1616 vfs_stdac_done(struct mount *mp) 1617 { 1618 vq_done(mp); 1619 } 1620 1621 void 1622 vfs_stdncpgen_set(struct mount *mp, struct namecache *ncp) 1623 { 1624 } 1625 1626 int 1627 vfs_stdncpgen_test(struct mount *mp, struct namecache *ncp) 1628 { 1629 return 0; 1630 } 1631 1632 int 1633 vfs_stdmodifying(struct mount *mp) 1634 { 1635 if (mp->mnt_flag & MNT_RDONLY) 1636 return EROFS; 1637 return 0; 1638 } 1639 /* end of vfs default ops */ 1640