1 /* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed 6 * to Berkeley by John Heidemann of the UCLA Ficus project. 7 * 8 * The statvfs->statfs conversion code was contributed to the DragonFly 9 * Project by Joerg Sonnenberger <joerg@bec.de>. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project 36 * $FreeBSD: src/sys/kern/vfs_default.c,v 1.28.2.7 2003/01/10 18:23:26 bde Exp $ 37 */ 38 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/buf.h> 42 #include <sys/conf.h> 43 #include <sys/fcntl.h> 44 #include <sys/file.h> 45 #include <sys/kernel.h> 46 #include <sys/lock.h> 47 #include <sys/malloc.h> 48 #include <sys/mount.h> 49 #include <sys/unistd.h> 50 #include <sys/vnode.h> 51 #include <sys/namei.h> 52 #include <sys/mountctl.h> 53 #include <sys/vfs_quota.h> 54 #include <sys/uio.h> 55 56 #include <machine/limits.h> 57 58 #include <vm/vm.h> 59 #include <vm/vm_object.h> 60 #include <vm/vm_page.h> 61 #include <vm/vm_pager.h> 62 #include <vm/vnode_pager.h> 63 64 static int vop_nolookup (struct vop_old_lookup_args *); 65 static int vop_nostrategy (struct vop_strategy_args *); 66 67 /* 68 * This vnode table stores what we want to do if the filesystem doesn't 69 * implement a particular VOP. 70 * 71 * If there is no specific entry here, we will return EOPNOTSUPP. 
 */
struct vop_ops default_vnode_vops = {
	.vop_default =		vop_eopnotsupp,
	.vop_advlock =		(void *)vop_einval,
	.vop_fsync =		(void *)vop_null,
	.vop_ioctl =		(void *)vop_enotty,
	.vop_mmap =		(void *)vop_einval,
	.vop_old_lookup =	vop_nolookup,
	.vop_open =		vop_stdopen,
	.vop_close =		vop_stdclose,
	.vop_getattr_lite =	vop_stdgetattr_lite,
	.vop_pathconf =		vop_stdpathconf,
	.vop_readlink =		(void *)vop_einval,
	.vop_reallocblks =	(void *)vop_eopnotsupp,
	.vop_strategy =		vop_nostrategy,
	.vop_getacl =		(void *)vop_eopnotsupp,
	.vop_setacl =		(void *)vop_eopnotsupp,
	.vop_aclcheck =		(void *)vop_eopnotsupp,
	.vop_getextattr =	(void *)vop_eopnotsupp,
	.vop_setextattr =	(void *)vop_eopnotsupp,
	.vop_markatime =	vop_stdmarkatime,
	.vop_allocate =		vop_stdallocate,
	.vop_nresolve =		vop_compat_nresolve,
	.vop_nlookupdotdot =	vop_compat_nlookupdotdot,
	.vop_ncreate =		vop_compat_ncreate,
	.vop_nmkdir =		vop_compat_nmkdir,
	.vop_nmknod =		vop_compat_nmknod,
	.vop_nlink =		vop_compat_nlink,
	.vop_nsymlink =		vop_compat_nsymlink,
	.vop_nwhiteout =	vop_compat_nwhiteout,
	.vop_nremove =		vop_compat_nremove,
	.vop_nrmdir =		vop_compat_nrmdir,
	.vop_nrename =		vop_compat_nrename,
	.vop_mountctl =		vop_stdmountctl
};

VNODEOP_SET(default_vnode_vops);

/*
 * Trivial error-returning stubs.  These are installed in the default
 * vops table above (often via a (void *) cast so one generic-args stub
 * serves many specifically-typed VOP slots).
 */

/* Operation not supported by this filesystem. */
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	return (EOPNOTSUPP);
}

/* Bad file handle / descriptor state for this operation. */
int
vop_ebadf(struct vop_generic_args *ap)
{
	return (EBADF);
}

/* Inappropriate ioctl for this vnode type. */
int
vop_enotty(struct vop_generic_args *ap)
{
	return (ENOTTY);
}

/* Invalid argument / operation for this vnode type. */
int
vop_einval(struct vop_generic_args *ap)
{
	return (EINVAL);
}

/* Default markatime: filesystems that track atime must override. */
int
vop_stdmarkatime(struct vop_markatime_args *ap)
{
	return (EOPNOTSUPP);
}

/* Silent success: operation is a harmless no-op by default. */
int
vop_null(struct vop_generic_args *ap)
{
	return (0);
}

/* Re-dispatch the operation through the default vops table. */
int
vop_defaultop(struct vop_generic_args *ap)
{
	return (VOCALL(&default_vnode_vops, ap));
}

/*
 * vop_compat_resolve { struct nchandle *a_nch, struct vnode *dvp }
 * XXX STOPGAP FUNCTION
 *
 * XXX OLD API ROUTINE!  WHEN ALL VFSs HAVE BEEN CLEANED UP THIS PROCEDURE
 * WILL BE REMOVED.  This procedure exists for all VFSs which have not
 * yet implemented VOP_NRESOLVE().  It converts VOP_NRESOLVE() into a
 * vop_old_lookup() and does appropriate translations.
 *
 * Resolve a ncp for VFSs which do not support the VOP.  Eventually all
 * VFSs will support this VOP and this routine can be removed, since
 * VOP_NRESOLVE() is far less complex than the older LOOKUP/CACHEDLOOKUP
 * API.
 *
 * A locked ncp is passed in to be resolved.  The NCP is resolved by
 * figuring out the vnode (if any) and calling cache_setvp() to attach the
 * vnode to the entry.  If the entry represents a non-existent node then
 * cache_setvp() is called with a NULL vnode to resolve the entry into a
 * negative cache entry.  No vnode locks are retained and the
 * ncp is left locked on return.
 *
 * The ncp will NEVER represent "", "." or "..", or contain any slashes.
 *
 * There is a potential directory and vnode interlock.  The lock order
 * requirement is: namecache, governing directory, resolved vnode.
 */
int
vop_compat_nresolve(struct vop_nresolve_args *ap)
{
	int error;
	struct vnode *dvp;
	struct vnode *vp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct componentname cnp;

	nch = ap->a_nch;	/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;

	/*
	 * UFS currently stores all sorts of side effects, including a loop
	 * variable, in the directory inode.  That needs to be fixed and the
	 * other VFS's audited before we can switch to LK_SHARED.
	 */
	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/* Build a traditional LOOKUP componentname from the ncp. */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_LOOKUP;
	cnp.cn_flags = 0;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = curthread; /* XXX */

	/*
	 * vop_old_lookup() always returns vp locked.  dvp may or may not be
	 * left locked depending on CNP_PDIRUNLOCK.
	 */
	error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
	if (error == 0)
		vn_unlock(vp);
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		/* was resolved by another process while we were unlocked */
		if (error == 0)
			vrele(vp);
	} else if (error == 0) {
		/* positive hit: attach the vnode to the namecache entry */
		KKASSERT(vp != NULL);
		cache_setvp(nch, vp);
		vrele(vp);
	} else if (error == ENOENT) {
		/* miss: record a negative cache entry (NULL vnode) */
		KKASSERT(vp == NULL);
		if (cnp.cn_flags & CNP_ISWHITEOUT)
			ncp->nc_flag |= NCF_WHITEOUT;
		cache_setvp(nch, NULL);
	}
	vrele(dvp);
	return (error);
}

/*
 * vop_compat_nlookupdotdot { struct vnode *a_dvp,
 *			      struct vnode **a_vpp,
 *			      struct ucred *a_cred }
 *
 * Lookup the vnode representing the parent directory of the specified
 * directory vnode.  a_dvp should not be locked.  If no error occurs *a_vpp
 * will contain the parent vnode, locked and refd, else *a_vpp will be NULL.
 *
 * This function is designed to aid NFS server-side operations and is
 * used by cache_fromdvp() to create a consistent, connected namecache
 * topology.
 *
 * As part of the NEW API work, VFSs will first split their CNP_ISDOTDOT
 * code out from their *_lookup() and create *_nlookupdotdot().  Then as time
 * permits VFSs will implement the remaining *_n*() calls and finally get
 * rid of their *_lookup() call.
255 */ 256 int 257 vop_compat_nlookupdotdot(struct vop_nlookupdotdot_args *ap) 258 { 259 struct componentname cnp; 260 int error; 261 262 /* 263 * UFS currently stores all sorts of side effects, including a loop 264 * variable, in the directory inode. That needs to be fixed and the 265 * other VFS's audited before we can switch to LK_SHARED. 266 */ 267 *ap->a_vpp = NULL; 268 if ((error = vget(ap->a_dvp, LK_EXCLUSIVE)) != 0) 269 return (error); 270 if (ap->a_dvp->v_type != VDIR) { 271 vput(ap->a_dvp); 272 return (ENOTDIR); 273 } 274 275 bzero(&cnp, sizeof(cnp)); 276 cnp.cn_nameiop = NAMEI_LOOKUP; 277 cnp.cn_flags = CNP_ISDOTDOT; 278 cnp.cn_nameptr = ".."; 279 cnp.cn_namelen = 2; 280 cnp.cn_cred = ap->a_cred; 281 cnp.cn_td = curthread; /* XXX */ 282 283 /* 284 * vop_old_lookup() always returns vp locked. dvp may or may not be 285 * left locked depending on CNP_PDIRUNLOCK. 286 * 287 * (*vpp) will be returned locked if no error occured, which is the 288 * state we want. 289 */ 290 error = vop_old_lookup(ap->a_head.a_ops, ap->a_dvp, ap->a_vpp, &cnp); 291 if (cnp.cn_flags & CNP_PDIRUNLOCK) 292 vrele(ap->a_dvp); 293 else 294 vput(ap->a_dvp); 295 return (error); 296 } 297 298 /* 299 * vop_compat_ncreate { struct nchandle *a_nch, XXX STOPGAP FUNCTION 300 * struct vnode *a_dvp, 301 * struct vnode **a_vpp, 302 * struct ucred *a_cred, 303 * struct vattr *a_vap } 304 * 305 * Create a file as specified by a_vap. Compatibility requires us to issue 306 * the appropriate VOP_OLD_LOOKUP before we issue VOP_OLD_CREATE in order 307 * to setup the directory inode's i_offset and i_count (e.g. in UFS). 308 */ 309 int 310 vop_compat_ncreate(struct vop_ncreate_args *ap) 311 { 312 struct thread *td = curthread; 313 struct componentname cnp; 314 struct nchandle *nch; 315 struct namecache *ncp; 316 struct vnode *dvp; 317 int error; 318 319 /* 320 * Sanity checks, get a locked directory vnode. 
321 */ 322 nch = ap->a_nch; /* locked namecache node */ 323 dvp = ap->a_dvp; 324 ncp = nch->ncp; 325 326 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) { 327 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n", 328 ncp, ncp->nc_name); 329 return(EAGAIN); 330 } 331 332 /* 333 * Setup the cnp for a traditional vop_old_lookup() call. The lookup 334 * caches all information required to create the entry in the 335 * directory inode. We expect a return code of EJUSTRETURN for 336 * the CREATE case. The cnp must simulated a saved-name situation. 337 */ 338 bzero(&cnp, sizeof(cnp)); 339 cnp.cn_nameiop = NAMEI_CREATE; 340 cnp.cn_flags = CNP_LOCKPARENT; 341 cnp.cn_nameptr = ncp->nc_name; 342 cnp.cn_namelen = ncp->nc_nlen; 343 cnp.cn_cred = ap->a_cred; 344 cnp.cn_td = td; 345 *ap->a_vpp = NULL; 346 347 error = vop_old_lookup(ap->a_head.a_ops, dvp, ap->a_vpp, &cnp); 348 349 /* 350 * EJUSTRETURN should be returned for this case, which means that 351 * the VFS has setup the directory inode for the create. The dvp we 352 * passed in is expected to remain in a locked state. 353 * 354 * If the VOP_OLD_CREATE is successful we are responsible for updating 355 * the cache state of the locked ncp that was passed to us. 356 */ 357 if (error == EJUSTRETURN) { 358 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0); 359 error = VOP_OLD_CREATE(dvp, ap->a_vpp, &cnp, ap->a_vap); 360 if (error == 0) { 361 cache_setunresolved(nch); 362 cache_setvp(nch, *ap->a_vpp); 363 } 364 } else { 365 if (error == 0) { 366 vput(*ap->a_vpp); 367 *ap->a_vpp = NULL; 368 error = EEXIST; 369 } 370 KKASSERT(*ap->a_vpp == NULL); 371 } 372 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0) 373 vn_unlock(dvp); 374 vrele(dvp); 375 return (error); 376 } 377 378 /* 379 * vop_compat_nmkdir { struct nchandle *a_nch, XXX STOPGAP FUNCTION 380 * struct vnode *a_dvp, 381 * struct vnode **a_vpp, 382 * struct ucred *a_cred, 383 * struct vattr *a_vap } 384 * 385 * Create a directory as specified by a_vap. 
Compatibility requires us to 386 * issue the appropriate VOP_OLD_LOOKUP before we issue VOP_OLD_MKDIR in 387 * order to setup the directory inode's i_offset and i_count (e.g. in UFS). 388 */ 389 int 390 vop_compat_nmkdir(struct vop_nmkdir_args *ap) 391 { 392 struct thread *td = curthread; 393 struct componentname cnp; 394 struct nchandle *nch; 395 struct namecache *ncp; 396 struct vnode *dvp; 397 int error; 398 399 /* 400 * Sanity checks, get a locked directory vnode. 401 */ 402 nch = ap->a_nch; /* locked namecache node */ 403 ncp = nch->ncp; 404 dvp = ap->a_dvp; 405 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) { 406 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n", 407 ncp, ncp->nc_name); 408 return(EAGAIN); 409 } 410 411 /* 412 * Setup the cnp for a traditional vop_old_lookup() call. The lookup 413 * caches all information required to create the entry in the 414 * directory inode. We expect a return code of EJUSTRETURN for 415 * the CREATE case. The cnp must simulated a saved-name situation. 416 */ 417 bzero(&cnp, sizeof(cnp)); 418 cnp.cn_nameiop = NAMEI_CREATE; 419 cnp.cn_flags = CNP_LOCKPARENT; 420 cnp.cn_nameptr = ncp->nc_name; 421 cnp.cn_namelen = ncp->nc_nlen; 422 cnp.cn_cred = ap->a_cred; 423 cnp.cn_td = td; 424 *ap->a_vpp = NULL; 425 426 error = vop_old_lookup(ap->a_head.a_ops, dvp, ap->a_vpp, &cnp); 427 428 /* 429 * EJUSTRETURN should be returned for this case, which means that 430 * the VFS has setup the directory inode for the create. The dvp we 431 * passed in is expected to remain in a locked state. 432 * 433 * If the VOP_OLD_MKDIR is successful we are responsible for updating 434 * the cache state of the locked ncp that was passed to us. 
435 */ 436 if (error == EJUSTRETURN) { 437 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0); 438 error = VOP_OLD_MKDIR(dvp, ap->a_vpp, &cnp, ap->a_vap); 439 if (error == 0) { 440 cache_setunresolved(nch); 441 cache_setvp(nch, *ap->a_vpp); 442 } 443 } else { 444 if (error == 0) { 445 vput(*ap->a_vpp); 446 *ap->a_vpp = NULL; 447 error = EEXIST; 448 } 449 KKASSERT(*ap->a_vpp == NULL); 450 } 451 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0) 452 vn_unlock(dvp); 453 vrele(dvp); 454 return (error); 455 } 456 457 /* 458 * vop_compat_nmknod { struct nchandle *a_nch, XXX STOPGAP FUNCTION 459 * struct vnode *a_dvp, 460 * struct vnode **a_vpp, 461 * struct ucred *a_cred, 462 * struct vattr *a_vap } 463 * 464 * Create a device or fifo node as specified by a_vap. Compatibility requires 465 * us to issue the appropriate VOP_OLD_LOOKUP before we issue VOP_OLD_MKNOD 466 * in order to setup the directory inode's i_offset and i_count (e.g. in UFS). 467 */ 468 int 469 vop_compat_nmknod(struct vop_nmknod_args *ap) 470 { 471 struct thread *td = curthread; 472 struct componentname cnp; 473 struct nchandle *nch; 474 struct namecache *ncp; 475 struct vnode *dvp; 476 int error; 477 478 /* 479 * Sanity checks, get a locked directory vnode. 480 */ 481 nch = ap->a_nch; /* locked namecache node */ 482 ncp = nch->ncp; 483 dvp = ap->a_dvp; 484 485 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) { 486 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n", 487 ncp, ncp->nc_name); 488 return(EAGAIN); 489 } 490 491 /* 492 * Setup the cnp for a traditional vop_old_lookup() call. The lookup 493 * caches all information required to create the entry in the 494 * directory inode. We expect a return code of EJUSTRETURN for 495 * the CREATE case. The cnp must simulated a saved-name situation. 
496 */ 497 bzero(&cnp, sizeof(cnp)); 498 cnp.cn_nameiop = NAMEI_CREATE; 499 cnp.cn_flags = CNP_LOCKPARENT; 500 cnp.cn_nameptr = ncp->nc_name; 501 cnp.cn_namelen = ncp->nc_nlen; 502 cnp.cn_cred = ap->a_cred; 503 cnp.cn_td = td; 504 *ap->a_vpp = NULL; 505 506 error = vop_old_lookup(ap->a_head.a_ops, dvp, ap->a_vpp, &cnp); 507 508 /* 509 * EJUSTRETURN should be returned for this case, which means that 510 * the VFS has setup the directory inode for the create. The dvp we 511 * passed in is expected to remain in a locked state. 512 * 513 * If the VOP_OLD_MKNOD is successful we are responsible for updating 514 * the cache state of the locked ncp that was passed to us. 515 */ 516 if (error == EJUSTRETURN) { 517 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0); 518 error = VOP_OLD_MKNOD(dvp, ap->a_vpp, &cnp, ap->a_vap); 519 if (error == 0) { 520 cache_setunresolved(nch); 521 cache_setvp(nch, *ap->a_vpp); 522 } 523 } else { 524 if (error == 0) { 525 vput(*ap->a_vpp); 526 *ap->a_vpp = NULL; 527 error = EEXIST; 528 } 529 KKASSERT(*ap->a_vpp == NULL); 530 } 531 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0) 532 vn_unlock(dvp); 533 vrele(dvp); 534 return (error); 535 } 536 537 /* 538 * vop_compat_nlink { struct nchandle *a_nch, XXX STOPGAP FUNCTION 539 * struct vnode *a_dvp, 540 * struct vnode *a_vp, 541 * struct ucred *a_cred } 542 * 543 * The passed vp is locked and represents the source. The passed ncp is 544 * locked and represents the target to create. 545 */ 546 int 547 vop_compat_nlink(struct vop_nlink_args *ap) 548 { 549 struct thread *td = curthread; 550 struct componentname cnp; 551 struct nchandle *nch; 552 struct namecache *ncp; 553 struct vnode *dvp; 554 struct vnode *tvp; 555 int error; 556 557 /* 558 * Sanity checks, get a locked directory vnode. 
559 */ 560 nch = ap->a_nch; /* locked namecache node */ 561 ncp = nch->ncp; 562 dvp = ap->a_dvp; 563 564 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) { 565 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n", 566 ncp, ncp->nc_name); 567 return(EAGAIN); 568 } 569 570 /* 571 * Setup the cnp for a traditional vop_old_lookup() call. The lookup 572 * caches all information required to create the entry in the 573 * directory inode. We expect a return code of EJUSTRETURN for 574 * the CREATE case. The cnp must simulated a saved-name situation. 575 * 576 * It should not be possible for there to be a vnode collision 577 * between the source vp and target (name lookup). However NFS 578 * clients racing each other can cause NFS to alias the same vnode 579 * across several names without the rest of the system knowing it. 580 * Use CNP_NOTVP to avoid a panic in this situation. 581 */ 582 bzero(&cnp, sizeof(cnp)); 583 cnp.cn_nameiop = NAMEI_CREATE; 584 cnp.cn_flags = CNP_LOCKPARENT | CNP_NOTVP; 585 cnp.cn_nameptr = ncp->nc_name; 586 cnp.cn_namelen = ncp->nc_nlen; 587 cnp.cn_cred = ap->a_cred; 588 cnp.cn_td = td; 589 cnp.cn_notvp = ap->a_vp; 590 591 tvp = NULL; 592 error = vop_old_lookup(ap->a_head.a_ops, dvp, &tvp, &cnp); 593 594 /* 595 * EJUSTRETURN should be returned for this case, which means that 596 * the VFS has setup the directory inode for the create. The dvp we 597 * passed in is expected to remain in a locked state. 598 * 599 * If the VOP_OLD_LINK is successful we are responsible for updating 600 * the cache state of the locked ncp that was passed to us. 
601 */ 602 if (error == EJUSTRETURN) { 603 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0); 604 error = VOP_OLD_LINK(dvp, ap->a_vp, &cnp); 605 if (error == 0) { 606 cache_setunresolved(nch); 607 cache_setvp(nch, ap->a_vp); 608 } 609 } else { 610 if (error == 0) { 611 vput(tvp); 612 error = EEXIST; 613 } 614 } 615 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0) 616 vn_unlock(dvp); 617 vrele(dvp); 618 return (error); 619 } 620 621 int 622 vop_compat_nsymlink(struct vop_nsymlink_args *ap) 623 { 624 struct thread *td = curthread; 625 struct componentname cnp; 626 struct nchandle *nch; 627 struct namecache *ncp; 628 struct vnode *dvp; 629 struct vnode *vp; 630 int error; 631 632 /* 633 * Sanity checks, get a locked directory vnode. 634 */ 635 *ap->a_vpp = NULL; 636 nch = ap->a_nch; /* locked namecache node */ 637 ncp = nch->ncp; 638 dvp = ap->a_dvp; 639 640 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) { 641 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n", 642 ncp, ncp->nc_name); 643 return(EAGAIN); 644 } 645 646 /* 647 * Setup the cnp for a traditional vop_old_lookup() call. The lookup 648 * caches all information required to create the entry in the 649 * directory inode. We expect a return code of EJUSTRETURN for 650 * the CREATE case. The cnp must simulated a saved-name situation. 651 */ 652 bzero(&cnp, sizeof(cnp)); 653 cnp.cn_nameiop = NAMEI_CREATE; 654 cnp.cn_flags = CNP_LOCKPARENT; 655 cnp.cn_nameptr = ncp->nc_name; 656 cnp.cn_namelen = ncp->nc_nlen; 657 cnp.cn_cred = ap->a_cred; 658 cnp.cn_td = td; 659 660 vp = NULL; 661 error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp); 662 663 /* 664 * EJUSTRETURN should be returned for this case, which means that 665 * the VFS has setup the directory inode for the create. The dvp we 666 * passed in is expected to remain in a locked state. 667 * 668 * If the VOP_OLD_SYMLINK is successful we are responsible for updating 669 * the cache state of the locked ncp that was passed to us. 
670 */ 671 if (error == EJUSTRETURN) { 672 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0); 673 error = VOP_OLD_SYMLINK(dvp, &vp, &cnp, ap->a_vap, ap->a_target); 674 if (error == 0) { 675 cache_setunresolved(nch); 676 cache_setvp(nch, vp); 677 *ap->a_vpp = vp; 678 } 679 } else { 680 if (error == 0) { 681 vput(vp); 682 vp = NULL; 683 error = EEXIST; 684 } 685 KKASSERT(vp == NULL); 686 } 687 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0) 688 vn_unlock(dvp); 689 vrele(dvp); 690 return (error); 691 } 692 693 /* 694 * vop_compat_nwhiteout { struct nchandle *a_nch, XXX STOPGAP FUNCTION 695 * struct vnode *a_dvp, 696 * struct ucred *a_cred, 697 * int a_flags } 698 * 699 * Issie a whiteout operation (create, lookup, or delete). Compatibility 700 * requires us to issue the appropriate VOP_OLD_LOOKUP before we issue 701 * VOP_OLD_WHITEOUT in order to setup the directory inode's i_offset and i_count 702 * (e.g. in UFS) for the NAMEI_CREATE and NAMEI_DELETE ops. For NAMEI_LOOKUP 703 * no lookup is necessary. 704 */ 705 int 706 vop_compat_nwhiteout(struct vop_nwhiteout_args *ap) 707 { 708 struct thread *td = curthread; 709 struct componentname cnp; 710 struct nchandle *nch; 711 struct namecache *ncp; 712 struct vnode *dvp; 713 struct vnode *vp; 714 int error; 715 716 /* 717 * Sanity checks, get a locked directory vnode. 718 */ 719 nch = ap->a_nch; /* locked namecache node */ 720 ncp = nch->ncp; 721 dvp = ap->a_dvp; 722 723 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) { 724 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n", 725 ncp, ncp->nc_name); 726 return(EAGAIN); 727 } 728 729 /* 730 * Setup the cnp for a traditional vop_old_lookup() call. The lookup 731 * caches all information required to create the entry in the 732 * directory inode. We expect a return code of EJUSTRETURN for 733 * the CREATE case. The cnp must simulated a saved-name situation. 
734 */ 735 bzero(&cnp, sizeof(cnp)); 736 cnp.cn_nameiop = ap->a_flags; 737 cnp.cn_flags = CNP_LOCKPARENT; 738 cnp.cn_nameptr = ncp->nc_name; 739 cnp.cn_namelen = ncp->nc_nlen; 740 cnp.cn_cred = ap->a_cred; 741 cnp.cn_td = td; 742 743 vp = NULL; 744 745 /* 746 * EJUSTRETURN should be returned for the CREATE or DELETE cases. 747 * The VFS has setup the directory inode for the create. The dvp we 748 * passed in is expected to remain in a locked state. 749 * 750 * If the VOP_OLD_WHITEOUT is successful we are responsible for updating 751 * the cache state of the locked ncp that was passed to us. 752 */ 753 switch(ap->a_flags) { 754 case NAMEI_DELETE: 755 cnp.cn_flags |= CNP_DOWHITEOUT; 756 /* fall through */ 757 case NAMEI_CREATE: 758 error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp); 759 if (error == EJUSTRETURN) { 760 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0); 761 error = VOP_OLD_WHITEOUT(dvp, &cnp, ap->a_flags); 762 if (error == 0) 763 cache_setunresolved(nch); 764 } else { 765 if (error == 0) { 766 vput(vp); 767 vp = NULL; 768 error = EEXIST; 769 } 770 KKASSERT(vp == NULL); 771 } 772 break; 773 case NAMEI_LOOKUP: 774 error = VOP_OLD_WHITEOUT(dvp, NULL, ap->a_flags); 775 break; 776 default: 777 error = EINVAL; 778 break; 779 } 780 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0) 781 vn_unlock(dvp); 782 vrele(dvp); 783 return (error); 784 } 785 786 787 /* 788 * vop_compat_nremove { struct nchandle *a_nch, XXX STOPGAP FUNCTION 789 * struct vnode *a_dvp, 790 * struct ucred *a_cred } 791 */ 792 int 793 vop_compat_nremove(struct vop_nremove_args *ap) 794 { 795 struct thread *td = curthread; 796 struct componentname cnp; 797 struct nchandle *nch; 798 struct namecache *ncp; 799 struct vnode *dvp; 800 struct vnode *vp; 801 int error; 802 803 /* 804 * Sanity checks, get a locked directory vnode. 
805 */ 806 nch = ap->a_nch; /* locked namecache node */ 807 ncp = nch->ncp; 808 dvp = ap->a_dvp; 809 810 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) { 811 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n", 812 ncp, ncp->nc_name); 813 return(EAGAIN); 814 } 815 816 /* 817 * Setup the cnp for a traditional vop_old_lookup() call. The lookup 818 * caches all information required to delete the entry in the 819 * directory inode. We expect a return code of 0 for the DELETE 820 * case (meaning that a vp has been found). The cnp must simulated 821 * a saved-name situation. 822 */ 823 bzero(&cnp, sizeof(cnp)); 824 cnp.cn_nameiop = NAMEI_DELETE; 825 cnp.cn_flags = CNP_LOCKPARENT; 826 cnp.cn_nameptr = ncp->nc_name; 827 cnp.cn_namelen = ncp->nc_nlen; 828 cnp.cn_cred = ap->a_cred; 829 cnp.cn_td = td; 830 831 /* 832 * The vnode must be a directory and must not represent the 833 * current directory. 834 */ 835 vp = NULL; 836 error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp); 837 if (error == 0 && vp->v_type == VDIR) 838 error = EPERM; 839 if (error == 0) { 840 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0); 841 error = VOP_OLD_REMOVE(dvp, vp, &cnp); 842 if (error == 0) 843 cache_unlink(nch); 844 } 845 if (vp) { 846 if (dvp == vp) 847 vrele(vp); 848 else 849 vput(vp); 850 } 851 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0) 852 vn_unlock(dvp); 853 vrele(dvp); 854 return (error); 855 } 856 857 /* 858 * vop_compat_nrmdir { struct nchandle *a_nch, XXX STOPGAP FUNCTION 859 * struct vnode *dvp, 860 * struct ucred *a_cred } 861 */ 862 int 863 vop_compat_nrmdir(struct vop_nrmdir_args *ap) 864 { 865 struct thread *td = curthread; 866 struct componentname cnp; 867 struct nchandle *nch; 868 struct namecache *ncp; 869 struct vnode *dvp; 870 struct vnode *vp; 871 int error; 872 873 /* 874 * Sanity checks, get a locked directory vnode. 
875 */ 876 nch = ap->a_nch; /* locked namecache node */ 877 ncp = nch->ncp; 878 dvp = ap->a_dvp; 879 880 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) { 881 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n", 882 ncp, ncp->nc_name); 883 return(EAGAIN); 884 } 885 886 /* 887 * Setup the cnp for a traditional vop_old_lookup() call. The lookup 888 * caches all information required to delete the entry in the 889 * directory inode. We expect a return code of 0 for the DELETE 890 * case (meaning that a vp has been found). The cnp must simulated 891 * a saved-name situation. 892 */ 893 bzero(&cnp, sizeof(cnp)); 894 cnp.cn_nameiop = NAMEI_DELETE; 895 cnp.cn_flags = CNP_LOCKPARENT; 896 cnp.cn_nameptr = ncp->nc_name; 897 cnp.cn_namelen = ncp->nc_nlen; 898 cnp.cn_cred = ap->a_cred; 899 cnp.cn_td = td; 900 901 /* 902 * The vnode must be a directory and must not represent the 903 * current directory. 904 */ 905 vp = NULL; 906 error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp); 907 if (error == 0 && vp->v_type != VDIR) 908 error = ENOTDIR; 909 if (error == 0 && vp == dvp) 910 error = EINVAL; 911 if (error == 0 && (vp->v_flag & VROOT)) 912 error = EBUSY; 913 if (error == 0) { 914 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0); 915 error = VOP_OLD_RMDIR(dvp, vp, &cnp); 916 917 /* 918 * Note that this invalidation will cause any process 919 * currently CD'd into the directory being removed to be 920 * disconnected from the topology and not be able to ".." 921 * back out. 922 */ 923 if (error == 0) { 924 cache_inval(nch, CINV_DESTROY); 925 cache_inval_vp(vp, CINV_DESTROY); 926 } 927 } 928 if (vp) { 929 if (dvp == vp) 930 vrele(vp); 931 else 932 vput(vp); 933 } 934 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0) 935 vn_unlock(dvp); 936 vrele(dvp); 937 return (error); 938 } 939 940 /* 941 * vop_compat_nrename { struct nchandle *a_fnch, XXX STOPGAP FUNCTION 942 * struct nchandle *a_tnch, 943 * struct ucred *a_cred } 944 * 945 * This is a fairly difficult procedure. 
The old VOP_OLD_RENAME requires that
 * the source directory and vnode be unlocked and the target directory and
 * vnode (if it exists) be locked.  All arguments will be vrele'd and
 * the targets will also be unlocked regardless of the return code.
 */
int
vop_compat_nrename(struct vop_nrename_args *ap)
{
	struct thread *td = curthread;
	struct componentname fcnp;	/* source (from) lookup state */
	struct componentname tcnp;	/* target (to) lookup state */
	struct nchandle *fnch;
	struct nchandle *tnch;
	struct namecache *fncp;
	struct namecache *tncp;
	struct vnode *fdvp, *fvp;	/* source directory / source vnode */
	struct vnode *tdvp, *tvp;	/* target directory / target vnode */
	int error;

	/*
	 * Sanity checks, get referenced vnodes representing the source.
	 */
	fnch = ap->a_fnch;		/* locked namecache node */
	fncp = fnch->ncp;
	fdvp = ap->a_fdvp;

	/*
	 * Temporarily lock the source directory and lookup in DELETE mode to
	 * check permissions.  XXX delete permissions should have been
	 * checked by nlookup(), we need to add NLC_DELETE for delete
	 * checking.  It is unclear whether VFS's require the directory setup
	 * info NAMEI_DELETE causes to be stored in the fdvp's inode, but
	 * since it isn't locked and since UFS always does a relookup of
	 * the source, it is believed that the only side effect that matters
	 * is the permissions check.
	 */
	if ((error = vget(fdvp, LK_EXCLUSIVE)) != 0) {
		/*
		 * XXX the diagnostic message names "vop_compat_resolve";
		 * it was copy-pasted from that function and should say
		 * vop_compat_nrename.
		 */
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			fncp, fncp->nc_name);
		return(EAGAIN);
	}

	/*
	 * Build a traditional componentname for the source lookup.
	 * CNP_LOCKPARENT asks the lookup to leave fdvp locked unless it
	 * sets CNP_PDIRUNLOCK.
	 */
	bzero(&fcnp, sizeof(fcnp));
	fcnp.cn_nameiop = NAMEI_DELETE;
	fcnp.cn_flags = CNP_LOCKPARENT;
	fcnp.cn_nameptr = fncp->nc_name;
	fcnp.cn_namelen = fncp->nc_nlen;
	fcnp.cn_cred = ap->a_cred;
	fcnp.cn_td = td;

	/*
	 * note: vop_old_lookup (i.e. VOP_OLD_LOOKUP) always returns a locked
	 * fvp.
	 */
	fvp = NULL;
	error = vop_old_lookup(ap->a_head.a_ops, fdvp, &fvp, &fcnp);
	if (error == 0 && (fvp->v_flag & VROOT)) {
		/* never allow renaming a mount point itself */
		vput(fvp);	/* as if vop_old_lookup had failed */
		error = EBUSY;
	}
	if ((fcnp.cn_flags & CNP_PDIRUNLOCK) == 0) {
		fcnp.cn_flags |= CNP_PDIRUNLOCK;
		vn_unlock(fdvp);
	}
	if (error) {
		vrele(fdvp);
		return (error);
	}
	vn_unlock(fvp);

	/*
	 * fdvp and fvp are now referenced and unlocked.
	 *
	 * Get a locked directory vnode for the target and lookup the target
	 * in CREATE mode so it places the required information in the
	 * directory inode.
	 */
	tnch = ap->a_tnch;		/* locked namecache node */
	tncp = tnch->ncp;
	tdvp = ap->a_tdvp;
	/*
	 * XXX dead code: error is known to be 0 here (any non-zero error
	 * already returned above), so this branch can never be taken.
	 */
	if (error) {
		vrele(fdvp);
		vrele(fvp);
		return (error);
	}
	if ((error = vget(tdvp, LK_EXCLUSIVE)) != 0) {
		/* XXX diagnostic also misnames the function, see above */
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			tncp, tncp->nc_name);
		vrele(fdvp);
		vrele(fvp);
		return(EAGAIN);
	}

	/*
	 * Setup the cnp for a traditional vop_old_lookup() call.  The lookup
	 * caches all information required to create the entry in the
	 * target directory inode.
	 */
	bzero(&tcnp, sizeof(tcnp));
	tcnp.cn_nameiop = NAMEI_RENAME;
	tcnp.cn_flags = CNP_LOCKPARENT;
	tcnp.cn_nameptr = tncp->nc_name;
	tcnp.cn_namelen = tncp->nc_nlen;
	tcnp.cn_cred = ap->a_cred;
	tcnp.cn_td = td;

	tvp = NULL;
	error = vop_old_lookup(ap->a_head.a_ops, tdvp, &tvp, &tcnp);

	if (error == EJUSTRETURN) {
		/*
		 * Target does not exist.  tvp should be NULL.
		 */
		KKASSERT(tvp == NULL);
		KKASSERT((tcnp.cn_flags & CNP_PDIRUNLOCK) == 0);
		/* VOP_OLD_RENAME consumes all references/locks, see header */
		error = VOP_OLD_RENAME(fdvp, fvp, &fcnp, tdvp, tvp, &tcnp);
		if (error == 0)
			cache_rename(fnch, tnch);
	} else if (error == 0) {
		/*
		 * Target exists.  VOP_OLD_RENAME should correctly delete the
		 * target.
		 */
		KKASSERT((tcnp.cn_flags & CNP_PDIRUNLOCK) == 0);
		error = VOP_OLD_RENAME(fdvp, fvp, &fcnp, tdvp, tvp, &tcnp);
		if (error == 0)
			cache_rename(fnch, tnch);
	} else {
		/*
		 * Target lookup failed outright; drop our references.
		 * tdvp is still locked unless the lookup unlocked it
		 * (CNP_PDIRUNLOCK), so vput vs vrele accordingly.
		 */
		vrele(fdvp);
		vrele(fvp);
		if (tcnp.cn_flags & CNP_PDIRUNLOCK)
			vrele(tdvp);
		else
			vput(tdvp);
	}
	return (error);
}

/*
 * Default lookup: this layer has no directories, so every lookup fails
 * with ENOTDIR and a NULL result vnode.
 */
static int
vop_nolookup(struct vop_old_lookup_args *ap)
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	B_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BUF_CMD_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  B_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	/* complete the BIO with an error so the caller does not hang */
	kprintf("No strategy for buffer at %p\n", ap->a_bio->bio_buf);
	vprint("", ap->a_vp);
	ap->a_bio->bio_buf->b_flags |= B_ERROR;
	ap->a_bio->bio_buf->b_error = EOPNOTSUPP;
	biodone(ap->a_bio);
	return (EOPNOTSUPP);
}

/*
 * Default pathconf: report the POSIX-mandated limits for the standard
 * _PC_* names; EINVAL for anything this layer does not know about.
 */
int
vop_stdpathconf(struct vop_pathconf_args *ap)
{
	int error = 0;

	switch (ap->a_name) {
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = _POSIX_CHOWN_RESTRICTED;
		break;
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		break;
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		break;
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		break;
	case _PC_NAME_MAX:
		*ap->a_retval = NAME_MAX;
		break;
	case _PC_NO_TRUNC:
		*ap->a_retval = _POSIX_NO_TRUNC;
		break;
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		break;
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		break;
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/*
 * Standard open.
 *
 * (struct vnode *a_vp, int a_mode, struct ucred *a_ucred, struct file *a_fp)
 *
 * a_mode: note, 'F' modes, e.g. FREAD, FWRITE
 */
int
vop_stdopen(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct file *fp;

	/* a_fpp is optional; only wire up the file structure if supplied */
	if (ap->a_fpp) {
		fp = *ap->a_fpp;

		switch(vp->v_type) {
		case VFIFO:
			fp->f_type = DTYPE_FIFO;
			break;
		default:
			fp->f_type = DTYPE_VNODE;
			break;
		}
		/* retain flags not to be copied */
		fp->f_flag = (fp->f_flag & ~FMASK) | (ap->a_mode & FMASK);
		fp->f_ops = &vnode_fileops;
		fp->f_data = vp;
		vref(vp);	/* the file structure holds its own vnode ref */
	}
	if (ap->a_mode & FWRITE)
		atomic_add_int(&vp->v_writecount, 1);
	KKASSERT(vp->v_opencount >= 0 && vp->v_opencount != INT_MAX);
	atomic_add_int(&vp->v_opencount, 1);
	return (0);
}

/*
 * Standard close.
 *
 * (struct vnode *a_vp, int a_fflag)
 *
 * a_fflag: note, 'F' modes, e.g. FREAD, FWRITE.  same as a_mode in stdopen?
 *
 * v_lastwrite_ts is used to record the timestamp that should be used to
 * set the file mtime for any asynchronously flushed pages modified via
 * mmap(), which can occur after the last close.
1201 */ 1202 int 1203 vop_stdclose(struct vop_close_args *ap) 1204 { 1205 struct vnode *vp = ap->a_vp; 1206 1207 KASSERT(vp->v_opencount > 0, 1208 ("VOP_STDCLOSE: BAD OPENCOUNT %p %d type=%d ops=%p flgs=%08x", 1209 vp, vp->v_opencount, vp->v_type, *vp->v_ops, vp->v_flag)); 1210 if (ap->a_fflag & FWRITE) { 1211 KASSERT(vp->v_writecount > 0, 1212 ("VOP_STDCLOSE: BAD WRITECOUNT %p %d", 1213 vp, vp->v_writecount)); 1214 atomic_add_int(&vp->v_writecount, -1); 1215 } 1216 atomic_add_int(&vp->v_opencount, -1); 1217 return (0); 1218 } 1219 1220 /* 1221 * Standard getattr_lite 1222 * 1223 * Just calls getattr 1224 */ 1225 int 1226 vop_stdgetattr_lite(struct vop_getattr_lite_args *ap) 1227 { 1228 struct vattr va; 1229 struct vattr_lite *lvap; 1230 int error; 1231 1232 error = VOP_GETATTR(ap->a_vp, &va); 1233 if (__predict_true(error == 0)) { 1234 lvap = ap->a_lvap; 1235 lvap->va_type = va.va_type; 1236 lvap->va_nlink = va.va_nlink; 1237 lvap->va_mode = va.va_mode; 1238 lvap->va_uid = va.va_uid; 1239 lvap->va_gid = va.va_gid; 1240 lvap->va_size = va.va_size; 1241 lvap->va_flags = va.va_flags; 1242 } 1243 return error; 1244 } 1245 1246 /* 1247 * Implement standard getpages and putpages. All filesystems must use 1248 * the buffer cache to back regular files. 
1249 */ 1250 int 1251 vop_stdgetpages(struct vop_getpages_args *ap) 1252 { 1253 struct mount *mp; 1254 int error; 1255 1256 if ((mp = ap->a_vp->v_mount) != NULL) { 1257 error = vnode_pager_generic_getpages( 1258 ap->a_vp, ap->a_m, ap->a_count, 1259 ap->a_reqpage, ap->a_seqaccess); 1260 } else { 1261 error = VM_PAGER_BAD; 1262 } 1263 return (error); 1264 } 1265 1266 int 1267 vop_stdputpages(struct vop_putpages_args *ap) 1268 { 1269 struct mount *mp; 1270 int error; 1271 1272 if ((mp = ap->a_vp->v_mount) != NULL) { 1273 error = vnode_pager_generic_putpages( 1274 ap->a_vp, ap->a_m, ap->a_count, 1275 ap->a_flags, ap->a_rtvals); 1276 } else { 1277 error = VM_PAGER_BAD; 1278 } 1279 return (error); 1280 } 1281 1282 int 1283 vop_stdnoread(struct vop_read_args *ap) 1284 { 1285 return (EINVAL); 1286 } 1287 1288 int 1289 vop_stdnowrite(struct vop_write_args *ap) 1290 { 1291 return (EINVAL); 1292 } 1293 1294 /* 1295 * vfs default ops 1296 * used to fill the vfs fucntion table to get reasonable default return values. 1297 */ 1298 int 1299 vop_stdmountctl(struct vop_mountctl_args *ap) 1300 { 1301 1302 struct mount *mp; 1303 int error = 0; 1304 1305 mp = ap->a_head.a_ops->head.vv_mount; 1306 1307 switch(ap->a_op) { 1308 case MOUNTCTL_MOUNTFLAGS: 1309 /* 1310 * Get a string buffer with all the mount flags 1311 * names comman separated. 1312 * mount(2) will use this information. 
1313 */ 1314 *ap->a_res = vfs_flagstostr(mp->mnt_flag & MNT_VISFLAGMASK, NULL, 1315 ap->a_buf, ap->a_buflen, &error); 1316 break; 1317 case MOUNTCTL_INSTALL_VFS_JOURNAL: 1318 case MOUNTCTL_RESTART_VFS_JOURNAL: 1319 case MOUNTCTL_REMOVE_VFS_JOURNAL: 1320 case MOUNTCTL_RESYNC_VFS_JOURNAL: 1321 case MOUNTCTL_STATUS_VFS_JOURNAL: 1322 error = journal_mountctl(ap); 1323 break; 1324 default: 1325 error = EOPNOTSUPP; 1326 break; 1327 } 1328 return (error); 1329 } 1330 1331 int 1332 vop_stdallocate(struct vop_allocate_args *ap) 1333 { 1334 struct thread *td; 1335 struct vnode *vp; 1336 struct vattr vattr, *vap; 1337 struct uio auio; 1338 struct iovec aiov; 1339 uint8_t *buf; 1340 off_t offset, len, fsize; 1341 size_t iosize; 1342 int error; 1343 1344 td = curthread; 1345 vap = &vattr; 1346 buf = NULL; 1347 1348 vp = ap->a_vp; 1349 offset = ap->a_offset; 1350 len = ap->a_len; 1351 1352 error = VOP_GETATTR(vp, vap); 1353 if (error != 0) 1354 goto out; 1355 fsize = vap->va_size; 1356 iosize = vap->va_blocksize; 1357 if (iosize == 0) 1358 iosize = BLKDEV_IOSIZE; 1359 if (iosize > vmaxiosize(vp)) 1360 iosize = vmaxiosize(vp); 1361 buf = kmalloc(iosize, M_TEMP, M_WAITOK); 1362 1363 if (offset + len > vap->va_size) { 1364 /* 1365 * Test offset + len against the filesystem's maxfilesize. 1366 */ 1367 VATTR_NULL(&vattr); 1368 vap->va_size = offset + len; 1369 error = VOP_SETATTR(vp, vap, td->td_ucred); 1370 if (error != 0) 1371 goto out; 1372 VATTR_NULL(&vattr); 1373 vap->va_size = fsize; 1374 error = VOP_SETATTR(vp, vap, td->td_ucred); 1375 if (error != 0) 1376 goto out; 1377 } 1378 1379 for (;;) { 1380 /* 1381 * Read and write back anything below the nominal file 1382 * size. There's currently no way outside the filesystem 1383 * to know whether this area is sparse or not. 
1384 */ 1385 off_t cur = iosize; 1386 if ((offset % iosize) != 0) 1387 cur -= (offset % iosize); 1388 if (cur > len) 1389 cur = len; 1390 if (offset < fsize) { 1391 aiov.iov_base = buf; 1392 aiov.iov_len = cur; 1393 auio.uio_iov = &aiov; 1394 auio.uio_iovcnt = 1; 1395 auio.uio_offset = offset; 1396 auio.uio_resid = cur; 1397 auio.uio_segflg = UIO_SYSSPACE; 1398 auio.uio_rw = UIO_READ; 1399 auio.uio_td = td; 1400 error = VOP_READ(vp, &auio, 0, td->td_ucred); 1401 if (error != 0) 1402 break; 1403 if (auio.uio_resid > 0) { 1404 bzero(buf + cur - auio.uio_resid, 1405 auio.uio_resid); 1406 } 1407 } else { 1408 bzero(buf, cur); 1409 } 1410 1411 aiov.iov_base = buf; 1412 aiov.iov_len = cur; 1413 auio.uio_iov = &aiov; 1414 auio.uio_iovcnt = 1; 1415 auio.uio_offset = offset; 1416 auio.uio_resid = cur; 1417 auio.uio_segflg = UIO_SYSSPACE; 1418 auio.uio_rw = UIO_WRITE; 1419 auio.uio_td = td; 1420 1421 error = VOP_WRITE(vp, &auio, 0, td->td_ucred); 1422 if (error != 0) 1423 break; 1424 1425 len -= cur; 1426 offset += cur; 1427 if (len == 0) 1428 break; 1429 /* 1430 if (should_yield()) 1431 break; 1432 */ 1433 } 1434 out: 1435 ap->a_offset = offset; 1436 ap->a_len = len; 1437 kfree(buf, M_TEMP); 1438 1439 return (error); 1440 } 1441 1442 int 1443 vfs_stdroot(struct mount *mp, struct vnode **vpp) 1444 { 1445 return (EOPNOTSUPP); 1446 } 1447 1448 int 1449 vfs_stdstatfs(struct mount *mp, struct statfs *sbp, struct ucred *cred) 1450 { 1451 return (EOPNOTSUPP); 1452 } 1453 1454 /* 1455 * If the VFS does not implement statvfs, then call statfs and convert 1456 * the values. This code was taken from libc's __cvtstatvfs() function, 1457 * contributed by Joerg Sonnenberger. 
1458 */ 1459 int 1460 vfs_stdstatvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred) 1461 { 1462 struct statfs *in; 1463 int error; 1464 1465 in = &mp->mnt_stat; 1466 error = VFS_STATFS(mp, in, cred); 1467 if (error == 0) { 1468 bzero(sbp, sizeof(*sbp)); 1469 1470 sbp->f_bsize = in->f_bsize; 1471 sbp->f_frsize = in->f_bsize; 1472 sbp->f_blocks = in->f_blocks; 1473 sbp->f_bfree = in->f_bfree; 1474 sbp->f_bavail = in->f_bavail; 1475 sbp->f_files = in->f_files; 1476 sbp->f_ffree = in->f_ffree; 1477 1478 /* 1479 * XXX 1480 * This field counts the number of available inodes to non-root 1481 * users, but this information is not available via statfs. 1482 * Just ignore this issue by returning the total number 1483 * instead. 1484 */ 1485 sbp->f_favail = in->f_ffree; 1486 1487 /* 1488 * XXX 1489 * This field has a different meaning for statfs and statvfs. 1490 * For the former it is the cookie exported for NFS and not 1491 * intended for normal userland use. 1492 */ 1493 sbp->f_fsid = 0; 1494 1495 sbp->f_flag = 0; 1496 if (in->f_flags & MNT_RDONLY) 1497 sbp->f_flag |= ST_RDONLY; 1498 if (in->f_flags & MNT_NOSUID) 1499 sbp->f_flag |= ST_NOSUID; 1500 sbp->f_namemax = 0; 1501 sbp->f_owner = in->f_owner; 1502 /* 1503 * XXX 1504 * statfs contains the type as string, statvfs expects it as 1505 * enumeration. 
1506 */ 1507 sbp->f_type = 0; 1508 1509 sbp->f_syncreads = in->f_syncreads; 1510 sbp->f_syncwrites = in->f_syncwrites; 1511 sbp->f_asyncreads = in->f_asyncreads; 1512 sbp->f_asyncwrites = in->f_asyncwrites; 1513 } 1514 return (error); 1515 } 1516 1517 int 1518 vfs_stdvptofh(struct vnode *vp, struct fid *fhp) 1519 { 1520 return (EOPNOTSUPP); 1521 } 1522 1523 int 1524 vfs_stdstart(struct mount *mp, int flags) 1525 { 1526 return (0); 1527 } 1528 1529 int 1530 vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid, 1531 caddr_t arg, struct ucred *cred) 1532 { 1533 return (EOPNOTSUPP); 1534 } 1535 1536 int 1537 vfs_stdsync(struct mount *mp, int waitfor) 1538 { 1539 return (0); 1540 } 1541 1542 int 1543 vfs_stdnosync(struct mount *mp, int waitfor) 1544 { 1545 return (EOPNOTSUPP); 1546 } 1547 1548 int 1549 vfs_stdvget(struct mount *mp, struct vnode *dvp, ino_t ino, struct vnode **vpp) 1550 { 1551 return (EOPNOTSUPP); 1552 } 1553 1554 int 1555 vfs_stdfhtovp(struct mount *mp, struct vnode *rootvp, 1556 struct fid *fhp, struct vnode **vpp) 1557 { 1558 return (EOPNOTSUPP); 1559 } 1560 1561 int 1562 vfs_stdcheckexp(struct mount *mp, struct sockaddr *nam, int *extflagsp, 1563 struct ucred **credanonp) 1564 { 1565 return (EOPNOTSUPP); 1566 } 1567 1568 int 1569 vfs_stdinit(struct vfsconf *vfsp) 1570 { 1571 return (0); 1572 } 1573 1574 int 1575 vfs_stduninit(struct vfsconf *vfsp) 1576 { 1577 return(0); 1578 } 1579 1580 int 1581 vfs_stdextattrctl(struct mount *mp, int cmd, struct vnode *vp, 1582 int attrnamespace, const char *attrname, 1583 struct ucred *cred) 1584 { 1585 return(EOPNOTSUPP); 1586 } 1587 1588 #define ACCOUNTING_NB_FSTYPES 7 1589 1590 static const char *accounting_fstypes[ACCOUNTING_NB_FSTYPES] = { 1591 "ext2fs", "hammer", "mfs", "ntfs", "null", "tmpfs", "ufs" }; 1592 1593 int 1594 vfs_stdac_init(struct mount *mp) 1595 { 1596 const char* fs_type; 1597 int i, fstype_ok = 0; 1598 1599 /* is mounted fs type one we want to do some accounting for ? 
*/ 1600 for (i=0; i<ACCOUNTING_NB_FSTYPES; i++) { 1601 fs_type = accounting_fstypes[i]; 1602 if (strncmp(mp->mnt_stat.f_fstypename, fs_type, 1603 sizeof(mp->mnt_stat)) == 0) { 1604 fstype_ok = 1; 1605 break; 1606 } 1607 } 1608 if (fstype_ok == 0) 1609 return (0); 1610 1611 vq_init(mp); 1612 return (0); 1613 } 1614 1615 void 1616 vfs_stdac_done(struct mount *mp) 1617 { 1618 vq_done(mp); 1619 } 1620 1621 void 1622 vfs_stdncpgen_set(struct mount *mp, struct namecache *ncp) 1623 { 1624 } 1625 1626 int 1627 vfs_stdncpgen_test(struct mount *mp, struct namecache *ncp) 1628 { 1629 return 0; 1630 } 1631 1632 int 1633 vfs_stdmodifying(struct mount *mp) 1634 { 1635 if (mp->mnt_flag & MNT_RDONLY) 1636 return EROFS; 1637 return 0; 1638 } 1639 /* end of vfs default ops */ 1640