/*	$NetBSD: layer_vnops.c,v 1.11 2003/08/07 16:32:36 agc Exp $	*/

/*
 * Copyright (c) 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * This software was written by William Studenmund of the
 * Numerical Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the National Aeronautics & Space Administration
 *    nor the names of its contributors may be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
 * UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	$Id: layer_vnops.c,v 1.11 2003/08/07 16:32:36 agc Exp $
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */
/*
 * Null Layer vnode routines.
 *
 * (See mount_null(8) for more information.)
 *
 * The layer.h, layer_extern.h, layer_vfs.c, and layer_vnops.c files provide
 * the core implementation of the null file system and most other stacked
 * fs's.  The description below refers to the null file system, but the
 * services provided by the layer* files are useful for all layered fs's.
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable-layer techniques, and its null-nodes stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 * It is conceivable that other overlay filesystems will take different
 * parameters.  For instance, data migration or access control layers might
 * only take one pathname, which will serve both as the target-pn and
 * alias-pn described above.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any layered nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the layered nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a layered node on top of the returned vnode.
 *
 * The bypass routine in this file, layer_bypass(), is suitable for use
 * by many different layered filesystems.  It can be used by multiple
 * filesystems simultaneously.  Alternatively, a layered fs may provide
 * its own bypass routine, in which case layer_bypass() should be used as
 * a model.  For instance, the main functionality provided by umapfs, the
 * user identity mapping file system, is handled by a custom bypass routine.
 *
 * Typically a layered fs registers its selected bypass routine as the
 * default vnode operation in its vnodeopv_entry_desc table.  Additionally
 * the filesystem must store the bypass entry point in the layerm_bypass
 * field of struct layer_mount.  All other layer routines in this file will
 * use the layerm_bypass routine, for example as in the sketch below.
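 *
 * For illustration only, a hypothetical layered fs "examplefs" (the name
 * and the exact set of entries are invented here, modeled on the null
 * layer's table) might register these routines like so:
 *
 *	int (**examplefs_vnodeop_p) __P((void *));
 *	const struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
 *		{ &vop_default_desc,	layer_bypass },
 *		{ &vop_lookup_desc,	layer_lookup },
 *		{ &vop_setattr_desc,	layer_setattr },
 *		{ &vop_getattr_desc,	layer_getattr },
 *		{ &vop_lock_desc,	layer_lock },
 *		{ &vop_unlock_desc,	layer_unlock },
 *		{ &vop_inactive_desc,	layer_inactive },
 *		{ &vop_reclaim_desc,	layer_reclaim },
 *		{ NULL, NULL }
 *	};
 *
 * The LAYERFS_DO_BYPASS() calls below then dispatch through the
 * layerm_bypass field, so the fs-selected bypass routine is used even
 * from the special-cased operations in this file.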
 *
 * Although the bypass routine handles most operations outright, a number
 * of operations are special cased and handled by the layered fs.  One
 * group, layer_setattr, layer_getattr, layer_access, layer_open, and
 * layer_fsync, performs layer-specific manipulation in addition to calling
 * the bypass routine.  The other group is handled at this layer without
 * being bypassed at all: vop_lock, vop_unlock, vop_inactive, vop_reclaim,
 * and vop_print.
 *
 * Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  layer_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
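 *
 * A null-node is tied to its lower vnode through a struct layer_node
 * hung off of v_data.  As a rough sketch for orientation (see layer.h
 * for the authoritative definition, which carries a few more fields):
 *
 *	struct layer_node {
 *		LIST_ENTRY(layer_node)	layer_hash;	-- hash chain
 *		struct vnode		*layer_lowervp;	-- VREFed once
 *		struct vnode		*layer_vnode;	-- back pointer
 *	};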
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.  A sketch of
 * both approaches follows.
 */
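/*
 * To make the two techniques above concrete, here is a hypothetical
 * sketch.  This block is illustrative only; "vp", "cred", "p", and the
 * fsync arguments stand in for whatever the operation at hand provides.
 *
 * 1. Reuse the bypass routine, then adjust the result, as
 *    layer_getattr() below does:
 *
 *	error = LAYERFS_DO_BYPASS(vp, ap);
 *	if (error == 0)
 *		... post-process the results in *ap ...
 *
 * 2. Invoke the lower layer directly, mapping vnode arguments by hand:
 *
 *	error = VOP_FSYNC(LAYERVPTOLOWERVP(vp), cred,
 *	    FSYNC_WAIT, 0, 0, p);
 */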
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: layer_vnops.c,v 1.11 2003/08/07 16:32:36 agc Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/genfs/layer.h>
#include <miscfs/genfs/layer_extern.h>
#include <miscfs/genfs/genfs.h>


/*
 * This is the 08-June-99 bypass routine, based on the 10-Apr-92 bypass
 * routine by John Heidemann.
 * The new element for this version is that the whole nullfs
 * system gained the concept of locks on the lower node, and locks on
 * our nodes.  When returning from a call to the lower layer, we may
 * need to update lock state ONLY on our layer.  The LAYERFS_UPPER*LOCK()
 * macros provide this functionality.
 * The 10-Apr-92 version was optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 * Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * New for the 08-June-99 version: we also handle operations which unlock
 * the passed-in node (typically they vput the node).
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
layer_bypass(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	int (**our_vnodeop_p) __P((void *));
	struct vnode **this_vp_p;
	int error, error1;
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("layer_bypass: no vp's in map.\n");
#endif

	vps_p[0] =
	    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	vp0 = *vps_p[0];
	flags = MOUNTTOLAYERMOUNT(vp0->v_mount)->layerm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("layer_bypass: %s\n", descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		vps_p[i] = this_vp_p = VOPARG_OFFSETTO(struct vnode**,
		    descp->vdesc_vp_offsets[i], ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = LAYERVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*vps_p[0], descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLUNLOCK)
				LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
		    descp->vdesc_vpp_offset, ap);
		/*
		 * Only vop_lookup, vop_create, vop_mkdir, vop_bmap,
		 * vop_mknod, and vop_symlink return vpp's.  vop_bmap
		 * doesn't call bypass as the lower vpp is fine (we're just
		 * going to do i/o on it).  vop_lookup doesn't call bypass
		 * as a lookup on "." would generate a locking error.
		 * So all the calls which get us here have a locked vpp. :-)
		 */
		error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

out:
	return (error);
}
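/*
 * A filesystem supplying its own bypass routine would typically either
 * copy layer_bypass() and add its extra processing inline, or wrap it.
 * The simplest shape is a wrapper; a hypothetical sketch (the function
 * name and the pre-processing step are invented for illustration):
 *
 *	int
 *	examplefs_bypass(v)
 *		void *v;
 *	{
 *		struct vop_generic_args *ap = v;
 *
 *		... map fs-specific arguments in *ap, e.g. credentials ...
 *		return (layer_bypass(v));
 *	}
 *
 * umapfs takes the copy-and-extend approach instead, since it must remap
 * credential and uid arguments both before and after the lower call.
 */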
/*
 * We have to carry on the locking protocol on the layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
int
layer_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	int flags = cnp->cn_flags;
	struct vnode *dvp, *vp, *ldvp;
	int error, r;

	dvp = ap->a_dvp;

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	ldvp = LAYERVPTOLOWERVP(dvp);
	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	vp = *ap->a_vpp;

	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;
	/*
	 * We must do the same locking and unlocking at this layer as
	 * is done in the layers below us.  It used to be we would try
	 * to guess based on what was set with the flags and error codes.
	 *
	 * But that doesn't work.  So now we have the underlying VOP_LOOKUP
	 * tell us if it released the parent vnode, and we adjust the
	 * upper node accordingly.  We can't just look at the lock states
	 * of the lower nodes as someone else might have come along and
	 * locked the parent node after our call to VOP_LOOKUP locked it.
	 */
	if ((cnp->cn_flags & PDIRUNLOCK)) {
		LAYERFS_UPPERUNLOCK(dvp, 0, r);
	}
	if (ldvp == vp) {
		/*
		 * Did lookup on "." or ".." in the root node of a mount
		 * point.  So we return dvp after a VREF.
		 */
		*ap->a_vpp = dvp;
		VREF(dvp);
		vrele(vp);
	} else if (vp != NULL) {
		error = layer_node_create(dvp->v_mount, vp, ap->a_vpp);
	}
	return (error);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
int
layer_setattr(v)
	void *v;
{
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}
	return (LAYERFS_DO_BYPASS(vp, ap));
}

/*
 * We handle getattr only to change the fsid.
 */
int
layer_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	if ((error = LAYERFS_DO_BYPASS(vp, ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}
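/*
 * layer_getattr() above is the canonical example of the "call the bypass
 * routine, then adjust" technique described in the big comment at the
 * top of this file.  A derived layer would extend the same pattern;
 * hypothetically (map_uid() is an invented helper, not part of this
 * file or of layer_extern.h):
 *
 *	if ((error = LAYERFS_DO_BYPASS(vp, ap)) != 0)
 *		return (error);
 *	ap->a_vap->va_uid = map_uid(ap->a_vap->va_uid);
 *	return (0);
 */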
int
layer_access(v)
	void *v;
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only layers;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (LAYERFS_DO_BYPASS(vp, ap));
}

/*
 * We must handle open to be able to catch MNT_NODEV and friends.
 */
int
layer_open(v)
	void *v;
{
	struct vop_open_args *ap = v;
	struct vnode *vp = ap->a_vp;
	enum vtype lower_type = LAYERVPTOLOWERVP(vp)->v_type;

	if (((lower_type == VBLK) || (lower_type == VCHR)) &&
	    (vp->v_mount->mnt_flag & MNT_NODEV))
		return ENXIO;

	return LAYERFS_DO_BYPASS(vp, ap);
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
layer_lock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp, *lowervp;
	int flags = ap->a_flags, error;

	if (vp->v_vnlock != NULL) {
		/*
		 * The lower level has exported a struct lock to us.  Use
		 * it so that all vnodes in the stack lock and unlock
		 * simultaneously.  Note: we don't DRAIN the lock as DRAIN
		 * decommissions the lock - just because our vnode is
		 * going away doesn't mean the struct lock below us is.
		 * LK_EXCLUSIVE is fine.
		 */
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			return (lockmgr(vp->v_vnlock,
			    (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
			    &vp->v_interlock));
		} else
			return (lockmgr(vp->v_vnlock, flags,
			    &vp->v_interlock));
	} else {
		/*
		 * Ahh well.  It would be nice if the fs we're over would
		 * export a struct lock for us to use, but it doesn't.
		 *
		 * To prevent race conditions involving doing a lookup
		 * on "..", we have to lock the lower node, then lock our
		 * node.  Most of the time it won't matter that we lock our
		 * node (as any locking would need the lower one locked
		 * first).  But we can LK_DRAIN the upper lock as a step
		 * towards decommissioning it.
		 */
		lowervp = LAYERVPTOLOWERVP(vp);
		if (flags & LK_INTERLOCK) {
			simple_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			error = VOP_LOCK(lowervp,
			    (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE);
		} else
			error = VOP_LOCK(lowervp, flags);
		if (error)
			return (error);
		if ((error = lockmgr(&vp->v_lock, flags, &vp->v_interlock))) {
			VOP_UNLOCK(lowervp, 0);
		}
		return (error);
	}
}
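/*
 * The v_vnlock != NULL case above works because, when a layer vnode is
 * created, the layer code shares the lower vnode's exported lock with
 * the upper vnode; roughly (a paraphrase of the node setup in
 * layer_subr.c, not a literal quote):
 *
 *	vp->v_vnlock = lowervp->v_vnlock;
 *
 * With a shared lock, one lockmgr lock serializes the whole stack;
 * layer_lock falls back on its private v_lock only when the fs below
 * declines to export one (i.e. its v_vnlock is NULL).
 */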
/*
 * Unlock our layer.  If the lower layer hasn't exported a shared
 * struct lock, pass the unlock request down as well.
 */
int
layer_unlock(v)
	void *v;
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;

	if (vp->v_vnlock != NULL) {
		return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
		    &vp->v_interlock));
	} else {
		if (flags & LK_INTERLOCK) {
			simple_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		VOP_UNLOCK(LAYERVPTOLOWERVP(vp), flags);
		return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE,
		    &vp->v_interlock));
	}
}

/*
 * As long as genfs_nolock is in use, don't call VOP_ISLOCKED(lowervp)
 * if vp->v_vnlock == NULL, as genfs_noislocked will always report 0.
 */
int
layer_islocked(v)
	void *v;
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock != NULL)
		return (lockstatus(vp->v_vnlock));
	else
		return (lockstatus(&vp->v_lock));
}

/*
 * If vinvalbuf is calling us, it's a "shallow fsync" -- don't bother
 * syncing the underlying vnodes, since they'll be fsync'ed when
 * reclaimed; otherwise, pass it through to the underlying layer.
 *
 * XXX Do we still need to worry about shallow fsync?
 */
int
layer_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int  a_flags;
		off_t offlo;
		off_t offhi;
		struct proc *a_p;
	} */ *ap = v;

	if (ap->a_flags & FSYNC_RECLAIM) {
		return 0;
	}

	return (LAYERFS_DO_BYPASS(ap->a_vp, ap));
}

int
layer_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our layer_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	VOP_UNLOCK(vp, 0);

	/* ..., but don't cache the device node. */
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		vgone(vp);
	return (0);
}
int
layer_reclaim(v)
	void *v;
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(vp->v_mount);
	struct layer_node *xp = VTOLAYER(vp);
	struct vnode *lowervp = xp->layer_lowervp;

	/*
	 * Note: in vop_reclaim, the node's struct lock has been
	 * decommissioned, so we have to be careful about calling
	 * VOP's on ourself.  Even if we turned a LK_DRAIN into an
	 * LK_EXCLUSIVE in layer_lock, we still must be careful as
	 * VXLOCK is set.
	 */
	if (vp == lmp->layerm_rootvp) {
		/*
		 * Oops!  We no longer have a root node.  The most likely
		 * reason is that someone forcibly unmounted the
		 * underlying fs.
		 *
		 * Now getting the root vnode will fail.  We're dead. :-(
		 */
		lmp->layerm_rootvp = NULL;
	}
	/* After this assignment, this node will not be re-used. */
	xp->layer_lowervp = NULL;
	simple_lock(&lmp->layerm_hashlock);
	LIST_REMOVE(xp, layer_hash);
	simple_unlock(&lmp->layerm_hashlock);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele(lowervp);
	return (0);
}

/*
 * We just feed the returned vnode up to the caller - there's no need
 * to build a layer node on top of the node on which we're going to do
 * i/o. :-)
 */
int
layer_bmap(v)
	void *v;
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;
	struct vnode *vp;

	ap->a_vp = vp = LAYERVPTOLOWERVP(ap->a_vp);

	return (VCALL(vp, ap->a_desc->vdesc_offset, ap));
}

int
layer_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_LAYERFS, vp=%p, lowervp=%p\n", vp,
	    LAYERVPTOLOWERVP(vp));
	return (0);
}

/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
layer_strategy(v)
	void *v;
{
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = LAYERVPTOLOWERVP(bp->b_vp);

	error = VOP_STRATEGY(bp);

	bp->b_vp = savedvp;

	return (error);
}

/*
 * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
layer_bwrite(v)
	void *v;
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = LAYERVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}
int
layer_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	/*
	 * Just pass the request on to the underlying layer.
	 */

	if (ap->a_flags & PGO_LOCKED) {
		return EBUSY;
	}
	ap->a_vp = LAYERVPTOLOWERVP(vp);
	simple_unlock(&vp->v_interlock);
	simple_lock(&ap->a_vp->v_interlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
	return error;
}

int
layer_putpages(v)
	void *v;
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	/*
	 * Just pass the request on to the underlying layer.
	 */

	ap->a_vp = LAYERVPTOLOWERVP(vp);
	simple_unlock(&vp->v_interlock);
	simple_lock(&ap->a_vp->v_interlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
	return error;
}