/*	$NetBSD: layer_vnops.c,v 1.23 2004/06/30 17:42:55 hannken Exp $	*/

/*
 * Copyright (c) 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * This software was written by William Studenmund of the
 * Numerical Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the National Aeronautics & Space Administration
 *    nor the names of its contributors may be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
 * UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	$Id: layer_vnops.c,v 1.23 2004/06/30 17:42:55 hannken Exp $
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Null Layer vnode routines.
 *
 * (See mount_null(8) for more information.)
 *
 * The layer.h, layer_extern.h, layer_vfs.c, and layer_vnops.c files provide
 * the core implementation of the null file system and most other stacked
 * fs's.  The description below refers to the null file system, but the
 * services provided by the layer* files are useful for all layered fs's.
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects: it is implemented using
 * a stackable layers technique, and its null-nodes stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 * It is conceivable that other overlay filesystems will take different
 * parameters.  For instance, data migration or access control layers might
 * only take one pathname, which will serve both as the target-pn and
 * alias-pn described above.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any layered nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.
 * Finally, it replaces the layered nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a layered node on top of the returned vnode.
 *
 * The bypass routine in this file, layer_bypass(), is suitable for use
 * by many different layered filesystems.  It can be used by multiple
 * filesystems simultaneously.  Alternatively, a layered fs may provide
 * its own bypass routine, in which case layer_bypass() should be used as
 * a model.  For instance, the main functionality provided by umapfs, the
 * user identity mapping file system, is handled by a custom bypass routine.
 *
 * Typically a layered fs registers its selected bypass routine as the
 * default vnode operation in its vnodeopv_entry_desc table.  Additionally
 * the filesystem must store the bypass entry point in the layerm_bypass
 * field of struct layer_mount.  All other layer routines in this file will
 * use the layerm_bypass routine.  (A sketch of such a table appears just
 * before layer_bypass() below.)
 *
 * Although the bypass routine handles most operations outright, a number
 * of operations are special cased and handled by the layered fs.  One
 * group, layer_setattr, layer_getattr, layer_access, layer_open, and
 * layer_fsync, perform layer-specific manipulation in addition to calling
 * the bypass routine.  The other group requires special handling at this
 * layer and is described in the next paragraph.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * simply bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  layer_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
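 *
 * In code, that final aliasing step amounts to the following sketch
 * (variable names are illustrative and error handling is omitted;
 * layer_lookup() below is the real implementation):
 *
 *	error = VOP_LOOKUP(lowerdvp, &lowervp, cnp);
 *	if (error == 0)
 *		error = layer_node_create(dvp->v_mount, lowervp, vpp);
 *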
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is layer_getattr() below.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 * An example of this is layer_bmap() below, which invokes the
 * operation directly on the lower vnode.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: layer_vnops.c,v 1.23 2004/06/30 17:42:55 hannken Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/genfs/layer.h>
#include <miscfs/genfs/layer_extern.h>
#include <miscfs/genfs/genfs.h>

/*
 * This is the 08-June-99 bypass routine, based on the 10-Apr-92 bypass
 * routine by John Heidemann.
 * The new element for this version is that the whole nullfs
 * system gained the concept of locks on the lower node, and locks on
 * our nodes.  When returning from a call to the lower layer, we may
 * need to update lock state ONLY on our layer.  The LAYERFS_UPPER*LOCK()
 * macros provide this functionality.
 * The 10-Apr-92 version was optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 * Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * New for the 08-June-99 version: we also handle operations which unlock
 * the passed-in node (typically they vput the node).
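 *
 * As noted in the big comment above, a filesystem built on these
 * routines registers its bypass routine as the default vnode
 * operation.  A minimal sketch of such a registration follows; the
 * "example_*" names are purely illustrative:
 *
 *	int (**example_vnodeop_p) __P((void *));
 *	const struct vnodeopv_entry_desc example_vnodeop_entries[] = {
 *		{ &vop_default_desc, layer_bypass },
 *		{ &vop_lookup_desc, layer_lookup },
 *		{ &vop_getattr_desc, layer_getattr },
 *		{ NULL, NULL }
 *	};
 *	const struct vnodeopv_desc example_vnodeop_opv_desc =
 *		{ &example_vnodeop_p, example_vnodeop_entries };
 *
 * The fs must also store its bypass entry point (here layer_bypass)
 * in the layerm_bypass field of its struct layer_mount at mount time.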
 *
 * The bypass routine makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
layer_bypass(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	int (**our_vnodeop_p) __P((void *));
	struct vnode **this_vp_p;
	int error, error1;
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("%s: no vp's in map.\n", __func__);
#endif

	vps_p[0] =
	    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	vp0 = *vps_p[0];
	flags = MOUNTTOLAYERMOUNT(vp0->v_mount)->layerm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("%s: %s\n", __func__, descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode**,
		    descp->vdesc_vp_offsets[i], ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean
		 * fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = LAYERVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*vps_p[0], descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLUNLOCK)
				LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
		    descp->vdesc_vpp_offset, ap);
		/*
		 * Only vop_lookup, vop_create, vop_mkdir, vop_bmap,
		 * vop_mknod, and vop_symlink return vpp's.  vop_bmap
		 * doesn't call bypass as the lower vpp is fine (we're just
		 * going to do i/o on it).  vop_lookup doesn't call bypass
		 * as a lookup on "." would generate a locking error.
		 * So all the calls which get us here have a locked vpp. :-)
		 */
		error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
		if (error) {
			vput(**vppp);
			**vppp = NULL;
		}
	}

 out:
	return (error);
}

/*
 * We have to carry on the locking protocol on the layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
int
layer_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	int flags = cnp->cn_flags;
	struct vnode *dvp, *vp, *ldvp;
	int error, r;

	dvp = ap->a_dvp;

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	ldvp = LAYERVPTOLOWERVP(dvp);
	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	vp = *ap->a_vpp;
	*ap->a_vpp = NULL;

	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;
	/*
	 * We must do the same locking and unlocking at this layer as
	 * is done in the layers below us.  It used to be we would try
	 * to guess based on what was set with the flags and error codes.
	 *
	 * But that doesn't work.  So now we have the underlying VOP_LOOKUP
	 * tell us if it released the parent vnode, and we adjust the
	 * upper node accordingly.  We can't just look at the lock states
	 * of the lower nodes as someone else might have come along and
	 * locked the parent node after our call to VOP_LOOKUP locked it.
	 */
	if ((cnp->cn_flags & PDIRUNLOCK)) {
		LAYERFS_UPPERUNLOCK(dvp, 0, r);
	}
	if (ldvp == vp) {
		/*
		 * Did lookup on "." or ".." in the root node of a mount
		 * point.  So we return dvp after a VREF.
		 */
		*ap->a_vpp = dvp;
		VREF(dvp);
		vrele(vp);
	} else if (vp != NULL) {
		error = layer_node_create(dvp->v_mount, vp, ap->a_vpp);
		if (error) {
			vput(vp);
			if (cnp->cn_flags & PDIRUNLOCK) {
				if (vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY) == 0)
					cnp->cn_flags &= ~PDIRUNLOCK;
			}
		}
	}
	return (error);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
int
layer_setattr(v)
	void *v;
{
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL ||
	    vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}
	return (LAYERFS_DO_BYPASS(vp, ap));
}

/*
 * We handle getattr only to change the fsid.
 */
int
layer_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	if ((error = LAYERFS_DO_BYPASS(vp, ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];
	return (0);
}

int
layer_access(v)
	void *v;
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only layers, unless the
	 * file is a socket, fifo, or a block or character device
	 * resident on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (LAYERFS_DO_BYPASS(vp, ap));
}

/*
 * We must handle open to be able to catch MNT_NODEV and friends.
 */
int
layer_open(v)
	void *v;
{
	struct vop_open_args *ap = v;
	struct vnode *vp = ap->a_vp;
	enum vtype lower_type = LAYERVPTOLOWERVP(vp)->v_type;

	if (((lower_type == VBLK) || (lower_type == VCHR)) &&
	    (vp->v_mount->mnt_flag & MNT_NODEV))
		return ENXIO;

	return LAYERFS_DO_BYPASS(vp, ap);
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
layer_lock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp, *lowervp;
	int flags = ap->a_flags, error;

	if (vp->v_vnlock != NULL) {
		/*
		 * The lower level has exported a struct lock to us.  Use
		 * it so that all vnodes in the stack lock and unlock
		 * simultaneously.  Note: we don't DRAIN the lock as DRAIN
		 * decommissions the lock - just because our vnode is
		 * going away doesn't mean the struct lock below us is.
		 * LK_EXCLUSIVE is fine.
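		 *
		 * (The sharing is presumably established when the layer
		 * node is set up, by pointing our v_vnlock at the lock
		 * exported by the lower vnode; a single lockmgr() call
		 * then covers every vnode in the stack.)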
		 */
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			return (lockmgr(vp->v_vnlock,
			    (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
			    &vp->v_interlock));
		} else
			return (lockmgr(vp->v_vnlock, flags,
			    &vp->v_interlock));
	} else {
		/*
		 * Ahh well.  It would be nice if the fs we're over would
		 * export a struct lock for us to use, but it doesn't.
		 *
		 * To prevent race conditions involving doing a lookup
		 * on "..", we have to lock the lower node, then lock our
		 * node.  Most of the time it won't matter that we lock our
		 * node (as any locking would need the lower one locked
		 * first).  But we can LK_DRAIN the upper lock as a step
		 * towards decommissioning it.
		 */
		lowervp = LAYERVPTOLOWERVP(vp);
		if (flags & LK_INTERLOCK) {
			simple_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			error = VOP_LOCK(lowervp,
			    (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE);
		} else
			error = VOP_LOCK(lowervp, flags);
		if (error)
			return (error);
		if ((error = lockmgr(&vp->v_lock, flags, &vp->v_interlock))) {
			VOP_UNLOCK(lowervp, 0);
		}
		return (error);
	}
}

/*
 * We need to process our own vnode unlock and then pass the unlock
 * request on to the lower layer, mirroring layer_lock() above.
 */
int
layer_unlock(v)
	void *v;
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;

	if (vp->v_vnlock != NULL) {
		return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
		    &vp->v_interlock));
	} else {
		if (flags & LK_INTERLOCK) {
			simple_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		VOP_UNLOCK(LAYERVPTOLOWERVP(vp), flags);
		return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE,
		    &vp->v_interlock));
	}
}

int
layer_islocked(v)
	void *v;
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int lkstatus;

	if (vp->v_vnlock != NULL)
		return lockstatus(vp->v_vnlock);

	lkstatus = VOP_ISLOCKED(LAYERVPTOLOWERVP(vp));
	if (lkstatus)
		return lkstatus;

	return lockstatus(&vp->v_lock);
}

/*
 * If vinvalbuf is calling us, it's a "shallow fsync" -- don't bother
 * syncing the underlying vnodes, since they'll be fsync'ed when
 * reclaimed; otherwise, pass it through to the underlying layer.
 *
 * XXX Do we still need to worry about shallow fsync?
 */
int
layer_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct proc *a_p;
	} */ *ap = v;

	if (ap->a_flags & FSYNC_RECLAIM) {
		return 0;
	}

	return (LAYERFS_DO_BYPASS(ap->a_vp, ap));
}

int
layer_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our layer_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	VOP_UNLOCK(vp, 0);

	/*
	 * ..., but don't cache the device node.  Also, if we did a
	 * remove, don't cache the node.
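	 *
	 * vgone() below does the actual teardown; among other things
	 * it ends up calling our layer_reclaim(), which drops the
	 * reference on the lower vnode.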
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR
	    || (VTOLAYER(vp)->layer_flags & LAYERFS_REMOVED))
		vgone(vp);
	return (0);
}

int
layer_remove(v)
	void *v;
{
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error;
	struct vnode *vp = ap->a_vp;

	vref(vp);
	if ((error = LAYERFS_DO_BYPASS(vp, ap)) == 0)
		VTOLAYER(vp)->layer_flags |= LAYERFS_REMOVED;

	vrele(vp);

	return (error);
}

int
layer_rename(v)
	void *v;
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	int error;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp;

	tvp = ap->a_tvp;
	if (tvp) {
		if (tvp->v_mount != fdvp->v_mount)
			tvp = NULL;
		else
			vref(tvp);
	}
	error = LAYERFS_DO_BYPASS(fdvp, ap);
	if (tvp) {
		if (error == 0)
			VTOLAYER(tvp)->layer_flags |= LAYERFS_REMOVED;
		vrele(tvp);
	}

	return (error);
}

int
layer_rmdir(v)
	void *v;
{
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error;
	struct vnode *vp = ap->a_vp;

	vref(vp);
	if ((error = LAYERFS_DO_BYPASS(vp, ap)) == 0)
		VTOLAYER(vp)->layer_flags |= LAYERFS_REMOVED;

	vrele(vp);

	return (error);
}

int
layer_reclaim(v)
	void *v;
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(vp->v_mount);
	struct layer_node *xp = VTOLAYER(vp);
	struct vnode *lowervp = xp->layer_lowervp;

	/*
	 * Note: in vop_reclaim, the node's struct lock has been
	 * decommissioned, so we have to be careful about calling
	 * VOP's on ourself.  Even if we turned a LK_DRAIN into an
	 * LK_EXCLUSIVE in layer_lock, we still must be careful as
	 * VXLOCK is set.
	 */
	if (vp == lmp->layerm_rootvp) {
		/*
		 * Oops! We no longer have a root node.  The most likely
		 * reason is that someone forcibly unmounted the
		 * underlying fs.
		 *
		 * Now getting the root vnode will fail.  We're dead. :-(
		 */
		lmp->layerm_rootvp = NULL;
	}
	/* After this assignment, this node will not be re-used. */
	xp->layer_lowervp = NULL;
	simple_lock(&lmp->layerm_hashlock);
	LIST_REMOVE(xp, layer_hash);
	simple_unlock(&lmp->layerm_hashlock);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele(lowervp);
	return (0);
}

/*
 * We just feed the returned vnode up to the caller - there's no need
 * to build a layer node on top of the node on which we're going to do
 * i/o. :-)
 */
int
layer_bmap(v)
	void *v;
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;
	struct vnode *vp;

	ap->a_vp = vp = LAYERVPTOLOWERVP(ap->a_vp);

	return (VCALL(vp, ap->a_desc->vdesc_offset, ap));
}

int
layer_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_LAYERFS, vp=%p, lowervp=%p\n", vp,
	    LAYERVPTOLOWERVP(vp));
	return (0);
}

/*
 * XXX - vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
layer_bwrite(v)
	void *v;
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = LAYERVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}

int
layer_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	/*
	 * Just pass the request on to the underlying layer.
	 */
	if (ap->a_flags & PGO_LOCKED) {
		return EBUSY;
	}
	ap->a_vp = LAYERVPTOLOWERVP(vp);
	simple_unlock(&vp->v_interlock);
	simple_lock(&ap->a_vp->v_interlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
	return error;
}

int
layer_putpages(v)
	void *v;
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	/*
	 * Just pass the request on to the underlying layer.
	 */
	ap->a_vp = LAYERVPTOLOWERVP(vp);
	simple_unlock(&vp->v_interlock);
	simple_lock(&ap->a_vp->v_interlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
	return error;
}