/*	$NetBSD: null_vnops.c,v 1.7 1996/05/10 22:51:01 jtk Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.1 (Berkeley) 6/10/93
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	Id: lofs_vnops.c,v 1.11 1992/05/30 10:05:43 jsp Exp
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Null Layer
 *
 * (See mount_null(8) for more information.)
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable layer techniques, and its null-nodes stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
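 * For example, to mirror /usr/include under /dev/layer/null
 * (the same example used later in this comment):
 *
 *	mount_null /usr/include /dev/layer/null
 *
 * In sketch form, mount_null(8) boils down to a mount(2) call roughly
 * like the following (hypothetical userland code; struct null_args
 * comes from miscfs/nullfs/null.h and MOUNT_NULL from sys/mount.h):
 *
 *	struct null_args args;
 *
 *	args.target = "/usr/include";
 *	if (mount(MOUNT_NULL, "/dev/layer/null", 0, (caddr_t)&args) == -1)
 *		err(1, "mount");
 *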
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations,
 * vop_getattr, _inactive, _reclaim, and _print are not bypassed.
 * Vop_getattr must change the fsid being returned.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.
 * Vop_print is not bypassed to avoid excessive debugging
 * information.
 *
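 * A non-bypassed operation typically still calls the bypass routine
 * and then patches up the result; null_getattr below is the model.
 * In sketch form:
 *
 *	if ((error = null_bypass(ap)) != 0)
 *		return (error);
 *	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
 *	return (0);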
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 *
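 * For instance, a layer op holding a null-node vp might sync the
 * lower file directly (a hypothetical sketch, assuming the stock
 * 4.4BSD/NetBSD VOP_FSYNC argument list):
 *
 *	error = VOP_FSYNC(NULLVPTOLOWERVP(vp), cred, MNT_WAIT, p);
 *
 * mapping the vnode argument down by hand with NULLVPTOLOWERVP().
 *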
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/nullfs/null.h>


int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */

int	null_bypass __P((void *));
int	null_getattr __P((void *));
int	null_inactive __P((void *));
int	null_reclaim __P((void *));
int	null_print __P((void *));
int	null_strategy __P((void *));
int	null_bwrite __P((void *));
int	null_lock __P((void *));
int	null_unlock __P((void *));
int	null_islocked __P((void *));
int	null_lookup __P((void *));

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 * Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	register struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map.\n");
#endif
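
	/*
	 * (Descriptive note: descp->vdesc_flags doubles as a bitmask;
	 * if bit i is set, the operation vrele's its i'th vnode
	 * argument, so the loops below shift "reles" right as they
	 * walk the argument list and compensate with VREF/vrele.)
	 */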

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map the first vp
		 * or vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != null_vnodeop_p)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & 1)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i] != NULLVP) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1) {
				/*
				 * They really vput them, so we must drop
				 * our locks (but mark underneath as
				 * unlocked first).
				 * Beware of vnode duplication--put it once,
				 * and rele the rest.  Check this
				 * by looking at our upper flag.
				 */
				if (VTONULL(*(vps_p[i]))->null_flags & NULL_LOCKED) {
					VTONULL(*(vps_p[i]))->null_flags &= ~NULL_LLOCK;
					vput(*(vps_p[i]));
				} else
					vrele(*(vps_p[i]));
			}
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		/*
		 * This assumes that **vppp is a locked vnode (it is always
		 * so as of this writing, NetBSD-current 1995/02/16).
		 *
		 * (We don't want to lock it if we're being called on
		 * behalf of lookup--it plays weird locking games
		 * depending on whether or not it's looking up ".",
		 * "..", etc.)
		 */
		error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp,
			       descp == &vop_lookup_desc ? 0 : 1);
	}

 out:
	return (error);
}
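
/*
 * Most operations reach null_bypass() through the vop_default_desc
 * entry in the table at the end of this file.  A layer wanting an
 * explicit pass-through entry point would need nothing more than
 * this (a hypothetical sketch):
 *
 *	int
 *	null_fsync(v)
 *		void *v;
 *	{
 *		return (null_bypass(v));
 *	}
 */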


/*
 * We handle getattr only to change the fsid.
 */
int
null_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	int error;

	if ((error = null_bypass(ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}


int
null_inactive(v)
	void *v;
{
	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our null_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	return (0);
}

int
null_reclaim(v)
	void *v;
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;

	/*
	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
	 * so we can't call VOPs on ourself.
	 */
	/* After this assignment, this node will not be re-used. */
	xp->null_lowervp = NULL;
	LIST_REMOVE(xp, null_hash);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele (lowervp);
	return (0);
}


int
null_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	register struct null_node *nn = VTONULL(vp);

	printf ("\ttag VT_NULLFS, vp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
#ifdef DIAGNOSTIC
	printf("%s%s owner pid %d retpc %p retret %p\n",
	       (nn->null_flags & NULL_LOCKED) ? "(LOCKED) " : "",
	       (nn->null_flags & NULL_LLOCK) ? "(LLOCK) " : "",
	       nn->null_pid, nn->null_lockpc, nn->null_lockpc2);
#else
	printf("%s%s\n",
	       (nn->null_flags & NULL_LOCKED) ? "(LOCKED) " : "",
	       (nn->null_flags & NULL_LLOCK) ? "(LLOCK) " : "");
#endif
	vprint("nullfs lowervp", NULLVPTOLOWERVP(vp));
	return (0);
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
null_strategy(v)
	void *v;
{
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_STRATEGY(bp);

	bp->b_vp = savedvp;

	return (error);
}


/*
 * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
null_bwrite(v)
	void *v;
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}
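
/*
 * (Descriptive note on the lock state kept in null_flags by the
 * routines below: NULL_LOCKED means this null-node itself is locked,
 * NULL_LLOCK means we also hold the lower vnode's lock on its
 * behalf, and NULL_WANTED means another process is asleep in
 * null_lock() waiting for NULL_LOCKED to clear.)
 */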

/*
 * We need a separate null lock routine, to avoid deadlocks at reclaim time.
 * If a process holds the lower-vnode locked when it tries to reclaim
 * the null upper-vnode, _and_ null_bypass is used as the locking operation,
 * then a process can end up locking against itself.
 * This has been observed when a null mount is set up to "tunnel" beneath a
 * union mount (that setup is useful if you still wish to be able to access
 * the non-union version of either the above or below union layer).
 */
int
null_lock(v)
	void *v;
{
	struct vop_lock_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct null_node *nn;

#ifdef NULLFS_DIAGNOSTIC
	vprint("null_lock_e", ap->a_vp);
	printf("retpc=%lx, retretpc=%lx\n",
	       RETURN_PC(0),
	       RETURN_PC(1));
#endif
 start:
	while (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep((caddr_t)vp, PINOD, "nulllock1", 0);
	}

	nn = VTONULL(vp);

	if ((nn->null_flags & NULL_LLOCK) == 0 &&
	    (vp->v_usecount != 0)) {
		/*
		 * Only lock the underlying node if we haven't locked it yet
		 * for null ops, and our refcount is nonzero.  If usecount
		 * is zero, we are probably being reclaimed so we need to
		 * keep our hands off the lower node.
		 */
		VOP_LOCK(nn->null_lowervp);
		nn->null_flags |= NULL_LLOCK;
	}

	if (nn->null_flags & NULL_LOCKED) {
#ifdef DIAGNOSTIC
		if (curproc && nn->null_pid == curproc->p_pid &&
		    nn->null_pid > -1 && curproc->p_pid > -1) {
			vprint("self-lock", vp);
			panic("null: locking against myself");
		}
#endif
		nn->null_flags |= NULL_WANTED;
		tsleep((caddr_t)nn, PINOD, "nulllock2", 0);
		goto start;
	}

#ifdef DIAGNOSTIC
	if (curproc)
		nn->null_pid = curproc->p_pid;
	else
		nn->null_pid = -1;
	nn->null_lockpc = RETURN_PC(0);
	nn->null_lockpc2 = RETURN_PC(1);
#endif

	nn->null_flags |= NULL_LOCKED;
	return (0);
}

int
null_unlock(v)
	void *v;
{
	struct vop_unlock_args *ap = v;
	struct null_node *nn = VTONULL(ap->a_vp);

#ifdef NULLFS_DIAGNOSTIC
	vprint("null_unlock_e", ap->a_vp);
#endif
#ifdef DIAGNOSTIC
	if ((nn->null_flags & NULL_LOCKED) == 0) {
		vprint("null_unlock", ap->a_vp);
		panic("null: unlocking unlocked node");
	}
	if (curproc && nn->null_pid != curproc->p_pid &&
	    curproc->p_pid > -1 && nn->null_pid > -1) {
		vprint("null_unlock", ap->a_vp);
		panic("null: unlocking other process's null node");
	}
#endif
	nn->null_flags &= ~NULL_LOCKED;

	if ((nn->null_flags & NULL_LLOCK) != 0)
		VOP_UNLOCK(nn->null_lowervp);

	nn->null_flags &= ~NULL_LLOCK;

	if (nn->null_flags & NULL_WANTED) {
		nn->null_flags &= ~NULL_WANTED;
		wakeup((caddr_t)nn);
	}
#ifdef DIAGNOSTIC
	nn->null_pid = 0;
	nn->null_lockpc = nn->null_lockpc2 = 0;
#endif
	return (0);
}

int
null_islocked(v)
	void *v;
{
	struct vop_islocked_args *ap = v;

	return ((VTONULL(ap->a_vp)->null_flags & NULL_LOCKED) ? 1 : 0);
}
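
/*
 * (Descriptive note: null_lookup() cannot simply bypass.  Lookup's
 * locking protocol--the parent comes in locked, and LOCKPARENT,
 * ISDOTDOT and ISLASTCN all change what comes back locked--must be
 * mirrored on the upper layer, so we bypass and then fix up the
 * locks by hand below.)
 */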

int
null_lookup(v)
	void *v;
{
	register struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	register int error;
	register struct vnode *dvp;
	int flags = ap->a_cnp->cn_flags;

#ifdef NULLFS_DIAGNOSTIC
	printf("null_lookup: dvp=%lx, name='%s'\n",
	       ap->a_dvp, ap->a_cnp->cn_nameptr);
#endif
	/*
	 * The starting dir (ap->a_dvp) comes in locked.
	 */

	/* Set LOCKPARENT to hold on to it until done below. */
	ap->a_cnp->cn_flags |= LOCKPARENT;
	error = null_bypass(ap);
	if (!(flags & LOCKPARENT))
		ap->a_cnp->cn_flags &= ~LOCKPARENT;

	if (error)
		/*
		 * The starting dir is still locked/has been relocked
		 * on error return.
		 */
		return error;

	if (ap->a_dvp != *ap->a_vpp) {
		/*
		 * Lookup returns the node locked; we mark both lower and
		 * upper nodes as locked by setting the lower lock
		 * flag (it came back locked), and then call lock to
		 * set the upper lock flag & record pid, etc.  See
		 * null_node_create().
		 */
		VTONULL(*ap->a_vpp)->null_flags |= NULL_LLOCK;

		dvp = ap->a_dvp;
		if (flags & ISDOTDOT) {
			/*
			 * If we're looking up `..' and this isn't the
			 * last component, then the starting directory
			 * ("parent") is _unlocked_ as a side effect
			 * of lookups.  This is to avoid deadlocks:
			 * lock order is always parent, child, so
			 * looking up `..' requires dropping the lock
			 * on the starting directory.
			 */
			/* See ufs_lookup() for hairy ugly locking protocol
			   examples. */
			/*
			 * The underlying starting dir comes back locked
			 * if flags & LOCKPARENT (which we artificially
			 * set above) and ISLASTCN.
			 */
			if (flags & ISLASTCN) {
				VTONULL(dvp)->null_flags |= NULL_LLOCK; /* no-op, right? */
#ifdef NULLFS_DIAGNOSTIC
				if (!VOP_ISLOCKED(VTONULL(dvp)->null_lowervp)) {
					vprint("lowerdvp not locked after lookup\n", dvp);
					panic("null_lookup not locked");
				}
#endif
			} else {
				VTONULL(dvp)->null_flags &= ~NULL_LLOCK;
#ifdef NULLFS_DIAGNOSTIC
				if (VOP_ISLOCKED(VTONULL(dvp)->null_lowervp)) {
					vprint("lowerdvp locked after lookup?\n", dvp);
					panic("null_lookup locked");
				}
#endif
			}
			/*
			 * Locking order: drop the lock on the lower-in-tree
			 * element, then get the lock on the higher-in-tree
			 * element, then (if needed) re-fetch the lower
			 * lock.  No need for vget() since we hold a
			 * refcount to the starting directory.
			 */
			VOP_UNLOCK(dvp);
			VOP_LOCK(*ap->a_vpp);
			/*
			 * We should return our directory locked if
			 * (flags & LOCKPARENT) and (flags & ISLASTCN).
			 */
			if ((flags & LOCKPARENT) && (flags & ISLASTCN))
				VOP_LOCK(dvp);
		} else {
			/*
			 * Normal directory locking order: we hold the starting
			 * directory locked; now lock our layer of the target.
			 */
			VOP_LOCK(*ap->a_vpp);
			/*
			 * The underlying starting dir comes back locked
			 * if lockparent (we set it) and no error
			 * (this leg) and ISLASTCN.
			 */
			if (flags & ISLASTCN) {
				VTONULL(dvp)->null_flags |= NULL_LLOCK; /* no-op, right? */
#ifdef NULLFS_DIAGNOSTIC
				if (!VOP_ISLOCKED(VTONULL(dvp)->null_lowervp)) {
					vprint("lowerdvp not locked after lookup\n", dvp);
					panic("null_lookup not locked");
				}
#endif
			} else {
				VTONULL(dvp)->null_flags &= ~NULL_LLOCK;
#ifdef NULLFS_DIAGNOSTIC
				if (VOP_ISLOCKED(VTONULL(dvp)->null_lowervp)) {
					vprint("lowerdvp locked after lookup?\n", dvp);
					panic("null_lookup locked");
				}
#endif
			}
			/*
			 * We should return our directory unlocked if
			 * our caller didn't want the parent locked,
			 * !(flags & LOCKPARENT), or we're not at the
			 * end yet, !(flags & ISLASTCN).
			 */
			if (!(flags & LOCKPARENT) || !(flags & ISLASTCN))
				VOP_UNLOCK(dvp);
		}
	}
	return error;
}

/*
 * Global vfs data structures
 */
int (**null_vnodeop_p) __P((void *));
struct vnodeopv_entry_desc null_vnodeop_entries[] = {
	{ &vop_default_desc, null_bypass },

	{ &vop_getattr_desc, null_getattr },
	{ &vop_inactive_desc, null_inactive },
	{ &vop_reclaim_desc, null_reclaim },
	{ &vop_print_desc, null_print },

	{ &vop_lock_desc, null_lock },
	{ &vop_unlock_desc, null_unlock },
	{ &vop_islocked_desc, null_islocked },
	{ &vop_lookup_desc, null_lookup }, /* special locking frob */

	{ &vop_strategy_desc, null_strategy },
	{ &vop_bwrite_desc, null_bwrite },

	{ (struct vnodeop_desc*)NULL, (int(*) __P((void *)))NULL }
};
struct vnodeopv_desc null_vnodeop_opv_desc =
	{ &null_vnodeop_p, null_vnodeop_entries };
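
/*
 * (Descriptive note, an assumption about the surrounding kernel glue:
 * the opv_desc above is expected to be listed in vfs_opv_descs[] and
 * processed at boot by vfs_opv_init(), which allocates and fills in
 * the vector that null_vnodeop_p points to.)
 */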