/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 *
 * $FreeBSD: src/sys/miscfs/nullfs/null_vnops.c,v 1.38.2.6 2002/07/31 00:32:28 semenu Exp $
 * $DragonFly: src/sys/vfs/nullfs/null_vnops.c,v 1.17 2004/10/07 01:13:21 dillon Exp $
 */

/*
 * Null Layer
 *
 * (See mount_null(8) for more information.)
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable layer techniques, and its "null-nodes" stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
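 *
 * The stacking itself is represented by the null_node structure defined
 * in null.h, which pairs each alias vnode with the lower vnode it
 * shadows.  In simplified sketch form (see null.h for the real
 * declaration):
 *
 *	struct null_node {
 *		LIST_ENTRY(null_node)	null_hash;	-- alias hash chain
 *		struct vnode		*null_lowervp;	-- vref'd lower vnode
 *		struct vnode		*null_vnode;	-- back pointer to alias
 *	};
 *
 * The VTONULL() and NULLVPTOLOWERVP() macros used throughout this file
 * simply follow these pointers.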
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
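 *
 * In outline, that sequence looks like this (an illustrative sketch,
 * not exact code; null_node_create() is the alias-building routine
 * used throughout this file):
 *
 *	VOP_LOOKUP(root null-node, &vp, "sys")
 *	    null_bypass: map the null-node argument to the UFS root vnode
 *	    VOP_LOOKUP(UFS root vnode, &lvp, "sys")   <- lower layer works
 *	    null_node_create(mount, lvp, &vp)         <- stack a null-node
 *	    return vp, a null-node aliasing the UFS "sys" vnode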
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 * Both techniques are sketched below.
 *
 */
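
/*
 * Illustrative sketches of the two techniques (not part of the build;
 * the argument lists follow the conventions used in this file and may
 * not match every operation exactly):
 *
 * Technique 1, reusing the bypass routine from inside a handler; the
 * bypass does the argument mapping:
 *
 *	error = null_bypass(&ap->a_head);
 *
 * Technique 2, invoking the lower layer directly; the handler must map
 * the vnode arguments itself:
 *
 *	error = VOP_GETATTR(NULLVPTOLOWERVP(ap->a_vp), ap->a_vap,
 *			    ap->a_cred, ap->a_td);
 */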

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include "null.h"

static int null_bug_bypass = 0;	/* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");

static int	null_resolve(struct vop_resolve_args *ap);
static int	null_access(struct vop_access_args *ap);
static int	null_createvobject(struct vop_createvobject_args *ap);
static int	null_destroyvobject(struct vop_destroyvobject_args *ap);
static int	null_getattr(struct vop_getattr_args *ap);
static int	null_getvobject(struct vop_getvobject_args *ap);
static int	null_inactive(struct vop_inactive_args *ap);
static int	null_islocked(struct vop_islocked_args *ap);
static int	null_lock(struct vop_lock_args *ap);
static int	null_lookup(struct vop_lookup_args *ap);
static int	null_open(struct vop_open_args *ap);
static int	null_print(struct vop_print_args *ap);
static int	null_reclaim(struct vop_reclaim_args *ap);
static int	null_rename(struct vop_rename_args *ap);
static int	null_setattr(struct vop_setattr_args *ap);
static int	null_unlock(struct vop_unlock_args *ap);

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 *
 * null_bypass(struct vnodeop_desc *a_desc, ...)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, j;

	if (null_bug_bypass)
		printf("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; ++i) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode **,
				    descp->vdesc_vp_offsets[i], ap);
		/*
		 * We're not guaranteed that any vnode other than the first
		 * is of our type.  Check for and don't map any that aren't.
		 * (We must always map the first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_tag != VT_NULL)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*this_vp_p = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * Several operations have the side effect of
			 * vrele'ing their vp's.  We must account for that
			 * in the lower vp we pass down.
			 */
			if (reles & (VDESC_VP0_WILLRELE << i))
				vref(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer with the modified
	 * argument structure.  We have to adjust a_ops to point to the
	 * lower vp's vop_ops structure.
	 */
	if (vps_p[0] && *vps_p[0]) {
		ap->a_ops = (*(vps_p[0]))->v_ops;
		error = vop_vnoperate_ap(ap);
	} else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value by restoring vnodes in the
	 * argument structure to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; ++i) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];

			/*
			 * Since we operated on the lowervp's instead of the
			 * null node vp's, we have to adjust the null node
			 * vp's based on what the VOP did to the lower vp.
			 *
			 * Note: the unlock case only occurs with rename.
			 * tdvp and tvp are both locked on call and must be
			 * unlocked on return.
			 *
			 * Unlock semantics indicate that if two locked vp's
			 * are passed and they are the same vp, they are only
			 * actually locked once.
			 */
			if (reles & (VDESC_VP0_WILLUNLOCK << i)) {
				VOP_UNLOCK(old_vps[i], NULL,
					   LK_THISLAYER, curthread);
				for (j = i + 1; j < VDESC_MAX_VPS; ++j) {
					if (descp->vdesc_vp_offsets[j] ==
					    VDESC_NO_OFFSET)
						break;
					if (old_vps[i] == old_vps[j]) {
						reles &=
						    ~(VDESC_VP0_WILLUNLOCK << j);
					}
				}
			}

			if (reles & (VDESC_VP0_WILLRELE << i))
				vrele(old_vps[i]);
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a vref'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode ***,
				       descp->vdesc_vpp_offset, ap);
		if (*vppp)
			error = null_node_create(old_vps[0]->v_mount,
						 **vppp, *vppp);
	}

out:
	return (error);
}
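
/*
 * A worked example of the flag mapping above (a sketch; the exact flag
 * assignments live in the vnode interface tables): the VDESC_VP0_WILLRELE
 * family are single bits ordered by vp slot, so "VDESC_VP0_WILLRELE << i"
 * selects the WILLRELE bit for the i'th vnode argument.  For an operation
 * whose second vp is marked WILLRELE, the map loop above vref()s the lower
 * vnode so the lower layer's vrele() consumes that extra reference, and
 * the unmap loop then vrele()s the upper vnode to complete the side effect
 * the caller expected.
 */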
327 */ 328 if (reles & (VDESC_VP0_WILLUNLOCK << i)) { 329 VOP_UNLOCK(old_vps[i], NULL, 330 LK_THISLAYER, curthread); 331 for (j = i + 1; j < VDESC_MAX_VPS; ++j) { 332 if (descp->vdesc_vp_offsets[j] == VDESC_NO_OFFSET) 333 break; 334 if (old_vps[i] == old_vps[j]) { 335 reles &= ~(1 << (VDESC_VP0_WILLUNLOCK << j)); 336 } 337 } 338 } 339 340 if (reles & (VDESC_VP0_WILLRELE << i)) 341 vrele(old_vps[i]); 342 } 343 } 344 345 /* 346 * Map the possible out-going vpp 347 * (Assumes that the lower layer always returns 348 * a vref'ed vpp unless it gets an error.) 349 */ 350 if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && 351 !(descp->vdesc_flags & VDESC_NOMAP_VPP) && 352 !error) { 353 /* 354 * XXX - even though some ops have vpp returned vp's, 355 * several ops actually vrele this before returning. 356 * We must avoid these ops. 357 * (This should go away when these ops are regularized.) 358 */ 359 if (descp->vdesc_flags & VDESC_VPP_WILLRELE) 360 goto out; 361 vppp = VOPARG_OFFSETTO(struct vnode***, 362 descp->vdesc_vpp_offset,ap); 363 if (*vppp) 364 error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp); 365 } 366 367 out: 368 return (error); 369 } 370 371 /* 372 * We have to carry on the locking protocol on the null layer vnodes 373 * as we progress through the tree. We also have to enforce read-only 374 * if this layer is mounted read-only. 375 * 376 * null_lookup(struct vnode *a_dvp, struct vnode **a_vpp, 377 * struct componentname *a_cnp) 378 */ 379 static int 380 null_lookup(struct vop_lookup_args *ap) 381 { 382 struct componentname *cnp = ap->a_cnp; 383 struct vnode *dvp = ap->a_dvp; 384 struct thread *td = cnp->cn_td; 385 int flags = cnp->cn_flags; 386 struct vnode *vp, *ldvp, *lvp; 387 int error; 388 389 if ((flags & CNP_ISLASTCN) && 390 (dvp->v_mount->mnt_flag & MNT_RDONLY) && 391 (cnp->cn_nameiop == NAMEI_DELETE || 392 cnp->cn_nameiop == NAMEI_RENAME)) { 393 return (EROFS); 394 } 395 ldvp = NULLVPTOLOWERVP(dvp); 396 397 /* 398 * If we are doing a ".." lookup we must release the lock on dvp 399 * now, before we run a lookup in the underlying fs, or we may 400 * deadlock. If we do this we must protect ldvp by ref'ing it. 401 */ 402 if (flags & CNP_ISDOTDOT) { 403 vref(ldvp); 404 VOP_UNLOCK(dvp, NULL, LK_THISLAYER, td); 405 } 406 407 /* 408 * Due to the non-deterministic nature of the handling of the 409 * parent directory lock by lookup, we cannot call null_bypass() 410 * here. We must make a direct call. It's faster to do a direct 411 * call, anyway. 412 */ 413 vp = lvp = NULL; 414 error = VOP_LOOKUP(ldvp, &lvp, cnp); 415 if (error == EJUSTRETURN && (flags & CNP_ISLASTCN) && 416 (dvp->v_mount->mnt_flag & MNT_RDONLY) && 417 (cnp->cn_nameiop == NAMEI_CREATE || 418 cnp->cn_nameiop == NAMEI_RENAME)) { 419 error = EROFS; 420 } 421 422 if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) { 423 if (ldvp == lvp) { 424 *ap->a_vpp = dvp; 425 vref(dvp); 426 vrele(lvp); 427 } else { 428 error = null_node_create(dvp->v_mount, lvp, &vp); 429 if (error == 0) 430 *ap->a_vpp = vp; 431 } 432 } 433 434 /* 435 * The underlying fs will set PDIRUNLOCK if it unlocked the parent 436 * directory, which means we have to follow suit in the nullfs layer. 437 * Note that the parent directory may have already been unlocked due 438 * to the ".." case. Note that use of cnp->cn_flags instead of flags. 
439 */ 440 if (flags & CNP_ISDOTDOT) { 441 if ((cnp->cn_flags & CNP_PDIRUNLOCK) == 0) 442 VOP_LOCK(dvp, NULL, LK_THISLAYER | LK_EXCLUSIVE, td); 443 vrele(ldvp); 444 } else if (cnp->cn_flags & CNP_PDIRUNLOCK) { 445 VOP_UNLOCK(dvp, NULL, LK_THISLAYER, td); 446 } 447 return (error); 448 } 449 450 /* 451 * Setattr call. Disallow write attempts if the layer is mounted read-only. 452 * 453 * null_setattr(struct vnodeop_desc *a_desc, struct vnode *a_vp, 454 * struct vattr *a_vap, struct ucred *a_cred, 455 * struct thread *a_td) 456 */ 457 int 458 null_setattr(struct vop_setattr_args *ap) 459 { 460 struct vnode *vp = ap->a_vp; 461 struct vattr *vap = ap->a_vap; 462 463 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL || 464 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || 465 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && 466 (vp->v_mount->mnt_flag & MNT_RDONLY)) 467 return (EROFS); 468 if (vap->va_size != VNOVAL) { 469 switch (vp->v_type) { 470 case VDIR: 471 return (EISDIR); 472 case VCHR: 473 case VBLK: 474 case VSOCK: 475 case VFIFO: 476 if (vap->va_flags != VNOVAL) 477 return (EOPNOTSUPP); 478 return (0); 479 case VREG: 480 case VLNK: 481 default: 482 /* 483 * Disallow write attempts if the filesystem is 484 * mounted read-only. 485 */ 486 if (vp->v_mount->mnt_flag & MNT_RDONLY) 487 return (EROFS); 488 } 489 } 490 491 return (null_bypass(&ap->a_head)); 492 } 493 494 /* 495 * We handle getattr only to change the fsid. 496 * 497 * null_getattr(struct vnode *a_vp, struct vattr *a_vap, struct ucred *a_cred, 498 * struct thread *a_td) 499 */ 500 static int 501 null_getattr(struct vop_getattr_args *ap) 502 { 503 int error; 504 505 if ((error = null_bypass(&ap->a_head)) != 0) 506 return (error); 507 508 ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0]; 509 return (0); 510 } 511 512 /* 513 * Resolve a locked ncp at the nullfs layer. 514 */ 515 static int 516 null_resolve(struct vop_resolve_args *ap) 517 { 518 return(vop_noresolve(ap)); 519 } 520 521 /* 522 * Handle to disallow write access if mounted read-only. 523 * 524 * null_access(struct vnode *a_vp, int a_mode, struct ucred *a_cred, 525 * struct thread *a_td) 526 */ 527 static int 528 null_access(struct vop_access_args *ap) 529 { 530 struct vnode *vp = ap->a_vp; 531 mode_t mode = ap->a_mode; 532 533 /* 534 * Disallow write attempts on read-only layers; 535 * unless the file is a socket, fifo, or a block or 536 * character device resident on the file system. 537 */ 538 if (mode & VWRITE) { 539 switch (vp->v_type) { 540 case VDIR: 541 case VLNK: 542 case VREG: 543 if (vp->v_mount->mnt_flag & MNT_RDONLY) 544 return (EROFS); 545 break; 546 default: 547 break; 548 } 549 } 550 return (null_bypass(&ap->a_head)); 551 } 552 553 /* 554 * We must handle open to be able to catch MNT_NODEV and friends. 555 * 556 * null_open(struct vnode *a_vp, int a_mode, struct ucred *a_cred, 557 * struct thread *a_td) 558 */ 559 static int 560 null_open(struct vop_open_args *ap) 561 { 562 struct vnode *vp = ap->a_vp; 563 struct vnode *lvp = NULLVPTOLOWERVP(ap->a_vp); 564 565 if ((vp->v_mount->mnt_flag & MNT_NODEV) && 566 (lvp->v_type == VBLK || lvp->v_type == VCHR)) 567 return ENXIO; 568 569 return (null_bypass(&ap->a_head)); 570 } 571 572 /* 573 * We handle this to eliminate null FS to lower FS 574 * file moving. Don't know why we don't allow this, 575 * possibly we should. 

/*
 * We handle rename to disallow moving files between the null FS and
 * the lower FS.  It's not clear why this is disallowed; possibly it
 * should be permitted.
 *
 * null_rename(struct vnode *a_fdvp, struct vnode *a_fvp,
 *	       struct componentname *a_fcnp, struct vnode *a_tdvp,
 *	       struct vnode *a_tvp, struct componentname *a_tcnp)
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp = ap->a_tvp;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fdvp);
		vrele(fvp);
		return (EXDEV);
	}

	return (null_bypass(&ap->a_head));
}

/*
 * A special flag, LK_THISLAYER, causes the locking function to operate
 * ONLY on the nullfs layer.  Otherwise we are responsible for locking not
 * only our layer, but the lower layer as well.
 *
 * null_lock(struct vnode *a_vp, lwkt_tokref_t a_vlock, int a_flags,
 *	     struct thread *a_td)
 */
static int
null_lock(struct vop_lock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct null_node *np = VTONULL(vp);
	struct vnode *lvp;
	int error;

	/*
	 * Lock the nullfs layer first, disposing of the interlock in the
	 * process.
	 */
	error = lockmgr(&vp->v_lock, flags & ~LK_THISLAYER,
			ap->a_vlock, ap->a_td);
	flags &= ~LK_INTERLOCK;

	/*
	 * If locking only the nullfs layer, or if there is no lower layer,
	 * or if an error occurred while attempting to lock the nullfs layer,
	 * we are done.
	 *
	 * np can be NULL if the vnode is being recycled from a previous
	 * hash collision.
	 */
	if ((flags & LK_THISLAYER) || np == NULL ||
	    np->null_lowervp == NULL || error) {
		return (error);
	}

	/*
	 * Lock the underlying vnode.  If we are draining we should not drain
	 * the underlying vnode, since it is not being destroyed, but we do
	 * lock it exclusively in that case.  Note that any interlocks have
	 * already been disposed of above.
	 */
	lvp = np->null_lowervp;
	if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
		NULLFSDEBUG("null_lock: avoiding LK_DRAIN\n");
		error = vn_lock(lvp, NULL,
				(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
				ap->a_td);
	} else {
		error = vn_lock(lvp, NULL, flags, ap->a_td);
	}

	/*
	 * If an error occurred we have to undo our nullfs lock, then return
	 * the original error.
	 */
	if (error)
		lockmgr(&vp->v_lock, LK_RELEASE, NULL, ap->a_td);
	return (error);
}

/*
 * A special flag, LK_THISLAYER, causes the unlocking function to operate
 * ONLY on the nullfs layer.  Otherwise we are responsible for unlocking not
 * only our layer, but the lower layer as well.
 *
 * null_unlock(struct vnode *a_vp, lwkt_tokref_t a_vlock, int a_flags,
 *	       struct thread *a_td)
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct null_node *np = VTONULL(vp);
	struct vnode *lvp;
	int error;

	/*
	 * nullfs layer only
	 */
	if (flags & LK_THISLAYER) {
		error = lockmgr(&vp->v_lock,
				(flags & ~LK_THISLAYER) | LK_RELEASE,
				ap->a_vlock, ap->a_td);
		return (error);
	}

	/*
	 * If there is no underlying vnode the lock operation occurs at
	 * the nullfs layer.  np can be NULL if the vnode is being recycled
	 * from a previous hash collision.
	 */
	if (np == NULL || (lvp = np->null_lowervp) == NULL) {
		error = lockmgr(&vp->v_lock, flags | LK_RELEASE,
				ap->a_vlock, ap->a_td);
		return (error);
	}

	/*
	 * Unlock the lower layer first, then our nullfs layer.
	 */
	VOP_UNLOCK(lvp, NULL, flags & ~LK_INTERLOCK, ap->a_td);
	error = lockmgr(&vp->v_lock, flags | LK_RELEASE,
			ap->a_vlock, ap->a_td);
	return (error);
}
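
/*
 * Taken together (a summary of the two routines above, not new
 * behavior): null_lock() acquires the nullfs layer's lock before the
 * lower vnode's, and null_unlock() releases the lower vnode's lock
 * first, so the pair always nests the two layers' locks in a
 * consistent order.
 */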

/*
 * null_islocked(struct vnode *a_vp, struct thread *a_td)
 *
 * If a lower layer exists return the lock status of the lower layer,
 * otherwise return the lock status of our nullfs layer.
 */
static int
null_islocked(struct vop_islocked_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *lvp;
	struct null_node *np = VTONULL(vp);
	int error;

	lvp = np->null_lowervp;
	if (lvp == NULL)
		error = lockstatus(&vp->v_lock, ap->a_td);
	else
		error = VOP_ISLOCKED(lvp, ap->a_td);
	return (error);
}

/*
 * The vnode is no longer active.  However, the new VFS API may retain
 * the node in the vfs cache.  There is no way to tell that someone issued
 * a remove/rmdir operation on the underlying filesystem (yet), but we can't
 * remove the lowervp reference here.
 *
 * null_inactive(struct vnode *a_vp, struct thread *a_td)
 */
static int
null_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	/*struct null_node *np = VTONULL(vp);*/

	/*
	 * At the moment don't do anything here.  All the rest of the code
	 * assumes that lowervp will remain intact, and the inactive nullvp
	 * may be reactivated at any time.  XXX I'm not sure why the 4.x code
	 * even worked.
	 */

	/*
	 * Now it is safe to release our nullfs layer vnode.
	 */
	VOP_UNLOCK(vp, NULL, 0, ap->a_td);
	return (0);
}

/*
 * We could free memory in null_inactive, but we do it here instead.
 * (It would be possible to guard against this by pointing vp->v_data
 * somewhere safe.)
 *
 * null_reclaim(struct vnode *a_vp, struct thread *a_td)
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *lowervp;
	struct null_node *np;

	np = VTONULL(vp);
	vp->v_data = NULL;
	/*
	 * Drop the null_lowervp reference to lowervp.  The lower vnode's
	 * inactive routine may or may not be called when we do the
	 * final vrele().
	 */
	if (np) {
		null_node_rem(np);
		lowervp = np->null_lowervp;
		np->null_lowervp = NULLVP;
		if (lowervp)
			vrele(lowervp);
		free(np, M_NULLFSNODE);
	}
	return (0);
}

/*
 * null_print(struct vnode *a_vp)
 */
static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct null_node *np = VTONULL(vp);

	if (np == NULL) {
		printf("\ttag VT_NULLFS, vp=%p, NULL v_data!\n", vp);
		return (0);
	}
	printf("\ttag VT_NULLFS, vp=%p, lowervp=%p\n", vp, np->null_lowervp);
	if (np->null_lowervp != NULL) {
		printf("\tlowervp_lock: ");
		lockmgr_printinfo(&np->null_lowervp->v_lock);
	} else {
		printf("\tnull_lock: ");
		lockmgr_printinfo(&vp->v_lock);
	}
	printf("\n");
	return (0);
}
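
/*
 * Sample null_print() output (addresses illustrative), derived from the
 * format strings above:
 *
 *	tag VT_NULLFS, vp=0xc12f8a40, lowervp=0xc12f8960
 *	lowervp_lock: ...
 */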

/*
 * Let an underlying filesystem do the work.
 *
 * null_createvobject(struct vnode *a_vp, struct thread *a_td)
 */
static int
null_createvobject(struct vop_createvobject_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *lowervp = VTONULL(vp) ? NULLVPTOLOWERVP(vp) : NULL;
	int error;

	if (vp->v_type == VNON || lowervp == NULL)
		return 0;
	error = VOP_CREATEVOBJECT(lowervp, ap->a_td);
	if (error)
		return (error);
	vp->v_flag |= VOBJBUF;
	return (0);
}

/*
 * We have nothing to destroy and this operation shouldn't be bypassed.
 *
 * null_destroyvobject(struct vnode *a_vp)
 */
static int
null_destroyvobject(struct vop_destroyvobject_args *ap)
{
	struct vnode *vp = ap->a_vp;

	vp->v_flag &= ~VOBJBUF;
	return (0);
}

/*
 * null_getvobject(struct vnode *a_vp, struct vm_object **a_objpp)
 *
 * Note that this can be called when a vnode is being recycled, and
 * v_data may be NULL in that case if nullfs had to recycle a vnode
 * due to a null_node collision.
 */
static int
null_getvobject(struct vop_getvobject_args *ap)
{
	struct vnode *lvp;

	if (ap->a_vp->v_data == NULL)
		return EINVAL;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	if (lvp == NULL)
		return EINVAL;
	return (VOP_GETVOBJECT(lvp, ap->a_objpp));
}

/*
 * Global vfs data structures
 */
struct vnodeopv_entry_desc null_vnodeop_entries[] = {
	{ &vop_default_desc,		(void *) null_bypass },
	{ &vop_resolve_desc,		(void *) null_resolve },
	{ &vop_access_desc,		(void *) null_access },
	{ &vop_createvobject_desc,	(void *) null_createvobject },
	{ &vop_destroyvobject_desc,	(void *) null_destroyvobject },
	{ &vop_getattr_desc,		(void *) null_getattr },
	{ &vop_getvobject_desc,		(void *) null_getvobject },
	{ &vop_inactive_desc,		(void *) null_inactive },
	{ &vop_islocked_desc,		(void *) null_islocked },
	{ &vop_lock_desc,		(void *) null_lock },
	{ &vop_lookup_desc,		(void *) null_lookup },
	{ &vop_open_desc,		(void *) null_open },
	{ &vop_print_desc,		(void *) null_print },
	{ &vop_reclaim_desc,		(void *) null_reclaim },
	{ &vop_rename_desc,		(void *) null_rename },
	{ &vop_setattr_desc,		(void *) null_setattr },
	{ &vop_unlock_desc,		(void *) null_unlock },
	{ NULL, NULL }
};
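
/*
 * Any operation without an explicit entry above reaches null_bypass()
 * via vop_default_desc; the explicit entries are exactly the handlers
 * which, per the comments at the top of this file, cannot be blindly
 * bypassed.  The table is registered when the layer is mounted (see
 * null_vfsops.c).
 */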