/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 * $FreeBSD: src/sys/miscfs/nullfs/null_vnops.c,v 1.38.2.6 2002/07/31 00:32:28 semenu Exp $
 * $DragonFly: src/sys/vfs/nullfs/null_vnops.c,v 1.19 2004/10/27 08:52:06 dillon Exp $
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 *
 * $FreeBSD: src/sys/miscfs/nullfs/null_vnops.c,v 1.38.2.6 2002/07/31 00:32:28 semenu Exp $
 */

/*
 * Null Layer
 *
 * (See mount_null(8) for more information.)
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable layer techniques, and its "null-node"s stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimal file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining the vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should instead be changed to let the caller of the
 * function unlock them.  Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
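 *
 * A condensed, illustrative sketch of that stacking step (it mirrors what
 * null_lookup() and null_bypass() below actually do; locking and most error
 * handling are omitted and the variable names are illustrative only):
 *
 *	lowerdvp = NULLVPTOLOWERVP(dvp);
 *	error = VOP_LOOKUP(lowerdvp, &lowervp, cnp);
 *	if (error == 0 && lowervp != NULLVP)
 *		error = null_node_create(dvp->v_mount, lowervp, &vp);
 *
 * where "vp" receives the newly stacked null-node.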
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr() in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 * Both techniques are sketched in the comment that follows.
 */
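
/*
 * A minimal sketch of the two techniques above, for orientation only (the
 * working examples in this file are null_getattr(), which funnels through
 * the bypass routine, and null_islocked(), which calls the lower layer
 * directly):
 *
 *	Technique 1 -- let the bypass routine do the argument mapping:
 *
 *		error = null_bypass(&ap->a_head);
 *
 *	Technique 2 -- map the vnode argument by hand and use the
 *	VOP_OPERATIONNAME interface:
 *
 *		lvp = NULLVPTOLOWERVP(ap->a_vp);
 *		error = VOP_ISLOCKED(lvp, ap->a_td);
 */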

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include "null.h"

static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");

static int	null_resolve(struct vop_resolve_args *ap);
static int	null_revoke(struct vop_revoke_args *ap);
static int	null_access(struct vop_access_args *ap);
static int	null_createvobject(struct vop_createvobject_args *ap);
static int	null_destroyvobject(struct vop_destroyvobject_args *ap);
static int	null_getattr(struct vop_getattr_args *ap);
static int	null_getvobject(struct vop_getvobject_args *ap);
static int	null_inactive(struct vop_inactive_args *ap);
static int	null_islocked(struct vop_islocked_args *ap);
static int	null_lock(struct vop_lock_args *ap);
static int	null_lookup(struct vop_lookup_args *ap);
static int	null_open(struct vop_open_args *ap);
static int	null_print(struct vop_print_args *ap);
static int	null_reclaim(struct vop_reclaim_args *ap);
static int	null_rename(struct vop_rename_args *ap);
static int	null_setattr(struct vop_setattr_args *ap);
static int	null_unlock(struct vop_unlock_args *ap);

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 *
 * null_bypass(struct vnodeop_desc *a_desc, ...)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, j;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; ++i) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We are not guaranteed that any vnode but the first is
		 * of our type.  Check for and don't map any that aren't.
		 * (We must always map the first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_tag != VT_NULL)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*this_vp_p = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * Several operations have the side effect of vrele'ing
			 * their vp's.  We must account for that in the lower
			 * vp we pass down.
			 */
			if (reles & (VDESC_VP0_WILLRELE << i))
				vref(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer with the modified
	 * argument structure.  We have to adjust a_ops to point to the
	 * lower vp's vop_ops structure.
	 */
	if (vps_p[0] && *vps_p[0]) {
		ap->a_ops = (*(vps_p[0]))->v_ops;
		error = vop_vnoperate_ap(ap);
	} else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value by restoring vnodes in the
	 * argument structure to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; ++i) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];

			/*
			 * Since we operated on the lowervp's instead of the
			 * null node vp's, we have to adjust the null node
			 * vp's based on what the VOP did to the lower vp.
			 *
			 * Note: the unlock case only occurs with rename.
			 * tdvp and tvp are both locked on call and must be
			 * unlocked on return.
			 *
			 * Unlock semantics indicate that if two locked vp's
			 * are passed and they are the same vp, they are only
			 * actually locked once.
			 */
			if (reles & (VDESC_VP0_WILLUNLOCK << i)) {
				VOP_UNLOCK(old_vps[i], LK_THISLAYER, curthread);
				for (j = i + 1; j < VDESC_MAX_VPS; ++j) {
					if (descp->vdesc_vp_offsets[j] == VDESC_NO_OFFSET)
						break;
					if (old_vps[i] == old_vps[j]) {
						reles &= ~(VDESC_VP0_WILLUNLOCK << j);
					}
				}
			}

			if (reles & (VDESC_VP0_WILLRELE << i))
				vrele(old_vps[i]);
		}
	}

	/*
	 * Map the possible outgoing vpp.
	 * (Assumes that the lower layer always returns
	 * a vref'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		if (*vppp)
			error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

out:
	return (error);
}
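
/*
 * Typical use of the bypass routine by a mostly pass-through operation
 * (a hedged sketch; null_getattr() below is the real in-tree example):
 * perform the op on the lower layer via the bypass routine, then patch up
 * any layer-specific fields in the result before returning.
 *
 *	if ((error = null_bypass(&ap->a_head)) != 0)
 *		return (error);
 *	(adjust layer-specific fields here, e.g. the fsid)
 *	return (0);
 */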

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 *
 * null_lookup(struct vnode *a_dvp, struct vnode **a_vpp,
 *	       struct componentname *a_cnp)
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct thread *td = cnp->cn_td;
	int flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	int error;

	if ((flags & CNP_ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == NAMEI_DELETE ||
	     cnp->cn_nameiop == NAMEI_RENAME)) {
		return (EROFS);
	}
	ldvp = NULLVPTOLOWERVP(dvp);

	/*
	 * If we are doing a ".." lookup we must release the lock on dvp
	 * now, before we run a lookup in the underlying fs, or we may
	 * deadlock.  If we do this we must protect ldvp by ref'ing it.
	 */
	if (flags & CNP_ISDOTDOT) {
		vref(ldvp);
		VOP_UNLOCK(dvp, LK_THISLAYER, td);
	}

	/*
	 * Due to the non-deterministic nature of the handling of the
	 * parent directory lock by lookup, we cannot call null_bypass()
	 * here.  We must make a direct call.  It's faster to do a direct
	 * call, anyway.
	 */
	vp = lvp = NULL;
	error = VOP_LOOKUP(ldvp, &lvp, cnp);
	if (error == EJUSTRETURN && (flags & CNP_ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == NAMEI_CREATE ||
	     cnp->cn_nameiop == NAMEI_RENAME)) {
		error = EROFS;
	}

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			vref(dvp);
			vrele(lvp);
		} else {
			error = null_node_create(dvp->v_mount, lvp, &vp);
			if (error == 0)
				*ap->a_vpp = vp;
		}
	}

	/*
	 * The underlying fs will set PDIRUNLOCK if it unlocked the parent
	 * directory, which means we have to follow suit in the nullfs layer.
	 * Note that the parent directory may have already been unlocked due
	 * to the ".." case.  Note the use of cnp->cn_flags instead of flags.
	 */
	if (flags & CNP_ISDOTDOT) {
		if ((cnp->cn_flags & CNP_PDIRUNLOCK) == 0)
			VOP_LOCK(dvp, LK_THISLAYER | LK_EXCLUSIVE, td);
		vrele(ldvp);
	} else if (cnp->cn_flags & CNP_PDIRUNLOCK) {
		VOP_UNLOCK(dvp, LK_THISLAYER, td);
	}
	return (error);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 *
 * null_setattr(struct vnodeop_desc *a_desc, struct vnode *a_vp,
 *		struct vattr *a_vap, struct ucred *a_cred,
 *		struct thread *a_td)
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass(&ap->a_head));
}

/*
 * We handle getattr only to change the fsid.
 *
 * null_getattr(struct vnode *a_vp, struct vattr *a_vap, struct ucred *a_cred,
 *		struct thread *a_td)
 */
static int
null_getattr(struct vop_getattr_args *ap)
{
	int error;

	if ((error = null_bypass(&ap->a_head)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * Resolve a locked ncp at the nullfs layer.
 */
static int
null_resolve(struct vop_resolve_args *ap)
{
	return(vop_noresolve(ap));
}

/*
 * Revoke is called with the vnode VX locked, so we cannot go through
 * null_bypass().
 */
static int
null_revoke(struct vop_revoke_args *ap)
{
	struct null_node *np;
	struct vnode *lvp;

	np = VTONULL(ap->a_vp);
	vx_unlock(ap->a_vp);
	if ((lvp = np->null_lowervp) != NULL) {
		vx_get(lvp);
		VOP_REVOKE(lvp, ap->a_flags);
		vx_put(lvp);
	}
	vx_lock(ap->a_vp);
	vgone(ap->a_vp);
	return(0);
}

/*
 * We handle access to disallow write attempts if the layer is mounted
 * read-only.
 *
 * null_access(struct vnode *a_vp, int a_mode, struct ucred *a_cred,
 *	       struct thread *a_td)
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only layers;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass(&ap->a_head));
}

/*
 * We must handle open to be able to catch MNT_NODEV and friends.
 *
 * null_open(struct vnode *a_vp, int a_mode, struct ucred *a_cred,
 *	     struct thread *a_td)
 */
static int
null_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *lvp = NULLVPTOLOWERVP(ap->a_vp);

	if ((vp->v_mount->mnt_flag & MNT_NODEV) &&
	    (lvp->v_type == VBLK || lvp->v_type == VCHR))
		return ENXIO;

	return (null_bypass(&ap->a_head));
}

/*
 * We handle this to prevent moving a file from the null FS into the
 * lower FS.  It is not clear why this is disallowed; possibly it should
 * be permitted.
 *
 * null_rename(struct vnode *a_fdvp, struct vnode *a_fvp,
 *	       struct componentname *a_fcnp, struct vnode *a_tdvp,
 *	       struct vnode *a_tvp, struct componentname *a_tcnp)
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp = ap->a_tvp;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fdvp);
		vrele(fvp);
		return (EXDEV);
	}

	return (null_bypass(&ap->a_head));
}

/*
 * A special flag, LK_THISLAYER, causes the locking function to operate
 * ONLY on the nullfs layer.  Otherwise we are responsible for locking not
 * only our layer, but the lower layer as well.
 *
 * null_lock(struct vnode *a_vp, int a_flags, struct thread *a_td)
 */
static int
null_lock(struct vop_lock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct null_node *np = VTONULL(vp);
	struct vnode *lvp;
	int error;

	/*
	 * Lock the nullfs layer first, disposing of the interlock in the
	 * process.
	 */
	KKASSERT((flags & LK_INTERLOCK) == 0);
	error = lockmgr(&vp->v_lock, flags & ~LK_THISLAYER,
			NULL, ap->a_td);

	/*
	 * If locking only the nullfs layer, or if there is no lower layer,
	 * or if an error occurred while attempting to lock the nullfs layer,
	 * we are done.
	 *
	 * np can be NULL if the vnode is being recycled from a previous
	 * hash collision.
	 */
	if ((flags & LK_THISLAYER) || np == NULL ||
	    np->null_lowervp == NULL || error) {
		return (error);
	}

	/*
	 * Lock the underlying vnode.  If we are draining we should not drain
	 * the underlying vnode, since it is not being destroyed, but we do
	 * lock it exclusively in that case.  Note that any interlocks have
	 * already been disposed of above.
	 */
	lvp = np->null_lowervp;
	if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
		NULLFSDEBUG("null_lock: avoiding LK_DRAIN\n");
		error = vn_lock(lvp, (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
				ap->a_td);
	} else {
		error = vn_lock(lvp, flags, ap->a_td);
	}

	/*
	 * If an error occurred we have to undo our nullfs lock, then return
	 * the original error.
	 */
	if (error)
		lockmgr(&vp->v_lock, LK_RELEASE, NULL, ap->a_td);
	return(error);
}
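
/*
 * For illustration (this mirrors what null_lookup() above does for the ".."
 * case): a caller that wants to drop only the nullfs-layer lock while
 * leaving the lower vnode locked passes LK_THISLAYER,
 *
 *	VOP_UNLOCK(vp, LK_THISLAYER, td);
 *
 * whereas a plain VOP_UNLOCK(vp, 0, td) releases the lock at both layers
 * (see null_unlock() below).
 */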

/*
 * A special flag, LK_THISLAYER, causes the unlocking function to operate
 * ONLY on the nullfs layer.  Otherwise we are responsible for unlocking not
 * only our layer, but the lower layer as well.
 *
 * null_unlock(struct vnode *a_vp, int a_flags, struct thread *a_td)
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct null_node *np = VTONULL(vp);
	struct vnode *lvp;
	int error;

	KKASSERT((flags & LK_INTERLOCK) == 0);
	/*
	 * nullfs layer only
	 */
	if (flags & LK_THISLAYER) {
		error = lockmgr(&vp->v_lock,
				(flags & ~LK_THISLAYER) | LK_RELEASE,
				NULL, ap->a_td);
		return (error);
	}

	/*
	 * If there is no underlying vnode the lock operation occurs at
	 * the nullfs layer.  np can be NULL if the vnode is being recycled
	 * from a previous hash collision.
	 */
	if (np == NULL || (lvp = np->null_lowervp) == NULL) {
		error = lockmgr(&vp->v_lock, flags | LK_RELEASE,
				NULL, ap->a_td);
		return(error);
	}

	/*
	 * Unlock the lower layer first, then our nullfs layer.
	 */
	VOP_UNLOCK(lvp, flags, ap->a_td);
	error = lockmgr(&vp->v_lock, flags | LK_RELEASE, NULL, ap->a_td);
	return (error);
}

/*
 * null_islocked(struct vnode *a_vp, struct thread *a_td)
 *
 * If a lower layer exists return the lock status of the lower layer,
 * otherwise return the lock status of our nullfs layer.
 */
static int
null_islocked(struct vop_islocked_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *lvp;
	struct null_node *np = VTONULL(vp);
	int error;

	lvp = np->null_lowervp;
	if (lvp == NULL)
		error = lockstatus(&vp->v_lock, ap->a_td);
	else
		error = VOP_ISLOCKED(lvp, ap->a_td);
	return (error);
}


/*
 * The vnode is no longer active.  However, the new VFS API may retain
 * the node in the vfs cache.  There is no way to tell that someone issued
 * a remove/rmdir operation on the underlying filesystem (yet), but we can't
 * remove the lowervp reference here.
 *
 * null_inactive(struct vnode *a_vp, struct thread *a_td)
 */
static int
null_inactive(struct vop_inactive_args *ap)
{
	/*struct vnode *vp = ap->a_vp;*/
	/*struct null_node *np = VTONULL(vp);*/

	/*
	 * At the moment don't do anything here.  All the rest of the code
	 * assumes that lowervp will remain intact, and the inactive nullvp
	 * may be reactivated at any time.  XXX I'm not sure why the 4.x code
	 * even worked.
	 */

	/*
	 * Now it is safe to release our nullfs layer vnode.
	 */
	return (0);
}

/*
 * We could free memory in null_inactive, but we do it here instead.
 * (It would also be possible to guard vp->v_data so it points somewhere
 * valid.)
 *
 * null_reclaim(struct vnode *a_vp, struct thread *a_td)
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *lowervp;
	struct null_node *np;

	np = VTONULL(vp);
	vp->v_data = NULL;
	/*
	 * Release the null_lowervp reference to the lower vnode.  The
	 * lower vnode's inactive routine may or may not be called when
	 * we do the final vrele().
	 */
	if (np) {
		null_node_rem(np);
		lowervp = np->null_lowervp;
		np->null_lowervp = NULLVP;
		if (lowervp)
			vrele(lowervp);
		free(np, M_NULLFSNODE);
	}
	return (0);
}

/*
 * null_print(struct vnode *a_vp)
 */
static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct null_node *np = VTONULL(vp);

	if (np == NULL) {
		printf ("\ttag VT_NULLFS, vp=%p, NULL v_data!\n", vp);
		return(0);
	}
	printf ("\ttag VT_NULLFS, vp=%p, lowervp=%p\n", vp, np->null_lowervp);
	if (np->null_lowervp != NULL) {
		printf("\tlowervp_lock: ");
		lockmgr_printinfo(&np->null_lowervp->v_lock);
	} else {
		printf("\tnull_lock: ");
		lockmgr_printinfo(&vp->v_lock);
	}
	printf("\n");
	return (0);
}

/*
 * Let an underlying filesystem do the work.
 *
 * null_createvobject(struct vnode *vp, struct ucred *cred, struct proc *p)
 */
static int
null_createvobject(struct vop_createvobject_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *lowervp = VTONULL(vp) ? NULLVPTOLOWERVP(vp) : NULL;
	int error;

	if (vp->v_type == VNON || lowervp == NULL)
		return 0;
	error = VOP_CREATEVOBJECT(lowervp, ap->a_td);
	if (error)
		return (error);
	vp->v_flag |= VOBJBUF;
	return (0);
}

/*
 * We have nothing to destroy and this operation shouldn't be bypassed.
 *
 * null_destroyvobject(struct vnode *vp)
 */
static int
null_destroyvobject(struct vop_destroyvobject_args *ap)
{
	struct vnode *vp = ap->a_vp;

	vp->v_flag &= ~VOBJBUF;
	return (0);
}

/*
 * null_getvobject(struct vnode *vp, struct vm_object **objpp)
 *
 * Note that this can be called when a vnode is being recycled, and
 * v_data may be NULL in that case if nullfs had to recycle a vnode
 * due to a null_node collision.
 */
static int
null_getvobject(struct vop_getvobject_args *ap)
{
	struct vnode *lvp;

	if (ap->a_vp->v_data == NULL)
		return EINVAL;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	if (lvp == NULL)
		return EINVAL;
	return (VOP_GETVOBJECT(lvp, ap->a_objpp));
}

/*
 * Global vfs data structures
 */
struct vnodeopv_entry_desc null_vnodeop_entries[] = {
	{ &vop_default_desc,		(void *) null_bypass },
	{ &vop_resolve_desc,		(void *) null_resolve },
	{ &vop_access_desc,		(void *) null_access },
	{ &vop_createvobject_desc,	(void *) null_createvobject },
	{ &vop_destroyvobject_desc,	(void *) null_destroyvobject },
	{ &vop_getattr_desc,		(void *) null_getattr },
	{ &vop_getvobject_desc,		(void *) null_getvobject },
	{ &vop_inactive_desc,		(void *) null_inactive },
	{ &vop_islocked_desc,		(void *) null_islocked },
	{ &vop_lock_desc,		(void *) null_lock },
	{ &vop_lookup_desc,		(void *) null_lookup },
	{ &vop_open_desc,		(void *) null_open },
	{ &vop_print_desc,		(void *) null_print },
	{ &vop_reclaim_desc,		(void *) null_reclaim },
	{ &vop_rename_desc,		(void *) null_rename },
	{ &vop_setattr_desc,		(void *) null_setattr },
	{ &vop_unlock_desc,		(void *) null_unlock },
	{ &vop_revoke_desc,		(void *) null_revoke },
	{ NULL, NULL }
};
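
/*
 * When this file is used as a template for a new layer (see "CREATING
 * OTHER FILE SYSTEM LAYERS" above), the derived layer's table usually
 * keeps vop_default_desc pointing at its bypass routine and overrides
 * only the operations it actually cares about.  A hypothetical sketch
 * (the "foo" names are placeholders, not part of this file):
 *
 *	struct vnodeopv_entry_desc foo_vnodeop_entries[] = {
 *		{ &vop_default_desc,	(void *) foo_bypass },
 *		{ &vop_getattr_desc,	(void *) foo_getattr },
 *		{ &vop_inactive_desc,	(void *) foo_inactive },
 *		{ &vop_reclaim_desc,	(void *) foo_reclaim },
 *		{ NULL, NULL }
 *	};
 */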