/*	$NetBSD: layer_vnops.c,v 1.7 2001/07/24 15:39:32 assar Exp $	*/

/*
 * Copyright (c) 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * This software was written by William Studenmund of the
 * Numerical Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the National Aeronautics & Space Administration
 *    nor the names of its contributors may be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
 * UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	$Id: layer_vnops.c,v 1.7 2001/07/24 15:39:32 assar Exp $
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Null Layer vnode routines.
 *
 * (See mount_null(8) for more information.)
 *
 * The layer.h, layer_extern.h, layer_vfs.c, and layer_vnops.c files provide
 * the core implementation of the null file system and most other stacked
 * fs's. The description below refers to the null file system, but the
 * services provided by the layer* files are useful for all layered fs's.
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects:  it is implemented using
 * stackable-layer techniques, and its null-nodes stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 * It is conceivable that other overlay filesystems will take different
 * parameters. For instance, data migration or access control layers might
 * only take one pathname, which will serve both as the target-pn and
 * alias-pn described above.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any layered nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the layered nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a layered node on top of the returned vnode.
 *
 * The bypass routine in this file, layer_bypass(), is suitable for use
 * by many different layered filesystems. It can be used by multiple
 * filesystems simultaneously. Alternatively, a layered fs may provide
 * its own bypass routine, in which case layer_bypass() should be used as
 * a model. For instance, the main functionality provided by umapfs, the user
 * identity mapping file system, is handled by a custom bypass routine.
 *
 * Typically a layered fs registers its selected bypass routine as the
 * default vnode operation in its vnodeopv_entry_desc table. Additionally
 * the filesystem must store the bypass entry point in the layerm_bypass
 * field of struct layer_mount. All other layer routines in this file will
 * use the layerm_bypass routine.
 *
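 * For instance, a layered fs built on these routines might register the
 * bypass routine like this (a minimal sketch; "example" is a stand-in
 * name, and a real table lists entries for many more operations):
 *
 *	const struct vnodeopv_entry_desc example_vnodeop_entries[] = {
 *		{ &vop_default_desc, layer_bypass },
 *		{ &vop_lookup_desc, layer_lookup },
 *		{ &vop_getattr_desc, layer_getattr },
 *		{ (struct vnodeop_desc*)NULL, (int(*)__P((void *)))NULL }
 *	};
 *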
 * Although the bypass routine handles most operations outright, a number
 * of operations are special cased, and handled by the layered fs. One
 * group, layer_setattr, layer_getattr, layer_access, layer_open, and
 * layer_fsync, perform layer-specific manipulation in addition to calling
 * the bypass routine. The other group consists of operations which are
 * not bypassed at all, as described next.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed. Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data. Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink). Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them. Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  layer_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is layer_getattr() in the null layer.
 *
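 * A minimal sketch of the first approach (the body of layer_getattr()
 * below does exactly this; the fsid fix-up is the layer-specific work):
 *
 *	if ((error = LAYERFS_DO_BYPASS(vp, ap)) != 0)
 *		return (error);
 *	ap->a_vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
 *	return (0);
 *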
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 *
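 * A minimal sketch of the second approach, assuming vp is one of our
 * vnodes (layer_lock() below uses this very pattern):
 *
 *	lowervp = LAYERVPTOLOWERVP(vp);
 *	error = VOP_LOCK(lowervp, flags);
 *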
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/genfs/layer.h>
#include <miscfs/genfs/layer_extern.h>
#include <miscfs/genfs/genfs.h>


/*
 * This is the 08-June-99 bypass routine, based on the 10-Apr-92 bypass
 *		routine by John Heidemann.
 *	The new element for this version is that the whole nullfs
 * system gained the concept of locks on the lower node, and locks on
 * our nodes. When returning from a call to the lower layer, we may
 * need to update lock state ONLY on our layer. The LAYERFS_UPPER*LOCK()
 * macros provide this functionality.
 *    The 10-Apr-92 version was optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *    Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * New for the 08-June-99 version: we also handle operations which unlock
 * the passed-in node (typically they vput the node).
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
layer_bypass(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	int (**our_vnodeop_p) __P((void *));
	struct vnode **this_vp_p;
	int error, error1;
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("layer_bypass: no vp's in map.\n");
#endif

	vps_p[0] = VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[0],ap);
	vp0 = *vps_p[0];
	flags = MOUNTTOLAYERMOUNT(vp0->v_mount)->layerm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf ("layer_bypass: %s\n", descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = LAYERVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*vps_p[0], descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLUNLOCK)
				LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		/*
		 * Only vop_lookup, vop_create, vop_mkdir, vop_bmap,
		 * vop_mknod, and vop_symlink return vpp's. vop_bmap
		 * doesn't call bypass as the lower vpp is fine (we're just
		 * going to do i/o on it). vop_lookup doesn't call bypass
		 * as a lookup on "." would generate a locking error.
		 * So all the calls which get us here have a locked vpp. :-)
		 */
		error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	return (error);
}

/*
 * We have to carry on the locking protocol on the layer vnodes
 * as we progress through the tree. We also have to enforce read-only
 * if this layer is mounted read-only.
 */
int
layer_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	int flags = cnp->cn_flags;
	struct vnode *dvp, *vp, *ldvp;
	int error, r;

	dvp = ap->a_dvp;

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	ldvp = LAYERVPTOLOWERVP(dvp);
	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	vp = *ap->a_vpp;

	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;
	/*
	 * We must do the same locking and unlocking at this layer as
	 * is done in the layers below us. It used to be we would try
	 * to guess based on what was set with the flags and error codes.
	 *
	 * But that doesn't work. So now we have the underlying VOP_LOOKUP
	 * tell us if it released the parent vnode, and we adjust the
	 * upper node accordingly. We can't just look at the lock states
	 * of the lower nodes as someone else might have come along and
	 * locked the parent node after our call to VOP_LOOKUP locked it.
	 */
	if (cnp->cn_flags & PDIRUNLOCK) {
		LAYERFS_UPPERUNLOCK(dvp, 0, r);
	}
	if (ldvp == vp) {
		/*
		 * Did lookup on "." or ".." in the root node of a mount point.
		 * So we return dvp after a VREF.
		 */
		*ap->a_vpp = dvp;
		VREF(dvp);
		vrele(vp);
	} else if (vp != NULL) {
		error = layer_node_create(dvp->v_mount, vp, ap->a_vpp);
	}
	return (error);
}

/*
 * Setattr call. Disallow write attempts if the layer is mounted read-only.
 */
int
layer_setattr(v)
	void *v;
{
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}
	return (LAYERFS_DO_BYPASS(vp, ap));
}

/*
 * We handle getattr only to change the fsid.
 */
int
layer_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	if ((error = LAYERFS_DO_BYPASS(vp, ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

int
layer_access(v)
	void *v;
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (LAYERFS_DO_BYPASS(vp, ap));
}

/*
 * We must handle open to be able to catch MNT_NODEV and friends.
 */
int
layer_open(v)
	void *v;
{
	struct vop_open_args *ap = v;
	struct vnode *vp = ap->a_vp;
	enum vtype lower_type = LAYERVPTOLOWERVP(vp)->v_type;

	if (((lower_type == VBLK) || (lower_type == VCHR)) &&
	    (vp->v_mount->mnt_flag & MNT_NODEV))
		return ENXIO;

	return LAYERFS_DO_BYPASS(vp, ap);
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
layer_lock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp, *lowervp;
	int	flags = ap->a_flags, error;

	if (vp->v_vnlock != NULL) {
		/*
		 * The lower level has exported a struct lock to us. Use
		 * it so that all vnodes in the stack lock and unlock
		 * simultaneously. Note: we don't DRAIN the lock as DRAIN
		 * decommissions the lock - just because our vnode is
		 * going away doesn't mean the struct lock below us is.
		 * LK_EXCLUSIVE is fine.
		 */
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			return(lockmgr(vp->v_vnlock,
				(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
				&vp->v_interlock));
		} else
			return(lockmgr(vp->v_vnlock, flags, &vp->v_interlock));
	} else {
		/*
		 * Ahh well. It would be nice if the fs we're over would
		 * export a struct lock for us to use, but it doesn't.
		 *
		 * To prevent race conditions involving doing a lookup
		 * on "..", we have to lock the lower node, then lock our
		 * node. Most of the time it won't matter that we lock our
		 * node (as any locking would need the lower one locked
		 * first). But we can LK_DRAIN the upper lock as a step
		 * towards decommissioning it.
		 */
		lowervp = LAYERVPTOLOWERVP(vp);
		if (flags & LK_INTERLOCK) {
			simple_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			error = VOP_LOCK(lowervp,
				(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE);
		} else
			error = VOP_LOCK(lowervp, flags);
		if (error)
			return (error);
		if ((error = lockmgr(&vp->v_lock, flags, &vp->v_interlock))) {
			VOP_UNLOCK(lowervp, 0);
		}
		return (error);
	}
}

/*
 * The unlock counterpart to layer_lock(): we release our own lock and,
 * when the lower layer hasn't exported a shared struct lock, pass the
 * unlock request down to the lower vnode as well.
 */
int
layer_unlock(v)
	void *v;
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int	flags = ap->a_flags;

	if (vp->v_vnlock != NULL) {
		return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
			&vp->v_interlock));
	} else {
		if (flags & LK_INTERLOCK) {
			simple_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		VOP_UNLOCK(LAYERVPTOLOWERVP(vp), flags);
		return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE,
			&vp->v_interlock));
	}
}

/*
 * As long as genfs_nolock is in use, don't call VOP_ISLOCKED(lowervp)
 * if vp->v_vnlock == NULL as genfs_noislocked will always report 0.
 */
int
layer_islocked(v)
	void *v;
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock != NULL)
		return (lockstatus(vp->v_vnlock));
	else
		return (lockstatus(&vp->v_lock));
}

/*
 * If vinvalbuf is calling us, it's a "shallow fsync" -- don't bother
 * syncing the underlying vnodes, since they'll be fsync'ed when
 * reclaimed; otherwise, pass it through to the underlying layer.
 *
 * XXX Do we still need to worry about shallow fsync?
 */
int
layer_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int  a_flags;
		off_t offlo;
		off_t offhi;
		struct proc *a_p;
	} */ *ap = v;

	if (ap->a_flags & FSYNC_RECLAIM) {
		return 0;
	}

	return (LAYERFS_DO_BYPASS(ap->a_vp, ap));
}


int
layer_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our layer_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	VOP_UNLOCK(vp, 0);

	/* ..., but don't cache the device node. */
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		vgone(vp);
	return (0);
}

int
layer_reclaim(v)
	void *v;
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(vp->v_mount);
	struct layer_node *xp = VTOLAYER(vp);
	struct vnode *lowervp = xp->layer_lowervp;

	/*
	 * Note: in vop_reclaim, the node's struct lock has been
	 * decommissioned, so we have to be careful about calling
	 * VOP's on ourself. Even if we turned a LK_DRAIN into an
	 * LK_EXCLUSIVE in layer_lock, we still must be careful as VXLOCK is
	 * set.
	 */
	if (vp == lmp->layerm_rootvp) {
		/*
		 * Oops! We no longer have a root node. The most likely
		 * reason is that someone forcibly unmounted the
		 * underlying fs.
		 *
		 * Now getting the root vnode will fail. We're dead. :-(
		 */
		lmp->layerm_rootvp = NULL;
	}
	/* After this assignment, this node will not be re-used. */
	xp->layer_lowervp = NULL;
	simple_lock(&lmp->layerm_hashlock);
	LIST_REMOVE(xp, layer_hash);
	simple_unlock(&lmp->layerm_hashlock);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele(lowervp);
	return (0);
}

/*
 * We just feed the returned vnode up to the caller - there's no need
 * to build a layer node on top of the node on which we're going to do
 * i/o. :-)
 */
int
layer_bmap(v)
	void *v;
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;
	struct vnode *vp;

	ap->a_vp = vp = LAYERVPTOLOWERVP(ap->a_vp);

	return (VCALL(vp, ap->a_desc->vdesc_offset, ap));
}

int
layer_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	printf ("\ttag VT_LAYERFS, vp=%p, lowervp=%p\n", vp, LAYERVPTOLOWERVP(vp));
	return (0);
}

/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
layer_strategy(v)
	void *v;
{
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = LAYERVPTOLOWERVP(bp->b_vp);

	error = VOP_STRATEGY(bp);

	bp->b_vp = savedvp;

	return (error);
}

/*
 * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
layer_bwrite(v)
	void *v;
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = LAYERVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}
887