/*	$NetBSD: layer_vnops.c,v 1.32 2007/10/10 20:42:29 ad Exp $	*/

/*
 * Copyright (c) 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * This software was written by William Studenmund of the
 * Numerical Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the National Aeronautics & Space Administration
 *    nor the names of its contributors may be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
 * UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	Id: lofs_vnops.c,v 1.11 1992/05/30 10:05:43 jsp Exp jsp
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Null Layer vnode routines.
 *
 * (See mount_null(8) for more information.)
 *
 * The layer.h, layer_extern.h, layer_vfs.c, and layer_vnops.c files provide
 * the core implementation of the null file system and most other stacked
 * fs's. The description below refers to the null file system, but the
 * services provided by the layer* files are useful for all layered fs's.
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects:  it is implemented using
 * stackable layer techniques, and its "null-node"s stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 * It is conceivable that other overlay filesystems will take different
 * parameters. For instance, data migration or access control layers might
 * take only one pathname, which will serve as both the target-pn and
 * alias-pn described above.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any layered nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the layered nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a layered node on top of the returned vnode.
 *
 * The bypass routine in this file, layer_bypass(), is suitable for use
 * by many different layered filesystems. It can be used by multiple
 * filesystems simultaneously. Alternatively, a layered fs may provide
 * its own bypass routine, in which case layer_bypass() should be used as
 * a model. For instance, the main functionality provided by umapfs, the user
 * identity mapping file system, is handled by a custom bypass routine.
 *
 * Typically a layered fs registers its selected bypass routine as the
 * default vnode operation in its vnodeopv_entry_desc table. Additionally,
 * the filesystem must store the bypass entry point in the layerm_bypass
 * field of struct layer_mount. All other layer routines in this file will
 * use the layerm_bypass routine. (A sketch of such a registration follows
 * the includes below.)
 *
 * Although the bypass routine handles most operations outright, a number
 * of operations are special cased and handled by the layered fs. One
 * group, layer_setattr, layer_getattr, layer_access, layer_open, and
 * layer_fsync, perform layer-specific manipulation in addition to calling
 * the bypass routine. The other group, described in the next paragraph,
 * is not bypassed at all.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed. Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data. Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink). Ideally these operations should not change the
 * lock state, but should instead let the caller of the
 * function unlock them. Otherwise, all intermediate vnode layers
 * (such as union, umapfs, etc.) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * effectively stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer, which would return a vnode representing
 * the UFS "sys".  layer_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is layer_getattr() below.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 * layer_bmap() and layer_bwrite() below use this approach.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: layer_vnops.c,v 1.32 2007/10/10 20:42:29 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kauth.h>

#include <miscfs/genfs/layer.h>
#include <miscfs/genfs/layer_extern.h>
#include <miscfs/genfs/genfs.h>

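/*
 * The following is an illustrative sketch only, kept out of the build
 * with #if 0: it shows how a hypothetical layered fs, here called
 * "examplefs", might register layer_bypass() as its default vnode
 * operation while listing the special-cased operations explicitly, as
 * described in the big comment above. The examplefs_* names are
 * inventions for this example; the layer_* entry points and the
 * vnodeopv_entry_desc machinery are the real interfaces declared in
 * layer_extern.h and <sys/vnode.h>.
 */
#if 0
int (**examplefs_vnodeop_p)(void *);
const struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vop_default_desc, layer_bypass },	/* everything else bypasses */
	{ &vop_lookup_desc, layer_lookup },	/* special-cased operations */
	{ &vop_setattr_desc, layer_setattr },
	{ &vop_getattr_desc, layer_getattr },
	{ &vop_access_desc, layer_access },
	{ &vop_open_desc, layer_open },
	{ &vop_fsync_desc, layer_fsync },
	{ &vop_lock_desc, layer_lock },
	{ &vop_unlock_desc, layer_unlock },
	{ &vop_inactive_desc, layer_inactive },
	{ &vop_reclaim_desc, layer_reclaim },
	{ &vop_print_desc, layer_print },
	{ NULL, NULL }
};
const struct vnodeopv_desc examplefs_vnodeop_opv_desc =
	{ &examplefs_vnodeop_p, examplefs_vnodeop_entries };

/*
 * The examplefs mount routine would also record the bypass entry point,
 *	lmp->layerm_bypass = layer_bypass;
 * so that the LAYERFS_DO_BYPASS() calls in this file reach it.
 */
#endif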

/*
 * This is the 08-June-99 bypass routine, based on the 10-Apr-92 bypass
 * routine by John Heidemann.
 *	The new element for this version is that the whole nullfs
 * system gained the concept of locks on the lower node, and locks on
 * our nodes. When returning from a call to the lower layer, we may
 * need to update lock state ONLY on our layer. The LAYERFS_UPPER*LOCK()
 * macros provide this functionality; a sketch of the idea follows this
 * function.
 *	The 10-Apr-92 version was optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *	Define SAFETY to include some error-checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * New for the 08-June-99 version: we also handle operations which unlock
 * the passed-in node (typically they vput the node).
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
layer_bypass(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	int (**our_vnodeop_p)(void *);
	struct vnode **this_vp_p;
	int error, error1;
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("%s: no vp's in map.\n", __func__);
#endif

	vps_p[0] =
	    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	vp0 = *vps_p[0];
	flags = MOUNTTOLAYERMOUNT(vp0->v_mount)->layerm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("%s: %s\n", __func__, descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
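	/*
	 * Note: vdesc_flags packs one WILLRELE/WILLUNLOCK flag per vnode
	 * slot; shifting `reles' right once per iteration keeps the
	 * VDESC_VP0_* masks aligned with the vnode currently being
	 * examined.
	 */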
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i],
		    ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = LAYERVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*vps_p[0], descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLUNLOCK)
				LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset, ap);
		/*
		 * Only vop_lookup, vop_create, vop_mkdir, vop_bmap,
		 * vop_mknod, and vop_symlink return vpp's. vop_bmap
		 * doesn't call bypass as the lower vpp is fine (we're just
		 * going to do i/o on it). vop_lookup doesn't call bypass
		 * as a lookup on "." would generate a locking error.
		 * So all the calls which get us here have a locked vpp. :-)
		 */
		error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
		if (error) {
			vput(**vppp);
			**vppp = NULL;
		}
	}

 out:
	return (error);
}
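
/*
 * A sketch of the idea behind the LAYERFS_UPPERUNLOCK() call above.
 * The authoritative definition lives in layer.h; this version, kept out
 * of the build with #if 0, only illustrates the intent: when the lower
 * layer has exported its lock (v_vnlock != NULL), the lower layer's own
 * unlock already released the whole stack, so only a private
 * upper-layer lock ever needs releasing here.
 */
#if 0
#define	LAYERFS_UPPERUNLOCK(v, flags, err) do {			\
	if ((v)->v_vnlock == NULL)					\
		(err) = lockmgr(&(v)->v_lock,				\
		    (flags) | LK_RELEASE, &(v)->v_interlock);		\
	else								\
		(err) = 0;						\
} while (0)
#endif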

/*
 * We have to carry on the locking protocol on the layer vnodes
 * as we progress through the tree. We also have to enforce read-only
 * if this layer is mounted read-only.
 */
int
layer_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	int flags = cnp->cn_flags;
	struct vnode *dvp, *lvp, *ldvp;
	int error;

	dvp = ap->a_dvp;

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	ldvp = LAYERVPTOLOWERVP(dvp);
	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	lvp = *ap->a_vpp;
	*ap->a_vpp = NULL;

	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/*
	 * We must do the same locking and unlocking at this layer as
	 * is done in the layers below us.
	 */
	if (ldvp == lvp) {
		/*
		 * We did a lookup on "." or ".." in the root node of a
		 * mount point, so we return dvp after a VREF.
		 */
		VREF(dvp);
		*ap->a_vpp = dvp;
		vrele(lvp);
	} else if (lvp != NULL) {
		/* dvp, ldvp and vp are all locked */
		error = layer_node_create(dvp->v_mount, lvp, ap->a_vpp);
		if (error) {
			vput(lvp);
		}
	}
	return (error);
}

/*
 * Setattr call. Disallow write attempts if the layer is mounted read-only.
 */
int
layer_setattr(v)
	void *v;
{
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}
	return (LAYERFS_DO_BYPASS(vp, ap));
}

/*
 * We handle getattr only to change the fsid.
 */
int
layer_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	if ((error = LAYERFS_DO_BYPASS(vp, ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];
	return (0);
}

int
layer_access(v)
	void *v;
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		int  a_mode;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only layers, unless the file
	 * is a socket, fifo, or a block or character device resident
	 * on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (LAYERFS_DO_BYPASS(vp, ap));
}

/*
 * We must handle open to be able to catch MNT_NODEV and friends.
 */
int
layer_open(v)
	void *v;
{
	struct vop_open_args *ap = v;
	struct vnode *vp = ap->a_vp;
	enum vtype lower_type = LAYERVPTOLOWERVP(vp)->v_type;

	if (((lower_type == VBLK) || (lower_type == VCHR)) &&
	    (vp->v_mount->mnt_flag & MNT_NODEV))
		return ENXIO;

	return LAYERFS_DO_BYPASS(vp, ap);
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
layer_lock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp, *lowervp;
	int	flags = ap->a_flags, error;

	if (vp->v_vnlock != NULL) {
		/*
		 * The lower level has exported a struct lock to us. Use
		 * it so that all vnodes in the stack lock and unlock
		 * simultaneously. Note: we don't DRAIN the lock as DRAIN
		 * decommissions the lock - just because our vnode is
		 * going away doesn't mean the struct lock below us is.
		 * LK_EXCLUSIVE is fine.
		 */
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			return (lockmgr(vp->v_vnlock,
				(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
				&vp->v_interlock));
		} else
			return (lockmgr(vp->v_vnlock, flags, &vp->v_interlock));
	} else {
		/*
		 * Ah well. It would be nice if the fs we're over would
		 * export a struct lock for us to use, but it doesn't.
		 *
		 * To prevent race conditions involving doing a lookup
		 * on "..", we have to lock the lower node, then lock our
		 * node. Most of the time it won't matter that we lock our
		 * node (as any locking would need the lower one locked
		 * first). But we can LK_DRAIN the upper lock as a step
		 * towards decommissioning it.
		 */
		lowervp = LAYERVPTOLOWERVP(vp);
		if (flags & LK_INTERLOCK) {
			simple_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			error = VOP_LOCK(lowervp,
				(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE);
		} else
			error = VOP_LOCK(lowervp, flags);
		if (error)
			return (error);
		if ((error = lockmgr(&vp->v_lock, flags, &vp->v_interlock))) {
			VOP_UNLOCK(lowervp, 0);
		}
		return (error);
	}
}

/*
 * Release the shared lock if the lower layer exported one; otherwise
 * unlock the lower node and then release our own lock, undoing the
 * protocol in layer_lock() above.
 */
int
layer_unlock(v)
	void *v;
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int	flags = ap->a_flags;

	if (vp->v_vnlock != NULL) {
		return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
			&vp->v_interlock));
	} else {
		if (flags & LK_INTERLOCK) {
			simple_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		VOP_UNLOCK(LAYERVPTOLOWERVP(vp), flags);
		return (lockmgr(&vp->v_lock, flags | LK_RELEASE,
			&vp->v_interlock));
	}
}

int
layer_islocked(v)
	void *v;
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int lkstatus;

	if (vp->v_vnlock != NULL)
		return lockstatus(vp->v_vnlock);

	lkstatus = VOP_ISLOCKED(LAYERVPTOLOWERVP(vp));
	if (lkstatus)
		return lkstatus;

	return lockstatus(&vp->v_lock);
}

/*
 * If vinvalbuf is calling us, it's a "shallow fsync" -- don't bother
 * syncing the underlying vnodes, since they'll be fsync'ed when
 * reclaimed; otherwise, pass it through to the underlying layer.
 *
 * XXX Do we still need to worry about shallow fsync?
 */
int
layer_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int  a_flags;
		off_t offlo;
		off_t offhi;
		struct lwp *a_l;
	} */ *ap = v;

	if (ap->a_flags & FSYNC_RECLAIM) {
		return 0;
	}

	return (LAYERFS_DO_BYPASS(ap->a_vp, ap));
}

int
layer_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our layer_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	VOP_UNLOCK(vp, 0);

	/*
	 * ..., but don't cache the device node. Also, if we did a
	 * remove, don't cache the node.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR
	    || (VTOLAYER(vp)->layer_flags & LAYERFS_REMOVED))
		vgone(vp);
	return (0);
}

int
layer_remove(v)
	void *v;
{
	struct vop_remove_args /* {
		struct vnode		*a_dvp;
		struct vnode		*a_vp;
		struct componentname	*a_cnp;
	} */ *ap = v;
	int		error;
	struct vnode	*vp = ap->a_vp;

	vref(vp);
	if ((error = LAYERFS_DO_BYPASS(vp, ap)) == 0)
		VTOLAYER(vp)->layer_flags |= LAYERFS_REMOVED;
	vrele(vp);

	return (error);
}

int
layer_rename(v)
	void *v;
{
	struct vop_rename_args  /* {
		struct vnode		*a_fdvp;
		struct vnode		*a_fvp;
		struct componentname	*a_fcnp;
		struct vnode		*a_tdvp;
		struct vnode		*a_tvp;
		struct componentname	*a_tcnp;
	} */ *ap = v;
	int error;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp;

	tvp = ap->a_tvp;
	if (tvp) {
		if (tvp->v_mount != fdvp->v_mount)
			tvp = NULL;
		else
			vref(tvp);
	}
	error = LAYERFS_DO_BYPASS(fdvp, ap);
	if (tvp) {
		if (error == 0)
			VTOLAYER(tvp)->layer_flags |= LAYERFS_REMOVED;
		vrele(tvp);
	}

	return (error);
}

int
layer_rmdir(v)
	void *v;
{
	struct vop_rmdir_args /* {
		struct vnode		*a_dvp;
		struct vnode		*a_vp;
		struct componentname	*a_cnp;
	} */ *ap = v;
	int		error;
	struct vnode	*vp = ap->a_vp;

	vref(vp);
	if ((error = LAYERFS_DO_BYPASS(vp, ap)) == 0)
		VTOLAYER(vp)->layer_flags |= LAYERFS_REMOVED;
	vrele(vp);

	return (error);
}

int
layer_reclaim(v)
	void *v;
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(vp->v_mount);
	struct layer_node *xp = VTOLAYER(vp);
	struct vnode *lowervp = xp->layer_lowervp;

	/*
	 * Note: in vop_reclaim, the node's struct lock has been
	 * decommissioned, so we have to be careful about calling
	 * VOP's on ourself. Even if we turned a LK_DRAIN into an
	 * LK_EXCLUSIVE in layer_lock, we still must be careful as VXLOCK is
	 * set.
	 */
	if (vp == lmp->layerm_rootvp) {
		/*
		 * Oops! We no longer have a root node. The most likely
		 * reason is that someone forcibly unmounted the
		 * underlying fs.
		 *
		 * Now getting the root vnode will fail. We're dead. :-(
		 */
		lmp->layerm_rootvp = NULL;
	}
	/* After this assignment, this node will not be re-used. */
	xp->layer_lowervp = NULL;
	mutex_enter(&lmp->layerm_hashlock);
	LIST_REMOVE(xp, layer_hash);
	mutex_exit(&lmp->layerm_hashlock);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele(lowervp);
	return (0);
}

/*
 * We just feed the returned vnode up to the caller - there's no need
 * to build a layer node on top of the node on which we're going to do
 * i/o. :-)
 */
int
layer_bmap(v)
	void *v;
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;
	struct vnode *vp;

	ap->a_vp = vp = LAYERVPTOLOWERVP(ap->a_vp);

	return (VCALL(vp, ap->a_desc->vdesc_offset, ap));
}

int
layer_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_LAYERFS, vp=%p, lowervp=%p\n", vp, LAYERVPTOLOWERVP(vp));
	return (0);
}

/*
 * XXX - vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
layer_bwrite(v)
	void *v;
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = LAYERVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}

int
layer_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	/*
	 * Just pass the request on to the underlying layer.
	 */
	if (ap->a_flags & PGO_LOCKED) {
		return EBUSY;
	}
	ap->a_vp = LAYERVPTOLOWERVP(vp);
	simple_unlock(&vp->v_interlock);
	simple_lock(&ap->a_vp->v_interlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
	return error;
}

int
layer_putpages(v)
	void *v;
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	/*
	 * Just pass the request on to the underlying layer.
	 */
	ap->a_vp = LAYERVPTOLOWERVP(vp);
	simple_unlock(&vp->v_interlock);
	if (ap->a_flags & PGO_RECLAIM) {
		return 0;
	}
	simple_lock(&ap->a_vp->v_interlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
	return error;
}
1004