xref: /csrg-svn/sys/miscfs/nullfs/null_vnops.c (revision 54951)
1 /*
2  * Copyright (c) 1992 The Regents of the University of California
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * John Heidemann of the UCLA Ficus project.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)null_vnops.c	1.7 (Berkeley) 07/11/92
11  *
12  * Ancestors:
13  *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
14  *	$Id: lofs_vnops.c,v 1.11 1992/05/30 10:05:43 jsp Exp jsp $
15  *	...and...
16  *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
17  */
18 
19 /*
20  * Null Layer
21  *
22  * (See mount_null(8) for more information.)
23  *
24  * The null layer duplicates a portion of the file system
25  * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects:  it is implemented using
 * stackable layer techniques, and its "null-nodes" stack above
 * all lower-layer vnodes, not just over directory vnodes.
30  *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
38  *
39  * The remainder of this man page examines the null layer as a basis
40  * for constructing new layers.
41  *
42  *
43  * INSTANTIATING NEW NULL LAYERS
44  *
45  * New null layers are created with mount_null(8).
46  * Mount_null(8) takes two arguments, the pathname
47  * of the lower vfs (target-pn) and the pathname where the null
48  * layer will appear in the namespace (alias-pn).  After
49  * the null layer is put into place, the contents
50  * of target-pn subtree will be aliased under alias-pn.
51  *
52  *
53  * OPERATION OF A NULL LAYER
54  *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
60  *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
68  *
69  * Although bypass handles most operations,
70  * vop_getattr, _inactive, _reclaim, and _print are not bypassed.
71  * Vop_getattr must change the fsid being returned.
72  * Vop_inactive and vop_reclaim are not bypassed so that
73  * they can handle freeing null-layer specific data.
74  * Vop_print is not bypassed to avoid excessive debugging
75  * information.
76  *
77  *
78  * INSTANTIATING VNODE STACKS
79  *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
83  *
84  * The initial mount creates a single vnode stack for the
85  * root of the new null layer.  All other vnode stacks
86  * are created as a result of vnode operations on
87  * this or other null vnode stacks.
88  *
 * New vnode stacks come into existence as a result of
90  * an operation which returns a vnode.
91  * The bypass routine stacks a null-node above the new
92  * vnode before returning it to the caller.
93  *
94  * For example, imagine mounting a null layer with
95  * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
97  * the root null-node (which was created when the null layer was mounted).
98  * Now consider opening "sys".  A vop_lookup would be
99  * done on the root null-node.  This operation would bypass through
100  * to the lower layer which would return a vnode representing
101  * the UFS "sys".  Null_bypass then builds a null-node
102  * aliasing the UFS "sys" and returns this to the caller.
103  * Later operations on the null-node "sys" will repeat this
104  * process when constructing other vnode stacks.
105  *
106  *
107  * CREATING OTHER FILE SYSTEM LAYERS
108  *
109  * One of the easiest ways to construct new file system layers is to make
110  * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
112  * all variables.
113  *
114  * The umap layer is an example of a layer descended from the
115  * null layer.
116  *
117  *
118  * INVOKING OPERATIONS ON LOWER LAYERS
119  *
120  * There are two techniques to invoke operations on a lower layer
121  * when the operation cannot be completely bypassed.  Each method
122  * is appropriate in different situations.  In both cases,
123  * it is the responsibility of the aliasing layer to make
124  * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
126  *
127  * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
132  *
 * A second approach is to directly invoke vnode operations on
134  * the lower layer with the VOP_OPERATIONNAME interface.
135  * The advantage of this method is that it is easy to invoke
136  * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
138  *
139  */
140 
141 #include <sys/param.h>
142 #include <sys/systm.h>
143 #include <sys/proc.h>
144 #include <sys/time.h>
145 #include <sys/types.h>
146 #include <sys/vnode.h>
147 #include <sys/mount.h>
148 #include <sys/namei.h>
149 #include <sys/malloc.h>
150 #include <sys/buf.h>
151 #include <nullfs/null.h>
152 
153 
int null_bug_bypass = 0;   /* debugging: non-zero makes null_bypass print each op name */
155 
156 /*
157  * This is the 10-Apr-92 bypass routine.
158  *    This version has been optimized for speed, throwing away some
159  * safety checks.  It should still always work, but it's not as
160  * robust to programmer errors.
161  *    Define SAFETY to include some error checking code.
162  *
163  * In general, we map all vnodes going down and unmap them on the way back.
164  * As an exception to this, vnodes can be marked "unmapped" by setting
165  * the Nth bit in operation's vdesc_flags.
166  *
167  * Also, some BSD vnode operations have the side effect of vrele'ing
168  * their arguments.  With stacking, the reference counts are held
169  * by the upper node, not the lower one, so we must handle these
170  * side-effects here.  This is not of concern in Sun-derived systems
171  * since there are no such side-effects.
172  *
173  * This makes the following assumptions:
174  * - only one returned vpp
175  * - no INOUT vpp's (Sun's vop_open has one of these)
176  * - the vnode operation vector of the first vnode should be used
177  *   to determine what implementation of the op should be invoked
178  * - all mapped vnodes are of our vnode-type (NEEDSWORK:
179  *   problems on rmdir'ing mount points and renaming?)
180  */
int
null_bypass(ap)
	struct vop_generic_args *ap;
{
	extern int (**null_vnodeop_p)();  /* not extern, really "forward" */
	register struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];	/* saved upper vnodes; NULL = slot not mapped */
	struct vnode **vps_p[VDESC_MAX_VPS];	/* addresses of the vnode slots inside *ap */
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map.\n");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	/* Bit i of vdesc_flags set => the op vrele's vnode argument i. */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p)->v_op != null_vnodeop_p) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			/* Extra reference pays for the lower layer's vrele. */
			if (reles & 1)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			/* Drop the upper-layer reference the op logically consumed. */
			if (reles & 1)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		/* old_vps[0] is always valid: the first vp is always mapped above. */
		error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	return (error);
}
284 
285 
286 /*
287  *  We handle getattr only to change the fsid.
288  */
289 int
290 null_getattr(ap)
291 	struct vop_getattr_args *ap;
292 {
293 	int error;
294 	if (error = null_bypass(ap))
295 		return error;
296 	/* Requires that arguments be restored. */
297 	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
298 	return 0;
299 }
300 
301 
int
null_inactive (ap)
	struct vop_inactive_args *ap;
{
	/*
	 * Deliberately do nothing here -- and deliberately do
	 * not bypass.  The reference on the lower vnode is held
	 * until reclaim time, so that until then this null_node
	 * stays in the cache and remains reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	return (0);
}
320 
321 int
322 null_reclaim (ap)
323 	struct vop_reclaim_args *ap;
324 {
325 	struct vnode *vp = ap->a_vp;
326 	struct null_node *xp = VTONULL(vp);
327 	struct vnode *lowervp = xp->null_lowervp;
328 
329 	/*
330 	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
331 	 * so we can't call VOPs on ourself.
332 	 */
333 	/* After this assignment, this node will not be re-used. */
334 	xp->null_lowervp = NULL;
335 	remque(xp);
336 	FREE(vp->v_data, M_TEMP);
337 	vp->v_data = NULL;
338 	vrele (lowervp);
339 	return 0;
340 }
341 
342 
343 int
344 null_print (ap)
345 	struct vop_print_args *ap;
346 {
347 	register struct vnode *vp = ap->a_vp;
348 	printf ("\ttag VT_NULLFS, vp=%x, lowervp=%x\n", vp, NULLVPTOLOWERVP(vp));
349 	return 0;
350 }
351 
352 
353 /*
354  * XXX - vop_strategy must be hand coded because it has no
355  * vnode in its arguments.
356  * This goes away with a merged VM/buffer cache.
357  */
358 int
359 null_strategy (ap)
360 	struct vop_strategy_args *ap;
361 {
362 	struct buf *bp = ap->a_bp;
363 	int error;
364 	struct vnode *savedvp;
365 
366 	savedvp = bp->b_vp;
367 	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);
368 
369 	error = VOP_STRATEGY(bp);
370 
371 	bp->b_vp = savedvp;
372 
373 	return error;
374 }
375 
376 
377 /*
378  * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
379  * vnode in its arguments.
380  * This goes away with a merged VM/buffer cache.
381  */
382 int
383 null_bwrite (ap)
384 	struct vop_bwrite_args *ap;
385 {
386 	struct buf *bp = ap->a_bp;
387 	int error;
388 	struct vnode *savedvp;
389 
390 	savedvp = bp->b_vp;
391 	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);
392 
393 	error = VOP_BWRITE(bp);
394 
395 	bp->b_vp = savedvp;
396 
397 	return error;
398 }
399 
400 
401 /*
402  * Global vfs data structures
403  */
int (**null_vnodeop_p)();	/* op vector for null vnodes; initialized elsewhere */
struct vnodeopv_entry_desc null_vnodeop_entries[] = {
	{ &vop_default_desc, null_bypass },	/* every op not listed below */

	/* Handled locally; see each routine for why it is not bypassed. */
	{ &vop_getattr_desc, null_getattr },
	{ &vop_inactive_desc, null_inactive },
	{ &vop_reclaim_desc, null_reclaim },
	{ &vop_print_desc, null_print },

	/* Hand coded: these ops carry no vnode argument for bypass to map. */
	{ &vop_strategy_desc, null_strategy },
	{ &vop_bwrite_desc, null_bwrite },

	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }	/* terminator */
};
struct vnodeopv_desc null_vnodeop_opv_desc =
	{ &null_vnodeop_p, null_vnodeop_entries };
420