/*
 * Copyright (c) 1992 The Regents of the University of California
 * All rights reserved.
 *
 * This code is derived from the null layer of
 * John Heidemann from the UCLA Ficus project and
 * Jan-Simon Pendry's loopback file system.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)umap_vnops.c	1.1 (Berkeley) 07/11/92
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	$Id: lofs_vnops.c,v 1.11 1992/05/30 10:05:43 jsp Exp jsp $
 *	...and...
 *	@(#)umap_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Umap Layer
 *
 * The umap layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects:  it is implemented using
 * a bypass operation, and its umap-nodes stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The umap layer is close to the minimal file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  All but vop_getattr, _inactive, _reclaim,
 * _print, and (for now) _bmap and _strategy are bypassed.
 *
 * Vop_getattr is not bypassed so that we can change the fsid and
 * map the uid/gid being returned.  Vop_{inactive,reclaim} are not
 * bypassed so that they can free umap-layer specific data.
 * Vop_print is not bypassed, to aid debugging.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * NEEDSWORK: Describe methods to invoke operations on the lower layer
 * (bypass vs. VOP).  For now, see the sketch following the includes
 * below.
 *
 *
 * CREATING NEW FILESYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the umap layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.  (A sketch of the minimal op table such a layer
 * would need appears at the end of this file.)
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <umapfs/umap.h>


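/*
 * Illustrative sketch, normally compiled out: the two ways a layer
 * operation can reach the lower layer, as promised by the NEEDSWORK
 * note in the header comment.  The "umap_example_*" names are
 * hypothetical and not part of the real layer.
 */
#ifdef UMAP_EXAMPLE
int umap_bypass();

/*
 * 1. Bypass: hand the whole argument structure to umap_bypass(),
 *    which maps every vnode argument to its lower-layer counterpart
 *    and re-invokes the operation there.  This is what the default
 *    table entry at the end of this file does for every operation
 *    the layer doesn't override.
 */
int
umap_example_setattr(ap)
	struct vop_setattr_args *ap;
{
	return (umap_bypass((struct vop_generic_args *)ap));
}

/*
 * 2. Direct VOP call: map the vnode by hand with UMAPVPTOLOWERVP()
 *    and invoke one specific operation on it, as umap_bmap() below
 *    does for real.
 */
int
umap_example_bmap(ap)
	struct vop_bmap_args *ap;
{
	return (VOP_BMAP(UMAPVPTOLOWERVP(ap->a_vp), ap->a_bn,
	    ap->a_vpp, ap->a_bnp));
}
#endif /* UMAP_EXAMPLE */
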
int umap_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */

/*
 * This is the 10-Apr-92 bypass routine.
 *    This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *    Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags (see the sketch below).
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This routine makes the following assumptions:
 * - only one vpp is returned
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
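
/*
 * Sketch (hypothetical helper, compiled out) of the vdesc_flags
 * convention the bypass relies on: bit i set means the operation
 * vrele's its i'th vnode argument, so the loops below VREF the lower
 * vnode going down and vrele the saved upper vnode coming back.
 */
#ifdef UMAP_EXAMPLE
static int
umap_example_willrele(descp, i)
	struct vnodeop_desc *descp;
	int i;
{
	return ((descp->vdesc_flags >> i) & 1);
}
#endif /* UMAP_EXAMPLE */
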
int
umap_bypass(ap)
	struct vop_generic_args *ap;
{
	extern int (**umap_vnodeop_p)();  /* not extern, really "forward" */
	int *mapdata, nentries;
	int *gmapdata, gnentries;
	struct ucred **credpp, *credp, *savecredp, *saveucredp;
	register struct vnode **this_vp_p;
	int error;
	struct vnode *vp1 = NULL;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (umap_bug_bypass)
		printf("umap_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("umap_bypass: no vp's in map.\n");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i], ap);

		if (i == 0)
			vp1 = *vps_p[0];

		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.
		 */
		if ((*this_vp_p)->v_op != umap_vnodeop_p) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
			if (reles & 1)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Doctor the credentials.  (That's the purpose of this layer.)
	 * A sketch of the mapping helper follows this function.
	 */
	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {

		credpp = VOPARG_OFFSETTO(struct ucred**,
		    descp->vdesc_cred_offset, ap);

		/* Save old values */
		savecredp = (*credpp);
		saveucredp = u.u_cred;
		(*credpp) = u.u_cred = crdup(savecredp);
		credp = *credpp;

		if (umap_bug_bypass && credp->cr_uid != 0)
			printf("umap_bypass: user was %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);

		nentries = MOUNTTOUMAPMOUNT(vp1->v_mount)->info_nentries;
		mapdata = &(MOUNTTOUMAPMOUNT(vp1->v_mount)->info_mapdata[0][0]);
		gnentries = MOUNTTOUMAPMOUNT(vp1->v_mount)->info_gnentries;
		gmapdata = &(MOUNTTOUMAPMOUNT(vp1->v_mount)->info_gmapdata[0][0]);

		if (umap_bug_bypass && credp->cr_uid != 0)
			printf("nentries = %d, gnentries = %d\n", nentries,
			    gnentries);

		/* Map all ids in the credential structure. */
		umap_mapids(credp, mapdata, nentries, gmapdata, gnentries);

		if (umap_bug_bypass && credp->cr_uid != 0)
			printf("umap_bypass: user now %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset, ap);
		/*
		 * XXX - even though symlink has a vpp arg,
		 * it doesn't return a VREF'ed vpp in that
		 * field.  The vpp arg should become a vp
		 * arg.
		 */
		if (descp == VDESC(vop_symlink)) {
#ifdef UMAPFS_DIAGNOSTIC
			printf("umap_bypass (symlink), lowervp->usecount = %d\n",
			    (**vppp)->v_usecount);
#endif
			return (error);
		}
		error = umap_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
		if (umap_bug_bypass && credp->cr_uid != 0)
			printf("umap_bypass: returning-user was %d\n",
			    credp->cr_uid);

		crfree(credp);
		(*credpp) = savecredp;
		u.u_cred = saveucredp;
		if (umap_bug_bypass && (*credpp)->cr_uid != 0)
			printf("umap_bypass: returning-user now %d\n\n",
			    (*credpp)->cr_uid);
	}

	return (error);
}
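
/*
 * Sketch (compiled out) of the credential-mapping step above.  The
 * real umap_mapids() lives elsewhere in the umap layer (umap_subr.c);
 * the layout assumed here is that mapdata points at nentries
 * (from-id, to-id) pairs, and gmapdata likewise for groups.
 */
#ifdef UMAP_EXAMPLE
static void
umap_example_mapids(credp, mapdata, nentries, gmapdata, gnentries)
	struct ucred *credp;
	int *mapdata, nentries, *gmapdata, gnentries;
{
	int i;

	/* Map the effective uid through the uid pairs. */
	for (i = 0; i < nentries; i++)
		if (mapdata[2*i] == credp->cr_uid) {
			credp->cr_uid = (uid_t)mapdata[2*i + 1];
			break;
		}

	/* Map the effective gid through the gid pairs. */
	for (i = 0; i < gnentries; i++)
		if (gmapdata[2*i] == credp->cr_gid) {
			credp->cr_gid = (gid_t)gmapdata[2*i + 1];
			break;
		}
}
#endif /* UMAP_EXAMPLE */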


/*
 * We handle getattr only to change the fsid and to map the
 * returned uid and gid.
 */
int
umap_getattr(ap)
	struct vop_getattr_args *ap;
{
	short uid, gid;
	int error, tmpid, *mapdata, nentries, *gmapdata, gnentries;
	struct vnodeop_desc *descp = ap->a_desc;
	struct vnode **vp1p;

	if (error = umap_bypass((struct vop_generic_args *)ap))
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	/*
	 * Umap needs to map the uid and gid returned by a stat
	 * into the proper values for this site.  This involves
	 * finding the returned uid in the mapping information,
	 * translating it into the uid on the other end,
	 * and filling in the proper field in the vattr
	 * structure pointed to by ap->a_vap.  The group
	 * is easier, since currently all groups will be
	 * translated to NULLGROUP.
	 */

	/* Find entry in map */
	uid = ap->a_vap->va_uid;
	gid = ap->a_vap->va_gid;
	if (umap_bug_bypass)
		printf("umap_getattr: mapped uid = %d, mapped gid = %d\n",
		    uid, gid);

	vp1p = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	nentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
	mapdata = &(MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata[0][0]);
	gnentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
	gmapdata = &(MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata[0][0]);

	/*
	 * Reverse map the uid for the vnode.  Since it's a reverse
	 * map, we can't use umap_mapids() to do it; a sketch of the
	 * reverse lookup follows this function.
	 */
	tmpid = umap_reverse_findid(uid, mapdata, nentries);

	if (tmpid != -1) {
		ap->a_vap->va_uid = (uid_t)tmpid;
		if (umap_bug_bypass)
			printf("umap_getattr: original uid = %d\n", uid);
	} else
		ap->a_vap->va_uid = (uid_t)NOBODY;

	/* Reverse map the gid for the vnode. */
	tmpid = umap_reverse_findid(gid, gmapdata, gnentries);

	if (tmpid != -1) {
		ap->a_vap->va_gid = (gid_t)tmpid;
		if (umap_bug_bypass)
			printf("umap_getattr: original gid = %d\n", gid);
	} else
		ap->a_vap->va_gid = (gid_t)NULLGROUP;

	return (0);
}
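
/*
 * Sketch (compiled out) of the reverse lookup used above.  The real
 * umap_reverse_findid() lives in umap_subr.c; the map is assumed to
 * be nentries (from-id, to-id) pairs, searched on the to-id column,
 * returning the from-id or -1 when the id isn't mapped.
 */
#ifdef UMAP_EXAMPLE
static int
umap_example_reverse_findid(id, map, nentries)
	int id, *map, nentries;
{
	int i;

	for (i = 0; i < nentries; i++)
		if (map[2*i + 1] == id)
			return (map[2*i]);
	return (-1);
}
#endif /* UMAP_EXAMPLE */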

int
umap_inactive(ap)
	struct vop_inactive_args *ap;
{
#ifdef UMAPFS_DIAGNOSTIC
	printf("umap_inactive(ap->a_vp = %x->%x)\n",
	    ap->a_vp, UMAPVPTOLOWERVP(ap->a_vp));
#endif
	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our umap_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	return (0);
}

int
umap_reclaim(ap)
	struct vop_reclaim_args *ap;
{
	/*
	 * Note: at this point, ap->a_vp->v_op == dead_vnodeop_p.
	 */
#ifdef UMAPFS_DIAGNOSTIC
	printf("umap_reclaim(ap->a_vp = %x->%x)\n",
	    ap->a_vp, UMAPVPTOLOWERVP(ap->a_vp));
#endif
	remque(VTOUMAP(ap->a_vp));	     /* NEEDSWORK: What? */
	vrele(UMAPVPTOLOWERVP(ap->a_vp));    /* release lower layer */
	FREE(ap->a_vp->v_data, M_TEMP);
	ap->a_vp->v_data = 0;
	return (0);
}

int
umap_bmap(ap)
	struct vop_bmap_args *ap;
{
#ifdef UMAPFS_DIAGNOSTIC
	printf("umap_bmap(ap->a_vp = %x->%x)\n",
	    ap->a_vp, UMAPVPTOLOWERVP(ap->a_vp));
#endif
	return (VOP_BMAP(UMAPVPTOLOWERVP(ap->a_vp), ap->a_bn, ap->a_vpp,
	    ap->a_bnp));
}

int
umap_strategy(ap)
	struct vop_strategy_args *ap;
{
	int error;
	struct vnode *savedvp;

#ifdef UMAPFS_DIAGNOSTIC
	printf("umap_strategy(vp = %x->%x)\n",
	    ap->a_bp->b_vp, UMAPVPTOLOWERVP(ap->a_bp->b_vp));
#endif
	/*
	 * Point the buffer at the lower vnode for the duration of
	 * the call; otherwise VOP_STRATEGY() would dispatch right
	 * back into this layer.
	 */
	savedvp = ap->a_bp->b_vp;
	ap->a_bp->b_vp = UMAPVPTOLOWERVP(savedvp);

	error = VOP_STRATEGY(ap->a_bp);

	ap->a_bp->b_vp = savedvp;

	return (error);
}


int
umap_print(ap)
	struct vop_print_args *ap;
{
	register struct vnode *vp = ap->a_vp;

	printf("\ttag VT_UMAPFS, vp=%x, lowervp=%x\n", vp, UMAPVPTOLOWERVP(vp));
	return (0);
}


/*
 * Global vfs data structures
 */
/*
 * NEEDSWORK: strategy and bmap are hand coded currently.  They should
 * go away with a merged buffer/block cache.
 */
int (**umap_vnodeop_p)();
struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
	{ &vop_default_desc, umap_bypass },

	{ &vop_getattr_desc, umap_getattr },
	{ &vop_inactive_desc, umap_inactive },
	{ &vop_reclaim_desc, umap_reclaim },
	{ &vop_print_desc, umap_print },

	{ &vop_bmap_desc, umap_bmap },
	{ &vop_strategy_desc, umap_strategy },

	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc umap_vnodeop_opv_desc =
	{ &umap_vnodeop_p, umap_vnodeop_entries };
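
/*
 * Sketch (compiled out) of the minimal op table a new layer cloned
 * from this one would need, per the CREATING NEW FILESYSTEM LAYERS
 * note above: a default entry pointing at its bypass, plus
 * inactive/reclaim overrides to manage its own node cache.  The
 * "newlayer_*" names are hypothetical.
 */
#ifdef UMAP_EXAMPLE
int newlayer_bypass(), newlayer_inactive(), newlayer_reclaim();

int (**newlayer_vnodeop_p)();
struct vnodeopv_entry_desc newlayer_vnodeop_entries[] = {
	{ &vop_default_desc, newlayer_bypass },
	{ &vop_inactive_desc, newlayer_inactive },
	{ &vop_reclaim_desc, newlayer_reclaim },
	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc newlayer_vnodeop_opv_desc =
	{ &newlayer_vnodeop_p, newlayer_vnodeop_entries };
#endif /* UMAP_EXAMPLE */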