/*	$NetBSD: umap_vnops.c,v 1.32 2004/06/30 17:42:55 hannken Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)umap_vnops.c	8.6 (Berkeley) 5/22/95
 */

/*
 * Umap Layer
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: umap_vnops.c,v 1.32 2004/06/30 17:42:55 hannken Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/umapfs/umap.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/layer_extern.h>

int	umap_lookup	__P((void *));
int	umap_getattr	__P((void *));
int	umap_print	__P((void *));
int	umap_rename	__P((void *));

/*
 * Global vfs data structures
 */
/*
 * XXX - strategy, bwrite are hand coded currently.  They should
 * go away with a merged buffer/block cache.
 *
 */
int (**umap_vnodeop_p) __P((void *));
const struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
	{ &vop_default_desc,	umap_bypass },

	{ &vop_lookup_desc,	umap_lookup },
	{ &vop_getattr_desc,	umap_getattr },
	{ &vop_print_desc,	umap_print },
	{ &vop_rename_desc,	umap_rename },

	{ &vop_lock_desc,	layer_lock },
	{ &vop_unlock_desc,	layer_unlock },
	{ &vop_islocked_desc,	layer_islocked },
	{ &vop_fsync_desc,	layer_fsync },
	{ &vop_inactive_desc,	layer_inactive },
	{ &vop_reclaim_desc,	layer_reclaim },
	{ &vop_open_desc,	layer_open },
	{ &vop_setattr_desc,	layer_setattr },
	{ &vop_access_desc,	layer_access },
	{ &vop_remove_desc,	layer_remove },
	{ &vop_rmdir_desc,	layer_rmdir },

	{ &vop_bwrite_desc,	layer_bwrite },
	{ &vop_bmap_desc,	layer_bmap },
	{ &vop_getpages_desc,	layer_getpages },
	{ &vop_putpages_desc,	layer_putpages },

	{ NULL, NULL }
};
const struct vnodeopv_desc umapfs_vnodeop_opv_desc =
	{ &umap_vnodeop_p, umap_vnodeop_entries };
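
/*
 * Illustrative sketch (not part of the build): with the table above, any
 * operation without a umap-specific entry resolves through vop_default_desc
 * to umap_bypass().  A caller-side invocation of, say, VOP_CLOSE() on a
 * umapfs vnode uvp expands to roughly:
 *
 *	struct vop_close_args a;
 *
 *	a.a_desc = VDESC(vop_close);
 *	a.a_vp = uvp;
 *	a.a_fflag = fflag;
 *	a.a_cred = cred;
 *	a.a_p = p;
 *	error = VCALL(uvp, VOFFSET(vop_close), &a);	-> umap_bypass(&a)
 *
 * Only lookup, getattr, print and rename need umap-specific code; the rest
 * either bypass to the lower layer with mapped credentials or use the
 * generic layerfs routines.
 */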

/*
 * This is the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
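/*
 * In outline (descriptive only), the bypass:
 *	1. replaces each umapfs vnode in the argument block with its
 *	   lower-layer vnode (the first vp is always mapped);
 *	2. duplicates and maps any credential (and componentname
 *	   credential) through the mount's id maps;
 *	3. calls the same operation on the lower layer via VCALL();
 *	4. restores the original vnodes and credentials, and wraps a
 *	   returned vpp in a new umapfs node via layer_node_create().
 */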
int
umap_bypass(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	int (**our_vnodeop_p) __P((void *));
	struct ucred **credpp = 0, *credp = 0;
	struct ucred *savecredp = 0, *savecompcredp = 0;
	struct ucred *compcredp = 0;
	struct vnode **this_vp_p;
	int error, error1;
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;
	struct componentname **compnamepp = 0;

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("%s: no vp's in map.\n", __func__);
#endif

	vps_p[0] =
	    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	vp0 = *vps_p[0];
	flags = MOUNTTOUMAPMOUNT(vp0->v_mount)->umapm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("%s: %s\n", __func__, descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i],
		    ap);
		/*
		 * We're not guaranteed that any vnode but the first
		 * is of our type.  Check for, and don't map, any
		 * that aren't.  (We must always map the first vp,
		 * or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 */

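	/*
	 * Conceptual sketch only: umap_mapids() (umap_subr.c) rewrites the
	 * uid and gid of the duplicated credential through the mount's id
	 * maps; e.g. a hypothetical map entry pairing uid 1000 above the
	 * layer with uid 5000 below would make operations issued by uid
	 * 1000 hit the underlying file system as uid 5000.
	 */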
	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {

		credpp = VOPARG_OFFSETTO(struct ucred**,
		    descp->vdesc_cred_offset, ap);

		/* Save old values */

		savecredp = *credpp;
		if (savecredp != NOCRED)
			*credpp = crdup(savecredp);
		credp = *credpp;

		if ((flags & LAYERFS_MBYPASSDEBUG) && credp->cr_uid != 0)
			printf("umap_bypass: user was %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, credp);

		if ((flags & LAYERFS_MBYPASSDEBUG) && credp->cr_uid != 0)
			printf("umap_bypass: user now %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);
	}

	/* BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it better get mapped, too.
	 */

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {

		compnamepp = VOPARG_OFFSETTO(struct componentname**,
		    descp->vdesc_componentname_offset, ap);

		savecompcredp = (*compnamepp)->cn_cred;
		if (savecompcredp != NOCRED)
			(*compnamepp)->cn_cred = crdup(savecompcredp);
		compcredp = (*compnamepp)->cn_cred;

		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
			printf("umap_bypass: component credit user was %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, compcredp);

		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
			printf("umap_bypass: component credit user now %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*vps_p[0], descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLUNLOCK)
				LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset, ap);
		/*
		 * Only vop_lookup, vop_create, vop_mkdir, vop_bmap,
		 * vop_mknod, and vop_symlink return vpp's. vop_bmap
		 * doesn't call bypass as the lower vpp is fine (we're just
		 * going to do i/o on it). vop_lookup doesn't call bypass
		 * as a lookup on "." would generate a locking error.
		 * So all the calls which get us here have a locked vpp. :-)
		 */
		error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
		if (error) {
			vput(**vppp);
			**vppp = NULL;
		}
	}

 out:
	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && credp &&
					credp->cr_uid != 0)
			printf("umap_bypass: returning-user was %d\n",
			    credp->cr_uid);

		if (savecredp != NOCRED) {
			crfree(credp);
			*credpp = savecredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && credpp &&
					(*credpp)->cr_uid != 0)
			 	printf("umap_bypass: returning-user now %d\n\n",
				    savecredp->cr_uid);
		}
	}

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
					compcredp->cr_uid != 0)
			printf("umap_bypass: returning-component-user was %d\n",
			    compcredp->cr_uid);

		if (savecompcredp != NOCRED) {
			crfree(compcredp);
			(*compnamepp)->cn_cred = savecompcredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
					savecompcredp->cr_uid != 0)
			 	printf("umap_bypass: returning-component-user now %d\n",
				    savecompcredp->cr_uid);
		}
	}

	return (error);
}

/*
 * This is based on the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
int
umap_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *savecompcredp = NULL;
	struct ucred *compcredp = NULL;
	struct vnode *dvp, *vp, *ldvp;
	struct mount *mp;
	int error;
	int i, flags, cnf = cnp->cn_flags;

	dvp = ap->a_dvp;
	mp = dvp->v_mount;

	if ((cnf & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
		(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	flags = MOUNTTOUMAPMOUNT(mp)->umapm_flags;
	ldvp = UMAPVPTOLOWERVP(dvp);

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("umap_lookup\n");

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 *
	 * BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it better get mapped, too.
	 */

	if ((savecompcredp = cnp->cn_cred)) {
		compcredp = crdup(savecompcredp);
		cnp->cn_cred = compcredp;

		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
			printf("umap_lookup: component credit user was %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);

		/* Map all ids in the credential structure. */
		umap_mapids(mp, compcredp);

		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
			printf("umap_lookup: component credit user now %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);
	}

	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	vp = *ap->a_vpp;
	*ap->a_vpp = NULL;

	if (error == EJUSTRETURN && (cnf & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/* Do locking fixup as appropriate. See layer_lookup() for info */
	if ((cnp->cn_flags & PDIRUNLOCK)) {
		LAYERFS_UPPERUNLOCK(dvp, 0, i);
	}
	if (ldvp == vp) {
		*ap->a_vpp = dvp;
		VREF(dvp);
		vrele(vp);
	} else if (vp != NULL) {
		error = layer_node_create(mp, vp, ap->a_vpp);
		if (error) {
			vput(vp);
			if (cnp->cn_flags & PDIRUNLOCK) {
				if (vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY) == 0)
					cnp->cn_flags &= ~PDIRUNLOCK;
			}
		}
	}

	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
					compcredp->cr_uid != 0)
		printf("umap_lookup: returning-component-user was %d\n",
			    compcredp->cr_uid);

	if (savecompcredp != NOCRED && savecompcredp != NULL) {
		crfree(compcredp);
		cnp->cn_cred = savecompcredp;
		if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
				savecompcredp->cr_uid != 0)
		 	printf("umap_lookup: returning-component-user now %d\n",
			    savecompcredp->cr_uid);
	}

	return (error);
}

/*
 *  We handle getattr to change the fsid and to reverse map
 *  the uid and gid returned by the underlying file system.
 */
int
umap_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	uid_t uid;
	gid_t gid;
	int error, tmpid, nentries, gnentries, flags;
	u_long (*mapdata)[2];
	u_long (*gmapdata)[2];
	struct vnode **vp1p;
	const struct vnodeop_desc *descp = ap->a_desc;

	if ((error = umap_bypass(ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];

	flags = MOUNTTOUMAPMOUNT(ap->a_vp->v_mount)->umapm_flags;
	/*
	 * Umap needs to map the uid and gid returned by a stat
	 * into the proper values for this site.  This involves
	 * finding the returned uid in the mapping information,
	 * translating it into the uid on the other end,
	 * and filling in the proper field in the vattr
	 * structure pointed to by ap->a_vap.  The group
	 * is easier, since currently all groups are
	 * translated to NULLGROUP.
	 */
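	/*
	 * Sketch of the reverse lookup relied on below (illustrative
	 * only; the real umap_reverse_findid() lives in umap_subr.c,
	 * and the column order shown here is an assumption):
	 *
	 *	for (i = 0; i < nentries; i++)
	 *		if (mapdata[i][1] == uid)
	 *			return (mapdata[i][0]);
	 *	return (-1);
	 *
	 * A -1 result means the on-disk id has no mapping; the code
	 * below then substitutes NOBODY (uids) or NULLGROUP (gids).
	 */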

	/* Find entry in map */

	uid = ap->a_vap->va_uid;
	gid = ap->a_vap->va_gid;
	if ((flags & LAYERFS_MBYPASSDEBUG))
		printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
		    gid);

	vp1p = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	nentries =  MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
	mapdata =  (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata);
	gnentries =  MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
	gmapdata =  (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata);

	/*
	 * Reverse map the uid for the vnode.  Since it's a reverse
	 * map, we can't use umap_mapids() to do it.
	 */

	tmpid = umap_reverse_findid(uid, mapdata, nentries);

	if (tmpid != -1) {
		ap->a_vap->va_uid = (uid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original uid = %d\n", uid);
	} else
		ap->a_vap->va_uid = (uid_t) NOBODY;

	/* Reverse map the gid for the vnode. */

	tmpid = umap_reverse_findid(gid, gmapdata, gnentries);

	if (tmpid != -1) {
		ap->a_vap->va_gid = (gid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original gid = %d\n", gid);
	} else
		ap->a_vap->va_gid = (gid_t) NULLGROUP;

	return (0);
}

int
umap_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	printf("\ttag VT_UMAPFS, vp=%p, lowervp=%p\n", vp,
	    UMAPVPTOLOWERVP(vp));
	return (0);
}

int
umap_rename(v)
	void *v;
{
	struct vop_rename_args  /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	int error, flags;
	struct componentname *compnamep;
	struct ucred *compcredp, *savecompcredp;
	struct vnode *vp;
	struct vnode *tvp;

	/*
	 * Rename is irregular, having two componentname structures.
	 * We need to map the cred in the second structure,
	 * and then bypass takes care of the rest.
	 */

	vp = ap->a_fdvp;
	flags = MOUNTTOUMAPMOUNT(vp->v_mount)->umapm_flags;
	compnamep = ap->a_tcnp;
	compcredp = compnamep->cn_cred;

	savecompcredp = compcredp;
	compcredp = compnamep->cn_cred = crdup(savecompcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
		printf("umap_rename: rename component credit user was %d, group %d\n",
		    compcredp->cr_uid, compcredp->cr_gid);

	/* Map all ids in the credential structure. */

	umap_mapids(vp->v_mount, compcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
		printf("umap_rename: rename component credit user now %d, group %d\n",
		    compcredp->cr_uid, compcredp->cr_gid);

	tvp = ap->a_tvp;
	if (tvp) {
		if (tvp->v_mount != vp->v_mount)
			tvp = NULL;
		else
			vref(tvp);
	}
	error = umap_bypass(ap);
	if (tvp) {
		if (error == 0)
			VTOLAYER(tvp)->layer_flags |= LAYERFS_REMOVED;
		vrele(tvp);
	}

	/* Restore the additional mapped componentname cred structure. */

	crfree(compcredp);
	compnamep->cn_cred = savecompcredp;

	return error;
}
592