/*	$NetBSD: umap_vnops.c,v 1.47 2009/03/14 21:04:25 dsl Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)umap_vnops.c	8.6 (Berkeley) 5/22/95
 */

/*
 * Umap Layer
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: umap_vnops.c,v 1.47 2009/03/14 21:04:25 dsl Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kauth.h>

#include <miscfs/umapfs/umap.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/layer_extern.h>

/*
 * Note: If the LAYERFS_MBYPASSDEBUG flag is set, it is possible
 * that the debug printing will bomb out, because kauth routines
 * do not handle NOCRED or FSCRED like other credentials and end
 * up dereferencing an inappropriate pointer.
 *
 * That should be fixed in kauth rather than here.
 */
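
/*
 * For context: NOCRED and FSCRED are magic constants rather than
 * pointers to real credential structures, so handing them to
 * kauth_cred_geteuid() and friends dereferences garbage.  That is why
 * the credential-mapping code below always guards its dup/free calls
 * with checks of the form:
 *
 *	if (credp != NOCRED && credp != FSCRED)
 *		credp = kauth_cred_dup(credp);
 *
 * while the debug printfs themselves are not guarded the same way.
 */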

int	umap_lookup(void *);
int	umap_getattr(void *);
int	umap_print(void *);
int	umap_rename(void *);

/*
 * Global vfs data structures
 */
/*
 * XXX - strategy, bwrite are hand coded currently.  They should
 * go away with a merged buffer/block cache.
 *
 */
int (**umap_vnodeop_p)(void *);
const struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
	{ &vop_default_desc,	umap_bypass },

	{ &vop_lookup_desc,	umap_lookup },
	{ &vop_getattr_desc,	umap_getattr },
	{ &vop_print_desc,	umap_print },
	{ &vop_rename_desc,	umap_rename },

	{ &vop_lock_desc,	layer_lock },
	{ &vop_unlock_desc,	layer_unlock },
	{ &vop_islocked_desc,	layer_islocked },
	{ &vop_fsync_desc,	layer_fsync },
	{ &vop_inactive_desc,	layer_inactive },
	{ &vop_reclaim_desc,	layer_reclaim },
	{ &vop_open_desc,	layer_open },
	{ &vop_setattr_desc,	layer_setattr },
	{ &vop_access_desc,	layer_access },
	{ &vop_remove_desc,	layer_remove },
	{ &vop_rmdir_desc,	layer_rmdir },

	{ &vop_bwrite_desc,	layer_bwrite },
	{ &vop_bmap_desc,	layer_bmap },
	{ &vop_getpages_desc,	layer_getpages },
	{ &vop_putpages_desc,	layer_putpages },

	{ NULL, NULL }
};
const struct vnodeopv_desc umapfs_vnodeop_opv_desc =
	{ &umap_vnodeop_p, umap_vnodeop_entries };
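
/*
 * A minimal sketch of how the table above is registered (the real
 * list lives in umap_vfsops.c; the names below follow the usual
 * layered-fs pattern and are not taken from that file):
 *
 *	const struct vnodeopv_desc * const umapfs_vnodeopv_descs[] = {
 *		&umapfs_vnodeop_opv_desc,
 *		NULL,
 *	};
 *
 * vfs_attach() walks that list and fills in umap_vnodeop_p with the
 * compiled operations vector.
 */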

/*
 * This is the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
int
umap_bypass(void *v)
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	int (**our_vnodeop_p)(void *);
	kauth_cred_t *credpp = NULL, credp = 0;
	kauth_cred_t savecredp = 0, savecompcredp = 0;
	kauth_cred_t compcredp = 0;
	struct vnode **this_vp_p;
	int error, error1;
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;
	struct componentname **compnamepp = 0;

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("%s: no vp's in map.\n", __func__);
#endif

	vps_p[0] =
	    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	vp0 = *vps_p[0];
	flags = MOUNTTOUMAPMOUNT(vp0->v_mount)->umapm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("%s: %s\n", __func__, descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i],
		    ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 */

	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {

		credpp = VOPARG_OFFSETTO(kauth_cred_t*,
		    descp->vdesc_cred_offset, ap);

		/* Save old values */

		savecredp = *credpp;
		if (savecredp != NOCRED && savecredp != FSCRED)
			*credpp = kauth_cred_dup(savecredp);
		credp = *credpp;

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(credp) != 0)
			printf("umap_bypass: user was %d, group %d\n",
			    kauth_cred_geteuid(credp), kauth_cred_getegid(credp));

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, credp);

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(credp) != 0)
			printf("umap_bypass: user now %d, group %d\n",
			    kauth_cred_geteuid(credp), kauth_cred_getegid(credp));
	}
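
	/*
	 * umap_mapids() is implemented in umap_subr.c.  A rough sketch
	 * of its expected effect on the duplicated credential, using
	 * the per-mount tables that umap_getattr() references below
	 * (umap_findid() and the exact calls are assumptions, not
	 * copied from that file):
	 *
	 *	uid = umap_findid(kauth_cred_geteuid(credp),
	 *	    info_mapdata, info_nentries);
	 *	kauth_cred_seteuid(credp, uid == -1 ? NOBODY : uid);
	 *	(and likewise for the gid via info_gmapdata)
	 *
	 * Ids with no mapping entry fall back to NOBODY/NULLGROUP.
	 */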

	/*
	 * BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it had better get mapped, too.
	 */

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {

		compnamepp = VOPARG_OFFSETTO(struct componentname**,
		    descp->vdesc_componentname_offset, ap);

		savecompcredp = (*compnamepp)->cn_cred;
		if (savecompcredp != NOCRED && savecompcredp != FSCRED)
			(*compnamepp)->cn_cred = kauth_cred_dup(savecompcredp);
		compcredp = (*compnamepp)->cn_cred;

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(compcredp) != 0)
			printf("umap_bypass: component cred user was %d, group %d\n",
			    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, compcredp);

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(compcredp) != 0)
			printf("umap_bypass: component cred user now %d, group %d\n",
			    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*vps_p[0], descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLUNLOCK)
				LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset, ap);
		/*
		 * Only vop_lookup, vop_create, vop_mkdir, vop_bmap,
		 * vop_mknod, and vop_symlink return vpp's. vop_bmap
		 * doesn't call bypass as the lower vpp is fine (we're just
		 * going to do I/O on it). vop_lookup doesn't call bypass
		 * as a lookup on "." would generate a locking error.
		 * So all the calls which get us here have a locked vpp. :-)
		 */
		error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
		if (error) {
			vput(**vppp);
			**vppp = NULL;
		}
	}

 out:
	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && credp &&
		    kauth_cred_geteuid(credp) != 0)
			printf("umap_bypass: returning-user was %d\n",
			    kauth_cred_geteuid(credp));

		if (savecredp != NOCRED && savecredp != FSCRED && credpp) {
			kauth_cred_free(credp);
			*credpp = savecredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && credpp &&
			    kauth_cred_geteuid(*credpp) != 0)
				printf("umap_bypass: returning-user now %d\n",
				    kauth_cred_geteuid(savecredp));
		}
	}

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
		    kauth_cred_geteuid(compcredp) != 0)
			printf("umap_bypass: returning-component-user was %d\n",
			    kauth_cred_geteuid(compcredp));

		if (savecompcredp != NOCRED && savecompcredp != FSCRED) {
			kauth_cred_free(compcredp);
			(*compnamepp)->cn_cred = savecompcredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
			    kauth_cred_geteuid(savecompcredp) != 0)
				printf("umap_bypass: returning-component-user now %d\n",
				    kauth_cred_geteuid(savecompcredp));
		}
	}

	return (error);
}

/*
 * This is based on the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
int
umap_lookup(void *v)
{
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	kauth_cred_t savecompcredp = NULL;
	kauth_cred_t compcredp = NULL;
	struct vnode *dvp, *vp, *ldvp;
	struct mount *mp;
	int error;
	int flags, cnf = cnp->cn_flags;

	dvp = ap->a_dvp;
	mp = dvp->v_mount;

	if ((cnf & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
		(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	flags = MOUNTTOUMAPMOUNT(mp)->umapm_flags;
	ldvp = UMAPVPTOLOWERVP(dvp);

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("umap_lookup\n");

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 *
	 * BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it had better get mapped, too.
	 */

	if ((savecompcredp = cnp->cn_cred)) {
		compcredp = kauth_cred_dup(savecompcredp);
		cnp->cn_cred = compcredp;

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(compcredp) != 0)
			printf("umap_lookup: component cred user was %d, group %d\n",
			    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

		/* Map all ids in the credential structure. */
		umap_mapids(mp, compcredp);
	}

	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
	    kauth_cred_geteuid(compcredp) != 0)
		printf("umap_lookup: component cred user now %d, group %d\n",
		    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	vp = *ap->a_vpp;
	*ap->a_vpp = NULL;

	if (error == EJUSTRETURN && (cnf & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/* Do locking fixup as appropriate.  See layer_lookup() for info. */
	if (ldvp == vp) {
		*ap->a_vpp = dvp;
		VREF(dvp);
		vrele(vp);
	} else if (vp != NULL) {
		error = layer_node_create(mp, vp, ap->a_vpp);
		if (error) {
			vput(vp);
		}
	}

	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
	    kauth_cred_geteuid(compcredp) != 0)
		printf("umap_lookup: returning-component-user was %d\n",
		    kauth_cred_geteuid(compcredp));

	if (savecompcredp != NOCRED && savecompcredp != FSCRED) {
		if (compcredp)
			kauth_cred_free(compcredp);
		cnp->cn_cred = savecompcredp;
		if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
		    kauth_cred_geteuid(savecompcredp) != 0)
			printf("umap_lookup: returning-component-user now %d\n",
			    kauth_cred_geteuid(savecompcredp));
	}

	return (error);
}

/*
 * We handle getattr to change the fsid.
 */
int
umap_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	uid_t uid;
	gid_t gid;
	int error, tmpid, nentries, gnentries, flags;
	u_long (*mapdata)[2];
	u_long (*gmapdata)[2];
	struct vnode **vp1p;
	const struct vnodeop_desc *descp = ap->a_desc;

	if ((error = umap_bypass(ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];

	flags = MOUNTTOUMAPMOUNT(ap->a_vp->v_mount)->umapm_flags;
	/*
	 * Umap needs to map the uid and gid returned by a stat
	 * into the proper values for this site.  This involves
	 * finding the returned uid in the mapping information,
	 * translating it into the uid on the other end,
	 * and filling in the proper field in the vattr
	 * structure pointed to by ap->a_vap.  The group
	 * is easier, since currently all groups will be
	 * translated to NULLGROUP.
	 */

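	/*
	 * The per-mount map tables are arrays of u_long[2] pairs loaded
	 * at mount time.  umap_reverse_findid() is implemented in
	 * umap_subr.c; a rough sketch of its expected behaviour (the
	 * column order is an assumption, only the -1 "not found"
	 * convention is relied on below):
	 *
	 *	for (i = 0; i < nentries; i++)
	 *		if (map[i][1] == id)
	 *			return (map[i][0]);
	 *	return (-1);
	 */
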
	/* Find entry in map */

	uid = ap->a_vap->va_uid;
	gid = ap->a_vap->va_gid;
	if ((flags & LAYERFS_MBYPASSDEBUG))
		printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
		    gid);

	vp1p = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	nentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
	mapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata);
	gnentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
	gmapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata);

	/*
	 * Reverse map the uid for the vnode.  Since it's a reverse
	 * map, we can't use umap_mapids() to do it.
	 */

	tmpid = umap_reverse_findid(uid, mapdata, nentries);

	if (tmpid != -1) {
		ap->a_vap->va_uid = (uid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original uid = %d\n", uid);
	} else
		ap->a_vap->va_uid = (uid_t) NOBODY;

	/* Reverse map the gid for the vnode. */

	tmpid = umap_reverse_findid(gid, gmapdata, gnentries);

	if (tmpid != -1) {
		ap->a_vap->va_gid = (gid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original gid = %d\n", gid);
	} else
		ap->a_vap->va_gid = (gid_t) NULLGROUP;

	return (0);
}
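
/*
 * Worked example (the pairing order within a map entry is an
 * assumption; see umap_subr.c for the authoritative layout): with a
 * uid map entry of { 1000, 2001 }, a caller running with euid 1000
 * has its duplicated credential rewritten to 2001 by
 * umap_bypass()/umap_mapids() before the lower-layer call, and a file
 * owned by uid 2001 on the lower layer is reported here by
 * umap_getattr() as owned by uid 1000.  Ids with no map entry show up
 * as NOBODY/NULLGROUP in both directions.
 */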

int
umap_print(void *v)
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_UMAPFS, vp=%p, lowervp=%p\n", vp,
	    UMAPVPTOLOWERVP(vp));
	return (0);
}

int
umap_rename(void *v)
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	int error, flags;
	struct componentname *compnamep;
	kauth_cred_t compcredp, savecompcredp;
	struct vnode *vp;
	struct vnode *tvp;

	/*
	 * Rename is irregular, having two componentname structures.
	 * We need to map the cred in the second structure,
	 * and then bypass takes care of the rest.
	 */

	vp = ap->a_fdvp;
	flags = MOUNTTOUMAPMOUNT(vp->v_mount)->umapm_flags;
	compnamep = ap->a_tcnp;
	compcredp = compnamep->cn_cred;

	savecompcredp = compcredp;
	compcredp = compnamep->cn_cred = kauth_cred_dup(savecompcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) &&
	    kauth_cred_geteuid(compcredp) != 0)
		printf("umap_rename: rename component cred user was %d, group %d\n",
		    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

	/* Map all ids in the credential structure. */

	umap_mapids(vp->v_mount, compcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) &&
	    kauth_cred_geteuid(compcredp) != 0)
		printf("umap_rename: rename component cred user now %d, group %d\n",
		    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

	tvp = ap->a_tvp;
	if (tvp) {
		if (tvp->v_mount != vp->v_mount)
			tvp = NULL;
		else
			vref(tvp);
	}
	error = umap_bypass(ap);
	if (tvp) {
		if (error == 0)
			VTOLAYER(tvp)->layer_flags |= LAYERFS_REMOVED;
		vrele(tvp);
	}

	/* Restore the additional mapped componentname cred structure. */

	kauth_cred_free(compcredp);
	compnamep->cn_cred = savecompcredp;

	return error;
}