/*	$NetBSD: umap_vnops.c,v 1.16 1999/08/16 21:24:53 wrstuden Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)umap_vnops.c	8.6 (Berkeley) 5/22/95
 */

/*
 * Umap Layer
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/umapfs/umap.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/layer_extern.h>

int	umap_lookup	__P((void *));
int	umap_getattr	__P((void *));
int	umap_print	__P((void *));
int	umap_rename	__P((void *));

/*
 * Global vfs data structures
 */
/*
 * XXX - strategy, bwrite are hand coded currently.  They should
 * go away with a merged buffer/block cache.
 */
int (**umap_vnodeop_p) __P((void *));
struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
	{ &vop_default_desc,	umap_bypass },

	{ &vop_lookup_desc,	umap_lookup },
	{ &vop_getattr_desc,	umap_getattr },
	{ &vop_print_desc,	umap_print },
	{ &vop_rename_desc,	umap_rename },

	{ &vop_lock_desc,	layer_lock },
	{ &vop_unlock_desc,	layer_unlock },
	{ &vop_islocked_desc,	layer_islocked },
	{ &vop_fsync_desc,	layer_fsync },
	{ &vop_inactive_desc,	layer_inactive },
	{ &vop_reclaim_desc,	layer_reclaim },
	{ &vop_open_desc,	layer_open },
	{ &vop_setattr_desc,	layer_setattr },
	{ &vop_access_desc,	layer_access },

	{ &vop_strategy_desc,	layer_strategy },
	{ &vop_bwrite_desc,	layer_bwrite },
	{ &vop_bmap_desc,	layer_bmap },

	{ (struct vnodeop_desc*) NULL, (int(*) __P((void *))) NULL }
};
struct vnodeopv_desc umapfs_vnodeop_opv_desc =
	{ &umap_vnodeop_p, umap_vnodeop_entries };
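
/*
 * umapfs_vnodeop_opv_desc is referenced from the file system's
 * vnodeopv_desc list (presumably in umap_vfsops.c); the VFS framework
 * uses it to fill in umap_vnodeop_p when umapfs is attached.
 */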

/*
 * This is the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
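/*
 * Outline of the bypass:
 *	1. Replace each umapfs vnode in the argument block with the lower
 *	   vnode it shadows, saving the originals in old_vps[].
 *	2. Duplicate and map any credential passed directly in the
 *	   arguments and/or inside a componentname.
 *	3. VCALL the same operation on the lower layer.
 *	4. Restore the saved vnodes and credentials.
 *	5. If the operation returned a vnode, wrap it in an umapfs node
 *	   via layer_node_create().
 */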
int
umap_bypass(v)
	void *v;
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	struct ucred **credpp = NULL, *credp = NULL;
	struct ucred *savecredp = NULL, *savecompcredp = NULL;
	struct ucred *compcredp = NULL;
	struct vnode **this_vp_p;
	int error, error1;
	int (**our_vnodeop_p) __P((void *));
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;
	struct componentname **compnamepp = NULL;

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("umap_bypass: no vp's in map.\n");
#endif
	vps_p[0] = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0],
	    ap);
	vp0 = *vps_p[0];
	flags = MOUNTTOUMAPMOUNT(vp0->v_mount)->umapm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("umap_bypass: %s\n", descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
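	/*
	 * The low bits of vdesc_flags carry the per-vnode WILLRELE flags;
	 * shifting `reles' right once per iteration keeps the current
	 * vnode's flag in the VDESC_VP0_* position tested below.
	 */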
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i], ap);

		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (Must map first vp or vclean fails.)
		 */

		if (i && ((*this_vp_p) == NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
			if (reles & 1)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 */
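	/*
	 * The credential is duplicated with crdup() so the caller's ucred
	 * is never modified; umap_mapids() then rewrites the ids in the
	 * copy according to this mount's map tables.  The original is put
	 * back at "out:" below.
	 */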

	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {

		credpp = VOPARG_OFFSETTO(struct ucred**,
		    descp->vdesc_cred_offset, ap);

		/* Save old values */

		savecredp = *credpp;
		if (savecredp != NOCRED)
			*credpp = crdup(savecredp);
		credp = *credpp;

		if ((flags & LAYERFS_MBYPASSDEBUG) && credp->cr_uid != 0)
			printf("umap_bypass: user was %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, credp);

		if ((flags & LAYERFS_MBYPASSDEBUG) && credp->cr_uid != 0)
			printf("umap_bypass: user now %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);
	}

	/*
	 * BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it better get mapped, too.
	 */

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {

		compnamepp = VOPARG_OFFSETTO(struct componentname**,
		    descp->vdesc_componentname_offset, ap);

		savecompcredp = (*compnamepp)->cn_cred;
		if (savecompcredp != NOCRED)
			(*compnamepp)->cn_cred = crdup(savecompcredp);
		compcredp = (*compnamepp)->cn_cred;

		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
			printf("umap_bypass: component cred user was %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, compcredp);

		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
			printf("umap_bypass: component cred user now %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
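	/*
	 * Where the descriptor says the operation consumes a lock or a
	 * reference on a vnode (the VDESC_VP*_WILLUNLOCK/WILLRELE bits),
	 * the lower layer only saw the lower vnode, so the matching
	 * unlock/release is applied to the restored upper vnode here.
	 */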
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLUNLOCK)
				LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
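	/*
	 * layer_node_create() either finds an existing umapfs node for the
	 * lower vnode returned in *vppp or allocates a new one, and writes
	 * the upper vnode back through *vppp.  The VDESC_VPP_WILLRELE case
	 * is skipped, as in layer_bypass().
	 */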
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
		    descp->vdesc_vpp_offset, ap);
		error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && credp &&
		    credp->cr_uid != 0)
			printf("umap_bypass: returning-user was %d\n",
			    credp->cr_uid);

		if (savecredp != NOCRED) {
			crfree(credp);
			*credpp = savecredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && credpp &&
			    (*credpp)->cr_uid != 0)
				printf("umap_bypass: returning-user now %d\n",
				    savecredp->cr_uid);
		}
	}

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
		    compcredp->cr_uid != 0)
			printf("umap_bypass: returning-component-user was %d\n",
			    compcredp->cr_uid);

		if (savecompcredp != NOCRED) {
			crfree(compcredp);
			(*compnamepp)->cn_cred = savecompcredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
			    savecompcredp->cr_uid != 0)
				printf("umap_bypass: returning-component-user now %d\n",
				    savecompcredp->cr_uid);
		}
	}

	return (error);
}

/*
 * This is based on the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
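/*
 * umap_lookup() cannot simply use umap_bypass(): besides mapping
 * cnp->cn_cred, it must repeat the fixups that layer_lookup() does
 * after calling the lower layer -- honouring PDIRUNLOCK, handling a
 * lookup that returns the directory itself (e.g. "."), and wrapping
 * any other result vnode with layer_node_create().
 */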
int
umap_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *savecompcredp = NULL;
	struct ucred *compcredp = NULL;
	struct vnode *dvp, *vp, *ldvp;
	struct mount *mp;
	int error;
	int i, flags, cnf = cnp->cn_flags;

	dvp = ap->a_dvp;
	mp = dvp->v_mount;

	if ((cnf & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	flags = MOUNTTOUMAPMOUNT(mp)->umapm_flags;
	ldvp = UMAPVPTOLOWERVP(dvp);

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("umap_lookup\n");

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 *
	 * BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it better get mapped, too.
	 */

	if ((savecompcredp = cnp->cn_cred)) {
		compcredp = crdup(savecompcredp);
		cnp->cn_cred = compcredp;

		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
			printf("umap_lookup: component cred user was %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);

		/* Map all ids in the credential structure. */
		umap_mapids(mp, compcredp);

		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
			printf("umap_lookup: component cred user now %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);
	}

	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	vp = *ap->a_vpp;

	if (error == EJUSTRETURN && (cnf & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/* Do locking fixup as appropriate. See layer_lookup() for info */
	if ((cnp->cn_flags & PDIRUNLOCK)) {
		LAYERFS_UPPERUNLOCK(dvp, 0, i);
	}
	if (ldvp == vp) {
		*ap->a_vpp = dvp;
		VREF(dvp);
		vrele(vp);
	} else if (vp != NULL) {
		error = layer_node_create(mp, vp, ap->a_vpp);
	}

	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
	    compcredp->cr_uid != 0)
		printf("umap_lookup: returning-component-user was %d\n",
		    compcredp->cr_uid);

	if (savecompcredp != NOCRED && savecompcredp != NULL) {
		crfree(compcredp);
		cnp->cn_cred = savecompcredp;
		if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
		    savecompcredp->cr_uid != 0)
			printf("umap_lookup: returning-component-user now %d\n",
			    savecompcredp->cr_uid);
	}

	return (error);
}

/*
 * We handle getattr to change the fsid and to reverse-map
 * the returned uid and gid.
 */
int
umap_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	uid_t uid;
	gid_t gid;
	int error, tmpid, nentries, gnentries, flags;
	u_long (*mapdata)[2];
	u_long (*gmapdata)[2];
	struct vnode **vp1p;
	struct vnodeop_desc *descp = ap->a_desc;

	if ((error = umap_bypass(ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	flags = MOUNTTOUMAPMOUNT(ap->a_vp->v_mount)->umapm_flags;
	/*
	 * Umap needs to map the uid and gid returned by a stat
	 * into the proper values for this site.  This involves
	 * finding the returned uid in the mapping information,
	 * translating it into the uid on the other end,
	 * and filling in the proper field in the vattr
	 * structure pointed to by ap->a_vap.  The gid is handled
	 * the same way, falling back to NULLGROUP when no reverse
	 * mapping exists.
	 */
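	/*
	 * umap_bypass() above already ran the lower layer's getattr with
	 * mapped credentials, so va_uid/va_gid now hold the lower layer's
	 * ids; the code below maps them back for presentation to the
	 * caller.
	 */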

	/* Find entry in map */

	uid = ap->a_vap->va_uid;
	gid = ap->a_vap->va_gid;
	if ((flags & LAYERFS_MBYPASSDEBUG))
		printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
		    gid);

	vp1p = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	nentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
	mapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata);
	gnentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
	gmapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata);

	/*
	 * Reverse map the uid for the vnode.  Since it's a reverse
	 * map, we can't use umap_mapids() to do it.
	 */
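	/*
	 * A sketch of the reverse lookup, assuming the map entries are
	 * {original id, mapped id} pairs (see umap_reverse_findid() in
	 * umap_subr.c for the authoritative version):
	 *
	 *	for (i = 0; i < nentries; i++)
	 *		if (map[i][1] == id)
	 *			return (map[i][0]);
	 *	return (-1);
	 */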

	tmpid = umap_reverse_findid(uid, mapdata, nentries);

	if (tmpid != -1) {
		ap->a_vap->va_uid = (uid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original uid = %d\n", uid);
	} else
		ap->a_vap->va_uid = (uid_t) NOBODY;

	/* Reverse map the gid for the vnode. */

	tmpid = umap_reverse_findid(gid, gmapdata, gnentries);

	if (tmpid != -1) {
		ap->a_vap->va_gid = (gid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original gid = %d\n", gid);
	} else
		ap->a_vap->va_gid = (gid_t) NULLGROUP;

	return (0);
}

int
umap_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_UMAPFS, vp=%p, lowervp=%p\n", vp,
	    UMAPVPTOLOWERVP(vp));
	return (0);
}

int
umap_rename(v)
	void *v;
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	int error, flags;
	struct componentname *compnamep;
	struct ucred *compcredp, *savecompcredp;
	struct vnode *vp;

	/*
	 * Rename is irregular, having two componentname structures.
	 * We need to map the cred in the second structure,
	 * and then bypass takes care of the rest.
	 */
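	/*
	 * Presumably the descriptor's componentname offset covers only the
	 * first componentname (a_fcnp), which umap_bypass() will map; the
	 * target name's credential is therefore mapped by hand here and
	 * restored once the bypass returns.
	 */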

	vp = ap->a_fdvp;
	flags = MOUNTTOUMAPMOUNT(vp->v_mount)->umapm_flags;
	compnamep = ap->a_tcnp;
	compcredp = compnamep->cn_cred;

	savecompcredp = compcredp;
	compcredp = compnamep->cn_cred = crdup(savecompcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
		printf("umap_rename: rename component cred user was %d, group %d\n",
		    compcredp->cr_uid, compcredp->cr_gid);

	/* Map all ids in the credential structure. */

	umap_mapids(vp->v_mount, compcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp->cr_uid != 0)
		printf("umap_rename: rename component cred user now %d, group %d\n",
		    compcredp->cr_uid, compcredp->cr_gid);

	error = umap_bypass(ap);

	/* Restore the additional mapped componentname cred structure. */

	crfree(compcredp);
	compnamep->cn_cred = savecompcredp;

	return (error);
}