/*	$NetBSD: union_subr.c,v 1.79 2020/08/18 09:44:07 hannken Exp $	*/

/*
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
 */

/*
 * Copyright (c) 1994 Jan-Simon Pendry
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: union_subr.c,v 1.79 2020/08/18 09:44:07 hannken Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/dirent.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/queue.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

#include <fs/union/union.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

static LIST_HEAD(uhashhead, union_node) *uhashtbl;
static u_long uhash_mask;		/* size of hash table - 1 */
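/*
 * Union nodes hash on the pair of layer vnode addresses.  The sum is
 * shifted right to drop the low bits, which carry little entropy
 * given allocator alignment.  NOHASH flags a table index to skip.
 */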
#define UNION_HASH(u, l) \
	((((u_long) (u) + (u_long) (l)) >> 8) & uhash_mask)
#define NOHASH	((u_long)-1)

static kmutex_t uhash_lock;

static void union_newupper(struct union_node *, struct vnode *);
static void union_newlower(struct union_node *, struct vnode *);
static void union_ref(struct union_node *);
static void union_rele(struct union_node *);
static int union_do_lookup(struct vnode *, struct componentname *, kauth_cred_t,
    const char *);
int union_vn_close(struct vnode *, int, kauth_cred_t, struct lwp *);
static void union_dircache_r(struct vnode *, struct vnode ***, int *);
struct vnode *union_dircache(struct vnode *, struct lwp *);

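/*
 * Initialize the global union node hash table and its lock.
 */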
void
union_init(void)
{

	mutex_init(&uhash_lock, MUTEX_DEFAULT, IPL_NONE);
	uhashtbl = hashinit(desiredvnodes, HASH_LIST, true, &uhash_mask);
}

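/*
 * Resize the hash table after desiredvnodes changed: allocate a new
 * table, then rehash every cached node into it under uhash_lock.
 */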
void
union_reinit(void)
{
	struct union_node *un;
	struct uhashhead *oldhash, *hash;
	u_long oldmask, mask, val;
	int i;

	hash = hashinit(desiredvnodes, HASH_LIST, true, &mask);
	mutex_enter(&uhash_lock);
	oldhash = uhashtbl;
	oldmask = uhash_mask;
	uhashtbl = hash;
	uhash_mask = mask;
	for (i = 0; i <= oldmask; i++) {
		while ((un = LIST_FIRST(&oldhash[i])) != NULL) {
			LIST_REMOVE(un, un_cache);
			val = UNION_HASH(un->un_uppervp, un->un_lowervp);
			LIST_INSERT_HEAD(&hash[val], un, un_cache);
		}
	}
	mutex_exit(&uhash_lock);
	hashdone(oldhash, HASH_LIST, oldmask);
}

/*
 * Free global unionfs resources.
 */
void
union_done(void)
{

	hashdone(uhashtbl, HASH_LIST, uhash_mask);
	mutex_destroy(&uhash_lock);

	/* Make sure to unset the readdir hook. */
	vn_union_readdir_hook = NULL;
}

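/*
 * Attach a lower vnode to a union node and move the node to its new
 * hash chain.  The union vnode must be locked exclusively and must
 * not already have a lower vnode.
 */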
void
union_newlower(struct union_node *un, struct vnode *lowervp)
{
	int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
	int nhash = UNION_HASH(un->un_uppervp, lowervp);

	if (un->un_lowervp == lowervp)
		return;

	KASSERT(VOP_ISLOCKED(UNIONTOV(un)) == LK_EXCLUSIVE);
	KASSERT(un->un_lowervp == NULL);

	mutex_enter(&uhash_lock);

	if (ohash != nhash && (un->un_cflags & UN_CACHED)) {
		un->un_cflags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}
	mutex_enter(&un->un_lock);
	un->un_lowervp = lowervp;
	un->un_lowersz = VNOVAL;
	mutex_exit(&un->un_lock);
	if (ohash != nhash) {
		LIST_INSERT_HEAD(&uhashtbl[nhash], un, un_cache);
		un->un_cflags |= UN_CACHED;
	}

	mutex_exit(&uhash_lock);
}

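/*
 * Attach an upper vnode to a union node, transferring the vnode lock
 * from the union vnode to the upper vnode, and rehash the node.  The
 * union vnode must be locked exclusively and must not already have
 * an upper vnode.
 */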
void
union_newupper(struct union_node *un, struct vnode *uppervp)
{
	int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
	int nhash = UNION_HASH(uppervp, un->un_lowervp);
	struct vop_lock_args lock_ap;
	struct vop_unlock_args unlock_ap;
	int error __diagused;

	if (un->un_uppervp == uppervp)
		return;

	KASSERT(VOP_ISLOCKED(UNIONTOV(un)) == LK_EXCLUSIVE);
	KASSERT(un->un_uppervp == NULL);

	/*
	 * We have to transfer the vnode lock from the union vnode to
	 * the upper vnode.  Lock the upper vnode first.  We cannot use
	 * VOP_LOCK() here as it would break the fstrans state.
	 */
	lock_ap.a_desc = VDESC(vop_lock);
	lock_ap.a_vp = uppervp;
	lock_ap.a_flags = LK_EXCLUSIVE;
	error = VCALL(lock_ap.a_vp, VOFFSET(vop_lock), &lock_ap);
	KASSERT(error == 0);

	mutex_enter(&uhash_lock);

	if (ohash != nhash && (un->un_cflags & UN_CACHED)) {
		un->un_cflags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}
	mutex_enter(&un->un_lock);
	un->un_uppervp = uppervp;
	un->un_uppersz = VNOVAL;
	/*
	 * With the upper vnode in place unlock the union vnode to
	 * finalize the lock transfer.
	 */
	unlock_ap.a_desc = VDESC(vop_unlock);
	unlock_ap.a_vp = UNIONTOV(un);
	genfs_unlock(&unlock_ap);
	/* Update union vnode interlock & vmobjlock. */
	vshareilock(UNIONTOV(un), uppervp);
	rw_obj_hold(uppervp->v_uobj.vmobjlock);
	uvm_obj_setlock(&UNIONTOV(un)->v_uobj, uppervp->v_uobj.vmobjlock);
	mutex_exit(&un->un_lock);
	if (ohash != nhash) {
		LIST_INSERT_HEAD(&uhashtbl[nhash], un, un_cache);
		un->un_cflags |= UN_CACHED;
	}

	mutex_exit(&uhash_lock);
}

/*
 * Keep track of size changes in the underlying vnodes.
 * If the size changes, then callback to the vm layer
 * giving priority to the upper layer size.
 *
 * Mutex un_lock is held on entry and released on return.
 */
void
union_newsize(struct vnode *vp, off_t uppersz, off_t lowersz)
{
	struct union_node *un = VTOUNION(vp);
	off_t sz;

	KASSERT(mutex_owned(&un->un_lock));
	/* only interested in regular files */
	if (vp->v_type != VREG) {
		mutex_exit(&un->un_lock);
		uvm_vnp_setsize(vp, 0);
		return;
	}

	sz = VNOVAL;

	if ((uppersz != VNOVAL) && (un->un_uppersz != uppersz)) {
		un->un_uppersz = uppersz;
		if (sz == VNOVAL)
			sz = un->un_uppersz;
	}

	if ((lowersz != VNOVAL) && (un->un_lowersz != lowersz)) {
		un->un_lowersz = lowersz;
		if (sz == VNOVAL)
			sz = un->un_lowersz;
	}
	mutex_exit(&un->un_lock);

	if (sz != VNOVAL) {
#ifdef UNION_DIAGNOSTIC
		printf("union: %s size now %qd\n",
		    uppersz != VNOVAL ? "upper" : "lower", sz);
#endif
		uvm_vnp_setsize(vp, sz);
	}
}

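/*
 * Take a reference to a union node.  The caller holds uhash_lock.
 */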
static void
union_ref(struct union_node *un)
{

	KASSERT(mutex_owned(&uhash_lock));
	un->un_refs++;
}

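/*
 * Drop a reference to a union node.  On the last reference, remove
 * the node from the hash table, release the layer vnodes it holds
 * and free the node itself.
 */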
static void
union_rele(struct union_node *un)
{

	mutex_enter(&uhash_lock);
	un->un_refs--;
	if (un->un_refs > 0) {
		mutex_exit(&uhash_lock);
		return;
	}
	if (un->un_cflags & UN_CACHED) {
		un->un_cflags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}
	mutex_exit(&uhash_lock);

	if (un->un_pvp != NULLVP)
		vrele(un->un_pvp);
	if (un->un_uppervp != NULLVP)
		vrele(un->un_uppervp);
	if (un->un_lowervp != NULLVP)
		vrele(un->un_lowervp);
	if (un->un_dirvp != NULLVP)
		vrele(un->un_dirvp);
	if (un->un_path)
		free(un->un_path, M_TEMP);
	mutex_destroy(&un->un_lock);

	free(un, M_TEMP);
}

/*
 * allocate a union_node/vnode pair.  the vnode is
 * referenced and unlocked.  the new vnode is returned
 * via (vpp).  (mp) is the mountpoint of the union filesystem,
 * (dvp) is the parent directory where the upper layer object
 * should exist (but doesn't) and (cnp) is the componentname
 * information which is partially copied to allow the upper
 * layer object to be created at a later time.  (uppervp)
 * and (lowervp) reference the upper and lower layer objects
 * being mapped.  either, but not both, can be nil.
 * both, if supplied, are unlocked.
 * the references are either maintained in the new union_node
 * object which is allocated, or they are vrele'd.
 *
 * all union_nodes are maintained on a hash
 * list.  new nodes are only allocated when they cannot
 * be found on this list.  entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * the vnode gets attached or referenced with vcache_get().
 */
int
union_allocvp(
	struct vnode **vpp,
	struct mount *mp,
	struct vnode *undvp,		/* parent union vnode */
	struct vnode *dvp,		/* may be null */
	struct componentname *cnp,	/* may be null */
	struct vnode *uppervp,		/* may be null */
	struct vnode *lowervp,		/* may be null */
	int docache)
{
	int error;
	struct union_node *un = NULL, *un1;
	struct vnode *vp, *xlowervp = NULLVP;
	u_long hash[3];
	int try;
	bool is_dotdot;

	is_dotdot = (dvp != NULL && cnp != NULL && (cnp->cn_flags & ISDOTDOT));

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("union: unidentifiable allocation");

	if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
		xlowervp = lowervp;
		lowervp = NULLVP;
	}

	/*
	 * If both uppervp and lowervp are not NULL we have to
	 * search union nodes with one vnode as NULL too.
	 */
	hash[0] = UNION_HASH(uppervp, lowervp);
	if (uppervp == NULL || lowervp == NULL) {
		hash[1] = hash[2] = NOHASH;
	} else {
		hash[1] = UNION_HASH(uppervp, NULLVP);
		hash[2] = UNION_HASH(NULLVP, lowervp);
	}

	if (!docache) {
		un = NULL;
		goto found;
	}

loop:
	mutex_enter(&uhash_lock);

	for (try = 0; try < 3; try++) {
		if (hash[try] == NOHASH)
			continue;
		LIST_FOREACH(un, &uhashtbl[hash[try]], un_cache) {
			if ((un->un_lowervp && un->un_lowervp != lowervp) ||
			    (un->un_uppervp && un->un_uppervp != uppervp) ||
			    un->un_mount != mp)
				continue;

			union_ref(un);
			mutex_exit(&uhash_lock);
			error = vcache_get(mp, &un, sizeof(un), &vp);
			KASSERT(error != 0 || UNIONTOV(un) == vp);
			union_rele(un);
			if (error == ENOENT)
				goto loop;
			else if (error)
				goto out;
			goto found;
		}
	}

	mutex_exit(&uhash_lock);

found:
	if (un) {
		if (uppervp != dvp) {
			if (is_dotdot)
				VOP_UNLOCK(dvp);
			vn_lock(UNIONTOV(un), LK_EXCLUSIVE | LK_RETRY);
			if (is_dotdot)
				vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
		/*
		 * Save information about the upper layer.
		 */
		if (uppervp != un->un_uppervp) {
			union_newupper(un, uppervp);
		} else if (uppervp) {
			vrele(uppervp);
		}

		/*
		 * Save information about the lower layer.
		 * This needs to keep track of pathname
		 * and directory information which union_vn_create
		 * might need.
		 */
		if (lowervp != un->un_lowervp) {
			union_newlower(un, lowervp);
			if (cnp && (lowervp != NULLVP)) {
				un->un_path = malloc(cnp->cn_namelen+1,
						M_TEMP, M_WAITOK);
				memcpy(un->un_path, cnp->cn_nameptr,
						cnp->cn_namelen);
				un->un_path[cnp->cn_namelen] = '\0';
				vref(dvp);
				un->un_dirvp = dvp;
			}
		} else if (lowervp) {
			vrele(lowervp);
		}
		*vpp = UNIONTOV(un);
		if (uppervp != dvp)
			VOP_UNLOCK(*vpp);
		error = 0;
		goto out;
	}

	un = malloc(sizeof(struct union_node), M_TEMP, M_WAITOK);
	mutex_init(&un->un_lock, MUTEX_DEFAULT, IPL_NONE);
	un->un_refs = 1;
	un->un_mount = mp;
	un->un_vnode = NULL;
	un->un_uppervp = uppervp;
	un->un_lowervp = lowervp;
	un->un_pvp = undvp;
	if (undvp != NULLVP)
		vref(undvp);
	un->un_dircache = 0;
	un->un_openl = 0;
	un->un_cflags = 0;
	un->un_hooknode = false;

	un->un_uppersz = VNOVAL;
	un->un_lowersz = VNOVAL;

	if (dvp && cnp && (lowervp != NULLVP)) {
		un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
		memcpy(un->un_path, cnp->cn_nameptr, cnp->cn_namelen);
		un->un_path[cnp->cn_namelen] = '\0';
		vref(dvp);
		un->un_dirvp = dvp;
	} else {
		un->un_path = 0;
		un->un_dirvp = 0;
	}

	if (docache) {
		mutex_enter(&uhash_lock);
		LIST_FOREACH(un1, &uhashtbl[hash[0]], un_cache) {
			if (un1->un_lowervp == lowervp &&
			    un1->un_uppervp == uppervp &&
			    un1->un_mount == mp) {
				/*
				 * Another thread beat us, push back freshly
				 * allocated node and retry.
				 */
				mutex_exit(&uhash_lock);
				union_rele(un);
				goto loop;
			}
		}
		LIST_INSERT_HEAD(&uhashtbl[hash[0]], un, un_cache);
		un->un_cflags |= UN_CACHED;
		mutex_exit(&uhash_lock);
	}

	error = vcache_get(mp, &un, sizeof(un), vpp);
	KASSERT(error != 0 || UNIONTOV(un) == *vpp);
	union_rele(un);
	if (error == ENOENT)
		goto loop;

out:
	if (xlowervp)
		vrele(xlowervp);

	return error;
}

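/*
 * Detach the union node from its vnode (and vice versa), then drop
 * the reference the vnode held on the node.
 */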
int
union_freevp(struct vnode *vp)
{
	struct union_node *un = VTOUNION(vp);

	/* Detach vnode from union node. */
	un->un_vnode = NULL;
	un->un_uppersz = VNOVAL;
	un->un_lowersz = VNOVAL;

	/* Detach union node from vnode. */
	mutex_enter(vp->v_interlock);
	vp->v_data = NULL;
	mutex_exit(vp->v_interlock);

	union_rele(un);

	return 0;
}

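/*
 * vcache_get() load callback: initialize the union vnode (vp) from
 * the union node passed as (key).  Shares the interlock and vm object
 * lock with the topmost layer vnode, detects the root vnode, primes
 * the node's layer sizes and takes a reference on the node.
 */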
int
union_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	struct vattr va;
	struct vnode *svp;
	struct union_node *un;
	struct union_mount *um;
	voff_t uppersz, lowersz;

	KASSERT(key_len == sizeof(un));
	memcpy(&un, key, key_len);

	um = MOUNTTOUNIONMOUNT(mp);
	svp = (un->un_uppervp != NULLVP) ? un->un_uppervp : un->un_lowervp;

	vp->v_tag = VT_UNION;
	vp->v_op = union_vnodeop_p;
	vp->v_data = un;
	un->un_vnode = vp;

	vp->v_type = svp->v_type;
	if (svp->v_type == VCHR || svp->v_type == VBLK)
		spec_node_init(vp, svp->v_rdev);

	vshareilock(vp, svp);
	rw_obj_hold(svp->v_uobj.vmobjlock);
	uvm_obj_setlock(&vp->v_uobj, svp->v_uobj.vmobjlock);

	/* detect the root vnode (and aliases) */
	if ((un->un_uppervp == um->um_uppervp) &&
	    ((un->un_lowervp == NULLVP) || un->un_lowervp == um->um_lowervp)) {
		if (un->un_lowervp == NULLVP) {
			un->un_lowervp = um->um_lowervp;
			if (un->un_lowervp != NULLVP)
				vref(un->un_lowervp);
		}
		vp->v_vflag |= VV_ROOT;
	}

	uppersz = lowersz = VNOVAL;
	if (un->un_uppervp != NULLVP) {
		if (vn_lock(un->un_uppervp, LK_SHARED) == 0) {
			if (VOP_GETATTR(un->un_uppervp, &va, FSCRED) == 0)
				uppersz = va.va_size;
			VOP_UNLOCK(un->un_uppervp);
		}
	}
	if (un->un_lowervp != NULLVP) {
		if (vn_lock(un->un_lowervp, LK_SHARED) == 0) {
			if (VOP_GETATTR(un->un_lowervp, &va, FSCRED) == 0)
				lowersz = va.va_size;
			VOP_UNLOCK(un->un_lowervp);
		}
	}

	mutex_enter(&un->un_lock);
	union_newsize(vp, uppersz, lowersz);

	mutex_enter(&uhash_lock);
	union_ref(un);
	mutex_exit(&uhash_lock);

	*new_key = &vp->v_data;

	return 0;
}

/*
 * copyfile.  copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes.  both (fvp)
 * and (tvp) are locked on entry and exit.
 */
int
union_copyfile(struct vnode *fvp, struct vnode *tvp, kauth_cred_t cred,
	struct lwp *l)
{
	char *tbuf;
	struct uio uio;
	struct iovec iov;
	int error = 0;

	/*
	 * strategy:
	 * allocate a buffer of size MAXBSIZE.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */

	uio.uio_offset = 0;
	UIO_SETUP_SYSSPACE(&uio);

	tbuf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);

	/* ugly loop follows... */
	do {
		off_t offset = uio.uio_offset;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = tbuf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;
		error = VOP_READ(fvp, &uio, 0, cred);

		if (error == 0) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = tbuf;
			iov.iov_len = MAXBSIZE - uio.uio_resid;
			uio.uio_offset = offset;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;

			if (uio.uio_resid == 0)
				break;

			do {
				error = VOP_WRITE(tvp, &uio, 0, cred);
			} while ((uio.uio_resid > 0) && (error == 0));
		}

	} while (error == 0);

	free(tbuf, M_TEMP);
	return (error);
}

/*
 * (un) is assumed to be locked on entry and remains
 * locked on exit.
 */
int
union_copyup(struct union_node *un, int docopy, kauth_cred_t cred,
	struct lwp *l)
{
	int error;
	struct vnode *lvp, *uvp;
	struct vattr lvattr, uvattr;

	error = union_vn_create(&uvp, un, l);
	if (error)
		return (error);

	union_newupper(un, uvp);

	lvp = un->un_lowervp;

	if (docopy) {
		/*
		 * XXX - should not ignore errors
		 * from VOP_CLOSE
		 */
		vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);

		error = VOP_GETATTR(lvp, &lvattr, cred);
		if (error == 0)
			error = VOP_OPEN(lvp, FREAD, cred);
		if (error == 0) {
			error = union_copyfile(lvp, uvp, cred, l);
			(void) VOP_CLOSE(lvp, FREAD, cred);
		}
		if (error == 0) {
			/* Copy permissions up too */
			vattr_null(&uvattr);
			uvattr.va_mode = lvattr.va_mode;
			uvattr.va_flags = lvattr.va_flags;
			error = VOP_SETATTR(uvp, &uvattr, cred);
		}
		VOP_UNLOCK(lvp);
#ifdef UNION_DIAGNOSTIC
		if (error == 0)
			uprintf("union: copied up %s\n", un->un_path);
#endif

	}
	union_vn_close(uvp, FWRITE, cred, l);

	/*
	 * Subsequent IOs will go to the top layer, so
	 * call close on the lower vnode and open on the
	 * upper vnode to ensure that the filesystem keeps
	 * its reference counts right.  This doesn't do
	 * the right thing with (cred) and (FREAD) though.
	 * Ignoring error returns is not right, either.
	 */
	if (error == 0) {
		int i;

		vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
		for (i = 0; i < un->un_openl; i++) {
			(void) VOP_CLOSE(lvp, FREAD, cred);
			(void) VOP_OPEN(uvp, FREAD, cred);
		}
		un->un_openl = 0;
		VOP_UNLOCK(lvp);
	}

	return (error);

}

/*
 * Prepare the creation of a new node in the upper layer.
 *
 * (dvp) is the directory in which to create the new node.
 * it is locked on entry and exit.
 * (cnp) is the componentname to be created.
 * (cred, path) are the credentials and path used to fill (cnp).
 */
static int
union_do_lookup(struct vnode *dvp, struct componentname *cnp, kauth_cred_t cred,
    const char *path)
{
	int error;
	struct vnode *vp;

	cnp->cn_nameiop = CREATE;
	cnp->cn_flags = LOCKPARENT | ISLASTCN;
	cnp->cn_cred = cred;
	cnp->cn_nameptr = path;
	cnp->cn_namelen = strlen(path);

	error = VOP_LOOKUP(dvp, &vp, cnp);

	if (error == 0) {
		KASSERT(vp != NULL);
		VOP_ABORTOP(dvp, cnp);
		vrele(vp);
		error = EEXIST;
	} else if (error == EJUSTRETURN) {
		error = 0;
	}

	return error;
}

/*
 * Create a shadow directory in the upper layer.
 * The new vnode is returned locked.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the shadow directory.
 * it is unlocked on entry and exit.
 * (cnp) is the componentname to be created.
 * (vpp) is the returned newly created shadow directory, which
 * is returned locked.
 *
 * N.B. We still attempt to create shadow directories even if the union
 * is mounted read-only, which is a little nonintuitive.
 */
int
union_mkshadow(struct union_mount *um, struct vnode *dvp,
	struct componentname *cnp, struct vnode **vpp)
{
	int error;
	struct vattr va;
	struct componentname cn;
	char *pnbuf;

	if (cnp->cn_namelen + 1 > MAXPATHLEN)
		return ENAMETOOLONG;
	pnbuf = PNBUF_GET();
	memcpy(pnbuf, cnp->cn_nameptr, cnp->cn_namelen);
	pnbuf[cnp->cn_namelen] = '\0';

	vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);

	error = union_do_lookup(dvp, &cn,
	    (um->um_op == UNMNT_ABOVE ? cnp->cn_cred : um->um_cred), pnbuf);
	if (error) {
		VOP_UNLOCK(dvp);
		PNBUF_PUT(pnbuf);
		return error;
	}

	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the user who did
	 * the mount, group from parent directory, and mode
	 * 777 modified by umask (ie mostly identical to the
	 * mkdir syscall).  (jsp, kb)
	 */

	vattr_null(&va);
	va.va_type = VDIR;
	va.va_mode = um->um_cmode;

	KASSERT(*vpp == NULL);
	error = VOP_MKDIR(dvp, vpp, &cn, &va);
	VOP_UNLOCK(dvp);
	PNBUF_PUT(pnbuf);
	return error;
}

/*
 * Create a whiteout entry in the upper layer.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the whiteout.
 * it is locked on entry and exit.
 * (cnp) is the componentname to be created.
 * (un) holds the path to be created.
 */
int
union_mkwhiteout(struct union_mount *um, struct vnode *dvp,
	struct componentname *cnp, struct union_node *un)
{
	int error;
	struct componentname cn;

	error = union_do_lookup(dvp, &cn,
	    (um->um_op == UNMNT_ABOVE ? cnp->cn_cred : um->um_cred),
	    un->un_path);
	if (error)
		return error;

	error = VOP_WHITEOUT(dvp, &cn, CREATE);
	return error;
}

/*
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer.  this function is similar
 * in spirit to calling vn_open but it avoids calling namei().
 * the problem with calling namei is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas union_do_lookup is told where to start.
 */
int
union_vn_create(struct vnode **vpp, struct union_node *un, struct lwp *l)
{
	struct vnode *vp;
	kauth_cred_t cred = l->l_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
	int error;
	int cmode = UN_FILEMODE & ~l->l_proc->p_cwdi->cwdi_cmask;
	struct componentname cn;

	*vpp = NULLVP;

	vn_lock(un->un_dirvp, LK_EXCLUSIVE | LK_RETRY);

	error = union_do_lookup(un->un_dirvp, &cn, l->l_cred,
	    un->un_path);
	if (error) {
		VOP_UNLOCK(un->un_dirvp);
		return error;
	}

	/*
	 * Good - there was no race to create the file
	 * so go ahead and create it.  The permissions
	 * on the file will be 0666 modified by the
	 * current user's umask.  Access to the file, while
	 * it is unioned, will require access to the top *and*
	 * bottom files.  Access when not unioned will simply
	 * require access to the top-level file.
	 * TODO: confirm choice of access permissions.
	 */
	vattr_null(vap);
	vap->va_type = VREG;
	vap->va_mode = cmode;
	vp = NULL;
	error = VOP_CREATE(un->un_dirvp, &vp, &cn, vap);
	if (error) {
		VOP_UNLOCK(un->un_dirvp);
		return error;
	}

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_UNLOCK(un->un_dirvp);
	error = VOP_OPEN(vp, fmode, cred);
	if (error) {
		vput(vp);
		return error;
	}

	vp->v_writecount++;
	VOP_UNLOCK(vp);
	*vpp = vp;
	return 0;
}

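/*
 * Close a vnode opened by union_vn_create, dropping the write count
 * taken there.
 */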
int
union_vn_close(struct vnode *vp, int fmode, kauth_cred_t cred, struct lwp *l)
{

	if (fmode & FWRITE)
		--vp->v_writecount;
	return (VOP_CLOSE(vp, fmode, cred));
}

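/*
 * The upper layer object has been removed.  Uncache the node so it
 * cannot be found again and release its directory cache.
 */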
void
union_removed_upper(struct union_node *un)
{
	struct vnode *vp = UNIONTOV(un);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#if 1
	/*
	 * We do not set the uppervp to NULLVP here, because lowervp
	 * may also be NULLVP, so this routine would end up creating
	 * a bogus union node with no upper or lower VP (that causes
	 * pain in many places that assume at least one VP exists).
	 * Since we've removed this node from the cache hash chains,
	 * it won't be found again.  When all current holders
	 * release it, union_inactive() will vgone() it.
	 */
	union_diruncache(un);
#else
	union_newupper(un, NULLVP);
#endif

	VOP_UNLOCK(vp);

	mutex_enter(&uhash_lock);
	if (un->un_cflags & UN_CACHED) {
		un->un_cflags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}
	mutex_exit(&uhash_lock);
}

#if 0
struct vnode *
union_lowervp(struct vnode *vp)
{
	struct union_node *un = VTOUNION(vp);

	if ((un->un_lowervp != NULLVP) &&
	    (vp->v_type == un->un_lowervp->v_type)) {
		if (vget(un->un_lowervp, 0, true /* wait */) == 0)
			return (un->un_lowervp);
	}

	return (NULLVP);
}
#endif

/*
 * determine whether a whiteout is needed
 * during a remove/rmdir operation.
 */
int
union_dowhiteout(struct union_node *un, kauth_cred_t cred)
{
	struct vattr va;

	if (un->un_lowervp != NULLVP)
		return (1);

	if (VOP_GETATTR(un->un_uppervp, &va, cred) == 0 &&
	    (va.va_flags & OPAQUE))
		return (1);

	return (0);
}

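/*
 * Recursively collect the constituent (non-union) vnodes of (vp).
 * With (vppp) == NULL only count them; otherwise reference each one
 * and store it in the table, panicking if the table is too small.
 */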
static void
union_dircache_r(struct vnode *vp, struct vnode ***vppp, int *cntp)
{
	struct union_node *un;

	if (vp->v_op != union_vnodeop_p) {
		if (vppp) {
			vref(vp);
			*(*vppp)++ = vp;
			if (--(*cntp) == 0)
				panic("union: dircache table too small");
		} else {
			(*cntp)++;
		}

		return;
	}

	un = VTOUNION(vp);
	if (un->un_uppervp != NULLVP)
		union_dircache_r(un->un_uppervp, vppp, cntp);
	if (un->un_lowervp != NULLVP)
		union_dircache_r(un->un_lowervp, vppp, cntp);
}

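/*
 * Build (or advance through) the list of layer directory vnodes for
 * (vp) and allocate a hook node on top of the next lower directory,
 * for use by the readdir hook below.  Returns the new vnode locked,
 * or NULLVP when the list is exhausted.
 */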
struct vnode *
union_dircache(struct vnode *vp, struct lwp *l)
{
	int cnt;
	struct vnode *nvp = NULLVP;
	struct vnode **vpp;
	struct vnode **dircache;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	dircache = VTOUNION(vp)->un_dircache;

	nvp = NULLVP;

	if (dircache == 0) {
		cnt = 0;
		union_dircache_r(vp, 0, &cnt);
		cnt++;
		dircache = (struct vnode **)
				malloc(cnt * sizeof(struct vnode *),
					M_TEMP, M_WAITOK);
		vpp = dircache;
		union_dircache_r(vp, &vpp, &cnt);
		VTOUNION(vp)->un_dircache = dircache;
		*vpp = NULLVP;
		vpp = dircache + 1;
	} else {
		vpp = dircache;
		do {
			if (*vpp++ == VTOUNION(vp)->un_lowervp)
				break;
		} while (*vpp != NULLVP);
	}

	if (*vpp == NULLVP)
		goto out;

	vref(*vpp);
	error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, 0,
	    NULLVP, *vpp, 0);
	if (!error) {
		vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY);
		VTOUNION(vp)->un_dircache = 0;
		VTOUNION(nvp)->un_hooknode = true;
		VTOUNION(nvp)->un_dircache = dircache;
	}

out:
	VOP_UNLOCK(vp);
	return (nvp);
}

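/*
 * Release the directory cache attached to a union node.  The union
 * vnode must be locked exclusively.
 */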
void
union_diruncache(struct union_node *un)
{
	struct vnode **vpp;

	KASSERT(VOP_ISLOCKED(UNIONTOV(un)) == LK_EXCLUSIVE);
	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		free(un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}
}

/*
 * Check whether node can rmdir (check empty).
 */
int
union_check_rmdir(struct union_node *un, kauth_cred_t cred)
{
	int dirlen, eofflag, error;
	char *dirbuf;
	struct vattr va;
	struct vnode *tvp;
	struct dirent *dp, *edp;
	struct componentname cn;
	struct iovec aiov;
	struct uio auio;

	KASSERT(un->un_uppervp != NULL);

	/* Check upper for being opaque. */
	KASSERT(VOP_ISLOCKED(un->un_uppervp));
	error = VOP_GETATTR(un->un_uppervp, &va, cred);
	if (error || (va.va_flags & OPAQUE))
		return error;

	if (un->un_lowervp == NULL)
		return 0;

	/* Check lower for being empty. */
	vn_lock(un->un_lowervp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(un->un_lowervp, &va, cred);
	if (error) {
		VOP_UNLOCK(un->un_lowervp);
		return error;
	}
	dirlen = va.va_blocksize;
	dirbuf = kmem_alloc(dirlen, KM_SLEEP);
	/* error = 0; */
	eofflag = 0;
	auio.uio_offset = 0;
	do {
		aiov.iov_len = dirlen;
		aiov.iov_base = dirbuf;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = aiov.iov_len;
		auio.uio_rw = UIO_READ;
		UIO_SETUP_SYSSPACE(&auio);
		error = VOP_READDIR(un->un_lowervp, &auio, cred, &eofflag,
		    NULL, NULL);
		if (error)
			break;
		edp = (struct dirent *)&dirbuf[dirlen - auio.uio_resid];
		for (dp = (struct dirent *)dirbuf;
		    error == 0 && dp < edp;
		    dp = (struct dirent *)((char *)dp + dp->d_reclen)) {
			if (dp->d_reclen == 0) {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_type == DT_WHT ||
			    (dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			    (dp->d_namlen == 2 && !memcmp(dp->d_name, "..", 2)))
				continue;
			/* Check for presence in the upper layer. */
			cn.cn_nameiop = LOOKUP;
			cn.cn_flags = ISLASTCN | RDONLY;
			cn.cn_cred = cred;
			cn.cn_nameptr = dp->d_name;
			cn.cn_namelen = dp->d_namlen;
			error = VOP_LOOKUP(un->un_uppervp, &tvp, &cn);
			if (error == ENOENT && (cn.cn_flags & ISWHITEOUT)) {
				error = 0;
				continue;
			}
			if (error == 0)
				vrele(tvp);
			error = ENOTEMPTY;
		}
	} while (error == 0 && !eofflag);
	kmem_free(dirbuf, dirlen);
	VOP_UNLOCK(un->un_lowervp);

	return error;
}

/*
 * This hook is called from vn_readdir() to switch to the lower
 * directory after the upper directory has been read.
 */
int
union_readdirhook(struct vnode **vpp, struct file *fp, struct lwp *l)
{
	struct vnode *vp = *vpp, *lvp;
	struct vattr va;
	int error;

	if (vp->v_op != union_vnodeop_p)
		return (0);

	/*
	 * If the directory is opaque,
	 * then don't show lower entries.
	 */
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &va, fp->f_cred);
	VOP_UNLOCK(vp);
	if (error || (va.va_flags & OPAQUE))
		return error;

	if ((lvp = union_dircache(vp, l)) == NULLVP)
		return (0);

	error = VOP_OPEN(lvp, FREAD, fp->f_cred);
	if (error) {
		vput(lvp);
		return (error);
	}
	VOP_UNLOCK(lvp);
	fp->f_vnode = lvp;
	fp->f_offset = 0;
	error = vn_close(vp, FREAD, fp->f_cred);
	if (error)
		return (error);
	*vpp = lvp;
	return (0);
}