1 /*	$NetBSD: union_vnops.c,v 1.2 2003/03/17 11:39:16 jdolecek Exp $	*/
2 
3 /*
4  * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
5  * Copyright (c) 1992, 1993, 1994, 1995
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * Jan-Simon Pendry.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the University of
22  *	California, Berkeley and its contributors.
23  * 4. Neither the name of the University nor the names of its contributors
24  *    may be used to endorse or promote products derived from this software
25  *    without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  *	@(#)union_vnops.c	8.33 (Berkeley) 7/31/95
40  */
41 
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: union_vnops.c,v 1.2 2003/03/17 11:39:16 jdolecek Exp $");
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/proc.h>
48 #include <sys/file.h>
49 #include <sys/time.h>
50 #include <sys/stat.h>
51 #include <sys/vnode.h>
52 #include <sys/mount.h>
53 #include <sys/namei.h>
54 #include <sys/malloc.h>
55 #include <sys/buf.h>
56 #include <sys/queue.h>
57 #include <sys/lock.h>
58 #include <fs/union/union.h>
59 #include <miscfs/genfs/genfs.h>
60 
61 int union_lookup	__P((void *));
62 int union_create	__P((void *));
63 int union_whiteout	__P((void *));
64 int union_mknod		__P((void *));
65 int union_open		__P((void *));
66 int union_close		__P((void *));
67 int union_access	__P((void *));
68 int union_getattr	__P((void *));
69 int union_setattr	__P((void *));
70 int union_read		__P((void *));
71 int union_write		__P((void *));
72 int union_lease		__P((void *));
73 int union_ioctl		__P((void *));
74 int union_poll		__P((void *));
75 int union_revoke	__P((void *));
76 int union_mmap		__P((void *));
77 int union_fsync		__P((void *));
78 int union_seek		__P((void *));
79 int union_remove	__P((void *));
80 int union_link		__P((void *));
81 int union_rename	__P((void *));
82 int union_mkdir		__P((void *));
83 int union_rmdir		__P((void *));
84 int union_symlink	__P((void *));
85 int union_readdir	__P((void *));
86 int union_readlink	__P((void *));
87 int union_abortop	__P((void *));
88 int union_inactive	__P((void *));
89 int union_reclaim	__P((void *));
90 int union_lock		__P((void *));
91 int union_unlock	__P((void *));
92 int union_bmap		__P((void *));
93 int union_print		__P((void *));
94 int union_islocked	__P((void *));
95 int union_pathconf	__P((void *));
96 int union_advlock	__P((void *));
97 int union_strategy	__P((void *));
98 int union_getpages	__P((void *));
99 int union_putpages	__P((void *));
100 int union_kqfilter	__P((void *));
101 
102 static void union_fixup __P((struct union_node *));
103 static int union_lookup1 __P((struct vnode *, struct vnode **,
104 			      struct vnode **, struct componentname *));
105 
106 
107 /*
108  * Global vfs data structures
109  */
110 int (**union_vnodeop_p) __P((void *));
111 const struct vnodeopv_entry_desc union_vnodeop_entries[] = {
112 	{ &vop_default_desc, vn_default_error },
113 	{ &vop_lookup_desc, union_lookup },		/* lookup */
114 	{ &vop_create_desc, union_create },		/* create */
115 	{ &vop_whiteout_desc, union_whiteout },		/* whiteout */
116 	{ &vop_mknod_desc, union_mknod },		/* mknod */
117 	{ &vop_open_desc, union_open },			/* open */
118 	{ &vop_close_desc, union_close },		/* close */
119 	{ &vop_access_desc, union_access },		/* access */
120 	{ &vop_getattr_desc, union_getattr },		/* getattr */
121 	{ &vop_setattr_desc, union_setattr },		/* setattr */
122 	{ &vop_read_desc, union_read },			/* read */
123 	{ &vop_write_desc, union_write },		/* write */
124 	{ &vop_lease_desc, union_lease },		/* lease */
125 	{ &vop_ioctl_desc, union_ioctl },		/* ioctl */
126 	{ &vop_poll_desc, union_poll },			/* poll */
127 	{ &vop_revoke_desc, union_revoke },		/* revoke */
128 	{ &vop_mmap_desc, union_mmap },			/* mmap */
129 	{ &vop_fsync_desc, union_fsync },		/* fsync */
130 	{ &vop_seek_desc, union_seek },			/* seek */
131 	{ &vop_remove_desc, union_remove },		/* remove */
132 	{ &vop_link_desc, union_link },			/* link */
133 	{ &vop_rename_desc, union_rename },		/* rename */
134 	{ &vop_mkdir_desc, union_mkdir },		/* mkdir */
135 	{ &vop_rmdir_desc, union_rmdir },		/* rmdir */
136 	{ &vop_symlink_desc, union_symlink },		/* symlink */
137 	{ &vop_readdir_desc, union_readdir },		/* readdir */
138 	{ &vop_readlink_desc, union_readlink },		/* readlink */
139 	{ &vop_abortop_desc, union_abortop },		/* abortop */
140 	{ &vop_inactive_desc, union_inactive },		/* inactive */
141 	{ &vop_reclaim_desc, union_reclaim },		/* reclaim */
142 	{ &vop_lock_desc, union_lock },			/* lock */
143 	{ &vop_unlock_desc, union_unlock },		/* unlock */
144 	{ &vop_bmap_desc, union_bmap },			/* bmap */
145 	{ &vop_strategy_desc, union_strategy },		/* strategy */
146 	{ &vop_print_desc, union_print },		/* print */
147 	{ &vop_islocked_desc, union_islocked },		/* islocked */
148 	{ &vop_pathconf_desc, union_pathconf },		/* pathconf */
149 	{ &vop_advlock_desc, union_advlock },		/* advlock */
150 	{ &vop_getpages_desc, union_getpages },		/* getpages */
151 	{ &vop_putpages_desc, union_putpages },		/* putpages */
152 	{ &vop_kqfilter_desc, union_kqfilter },		/* kqfilter */
153 #ifdef notdef
154 	{ &vop_blkatoff_desc, union_blkatoff },		/* blkatoff */
155 	{ &vop_valloc_desc, union_valloc },		/* valloc */
156 	{ &vop_vfree_desc, union_vfree },		/* vfree */
157 	{ &vop_truncate_desc, union_truncate },		/* truncate */
158 	{ &vop_update_desc, union_update },		/* update */
159 	{ &vop_bwrite_desc, union_bwrite },		/* bwrite */
160 #endif
161 	{ NULL, NULL }
162 };
163 const struct vnodeopv_desc union_vnodeop_opv_desc =
164 	{ &union_vnodeop_p, union_vnodeop_entries };
165 
166 #define FIXUP(un) { \
167 	if (((un)->un_flags & UN_ULOCK) == 0) { \
168 		union_fixup(un); \
169 	} \
170 }
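/*
 * FIXUP() is invoked just before an operation is forwarded to a union
 * node's un_uppervp: if UN_ULOCK is clear (the upper vnode is not
 * recorded as locked), union_fixup() below takes the upper vnode's lock
 * and sets UN_ULOCK, so the lock actually held matches the union node's
 * bookkeeping before the underlying VOP_*() is called.
 */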
171 
172 static void
173 union_fixup(un)
174 	struct union_node *un;
175 {
176 
177 	vn_lock(un->un_uppervp, LK_EXCLUSIVE | LK_RETRY);
178 	un->un_flags |= UN_ULOCK;
179 }
180 
181 static int
182 union_lookup1(udvp, dvpp, vpp, cnp)
183 	struct vnode *udvp;
184 	struct vnode **dvpp;
185 	struct vnode **vpp;
186 	struct componentname *cnp;
187 {
188 	int error;
189 	struct vnode *tdvp;
190 	struct vnode *dvp;
191 	struct mount *mp;
192 
193 	dvp = *dvpp;
194 
195 	/*
196 	 * If stepping up the directory tree, check for going
197 	 * back across the mount point, in which case do what
198 	 * lookup would do by stepping back down the mount
199 	 * hierarchy.
200 	 */
201 	if (cnp->cn_flags & ISDOTDOT) {
202 		while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
203 			/*
204 			 * Don't do the NOCROSSMOUNT check
205 			 * at this level.  By definition,
206 			 * union fs deals with namespaces, not
207 			 * filesystems.
208 			 */
209 			tdvp = dvp;
210 			*dvpp = dvp = dvp->v_mount->mnt_vnodecovered;
211 			vput(tdvp);
212 			VREF(dvp);
213 			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
214 		}
215 	}
216 
217 	error = VOP_LOOKUP(dvp, &tdvp, cnp);
218 	if (error)
219 		return (error);
220 
221 	/*
222 	 * The parent directory will have been unlocked, unless lookup
223 	 * found the last component, in which case re-lock the node
224 	 * here to allow it to be unlocked again (phew) in union_lookup.
225 	 */
226 	if (dvp != tdvp && !(cnp->cn_flags & ISLASTCN))
227 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
228 
229 	dvp = tdvp;
230 
231 	/*
232 	 * Lastly check if the current node is a mount point in
233 	 * which case walk up the mount hierarchy making sure not to
234 	 * bump into the root of the mount tree (ie. dvp != udvp).
235 	 */
236 	while (dvp != udvp && (dvp->v_type == VDIR) &&
237 	       (mp = dvp->v_mountedhere)) {
238 
239 		if (vfs_busy(mp, 0, 0))
240 			continue;
241 
242 		error = VFS_ROOT(mp, &tdvp);
243 		vfs_unbusy(mp);
244 		if (error) {
245 			vput(dvp);
246 			return (error);
247 		}
248 
249 		vput(dvp);
250 		dvp = tdvp;
251 	}
252 
253 	*vpp = dvp;
254 	return (0);
255 }
256 
257 int
258 union_lookup(v)
259 	void *v;
260 {
261 	struct vop_lookup_args /* {
262 		struct vnodeop_desc *a_desc;
263 		struct vnode *a_dvp;
264 		struct vnode **a_vpp;
265 		struct componentname *a_cnp;
266 	} */ *ap = v;
267 	int error;
268 	int uerror, lerror;
269 	struct vnode *uppervp, *lowervp;
270 	struct vnode *upperdvp, *lowerdvp;
271 	struct vnode *dvp = ap->a_dvp;
272 	struct union_node *dun = VTOUNION(dvp);
273 	struct componentname *cnp = ap->a_cnp;
274 	int lockparent = cnp->cn_flags & LOCKPARENT;
275 	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
276 	struct ucred *saved_cred = NULL;
277 	int iswhiteout;
278 	struct vattr va;
279 
280 #ifdef notyet
281 	if (cnp->cn_namelen == 3 &&
282 			cnp->cn_nameptr[2] == '.' &&
283 			cnp->cn_nameptr[1] == '.' &&
284 			cnp->cn_nameptr[0] == '.') {
285 		dvp = *ap->a_vpp = LOWERVP(ap->a_dvp);
286 		if (dvp == NULLVP)
287 			return (ENOENT);
288 		VREF(dvp);
289 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
290 		if (!lockparent || !(cnp->cn_flags & ISLASTCN))
291 			VOP_UNLOCK(ap->a_dvp, 0);
292 		return (0);
293 	}
294 #endif
295 
296 	if ((cnp->cn_flags & ISLASTCN) &&
297 	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
298 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
299 		return (EROFS);
300 
301 	cnp->cn_flags |= LOCKPARENT;
302 
303 	upperdvp = dun->un_uppervp;
304 	lowerdvp = dun->un_lowervp;
305 	uppervp = NULLVP;
306 	lowervp = NULLVP;
307 	iswhiteout = 0;
308 
309 	/*
310 	 * do the lookup in the upper level.
311 	 * if that level consumes additional pathnames,
312 	 * then assume that something special is going
313 	 * on and just return that vnode.
314 	 */
315 	if (upperdvp != NULLVP) {
316 		FIXUP(dun);
317 		/*
318 		 * If we're doing `..' in the underlying filesystem,
319 		 * we must drop our lock on the union node before
320 		 * going up the tree in the lower file system--if we block
321 		 * on the lowervp lock, and that's held by someone else
322 		 * coming down the tree and who's waiting for our lock,
323 		 * we would be hosed.
324 		 */
325 		if (cnp->cn_flags & ISDOTDOT) {
326 			/* retain lock on underlying VP */
327 			dun->un_flags |= UN_KLOCK;
328 			VOP_UNLOCK(dvp, 0);
329 		}
330 		uerror = union_lookup1(um->um_uppervp, &upperdvp,
331 					&uppervp, cnp);
332 
333 		if (cnp->cn_flags & ISDOTDOT) {
334 			if (dun->un_uppervp == upperdvp) {
335 				/*
336 				 * we got the underlying bugger back locked...
337 				 * now take back the union node lock.  Since we
338 				 *  hold the uppervp lock, we can diddle union
339 				 * locking flags at will. :)
340 				 */
341 				dun->un_flags |= UN_ULOCK;
342 			}
343 			/*
344 			 * if upperdvp got swapped out, it means we did
345 			 * some mount point magic, and we do not have
346 			 * dun->un_uppervp locked currently--so we get it
347 			 * locked here (don't set the UN_ULOCK flag).
348 			 */
349 			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
350 		}
351 		if (cnp->cn_consume != 0) {
352 			*ap->a_vpp = uppervp;
353 			if (!lockparent)
354 				cnp->cn_flags &= ~LOCKPARENT;
355 			return (uerror);
356 		}
357 		if (uerror == ENOENT || uerror == EJUSTRETURN) {
358 			if (cnp->cn_flags & ISWHITEOUT) {
359 				iswhiteout = 1;
360 			} else if (lowerdvp != NULLVP) {
361 				lerror = VOP_GETATTR(upperdvp, &va,
362 					cnp->cn_cred, cnp->cn_proc);
363 				if (lerror == 0 && (va.va_flags & OPAQUE))
364 					iswhiteout = 1;
365 			}
366 		}
367 	} else {
368 		uerror = ENOENT;
369 	}
370 
371 	/*
372 	 * in a similar way to the upper layer, do the lookup
373 	 * in the lower layer.   this time, if there is some
374 	 * component magic going on, then vput whatever we got
375 	 * back from the upper layer and return the lower vnode
376 	 * instead.
377 	 */
378 	if (lowerdvp != NULLVP && !iswhiteout) {
379 		int nameiop;
380 
381 		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY);
382 
383 		/*
384 		 * Only do a LOOKUP on the bottom node, since
385 		 * we won't be making changes to it anyway.
386 		 */
387 		nameiop = cnp->cn_nameiop;
388 		cnp->cn_nameiop = LOOKUP;
389 		if (um->um_op == UNMNT_BELOW) {
390 			saved_cred = cnp->cn_cred;
391 			cnp->cn_cred = um->um_cred;
392 		}
393 		/*
394 		 * we shouldn't have to worry about locking interactions
395 		 * between the lower layer and our union layer (w.r.t.
396 		 * `..' processing) because we don't futz with lowervp
397 		 * locks in the union-node instantiation code path.
398 		 */
399 		lerror = union_lookup1(um->um_lowervp, &lowerdvp,
400 				&lowervp, cnp);
401 		if (um->um_op == UNMNT_BELOW)
402 			cnp->cn_cred = saved_cred;
403 		cnp->cn_nameiop = nameiop;
404 
405 		if (lowervp != lowerdvp)
406 			VOP_UNLOCK(lowerdvp, 0);
407 
408 		if (cnp->cn_consume != 0) {
409 			if (uppervp != NULLVP) {
410 				if (uppervp == upperdvp)
411 					vrele(uppervp);
412 				else
413 					vput(uppervp);
414 				uppervp = NULLVP;
415 			}
416 			*ap->a_vpp = lowervp;
417 			if (!lockparent)
418 				cnp->cn_flags &= ~LOCKPARENT;
419 			return (lerror);
420 		}
421 	} else {
422 		lerror = ENOENT;
423 		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
424 			lowervp = LOWERVP(dun->un_pvp);
425 			if (lowervp != NULLVP) {
426 				VREF(lowervp);
427 				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY);
428 				lerror = 0;
429 			}
430 		}
431 	}
432 
433 	if (!lockparent)
434 		cnp->cn_flags &= ~LOCKPARENT;
435 
436 	/*
437 	 * EJUSTRETURN is used by underlying filesystems to indicate that
438 	 * a directory modification op was started successfully.
439 	 * This will only happen in the upper layer, since
440 	 * the lower layer only does LOOKUPs.
441 	 * If this union is mounted read-only, bounce it now.
442 	 */
443 
444 	if ((uerror == EJUSTRETURN) && (cnp->cn_flags & ISLASTCN) &&
445 	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
446 	    ((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)))
447 		uerror = EROFS;
448 
449 	/*
450 	 * at this point, we have uerror and lerror indicating
451 	 * possible errors with the lookups in the upper and lower
452 	 * layers.  additionally, uppervp and lowervp are (locked)
453 	 * references to existing vnodes in the upper and lower layers.
454 	 *
455 	 * there are now three cases to consider.
456 	 * 1. if both layers returned an error, then return whatever
457 	 *    error the upper layer generated.
458 	 *
459 	 * 2. if the top layer failed and the bottom layer succeeded
460 	 *    then two subcases occur.
461 	 *    a.  the bottom vnode is not a directory, in which
462 	 *	  case just return a new union vnode referencing
463 	 *	  an empty top layer and the existing bottom layer.
464 	 *    b.  the bottom vnode is a directory, in which case
465 	 *	  create a new directory in the top-level and
466 	 *	  continue as in case 3.
467 	 *
468 	 * 3. if the top layer succeeded then return a new union
469 	 *    vnode referencing whatever the new top layer and
470 	 *    whatever the bottom layer returned.
471 	 */
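	/*
	 * For example, removing a file that exists only in the lower
	 * layer follows case 2a: the union vnode gets an empty top layer
	 * and the removal is later recorded as a whiteout.  A directory
	 * that exists only in the lower layer follows case 2b:
	 * union_mkshadow() first creates the matching shadow directory
	 * in the upper layer.
	 */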
472 
473 	*ap->a_vpp = NULLVP;
474 
475 
476 	/* case 1. */
477 	if ((uerror != 0) && (lerror != 0)) {
478 		return (uerror);
479 	}
480 
481 	/* case 2. */
482 	if (uerror != 0 /* && (lerror == 0) */ ) {
483 		if (lowervp->v_type == VDIR) { /* case 2b. */
484 			/*
485 			 * We may be racing another process to make the
486 			 * upper-level shadow directory.  Be careful with
487 			 * locks/etc!
488 			 */
489 			dun->un_flags &= ~UN_ULOCK;
490 			VOP_UNLOCK(upperdvp, 0);
491 			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
492 			vn_lock(upperdvp, LK_EXCLUSIVE | LK_RETRY);
493 			dun->un_flags |= UN_ULOCK;
494 
495 			if (uerror) {
496 				if (lowervp != NULLVP) {
497 					vput(lowervp);
498 					lowervp = NULLVP;
499 				}
500 				return (uerror);
501 			}
502 		}
503 	}
504 
505 	if (lowervp != NULLVP)
506 		VOP_UNLOCK(lowervp, 0);
507 
508 	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
509 			      uppervp, lowervp, 1);
510 
511 	if (error) {
512 		if (uppervp != NULLVP)
513 			vput(uppervp);
514 		if (lowervp != NULLVP)
515 			vrele(lowervp);
516 	} else {
517 		if (*ap->a_vpp != dvp)
518 			if (!lockparent || !(cnp->cn_flags & ISLASTCN))
519 				VOP_UNLOCK(dvp, 0);
520 		if (cnp->cn_namelen == 1 &&
521 		    cnp->cn_nameptr[0] == '.' &&
522 		    *ap->a_vpp != dvp) {
523 			panic("union_lookup -> . (%p) != startdir (%p)",
524 			    *ap->a_vpp, dvp);
525 		}
526 	}
527 
528 	return (error);
529 }
530 
531 int
532 union_create(v)
533 	void *v;
534 {
535 	struct vop_create_args /* {
536 		struct vnode *a_dvp;
537 		struct vnode **a_vpp;
538 		struct componentname *a_cnp;
539 		struct vattr *a_vap;
540 	} */ *ap = v;
541 	struct union_node *un = VTOUNION(ap->a_dvp);
542 	struct vnode *dvp = un->un_uppervp;
543 	struct componentname *cnp = ap->a_cnp;
544 
545 	if (dvp != NULLVP) {
546 		int error;
547 		struct vnode *vp;
548 		struct mount *mp;
549 
550 		FIXUP(un);
551 
552 		VREF(dvp);
553 		un->un_flags |= UN_KLOCK;
554 		mp = ap->a_dvp->v_mount;
555 		vput(ap->a_dvp);
556 		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
557 		if (error)
558 			return (error);
559 
560 		error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP, cnp, vp,
561 				NULLVP, 1);
562 		if (error)
563 			vput(vp);
564 		return (error);
565 	}
566 
567 	vput(ap->a_dvp);
568 	return (EROFS);
569 }
570 
571 int
572 union_whiteout(v)
573 	void *v;
574 {
575 	struct vop_whiteout_args /* {
576 		struct vnode *a_dvp;
577 		struct componentname *a_cnp;
578 		int a_flags;
579 	} */ *ap = v;
580 	struct union_node *un = VTOUNION(ap->a_dvp);
581 	struct componentname *cnp = ap->a_cnp;
582 
583 	if (un->un_uppervp == NULLVP)
584 		return (EOPNOTSUPP);
585 
586 	FIXUP(un);
587 	return (VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags));
588 }
589 
590 int
591 union_mknod(v)
592 	void *v;
593 {
594 	struct vop_mknod_args /* {
595 		struct vnode *a_dvp;
596 		struct vnode **a_vpp;
597 		struct componentname *a_cnp;
598 		struct vattr *a_vap;
599 	} */ *ap = v;
600 	struct union_node *un = VTOUNION(ap->a_dvp);
601 	struct vnode *dvp = un->un_uppervp;
602 	struct componentname *cnp = ap->a_cnp;
603 
604 	if (dvp != NULLVP) {
605 		int error;
606 		struct vnode *vp;
607 		struct mount *mp;
608 
609 		FIXUP(un);
610 
611 		VREF(dvp);
612 		un->un_flags |= UN_KLOCK;
613 		mp = ap->a_dvp->v_mount;
614 		vput(ap->a_dvp);
615 		error = VOP_MKNOD(dvp, &vp, cnp, ap->a_vap);
616 		if (error)
617 			return (error);
618 
619 		error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
620 				      cnp, vp, NULLVP, 1);
621 		if (error)
622 			vput(vp);
623 		return (error);
624 	}
625 
626 	vput(ap->a_dvp);
627 	return (EROFS);
628 }
629 
630 int
631 union_open(v)
632 	void *v;
633 {
634 	struct vop_open_args /* {
635 		struct vnodeop_desc *a_desc;
636 		struct vnode *a_vp;
637 		int a_mode;
638 		struct ucred *a_cred;
639 		struct proc *a_p;
640 	} */ *ap = v;
641 	struct union_node *un = VTOUNION(ap->a_vp);
642 	struct vnode *tvp;
643 	int mode = ap->a_mode;
644 	struct ucred *cred = ap->a_cred;
645 	struct proc *p = ap->a_p;
646 	int error;
647 
648 	/*
649 	 * If there is an existing upper vp then simply open that.
650 	 */
651 	tvp = un->un_uppervp;
652 	if (tvp == NULLVP) {
653 		/*
654 		 * If the lower vnode is being opened for writing, then
655 		 * copy the file contents to the upper vnode and open that,
656 		 * otherwise can simply open the lower vnode.
657 		 */
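		/*
		 * E.g. an open for writing of a regular file that so far
		 * exists only in the lower layer lands here: union_copyup()
		 * creates the upper vnode, and its second argument,
		 * (mode & O_TRUNC) == 0, means the old contents are copied
		 * up only when the open does not truncate the file.
		 */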
658 		tvp = un->un_lowervp;
659 		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
660 			error = union_copyup(un, (mode&O_TRUNC) == 0, cred, p);
661 			if (error == 0)
662 				error = VOP_OPEN(un->un_uppervp, mode, cred, p);
663 			return (error);
664 		}
665 
666 		/*
667 		 * Just open the lower vnode, but check for nodev mount flag
668 		 */
669 		if ((tvp->v_type == VBLK || tvp->v_type == VCHR) &&
670 		    (ap->a_vp->v_mount->mnt_flag & MNT_NODEV))
671 			return ENXIO;
672 		un->un_openl++;
673 		vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY);
674 		error = VOP_OPEN(tvp, mode, cred, p);
675 		VOP_UNLOCK(tvp, 0);
676 
677 		return (error);
678 	}
679 	/*
680 	 * Just open the upper vnode, checking for nodev mount flag first
681 	 */
682 	if ((tvp->v_type == VBLK || tvp->v_type == VCHR) &&
683 	    (ap->a_vp->v_mount->mnt_flag & MNT_NODEV))
684 		return ENXIO;
685 
686 	FIXUP(un);
687 
688 	error = VOP_OPEN(tvp, mode, cred, p);
689 
690 	return (error);
691 }
692 
693 int
694 union_close(v)
695 	void *v;
696 {
697 	struct vop_close_args /* {
698 		struct vnode *a_vp;
699 		int  a_fflag;
700 		struct ucred *a_cred;
701 		struct proc *a_p;
702 	} */ *ap = v;
703 	struct union_node *un = VTOUNION(ap->a_vp);
704 	struct vnode *vp;
705 
706 	vp = un->un_uppervp;
707 	if (vp == NULLVP) {
708 #ifdef UNION_DIAGNOSTIC
709 		if (un->un_openl <= 0)
710 			panic("union: un_openl cnt");
711 #endif
712 		--un->un_openl;
713 		vp = un->un_lowervp;
714 	}
715 
716 #ifdef DIAGNOSTIC
717 	if (vp == NULLVP) {
718 		vprint("empty union vnode", vp);
719 		panic("union_close empty vnode");
720 	}
721 #endif
722 
723 	ap->a_vp = vp;
724 	return (VCALL(vp, VOFFSET(vop_close), ap));
725 }
726 
727 /*
728  * Check access permission on the union vnode.
729  * The access check being enforced is to check
730  * against both the underlying vnode, and any
731  * copied vnode.  This ensures that no additional
732  * file permissions are given away simply because
733  * the user caused an implicit file copy.
734  */
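/*
 * Note that when the union was mounted below (UNMNT_BELOW), a file
 * visible only in the lower layer is checked twice: once with the
 * caller's credentials and once with the credentials of the user who
 * mounted the union (um_cred), as done at the bottom of this routine.
 */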
735 int
736 union_access(v)
737 	void *v;
738 {
739 	struct vop_access_args /* {
740 		struct vnodeop_desc *a_desc;
741 		struct vnode *a_vp;
742 		int a_mode;
743 		struct ucred *a_cred;
744 		struct proc *a_p;
745 	} */ *ap = v;
746 	struct vnode *vp = ap->a_vp;
747 	struct union_node *un = VTOUNION(vp);
748 	int error = EACCES;
749 	struct union_mount *um = MOUNTTOUNIONMOUNT(vp->v_mount);
750 
751 	/*
752 	 * Disallow write attempts on read-only file systems;
753 	 * unless the file is a socket, fifo, or a block or
754 	 * character device resident on the file system.
755 	 */
756 	if (ap->a_mode & VWRITE) {
757 		switch (vp->v_type) {
758 		case VDIR:
759 		case VLNK:
760 		case VREG:
761 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
762 				return (EROFS);
763 			break;
764 		case VBAD:
765 		case VBLK:
766 		case VCHR:
767 		case VSOCK:
768 		case VFIFO:
769 		case VNON:
770 		default:
771 			break;
772 		}
773 	}
774 
775 
776 	if ((vp = un->un_uppervp) != NULLVP) {
777 		FIXUP(un);
778 		ap->a_vp = vp;
779 		return (VCALL(vp, VOFFSET(vop_access), ap));
780 	}
781 
782 	if ((vp = un->un_lowervp) != NULLVP) {
783 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
784 		ap->a_vp = vp;
785 		error = VCALL(vp, VOFFSET(vop_access), ap);
786 		if (error == 0) {
787 			if (um->um_op == UNMNT_BELOW) {
788 				ap->a_cred = um->um_cred;
789 				error = VCALL(vp, VOFFSET(vop_access), ap);
790 			}
791 		}
792 		VOP_UNLOCK(vp, 0);
793 		if (error)
794 			return (error);
795 	}
796 
797 	return (error);
798 }
799 
800 /*
801  * We handle getattr only to change the fsid and
802  * track object sizes
803  */
804 int
805 union_getattr(v)
806 	void *v;
807 {
808 	struct vop_getattr_args /* {
809 		struct vnode *a_vp;
810 		struct vattr *a_vap;
811 		struct ucred *a_cred;
812 		struct proc *a_p;
813 	} */ *ap = v;
814 	int error;
815 	struct union_node *un = VTOUNION(ap->a_vp);
816 	struct vnode *vp = un->un_uppervp;
817 	struct vattr *vap;
818 	struct vattr va;
819 
820 
821 	/*
822 	 * Some programs walk the filesystem hierarchy by counting
823 	 * links to directories to avoid stat'ing all the time.
824 	 * This means the link count on directories needs to be "correct".
825 	 * The only way to do that is to call getattr on both layers
826 	 * and fix up the link count.  The link count will not necessarily
827 	 * be accurate but will be large enough to defeat the tree walkers.
828 	 *
829 	 * To make life more interesting, some filesystems don't keep
830 	 * track of link counts in the expected way, and return a
831 	 * link count of `1' for those directories; if either of the
832 	 * component directories returns a link count of `1', we return a 1.
833 	 */
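	/*
	 * Concretely: an upper link count of 3 and a lower link count of
	 * 4 are reported as 7; 2 and 2 stay 2; and a count of 1 from
	 * either layer forces the reported count to 1.  See the va_nlink
	 * fixup near the end of this routine.
	 */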
834 
835 	vap = ap->a_vap;
836 
837 	vp = un->un_uppervp;
838 	if (vp != NULLVP) {
839 		/*
840 		 * It's not clear whether VOP_GETATTR is to be
841 		 * called with the vnode locked or not.  stat() calls
842 		 * it with (vp) locked, and fstat calls it with
843 		 * (vp) unlocked.
844 		 * In the mean time, compensate here by checking
845 		 * the union_node's lock flag.
846 		 */
847 		if (un->un_flags & UN_LOCKED)
848 			FIXUP(un);
849 
850 		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
851 		if (error)
852 			return (error);
853 		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
854 	}
855 
856 	if (vp == NULLVP) {
857 		vp = un->un_lowervp;
858 	} else if (vp->v_type == VDIR) {
859 		vp = un->un_lowervp;
860 		if (vp != NULLVP)
861 			vap = &va;
862 	} else {
863 		vp = NULLVP;
864 	}
865 
866 	if (vp != NULLVP) {
867 		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
868 		if (error)
869 			return (error);
870 		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
871 	}
872 
873 	if ((vap != ap->a_vap) && (vap->va_type == VDIR)) {
874 		/*
875 		 * Link count manipulation:
876 		 *	- If both return "2", return 2 (no subdirs)
877 		 *	- If one or the other return "1", return "1" (ENOCLUE)
878 		 */
879 		if ((ap->a_vap->va_nlink == 2) &&
880 		    (vap->va_nlink == 2))
881 			;
882 		else if (ap->a_vap->va_nlink != 1) {
883 			if (vap->va_nlink == 1)
884 				ap->a_vap->va_nlink = 1;
885 			else
886 				ap->a_vap->va_nlink += vap->va_nlink;
887 		}
888 	}
889 	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
890 	return (0);
891 }
892 
893 int
894 union_setattr(v)
895 	void *v;
896 {
897 	struct vop_setattr_args /* {
898 		struct vnode *a_vp;
899 		struct vattr *a_vap;
900 		struct ucred *a_cred;
901 		struct proc *a_p;
902 	} */ *ap = v;
903 	struct vattr *vap = ap->a_vap;
904 	struct vnode *vp = ap->a_vp;
905 	struct union_node *un = VTOUNION(vp);
906 	int error;
907 
908 	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
909 	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
910 	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
911 	    (vp->v_mount->mnt_flag & MNT_RDONLY))
912 		return (EROFS);
913 	if (vap->va_size != VNOVAL) {
914 		switch (vp->v_type) {
915 		case VDIR:
916 			return (EISDIR);
917 		case VCHR:
918 		case VBLK:
919 		case VSOCK:
920 		case VFIFO:
921 			break;
922 		case VREG:
923 		case VLNK:
924 		default:
925 			/*
926 			 * Disallow write attempts if the filesystem is
927 			 * mounted read-only.
928 			 */
929 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
930 				return (EROFS);
931 		}
932 	}
933 
934 	/*
935 	 * Handle case of truncating lower object to zero size,
936 	 * by creating a zero length upper object.  This is to
937 	 * handle the case of open with O_TRUNC and O_CREAT.
938 	 */
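	/*
	 * E.g. open(..., O_WRONLY|O_CREAT|O_TRUNC) on a file that exists
	 * only in the lower layer reaches this point with va_size == 0,
	 * so union_copyup() is asked not to copy the old contents and
	 * just creates an empty upper object.
	 */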
939 	if ((un->un_uppervp == NULLVP) &&
940 	    /* assert(un->un_lowervp != NULLVP) */
941 	    (un->un_lowervp->v_type == VREG)) {
942 		error = union_copyup(un, (vap->va_size != 0),
943 						ap->a_cred, ap->a_p);
944 		if (error)
945 			return (error);
946 	}
947 
948 	/*
949 	 * Try to set attributes in upper layer,
950 	 * otherwise return read-only filesystem error.
951 	 */
952 	if (un->un_uppervp != NULLVP) {
953 		FIXUP(un);
954 		error = VOP_SETATTR(un->un_uppervp, vap,
955 					ap->a_cred, ap->a_p);
956 		if ((error == 0) && (vap->va_size != VNOVAL))
957 			union_newsize(ap->a_vp, vap->va_size, VNOVAL);
958 	} else {
959 		error = EROFS;
960 	}
961 
962 	return (error);
963 }
964 
965 int
966 union_read(v)
967 	void *v;
968 {
969 	struct vop_read_args /* {
970 		struct vnode *a_vp;
971 		struct uio *a_uio;
972 		int  a_ioflag;
973 		struct ucred *a_cred;
974 	} */ *ap = v;
975 	int error;
976 	struct vnode *vp = OTHERVP(ap->a_vp);
977 	int dolock = (vp == LOWERVP(ap->a_vp));
978 
979 	if (dolock)
980 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
981 	else
982 		FIXUP(VTOUNION(ap->a_vp));
983 	error = VOP_READ(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
984 	if (dolock)
985 		VOP_UNLOCK(vp, 0);
986 
987 	/*
988 	 * XXX
989 	 * perhaps the size of the underlying object has changed under
990 	 * our feet.  take advantage of the offset information present
991 	 * in the uio structure.
992 	 */
993 	if (error == 0) {
994 		struct union_node *un = VTOUNION(ap->a_vp);
995 		off_t cur = ap->a_uio->uio_offset;
996 
997 		if (vp == un->un_uppervp) {
998 			if (cur > un->un_uppersz)
999 				union_newsize(ap->a_vp, cur, VNOVAL);
1000 		} else {
1001 			if (cur > un->un_lowersz)
1002 				union_newsize(ap->a_vp, VNOVAL, cur);
1003 		}
1004 	}
1005 
1006 	return (error);
1007 }
1008 
1009 int
1010 union_write(v)
1011 	void *v;
1012 {
1013 	struct vop_write_args /* {
1014 		struct vnode *a_vp;
1015 		struct uio *a_uio;
1016 		int  a_ioflag;
1017 		struct ucred *a_cred;
1018 	} */ *ap = v;
1019 	int error;
1020 	struct vnode *vp;
1021 	struct union_node *un = VTOUNION(ap->a_vp);
1022 
1023 	vp = UPPERVP(ap->a_vp);
1024 	if (vp == NULLVP)
1025 		panic("union: missing upper layer in write");
1026 
1027 	FIXUP(un);
1028 	error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1029 
1030 	/*
1031 	 * the size of the underlying object may be changed by the
1032 	 * write.
1033 	 */
1034 	if (error == 0) {
1035 		off_t cur = ap->a_uio->uio_offset;
1036 
1037 		if (cur > un->un_uppersz)
1038 			union_newsize(ap->a_vp, cur, VNOVAL);
1039 	}
1040 
1041 	return (error);
1042 }
1043 
1044 int
1045 union_lease(v)
1046 	void *v;
1047 {
1048 	struct vop_lease_args /* {
1049 		struct vnode *a_vp;
1050 		struct proc *a_p;
1051 		struct ucred *a_cred;
1052 		int a_flag;
1053 	} */ *ap = v;
1054 	struct vnode *ovp = OTHERVP(ap->a_vp);
1055 
1056 	ap->a_vp = ovp;
1057 	return (VCALL(ovp, VOFFSET(vop_lease), ap));
1058 }
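/*
 * union_lease() above and several of the routines that follow (ioctl,
 * poll, mmap, seek, advlock) are plain pass-throughs: OTHERVP() (see
 * union.h) selects the upper vnode when one exists and the lower vnode
 * otherwise, and the operation is re-dispatched against that vnode
 * with VCALL().
 */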
1059 
1060 int
1061 union_ioctl(v)
1062 	void *v;
1063 {
1064 	struct vop_ioctl_args /* {
1065 		struct vnode *a_vp;
1066 		int  a_command;
1067 		caddr_t  a_data;
1068 		int  a_fflag;
1069 		struct ucred *a_cred;
1070 		struct proc *a_p;
1071 	} */ *ap = v;
1072 	struct vnode *ovp = OTHERVP(ap->a_vp);
1073 
1074 	ap->a_vp = ovp;
1075 	return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
1076 }
1077 
1078 int
1079 union_poll(v)
1080 	void *v;
1081 {
1082 	struct vop_poll_args /* {
1083 		struct vnode *a_vp;
1084 		int a_events;
1085 		struct proc *a_p;
1086 	} */ *ap = v;
1087 	struct vnode *ovp = OTHERVP(ap->a_vp);
1088 
1089 	ap->a_vp = ovp;
1090 	return (VCALL(ovp, VOFFSET(vop_poll), ap));
1091 }
1092 
1093 int
1094 union_revoke(v)
1095 	void *v;
1096 {
1097 	struct vop_revoke_args /* {
1098 		struct vnode *a_vp;
1099 		int a_flags;
1100 		struct proc *a_p;
1101 	} */ *ap = v;
1102 	struct vnode *vp = ap->a_vp;
1103 
1104 	if (UPPERVP(vp))
1105 		VOP_REVOKE(UPPERVP(vp), ap->a_flags);
1106 	if (LOWERVP(vp))
1107 		VOP_REVOKE(LOWERVP(vp), ap->a_flags);
1108 	vgone(vp);
1109 	return (0);
1110 }
1111 
1112 int
1113 union_mmap(v)
1114 	void *v;
1115 {
1116 	struct vop_mmap_args /* {
1117 		struct vnode *a_vp;
1118 		int  a_fflags;
1119 		struct ucred *a_cred;
1120 		struct proc *a_p;
1121 	} */ *ap = v;
1122 	struct vnode *ovp = OTHERVP(ap->a_vp);
1123 
1124 	ap->a_vp = ovp;
1125 	return (VCALL(ovp, VOFFSET(vop_mmap), ap));
1126 }
1127 
1128 int
1129 union_fsync(v)
1130 	void *v;
1131 {
1132 	struct vop_fsync_args /* {
1133 		struct vnode *a_vp;
1134 		struct ucred *a_cred;
1135 		int  a_flags;
1136 		off_t a_offlo;
1137 		off_t a_offhi;
1138 		struct proc *a_p;
1139 	} */ *ap = v;
1140 	int error = 0;
1141 	struct proc *p;
1142 	struct vnode *targetvp;
1143 
1144 	/*
1145 	 * If vinvalbuf is calling us, it's a "shallow fsync" -- don't
1146 	 * bother syncing the underlying vnodes, since (a) they'll be
1147 	 * fsync'ed when reclaimed and (b) we could deadlock if
1148 	 * they're locked; otherwise, pass it through to the
1149 	 * underlying layer.
1150 	 */
1151 	if (ap->a_flags & FSYNC_RECLAIM)
1152 		return 0;
1153 
1154 	targetvp = OTHERVP(ap->a_vp);
1155 	p = ap->a_p;
1156 
1157 	if (targetvp != NULLVP) {
1158 		int dolock = (targetvp == LOWERVP(ap->a_vp));
1159 
1160 		if (dolock)
1161 			vn_lock(targetvp, LK_EXCLUSIVE | LK_RETRY);
1162 		else
1163 			FIXUP(VTOUNION(ap->a_vp));
1164 		error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_flags,
1165 			    ap->a_offlo, ap->a_offhi, p);
1166 		if (dolock)
1167 			VOP_UNLOCK(targetvp, 0);
1168 	}
1169 
1170 	return (error);
1171 }
1172 
1173 int
1174 union_seek(v)
1175 	void *v;
1176 {
1177 	struct vop_seek_args /* {
1178 		struct vnode *a_vp;
1179 		off_t  a_oldoff;
1180 		off_t  a_newoff;
1181 		struct ucred *a_cred;
1182 	} */ *ap = v;
1183 	struct vnode *ovp = OTHERVP(ap->a_vp);
1184 
1185 	ap->a_vp = ovp;
1186 	return (VCALL(ovp, VOFFSET(vop_seek), ap));
1187 }
1188 
1189 int
1190 union_remove(v)
1191 	void *v;
1192 {
1193 	struct vop_remove_args /* {
1194 		struct vnode *a_dvp;
1195 		struct vnode *a_vp;
1196 		struct componentname *a_cnp;
1197 	} */ *ap = v;
1198 	int error;
1199 	struct union_node *dun = VTOUNION(ap->a_dvp);
1200 	struct union_node *un = VTOUNION(ap->a_vp);
1201 	struct componentname *cnp = ap->a_cnp;
1202 
1203 	if (dun->un_uppervp == NULLVP)
1204 		panic("union remove: null upper vnode");
1205 
1206 	if (un->un_uppervp != NULLVP) {
1207 		struct vnode *dvp = dun->un_uppervp;
1208 		struct vnode *vp = un->un_uppervp;
1209 
1210 		FIXUP(dun);
1211 		VREF(dvp);
1212 		dun->un_flags |= UN_KLOCK;
1213 		vput(ap->a_dvp);
1214 		FIXUP(un);
1215 		VREF(vp);
1216 		un->un_flags |= UN_KLOCK;
1217 		vput(ap->a_vp);
1218 
1219 		if (union_dowhiteout(un, cnp->cn_cred, cnp->cn_proc))
1220 			cnp->cn_flags |= DOWHITEOUT;
1221 		error = VOP_REMOVE(dvp, vp, cnp);
1222 		if (!error)
1223 			union_removed_upper(un);
1224 	} else {
1225 		FIXUP(dun);
1226 		error = union_mkwhiteout(
1227 			MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
1228 			dun->un_uppervp, ap->a_cnp, un->un_path);
1229 		vput(ap->a_dvp);
1230 		vput(ap->a_vp);
1231 	}
1232 
1233 	return (error);
1234 }
1235 
1236 int
1237 union_link(v)
1238 	void *v;
1239 {
1240 	struct vop_link_args /* {
1241 		struct vnode *a_dvp;
1242 		struct vnode *a_vp;
1243 		struct componentname *a_cnp;
1244 	} */ *ap = v;
1245 	int error = 0;
1246 	struct componentname *cnp = ap->a_cnp;
1247 	struct proc *p = cnp->cn_proc;
1248 	struct union_node *dun;
1249 	struct vnode *vp;
1250 	struct vnode *dvp;
1251 
1252 	dun = VTOUNION(ap->a_dvp);
1253 
1254 #ifdef DIAGNOSTIC
1255 	if (!(ap->a_cnp->cn_flags & LOCKPARENT)) {
1256 		printf("union_link called without LOCKPARENT set!\n");
1257 		error = EIO; /* need some error code for "caller is a bozo" */
1258 	} else
1259 #endif
1260 
1261 
1262 	if (ap->a_dvp->v_op != ap->a_vp->v_op) {
1263 		vp = ap->a_vp;
1264 	} else {
1265 		struct union_node *un = VTOUNION(ap->a_vp);
1266 		if (un->un_uppervp == NULLVP) {
1267 			/*
1268 			 * Needs to be copied before we can link it.
1269 			 */
1270 			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
1271 			if (dun->un_uppervp == un->un_dirvp) {
1272 				dun->un_flags &= ~UN_ULOCK;
1273 				VOP_UNLOCK(dun->un_uppervp, 0);
1274 			}
1275 			error = union_copyup(un, 1, cnp->cn_cred, p);
1276 			if (dun->un_uppervp == un->un_dirvp) {
1277 				/*
1278 				 * During copyup, we dropped the lock on the
1279 				 * dir and invalidated any saved namei lookup
1280 				 * state for the directory we'll be entering
1281 				 * the link in.  We need to re-run the lookup
1282 				 * in that directory to reset any state needed
1283 				 * for VOP_LINK.
1284 				 * Call relookup on the union-layer to reset
1285 				 * the state.
1286 				 */
1287 				vp  = NULLVP;
1288 				if (dun->un_uppervp == NULLVP)
1289 					 panic("union: null upperdvp?");
1290 				/*
1291 				 * relookup starts with an unlocked node,
1292 				 * and since LOCKPARENT is set returns
1293 				 * the starting directory locked.
1294 				 */
1295 				VOP_UNLOCK(ap->a_dvp, 0);
1296 				error = relookup(ap->a_dvp, &vp, ap->a_cnp);
1297 				if (error) {
1298 					vrele(ap->a_dvp);
1299 					VOP_UNLOCK(ap->a_vp, 0);
1300 					return EROFS;	/* ? */
1301 				}
1302 				if (vp != NULLVP) {
1303 					/*
1304 					 * The name we want to create has
1305 					 * mysteriously appeared (a race?)
1306 					 */
1307 					error = EEXIST;
1308 					VOP_UNLOCK(ap->a_vp, 0);
1309 					goto croak;
1310 				}
1311 			}
1312 			VOP_UNLOCK(ap->a_vp, 0);
1313 		}
1314 		vp = un->un_uppervp;
1315 	}
1316 
1317 	dvp = dun->un_uppervp;
1318 	if (dvp == NULLVP)
1319 		error = EROFS;
1320 
1321 	if (error) {
1322 croak:
1323 		vput(ap->a_dvp);
1324 		return (error);
1325 	}
1326 
1327 	FIXUP(dun);
1328 	VREF(dvp);
1329 	dun->un_flags |= UN_KLOCK;
1330 	vput(ap->a_dvp);
1331 
1332 	return (VOP_LINK(dvp, vp, cnp));
1333 }
1334 
1335 int
1336 union_rename(v)
1337 	void *v;
1338 {
1339 	struct vop_rename_args  /* {
1340 		struct vnode *a_fdvp;
1341 		struct vnode *a_fvp;
1342 		struct componentname *a_fcnp;
1343 		struct vnode *a_tdvp;
1344 		struct vnode *a_tvp;
1345 		struct componentname *a_tcnp;
1346 	} */ *ap = v;
1347 	int error;
1348 
1349 	struct vnode *fdvp = ap->a_fdvp;
1350 	struct vnode *fvp = ap->a_fvp;
1351 	struct vnode *tdvp = ap->a_tdvp;
1352 	struct vnode *tvp = ap->a_tvp;
1353 
1354 	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
1355 		struct union_node *un = VTOUNION(fdvp);
1356 		if (un->un_uppervp == NULLVP) {
1357 			/*
1358 			 * this should never happen in normal
1359 			 * operation but might if there was
1360 			 * a problem creating the top-level shadow
1361 			 * directory.
1362 			 */
1363 			error = EXDEV;
1364 			goto bad;
1365 		}
1366 
1367 		fdvp = un->un_uppervp;
1368 		VREF(fdvp);
1369 		vrele(ap->a_fdvp);
1370 	}
1371 
1372 	if (fvp->v_op == union_vnodeop_p) {	/* always true */
1373 		struct union_node *un = VTOUNION(fvp);
1374 		if (un->un_uppervp == NULLVP) {
1375 			/* XXX: should do a copyup */
1376 			error = EXDEV;
1377 			goto bad;
1378 		}
1379 
1380 		if (un->un_lowervp != NULLVP)
1381 			ap->a_fcnp->cn_flags |= DOWHITEOUT;
1382 
1383 		fvp = un->un_uppervp;
1384 		VREF(fvp);
1385 		vrele(ap->a_fvp);
1386 	}
1387 
1388 	if (tdvp->v_op == union_vnodeop_p) {
1389 		struct union_node *un = VTOUNION(tdvp);
1390 		if (un->un_uppervp == NULLVP) {
1391 			/*
1392 			 * this should never happen in normal
1393 			 * operation but might if there was
1394 			 * a problem creating the top-level shadow
1395 			 * directory.
1396 			 */
1397 			error = EXDEV;
1398 			goto bad;
1399 		}
1400 
1401 		tdvp = un->un_uppervp;
1402 		VREF(tdvp);
1403 		un->un_flags |= UN_KLOCK;
1404 		vput(ap->a_tdvp);
1405 	}
1406 
1407 	if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
1408 		struct union_node *un = VTOUNION(tvp);
1409 
1410 		tvp = un->un_uppervp;
1411 		if (tvp != NULLVP) {
1412 			VREF(tvp);
1413 			un->un_flags |= UN_KLOCK;
1414 		}
1415 		vput(ap->a_tvp);
1416 	}
1417 
1418 	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));
1419 
1420 bad:
1421 	vrele(fdvp);
1422 	vrele(fvp);
1423 	vput(tdvp);
1424 	if (tvp != NULLVP)
1425 		vput(tvp);
1426 
1427 	return (error);
1428 }
1429 
1430 int
1431 union_mkdir(v)
1432 	void *v;
1433 {
1434 	struct vop_mkdir_args /* {
1435 		struct vnode *a_dvp;
1436 		struct vnode **a_vpp;
1437 		struct componentname *a_cnp;
1438 		struct vattr *a_vap;
1439 	} */ *ap = v;
1440 	struct union_node *un = VTOUNION(ap->a_dvp);
1441 	struct vnode *dvp = un->un_uppervp;
1442 	struct componentname *cnp = ap->a_cnp;
1443 
1444 	if (dvp != NULLVP) {
1445 		int error;
1446 		struct vnode *vp;
1447 
1448 		FIXUP(un);
1449 		VREF(dvp);
1450 		un->un_flags |= UN_KLOCK;
1451 		VOP_UNLOCK(ap->a_dvp, 0);
1452 		error = VOP_MKDIR(dvp, &vp, cnp, ap->a_vap);
1453 		if (error) {
1454 			vrele(ap->a_dvp);
1455 			return (error);
1456 		}
1457 
1458 		error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount, ap->a_dvp,
1459 				NULLVP, cnp, vp, NULLVP, 1);
1460 		vrele(ap->a_dvp);
1461 		if (error)
1462 			vput(vp);
1463 		return (error);
1464 	}
1465 
1466 	vput(ap->a_dvp);
1467 	return (EROFS);
1468 }
1469 
1470 int
1471 union_rmdir(v)
1472 	void *v;
1473 {
1474 	struct vop_rmdir_args /* {
1475 		struct vnode *a_dvp;
1476 		struct vnode *a_vp;
1477 		struct componentname *a_cnp;
1478 	} */ *ap = v;
1479 	int error;
1480 	struct union_node *dun = VTOUNION(ap->a_dvp);
1481 	struct union_node *un = VTOUNION(ap->a_vp);
1482 	struct componentname *cnp = ap->a_cnp;
1483 
1484 	if (dun->un_uppervp == NULLVP)
1485 		panic("union rmdir: null upper vnode");
1486 
1487 	if (un->un_uppervp != NULLVP) {
1488 		struct vnode *dvp = dun->un_uppervp;
1489 		struct vnode *vp = un->un_uppervp;
1490 
1491 		FIXUP(dun);
1492 		VREF(dvp);
1493 		dun->un_flags |= UN_KLOCK;
1494 		vput(ap->a_dvp);
1495 		FIXUP(un);
1496 		VREF(vp);
1497 		un->un_flags |= UN_KLOCK;
1498 		vput(ap->a_vp);
1499 
1500 		if (union_dowhiteout(un, cnp->cn_cred, cnp->cn_proc))
1501 			cnp->cn_flags |= DOWHITEOUT;
1502 		error = VOP_RMDIR(dvp, vp, ap->a_cnp);
1503 		if (!error)
1504 			union_removed_upper(un);
1505 	} else {
1506 		FIXUP(dun);
1507 		error = union_mkwhiteout(
1508 			MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
1509 			dun->un_uppervp, ap->a_cnp, un->un_path);
1510 		vput(ap->a_dvp);
1511 		vput(ap->a_vp);
1512 	}
1513 
1514 	return (error);
1515 }
1516 
1517 int
1518 union_symlink(v)
1519 	void *v;
1520 {
1521 	struct vop_symlink_args /* {
1522 		struct vnode *a_dvp;
1523 		struct vnode **a_vpp;
1524 		struct componentname *a_cnp;
1525 		struct vattr *a_vap;
1526 		char *a_target;
1527 	} */ *ap = v;
1528 	struct union_node *un = VTOUNION(ap->a_dvp);
1529 	struct vnode *dvp = un->un_uppervp;
1530 	struct componentname *cnp = ap->a_cnp;
1531 
1532 	if (dvp != NULLVP) {
1533 		int error;
1534 
1535 		FIXUP(un);
1536 		VREF(dvp);
1537 		un->un_flags |= UN_KLOCK;
1538 		vput(ap->a_dvp);
1539 		error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
1540 				    ap->a_target);
1541 		return (error);
1542 	}
1543 
1544 	vput(ap->a_dvp);
1545 	return (EROFS);
1546 }
1547 
1548 /*
1549  * union_readdir works in concert with getdirentries and
1550  * readdir(3) to provide a list of entries in the unioned
1551  * directories.  getdirentries is responsible for walking
1552  * down the union stack.  readdir(3) is responsible for
1553  * eliminating duplicate names from the returned data stream.
1554  */
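/*
 * Only the upper vnode is consulted here; a union directory with no
 * upper layer returns immediately with no entries, and the lower
 * layer's entries are picked up by the callers described above as they
 * walk down the union stack.
 */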
1555 int
1556 union_readdir(v)
1557 	void *v;
1558 {
1559 	struct vop_readdir_args /* {
1560 		struct vnodeop_desc *a_desc;
1561 		struct vnode *a_vp;
1562 		struct uio *a_uio;
1563 		struct ucred *a_cred;
1564 		int *a_eofflag;
1565 		u_long *a_cookies;
1566 		int a_ncookies;
1567 	} */ *ap = v;
1568 	struct union_node *un = VTOUNION(ap->a_vp);
1569 	struct vnode *uvp = un->un_uppervp;
1570 
1571 	if (uvp == NULLVP)
1572 		return (0);
1573 
1574 	FIXUP(un);
1575 	ap->a_vp = uvp;
1576 	return (VCALL(uvp, VOFFSET(vop_readdir), ap));
1577 }
1578 
1579 int
1580 union_readlink(v)
1581 	void *v;
1582 {
1583 	struct vop_readlink_args /* {
1584 		struct vnode *a_vp;
1585 		struct uio *a_uio;
1586 		struct ucred *a_cred;
1587 	} */ *ap = v;
1588 	int error;
1589 	struct vnode *vp = OTHERVP(ap->a_vp);
1590 	int dolock = (vp == LOWERVP(ap->a_vp));
1591 
1592 	if (dolock)
1593 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1594 	else
1595 		FIXUP(VTOUNION(ap->a_vp));
1596 	ap->a_vp = vp;
1597 	error = VCALL(vp, VOFFSET(vop_readlink), ap);
1598 	if (dolock)
1599 		VOP_UNLOCK(vp, 0);
1600 
1601 	return (error);
1602 }
1603 
1604 int
1605 union_abortop(v)
1606 	void *v;
1607 {
1608 	struct vop_abortop_args /* {
1609 		struct vnode *a_dvp;
1610 		struct componentname *a_cnp;
1611 	} */ *ap = v;
1612 	int error;
1613 	struct vnode *vp = OTHERVP(ap->a_dvp);
1614 	struct union_node *un = VTOUNION(ap->a_dvp);
1615 	int islocked = un->un_flags & UN_LOCKED;
1616 	int dolock = (vp == LOWERVP(ap->a_dvp));
1617 
1618 	if (islocked) {
1619 		if (dolock)
1620 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1621 		else
1622 			FIXUP(VTOUNION(ap->a_dvp));
1623 	}
1624 	ap->a_dvp = vp;
1625 	error = VCALL(vp, VOFFSET(vop_abortop), ap);
1626 	if (islocked && dolock)
1627 		VOP_UNLOCK(vp, 0);
1628 
1629 	return (error);
1630 }
1631 
1632 int
1633 union_inactive(v)
1634 	void *v;
1635 {
1636 	struct vop_inactive_args /* {
1637 		struct vnode *a_vp;
1638 		struct proc *a_p;
1639 	} */ *ap = v;
1640 	struct vnode *vp = ap->a_vp;
1641 	struct union_node *un = VTOUNION(vp);
1642 	struct vnode **vpp;
1643 
1644 	/*
1645 	 * Do nothing (and _don't_ bypass).
1646 	 * Wait to vrele lowervp until reclaim,
1647 	 * so that until then our union_node is in the
1648 	 * cache and reusable.
1649 	 *
1650 	 * NEEDSWORK: Someday, consider inactive'ing
1651 	 * the lowervp and then trying to reactivate it
1652 	 * with capabilities (v_id)
1653 	 * like they do in the name lookup cache code.
1654 	 * That's too much work for now.
1655 	 */
1656 
1657 	if (un->un_dircache != 0) {
1658 		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
1659 			vrele(*vpp);
1660 		free(un->un_dircache, M_TEMP);
1661 		un->un_dircache = 0;
1662 	}
1663 
1664 	VOP_UNLOCK(vp, 0);
1665 
1666 	if ((un->un_flags & UN_CACHED) == 0)
1667 		vgone(vp);
1668 
1669 	return (0);
1670 }
1671 
1672 int
1673 union_reclaim(v)
1674 	void *v;
1675 {
1676 	struct vop_reclaim_args /* {
1677 		struct vnode *a_vp;
1678 	} */ *ap = v;
1679 
1680 	union_freevp(ap->a_vp);
1681 
1682 	return (0);
1683 }
1684 
1685 int
1686 union_lock(v)
1687 	void *v;
1688 {
1689 	struct vop_lock_args /* {
1690 		struct vnode *a_vp;
1691 		int a_flags;
1692 	} */ *ap = v;
1693 	struct vnode *vp = ap->a_vp;
1694 	int flags = ap->a_flags;
1695 	struct union_node *un;
1696 	int error;
1697 #ifdef DIAGNOSTIC
1698 	int drain = 0;
1699 #endif
1700 
1701 	genfs_nolock(ap);
1702 	/*
1703 	 * Need to do real lockmgr-style locking here.
1704 	 * in the mean time, draining won't work quite right,
1705 	 * which could lead to a few race conditions.
1706 	 * the following test was here, but is not quite right, we
1707 	 * the following test was here, but is not quite right; we
1708 	if ((flags & LK_TYPE_MASK) == LK_DRAIN)
1709 		return (0);
1710 	 */
1711 	flags &= ~LK_INTERLOCK;
1712 
1713 	un = VTOUNION(vp);
1714 #ifdef DIAGNOSTIC
1715 	if (un->un_flags & (UN_DRAINING|UN_DRAINED)) {
1716 		if (un->un_flags & UN_DRAINED)
1717 			panic("union: %p: warning: locking decommissioned lock", vp);
1718 		if ((flags & LK_TYPE_MASK) != LK_RELEASE)
1719 			panic("union: %p: non-release on draining lock: %d",
1720 			    vp, flags & LK_TYPE_MASK);
1721 		un->un_flags &= ~UN_DRAINING;
1722 		if ((flags & LK_REENABLE) == 0)
1723 			un->un_flags |= UN_DRAINED;
1724 	}
1725 #endif
1726 
1727 	/*
1728 	 * Don't pass DRAIN through to sub-vnode lock; keep track of
1729 	 * DRAIN state at this level, and just get an exclusive lock
1730 	 * on the underlying vnode.
1731 	 */
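	/*
	 * Under DIAGNOSTIC, the local "drain" flag set below marks the
	 * union node UN_DRAINING at the end of this routine; union_unlock()
	 * then converts that to UN_DRAINED, after which any further lock
	 * attempt on this node panics (see the check above).
	 */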
1732 	if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
1733 #ifdef DIAGNOSTIC
1734 		drain = 1;
1735 #endif
1736 		flags = LK_EXCLUSIVE | (flags & ~LK_TYPE_MASK);
1737 	}
1738 start:
1739 	un = VTOUNION(vp);
1740 
1741 	if (un->un_uppervp != NULLVP) {
1742 		if (((un->un_flags & UN_ULOCK) == 0) &&
1743 		    (vp->v_usecount != 0)) {
1744 			/*
1745 			 * We MUST always use the order of: take upper
1746 			 * vp lock, manipulate union node flags, drop
1747 			 * upper vp lock.  This code must not be an exception.
1748 			 */
1749 			error = vn_lock(un->un_uppervp, flags);
1750 			if (error)
1751 				return (error);
1752 			un->un_flags |= UN_ULOCK;
1753 		}
1754 #ifdef DIAGNOSTIC
1755 		if (un->un_flags & UN_KLOCK) {
1756 			vprint("union: dangling klock", vp);
1757 			panic("union: dangling upper lock (%p)", vp);
1758 		}
1759 #endif
1760 	}
1761 
1762 	/* XXX ignores LK_NOWAIT */
1763 	if (un->un_flags & UN_LOCKED) {
1764 #ifdef DIAGNOSTIC
1765 		if (curproc && un->un_pid == curproc->p_pid &&
1766 			    un->un_pid > -1 && curproc->p_pid > -1)
1767 			panic("union: locking against myself");
1768 #endif
1769 		un->un_flags |= UN_WANTED;
1770 		tsleep((caddr_t)&un->un_flags, PINOD, "unionlk2", 0);
1771 		goto start;
1772 	}
1773 
1774 #ifdef DIAGNOSTIC
1775 	if (curproc)
1776 		un->un_pid = curproc->p_pid;
1777 	else
1778 		un->un_pid = -1;
1779 	if (drain)
1780 		un->un_flags |= UN_DRAINING;
1781 #endif
1782 
1783 	un->un_flags |= UN_LOCKED;
1784 	return (0);
1785 }
1786 
1787 /*
1788  * When operations want to vput() a union node yet retain a lock on
1789  * the upper vnode (say, to do some further operations like link(),
1790  * mkdir(), ...), they set UN_KLOCK on the union node, then call
1791  * vput() which calls VOP_UNLOCK() and comes here.  union_unlock()
1792  * unlocks the union node (leaving the upper vnode alone), clears the
1793  * KLOCK flag, and then returns to vput().  The caller then does whatever
1794  * is left to do with the upper vnode, and ensures that it gets unlocked.
1795  *
1796  * If UN_KLOCK isn't set, then the upper vnode is unlocked here.
1797  */
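/*
 * The typical caller-side pattern (see union_create() for an instance):
 *
 *	FIXUP(un);			make sure the upper vnode is locked
 *	VREF(dvp);			dvp is un->un_uppervp
 *	un->un_flags |= UN_KLOCK;	keep the upper lock across vput
 *	vput(ap->a_dvp);		unlocks and releases the union node
 *	VOP_CREATE(dvp, &vp, cnp, vap);	operates on the still-locked dvp
 */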
1798 int
1799 union_unlock(v)
1800 	void *v;
1801 {
1802 	struct vop_unlock_args /* {
1803 		struct vnode *a_vp;
1804 		int a_flags;
1805 	} */ *ap = v;
1806 	struct union_node *un = VTOUNION(ap->a_vp);
1807 
1808 #ifdef DIAGNOSTIC
1809 	if ((un->un_flags & UN_LOCKED) == 0)
1810 		panic("union: unlock unlocked node");
1811 	if (curproc && un->un_pid != curproc->p_pid &&
1812 			curproc->p_pid > -1 && un->un_pid > -1)
1813 		panic("union: unlocking other process's union node");
1814 	if (un->un_flags & UN_DRAINED)
1815 		panic("union: %p: warning: unlocking decommissioned lock", ap->a_vp);
1816 #endif
1817 
1818 	un->un_flags &= ~UN_LOCKED;
1819 
1820 	if ((un->un_flags & (UN_ULOCK|UN_KLOCK)) == UN_ULOCK)
1821 		VOP_UNLOCK(un->un_uppervp, 0);
1822 
1823 	un->un_flags &= ~(UN_ULOCK|UN_KLOCK);
1824 
1825 	if (un->un_flags & UN_WANTED) {
1826 		un->un_flags &= ~UN_WANTED;
1827 		wakeup((caddr_t) &un->un_flags);
1828 	}
1829 
1830 #ifdef DIAGNOSTIC
1831 	un->un_pid = 0;
1832 	if (un->un_flags & UN_DRAINING) {
1833 		un->un_flags |= UN_DRAINED;
1834 		un->un_flags &= ~UN_DRAINING;
1835 	}
1836 #endif
1837 	genfs_nounlock(ap);
1838 
1839 	return (0);
1840 }
1841 
1842 int
1843 union_bmap(v)
1844 	void *v;
1845 {
1846 	struct vop_bmap_args /* {
1847 		struct vnode *a_vp;
1848 		daddr_t  a_bn;
1849 		struct vnode **a_vpp;
1850 		daddr_t *a_bnp;
1851 		int *a_runp;
1852 	} */ *ap = v;
1853 	int error;
1854 	struct vnode *vp = OTHERVP(ap->a_vp);
1855 	int dolock = (vp == LOWERVP(ap->a_vp));
1856 
1857 	if (dolock)
1858 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1859 	else
1860 		FIXUP(VTOUNION(ap->a_vp));
1861 	ap->a_vp = vp;
1862 	error = VCALL(vp, VOFFSET(vop_bmap), ap);
1863 	if (dolock)
1864 		VOP_UNLOCK(vp, 0);
1865 
1866 	return (error);
1867 }
1868 
1869 int
1870 union_print(v)
1871 	void *v;
1872 {
1873 	struct vop_print_args /* {
1874 		struct vnode *a_vp;
1875 	} */ *ap = v;
1876 	struct vnode *vp = ap->a_vp;
1877 
1878 	printf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
1879 			vp, UPPERVP(vp), LOWERVP(vp));
1880 	if (UPPERVP(vp) != NULLVP)
1881 		vprint("union: upper", UPPERVP(vp));
1882 	if (LOWERVP(vp) != NULLVP)
1883 		vprint("union: lower", LOWERVP(vp));
1884 	if (VTOUNION(vp)->un_dircache) {
1885 		struct vnode **vpp;
1886 		for (vpp = VTOUNION(vp)->un_dircache; *vpp != NULLVP; vpp++)
1887 			vprint("dircache:", *vpp);
1888 	}
1889 
1890 	return (0);
1891 }
1892 
1893 int
1894 union_islocked(v)
1895 	void *v;
1896 {
1897 	struct vop_islocked_args /* {
1898 		struct vnode *a_vp;
1899 	} */ *ap = v;
1900 
1901 	return ((VTOUNION(ap->a_vp)->un_flags & UN_LOCKED) ? 1 : 0);
1902 }
1903 
1904 int
1905 union_pathconf(v)
1906 	void *v;
1907 {
1908 	struct vop_pathconf_args /* {
1909 		struct vnode *a_vp;
1910 		int a_name;
1911 		int *a_retval;
1912 	} */ *ap = v;
1913 	int error;
1914 	struct vnode *vp = OTHERVP(ap->a_vp);
1915 	int dolock = (vp == LOWERVP(ap->a_vp));
1916 
1917 	if (dolock)
1918 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1919 	else
1920 		FIXUP(VTOUNION(ap->a_vp));
1921 	ap->a_vp = vp;
1922 	error = VCALL(vp, VOFFSET(vop_pathconf), ap);
1923 	if (dolock)
1924 		VOP_UNLOCK(vp, 0);
1925 
1926 	return (error);
1927 }
1928 
1929 int
1930 union_advlock(v)
1931 	void *v;
1932 {
1933 	struct vop_advlock_args /* {
1934 		struct vnode *a_vp;
1935 		caddr_t  a_id;
1936 		int  a_op;
1937 		struct flock *a_fl;
1938 		int  a_flags;
1939 	} */ *ap = v;
1940 	struct vnode *ovp = OTHERVP(ap->a_vp);
1941 
1942 	ap->a_vp = ovp;
1943 	return (VCALL(ovp, VOFFSET(vop_advlock), ap));
1944 }
1945 
1946 
1947 /*
1948  * XXX - vop_strategy must be hand coded because it has no
1949  * vnode in its arguments.
1950  * This goes away with a merged VM/buffer cache.
1951  */
1952 int
1953 union_strategy(v)
1954 	void *v;
1955 {
1956 	struct vop_strategy_args /* {
1957 		struct buf *a_bp;
1958 	} */ *ap = v;
1959 	struct buf *bp = ap->a_bp;
1960 	int error;
1961 	struct vnode *savedvp;
1962 
1963 	savedvp = bp->b_vp;
1964 	bp->b_vp = OTHERVP(bp->b_vp);
1965 
1966 #ifdef DIAGNOSTIC
1967 	if (bp->b_vp == NULLVP)
1968 		panic("union_strategy: nil vp");
1969 	if (((bp->b_flags & B_READ) == 0) &&
1970 	    (bp->b_vp == LOWERVP(savedvp)))
1971 		panic("union_strategy: writing to lowervp");
1972 #endif
1973 
1974 	error = VOP_STRATEGY(bp);
1975 	bp->b_vp = savedvp;
1976 
1977 	return (error);
1978 }
1979 
1980 int
1981 union_getpages(v)
1982 	void *v;
1983 {
1984 	struct vop_getpages_args /* {
1985 		struct vnode *a_vp;
1986 		voff_t a_offset;
1987 		struct vm_page **a_m;
1988 		int *a_count;
1989 		int a_centeridx;
1990 		vm_prot_t a_access_type;
1991 		int a_advice;
1992 		int a_flags;
1993 	} */ *ap = v;
1994 	struct vnode *vp = ap->a_vp;
1995 	int error;
1996 
1997 	/*
1998 	 * just pass the request on to the underlying layer.
1999 	 */
2000 
2001 	if (ap->a_flags & PGO_LOCKED) {
2002 		return EBUSY;
2003 	}
2004 	ap->a_vp = OTHERVP(vp);
2005 	simple_unlock(&vp->v_interlock);
2006 	simple_lock(&ap->a_vp->v_interlock);
2007 	error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
2008 	return error;
2009 }
2010 
2011 int
2012 union_putpages(v)
2013 	void *v;
2014 {
2015 	struct vop_putpages_args /* {
2016 		struct vnode *a_vp;
2017 		voff_t a_offlo;
2018 		voff_t a_offhi;
2019 		int a_flags;
2020 	} */ *ap = v;
2021 	struct vnode *vp = ap->a_vp;
2022 	int error;
2023 
2024 	/*
2025 	 * just pass the request on to the underlying layer.
2026 	 */
2027 
2028 	ap->a_vp = OTHERVP(vp);
2029 	simple_unlock(&vp->v_interlock);
2030 	simple_lock(&ap->a_vp->v_interlock);
2031 	error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2032 	return error;
2033 }
2034 
2035 int
2036 union_kqfilter(void *v)
2037 {
2038 	struct vop_kqfilter_args /* {
2039 		struct vnode	*a_vp;
2040 		struct knote	*a_kn;
2041 	} */ *ap = v;
2042 	int error;
2043 
2044 	/*
2045 	 * We watch either the upper layer file (if it already exists),
2046 	 * or the lower layer one.  If only the lower layer file exists
2047 	 * at this moment, we will keep watching that lower layer file
2048 	 * even if an upper layer file is created later on.
2049 	 */
2050 	if (UPPERVP(ap->a_vp))
2051 		error = VOP_KQFILTER(UPPERVP(ap->a_vp), ap->a_kn);
2052 	else if (LOWERVP(ap->a_vp))
2053 		error = VOP_KQFILTER(LOWERVP(ap->a_vp), ap->a_kn);
2054 	else {
2055 		/* panic? */
2056 		error = EOPNOTSUPP;
2057 	}
2058 
2059 	return (error);
2060 }
2061