xref: /netbsd-src/sys/fs/union/union_vnops.c (revision 7fa608457b817eca6e0977b37f758ae064f3c99c)
1 /*	$NetBSD: union_vnops.c,v 1.22 2007/10/10 20:42:26 ad Exp $	*/
2 
3 /*
4  * Copyright (c) 1992, 1993, 1994, 1995
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * Jan-Simon Pendry.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)union_vnops.c	8.33 (Berkeley) 7/31/95
35  */
36 
37 /*
38  * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
39  *
40  * This code is derived from software contributed to Berkeley by
41  * Jan-Simon Pendry.
42  *
43  * Redistribution and use in source and binary forms, with or without
44  * modification, are permitted provided that the following conditions
45  * are met:
46  * 1. Redistributions of source code must retain the above copyright
47  *    notice, this list of conditions and the following disclaimer.
48  * 2. Redistributions in binary form must reproduce the above copyright
49  *    notice, this list of conditions and the following disclaimer in the
50  *    documentation and/or other materials provided with the distribution.
51  * 3. All advertising materials mentioning features or use of this software
52  *    must display the following acknowledgement:
53  *	This product includes software developed by the University of
54  *	California, Berkeley and its contributors.
55  * 4. Neither the name of the University nor the names of its contributors
56  *    may be used to endorse or promote products derived from this software
57  *    without specific prior written permission.
58  *
59  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
60  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
63  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69  * SUCH DAMAGE.
70  *
71  *	@(#)union_vnops.c	8.33 (Berkeley) 7/31/95
72  */
73 
74 #include <sys/cdefs.h>
75 __KERNEL_RCSID(0, "$NetBSD: union_vnops.c,v 1.22 2007/10/10 20:42:26 ad Exp $");
76 
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/proc.h>
80 #include <sys/file.h>
81 #include <sys/time.h>
82 #include <sys/stat.h>
83 #include <sys/vnode.h>
84 #include <sys/mount.h>
85 #include <sys/namei.h>
86 #include <sys/malloc.h>
87 #include <sys/buf.h>
88 #include <sys/queue.h>
89 #include <sys/lock.h>
90 #include <sys/kauth.h>
91 
92 #include <fs/union/union.h>
93 #include <miscfs/genfs/genfs.h>
94 
/*
 * Forward declarations for the union layer's implementations of the
 * vnode operations; the table below maps each VOP to one of these.
 */
int union_lookup(void *);
int union_create(void *);
int union_whiteout(void *);
int union_mknod(void *);
int union_open(void *);
int union_close(void *);
int union_access(void *);
int union_getattr(void *);
int union_setattr(void *);
int union_read(void *);
int union_write(void *);
int union_lease(void *);
int union_ioctl(void *);
int union_poll(void *);
int union_revoke(void *);
int union_mmap(void *);
int union_fsync(void *);
int union_seek(void *);
int union_remove(void *);
int union_link(void *);
int union_rename(void *);
int union_mkdir(void *);
int union_rmdir(void *);
int union_symlink(void *);
int union_readdir(void *);
int union_readlink(void *);
int union_abortop(void *);
int union_inactive(void *);
int union_reclaim(void *);
int union_lock(void *);
int union_unlock(void *);
int union_bmap(void *);
int union_print(void *);
int union_islocked(void *);
int union_pathconf(void *);
int union_advlock(void *);
int union_strategy(void *);
int union_getpages(void *);
int union_putpages(void *);
int union_kqfilter(void *);

/* Internal helpers: see definitions below for their contracts. */
static void union_fixup(struct union_node *);
static int union_lookup1(struct vnode *, struct vnode **,
			      struct vnode **, struct componentname *);
139 
140 
141 /*
142  * Global vfs data structures
143  */
int (**union_vnodeop_p)(void *);
/*
 * Operation vector for union vnodes: every vnode op is routed to the
 * union_* implementation declared above; anything not listed falls
 * through to vn_default_error via vop_default_desc.
 */
const struct vnodeopv_entry_desc union_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, union_lookup },		/* lookup */
	{ &vop_create_desc, union_create },		/* create */
	{ &vop_whiteout_desc, union_whiteout },		/* whiteout */
	{ &vop_mknod_desc, union_mknod },		/* mknod */
	{ &vop_open_desc, union_open },			/* open */
	{ &vop_close_desc, union_close },		/* close */
	{ &vop_access_desc, union_access },		/* access */
	{ &vop_getattr_desc, union_getattr },		/* getattr */
	{ &vop_setattr_desc, union_setattr },		/* setattr */
	{ &vop_read_desc, union_read },			/* read */
	{ &vop_write_desc, union_write },		/* write */
	{ &vop_lease_desc, union_lease },		/* lease */
	{ &vop_ioctl_desc, union_ioctl },		/* ioctl */
	{ &vop_poll_desc, union_poll },			/* select */
	{ &vop_revoke_desc, union_revoke },		/* revoke */
	{ &vop_mmap_desc, union_mmap },			/* mmap */
	{ &vop_fsync_desc, union_fsync },		/* fsync */
	{ &vop_seek_desc, union_seek },			/* seek */
	{ &vop_remove_desc, union_remove },		/* remove */
	{ &vop_link_desc, union_link },			/* link */
	{ &vop_rename_desc, union_rename },		/* rename */
	{ &vop_mkdir_desc, union_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, union_rmdir },		/* rmdir */
	{ &vop_symlink_desc, union_symlink },		/* symlink */
	{ &vop_readdir_desc, union_readdir },		/* readdir */
	{ &vop_readlink_desc, union_readlink },		/* readlink */
	{ &vop_abortop_desc, union_abortop },		/* abortop */
	{ &vop_inactive_desc, union_inactive },		/* inactive */
	{ &vop_reclaim_desc, union_reclaim },		/* reclaim */
	{ &vop_lock_desc, union_lock },			/* lock */
	{ &vop_unlock_desc, union_unlock },		/* unlock */
	{ &vop_bmap_desc, union_bmap },			/* bmap */
	{ &vop_strategy_desc, union_strategy },		/* strategy */
	{ &vop_print_desc, union_print },		/* print */
	{ &vop_islocked_desc, union_islocked },		/* islocked */
	{ &vop_pathconf_desc, union_pathconf },		/* pathconf */
	{ &vop_advlock_desc, union_advlock },		/* advlock */
	{ &vop_getpages_desc, union_getpages },		/* getpages */
	{ &vop_putpages_desc, union_putpages },		/* putpages */
	{ &vop_kqfilter_desc, union_kqfilter },		/* kqfilter */
#ifdef notdef
	{ &vop_bwrite_desc, union_bwrite },		/* bwrite */
#endif
	{ NULL, NULL }
};
const struct vnodeopv_desc union_vnodeop_opv_desc =
	{ &union_vnodeop_p, union_vnodeop_entries };
194 
/*
 * Ensure the upper vnode of (un) is locked: take the lock via
 * union_fixup() unless UN_ULOCK says we already hold it.
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and stays safe inside unbraced if/else bodies; the old
 * bare-brace form left a stray empty statement after the `;' at
 * call sites and would break an if/else caller.
 */
#define FIXUP(un) do { \
	if (((un)->un_flags & UN_ULOCK) == 0) { \
		union_fixup(un); \
	} \
} while (0)
200 
201 static void
202 union_fixup(un)
203 	struct union_node *un;
204 {
205 
206 	vn_lock(un->un_uppervp, LK_EXCLUSIVE | LK_RETRY);
207 	un->un_flags |= UN_ULOCK;
208 }
209 
/*
 * Look up one pathname component relative to *dvpp within a single
 * (upper or lower) layer, transparently crossing mount points in
 * both directions.  udvp is that layer's root vnode; the walk never
 * escapes above it.  On success the result is returned in *vpp.
 * *dvpp may be updated if `..' stepped back over a covering mount.
 */
static int
union_lookup1(udvp, dvpp, vpp, cnp)
	struct vnode *udvp;
	struct vnode **dvpp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	int error;
	struct vnode *tdvp;
	struct vnode *dvp;
	struct mount *mp;

	dvp = *dvpp;

	/*
	 * If stepping up the directory tree, check for going
	 * back across the mount point, in which case do what
	 * lookup would do by stepping back down the mount
	 * hierarchy.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_vflag & VV_ROOT)) {
			/*
			 * Don't do the NOCROSSMOUNT check
			 * at this level.  By definition,
			 * union fs deals with namespaces, not
			 * filesystems.
			 */
			tdvp = dvp;
			*dvpp = dvp = dvp->v_mount->mnt_vnodecovered;
			VOP_UNLOCK(tdvp, 0);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
	}

        error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error)
		return (error);

	/* Continue the walk from whatever the lookup returned. */
	dvp = tdvp;

	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 */
	while (dvp != udvp && (dvp->v_type == VDIR) &&
	       (mp = dvp->v_mountedhere)) {

		/* Mount is busy (e.g. being unmounted): re-test the loop. */
		if (vfs_busy(mp, 0, 0))
			continue;

		vput(dvp);
		error = VFS_ROOT(mp, &tdvp);
		vfs_unbusy(mp);
		if (error) {
			return (error);
		}
		dvp = tdvp;
	}

	*vpp = dvp;
	return (0);
}
274 
/*
 * union_lookup: VOP_LOOKUP for the union layer.
 *
 * Performs the lookup in the upper layer (if present) and, unless the
 * name is hidden by a whiteout or an opaque directory, in the lower
 * layer as well, then merges the two results into a single union
 * vnode with union_allocvp().  A directory found only in the lower
 * layer may cause a shadow directory to be created in the upper
 * layer (case 2b below).  Much of the complexity is lock-order
 * management between the union node and the underlying vnodes.
 */
int
union_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;
	struct union_node *dun = VTOUNION(dvp);
	struct componentname *cnp = ap->a_cnp;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	kauth_cred_t saved_cred = NULL;
	int iswhiteout;
	struct vattr va;

#ifdef notyet
	if (cnp->cn_namelen == 3 &&
			cnp->cn_nameptr[2] == '.' &&
			cnp->cn_nameptr[1] == '.' &&
			cnp->cn_nameptr[0] == '.') {
		dvp = *ap->a_vpp = LOWERVP(ap->a_dvp);
		if (dvp == NULLVP)
			return (ENOENT);
		VREF(dvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		return (0);
	}
#endif

	/* Refuse delete/rename of the last component on a r/o mount. */
	if ((cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	upperdvp = dun->un_uppervp;
	lowerdvp = dun->un_lowervp;
	uppervp = NULLVP;
	lowervp = NULLVP;
	iswhiteout = 0;

	/*
	 * do the lookup in the upper level.
	 * if that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		FIXUP(dun);
		/*
		 * If we're doing `..' in the underlying filesystem,
		 * we must drop our lock on the union node before
		 * going up the tree in the lower file system--if we block
		 * on the lowervp lock, and that's held by someone else
		 * coming down the tree and who's waiting for our lock,
		 * we would be hosed.
		 */
		if (cnp->cn_flags & ISDOTDOT) {
			/* retain lock on underlying VP */
			dun->un_flags |= UN_KLOCK;
			VOP_UNLOCK(dvp, 0);
		}
		uerror = union_lookup1(um->um_uppervp, &upperdvp,
					&uppervp, cnp);

		if (cnp->cn_flags & ISDOTDOT) {
			if (dun->un_uppervp == upperdvp) {
				/*
				 * we got the underlying bugger back locked...
				 * now take back the union node lock.  Since we
				 *  hold the uppervp lock, we can diddle union
				 * locking flags at will. :)
				 */
				dun->un_flags |= UN_ULOCK;
			}
			/*
			 * if upperdvp got swapped out, it means we did
			 * some mount point magic, and we do not have
			 * dun->un_uppervp locked currently--so we get it
			 * locked here (don't set the UN_ULOCK flag).
			 */
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
		if (cnp->cn_consume != 0) {
			*ap->a_vpp = uppervp;
			return (uerror);
		}
		/*
		 * Name not found in the upper layer: a whiteout entry
		 * or an opaque directory (OPAQUE file flag) hides any
		 * lower-layer entry of the same name.
		 */
		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & ISWHITEOUT) {
				iswhiteout = 1;
			} else if (lowerdvp != NULLVP) {
				lerror = VOP_GETATTR(upperdvp, &va,
					cnp->cn_cred, cnp->cn_lwp);
				if (lerror == 0 && (va.va_flags & OPAQUE))
					iswhiteout = 1;
			}
		}
	} else {
		uerror = ENOENT;
	}

	/*
	 * in a similar way to the upper layer, do the lookup
	 * in the lower layer.   this time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */
	if (lowerdvp != NULLVP && !iswhiteout) {
		int nameiop;

		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY);

		/*
		 * Only do a LOOKUP on the bottom node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			/* Use the mount's credentials for the lower layer. */
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}

		/*
		 * we shouldn't have to worry about locking interactions
		 * between the lower layer and our union layer (w.r.t.
		 * `..' processing) because we don't futz with lowervp
		 * locks in the union-node instantiation code path.
		 */
		lerror = union_lookup1(um->um_lowervp, &lowerdvp,
				&lowervp, cnp);
		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (lowervp != lowerdvp)
			VOP_UNLOCK(lowerdvp, 0);

		if (cnp->cn_consume != 0) {
			if (uppervp != NULLVP) {
				if (uppervp == upperdvp)
					vrele(uppervp);
				else
					vput(uppervp);
				uppervp = NULLVP;
			}
			*ap->a_vpp = lowervp;
			return (lerror);
		}
	} else {
		lerror = ENOENT;
		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
			/* `..' of a node with no lower dvp: use the
			 * lower vnode of the parent union node. */
			lowervp = LOWERVP(dun->un_pvp);
			if (lowervp != NULLVP) {
				VREF(lowervp);
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY);
				lerror = 0;
			}
		}
	}

	/*
	 * EJUSTRETURN is used by underlying filesystems to indicate that
	 * a directory modification op was started successfully.
	 * This will only happen in the upper layer, since
	 * the lower layer only does LOOKUPs.
	 * If this union is mounted read-only, bounce it now.
	 */

	if ((uerror == EJUSTRETURN) && (cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    ((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)))
		uerror = EROFS;

	/*
	 * at this point, we have uerror and lerror indicating
	 * possible errors with the lookups in the upper and lower
	 * layers.  additionally, uppervp and lowervp are (locked)
	 * references to existing vnodes in the upper and lower layers.
	 *
	 * there are now three cases to consider.
	 * 1. if both layers returned an error, then return whatever
	 *    error the upper layer generated.
	 *
	 * 2. if the top layer failed and the bottom layer succeeded
	 *    then two subcases occur.
	 *    a.  the bottom vnode is not a directory, in which
	 *	  case just return a new union vnode referencing
	 *	  an empty top layer and the existing bottom layer.
	 *    b.  the bottom vnode is a directory, in which case
	 *	  create a new directory in the top-level and
	 *	  continue as in case 3.
	 *
	 * 3. if the top layer succeeded then return a new union
	 *    vnode referencing whatever the new top layer and
	 *    whatever the bottom layer returned.
	 */

	*ap->a_vpp = NULLVP;


	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		return (uerror);
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			/*
			 * We may be racing another process to make the
			 * upper-level shadow directory.  Be careful with
			 * locks/etc!
			 */
			if (upperdvp) {
				dun->un_flags &= ~UN_ULOCK;
				VOP_UNLOCK(upperdvp, 0);
				uerror = union_mkshadow(um, upperdvp, cnp,
				    &uppervp);
				vn_lock(upperdvp, LK_EXCLUSIVE | LK_RETRY);
				dun->un_flags |= UN_ULOCK;
			}
			if (uerror) {
				if (lowervp != NULLVP) {
					vput(lowervp);
					lowervp = NULLVP;
				}
				return (uerror);
			}
		}
	}

	if (lowervp != NULLVP)
		VOP_UNLOCK(lowervp, 0);

	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
			      uppervp, lowervp, 1);

	if (error) {
		if (uppervp != NULLVP)
			vput(uppervp);
		if (lowervp != NULLVP)
			vrele(lowervp);
	}

	return (error);
}
529 
/*
 * union_create: create a regular file.
 *
 * Creation always happens in the upper layer; with no upper directory
 * the union is effectively read-only here and EROFS is returned.
 * UN_KLOCK makes the vput() of the union directory retain the lock on
 * the underlying upper vnode (see the "retain lock" usage in
 * union_lookup), so VOP_CREATE runs with a locked upper dvp.
 */
int
union_create(v)
	void *v;
{
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;

	if (dvp != NULLVP) {
		int error;
		struct vnode *vp;
		struct mount *mp;

		FIXUP(un);

		/* Hold our own reference; the union dvp goes away here. */
		VREF(dvp);
		un->un_flags |= UN_KLOCK;
		mp = ap->a_dvp->v_mount;
		vput(ap->a_dvp);
		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		if (error)
			return (error);

		/* Wrap the new upper vnode in a union vnode for the caller. */
		error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP, cnp, vp,
				NULLVP, 1);
		if (error)
			vput(vp);
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}
569 
570 int
571 union_whiteout(v)
572 	void *v;
573 {
574 	struct vop_whiteout_args /* {
575 		struct vnode *a_dvp;
576 		struct componentname *a_cnp;
577 		int a_flags;
578 	} */ *ap = v;
579 	struct union_node *un = VTOUNION(ap->a_dvp);
580 	struct componentname *cnp = ap->a_cnp;
581 
582 	if (un->un_uppervp == NULLVP)
583 		return (EOPNOTSUPP);
584 
585 	FIXUP(un);
586 	return (VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags));
587 }
588 
/*
 * union_mknod: create a device/special file.
 *
 * Mirrors union_create: the node is always made in the upper layer,
 * using UN_KLOCK to keep the upper dvp locked across the vput() of
 * the union directory; EROFS if there is no upper layer.
 */
int
union_mknod(v)
	void *v;
{
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;

	if (dvp != NULLVP) {
		int error;
		struct vnode *vp;
		struct mount *mp;

		FIXUP(un);

		/* Hold our own reference; the union dvp goes away here. */
		VREF(dvp);
		un->un_flags |= UN_KLOCK;
		mp = ap->a_dvp->v_mount;
		vput(ap->a_dvp);
		error = VOP_MKNOD(dvp, &vp, cnp, ap->a_vap);
		if (error)
			return (error);

		/* Wrap the new upper vnode in a union vnode for the caller. */
		error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
				      cnp, vp, NULLVP, 1);
		if (error)
		    vput(vp);
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}
628 
/*
 * union_open: open a union vnode.
 *
 * Opens the upper vnode when one exists.  Otherwise the lower vnode
 * is used: a regular file being opened for writing is first copied
 * up to the upper layer (copy-on-write) and the copy opened instead;
 * any other lower open is counted in un_openl so union_close knows
 * which layer to close.  Device nodes are refused on MNT_NODEV
 * mounts in either layer.
 */
int
union_open(v)
	void *v;
{
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	kauth_cred_t cred = ap->a_cred;
	struct lwp *l = ap->a_l;
	int error;

	/*
	 * If there is an existing upper vp then simply open that.
	 */
	tvp = un->un_uppervp;
	if (tvp == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			/* Copy existing contents only when not truncating. */
			error = union_copyup(un, (mode&O_TRUNC) == 0, cred, l);
			if (error == 0)
				error = VOP_OPEN(un->un_uppervp, mode, cred, l);
			return (error);
		}

		/*
		 * Just open the lower vnode, but check for nodev mount flag
		 */
		if ((tvp->v_type == VBLK || tvp->v_type == VCHR) &&
		    (ap->a_vp->v_mount->mnt_flag & MNT_NODEV))
			return ENXIO;
		un->un_openl++;
		vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_OPEN(tvp, mode, cred, l);
		VOP_UNLOCK(tvp, 0);

		return (error);
	}
	/*
	 * Just open the upper vnode, checking for nodev mount flag first
	 */
	if ((tvp->v_type == VBLK || tvp->v_type == VCHR) &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_NODEV))
		return ENXIO;

	FIXUP(un);

	error = VOP_OPEN(tvp, mode, cred, l);

	return (error);
}
691 
/*
 * union_close: close whichever layer union_open actually opened.
 *
 * If there is no upper vnode the open must have gone to the lower
 * layer, so un_openl is decremented to balance union_open's count.
 * The close is then passed through by substituting the chosen vnode
 * into the argument block.
 */
int
union_close(v)
	void *v;
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = un->un_uppervp;
	if (vp == NULLVP) {
#ifdef UNION_DIAGNOSTIC
		if (un->un_openl <= 0)
			panic("union: un_openl cnt");
#endif
		--un->un_openl;
		vp = un->un_lowervp;
	}

#ifdef DIAGNOSTIC
	if (vp == NULLVP)
		panic("union_close empty union vnode");
#endif

	/* Forward the close to the underlying vnode. */
	ap->a_vp = vp;
	return (VCALL(vp, VOFFSET(vop_close), ap));
}
723 
724 /*
725  * Check access permission on the union vnode.
726  * The access check being enforced is to check
727  * against both the underlying vnode, and any
728  * copied vnode.  This ensures that no additional
729  * file permissions are given away simply because
730  * the user caused an implicit file copy.
731  */
int
union_access(v)
	void *v;
{
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);
	int error = EACCES;
	struct union_mount *um = MOUNTTOUNIONMOUNT(vp->v_mount);

	/*
	 * Disallow write attempts on read-only file systems;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (ap->a_mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		case VBAD:
		case VBLK:
		case VCHR:
		case VSOCK:
		case VFIFO:
		case VNON:
		default:
			break;
		}
	}


	/* Upper layer present: its permissions alone decide. */
	if ((vp = un->un_uppervp) != NULLVP) {
		FIXUP(un);
		ap->a_vp = vp;
		return (VCALL(vp, VOFFSET(vop_access), ap));
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		ap->a_vp = vp;
		error = VCALL(vp, VOFFSET(vop_access), ap);
		if (error == 0) {
			/*
			 * For UNMNT_BELOW mounts, also require access
			 * under the mount's own credentials.
			 */
			if (um->um_op == UNMNT_BELOW) {
				ap->a_cred = um->um_cred;
				error = VCALL(vp, VOFFSET(vop_access), ap);
			}
		}
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	}

	return (error);
}
796 
797 /*
798  * We handle getattr only to change the fsid and
799  * track object sizes
800  */
int
union_getattr(v)
	void *v;
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp = un->un_uppervp;
	struct vattr *vap;
	struct vattr va;


	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 *
	 * To make life more interesting, some filesystems don't keep
	 * track of link counts in the expected way, and return a
	 * link count of `1' for those directories; if either of the
	 * component directories returns a link count of `1', we return a 1.
	 */

	vap = ap->a_vap;

	vp = un->un_uppervp;
	if (vp != NULLVP) {
		/*
		 * It's not clear whether VOP_GETATTR is to be
		 * called with the vnode locked or not.  stat() calls
		 * it with (vp) locked, and fstat calls it with
		 * (vp) unlocked.
		 * In the mean time, compensate here by checking
		 * the union_node's lock flag.
		 */
		if (un->un_flags & UN_LOCKED)
			FIXUP(un);

		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_l);
		if (error)
			return (error);
		/* Refresh the cached upper size from the real attributes. */
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	}

	/*
	 * Decide whether the lower layer needs to be queried too:
	 * always when there is no upper vnode, and for directories
	 * (into the scratch vattr) to merge link counts below.
	 */
	if (vp == NULLVP) {
		vp = un->un_lowervp;
	} else if (vp->v_type == VDIR) {
		vp = un->un_lowervp;
		if (vp != NULLVP)
			vap = &va;
	} else {
		vp = NULLVP;
	}

	if (vp != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_l);
		if (error)
			return (error);
		/* Refresh the cached lower size from the real attributes. */
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	}

	if ((vap != ap->a_vap) && (vap->va_type == VDIR)) {
		/*
		 * Link count manipulation:
		 *	- If both return "2", return 2 (no subdirs)
		 *	- If one or the other return "1", return "1" (ENOCLUE)
		 */
		if ((ap->a_vap->va_nlink == 2) &&
		    (vap->va_nlink == 2))
			;
		else if (ap->a_vap->va_nlink != 1) {
			if (vap->va_nlink == 1)
				ap->a_vap->va_nlink = 1;
			else
				ap->a_vap->va_nlink += vap->va_nlink;
		}
	}
	/* Report the union mount's fsid, not the underlying layer's. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];
	return (0);
}
889 
/*
 * union_setattr: set attributes, always on the upper layer.
 *
 * Attribute changes on a read-only mount are rejected with EROFS.
 * Truncating/resizing a regular file that exists only in the lower
 * layer first copies it up (handles open with O_TRUNC and O_CREAT);
 * if no upper vnode can be obtained the operation fails with EROFS.
 */
int
union_setattr(v)
	void *v;
{
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vattr *vap = ap->a_vap;
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);
	int error;

  	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
 		switch (vp->v_type) {
 		case VDIR:
 			return (EISDIR);
 		case VCHR:
 		case VBLK:
 		case VSOCK:
 		case VFIFO:
			break;
		case VREG:
		case VLNK:
 		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	/*
	 * Handle case of truncating lower object to zero size,
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if ((un->un_uppervp == NULLVP) &&
	    /* assert(un->un_lowervp != NULLVP) */
	    (un->un_lowervp->v_type == VREG)) {
		/* Copy data only when not truncating to zero. */
		error = union_copyup(un, (vap->va_size != 0),
						ap->a_cred, ap->a_l);
		if (error)
			return (error);
	}

	/*
	 * Try to set attributes in upper layer,
	 * otherwise return read-only filesystem error.
	 */
	if (un->un_uppervp != NULLVP) {
		FIXUP(un);
		error = VOP_SETATTR(un->un_uppervp, vap,
					ap->a_cred, ap->a_l);
		if ((error == 0) && (vap->va_size != VNOVAL))
			union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	} else {
		error = EROFS;
	}

	return (error);
}
961 
/*
 * union_read: read from the active layer (upper preferred).
 *
 * The union layer does not keep the lower vnode locked, so it must
 * be locked around the read; for the upper vnode FIXUP ensures we
 * hold its lock.  Afterwards the cached size for the layer that was
 * read is refreshed from the uio offset, in case the underlying
 * object changed size beneath us.
 */
int
union_read(v)
	void *v;
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	int error;
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	else
		FIXUP(VTOUNION(ap->a_vp));
	error = VOP_READ(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	if (dolock)
		VOP_UNLOCK(vp, 0);

	/*
	 * XXX
	 * perhaps the size of the underlying object has changed under
	 * our feet.  take advantage of the offset information present
	 * in the uio structure.
	 */
	if (error == 0) {
		struct union_node *un = VTOUNION(ap->a_vp);
		off_t cur = ap->a_uio->uio_offset;

		if (vp == un->un_uppervp) {
			if (cur > un->un_uppersz)
				union_newsize(ap->a_vp, cur, VNOVAL);
		} else {
			if (cur > un->un_lowersz)
				union_newsize(ap->a_vp, VNOVAL, cur);
		}
	}

	return (error);
}
1005 
1006 int
1007 union_write(v)
1008 	void *v;
1009 {
1010 	struct vop_read_args /* {
1011 		struct vnode *a_vp;
1012 		struct uio *a_uio;
1013 		int  a_ioflag;
1014 		kauth_cred_t a_cred;
1015 	} */ *ap = v;
1016 	int error;
1017 	struct vnode *vp;
1018 	struct union_node *un = VTOUNION(ap->a_vp);
1019 
1020 	vp = UPPERVP(ap->a_vp);
1021 	if (vp == NULLVP)
1022 		panic("union: missing upper layer in write");
1023 
1024 	FIXUP(un);
1025 	error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1026 
1027 	/*
1028 	 * the size of the underlying object may be changed by the
1029 	 * write.
1030 	 */
1031 	if (error == 0) {
1032 		off_t cur = ap->a_uio->uio_offset;
1033 
1034 		if (cur > un->un_uppersz)
1035 			union_newsize(ap->a_vp, cur, VNOVAL);
1036 	}
1037 
1038 	return (error);
1039 }
1040 
1041 int
1042 union_lease(v)
1043 	void *v;
1044 {
1045 	struct vop_lease_args /* {
1046 		struct vnode *a_vp;
1047 		struct lwp *a_l;
1048 		kauth_cred_t a_cred;
1049 		int a_flag;
1050 	} */ *ap = v;
1051 	struct vnode *ovp = OTHERVP(ap->a_vp);
1052 
1053 	ap->a_vp = ovp;
1054 	return (VCALL(ovp, VOFFSET(vop_lease), ap));
1055 }
1056 
1057 int
1058 union_ioctl(v)
1059 	void *v;
1060 {
1061 	struct vop_ioctl_args /* {
1062 		struct vnode *a_vp;
1063 		int  a_command;
1064 		void *a_data;
1065 		int  a_fflag;
1066 		kauth_cred_t a_cred;
1067 		struct lwp *a_l;
1068 	} */ *ap = v;
1069 	struct vnode *ovp = OTHERVP(ap->a_vp);
1070 
1071 	ap->a_vp = ovp;
1072 	return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
1073 }
1074 
1075 int
1076 union_poll(v)
1077 	void *v;
1078 {
1079 	struct vop_poll_args /* {
1080 		struct vnode *a_vp;
1081 		int a_events;
1082 		struct lwp *a_l;
1083 	} */ *ap = v;
1084 	struct vnode *ovp = OTHERVP(ap->a_vp);
1085 
1086 	ap->a_vp = ovp;
1087 	return (VCALL(ovp, VOFFSET(vop_poll), ap));
1088 }
1089 
1090 int
1091 union_revoke(v)
1092 	void *v;
1093 {
1094 	struct vop_revoke_args /* {
1095 		struct vnode *a_vp;
1096 		int a_flags;
1097 		struct proc *a_p;
1098 	} */ *ap = v;
1099 	struct vnode *vp = ap->a_vp;
1100 
1101 	if (UPPERVP(vp))
1102 		VOP_REVOKE(UPPERVP(vp), ap->a_flags);
1103 	if (LOWERVP(vp))
1104 		VOP_REVOKE(LOWERVP(vp), ap->a_flags);
1105 	vgone(vp);
1106 	return (0);
1107 }
1108 
1109 int
1110 union_mmap(v)
1111 	void *v;
1112 {
1113 	struct vop_mmap_args /* {
1114 		struct vnode *a_vp;
1115 		vm_prot_t a_prot;
1116 		kauth_cred_t a_cred;
1117 		struct lwp *a_l;
1118 	} */ *ap = v;
1119 	struct vnode *ovp = OTHERVP(ap->a_vp);
1120 
1121 	ap->a_vp = ovp;
1122 	return (VCALL(ovp, VOFFSET(vop_mmap), ap));
1123 }
1124 
/*
 * Sync the backing vnode.  The upper vnode's lock is already covered
 * by the union node's lock (FIXUP re-establishes UN_ULOCK state); the
 * lower vnode is not, so it must be locked around the call.
 */
int
union_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int  a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct lwp *a_l;
	} */ *ap = v;
	int error = 0;
	struct lwp *l;
	struct vnode *targetvp;

	/*
	 * If vinvalbuf is calling us, it's a "shallow fsync" -- don't
	 * bother syncing the underlying vnodes, since (a) they'll be
	 * fsync'ed when reclaimed and (b) we could deadlock if
	 * they're locked; otherwise, pass it through to the
	 * underlying layer.
	 */
	if (ap->a_flags & FSYNC_RECLAIM)
		return 0;

	targetvp = OTHERVP(ap->a_vp);
	l = ap->a_l;

	if (targetvp != NULLVP) {
		/* Only the lower vnode needs to be locked explicitly. */
		int dolock = (targetvp == LOWERVP(ap->a_vp));

		if (dolock)
			vn_lock(targetvp, LK_EXCLUSIVE | LK_RETRY);
		else
			FIXUP(VTOUNION(ap->a_vp));
		error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_flags,
			    ap->a_offlo, ap->a_offhi, l);
		if (dolock)
			VOP_UNLOCK(targetvp, 0);
	}

	return (error);
}
1169 
1170 int
1171 union_seek(v)
1172 	void *v;
1173 {
1174 	struct vop_seek_args /* {
1175 		struct vnode *a_vp;
1176 		off_t  a_oldoff;
1177 		off_t  a_newoff;
1178 		kauth_cred_t a_cred;
1179 	} */ *ap = v;
1180 	struct vnode *ovp = OTHERVP(ap->a_vp);
1181 
1182 	ap->a_vp = ovp;
1183 	return (VCALL(ovp, VOFFSET(vop_seek), ap));
1184 }
1185 
/*
 * Remove a file.  If the object exists in the upper layer, remove it
 * there (adding a whiteout when union_dowhiteout says a lower-layer
 * object would otherwise become visible again).  If it exists only in
 * the lower layer, just create a whiteout in the upper directory to
 * mask it -- the lower layer is never modified.
 */
int
union_remove(v)
	void *v;
{
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;

	if (dun->un_uppervp == NULLVP)
		panic("union remove: null upper vnode");

	if (un->un_uppervp != NULLVP) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		/*
		 * Take our own references on the upper vnodes and set
		 * UN_KLOCK so that vput() of the union vnodes drops only
		 * the union-level locks, leaving the upper vnodes locked
		 * and referenced for VOP_REMOVE below.
		 */
		FIXUP(dun);
		VREF(dvp);
		dun->un_flags |= UN_KLOCK;
		vput(ap->a_dvp);
		FIXUP(un);
		VREF(vp);
		un->un_flags |= UN_KLOCK;
		vput(ap->a_vp);

		if (union_dowhiteout(un, cnp->cn_cred, cnp->cn_lwp))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_REMOVE(dvp, vp, cnp);
		if (!error)
			union_removed_upper(un);
	} else {
		/* Lower-only object: mask it with a whiteout entry. */
		FIXUP(dun);
		error = union_mkwhiteout(
			MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
			dun->un_uppervp, ap->a_cnp, un->un_path);
		vput(ap->a_dvp);
		vput(ap->a_vp);
	}

	return (error);
}
1232 
/*
 * Link.  The link is always made in the upper layer.  If the source
 * object lives only in the lower layer it is copied up first; since
 * copyup may drop the lock on the upper directory, the name lookup in
 * that directory has to be re-run (relookup) before VOP_LINK.
 */
int
union_link(v)
	void *v;
{
	struct vop_link_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error = 0;
	struct componentname *cnp = ap->a_cnp;
	struct lwp *l = cnp->cn_lwp;
	struct union_node *dun;
	struct vnode *vp;
	struct vnode *dvp;

	dun = VTOUNION(ap->a_dvp);

#ifdef DIAGNOSTIC
	if (!(ap->a_cnp->cn_flags & LOCKPARENT)) {
		printf("union_link called without LOCKPARENT set!\n");
		error = EIO; /* need some error code for "caller is a bozo" */
	} else
#endif


	if (ap->a_dvp->v_op != ap->a_vp->v_op) {
		/* Source is not a union vnode; link it as-is. */
		vp = ap->a_vp;
	} else {
		struct union_node *un = VTOUNION(ap->a_vp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * Needs to be copied before we can link it.
			 */
			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
			if (dun->un_uppervp == un->un_dirvp) {
				/*
				 * Copyup will lock the parent directory
				 * itself; drop our hold on that lock first.
				 */
				dun->un_flags &= ~UN_ULOCK;
				VOP_UNLOCK(dun->un_uppervp, 0);
			}
			error = union_copyup(un, 1, cnp->cn_cred, l);
			if (dun->un_uppervp == un->un_dirvp) {
				/*
				 * During copyup, we dropped the lock on the
				 * dir and invalidated any saved namei lookup
				 * state for the directory we'll be entering
				 * the link in.  We need to re-run the lookup
				 * in that directory to reset any state needed
				 * for VOP_LINK.
				 * Call relookup on the union-layer to reset
				 * the state.
				 */
				vp  = NULLVP;
				if (dun->un_uppervp == NULLVP)
					 panic("union: null upperdvp?");
				error = relookup(ap->a_dvp, &vp, ap->a_cnp);
				if (error) {
					VOP_UNLOCK(ap->a_vp, 0);
					return EROFS;	/* ? */
				}
				if (vp != NULLVP) {
					/*
					 * The name we want to create has
					 * mysteriously appeared (a race?)
					 */
					error = EEXIST;
					VOP_UNLOCK(ap->a_vp, 0);
					vput(ap->a_dvp);
					vput(vp);
					return (error);
				}
			}
			VOP_UNLOCK(ap->a_vp, 0);
		}
		/* After (possible) copyup, link against the upper vnode. */
		vp = un->un_uppervp;
	}

	dvp = dun->un_uppervp;
	if (dvp == NULLVP)
		error = EROFS;

	if (error) {
		vput(ap->a_dvp);
		return (error);
	}

	/*
	 * Keep the upper directory referenced and locked for VOP_LINK;
	 * UN_KLOCK makes vput() drop only the union-level lock.
	 */
	FIXUP(dun);
	VREF(dvp);
	dun->un_flags |= UN_KLOCK;
	vput(ap->a_dvp);

	return (VOP_LINK(dvp, vp, cnp));
}
1325 
/*
 * Rename.  All participating vnodes must map to upper-layer vnodes
 * for the underlying VOP_RENAME to work; a source that exists only in
 * the lower layer makes the operation fail with EXDEV.  On entry
 * fdvp/fvp are referenced and tdvp/tvp are locked and referenced; all
 * of those holds are disposed of on every path through here.
 */
int
union_rename(v)
	void *v;
{
	struct vop_rename_args  /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	int error;

	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		fdvp = un->un_uppervp;
		VREF(fdvp);
	}

	if (fvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
		if (un->un_uppervp == NULLVP) {
			/* XXX: should do a copyup */
			error = EXDEV;
			goto bad;
		}

		/*
		 * If a lower-layer object of the same name exists, the
		 * rename must leave a whiteout behind so that object
		 * does not reappear under the old name.
		 */
		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= DOWHITEOUT;

		fvp = un->un_uppervp;
		VREF(fvp);
	}

	if (tdvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		/*
		 * Hand the locked and referenced upper directory to
		 * VOP_RENAME; UN_KLOCK makes vput() keep the upper
		 * vnode's lock while dropping the union-level one.
		 */
		tdvp = un->un_uppervp;
		VREF(tdvp);
		un->un_flags |= UN_KLOCK;
		vput(ap->a_tdvp);
	}

	if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tvp);

		/*
		 * The target may exist only in the lower layer, in
		 * which case tvp becomes NULLVP and the rename simply
		 * creates the new name in the upper layer.
		 */
		tvp = un->un_uppervp;
		if (tvp != NULLVP) {
			VREF(tvp);
			un->un_flags |= UN_KLOCK;
		}
		vput(ap->a_tvp);
	}

	error = VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp);
	goto out;

bad:
	/* Failure before tdvp/tvp were handed off: undo caller's holds. */
	vput(tdvp);
	if (tvp != NULLVP)
		vput(tvp);
	vrele(fdvp);
	vrele(fvp);

out:
	/* Drop the extra references taken when mapping to upper vnodes. */
	if (fdvp != ap->a_fdvp) {
		vrele(ap->a_fdvp);
	}
	if (fvp != ap->a_fvp) {
		vrele(ap->a_fvp);
	}
	return (error);
}
1426 
/*
 * Make a directory.  Directories can only be created in the upper
 * layer; with no upper directory vnode the union is effectively
 * read-only and EROFS is returned.
 */
int
union_mkdir(v)
	void *v;
{
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;

	if (dvp != NULLVP) {
		int error;
		struct vnode *vp;

		/*
		 * Hold our own reference on the upper directory; with
		 * UN_KLOCK set, the VOP_UNLOCK below drops only the
		 * union-level lock, so the upper directory stays locked
		 * for VOP_MKDIR.
		 */
		FIXUP(un);
		VREF(dvp);
		un->un_flags |= UN_KLOCK;
		VOP_UNLOCK(ap->a_dvp, 0);
		error = VOP_MKDIR(dvp, &vp, cnp, ap->a_vap);
		if (error) {
			vrele(ap->a_dvp);
			return (error);
		}

		/* Wrap the new upper-layer directory in a union node. */
		error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount, ap->a_dvp,
				NULLVP, cnp, vp, NULLVP, 1);
		if (error)
			vput(vp);
		vrele(ap->a_dvp);
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}
1466 
/*
 * Remove a directory.  Mirrors union_remove(): an upper-layer
 * directory is removed from the upper layer (with a whiteout when a
 * lower-layer name would otherwise show through again); a lower-only
 * directory is simply masked by a whiteout in the upper directory.
 */
int
union_rmdir(v)
	void *v;
{
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;

	if (dun->un_uppervp == NULLVP)
		panic("union rmdir: null upper vnode");

	if (un->un_uppervp != NULLVP) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		/*
		 * Take our own references on the upper vnodes; UN_KLOCK
		 * makes the vput() calls drop only the union-level
		 * locks, keeping the upper vnodes locked and referenced
		 * for VOP_RMDIR below.
		 */
		FIXUP(dun);
		VREF(dvp);
		dun->un_flags |= UN_KLOCK;
		vput(ap->a_dvp);
		FIXUP(un);
		VREF(vp);
		un->un_flags |= UN_KLOCK;
		vput(ap->a_vp);

		if (union_dowhiteout(un, cnp->cn_cred, cnp->cn_lwp))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_RMDIR(dvp, vp, ap->a_cnp);
		if (!error)
			union_removed_upper(un);
	} else {
		/* Lower-only directory: mask it with a whiteout entry. */
		FIXUP(dun);
		error = union_mkwhiteout(
			MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
			dun->un_uppervp, ap->a_cnp, un->un_path);
		vput(ap->a_dvp);
		vput(ap->a_vp);
	}

	return (error);
}
1513 
/*
 * Make a symbolic link.  Like mkdir, symlinks can only be created in
 * the upper layer; EROFS with no upper directory.
 */
int
union_symlink(v)
	void *v;
{
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;

	if (dvp != NULLVP) {
		int error;

		/*
		 * Reference the upper directory and set UN_KLOCK so
		 * vput() releases only the union-level lock, leaving
		 * the upper directory locked for VOP_SYMLINK.
		 */
		FIXUP(un);
		VREF(dvp);
		un->un_flags |= UN_KLOCK;
		vput(ap->a_dvp);
		error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
				    ap->a_target);
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}
1544 
1545 /*
1546  * union_readdir works in concert with getdirentries and
1547  * readdir(3) to provide a list of entries in the unioned
1548  * directories.  getdirentries is responsible for walking
1549  * down the union stack.  readdir(3) is responsible for
1550  * eliminating duplicate names from the returned data stream.
1551  */
1552 int
1553 union_readdir(v)
1554 	void *v;
1555 {
1556 	struct vop_readdir_args /* {
1557 		struct vnodeop_desc *a_desc;
1558 		struct vnode *a_vp;
1559 		struct uio *a_uio;
1560 		kauth_cred_t a_cred;
1561 		int *a_eofflag;
1562 		u_long *a_cookies;
1563 		int a_ncookies;
1564 	} */ *ap = v;
1565 	struct union_node *un = VTOUNION(ap->a_vp);
1566 	struct vnode *uvp = un->un_uppervp;
1567 
1568 	if (uvp == NULLVP)
1569 		return (0);
1570 
1571 	FIXUP(un);
1572 	ap->a_vp = uvp;
1573 	return (VCALL(uvp, VOFFSET(vop_readdir), ap));
1574 }
1575 
1576 int
1577 union_readlink(v)
1578 	void *v;
1579 {
1580 	struct vop_readlink_args /* {
1581 		struct vnode *a_vp;
1582 		struct uio *a_uio;
1583 		kauth_cred_t a_cred;
1584 	} */ *ap = v;
1585 	int error;
1586 	struct vnode *vp = OTHERVP(ap->a_vp);
1587 	int dolock = (vp == LOWERVP(ap->a_vp));
1588 
1589 	if (dolock)
1590 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1591 	else
1592 		FIXUP(VTOUNION(ap->a_vp));
1593 	ap->a_vp = vp;
1594 	error = VCALL(vp, VOFFSET(vop_readlink), ap);
1595 	if (dolock)
1596 		VOP_UNLOCK(vp, 0);
1597 
1598 	return (error);
1599 }
1600 
1601 int
1602 union_abortop(v)
1603 	void *v;
1604 {
1605 	struct vop_abortop_args /* {
1606 		struct vnode *a_dvp;
1607 		struct componentname *a_cnp;
1608 	} */ *ap = v;
1609 	int error;
1610 	struct vnode *vp = OTHERVP(ap->a_dvp);
1611 	struct union_node *un = VTOUNION(ap->a_dvp);
1612 	int islocked = un->un_flags & UN_LOCKED;
1613 	int dolock = (vp == LOWERVP(ap->a_dvp));
1614 
1615 	if (islocked) {
1616 		if (dolock)
1617 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1618 		else
1619 			FIXUP(VTOUNION(ap->a_dvp));
1620 	}
1621 	ap->a_dvp = vp;
1622 	error = VCALL(vp, VOFFSET(vop_abortop), ap);
1623 	if (islocked && dolock)
1624 		VOP_UNLOCK(vp, 0);
1625 
1626 	return (error);
1627 }
1628 
/*
 * Inactive: last reference to the vnode went away.  The underlying
 * vnodes are deliberately retained until reclaim (see comment below);
 * only the directory read-ahead cache is torn down here.
 */
int
union_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);
	struct vnode **vpp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */

	/* Drop the NULLVP-terminated array of cached directory vnodes. */
	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		free(un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}

	VOP_UNLOCK(vp, 0);

	/* Not on the union node cache: it cannot be found again, so
	 * dispose of it immediately rather than waiting. */
	if ((un->un_flags & UN_CACHED) == 0)
		vgone(vp);

	return (0);
}
1669 
1670 int
1671 union_reclaim(v)
1672 	void *v;
1673 {
1674 	struct vop_reclaim_args /* {
1675 		struct vnode *a_vp;
1676 	} */ *ap = v;
1677 
1678 	union_freevp(ap->a_vp);
1679 
1680 	return (0);
1681 }
1682 
/*
 * Lock a union vnode: acquire the union node's own flag-based lock
 * and, when an upper vnode exists, take its lock too (tracked with
 * UN_ULOCK).  LK_SHARED and LK_DRAIN requests are converted to
 * LK_EXCLUSIVE; DRAIN state is tracked at this level only (under
 * DIAGNOSTIC) rather than being passed to the sub-vnode.
 */
int
union_lock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct union_node *un;
	int error;
#ifdef DIAGNOSTIC
	int drain = 0;
#endif

	/* XXX unionfs can't handle shared locks yet */
	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
		flags = LK_EXCLUSIVE | (flags & ~LK_TYPE_MASK);
	}

	genfs_nolock(ap);
	/*
	 * Need to do real lockmgr-style locking here.
	 * in the mean time, draining won't work quite right,
	 * which could lead to a few race conditions.
	 * the following test was here, but is not quite right, we
	 * still need to take the lock:
	if ((flags & LK_TYPE_MASK) == LK_DRAIN)
		return (0);
	 */
	flags &= ~LK_INTERLOCK;

	un = VTOUNION(vp);
#ifdef DIAGNOSTIC
	/* Sanity-check operations against a lock being (or already) drained. */
	if (un->un_flags & (UN_DRAINING|UN_DRAINED)) {
		if (un->un_flags & UN_DRAINED)
			panic("union: %p: warning: locking decommissioned lock", vp);
		if ((flags & LK_TYPE_MASK) != LK_RELEASE)
			panic("union: %p: non-release on draining lock: %d",
			    vp, flags & LK_TYPE_MASK);
		un->un_flags &= ~UN_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			un->un_flags |= UN_DRAINED;
	}
#endif

	/*
	 * Don't pass DRAIN through to sub-vnode lock; keep track of
	 * DRAIN state at this level, and just get an exclusive lock
	 * on the underlying vnode.
	 */
	if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
#ifdef DIAGNOSTIC
		drain = 1;
#endif
		flags = LK_EXCLUSIVE | (flags & ~LK_TYPE_MASK);
	}
start:
	un = VTOUNION(vp);

	if (un->un_uppervp != NULLVP) {
		if (((un->un_flags & UN_ULOCK) == 0) &&
		    (vp->v_usecount != 0)) {
			/*
			 * We MUST always use the order of: take upper
			 * vp lock, manipulate union node flags, drop
			 * upper vp lock.  This code must not be an
			 * exception to that rule.
			 */
			error = vn_lock(un->un_uppervp, flags);
			if (error)
				return (error);
			un->un_flags |= UN_ULOCK;
		}
#ifdef DIAGNOSTIC
		/* UN_KLOCK must never survive across a fresh lock attempt. */
		if (un->un_flags & UN_KLOCK) {
			vprint("union: dangling klock", vp);
			panic("union: dangling upper lock (%p)", vp);
		}
#endif
	}

	/* XXX ignores LK_NOWAIT */
	if (un->un_flags & UN_LOCKED) {
#ifdef DIAGNOSTIC
		if (curproc && un->un_pid == curproc->p_pid &&
			    un->un_pid > -1 && curproc->p_pid > -1)
			panic("union: locking against myself");
#endif
		/* Sleep until the holder wakes us, then retry from scratch. */
		un->un_flags |= UN_WANTED;
		tsleep(&un->un_flags, PINOD, "unionlk2", 0);
		goto start;
	}

#ifdef DIAGNOSTIC
	/* Record the owner (for self-deadlock checks) and drain state. */
	if (curproc)
		un->un_pid = curproc->p_pid;
	else
		un->un_pid = -1;
	if (drain)
		un->un_flags |= UN_DRAINING;
#endif

	un->un_flags |= UN_LOCKED;
	return (0);
}
1789 
1790 /*
1791  * When operations want to vput() a union node yet retain a lock on
1792  * the upper vnode (say, to do some further operations like link(),
1793  * mkdir(), ...), they set UN_KLOCK on the union node, then call
1794  * vput() which calls VOP_UNLOCK() and comes here.  union_unlock()
1795  * unlocks the union node (leaving the upper vnode alone), clears the
1796  * KLOCK flag, and then returns to vput().  The caller then does whatever
1797  * is left to do with the upper vnode, and ensures that it gets unlocked.
1798  *
1799  * If UN_KLOCK isn't set, then the upper vnode is unlocked here.
1800  */
int
union_unlock(v)
	void *v;
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct union_node *un = VTOUNION(ap->a_vp);

#ifdef DIAGNOSTIC
	if ((un->un_flags & UN_LOCKED) == 0)
		panic("union: unlock unlocked node");
	if (curproc && un->un_pid != curproc->p_pid &&
			curproc->p_pid > -1 && un->un_pid > -1)
		panic("union: unlocking other process's union node");
	if (un->un_flags & UN_DRAINED)
		panic("union: %p: warning: unlocking decommissioned lock", ap->a_vp);
#endif

	un->un_flags &= ~UN_LOCKED;

	/*
	 * Unlock the upper vnode only when we hold its lock (UN_ULOCK)
	 * and the caller has not asked to keep it (UN_KLOCK) -- see the
	 * block comment above this function.
	 */
	if ((un->un_flags & (UN_ULOCK|UN_KLOCK)) == UN_ULOCK)
		VOP_UNLOCK(un->un_uppervp, 0);

	un->un_flags &= ~(UN_ULOCK|UN_KLOCK);

	/* Wake anyone sleeping in union_lock() waiting for this node. */
	if (un->un_flags & UN_WANTED) {
		un->un_flags &= ~UN_WANTED;
		wakeup( &un->un_flags);
	}

#ifdef DIAGNOSTIC
	/* Clear owner tracking; finish a pending drain transition. */
	un->un_pid = 0;
	if (un->un_flags & UN_DRAINING) {
		un->un_flags |= UN_DRAINED;
		un->un_flags &= ~UN_DRAINING;
	}
#endif
	genfs_nounlock(ap);

	return (0);
}
1844 
1845 int
1846 union_bmap(v)
1847 	void *v;
1848 {
1849 	struct vop_bmap_args /* {
1850 		struct vnode *a_vp;
1851 		daddr_t  a_bn;
1852 		struct vnode **a_vpp;
1853 		daddr_t *a_bnp;
1854 		int *a_runp;
1855 	} */ *ap = v;
1856 	int error;
1857 	struct vnode *vp = OTHERVP(ap->a_vp);
1858 	int dolock = (vp == LOWERVP(ap->a_vp));
1859 
1860 	if (dolock)
1861 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1862 	else
1863 		FIXUP(VTOUNION(ap->a_vp));
1864 	ap->a_vp = vp;
1865 	error = VCALL(vp, VOFFSET(vop_bmap), ap);
1866 	if (dolock)
1867 		VOP_UNLOCK(vp, 0);
1868 
1869 	return (error);
1870 }
1871 
1872 int
1873 union_print(v)
1874 	void *v;
1875 {
1876 	struct vop_print_args /* {
1877 		struct vnode *a_vp;
1878 	} */ *ap = v;
1879 	struct vnode *vp = ap->a_vp;
1880 
1881 	printf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
1882 			vp, UPPERVP(vp), LOWERVP(vp));
1883 	if (UPPERVP(vp) != NULLVP)
1884 		vprint("union: upper", UPPERVP(vp));
1885 	if (LOWERVP(vp) != NULLVP)
1886 		vprint("union: lower", LOWERVP(vp));
1887 	if (VTOUNION(vp)->un_dircache) {
1888 		struct vnode **vpp;
1889 		for (vpp = VTOUNION(vp)->un_dircache; *vpp != NULLVP; vpp++)
1890 			vprint("dircache:", *vpp);
1891 	}
1892 
1893 	return (0);
1894 }
1895 
1896 int
1897 union_islocked(v)
1898 	void *v;
1899 {
1900 	struct vop_islocked_args /* {
1901 		struct vnode *a_vp;
1902 	} */ *ap = v;
1903 
1904 	return ((VTOUNION(ap->a_vp)->un_flags & UN_LOCKED) ? 1 : 0);
1905 }
1906 
1907 int
1908 union_pathconf(v)
1909 	void *v;
1910 {
1911 	struct vop_pathconf_args /* {
1912 		struct vnode *a_vp;
1913 		int a_name;
1914 		int *a_retval;
1915 	} */ *ap = v;
1916 	int error;
1917 	struct vnode *vp = OTHERVP(ap->a_vp);
1918 	int dolock = (vp == LOWERVP(ap->a_vp));
1919 
1920 	if (dolock)
1921 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1922 	else
1923 		FIXUP(VTOUNION(ap->a_vp));
1924 	ap->a_vp = vp;
1925 	error = VCALL(vp, VOFFSET(vop_pathconf), ap);
1926 	if (dolock)
1927 		VOP_UNLOCK(vp, 0);
1928 
1929 	return (error);
1930 }
1931 
1932 int
1933 union_advlock(v)
1934 	void *v;
1935 {
1936 	struct vop_advlock_args /* {
1937 		struct vnode *a_vp;
1938 		void *a_id;
1939 		int  a_op;
1940 		struct flock *a_fl;
1941 		int  a_flags;
1942 	} */ *ap = v;
1943 	struct vnode *ovp = OTHERVP(ap->a_vp);
1944 
1945 	ap->a_vp = ovp;
1946 	return (VCALL(ovp, VOFFSET(vop_advlock), ap));
1947 }
1948 
1949 
1950 /*
1951  * XXX - vop_strategy must be hand coded because it has no
1952  * vnode in its arguments.
1953  * This goes away with a merged VM/buffer cache.
1954  */
1955 int
1956 union_strategy(v)
1957 	void *v;
1958 {
1959 	struct vop_strategy_args /* {
1960 		struct vnode *a_vp;
1961 		struct buf *a_bp;
1962 	} */ *ap = v;
1963 	struct vnode *ovp = OTHERVP(ap->a_vp);
1964 	struct buf *bp = ap->a_bp;
1965 
1966 #ifdef DIAGNOSTIC
1967 	if (ovp == NULLVP)
1968 		panic("union_strategy: nil vp");
1969 	if (((bp->b_flags & B_READ) == 0) &&
1970 	    (ovp == LOWERVP(bp->b_vp)))
1971 		panic("union_strategy: writing to lowervp");
1972 #endif
1973 
1974 	return (VOP_STRATEGY(ovp, bp));
1975 }
1976 
/*
 * Get pages: forwarded to the backing vnode.  Called with this
 * vnode's v_interlock held; the hold is transferred to the backing
 * vnode's interlock before the call.
 */
int
union_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	/*
	 * just pass the request on to the underlying layer.
	 */

	if (ap->a_flags & PGO_LOCKED) {
		/* Can't forward a locked request to another vnode. */
		return EBUSY;
	}
	ap->a_vp = OTHERVP(vp);
	/* Swap our interlock hold for one on the backing vnode. */
	simple_unlock(&vp->v_interlock);
	simple_lock(&ap->a_vp->v_interlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
	return error;
}
2007 
/*
 * Put pages: forwarded to the backing vnode.  Called with this
 * vnode's v_interlock held; the interlock is released on all paths
 * (including the PGO_RECLAIM short-circuit) before returning.
 */
int
union_putpages(v)
	void *v;
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	/*
	 * just pass the request on to the underlying layer.
	 */

	ap->a_vp = OTHERVP(vp);
	simple_unlock(&vp->v_interlock);
	if (ap->a_flags & PGO_RECLAIM) {
		/* Reclaim-time flushes are handled by the lower layers. */
		return 0;
	}
	simple_lock(&ap->a_vp->v_interlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
	return error;
}
2034 
2035 int
2036 union_kqfilter(void *v)
2037 {
2038 	struct vop_kqfilter_args /* {
2039 		struct vnode	*a_vp;
2040 		struct knote	*a_kn;
2041 	} */ *ap = v;
2042 	int error;
2043 
2044 	/*
2045 	 * We watch either the upper layer file (if it already exists),
2046 	 * or the lower layer one. If there is lower layer file only
2047 	 * at this moment, we will keep watching that lower layer file
2048 	 * even if upper layer file would be created later on.
2049 	 */
2050 	if (UPPERVP(ap->a_vp))
2051 		error = VOP_KQFILTER(UPPERVP(ap->a_vp), ap->a_kn);
2052 	else if (LOWERVP(ap->a_vp))
2053 		error = VOP_KQFILTER(LOWERVP(ap->a_vp), ap->a_kn);
2054 	else {
2055 		/* panic? */
2056 		error = EOPNOTSUPP;
2057 	}
2058 
2059 	return (error);
2060 }
2061