/*	$NetBSD: vfs_subr.c,v 1.38 1995/01/18 06:24:21 mycroft Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>

#include <vm/vm.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
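
/*
 * These tables back the IFTOVT() and VTTOIF() macros in <sys/vnode.h>,
 * which translate between the S_IFMT type bits of a mode_t and enum
 * vtype.  An illustrative use (a sketch, not code from this file) when
 * building attributes from an on-disk inode, where `ip' and `vap' are
 * hypothetical names:
 *
 *	vap->va_type = IFTOVT(ip->i_mode);	(S_IFDIR -> VDIR, etc.)
 *	mode = VTTOIF(vap->va_type) | (vap->va_mode & 07777);
 */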

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}
TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
struct mntlist mountlist;			/* mounted filesystem list */

/*
 * Initialize the vnode management data structures.
 */
vntblinit()
{

	TAILQ_INIT(&vnode_free_list);
	CIRCLEQ_INIT(&mountlist);
}

/*
 * Lock a filesystem.
 * Used to prevent access to it while mounting and unmounting.
 */
vfs_lock(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MLOCK) {
		mp->mnt_flag |= MNT_MWAIT;
		tsleep((caddr_t)mp, PVFS, "vfslock", 0);
	}
	mp->mnt_flag |= MNT_MLOCK;
	return (0);
}

/*
 * Unlock a locked filesystem.
 * Panic if filesystem is not locked.
 */
void
vfs_unlock(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MLOCK) == 0)
		panic("vfs_unlock: not locked");
	mp->mnt_flag &= ~MNT_MLOCK;
	if (mp->mnt_flag & MNT_MWAIT) {
		mp->mnt_flag &= ~MNT_MWAIT;
		wakeup((caddr_t)mp);
	}
}

/*
 * Mark a mount point as busy.
 * Used to synchronize access and to delay unmounting.
 */
vfs_busy(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MPBUSY) {
		mp->mnt_flag |= MNT_MPWANT;
		tsleep((caddr_t)&mp->mnt_flag, PVFS, "vfsbusy", 0);
	}
	if (mp->mnt_flag & MNT_UNMOUNT)
		return (1);
	mp->mnt_flag |= MNT_MPBUSY;
	return (0);
}

/*
 * Free a busy filesystem.
 * Panic if filesystem is not busy.
 */
vfs_unbusy(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vfs_unbusy: not busy");
	mp->mnt_flag &= ~MNT_MPBUSY;
	if (mp->mnt_flag & MNT_MPWANT) {
		mp->mnt_flag &= ~MNT_MPWANT;
		wakeup((caddr_t)&mp->mnt_flag);
	}
}
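
/*
 * Illustrative sketch (not part of the original file): an unmount path
 * would typically take the busy flag before tearing a mount down, so
 * that concurrent users are held off and MNT_UNMOUNT makes later
 * vfs_busy() calls fail:
 *
 *	if (vfs_busy(mp))
 *		return (EBUSY);
 *	... flush vnodes, call VFS_UNMOUNT ...
 *	vfs_unbusy(mp);
 *
 * The error handling shown is a hypothetical caller, not kernel code.
 */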

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	     mp = mp->mnt_list.cqe_next)
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
			return (mp);
	return ((struct mount *)0);
}
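
/*
 * Example use (a sketch only): a server-side NFS request carries an
 * fsid inside the file handle, and the mount point is recovered with
 * getvfs() before the vnode itself is looked up:
 *
 *	if ((mp = getvfs(&fhp->fh_fsid)) == NULL)
 *		return (ESTALE);
 *
 * `fhp' is a hypothetical file-handle pointer.
 */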

/*
 * Get a new unique fsid
 */
void
getnewfsid(mp, mtype)
	struct mount *mp;
	int mtype;
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;

	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + 11, 0);	/* XXX */
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev((nblkdev + mtype) & 0xff, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		while (getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Make a 'unique' number from a mount type name.
 */
long
makefstype(type)
	char *type;
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}
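
/*
 * Worked example (illustrative): each character shifts the accumulator
 * left two bits and is XORed in, so for the name "ufs":
 *
 *	rv = 0
 *	rv = (0   << 2) ^ 'u' (117) = 117
 *	rv = (117 << 2) ^ 'f' (102) = 468 ^ 102 = 434
 *	rv = (434 << 2) ^ 's' (115) = 1736 ^ 115 = 1723
 *
 * so makefstype("ufs") == 1723.  Distinct names can collide; the
 * result is only 'unique' in the loose sense the comment above implies.
 */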

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid =
		vap->va_blocksize = vap->va_rdev =
		vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
		vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
		vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
		vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
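
/*
 * Sketch of typical use (not from this file): a caller preparing a
 * VOP_SETATTR request first marks every field "not valid" and then sets
 * only the attributes it wants changed; the filesystem skips anything
 * still holding VNOVAL:
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = length;		(e.g. truncate to `length')
 *	error = VOP_SETATTR(vp, &va, cred, p);
 *
 * `length', `cred' and `p' are hypothetical locals of the caller.
 */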

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)();
extern void vclean();
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops)();
	struct vnode **vpp;
{
	register struct vnode *vp;
	int s;

	if ((vnode_free_list.tqh_first == NULL &&
	     numvnodes < 2 * desiredvnodes) ||
	    numvnodes < desiredvnodes) {
		vp = (struct vnode *)malloc((u_long)sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *)vp, sizeof *vp);
		numvnodes++;
	} else {
		if ((vp = vnode_free_list.tqh_first) == NULL) {
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount)
			panic("free vnode isn't");
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
		vp->v_lease = NULL;
		if (vp->v_type != VBAD)
			vgone(vp);
#ifdef DIAGNOSTIC
		if (vp->v_data)
			panic("cleaned vnode isn't");
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_ralen = 0;
		vp->v_maxra = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}
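
/*
 * Illustrative caller (a sketch, not code from this file): a filesystem
 * allocating a fresh vnode hands getnewvnode() its tag and vnodeop
 * vector, then attaches its private per-file data through v_data:
 *
 *	if (error = getnewvnode(VT_UFS, mp, ufs_vnodeop_p, &vp))
 *		return (error);
 *	vp->v_data = ip;	(hypothetical in-core inode)
 *	ip->i_vnode = vp;
 *
 * The vnode comes back with v_usecount == 1, so the caller already
 * holds a reference.
 */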

/*
 * Move a vnode from one mount queue to another.
 */
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL)
		return;
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if (vp = bp->b_vp) {
		if (--vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

	if (flags & V_SAVE) {
		if (error = VOP_FSYNC(vp, cred, MNT_WAIT, p))
			return (error);
		if (vp->v_dirtyblkhd.lh_first != NULL)
			panic("vinvalbuf: dirty bufs");
	}
	for (;;) {
		if ((blist = vp->v_cleanblkhd.lh_first) && flags & V_SAVEMETA)
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t)bp,
					slpflag | (PRIBIO + 1), "vinvalbuf",
					slptimeo);
				splx(s);
				if (error)
					return (error);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				(void) VOP_BWRITE(bp);
				break;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
		panic("vinvalbuf: flush failed");
	return (0);
}
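
/*
 * Usage sketch (hypothetical caller): a truncate-to-zero path wants the
 * dirty data written first, then every cached buffer discarded, and
 * will tolerate being interrupted by a signal:
 *
 *	if (error = vinvalbuf(vp, V_SAVE, cred, p, PCATCH, 0))
 *		return (error);
 *
 * Passing V_SAVEMETA instead keeps indirect blocks (b_lblkno < 0);
 * vclean() below, for instance, uses V_SAVE so pending writes reach
 * disk before the vnode is reclaimed.
 */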

void
vflushbuf(vp, sync)
	register struct vnode *vp;
	int sync;
{
	register struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0);
	}
	splx(s);
	if (vp->v_dirtyblkhd.lh_first != NULL) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}

/*
 * Associate a buffer with a vnode.
 */
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 */
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buflists *listheadp;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI)
		listheadp = &newvp->v_dirtyblkhd;
	else
		listheadp = &newvp->v_cleanblkhd;
	bufinsvn(bp, listheadp);
}
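
/*
 * Lifecycle sketch (illustrative only): a buffer is attached to a vnode
 * when it is first mapped to a file, migrates between the clean and
 * dirty lists as it is written, and is detached when recycled:
 *
 *	bgetvp(vp, bp);			(attach; lands on v_cleanblkhd)
 *	bp->b_flags |= B_DELWRI;	(buffer dirtied by a write)
 *	reassignbuf(bp, bp->b_vp);	(move to v_dirtyblkhd)
 *	...				(I/O completes, buffer recycled)
 *	brelvp(bp);			(drop the association)
 *
 * The buffer cache proper (bdwrite() and friends) performs these steps;
 * the lines above only show the order in which they occur.
 */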

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
cdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console and kernfs.
 */
getdevvp(dev, vpp, type)
	dev_t dev;
	struct vnode **vpp;
	enum vtype type;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV)
		return (0);
	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if (nvp = checkalias(vp, dev, (struct mount *)0)) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
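
/*
 * Example use (sketch): mounting the root filesystem starts from a bare
 * dev_t and needs a vnode to hand to the filesystem code:
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("cannot obtain root vnode");
 *
 * `rootdev' and `rootvp' are the kernel globals conventionally used for
 * this; swap configuration does the same for the swap device.
 */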

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		if (vget(vp, 1))
			goto loop;
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, lockflag)
	register struct vnode *vp;
	int lockflag;
{

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined either by checking
	 * that the VXLOCK flag is set, or that the use count is
	 * zero with the back pointer set to show that it has been
	 * removed from the free list by getnewvnode. The VXLOCK
	 * flag may not have been set yet because vclean is blocked in
	 * the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
	 */
	if ((vp->v_flag & VXLOCK) ||
	    (vp->v_usecount == 0 &&
	     vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb)) {
		vp->v_flag |= VXWANT;
		tsleep((caddr_t)vp, PINOD, "vget", 0);
		return (1);
	}
	if (vp->v_usecount == 0)
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	vp->v_usecount++;
	if (lockflag)
		VOP_LOCK(vp);
	return (0);
}

/*
 * Vnode reference, just increment the count
 */
void
vref(vp)
	struct vnode *vp;
{

	if (vp->v_usecount <= 0)
		panic("vref used where vget required");
	vp->v_usecount++;
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	register struct vnode *vp;
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	register struct vnode *vp;
{

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0)
		return;
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0 || vp->v_writecount != 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	/*
	 * insert at tail of LRU list
	 */
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	VOP_INACTIVE(vp);
}
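
/*
 * Reference-counting sketch (illustrative, not code from this file):
 * a caller that pulls a vnode off the free list, uses it locked, and
 * then drops it goes through the following pairing:
 *
 *	if (vget(vp, 1))	(reference + VOP_LOCK; fails if recycling)
 *		goto retry;	(vnode was reclaimed under us)
 *	... operate on the locked vnode ...
 *	vput(vp);		(VOP_UNLOCK + vrele in one call)
 *
 * An unlocked reference uses vref()/vrele() instead; when the last
 * vrele() drops v_usecount to zero the vnode goes back on the LRU
 * free list via VOP_INACTIVE, as above.
 */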

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	register struct vnode *vp;
{

	vp->v_holdcnt++;
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
	register struct vnode *vp;
{

	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *)0);
			}
			continue;
		}
#ifdef DEBUG
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}
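
/*
 * Usage sketch (hypothetical caller): a filesystem's unmount routine
 * flushes every vnode except the device vnode it still needs, forcing
 * the issue only when MNT_FORCE was given:
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	if (error = vflush(mp, devvp, flags))
 *		return (error);
 *
 * `devvp' and `mntflags' are locals of the hypothetical caller.
 */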

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(vp, flags)
	register struct vnode *vp;
	int flags;
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if (active = vp->v_usecount)
		VREF(vp);
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp);
	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	/*
	 * Any other processes trying to obtain this lock must first
	 * wait for VXLOCK to clear, then call the new lock operation.
	 */
	VOP_UNLOCK(vp);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, IO_NDELAY, NOCRED, NULL);
		VOP_INACTIVE(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			tsleep((caddr_t)vp, PINOD, "vgoneall", 0);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep((caddr_t)vp, PINOD, "vgone", 0);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		LIST_REMOVE(vp, v_mntvnodes);
		vp->v_mount = NULL;
	}
	/*
	 * If special device, remove it from special device alias list.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. The test of the back
	 * pointer and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from the freelist to ensure
	 * that we do not try to move it here.
	 */
	if (vp->v_usecount == 0 &&
	    vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
	    vnode_free_list.tqh_first != vp) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	}
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		return (1);
	}
	return (0);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}
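
/*
 * Example use (sketch): a device driver's close routine must
 * distinguish "last close" from an intermediate one, and aliases mean
 * the v_usecount of a single vnode is not enough:
 *
 *	if (vcount(vp) > 1)
 *		return (0);	(other opens remain; keep device live)
 *	... real last-close teardown ...
 *
 * The driver and return convention here are hypothetical.
 */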

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("type %s, usecount %d, writecount %d, refcount %d,",
		typename[vp->v_type], vp->v_usecount, vp->v_writecount,
		vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
printlockedvnodes()
{
	register struct mount *mp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	     mp = mp->mnt_list.cqe_next) {
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = vp->v_mntvnodes.le_next)
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
	}
}
#endif

int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
sysctl_vnode(where, sizep)
	char *where;
	size_t *sizep;
{
	register struct mount *mp, *nmp;
	struct vnode *vp;
	register char *bp = where, *savebp;
	char *ewhere;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)
	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *sizep;

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_next;
		if (vfs_busy(mp))
			continue;
		savebp = bp;
again:
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = vp->v_mntvnodes.le_next) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			if (bp + VPTRSZ + VNODESZ > ewhere) {
				*sizep = bp - where;
				return (ENOMEM);
			}
			if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			   (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
				return (error);
			bp += VPTRSZ + VNODESZ;
		}
		vfs_unbusy(mp);
	}

	*sizep = bp - where;
	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_specflags & SI_MOUNTEDON)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specflags & SI_MOUNTEDON)
				return (EBUSY);
		}
	}
	return (0);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
	bzero((caddr_t)np, i);
	saddr = (struct sockaddr *)(np + 1);
	if (error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used; do so on demand here.
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
					dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
		np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	caddr_t w;
{
	register struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t)rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	register int i;
	register struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if (rnh = nep->ne_rtable[i]) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred,
			    (caddr_t)rnh);
			free((caddr_t)rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (error = vfs_hang_addrlist(mp, nep, argp))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}
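
/*
 * Usage sketch (hypothetical caller): a filesystem's mount routine
 * handling an update that carries only export information forwards the
 * export_args here:
 *
 *	if (args.fspec == 0)
 *		return (vfs_export(mp, &ump->um_export, &args.export));
 *
 * The `args'/`ump' names mirror the ufs convention but are assumptions;
 * the routine itself only toggles MNT_EXPORTED state and the per-mount
 * radix tables built by vfs_hang_addrlist() above.
 */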

struct netcred *
vfs_export_lookup(mp, nep, nam)
	register struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((caddr_t)saddr,
							      rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
vaccess(file_mode, uid, gid, acc_mode, cred)
	mode_t file_mode;
	uid_t uid;
	gid_t gid;
	mode_t acc_mode;
	struct ucred *cred;
{
	mode_t mask;

	/* User id 0 always gets access. */
	if (cred->cr_uid == 0)
		return 0;

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}
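
/*
 * Example caller (a sketch, not code from this file): a filesystem's
 * access routine maps its stored ownership and mode straight through:
 *
 *	return (vaccess(ip->i_mode & 07777, ip->i_uid, ip->i_gid,
 *	    acc_mode, cred));
 *
 * `ip' is a hypothetical in-core inode; mode-bit masking conventions
 * vary by filesystem.
 */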
1477