/*	$NetBSD: vfs_subr.c,v 1.54 1996/06/01 20:24:05 jtk Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>

#include <vm/vm.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}
TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
struct mntlist mountlist;			/* mounted filesystem list */

int vfs_lock __P((struct mount *));
void vfs_unlock __P((struct mount *));
struct mount *getvfs __P((fsid_t *));
long makefstype __P((char *));
void vattr_null __P((struct vattr *));
int getnewvnode __P((enum vtagtype, struct mount *, int (**)(void *),
		     struct vnode **));
void insmntque __P((struct vnode *, struct mount *));
int vinvalbuf __P((struct vnode *, int, struct ucred *, struct proc *, int,
		   int));
void vflushbuf __P((struct vnode *, int));
void brelvp __P((struct buf *));
int bdevvp __P((dev_t, struct vnode **));
int cdevvp __P((dev_t, struct vnode **));
int getdevvp __P((dev_t, struct vnode **, enum vtype));
struct vnode *checkalias __P((struct vnode *, dev_t, struct mount *));
int vget __P((struct vnode *, int));
void vref __P((struct vnode *));
void vput __P((struct vnode *));
void vrele __P((struct vnode *));
void vhold __P((struct vnode *));
void holdrele __P((struct vnode *));
int vflush __P((struct mount *, struct vnode *, int));
void vgoneall __P((struct vnode *));
void vgone __P((struct vnode *));
int vcount __P((struct vnode *));
void vprint __P((char *, struct vnode *));
int vfs_mountedon __P((struct vnode *));
int vfs_export __P((struct mount *, struct netexport *, struct export_args *));
struct netcred *vfs_export_lookup __P((struct mount *, struct netexport *,
				       struct mbuf *));
int vaccess __P((mode_t, uid_t, gid_t, mode_t, struct ucred *));
void vfs_unmountall __P((void));
void vfs_shutdown __P((void));

static int vfs_hang_addrlist __P((struct mount *, struct netexport *,
				  struct export_args *));
static int vfs_free_netcred __P((struct radix_node *, void *));
static void vfs_free_addrlist __P((struct netexport *));

#ifdef DEBUG
void printlockedvnodes __P((void));
#endif

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

	TAILQ_INIT(&vnode_free_list);
	CIRCLEQ_INIT(&mountlist);
}

/*
 * Lock a filesystem.
 * Used to prevent access to it while mounting and unmounting.
 */
int
vfs_lock(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MLOCK) {
		mp->mnt_flag |= MNT_MWAIT;
		tsleep((caddr_t)mp, PVFS, "vfslock", 0);
	}
	mp->mnt_flag |= MNT_MLOCK;
	return (0);
}

/*
 * Unlock a locked filesystem.
 * Panic if filesystem is not locked.
 */
void
vfs_unlock(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MLOCK) == 0)
		panic("vfs_unlock: not locked");
	mp->mnt_flag &= ~MNT_MLOCK;
	if (mp->mnt_flag & MNT_MWAIT) {
		mp->mnt_flag &= ~MNT_MWAIT;
		wakeup((caddr_t)mp);
	}
}
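
/*
 * Usage sketch (illustrative, not a caller in this file): mount and
 * unmount paths bracket their work with this lock, roughly
 *
 *	vfs_lock(mp);
 *	... modify the mount point ...
 *	vfs_unlock(mp);
 *
 * vfs_lock() as written always returns 0, so the return value exists
 * only for interface symmetry with callers that check it.
 */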

/*
 * Mark a mount point as busy.
 * Used to synchronize access and to delay unmounting.
 */
int
vfs_busy(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MPBUSY) {
		mp->mnt_flag |= MNT_MPWANT;
		tsleep((caddr_t)&mp->mnt_flag, PVFS, "vfsbusy", 0);
	}
	if (mp->mnt_flag & MNT_UNMOUNT)
		return (1);
	mp->mnt_flag |= MNT_MPBUSY;
	return (0);
}

/*
 * Free a busy filesystem.
 * Panic if filesystem is not busy.
 */
void
vfs_unbusy(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vfs_unbusy: not busy");
	mp->mnt_flag &= ~MNT_MPBUSY;
	if (mp->mnt_flag & MNT_MPWANT) {
		mp->mnt_flag &= ~MNT_MPWANT;
		wakeup((caddr_t)&mp->mnt_flag);
	}
}
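
/*
 * Usage sketch: callers must check the return value, since a mount
 * point that is mid-unmount cannot be made busy:
 *
 *	if (vfs_busy(mp))
 *		continue;
 *	... walk mp->mnt_vnodelist ...
 *	vfs_unbusy(mp);
 *
 * sysctl_vnode() below follows exactly this pattern.
 */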

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	     mp = mp->mnt_list.cqe_next)
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
			return (mp);
	return ((struct mount *)0);
}

/*
 * Get a new unique fsid
 */
void
getnewfsid(mp, mtype)
	struct mount *mp;
	int mtype;
{
	static u_short xxxfs_mntid;
	fsid_t tfsid;

	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + 11, 0);	/* XXX */
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev((nblkdev + mtype) & 0xff, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		while (getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Make a 'unique' number from a mount type name.
 */
long
makefstype(type)
	char *type;
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}
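
/*
 * For illustration: the value is order-sensitive but not collision
 * free.  E.g. makefstype("ffs") computes
 *	((('f' << 2) ^ 'f') << 2) ^ 's' == 1931
 * so distinct type names can in principle map to the same number.
 */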

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid =
		vap->va_blocksize = vap->va_rdev =
		vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
		vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
		vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
		vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p) __P((void *));
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops) __P((void *));
	struct vnode **vpp;
{
	register struct vnode *vp;
#ifdef DIAGNOSTIC
	int s;
#endif

	if ((vnode_free_list.tqh_first == NULL &&
	     numvnodes < 2 * desiredvnodes) ||
	    numvnodes < desiredvnodes) {
		vp = (struct vnode *)malloc((u_long)sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *)vp, sizeof *vp);
		numvnodes++;
	} else {
		if ((vp = vnode_free_list.tqh_first) == NULL) {
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
		vp->v_lease = NULL;
		if (vp->v_type != VBAD)
			vgone(vp);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_ralen = 0;
		vp->v_maxra = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}
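
/*
 * Usage sketch (hypothetical caller): a filesystem's inode-fetch path
 * allocates a fresh vnode and then hangs its private data off it:
 *
 *	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0)
 *		return (error);
 *	vp->v_data = ip;
 *
 * The vnode comes back with v_usecount == 1, v_type == VNON and
 * v_data == 0; the caller is expected to finish initializing it.
 */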

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL)
		return;
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp) != NULL) {
		if (--vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

	if (flags & V_SAVE) {
		if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
			return (error);
		if (vp->v_dirtyblkhd.lh_first != NULL)
			panic("vinvalbuf: dirty bufs");
	}
	for (;;) {
		if ((blist = vp->v_cleanblkhd.lh_first) && flags & V_SAVEMETA)
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t)bp,
					slpflag | (PRIBIO + 1), "vinvalbuf",
					slptimeo);
				splx(s);
				if (error)
					return (error);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				(void) VOP_BWRITE(bp);
				break;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
		panic("vinvalbuf: flush failed");
	return (0);
}
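
/*
 * Usage sketch: to throw away a vnode's cached buffers while first
 * writing back anything dirty, pass V_SAVE; vclean() below does
 *
 *	vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
 *
 * slpflag and slptimeo are passed through to tsleep() so the wait on
 * a busy buffer can be made interruptible (e.g. PCATCH) or bounded.
 */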

/*
 * Flush all dirty buffers associated with a vnode; if "sync" is set,
 * wait for the writes to complete.
 */
void
vflushbuf(vp, sync)
	register struct vnode *vp;
	int sync;
{
	register struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0);
	}
	splx(s);
	if (vp->v_dirtyblkhd.lh_first != NULL) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buflists *listheadp;

	if (newvp == NULL) {
		printf("reassignbuf: NULL\n");
		return;
	}
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI)
		listheadp = &newvp->v_dirtyblkhd;
	else
		listheadp = &newvp->v_cleanblkhd;
	bufinsvn(bp, listheadp);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console and kernfs.
 */
int
getdevvp(dev, vpp, type)
	dev_t dev;
	struct vnode **vpp;
	enum vtype type;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV)
		return (0);
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
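
/*
 * Usage sketch: root setup code typically does
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("can't set up root vnode");
 *
 * Beware that getdevvp() returns 0 for NODEV without producing a
 * vnode, so callers must ensure dev is valid before trusting *vpp.
 */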

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		if (vget(vp, 1))
			goto loop;
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set while the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, lockflag)
	register struct vnode *vp;
	int lockflag;
{

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined either by checking
	 * that the VXLOCK flag is set, or that the use count is
	 * zero with the back pointer set to show that it has been
	 * removed from the free list by getnewvnode. The VXLOCK
	 * flag may not have been set yet because vclean is blocked in
	 * the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
	 */
	if ((vp->v_flag & VXLOCK) ||
	    (vp->v_usecount == 0 &&
	     vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb)) {
		vp->v_flag |= VXWANT;
		tsleep((caddr_t)vp, PINOD, "vget", 0);
		return (1);
	}
	if (vp->v_usecount == 0)
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	vp->v_usecount++;
	if (lockflag)
		VOP_LOCK(vp);
	return (0);
}

/*
 * Vnode reference, just increment the count
 */
void
vref(vp)
	struct vnode *vp;
{

	if (vp->v_usecount <= 0)
		panic("vref used where vget required");
	vp->v_usecount++;
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	register struct vnode *vp;
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	register struct vnode *vp;
{

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0)
		return;
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0 || vp->v_writecount != 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	/*
	 * insert at tail of LRU list
	 */
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	VOP_INACTIVE(vp);
}
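
/*
 * Reference-counting sketch: a lookup typically hands back a locked,
 * referenced vnode obtained via vget(vp, 1); the caller later drops
 * both with vput(vp).  A caller that only ever held an unlocked
 * reference uses plain vrele(vp), and a short-lived extra reference
 * on an already-active vnode is taken with VREF(vp).
 */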

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	register struct vnode *vp;
{

	vp->v_holdcnt++;
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
	register struct vnode *vp;
{

	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones;
 * an error is returned if any are found (nb: this is a user error,
 * not a system error). If MNT_FORCE is specified, detach any active
 * vnodes that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *)0);
			}
			continue;
		}
#ifdef DEBUG
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(vp, flags)
	register struct vnode *vp;
	int flags;
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		VREF(vp);
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp);
	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	/*
	 * Any other processes trying to obtain this lock must first
	 * wait for VXLOCK to clear, then call the new lock operation.
	 */
	VOP_UNLOCK(vp);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
		VOP_INACTIVE(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			tsleep((caddr_t)vp, PINOD, "vgoneall", 0);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep((caddr_t)vp, PINOD, "vgone", 0);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. The test of the back
	 * pointer and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from the freelist to ensure
	 * that we do not try to move it here.
	 */
	if (vp->v_usecount == 0 &&
	    vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
	    vnode_free_list.tqh_first != vp) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	}
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		return (1);
	}
	return (0);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}
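
/*
 * Usage sketch: device close routines use vcount() to decide whether
 * this is the last close across all aliases of a device, roughly
 *
 *	if (vcount(vp) > 1)
 *		return (0);
 *
 * (spec_close() in miscfs/specfs performs this kind of check before
 * calling the driver's close entry.)
 */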

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("type %s, usecount %d, writecount %d, refcount %ld,",
		typename[vp->v_type], vp->v_usecount, vp->v_writecount,
		vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes()
{
	register struct mount *mp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	     mp = mp->mnt_list.cqe_next) {
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = vp->v_mntvnodes.le_next)
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
	}
}
#endif

int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(where, sizep)
	char *where;
	size_t *sizep;
{
	register struct mount *mp, *nmp;
	struct vnode *vp;
	register char *bp = where, *savebp;
	char *ewhere;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)
	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *sizep;

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_next;
		if (vfs_busy(mp))
			continue;
		savebp = bp;
again:
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = vp->v_mntvnodes.le_next) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			if (bp + VPTRSZ + VNODESZ > ewhere) {
				*sizep = bp - where;
				return (ENOMEM);
			}
			if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			   (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
				return (error);
			bp += VPTRSZ + VNODESZ;
		}
		vfs_unbusy(mp);
	}

	*sizep = bp - where;
	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_specflags & SI_MOUNTEDON)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specflags & SI_MOUNTEDON)
				return (EBUSY);
		}
	}
	return (0);
}
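
/*
 * Usage sketch: a filesystem's mount routine checks the device vnode
 * before claiming it, roughly
 *
 *	if ((error = vfs_mountedon(devvp)) != 0)
 *		return (error);
 *
 * and sets SI_MOUNTEDON in devvp->v_specflags once the mount succeeds,
 * which is what this routine tests for.
 */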

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
	bzero((caddr_t)np, i);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used, do so on demand here
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
					dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
		np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	void *w;
{
	register struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t)rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	register int i;
	register struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free((caddr_t)rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

/*
 * Update the export list for a mount point as directed by the
 * MNT_DELEXPORT and MNT_EXPORTED bits in the export arguments.
 */
int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}
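
/*
 * Usage sketch (hypothetical filesystem code): an MNT_UPDATE mount
 * with no device path typically just adjusts the export list, e.g.
 *
 *	if (args.fspec == 0)
 *		return (vfs_export(mp, &ump->um_export, &args.export));
 *
 * where um_export is the netexport structure embedded in that
 * filesystem's private mount structure.
 */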

/*
 * Look up the export credentials that apply to a client address,
 * falling back to the default export if one is configured.
 */
struct netcred *
vfs_export_lookup(mp, nep, nam)
	register struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((caddr_t)saddr,
							      rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(file_mode, uid, gid, acc_mode, cred)
	mode_t file_mode;
	uid_t uid;
	gid_t gid;
	mode_t acc_mode;
	struct ucred *cred;
{
	mode_t mask;

	/* User id 0 always gets access. */
	if (cred->cr_uid == 0)
		return 0;

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}
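
/*
 * Worked example: for a file with mode 0644, a credential matching
 * the owner that requests VREAD|VWRITE builds mask = S_IRUSR|S_IWUSR
 * (0600); 0644 & 0600 == 0600, so access is granted.  The same
 * request from a mere group member builds S_IRGRP|S_IWGRP (0060);
 * 0644 & 0060 == 0040 != 0060, so the result is EACCES.
 */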

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall()
{
	register struct mount *mp, *nmp;
	int allerror, error;

	for (allerror = 0,
	     mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;
#ifdef DEBUG
		printf("unmounting %s (%s)...\n",
		       mp->mnt_stat.f_mntonname, mp->mnt_stat.f_mntfromname);
#endif
		if ((error = dounmount(mp, MNT_FORCE, &proc0)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}
	if (allerror)
		printf("WARNING: some file systems would not unmount\n");
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown()
{
	register struct buf *bp;
	int iter, nbusy;

	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Release inodes held by texts before update. */
		vnode_pager_umount(NULL);
#ifdef notdef
		vnshutdown();
#endif

		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, (void *)0, (register_t *)0);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	/* Sync again after unmount, just in case. */
	sys_sync(&proc0, (void *)0, (register_t *)0);

	/* Wait for sync to finish. */
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; )
			if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
				nbusy++;
		if (nbusy == 0)
			break;
		printf("%d ", nbusy);
		DELAY(40000 * iter);
	}
	if (nbusy)
		printf("giving up\n");
	else
		printf("done\n");
}
1617