xref: /csrg-svn/sys/kern/vfs_subr.c (revision 41421)
1 /*
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms are permitted
6  * provided that the above copyright notice and this paragraph are
7  * duplicated in all such forms and that any documentation,
8  * advertising materials, and other materials related to such
9  * distribution and use acknowledge that the software was developed
10  * by the University of California, Berkeley.  The name of the
11  * University may not be used to endorse or promote products derived
12  * from this software without specific prior written permission.
13  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
16  *
17  *	@(#)vfs_subr.c	7.44 (Berkeley) 05/05/90
18  */
19 
20 /*
21  * External virtual filesystem routines
22  */
23 
24 #include "param.h"
25 #include "mount.h"
26 #include "time.h"
27 #include "vnode.h"
28 #include "specdev.h"
29 #include "namei.h"
30 #include "ucred.h"
31 #include "errno.h"
32 #include "malloc.h"
33 
/*
 * Remove a mount point from the list of mounted filesystems.
 * Unmount of the root is illegal.
 *
 * Caller must hold the mount lock (MNT_MLOCK); it is released here.
 */
void
vfs_remove(mp)
	register struct mount *mp;
{

	if (mp == rootfs)
		panic("vfs_remove: unmounting root");
	/* Unlink mp from the doubly-linked circular mount list. */
	mp->mnt_prev->mnt_next = mp->mnt_next;
	mp->mnt_next->mnt_prev = mp->mnt_prev;
	/* The covered vnode no longer has anything mounted over it. */
	mp->mnt_vnodecovered->v_mountedhere = (struct mount *)0;
	/* Drop the mount lock, waking anyone sleeping in vfs_lock(). */
	vfs_unlock(mp);
}
50 
/*
 * Lock a filesystem.
 * Used to prevent access to it while mounting and unmounting.
 * Sleeps (uninterruptibly, at PVFS) until the lock is free;
 * always returns 0.
 */
vfs_lock(mp)
	register struct mount *mp;
{

	/* Wait until no other process holds the mount lock. */
	while(mp->mnt_flag & MNT_MLOCK) {
		/* Ask the current holder to wake us on vfs_unlock(). */
		mp->mnt_flag |= MNT_MWAIT;
		sleep((caddr_t)mp, PVFS);
	}
	mp->mnt_flag |= MNT_MLOCK;
	return (0);
}
66 
/*
 * Unlock a locked filesystem.
 * Panic if filesystem is not locked.
 * Counterpart of vfs_lock(); wakes any processes sleeping on mp.
 */
void
vfs_unlock(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MLOCK) == 0)
		panic("vfs_unlock: not locked");
	mp->mnt_flag &= ~MNT_MLOCK;
	/* Wake waiters only if someone announced themselves via MNT_MWAIT. */
	if (mp->mnt_flag & MNT_MWAIT) {
		mp->mnt_flag &= ~MNT_MWAIT;
		wakeup((caddr_t)mp);
	}
}
84 
/*
 * Mark a mount point as busy.
 * Used to synchronize access and to delay unmounting.
 * Returns 0 on success; returns 1 (without marking busy) if an
 * unmount is in progress after the wait, in which case the caller
 * must not use the mount point.
 */
vfs_busy(mp)
	register struct mount *mp;
{

	/* Sleep on the flag word (not mp itself -- distinct wait channel). */
	while(mp->mnt_flag & MNT_MPBUSY) {
		mp->mnt_flag |= MNT_MPWANT;
		sleep((caddr_t)&mp->mnt_flag, PVFS);
	}
	/* Checked only after the wait: an unmount may have started. */
	if (mp->mnt_flag & MNT_UNMOUNT)
		return (1);
	mp->mnt_flag |= MNT_MPBUSY;
	return (0);
}
102 
/*
 * Free a busy filesystem.
 * Panic if filesystem is not busy.
 * Counterpart of vfs_busy(); wakes waiters sleeping on the flag word.
 */
void
vfs_unbusy(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vfs_unbusy: not busy");
	mp->mnt_flag &= ~MNT_MPBUSY;
	if (mp->mnt_flag & MNT_MPWANT) {
		mp->mnt_flag &= ~MNT_MPWANT;
		/* Same wait channel used by vfs_busy(). */
		wakeup((caddr_t)&mp->mnt_flag);
	}
}
120 
121 /*
122  * Lookup a mount point by filesystem identifier.
123  */
124 struct mount *
125 getvfs(fsid)
126 	fsid_t *fsid;
127 {
128 	register struct mount *mp;
129 
130 	mp = rootfs;
131 	do {
132 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
133 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
134 			return (mp);
135 		}
136 		mp = mp->mnt_next;
137 	} while (mp != rootfs);
138 	return ((struct mount *)0);
139 }
140 
/*
 * Set vnode attributes to VNOVAL
 *
 * Marks every attribute field "unspecified" so that a subsequent
 * VOP_SETATTR only acts on fields the caller fills in afterwards.
 */
void vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	/*
	 * NOTE: this is one chained assignment on purpose; the value
	 * stored into each field is the (possibly converted) result of
	 * the assignment to its right-hand neighbor, so do not "clean
	 * this up" into separate assignments without checking the
	 * field types -- the stored bit patterns could change.
	 */
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid = vap->va_size =
		vap->va_size_rsv = vap->va_blocksize = vap->va_rdev =
		vap->va_bytes = vap->va_bytes_rsv =
		vap->va_atime.tv_sec = vap->va_atime.tv_usec =
		vap->va_mtime.tv_sec = vap->va_mtime.tv_usec =
		vap->va_ctime.tv_sec = vap->va_ctime.tv_usec =
		vap->va_flags = vap->va_gen = VNOVAL;
}
158 
159 /*
160  * Initialize a nameidata structure
161  */
162 ndinit(ndp)
163 	register struct nameidata *ndp;
164 {
165 
166 	bzero((caddr_t)ndp, sizeof(struct nameidata));
167 	ndp->ni_iov = &ndp->ni_nd.nd_iovec;
168 	ndp->ni_iovcnt = 1;
169 	ndp->ni_base = (caddr_t)&ndp->ni_dent;
170 	ndp->ni_rw = UIO_WRITE;
171 	ndp->ni_uioseg = UIO_SYSSPACE;
172 }
173 
/*
 * Duplicate a nameidata structure
 *
 * Initializes newndp and copies the lookup context (current and
 * root directories, credentials) from ndp, taking a reference on
 * each so the copies can be released independently via ndrele().
 */
nddup(ndp, newndp)
	register struct nameidata *ndp, *newndp;
{

	ndinit(newndp);
	newndp->ni_cdir = ndp->ni_cdir;
	VREF(newndp->ni_cdir);
	/* The root directory may be unset (no chroot); ref only if present. */
	newndp->ni_rdir = ndp->ni_rdir;
	if (newndp->ni_rdir)
		VREF(newndp->ni_rdir);
	newndp->ni_cred = ndp->ni_cred;
	crhold(newndp->ni_cred);
}
190 
/*
 * Release a nameidata structure
 *
 * Drops the references taken by nddup(): current directory, root
 * directory (if any), and credentials.
 */
ndrele(ndp)
	register struct nameidata *ndp;
{

	vrele(ndp->ni_cdir);
	if (ndp->ni_rdir)
		vrele(ndp->ni_rdir);
	crfree(ndp->ni_cred);
}
203 
/*
 * Routines having to do with the management of the vnode table.
 */
struct vnode *vfreeh, **vfreet;		/* free-list head and tail pointer */
extern struct vnodeops dead_vnodeops, spec_vnodeops;
extern void vclean();
long numvnodes;				/* vnodes currently allocated */
struct vattr va_null;			/* attributes preset to VNOVAL */
212 
213 /*
214  * Initialize the vnode structures and initialize each file system type.
215  */
216 vfsinit()
217 {
218 	struct vfsops **vfsp;
219 
220 	/*
221 	 * Initialize the vnode name cache
222 	 */
223 	nchinit();
224 	/*
225 	 * Initialize each file system type.
226 	 */
227 	vattr_null(&va_null);
228 	for (vfsp = &vfssw[0]; vfsp <= &vfssw[MOUNT_MAXTYPE]; vfsp++) {
229 		if (*vfsp == NULL)
230 			continue;
231 		(*(*vfsp)->vfs_init)();
232 	}
233 }
234 
/*
 * Return the next vnode from the free list.
 *
 * While fewer than desiredvnodes exist, new vnodes are allocated;
 * after that, the least recently used free vnode is recycled.
 * On success *vpp holds a referenced vnode and 0 is returned;
 * on exhaustion *vpp is nil and ENFILE is returned.
 */
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	struct vnodeops *vops;
	struct vnode **vpp;
{
	register struct vnode *vp, *vq;

	if (numvnodes < desiredvnodes) {
		/* Grow the table rather than recycle. */
		vp = (struct vnode *)malloc(sizeof *vp, M_VNODE, M_WAITOK);
		bzero((char *)vp, sizeof *vp);
		numvnodes++;
	} else {
		if ((vp = vfreeh) == NULL) {
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		/* A vnode on the free list must have no references. */
		if (vp->v_usecount)
			panic("free vnode isn't");
		/* Unlink vp (the head) from the free list. */
		if (vq = vp->v_freef)
			vq->v_freeb = &vfreeh;
		else
			vfreet = &vfreeh;	/* list becomes empty */
		vfreeh = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
		/* Flush out old filesystem state unless already reclaimed. */
		if (vp->v_type != VBAD)
			vgone(vp);
		vp->v_flag = 0;
		vp->v_shlockc = 0;
		vp->v_exlockc = 0;
		vp->v_lastr = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	/* Drop any stale name-cache entries for the recycled vnode. */
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	VREF(vp);
	*vpp = vp;
	return (0);
}
282 
/*
 * Move a vnode from one mount queue to another.
 *
 * mp may be nil, in which case the vnode is left on no mount list.
 * The per-mount list is singly linked forward (v_mountf) with a
 * back pointer (v_mountb) to the forward link that points at us,
 * allowing O(1) removal.
 */
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{
	struct vnode *vq;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mountb) {
		if (vq = vp->v_mountf)
			vq->v_mountb = vp->v_mountb;
		*vp->v_mountb = vq;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	vp->v_mount = mp;
	if (mp == NULL) {
		vp->v_mountf = NULL;
		vp->v_mountb = NULL;
		return;
	}
	if (mp->mnt_mounth) {
		/* Push onto the front of a non-empty list. */
		vp->v_mountf = mp->mnt_mounth;
		vp->v_mountb = &mp->mnt_mounth;
		mp->mnt_mounth->v_mountb = &vp->v_mountf;
		mp->mnt_mounth = vp;
	} else {
		/* First vnode on this mount. */
		mp->mnt_mounth = vp;
		vp->v_mountb = &mp->mnt_mounth;
		vp->v_mountf = NULL;
	}
}
320 
/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 *
 * On success *vpp holds a referenced VBLK vnode for dev and 0 is
 * returned; on failure *vpp is nil and the getnewvnode error is
 * returned.
 */
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	error = getnewvnode(VT_NON, (struct mount *)0, &spec_vnodeops, &nvp);
	if (error) {
		*vpp = 0;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	/* If the device already has a vnode, use the existing alias. */
	if (nvp = checkalias(vp, dev, (struct mount *)0)) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
348 
/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	/* Only block and character devices can be aliased. */
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			/* vgone may have slept; rescan the hash chain. */
			goto loop;
		}
		/* vget failure means vp was being recycled; rescan. */
		if (vget(vp))
			goto loop;
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		/*
		 * No reusable alias: give nvp its own specinfo and
		 * link it onto the device hash chain.
		 */
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		*vpp = nvp;
		if (vp != NULL) {
			/* Mark both vnodes as aliases of the same device. */
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	/*
	 * Found an anonymous (VT_NON) device vnode: clean it out and
	 * hand it back for the caller to repopulate.
	 */
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}
407 
/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 *
 * Returns 0 with the vnode referenced and locked, or 1 if the
 * vnode was being cleaned (caller must retry or give up).
 */
vget(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VXLOCK) {
		/* Being vgone'd; wait for completion, then report failure. */
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return (1);
	}
	/* Unreferenced vnodes live on the free list; unlink first. */
	if (vp->v_usecount == 0) {
		if (vq = vp->v_freef)
			vq->v_freeb = vp->v_freeb;
		else
			vfreet = vp->v_freeb;	/* vp was the tail */
		*vp->v_freeb = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
	}
	VREF(vp);
	VOP_LOCK(vp);
	return (0);
}
439 
/*
 * Vnode reference, just increment the count
 * (function form of the VREF() macro).
 */
void vref(vp)
	struct vnode *vp;
{

	vp->v_usecount++;
}
449 
/*
 * vput(), just unlock and vrele()
 * Convenience for the common "done with a locked vnode" case.
 */
void vput(vp)
	register struct vnode *vp;
{
	VOP_UNLOCK(vp);
	vrele(vp);
}
459 
/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 * The vnode is appended at the tail so it is the last to be recycled.
 */
void vrele(vp)
	register struct vnode *vp;
{

	if (vp == NULL)
		panic("vrele: null vp");
	vp->v_usecount--;
	/* A negative count is a bug; complain but continue. */
	if (vp->v_usecount < 0)
		vprint("vrele: bad ref count", vp);
	if (vp->v_usecount > 0)
		return;
	if (vfreeh == NULLVP) {
		/*
		 * insert into empty list
		 */
		vfreeh = vp;
		vp->v_freeb = &vfreeh;
	} else {
		/*
		 * insert at tail of list
		 */
		*vfreet = vp;
		vp->v_freeb = vfreet;
	}
	vp->v_freef = NULL;
	vfreet = &vp->v_freef;
	/* Let the filesystem write back / tear down its state. */
	VOP_INACTIVE(vp);
}
492 
/*
 * Page or buffer structure gets a reference.
 * Hold counts track buffer-cache users, separate from v_usecount.
 */
vhold(vp)
	register struct vnode *vp;
{

	vp->v_holdcnt++;
}
502 
/*
 * Page or buffer structure frees a reference.
 * Panics on hold-count underflow.
 */
holdrele(vp)
	register struct vnode *vp;
{

	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}
514 
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 *
 * Caller must have marked the mount busy (MNT_MPBUSY).
 * Returns 0, or EBUSY if active vnodes remain and FORCECLOSE was
 * not given.
 */
int busyprt = 0;	/* patch to print out busy vnodes */

vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_mounth; vp; vp = nvp) {
		/* vgone below may sleep and move vnodes; restart if so. */
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mountf;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over a vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = &spec_vnodeops;
				insmntque(vp, (struct mount *)0);
			}
			continue;
		}
		if (busyprt)
			vprint("vflush: busy vnode", vp);
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}
580 
/*
 * Disassociate the underlying file system from a vnode.
 *
 * flags may include DOCLOSE to flush buffers and close an active
 * vnode. On return the vnode is owned by dead_vnodeops and any
 * vget() waiters have been awakened.
 */
void vclean(vp, flags)
	register struct vnode *vp;
	long flags;
{
	struct vnodeops *origops;
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if (active = vp->v_usecount)
		VREF(vp);
	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the buffer list is being cleaned out.
	 */
	VOP_LOCK(vp);
	if (flags & DOCLOSE)
		vinvalbuf(vp, 1);
	/*
	 * Prevent any further operations on the vnode from
	 * being passed through to the old file system.
	 */
	origops = vp->v_op;
	vp->v_op = &dead_vnodeops;
	vp->v_tag = VT_NON;
	/*
	 * If purging an active vnode, it must be unlocked, closed,
	 * and deactivated before being reclaimed.
	 */
	/* NB: must use origops -- v_op already points at dead_vnodeops. */
	(*(origops->vn_unlock))(vp);
	if (active) {
		if (flags & DOCLOSE)
			(*(origops->vn_close))(vp, 0, NOCRED);
		(*(origops->vn_inactive))(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if ((*(origops->vn_reclaim))(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);
	/*
	 * Done with purge, notify sleepers in vget of the grim news.
	 */
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}
649 
/*
 * Eliminate all activity associated with  the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			sleep((caddr_t)vp, PINOD);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		/*
		 * vgone() clears VALIASED on the last remaining alias,
		 * which terminates this loop; each pass rescans the hash
		 * chain because vgone() may sleep and change it.
		 */
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}
692 
/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 *
 * On return the vnode is VBAD: detached from its mount list and
 * device alias chain, cleaned, and (if free) moved to the head of
 * the free list for prompt recycling.
 */
void vgone(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	struct vnode *vx;
	long count;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mountb) {
		if (vq = vp->v_mountf)
			vq->v_mountb = vp->v_mountb;
		*vp->v_mountb = vq;
		vp->v_mountf = NULL;
		vp->v_mountb = NULL;
	}
	/*
	 * If special device, remove it from special device alias list.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			/* Walk the chain to find our predecessor. */
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			/*
			 * Count remaining aliases of this device; if only
			 * one is left it is no longer "aliased".
			 */
			count = 0;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				count++;
				vx = vq;
			}
			if (count == 0)
				panic("missing alias");
			if (count == 1)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist, move it to the head of the list.
	 */
	if (vp->v_freeb) {
		if (vq = vp->v_freef)
			vq->v_freeb = vp->v_freeb;
		else
			vfreet = vp->v_freeb;
		*vp->v_freeb = vq;
		vp->v_freef = vfreeh;
		vp->v_freeb = &vfreeh;
		vfreeh->v_freeb = &vp->v_freef;
		vfreeh = vp;
	}
	vp->v_type = VBAD;
}
777 
778 /*
779  * Lookup a vnode by device number.
780  */
781 vfinddev(dev, type, vpp)
782 	dev_t dev;
783 	enum vtype type;
784 	struct vnode **vpp;
785 {
786 	register struct vnode *vp;
787 
788 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
789 		if (dev != vp->v_rdev || type != vp->v_type)
790 			continue;
791 		*vpp = vp;
792 		return (0);
793 	}
794 	return (1);
795 }
796 
/*
 * Calculate the total number of references to a special device.
 * Sums v_usecount over every alias of vp's device; unreferenced
 * aliases encountered along the way are flushed out.
 */
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	int count;

	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
loop:
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0) {
			vgone(vq);
			/* vgone may sleep and alter the chain; start over. */
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}
823 
824 /*
825  * Print out a description of a vnode.
826  */
827 static char *typename[] =
828    { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
829 
830 vprint(label, vp)
831 	char *label;
832 	register struct vnode *vp;
833 {
834 	char buf[64];
835 
836 	if (label != NULL)
837 		printf("%s: ", label);
838 	printf("type %s, usecount %d, refcount %d,", typename[vp->v_type],
839 		vp->v_usecount, vp->v_holdcnt);
840 	buf[0] = '\0';
841 	if (vp->v_flag & VROOT)
842 		strcat(buf, "|VROOT");
843 	if (vp->v_flag & VTEXT)
844 		strcat(buf, "|VTEXT");
845 	if (vp->v_flag & VSYSTEM)
846 		strcat(buf, "|VSYSTEM");
847 	if (vp->v_flag & VEXLOCK)
848 		strcat(buf, "|VEXLOCK");
849 	if (vp->v_flag & VSHLOCK)
850 		strcat(buf, "|VSHLOCK");
851 	if (vp->v_flag & VLWAIT)
852 		strcat(buf, "|VLWAIT");
853 	if (vp->v_flag & VXLOCK)
854 		strcat(buf, "|VXLOCK");
855 	if (vp->v_flag & VXWANT)
856 		strcat(buf, "|VXWANT");
857 	if (vp->v_flag & VBWAIT)
858 		strcat(buf, "|VBWAIT");
859 	if (vp->v_flag & VALIASED)
860 		strcat(buf, "|VALIASED");
861 	if (buf[0] != '\0')
862 		printf(" flags (%s)", &buf[1]);
863 	printf("\n\t");
864 	VOP_PRINT(vp);
865 }
866 
int kinfo_vdebug = 1;		/* chatter about restarted scans */
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via kinfo).
 * Copyout address of vnode followed by vnode.
 *
 * If where is nil, only the (over-)estimated space requirement is
 * stored through aneeded.  Otherwise up to *acopysize bytes of
 * (pointer, vnode) pairs are copied out; on return *aneeded holds
 * the size of a complete dump and *acopysize the bytes written.
 * `op' and `arg' are part of the kinfo interface but unused here.
 */
kinfo_vnode(op, where, acopysize, arg, aneeded)
	char *where;
	int *acopysize, *aneeded;
{
	register struct mount *mp = rootfs;
	struct mount *omp;
	struct vnode *vp;
	register needed = 0;
	register char *bp = where, *savebp;
	char *ewhere = where + *acopysize;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)
	if (where == NULL) {
		/* Sizing call: pad the estimate for vnodes created later. */
		*aneeded = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}

	do {
		/* Skip filesystems that are busy (e.g. being unmounted). */
		if (vfs_busy(mp)) {
			mp = mp->mnt_next;
			continue;
		}
		/*
		 * A vget can fail if the vnode is being
		 * recycled.  In this (rare) case, we have to start
		 * over with this filesystem.  Also, have to
		 * check that the next vp is still associated
		 * with this filesystem.  RACE: could have been
		 * recycled onto the same filesystem.
		 */
		savebp = bp;
again:
		for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			if (vget(vp)) {
				if (kinfo_vdebug)
					printf("kinfo: vget failed\n");
				kinfo_vgetfailed++;
				bp = savebp;
				goto again;
			}
			/*
			 * Copy out only while the pair fits; bp keeps
			 * advancing regardless so the total needed size
			 * can be reported below.
			 */
			if ((bp + VPTRSZ + VNODESZ <= ewhere) &&
			    ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			     (error = copyout((caddr_t)vp, bp + VPTRSZ,
			      VNODESZ)))) {
				vput(vp);
				return (error);
			}
			bp += VPTRSZ + VNODESZ;
			vput(vp);
		}
		omp = mp;
		mp = mp->mnt_next;
		vfs_unbusy(omp);
	} while (mp != rootfs);

	*aneeded = bp - where;
	if (bp > ewhere)
		*acopysize = ewhere - where;
	else
		*acopysize = bp - where;
	return (0);
}
944