xref: /netbsd-src/sys/kern/vfs_subr.c (revision 1a9a81992d29fa1ebe387b8059e482fa3d394fb8)
1 /*	$NetBSD: vfs_subr.c,v 1.421 2011/04/02 04:28:56 rmind Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1998, 2004, 2005, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1989, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  * (c) UNIX System Laboratories, Inc.
37  * All or some portions of this file are derived from material licensed
38  * to the University of California by American Telephone and Telegraph
39  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40  * the permission of UNIX System Laboratories, Inc.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
67  */
68 
69 #include <sys/cdefs.h>
70 __KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.421 2011/04/02 04:28:56 rmind Exp $");
71 
72 #include "opt_ddb.h"
73 #include "opt_compat_netbsd.h"
74 #include "opt_compat_43.h"
75 
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/conf.h>
79 #include <sys/dirent.h>
80 #include <sys/filedesc.h>
81 #include <sys/kernel.h>
82 #include <sys/mount.h>
83 #include <sys/vnode.h>
84 #include <sys/stat.h>
85 #include <sys/sysctl.h>
86 #include <sys/namei.h>
87 #include <sys/buf.h>
88 #include <sys/errno.h>
89 #include <sys/kmem.h>
90 #include <sys/syscallargs.h>
91 #include <sys/kauth.h>
92 #include <sys/module.h>
93 
94 #include <miscfs/genfs/genfs.h>
95 #include <miscfs/syncfs/syncfs.h>
96 #include <miscfs/specfs/specdev.h>
97 #include <uvm/uvm_ddb.h>
98 
/*
 * Map the S_IFMT file-format code of an inode mode (shifted right 12
 * bits) to a vnode type; unassigned format codes map to VNON.
 */
const enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
/* Inverse mapping: vnode type (enum vtype) back to S_IFMT format bits. */
const int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
107 
/*
 * Insq/Remq for the vnode usage lists.
 */
/* Insert buffer bp at the head of vnode buffer list dp. */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
/*
 * Remove bp from its vnode buffer list and mark the link NOLIST so
 * membership can later be tested via LIST_NEXT(bp, b_vnbufs) != NOLIST.
 */
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
119 
/*
 * Local declarations.
 */

/* Shared backend for bdevvp()/cdevvp(): make a vnode for device 'dev'. */
static int getdevvp(dev_t, vnode_t **, enum vtype);
125 
/*
 * Initialize the vnode management data structures.
 * Brings up the syncer daemon state, the vnode subsystem, and the
 * mount subsystem, in that order.
 */
void
vntblinit(void)
{

	vn_initialize_syncerd();
	vfs_vnode_sysinit();
	vfs_mount_sysinit();
}
137 
/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 *
 * flags: V_SAVE => write dirty data to disk first rather than discard it.
 * 'catch' and 'slptimeo' are handed to bbusy() and control whether the
 * wait for a busy buffer is interruptible and/or bounded.
 *
 * Returns 0 on success or an errno on failure.
 */
int
vinvalbuf(struct vnode *vp, int flags, kauth_cred_t cred, struct lwp *l,
	  bool catch, int slptimeo)
{
	struct buf *bp, *nbp;
	int error;
	int flushflags = PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO |
	    (flags & V_SAVE ? PGO_CLEANIT | PGO_RECLAIM : 0);

	/* XXXUBC this doesn't look at flags or slp* */
	mutex_enter(&vp->v_interlock);
	/* NOTE: VOP_PUTPAGES() releases v_interlock before returning. */
	error = VOP_PUTPAGES(vp, 0, 0, flushflags);
	if (error) {
		return error;
	}

	if (flags & V_SAVE) {
		/* Push remaining dirty data out before invalidating. */
		error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0);
		if (error)
		        return (error);
		KASSERT(LIST_EMPTY(&vp->v_dirtyblkhd));
	}

	mutex_enter(&bufcache_lock);
restart:
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		error = bbusy(bp, catch, slptimeo, NULL);
		if (error != 0) {
			/* EPASSTHROUGH: list changed while asleep; rescan. */
			if (error == EPASSTHROUGH)
				goto restart;
			mutex_exit(&bufcache_lock);
			return (error);
		}
		brelsel(bp, BC_INVAL | BC_VFLUSH);
	}

	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		error = bbusy(bp, catch, slptimeo, NULL);
		if (error != 0) {
			if (error == EPASSTHROUGH)
				goto restart;
			mutex_exit(&bufcache_lock);
			return (error);
		}
		/*
		 * XXX Since there are no node locks for NFS, I believe
		 * there is a slight chance that a delayed write will
		 * occur while sleeping just above, so check for it.
		 */
		if ((bp->b_oflags & BO_DELWRI) && (flags & V_SAVE)) {
#ifdef DEBUG
			printf("buffer still DELWRI\n");
#endif
			bp->b_cflags |= BC_BUSY | BC_VFLUSH;
			mutex_exit(&bufcache_lock);
			VOP_BWRITE(bp);
			mutex_enter(&bufcache_lock);
			goto restart;
		}
		brelsel(bp, BC_INVAL | BC_VFLUSH);
	}

#ifdef DIAGNOSTIC
	if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
		panic("vinvalbuf: flush failed, vp %p", vp);
#endif

	mutex_exit(&bufcache_lock);

	return (0);
}
216 
/*
 * Destroy any in core blocks past the truncation length.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 *
 * 'lbn' is the first logical block to destroy; 'catch'/'slptimeo'
 * control the bbusy() wait as in vinvalbuf().  Returns 0 or an errno.
 */
int
vtruncbuf(struct vnode *vp, daddr_t lbn, bool catch, int slptimeo)
{
	struct buf *bp, *nbp;
	int error;
	voff_t off;

	/* First free the pages backing the truncated byte range. */
	off = round_page((voff_t)lbn << vp->v_mount->mnt_fs_bshift);
	mutex_enter(&vp->v_interlock);
	/* NOTE: VOP_PUTPAGES() releases v_interlock before returning. */
	error = VOP_PUTPAGES(vp, off, 0, PGO_FREE | PGO_SYNCIO);
	if (error) {
		return error;
	}

	mutex_enter(&bufcache_lock);
restart:
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		/* Keep buffers that lie before the truncation point. */
		if (bp->b_lblkno < lbn)
			continue;
		error = bbusy(bp, catch, slptimeo, NULL);
		if (error != 0) {
			/* EPASSTHROUGH: list changed while asleep; rescan. */
			if (error == EPASSTHROUGH)
				goto restart;
			mutex_exit(&bufcache_lock);
			return (error);
		}
		brelsel(bp, BC_INVAL | BC_VFLUSH);
	}

	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		error = bbusy(bp, catch, slptimeo, NULL);
		if (error != 0) {
			if (error == EPASSTHROUGH)
				goto restart;
			mutex_exit(&bufcache_lock);
			return (error);
		}
		brelsel(bp, BC_INVAL | BC_VFLUSH);
	}
	mutex_exit(&bufcache_lock);

	return (0);
}
269 
/*
 * Flush all dirty buffers from a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 *
 * If 'sync' is set, also wait for outstanding writes (v_numoutput) to
 * drain and retry until the dirty list is empty.
 */
void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int flags = PGO_CLEANIT | PGO_ALLPAGES | (sync ? PGO_SYNCIO : 0);
	bool dirty;

	mutex_enter(&vp->v_interlock);
	/* NOTE: VOP_PUTPAGES() releases v_interlock before returning. */
	(void) VOP_PUTPAGES(vp, 0, 0, flags);

loop:
	mutex_enter(&bufcache_lock);
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		/* Skip buffers already being written by someone else. */
		if ((bp->b_cflags & BC_BUSY))
			continue;
		if ((bp->b_oflags & BO_DELWRI) == 0)
			panic("vflushbuf: not dirty, bp %p", bp);
		bp->b_cflags |= BC_BUSY | BC_VFLUSH;
		mutex_exit(&bufcache_lock);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	mutex_exit(&bufcache_lock);

	if (sync == 0)
		return;

	/* Wait for every outstanding write on the vnode to finish. */
	mutex_enter(&vp->v_interlock);
	while (vp->v_numoutput != 0)
		cv_wait(&vp->v_cv, &vp->v_interlock);
	dirty = !LIST_EMPTY(&vp->v_dirtyblkhd);
	mutex_exit(&vp->v_interlock);

	/* New dirty buffers may have shown up while waiting; start over. */
	if (dirty) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}
321 
322 /*
323  * Create a vnode for a block device.
324  * Used for root filesystem and swap areas.
325  * Also used for memory file system special devices.
326  */
327 int
328 bdevvp(dev_t dev, vnode_t **vpp)
329 {
330 
331 	return (getdevvp(dev, vpp, VBLK));
332 }
333 
334 /*
335  * Create a vnode for a character device.
336  * Used for kernfs and some console handling.
337  */
338 int
339 cdevvp(dev_t dev, vnode_t **vpp)
340 {
341 
342 	return (getdevvp(dev, vpp, VCHR));
343 }
344 
/*
 * Associate a buffer with a vnode.  There must already be a hold on
 * the vnode.
 *
 * Caller holds both v_interlock and bufcache_lock, and bp is busy
 * with no waiters.  The buffer is put on the vnode's clean list and
 * its object lock is switched from buffer_lock to the vnode's
 * interlock.
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL);
	KASSERT(bp->b_objlock == &buffer_lock);
	KASSERT(mutex_owned(&vp->v_interlock));
	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT((bp->b_cflags & BC_BUSY) != 0);
	KASSERT(!cv_has_waiters(&bp->b_done));

	vholdl(vp);
	bp->b_vp = vp;
	/* Device vnodes mirror their device number into the buffer. */
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;

	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
	bp->b_objlock = &vp->v_interlock;
}
373 
/*
 * Disassociate a buffer from a vnode.
 *
 * Caller holds v_interlock and bufcache_lock; bp is busy with no
 * waiters.  Removes bp from any vnode buffer list, drops the hold
 * taken in bgetvp(), and retires the vnode from the syncer worklist
 * when neither pages nor dirty buffers remain.
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;

	KASSERT(vp != NULL);
	KASSERT(bp->b_objlock == &vp->v_interlock);
	KASSERT(mutex_owned(&vp->v_interlock));
	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT((bp->b_cflags & BC_BUSY) != 0);
	KASSERT(!cv_has_waiters(&bp->b_done));

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/* Nothing left to sync: take the vnode off the syncer worklist. */
	if (vp->v_uobj.uo_npages == 0 && (vp->v_iflag & VI_ONWORKLST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_iflag &= ~VI_WRMAPDIRTY;
		vn_syncer_remove_from_worklist(vp);
	}

	/* Hand the buffer's object lock back to the global buffer_lock. */
	bp->b_objlock = &buffer_lock;
	bp->b_vp = NULL;
	holdrelel(vp);
}
405 
/*
 * Reassign a buffer from one vnode list to another.
 * The list reassignment must be within the same vnode.
 * Used to assign file specific control information
 * (indirect blocks) to the list to which they belong.
 *
 * Caller holds bufcache_lock and the vnode's interlock; bp is busy.
 * Also maintains the vnode's syncer-worklist membership: dirtying a
 * buffer may schedule the vnode, cleaning the last one may unschedule
 * it.
 */
void
reassignbuf(struct buf *bp, struct vnode *vp)
{
	struct buflists *listheadp;
	int delayx;

	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT(bp->b_objlock == &vp->v_interlock);
	KASSERT(mutex_owned(&vp->v_interlock));
	KASSERT((bp->b_cflags & BC_BUSY) != 0);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_oflags & BO_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		/* Last dirty buffer gone: drop vnode from the worklist. */
		if (vp->v_uobj.uo_npages == 0 &&
		    (vp->v_iflag & VI_ONWORKLST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_iflag &= ~VI_WRMAPDIRTY;
			vn_syncer_remove_from_worklist(vp);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_iflag & VI_ONWORKLST) == 0) {
			/* Choose a sync delay by the kind of data. */
			switch (vp->v_type) {
			case VDIR:
				delayx = dirdelay;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delayx = metadelay;
					break;
				}
				/* fall through */
			default:
				delayx = filedelay;
				break;
			}
			/* MNT_ASYNC mounts are not put on the worklist. */
			if (!vp->v_mount ||
			    (vp->v_mount->mnt_flag & MNT_ASYNC) == 0)
				vn_syncer_add_to_worklist(vp, delayx);
		}
	}
	bufinsvn(bp, listheadp);
}
465 
466 /*
467  * Create a vnode for a device.
468  * Used by bdevvp (block device) for root file system etc.,
469  * and by cdevvp (character device) for console and kernfs.
470  */
471 static int
472 getdevvp(dev_t dev, vnode_t **vpp, enum vtype type)
473 {
474 	vnode_t *vp;
475 	vnode_t *nvp;
476 	int error;
477 
478 	if (dev == NODEV) {
479 		*vpp = NULL;
480 		return (0);
481 	}
482 	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
483 	if (error) {
484 		*vpp = NULL;
485 		return (error);
486 	}
487 	vp = nvp;
488 	vp->v_type = type;
489 	vp->v_vflag |= VV_MPSAFE;
490 	uvm_vnp_setsize(vp, 0);
491 	spec_node_init(vp, dev);
492 	*vpp = vp;
493 	return (0);
494 }
495 
/*
 * Lookup a vnode by device number and return it referenced.
 *
 * Returns non-zero with a referenced vnode in *vpp on success, zero
 * otherwise (not found, or reference could not be obtained).  Note the
 * boolean-style return (1 = found), not an errno.
 */
int
vfinddev(dev_t dev, enum vtype type, vnode_t **vpp)
{
	vnode_t *vp;

	mutex_enter(&device_lock);
	for (vp = specfs_hash[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev == vp->v_rdev && type == vp->v_type)
			break;
	}
	if (vp == NULL) {
		mutex_exit(&device_lock);
		return 0;
	}
	/* Take the interlock before dropping device_lock to pin vp. */
	mutex_enter(&vp->v_interlock);
	mutex_exit(&device_lock);
	if (vget(vp, 0) != 0)
		return 0;
	*vpp = vp;
	return 1;
}
520 
/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 *
 * device_lock must be dropped around VOP_REVOKE(); the hash chain is
 * then rescanned from the head since it may have changed meanwhile.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	vnode_t *vp, **vpp;
	dev_t dev;
	int mn;

	vp = NULL;	/* XXX gcc */

	mutex_enter(&device_lock);
	for (mn = minl; mn <= minh; mn++) {
		dev = makedev(maj, mn);
		vpp = &specfs_hash[SPECHASH(dev)];
		for (vp = *vpp; vp != NULL;) {
			mutex_enter(&vp->v_interlock);
			/* Skip vnodes being cleaned or not matching. */
			if ((vp->v_iflag & VI_CLEAN) != 0 ||
			    dev != vp->v_rdev || type != vp->v_type) {
				mutex_exit(&vp->v_interlock);
				vp = vp->v_specnext;
				continue;
			}
			mutex_exit(&device_lock);
			/* vget() consumes the interlock. */
			if (vget(vp, 0) == 0) {
				VOP_REVOKE(vp, REVOKEALL);
				vrele(vp);
			}
			mutex_enter(&device_lock);
			/* Chain may have changed while unlocked; restart. */
			vp = *vpp;
		}
	}
	mutex_exit(&device_lock);
}
557 
/*
 * sysctl helper routine to return list of supported fstypes
 *
 * Emits a space-separated list of file system names into the caller's
 * buffer, or (when oldp is NULL) just reports the space required.
 * The vfs refcount is bumped around the copyout so the entry cannot
 * disappear while vfs_list_lock is dropped.
 */
int
sysctl_vfs_generic_fstypes(SYSCTLFN_ARGS)
{
	char bf[sizeof(((struct statvfs *)NULL)->f_fstypename)];
	char *where = oldp;
	struct vfsops *v;
	size_t needed, left, slen;
	int error, first;

	if (newp != NULL)
		return (EPERM);
	if (namelen != 0)
		return (EINVAL);

	first = 1;
	error = 0;
	needed = 0;
	left = *oldlenp;

	sysctl_unlock();
	mutex_enter(&vfs_list_lock);
	LIST_FOREACH(v, &vfs_list, vfs_list) {
		if (where == NULL)
			needed += strlen(v->vfs_name) + 1;
		else {
			memset(bf, 0, sizeof(bf));
			if (first) {
				strncpy(bf, v->vfs_name, sizeof(bf));
				first = 0;
			} else {
				/* Subsequent names get a space separator. */
				bf[0] = ' ';
				strncpy(bf + 1, v->vfs_name, sizeof(bf) - 1);
			}
			bf[sizeof(bf)-1] = '\0';
			slen = strlen(bf);
			if (left < slen + 1)
				break;
			/* Hold a reference across the unlocked copyout. */
			v->vfs_refcount++;
			mutex_exit(&vfs_list_lock);
			/* +1 to copy out the trailing NUL byte */
			error = copyout(bf, where, slen + 1);
			mutex_enter(&vfs_list_lock);
			v->vfs_refcount--;
			if (error)
				break;
			where += slen;
			needed += slen;
			left -= slen;
		}
	}
	mutex_exit(&vfs_list_lock);
	sysctl_relock();
	*oldlenp = needed;
	return (error);
}
616 
int kinfo_vdebug = 1;	/* enable diagnostics in the vnode-list sysctl */
int kinfo_vgetfailed;	/* NOTE(review): apparently unused here; appears to
			 * count vget failures — confirm against callers */

/* Headroom for vnodes created while the sysctl size estimate is in use. */
#define KINFO_VNODESLOP	10
621 
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 *
 * Iterates every mount and every vnode on it, using a marker vnode so
 * that mntvnode_lock can be dropped around each copyout without losing
 * the iteration position.  Returns ENOMEM if the caller's buffer is
 * too small, in which case *sizep holds the bytes written so far.
 */
int
sysctl_kern_vnode(SYSCTLFN_ARGS)
{
	char *where = oldp;
	size_t *sizep = oldlenp;
	struct mount *mp, *nmp;
	vnode_t *vp, *mvp, vbuf;
	char *bp = where;
	char *ewhere;
	int error;

	if (namelen != 0)
		return (EOPNOTSUPP);
	if (newp != NULL)
		return (EPERM);

#define VPTRSZ	sizeof(vnode_t *)
#define VNODESZ	sizeof(vnode_t)
	if (where == NULL) {
		/* Size query: allow some slop for concurrent growth. */
		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *sizep;

	sysctl_unlock();
	mutex_enter(&mountlist_lock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
	    mp = nmp) {
		if (vfs_busy(mp, &nmp)) {
			continue;
		}
		/* Allocate a marker vnode. */
		mvp = vnalloc(mp);
		/* Should never fail for mp != NULL */
		KASSERT(mvp != NULL);
		mutex_enter(&mntvnode_lock);
		for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp;
		    vp = vunmark(mvp)) {
			vmark(mvp, vp);
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp || vismarker(vp))
				continue;
			/* Out of user buffer: report partial progress. */
			if (bp + VPTRSZ + VNODESZ > ewhere) {
				(void)vunmark(mvp);
				mutex_exit(&mntvnode_lock);
				vnfree(mvp);
				vfs_unbusy(mp, false, NULL);
				sysctl_relock();
				*sizep = bp - where;
				return (ENOMEM);
			}
			/* Snapshot the vnode before dropping the lock. */
			memcpy(&vbuf, vp, VNODESZ);
			mutex_exit(&mntvnode_lock);
			if ((error = copyout(&vp, bp, VPTRSZ)) ||
			    (error = copyout(&vbuf, bp + VPTRSZ, VNODESZ))) {
			   	mutex_enter(&mntvnode_lock);
				(void)vunmark(mvp);
				mutex_exit(&mntvnode_lock);
				vnfree(mvp);
				vfs_unbusy(mp, false, NULL);
				sysctl_relock();
				return (error);
			}
			bp += VPTRSZ + VNODESZ;
			mutex_enter(&mntvnode_lock);
		}
		mutex_exit(&mntvnode_lock);
		vnfree(mvp);
		vfs_unbusy(mp, false, &nmp);
	}
	mutex_exit(&mountlist_lock);
	sysctl_relock();

	*sizep = bp - where;
	return (0);
}
706 
707 /*
708  * Set vnode attributes to VNOVAL
709  */
710 void
711 vattr_null(struct vattr *vap)
712 {
713 
714 	memset(vap, 0, sizeof(*vap));
715 
716 	vap->va_type = VNON;
717 
718 	/*
719 	 * Assign individually so that it is safe even if size and
720 	 * sign of each member are varied.
721 	 */
722 	vap->va_mode = VNOVAL;
723 	vap->va_nlink = VNOVAL;
724 	vap->va_uid = VNOVAL;
725 	vap->va_gid = VNOVAL;
726 	vap->va_fsid = VNOVAL;
727 	vap->va_fileid = VNOVAL;
728 	vap->va_size = VNOVAL;
729 	vap->va_blocksize = VNOVAL;
730 	vap->va_atime.tv_sec =
731 	    vap->va_mtime.tv_sec =
732 	    vap->va_ctime.tv_sec =
733 	    vap->va_birthtime.tv_sec = VNOVAL;
734 	vap->va_atime.tv_nsec =
735 	    vap->va_mtime.tv_nsec =
736 	    vap->va_ctime.tv_nsec =
737 	    vap->va_birthtime.tv_nsec = VNOVAL;
738 	vap->va_gen = VNOVAL;
739 	vap->va_flags = VNOVAL;
740 	vap->va_rdev = VNOVAL;
741 	vap->va_bytes = VNOVAL;
742 }
743 
/* Number of elements in a statically-sized array. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
/* Bounds-checked table lookup for printing; out of range => "UNKNOWN". */
#define ARRAY_PRINT(idx, arr) \
    ((unsigned int)(idx) < ARRAY_SIZE(arr) ? (arr)[(idx)] : "UNKNOWN")

const char * const vnode_tags[] = { VNODE_TAGS };	/* v_tag names */
const char * const vnode_types[] = { VNODE_TYPES };	/* v_type names */
const char vnode_flagbits[] = VNODE_FLAGBITS;	/* snprintb(9) format */
751 
/*
 * Print out a description of a vnode.
 * 'label', if non-NULL, is printed as a prefix.  Flags from all three
 * flag words are decoded via snprintb(); if the vnode has fs-private
 * data its VOP_PRINT() routine is invoked too.
 */
void
vprint(const char *label, struct vnode *vp)
{
	char bf[96];
	int flag;

	flag = vp->v_iflag | vp->v_vflag | vp->v_uflag;
	snprintb(bf, sizeof(bf), vnode_flagbits, flag);

	if (label != NULL)
		printf("%s: ", label);
	printf("vnode @ %p, flags (%s)\n\ttag %s(%d), type %s(%d), "
	    "usecount %d, writecount %d, holdcount %d\n"
	    "\tfreelisthd %p, mount %p, data %p lock %p\n",
	    vp, bf, ARRAY_PRINT(vp->v_tag, vnode_tags), vp->v_tag,
	    ARRAY_PRINT(vp->v_type, vnode_types), vp->v_type,
	    vp->v_usecount, vp->v_writecount, vp->v_holdcnt,
	    vp->v_freelisthd, vp->v_mount, vp->v_data, &vp->v_lock);
	if (vp->v_data != NULL) {
		printf("\t");
		VOP_PRINT(vp);
	}
}
778 
/*
 * Deprecated.  Kept for KPI compatibility; new code should call
 * genfs_can_access() directly.  Under DIAGNOSTIC, warns when used.
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, kauth_cred_t cred)
{

#ifdef DIAGNOSTIC
	printf("vaccess: deprecated interface used.\n");
#endif /* DIAGNOSTIC */

	return genfs_can_access(type, file_mode, uid, gid, acc_mode, cred);
}
791 
792 /*
793  * Given a file system name, look up the vfsops for that
794  * file system, or return NULL if file system isn't present
795  * in the kernel.
796  */
797 struct vfsops *
798 vfs_getopsbyname(const char *name)
799 {
800 	struct vfsops *v;
801 
802 	mutex_enter(&vfs_list_lock);
803 	LIST_FOREACH(v, &vfs_list, vfs_list) {
804 		if (strcmp(v->vfs_name, name) == 0)
805 			break;
806 	}
807 	if (v != NULL)
808 		v->vfs_refcount++;
809 	mutex_exit(&vfs_list_lock);
810 
811 	return (v);
812 }
813 
/*
 * Copy the mount point's cached statvfs information into *sbp,
 * preserving whatever per-call fields the file system already filled
 * in.  A no-op when sbp is the mount's own mnt_stat.
 */
void
copy_statvfs_info(struct statvfs *sbp, const struct mount *mp)
{
	const struct statvfs *mbp;

	if (sbp == (mbp = &mp->mnt_stat))
		return;

	(void)memcpy(&sbp->f_fsidx, &mbp->f_fsidx, sizeof(sbp->f_fsidx));
	sbp->f_fsid = mbp->f_fsid;
	sbp->f_owner = mbp->f_owner;
	sbp->f_flag = mbp->f_flag;
	sbp->f_syncwrites = mbp->f_syncwrites;
	sbp->f_asyncwrites = mbp->f_asyncwrites;
	sbp->f_syncreads = mbp->f_syncreads;
	sbp->f_asyncreads = mbp->f_asyncreads;
	(void)memcpy(sbp->f_spare, mbp->f_spare, sizeof(mbp->f_spare));
	(void)memcpy(sbp->f_fstypename, mbp->f_fstypename,
	    sizeof(sbp->f_fstypename));
	(void)memcpy(sbp->f_mntonname, mbp->f_mntonname,
	    sizeof(sbp->f_mntonname));
	(void)memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname,
	    sizeof(sbp->f_mntfromname));
	sbp->f_namemax = mbp->f_namemax;
}
839 
/*
 * Fill in the fstype, mounted-on, and mounted-from names of a mount's
 * statvfs structure.
 *
 * 'ukon'/'ukfrom' say whether 'onp'/'fromp' live in kernel or user
 * space (UIO_SYSSPACE selects copystr, otherwise copyinstr).  When the
 * calling process is chrooted, the path from the real root to its
 * root directory is prepended to f_mntonname so the name is absolute.
 * Returns 0 or an errno.
 */
int
set_statvfs_info(const char *onp, int ukon, const char *fromp, int ukfrom,
    const char *vfsname, struct mount *mp, struct lwp *l)
{
	int error;
	size_t size;
	struct statvfs *sfs = &mp->mnt_stat;
	int (*fun)(const void *, void *, size_t, size_t *);

	(void)strlcpy(mp->mnt_stat.f_fstypename, vfsname,
	    sizeof(mp->mnt_stat.f_fstypename));

	if (onp) {
		struct cwdinfo *cwdi = l->l_proc->p_cwdi;
		fun = (ukon == UIO_SYSSPACE) ? copystr : copyinstr;
		if (cwdi->cwdi_rdir != NULL) {
			size_t len;
			char *bp;
			char *path = PNBUF_GET();

			/* Build the chroot prefix backwards from the end. */
			bp = path + MAXPATHLEN;
			*--bp = '\0';
			rw_enter(&cwdi->cwdi_lock, RW_READER);
			error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp,
			    path, MAXPATHLEN / 2, 0, l);
			rw_exit(&cwdi->cwdi_lock);
			if (error) {
				PNBUF_PUT(path);
				return error;
			}

			len = strlen(bp);
			/* Truncate the prefix if it alone fills the field. */
			if (len > sizeof(sfs->f_mntonname) - 1)
				len = sizeof(sfs->f_mntonname) - 1;
			(void)strncpy(sfs->f_mntonname, bp, len);
			PNBUF_PUT(path);

			if (len < sizeof(sfs->f_mntonname) - 1) {
				/* Append the caller-supplied path. */
				error = (*fun)(onp, &sfs->f_mntonname[len],
				    sizeof(sfs->f_mntonname) - len - 1, &size);
				if (error)
					return error;
				size += len;
			} else {
				size = len;
			}
		} else {
			error = (*fun)(onp, &sfs->f_mntonname,
			    sizeof(sfs->f_mntonname) - 1, &size);
			if (error)
				return error;
		}
		/* Zero the unused tail of the field. */
		(void)memset(sfs->f_mntonname + size, 0,
		    sizeof(sfs->f_mntonname) - size);
	}

	if (fromp) {
		fun = (ukfrom == UIO_SYSSPACE) ? copystr : copyinstr;
		error = (*fun)(fromp, sfs->f_mntfromname,
		    sizeof(sfs->f_mntfromname) - 1, &size);
		if (error)
			return error;
		(void)memset(sfs->f_mntfromname + size, 0,
		    sizeof(sfs->f_mntfromname) - size);
	}
	return 0;
}
907 
/*
 * Fetch the current time for file system timestamps, at full
 * nanotime(9) resolution.
 */
void
vfs_timestamp(struct timespec *ts)
{

	nanotime(ts);
}
914 
time_t	rootfstime;			/* recorded root fs time, if known */

/* Record the time reported by the root file system. */
void
setrootfstime(time_t t)
{
	rootfstime = t;
}
921 
/* Map enum vtype values to dirent DT_* type codes; indexed by vtype. */
static const uint8_t vttodt_tab[9] = {
	DT_UNKNOWN,	/* VNON  */
	DT_REG,		/* VREG  */
	DT_DIR,		/* VDIR  */
	DT_BLK,		/* VBLK  */
	DT_CHR,		/* VCHR  */
	DT_LNK,		/* VLNK  */
	DT_SOCK,	/* VSOCK */
	DT_FIFO,	/* VFIFO */
	DT_UNKNOWN	/* VBAD  */
};
933 
/*
 * Convert a vnode type to the corresponding dirent d_type code.
 */
uint8_t
vtype2dt(enum vtype vt)
{

	/* The table must cover every vtype up to and including VBAD. */
	CTASSERT(VBAD == __arraycount(vttodt_tab) - 1);
	return vttodt_tab[vt];
}
941 
/*
 * VFS_MOUNT: call the file system's mount routine.
 * Always runs under the big kernel lock, regardless of IMNT_MPSAFE.
 */
int
VFS_MOUNT(struct mount *mp, const char *a, void *b, size_t *c)
{
	int error;

	KERNEL_LOCK(1, NULL);
	error = (*(mp->mnt_op->vfs_mount))(mp, a, b, c);
	KERNEL_UNLOCK_ONE(NULL);

	return error;
}
953 
954 int
955 VFS_START(struct mount *mp, int a)
956 {
957 	int error;
958 
959 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
960 		KERNEL_LOCK(1, NULL);
961 	}
962 	error = (*(mp->mnt_op->vfs_start))(mp, a);
963 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
964 		KERNEL_UNLOCK_ONE(NULL);
965 	}
966 
967 	return error;
968 }
969 
/*
 * VFS_UNMOUNT: call the file system's unmount routine.
 * Always runs under the big kernel lock, regardless of IMNT_MPSAFE.
 */
int
VFS_UNMOUNT(struct mount *mp, int a)
{
	int error;

	KERNEL_LOCK(1, NULL);
	error = (*(mp->mnt_op->vfs_unmount))(mp, a);
	KERNEL_UNLOCK_ONE(NULL);

	return error;
}
981 
982 int
983 VFS_ROOT(struct mount *mp, struct vnode **a)
984 {
985 	int error;
986 
987 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
988 		KERNEL_LOCK(1, NULL);
989 	}
990 	error = (*(mp->mnt_op->vfs_root))(mp, a);
991 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
992 		KERNEL_UNLOCK_ONE(NULL);
993 	}
994 
995 	return error;
996 }
997 
998 int
999 VFS_QUOTACTL(struct mount *mp, prop_dictionary_t dict)
1000 {
1001 	int error;
1002 
1003 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1004 		KERNEL_LOCK(1, NULL);
1005 	}
1006 	error = (*(mp->mnt_op->vfs_quotactl))(mp, dict);
1007 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1008 		KERNEL_UNLOCK_ONE(NULL);
1009 	}
1010 
1011 	return error;
1012 }
1013 
1014 int
1015 VFS_STATVFS(struct mount *mp, struct statvfs *a)
1016 {
1017 	int error;
1018 
1019 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1020 		KERNEL_LOCK(1, NULL);
1021 	}
1022 	error = (*(mp->mnt_op->vfs_statvfs))(mp, a);
1023 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1024 		KERNEL_UNLOCK_ONE(NULL);
1025 	}
1026 
1027 	return error;
1028 }
1029 
1030 int
1031 VFS_SYNC(struct mount *mp, int a, struct kauth_cred *b)
1032 {
1033 	int error;
1034 
1035 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1036 		KERNEL_LOCK(1, NULL);
1037 	}
1038 	error = (*(mp->mnt_op->vfs_sync))(mp, a, b);
1039 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1040 		KERNEL_UNLOCK_ONE(NULL);
1041 	}
1042 
1043 	return error;
1044 }
1045 
1046 int
1047 VFS_FHTOVP(struct mount *mp, struct fid *a, struct vnode **b)
1048 {
1049 	int error;
1050 
1051 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1052 		KERNEL_LOCK(1, NULL);
1053 	}
1054 	error = (*(mp->mnt_op->vfs_fhtovp))(mp, a, b);
1055 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1056 		KERNEL_UNLOCK_ONE(NULL);
1057 	}
1058 
1059 	return error;
1060 }
1061 
1062 int
1063 VFS_VPTOFH(struct vnode *vp, struct fid *a, size_t *b)
1064 {
1065 	int error;
1066 
1067 	if ((vp->v_vflag & VV_MPSAFE) == 0) {
1068 		KERNEL_LOCK(1, NULL);
1069 	}
1070 	error = (*(vp->v_mount->mnt_op->vfs_vptofh))(vp, a, b);
1071 	if ((vp->v_vflag & VV_MPSAFE) == 0) {
1072 		KERNEL_UNLOCK_ONE(NULL);
1073 	}
1074 
1075 	return error;
1076 }
1077 
1078 int
1079 VFS_SNAPSHOT(struct mount *mp, struct vnode *a, struct timespec *b)
1080 {
1081 	int error;
1082 
1083 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1084 		KERNEL_LOCK(1, NULL);
1085 	}
1086 	error = (*(mp->mnt_op->vfs_snapshot))(mp, a, b);
1087 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1088 		KERNEL_UNLOCK_ONE(NULL);
1089 	}
1090 
1091 	return error;
1092 }
1093 
/*
 * VFS_EXTATTRCTL: call the file system's extended-attribute control
 * routine.  Always runs under the big kernel lock for now.
 */
int
VFS_EXTATTRCTL(struct mount *mp, int a, struct vnode *b, int c, const char *d)
{
	int error;

	KERNEL_LOCK(1, NULL);		/* XXXSMP check ffs */
	error = (*(mp->mnt_op->vfs_extattrctl))(mp, a, b, c, d);
	KERNEL_UNLOCK_ONE(NULL);	/* XXX */

	return error;
}
1105 
1106 int
1107 VFS_SUSPENDCTL(struct mount *mp, int a)
1108 {
1109 	int error;
1110 
1111 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1112 		KERNEL_LOCK(1, NULL);
1113 	}
1114 	error = (*(mp->mnt_op->vfs_suspendctl))(mp, a);
1115 	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1116 		KERNEL_UNLOCK_ONE(NULL);
1117 	}
1118 
1119 	return error;
1120 }
1121 
1122 #if defined(DDB) || defined(DEBUGPRINT)
/* snprintb(9) format string for decoding buffer flag words. */
static const char buf_flagbits[] = BUF_FLAGBITS;

/*
 * Print a description of a buffer for DDB/debug output.
 * 'full' is currently unused here; 'pr' is the output routine
 * (e.g. db_printf).
 */
void
vfs_buf_print(struct buf *bp, int full, void (*pr)(const char *, ...))
{
	char bf[1024];

	(*pr)("  vp %p lblkno 0x%"PRIx64" blkno 0x%"PRIx64" rawblkno 0x%"
	    PRIx64 " dev 0x%x\n",
	    bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_rawblkno, bp->b_dev);

	/* Decode all three flag words into one symbolic string. */
	snprintb(bf, sizeof(bf),
	    buf_flagbits, bp->b_flags | bp->b_oflags | bp->b_cflags);
	(*pr)("  error %d flags 0x%s\n", bp->b_error, bf);

	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n",
		  bp->b_bufsize, bp->b_bcount, bp->b_resid);
	(*pr)("  data %p saveaddr %p\n",
		  bp->b_data, bp->b_saveaddr);
	(*pr)("  iodone %p objlock %p\n", bp->b_iodone, bp->b_objlock);
}
1144 
1145 void
1146 vfs_vnode_print(struct vnode *vp, int full, void (*pr)(const char *, ...))
1147 {
1148 	char bf[256];
1149 
1150 	uvm_object_printit(&vp->v_uobj, full, pr);
1151 	snprintb(bf, sizeof(bf),
1152 	    vnode_flagbits, vp->v_iflag | vp->v_vflag | vp->v_uflag);
1153 	(*pr)("\nVNODE flags %s\n", bf);
1154 	(*pr)("mp %p numoutput %d size 0x%llx writesize 0x%llx\n",
1155 	      vp->v_mount, vp->v_numoutput, vp->v_size, vp->v_writesize);
1156 
1157 	(*pr)("data %p writecount %ld holdcnt %ld\n",
1158 	      vp->v_data, vp->v_writecount, vp->v_holdcnt);
1159 
1160 	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
1161 	      ARRAY_PRINT(vp->v_tag, vnode_tags), vp->v_tag,
1162 	      ARRAY_PRINT(vp->v_type, vnode_types), vp->v_type,
1163 	      vp->v_mount, vp->v_mountedhere);
1164 
1165 	(*pr)("v_lock %p\n", &vp->v_lock);
1166 
1167 	if (full) {
1168 		struct buf *bp;
1169 
1170 		(*pr)("clean bufs:\n");
1171 		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
1172 			(*pr)(" bp %p\n", bp);
1173 			vfs_buf_print(bp, full, pr);
1174 		}
1175 
1176 		(*pr)("dirty bufs:\n");
1177 		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
1178 			(*pr)(" bp %p\n", bp);
1179 			vfs_buf_print(bp, full, pr);
1180 		}
1181 	}
1182 }
1183 
1184 void
1185 vfs_mount_print(struct mount *mp, int full, void (*pr)(const char *, ...))
1186 {
1187 	char sbuf[256];
1188 
1189 	(*pr)("vnodecovered = %p syncer = %p data = %p\n",
1190 			mp->mnt_vnodecovered,mp->mnt_syncer,mp->mnt_data);
1191 
1192 	(*pr)("fs_bshift %d dev_bshift = %d\n",
1193 			mp->mnt_fs_bshift,mp->mnt_dev_bshift);
1194 
1195 	snprintb(sbuf, sizeof(sbuf), __MNT_FLAG_BITS, mp->mnt_flag);
1196 	(*pr)("flag = %s\n", sbuf);
1197 
1198 	snprintb(sbuf, sizeof(sbuf), __IMNT_FLAG_BITS, mp->mnt_iflag);
1199 	(*pr)("iflag = %s\n", sbuf);
1200 
1201 	(*pr)("refcnt = %d unmounting @ %p updating @ %p\n", mp->mnt_refcnt,
1202 	    &mp->mnt_unmounting, &mp->mnt_updating);
1203 
1204 	(*pr)("statvfs cache:\n");
1205 	(*pr)("\tbsize = %lu\n",mp->mnt_stat.f_bsize);
1206 	(*pr)("\tfrsize = %lu\n",mp->mnt_stat.f_frsize);
1207 	(*pr)("\tiosize = %lu\n",mp->mnt_stat.f_iosize);
1208 
1209 	(*pr)("\tblocks = %"PRIu64"\n",mp->mnt_stat.f_blocks);
1210 	(*pr)("\tbfree = %"PRIu64"\n",mp->mnt_stat.f_bfree);
1211 	(*pr)("\tbavail = %"PRIu64"\n",mp->mnt_stat.f_bavail);
1212 	(*pr)("\tbresvd = %"PRIu64"\n",mp->mnt_stat.f_bresvd);
1213 
1214 	(*pr)("\tfiles = %"PRIu64"\n",mp->mnt_stat.f_files);
1215 	(*pr)("\tffree = %"PRIu64"\n",mp->mnt_stat.f_ffree);
1216 	(*pr)("\tfavail = %"PRIu64"\n",mp->mnt_stat.f_favail);
1217 	(*pr)("\tfresvd = %"PRIu64"\n",mp->mnt_stat.f_fresvd);
1218 
1219 	(*pr)("\tf_fsidx = { 0x%"PRIx32", 0x%"PRIx32" }\n",
1220 			mp->mnt_stat.f_fsidx.__fsid_val[0],
1221 			mp->mnt_stat.f_fsidx.__fsid_val[1]);
1222 
1223 	(*pr)("\towner = %"PRIu32"\n",mp->mnt_stat.f_owner);
1224 	(*pr)("\tnamemax = %lu\n",mp->mnt_stat.f_namemax);
1225 
1226 	snprintb(sbuf, sizeof(sbuf), __MNT_FLAG_BITS, mp->mnt_stat.f_flag);
1227 
1228 	(*pr)("\tflag = %s\n",sbuf);
1229 	(*pr)("\tsyncwrites = %" PRIu64 "\n",mp->mnt_stat.f_syncwrites);
1230 	(*pr)("\tasyncwrites = %" PRIu64 "\n",mp->mnt_stat.f_asyncwrites);
1231 	(*pr)("\tsyncreads = %" PRIu64 "\n",mp->mnt_stat.f_syncreads);
1232 	(*pr)("\tasyncreads = %" PRIu64 "\n",mp->mnt_stat.f_asyncreads);
1233 	(*pr)("\tfstypename = %s\n",mp->mnt_stat.f_fstypename);
1234 	(*pr)("\tmntonname = %s\n",mp->mnt_stat.f_mntonname);
1235 	(*pr)("\tmntfromname = %s\n",mp->mnt_stat.f_mntfromname);
1236 
1237 	{
1238 		int cnt = 0;
1239 		struct vnode *vp;
1240 		(*pr)("locked vnodes =");
1241 		TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1242 			if (VOP_ISLOCKED(vp)) {
1243 				if ((++cnt % 6) == 0) {
1244 					(*pr)(" %p,\n\t", vp);
1245 				} else {
1246 					(*pr)(" %p,", vp);
1247 				}
1248 			}
1249 		}
1250 		(*pr)("\n");
1251 	}
1252 
1253 	if (full) {
1254 		int cnt = 0;
1255 		struct vnode *vp;
1256 		(*pr)("all vnodes =");
1257 		TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1258 			if (!TAILQ_NEXT(vp, v_mntvnodes)) {
1259 				(*pr)(" %p", vp);
1260 			} else if ((++cnt % 6) == 0) {
1261 				(*pr)(" %p,\n\t", vp);
1262 			} else {
1263 				(*pr)(" %p,", vp);
1264 			}
1265 		}
1266 		(*pr)("\n", vp);
1267 	}
1268 }
1269 
1270 /*
1271  * List all of the locked vnodes in the system.
1272  */
/* Local prototype: called only from DDB, so no header declares it. */
void printlockedvnodes(void);

/*
 * Walk every mount on the global mountlist and vprint() each vnode
 * that is currently locked.  Debug-only (DDB/DEBUGPRINT).
 */
void
printlockedvnodes(void)
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");
	mutex_enter(&mountlist_lock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
	     mp = nmp) {
		/*
		 * NOTE(review): this relies on the vfs_busy()/vfs_unbusy()
		 * mountlist iteration protocol: vfs_busy() appears to drop
		 * mountlist_lock and deliver the next entry via nmp (both
		 * on success and on failure, since the loop dereferences
		 * nmp either way without re-taking the lock first) --
		 * confirm against their definitions.
		 */
		if (vfs_busy(mp, &nmp)) {
			continue;
		}
		/* Mount is busied: safe to walk its vnode list unlocked. */
		TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint(NULL, vp);
		}
		/* Re-take mountlist_lock for vfs_unbusy() and the next hop. */
		mutex_enter(&mountlist_lock);
		vfs_unbusy(mp, false, &nmp);
	}
	mutex_exit(&mountlist_lock);
}
1297 
1298 #endif /* DDB || DEBUGPRINT */
1299