/*	$NetBSD: vfs_subr.c,v 1.496 2022/10/26 23:39:43 riastradh Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 2004, 2005, 2007, 2008, 2019, 2020
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, by Andrew Doran,
 * by Marshall Kirk McKusick and Greg Ganger at the University of Michigan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.496 2022/10/26 23:39:43 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_43.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/fstrans.h>
#include <sys/vnode_impl.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/module.h>

#include <miscfs/deadfs/deadfs.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm_ddb.h>

const enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
const int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}

int doforce = 1;		/* 1 => permit forcible unmounting */

/*
 * Local declarations.
 */

static void vn_initialize_syncerd(void);

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{

	vn_initialize_syncerd();
	vfs_mount_sysinit();
	vfs_vnode_sysinit();
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vinvalbuf(struct vnode *vp, int flags, kauth_cred_t cred, struct lwp *l,
	  bool catch_p, int slptimeo)
{
	struct buf *bp, *nbp;
	int error;
	int flushflags = PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO |
	    (flags & V_SAVE ? PGO_CLEANIT | PGO_RECLAIM : 0);

	/* XXXUBC this doesn't look at flags or slp* */
	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	error = VOP_PUTPAGES(vp, 0, 0, flushflags);
	if (error) {
		return error;
	}

	if (flags & V_SAVE) {
		error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0);
		if (error)
			return (error);
		KASSERT(LIST_EMPTY(&vp->v_dirtyblkhd));
	}

	mutex_enter(&bufcache_lock);
restart:
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		KASSERT(bp->b_vp == vp);
		nbp = LIST_NEXT(bp, b_vnbufs);
		error = bbusy(bp, catch_p, slptimeo, NULL);
		if (error != 0) {
			if (error == EPASSTHROUGH)
				goto restart;
			mutex_exit(&bufcache_lock);
			return (error);
		}
		brelsel(bp, BC_INVAL | BC_VFLUSH);
	}

	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		KASSERT(bp->b_vp == vp);
		nbp = LIST_NEXT(bp, b_vnbufs);
		error = bbusy(bp, catch_p, slptimeo, NULL);
		if (error != 0) {
			if (error == EPASSTHROUGH)
				goto restart;
			mutex_exit(&bufcache_lock);
			return (error);
		}
		/*
		 * XXX Since there are no node locks for NFS, I believe
		 * there is a slight chance that a delayed write will
		 * occur while sleeping just above, so check for it.
		 */
		if ((bp->b_oflags & BO_DELWRI) && (flags & V_SAVE)) {
#ifdef DEBUG
			printf("buffer still DELWRI\n");
#endif
			bp->b_cflags |= BC_BUSY | BC_VFLUSH;
			mutex_exit(&bufcache_lock);
			VOP_BWRITE(bp->b_vp, bp);
			mutex_enter(&bufcache_lock);
			goto restart;
		}
		brelsel(bp, BC_INVAL | BC_VFLUSH);
	}

#ifdef DIAGNOSTIC
	if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
		panic("vinvalbuf: flush failed, vp %p", vp);
#endif

	mutex_exit(&bufcache_lock);

	return (0);
}
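
/*
 * Example (illustrative sketch only, not from any particular file
 * system): a reclaim-like path that must flush dirty data and then
 * discard every cached buffer for a locked vnode might do:
 *
 *	error = vinvalbuf(vp, V_SAVE, cred, curlwp, false, 0);
 *	if (error != 0)
 *		return error;
 *
 * Passing 0 instead of V_SAVE throws the dirty buffers away unwritten.
 */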

/*
 * Destroy any in core blocks past the truncation length.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vtruncbuf(struct vnode *vp, daddr_t lbn, bool catch_p, int slptimeo)
{
	struct buf *bp, *nbp;
	int error;
	voff_t off;

	off = round_page((voff_t)lbn << vp->v_mount->mnt_fs_bshift);
	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	error = VOP_PUTPAGES(vp, off, 0, PGO_FREE | PGO_SYNCIO);
	if (error) {
		return error;
	}

	mutex_enter(&bufcache_lock);
restart:
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		KASSERT(bp->b_vp == vp);
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		error = bbusy(bp, catch_p, slptimeo, NULL);
		if (error != 0) {
			if (error == EPASSTHROUGH)
				goto restart;
			mutex_exit(&bufcache_lock);
			return (error);
		}
		brelsel(bp, BC_INVAL | BC_VFLUSH);
	}

	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		KASSERT(bp->b_vp == vp);
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		error = bbusy(bp, catch_p, slptimeo, NULL);
		if (error != 0) {
			if (error == EPASSTHROUGH)
				goto restart;
			mutex_exit(&bufcache_lock);
			return (error);
		}
		brelsel(bp, BC_INVAL | BC_VFLUSH);
	}
	mutex_exit(&bufcache_lock);

	return (0);
}
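
/*
 * Example (hypothetical sketch): a file system truncating a file to
 * "newsize" bytes would convert the new size into a logical block
 * number and discard everything at or beyond it; "bsize" here stands
 * for the file system's block size:
 *
 *	daddr_t lbn = howmany(newsize, bsize);
 *	error = vtruncbuf(vp, lbn, false, 0);
 */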

/*
 * Flush all dirty buffers from a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vflushbuf(struct vnode *vp, int flags)
{
	struct buf *bp, *nbp;
	int error, pflags;
	bool dirty, sync;

	sync = (flags & FSYNC_WAIT) != 0;
	pflags = PGO_CLEANIT | PGO_ALLPAGES |
		(sync ? PGO_SYNCIO : 0) |
		((flags & FSYNC_LAZY) ? PGO_LAZY : 0);
	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	(void) VOP_PUTPAGES(vp, 0, 0, pflags);

loop:
	mutex_enter(&bufcache_lock);
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		KASSERT(bp->b_vp == vp);
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_cflags & BC_BUSY))
			continue;
		if ((bp->b_oflags & BO_DELWRI) == 0)
			panic("vflushbuf: not dirty, bp %p", bp);
		bp->b_cflags |= BC_BUSY | BC_VFLUSH;
		mutex_exit(&bufcache_lock);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || !sync)
			(void) bawrite(bp);
		else {
			error = bwrite(bp);
			if (error)
				return error;
		}
		goto loop;
	}
	mutex_exit(&bufcache_lock);

	if (!sync)
		return 0;

	mutex_enter(vp->v_interlock);
	while (vp->v_numoutput != 0)
		cv_wait(&vp->v_cv, vp->v_interlock);
	dirty = !LIST_EMPTY(&vp->v_dirtyblkhd);
	mutex_exit(vp->v_interlock);

	if (dirty) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}

	return 0;
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, vnode_t **vpp)
{
	struct vattr va;

	vattr_null(&va);
	va.va_type = VBLK;
	va.va_rdev = dev;

	return vcache_new(dead_rootmount, NULL, &va, NOCRED, NULL, vpp);
}
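
/*
 * Example (sketch of a common boot-time pattern, assuming rootdev has
 * already been configured):
 *
 *	vnode_t *rootvp;
 *	if (bdevvp(rootdev, &rootvp) != 0)
 *		panic("cannot get root device vnode");
 */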

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev_t dev, vnode_t **vpp)
{
	struct vattr va;

	vattr_null(&va);
	va.va_type = VCHR;
	va.va_rdev = dev;

	return vcache_new(dead_rootmount, NULL, &va, NOCRED, NULL, vpp);
}

/*
 * Associate a buffer with a vnode.  There must already be a hold on
 * the vnode.
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL);
	KASSERT(bp->b_objlock == &buffer_lock);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT((bp->b_cflags & BC_BUSY) != 0);
	KASSERT(!cv_has_waiters(&bp->b_done));

	vholdl(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;

	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
	bp->b_objlock = vp->v_interlock;
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;

	KASSERT(vp != NULL);
	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT((bp->b_cflags & BC_BUSY) != 0);
	KASSERT(!cv_has_waiters(&bp->b_done));

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	if ((vp->v_iflag & (VI_ONWORKLST | VI_PAGES)) == VI_ONWORKLST &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
		vn_syncer_remove_from_worklist(vp);

	bp->b_objlock = &buffer_lock;
	bp->b_vp = NULL;
	holdrelel(vp);
}

/*
 * Reassign a buffer from one vnode list to another.
 * The list reassignment must be within the same vnode.
 * Used to assign file specific control information
 * (indirect blocks) to the list to which they belong.
 */
void
reassignbuf(struct buf *bp, struct vnode *vp)
{
	struct buflists *listheadp;
	int delayx;

	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((bp->b_cflags & BC_BUSY) != 0);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_oflags & BO_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_iflag & (VI_ONWORKLST | VI_PAGES)) ==
		    VI_ONWORKLST &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
			vn_syncer_remove_from_worklist(vp);
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_iflag & VI_ONWORKLST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delayx = dirdelay;
				break;
			case VBLK:
				if (spec_node_getmountedfs(vp) != NULL) {
					delayx = metadelay;
					break;
				}
				/* fall through */
			default:
				delayx = filedelay;
				break;
			}
			if (!vp->v_mount ||
			    (vp->v_mount->mnt_flag & MNT_ASYNC) == 0)
				vn_syncer_add_to_worklist(vp, delayx);
		}
	}
	bufinsvn(bp, listheadp);
}

/*
 * Lookup a vnode by device number and return it referenced.
 */
int
vfinddev(dev_t dev, enum vtype type, vnode_t **vpp)
{

	return (spec_node_lookup_by_dev(type, dev, VDEAD_NOWAIT, vpp) == 0);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	vnode_t *vp;
	dev_t dev;
	int mn;

	for (mn = minl; mn <= minh; mn++) {
		dev = makedev(maj, mn);
		/*
		 * Notify anyone trying to get at this device that it
		 * has been detached, and then revoke it.
		 */
		switch (type) {
		case VBLK:
			bdev_detached(dev);
			break;
		case VCHR:
			cdev_detached(dev);
			break;
		default:
			panic("invalid specnode type: %d", type);
		}
		/*
		 * Passing 0 as flags, instead of VDEAD_NOWAIT, means
		 * spec_node_lookup_by_dev will wait for vnodes it
		 * finds concurrently being revoked before returning.
		 */
		while (spec_node_lookup_by_dev(type, dev, 0, &vp) == 0) {
			VOP_REVOKE(vp, REVOKEALL);
			vrele(vp);
		}
	}
}

/*
 * The filesystem synchronizer mechanism - syncer.
 *
 * It is useful to delay writes of file data and filesystem metadata for
 * a certain amount of time so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To implement this,
 * vnodes are appended to a "workitem" queue.
 *
 * Most pending metadata should not wait for more than ten seconds, so
 * metadata on mounted block devices is delayed only about half the time
 * that file data is delayed.  Directory updates are more critical still,
 * so they are delayed only about a third the time that file data is
 * delayed.
 *
 * There are SYNCER_MAXDELAY queues that are processed in a round-robin
 * manner at a rate of one each second (driven off the filesystem syncer
 * thread).  The syncer_delayno variable indicates the next queue that is
 * to be processed.  Items that need to be processed soon are placed in
 * this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of, for example, fifteen seconds is implemented by placing the
 * request fifteen entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) % syncer_last]
 *
 * The VI_ONWORKLST flag indicates that the vnode is currently on a queue.
 */
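
/*
 * Worked example (hypothetical numbers): with syncer_delayno == 20 and
 * syncer_last == 34 (SYNCER_MAXDELAY + 2), a vnode queued with a delay
 * of 15 seconds lands in slot (20 + 15) % 34 == 1.  One queue is
 * drained per second, so the syncer reaches slot 1 again 15 seconds
 * from now.
 */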

#define SYNCER_MAXDELAY		32

typedef TAILQ_HEAD(synclist, vnode_impl) synclist_t;

static void	vn_syncer_add1(struct vnode *, int);
static void	sysctl_vfs_syncfs_setup(struct sysctllog **);

/*
 * Defines and variables for the syncer process.
 */
int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = 30;			/* max time to delay syncing data */
time_t filedelay = 30;			/* time to delay syncing files */
time_t dirdelay  = 15;			/* time to delay syncing directories */
time_t metadelay = 10;			/* time to delay syncing metadata */
time_t lockdelay = 1;			/* time to delay if locking fails */

static kmutex_t		syncer_data_lock; /* short term lock on data structs */

static int		syncer_delayno = 0;
static long		syncer_last;
static synclist_t *	syncer_workitem_pending;

static void
vn_initialize_syncerd(void)
{
	int i;

	syncer_last = SYNCER_MAXDELAY + 2;

	sysctl_vfs_syncfs_setup(NULL);

	syncer_workitem_pending =
	    kmem_alloc(syncer_last * sizeof (struct synclist), KM_SLEEP);

	for (i = 0; i < syncer_last; i++)
		TAILQ_INIT(&syncer_workitem_pending[i]);

	mutex_init(&syncer_data_lock, MUTEX_DEFAULT, IPL_NONE);
}

/*
 * Return delay factor appropriate for the given file system.  For
 * WAPBL we use the sync vnode to burst out metadata updates: sync
 * those file systems more frequently.
 */
static inline int
sync_delay(struct mount *mp)
{

	return mp->mnt_wapbl != NULL ? metadelay : syncdelay;
}

/*
 * Compute the next slot index from delay.
 */
static inline int
sync_delay_slot(int delayx)
{

	if (delayx > syncer_maxdelay - 2)
		delayx = syncer_maxdelay - 2;
	return (syncer_delayno + delayx) % syncer_last;
}
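
/*
 * Example: with syncer_maxdelay == 32, a requested delay of 60 is
 * clamped to 30 slots, keeping the target slot strictly ahead of the
 * slot currently being drained even though the array has syncer_last
 * (34) entries.
 */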

/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add1(struct vnode *vp, int delayx)
{
	synclist_t *slp;
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERT(mutex_owned(&syncer_data_lock));

	if (vp->v_iflag & VI_ONWORKLST) {
		/*
		 * Remove in order to adjust the position of the vnode.
		 * Note: called from sched_sync(), which will not hold
		 * interlock, therefore we cannot modify v_iflag here.
		 */
		slp = &syncer_workitem_pending[vip->vi_synclist_slot];
		TAILQ_REMOVE(slp, vip, vi_synclist);
	} else {
		KASSERT(mutex_owned(vp->v_interlock));
		vp->v_iflag |= VI_ONWORKLST;
	}

	vip->vi_synclist_slot = sync_delay_slot(delayx);

	slp = &syncer_workitem_pending[vip->vi_synclist_slot];
	TAILQ_INSERT_TAIL(slp, vip, vi_synclist);
}

void
vn_syncer_add_to_worklist(struct vnode *vp, int delayx)
{

	KASSERT(mutex_owned(vp->v_interlock));

	mutex_enter(&syncer_data_lock);
	vn_syncer_add1(vp, delayx);
	mutex_exit(&syncer_data_lock);
}

/*
 * Remove an item from the syncer work queue.
 */
void
vn_syncer_remove_from_worklist(struct vnode *vp)
{
	synclist_t *slp;
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_iflag & VI_ONWORKLST) {
		mutex_enter(&syncer_data_lock);
		vp->v_iflag &= ~VI_ONWORKLST;
		slp = &syncer_workitem_pending[vip->vi_synclist_slot];
		TAILQ_REMOVE(slp, vip, vi_synclist);
		mutex_exit(&syncer_data_lock);
	}
}

/*
 * Add this mount point to the syncer.
 */
void
vfs_syncer_add_to_worklist(struct mount *mp)
{
	static int start, incr, next;
	int vdelay;

	KASSERT(mutex_owned(mp->mnt_updating));
	KASSERT((mp->mnt_iflag & IMNT_ONWORKLIST) == 0);

	/*
	 * We attempt to scatter the mount points on the list
	 * so that they will go off at evenly distributed times
	 * even if all the filesystems are mounted at once.
	 */

	next += incr;
	if (next == 0 || next > syncer_maxdelay) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = syncer_maxdelay / 2;
			incr = syncer_maxdelay;
		}
		next = start;
	}
	mp->mnt_iflag |= IMNT_ONWORKLIST;
	vdelay = sync_delay(mp);
	mp->mnt_synclist_slot = vdelay > 0 ? next % vdelay : 0;
}
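
/*
 * Example of the scattering above (hypothetical sequence, with
 * syncer_maxdelay == 32): the first mount finds start == incr == 0
 * and re-seeds them to start = 16, incr = 32, taking next = 16.  The
 * second mount computes next = 48, overflows, and re-seeds to
 * start = 8, incr = 16, next = 8; the third gets 24, and so on,
 * spreading the mount points across the slot space.
 */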

/*
 * Remove the mount point from the syncer.
 */
void
vfs_syncer_remove_from_worklist(struct mount *mp)
{

	KASSERT(mutex_owned(mp->mnt_updating));
	KASSERT((mp->mnt_iflag & IMNT_ONWORKLIST) != 0);

	mp->mnt_iflag &= ~IMNT_ONWORKLIST;
}

/*
 * Try lazy sync, return true on success.
 */
static bool
lazy_sync_vnode(struct vnode *vp)
{
	bool synced;

	KASSERT(mutex_owned(&syncer_data_lock));

	synced = false;
	if (vcache_tryvget(vp) == 0) {
		mutex_exit(&syncer_data_lock);
		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
			synced = true;
			(void) VOP_FSYNC(vp, curlwp->l_cred,
			    FSYNC_LAZY, 0, 0);
			vput(vp);
		} else
			vrele(vp);
		mutex_enter(&syncer_data_lock);
	}
	return synced;
}

/*
 * System filesystem synchronizer daemon.
 */
void
sched_sync(void *arg)
{
	mount_iterator_t *iter;
	synclist_t *slp;
	struct vnode_impl *vi;
	struct vnode *vp;
	struct mount *mp;
	time_t starttime;
	bool synced;

	for (;;) {
		starttime = time_second;

		/*
		 * Sync mounts whose dirty time has expired.
		 */
		mountlist_iterator_init(&iter);
		while ((mp = mountlist_iterator_trynext(iter)) != NULL) {
			if ((mp->mnt_iflag & IMNT_ONWORKLIST) == 0 ||
			    mp->mnt_synclist_slot != syncer_delayno) {
				continue;
			}
			mp->mnt_synclist_slot = sync_delay_slot(sync_delay(mp));
			VFS_SYNC(mp, MNT_LAZY, curlwp->l_cred);
		}
		mountlist_iterator_destroy(iter);

		mutex_enter(&syncer_data_lock);

		/*
		 * Push files whose dirty time has expired.
		 */
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno >= syncer_last)
			syncer_delayno = 0;

		while ((vi = TAILQ_FIRST(slp)) != NULL) {
			vp = VIMPL_TO_VNODE(vi);
			synced = lazy_sync_vnode(vp);

			/*
			 * XXX The vnode may have been recycled, in which
			 * case it may have a new identity.
			 */
			vi = TAILQ_FIRST(slp);
			if (vi != NULL && VIMPL_TO_VNODE(vi) == vp) {
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 *
				 * Try again sooner rather than later if
				 * we were unable to lock the vnode.  Lock
				 * failure should not prevent us from doing
				 * the sync "soon".
				 *
				 * If we locked it yet arrive here, it's
				 * likely that lazy sync is in progress and
				 * so the vnode still has dirty metadata.
				 * syncdelay is mainly to get this vnode out
				 * of the way so we do not consider it again
				 * "soon" in this loop, so the delay time is
				 * not critical as long as it is not "soon".
				 * While write-back strategy is the file
				 * system's domain, we expect write-back to
				 * occur no later than syncdelay seconds
				 * into the future.
				 */
				vn_syncer_add1(vp,
				    synced ? syncdelay : lockdelay);
			}
		}

		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime) {
			kpause("syncer", false, hz, &syncer_data_lock);
		}
		mutex_exit(&syncer_data_lock);
	}
}

static void
sysctl_vfs_syncfs_setup(struct sysctllog **clog)
{
	const struct sysctlnode *rnode, *cnode;

	sysctl_createv(clog, 0, NULL, &rnode,
			CTLFLAG_PERMANENT,
			CTLTYPE_NODE, "sync",
			SYSCTL_DESCR("syncer options"),
			NULL, 0, NULL, 0,
			CTL_VFS, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &rnode, &cnode,
			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
			CTLTYPE_QUAD, "delay",
			SYSCTL_DESCR("max time to delay syncing data"),
			NULL, 0, &syncdelay, 0,
			CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &rnode, &cnode,
			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
			CTLTYPE_QUAD, "filedelay",
			SYSCTL_DESCR("time to delay syncing files"),
			NULL, 0, &filedelay, 0,
			CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &rnode, &cnode,
			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
			CTLTYPE_QUAD, "dirdelay",
			SYSCTL_DESCR("time to delay syncing directories"),
			NULL, 0, &dirdelay, 0,
			CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &rnode, &cnode,
			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
			CTLTYPE_QUAD, "metadelay",
			SYSCTL_DESCR("time to delay syncing metadata"),
			NULL, 0, &metadelay, 0,
			CTL_CREATE, CTL_EOL);
}

/*
 * sysctl helper routine to return list of supported fstypes
 */
int
sysctl_vfs_generic_fstypes(SYSCTLFN_ARGS)
{
	char bf[sizeof(((struct statvfs *)NULL)->f_fstypename)];
	char *where = oldp;
	struct vfsops *v;
	size_t needed, left, slen;
	int error, first;

	if (newp != NULL)
		return (EPERM);
	if (namelen != 0)
		return (EINVAL);

	first = 1;
	error = 0;
	needed = 0;
	left = *oldlenp;

	sysctl_unlock();
	mutex_enter(&vfs_list_lock);
	LIST_FOREACH(v, &vfs_list, vfs_list) {
		if (where == NULL)
			needed += strlen(v->vfs_name) + 1;
		else {
			memset(bf, 0, sizeof(bf));
			if (first) {
				strncpy(bf, v->vfs_name, sizeof(bf));
				first = 0;
			} else {
				bf[0] = ' ';
				strncpy(bf + 1, v->vfs_name, sizeof(bf) - 1);
			}
			bf[sizeof(bf)-1] = '\0';
			slen = strlen(bf);
			if (left < slen + 1)
				break;
			v->vfs_refcount++;
			mutex_exit(&vfs_list_lock);
			/* +1 to copy out the trailing NUL byte */
			error = copyout(bf, where, slen + 1);
			mutex_enter(&vfs_list_lock);
			v->vfs_refcount--;
			if (error)
				break;
			where += slen;
			needed += slen;
			left -= slen;
		}
	}
	mutex_exit(&vfs_list_lock);
	sysctl_relock();
	*oldlenp = needed;
	return (error);
}
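
/*
 * Example (userland sketch): the list built above is a single
 * space-separated string; assuming the node is rooted at
 * vfs.generic.fstypes as on NetBSD, a consumer could read it with
 * sysctlbyname(3):
 *
 *	char buf[1024];
 *	size_t len = sizeof(buf);
 *	if (sysctlbyname("vfs.generic.fstypes", buf, &len, NULL, 0) == 0)
 *		printf("%s\n", buf);
 */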

int kinfo_vdebug = 1;
int kinfo_vgetfailed;

#define KINFO_VNODESLOP	10

/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
int
sysctl_kern_vnode(SYSCTLFN_ARGS)
{
	char *where = oldp;
	size_t *sizep = oldlenp;
	struct mount *mp;
	vnode_t *vp, vbuf;
	mount_iterator_t *iter;
	struct vnode_iterator *marker;
	char *bp = where;
	char *ewhere;
	int error;

	if (namelen != 0)
		return (EOPNOTSUPP);
	if (newp != NULL)
		return (EPERM);

#define VPTRSZ	sizeof(vnode_t *)
#define VNODESZ	sizeof(vnode_t)
	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *sizep;

	sysctl_unlock();
	mountlist_iterator_init(&iter);
	while ((mp = mountlist_iterator_next(iter)) != NULL) {
		vfs_vnode_iterator_init(mp, &marker);
		while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
			if (bp + VPTRSZ + VNODESZ > ewhere) {
				vrele(vp);
				vfs_vnode_iterator_destroy(marker);
				mountlist_iterator_destroy(iter);
				sysctl_relock();
				*sizep = bp - where;
				return (ENOMEM);
			}
			memcpy(&vbuf, vp, VNODESZ);
			if ((error = copyout(&vp, bp, VPTRSZ)) ||
			    (error = copyout(&vbuf, bp + VPTRSZ, VNODESZ))) {
				vrele(vp);
				vfs_vnode_iterator_destroy(marker);
				mountlist_iterator_destroy(iter);
				sysctl_relock();
				return (error);
			}
			vrele(vp);
			bp += VPTRSZ + VNODESZ;
		}
		vfs_vnode_iterator_destroy(marker);
	}
	mountlist_iterator_destroy(iter);
	sysctl_relock();

	*sizep = bp - where;
	return (0);
}
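
/*
 * Example (userland sketch): each record copied out above is a kernel
 * vnode pointer immediately followed by the vnode contents, so a
 * consumer walks the returned buffer in fixed strides:
 *
 *	size_t reclen = sizeof(vnode_t *) + sizeof(vnode_t);
 *	for (char *p = buf; p + reclen <= buf + len; p += reclen) {
 *		vnode_t *kaddr;
 *		memcpy(&kaddr, p, sizeof(kaddr));
 *		... vnode contents start at p + sizeof(vnode_t *) ...
 *	}
 */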

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	memset(vap, 0, sizeof(*vap));

	vap->va_type = VNON;

	/*
	 * Assign each member individually so this remains safe even if
	 * the size and signedness of the members vary.
	 */
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_size = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_atime.tv_sec =
	    vap->va_mtime.tv_sec =
	    vap->va_ctime.tv_sec =
	    vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec =
	    vap->va_mtime.tv_nsec =
	    vap->va_ctime.tv_nsec =
	    vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_bytes = VNOVAL;
}

/*
 * Vnode state to string.
 */
const char *
vstate_name(enum vnode_state state)
{

	switch (state) {
	case VS_ACTIVE:
		return "ACTIVE";
	case VS_MARKER:
		return "MARKER";
	case VS_LOADING:
		return "LOADING";
	case VS_LOADED:
		return "LOADED";
	case VS_BLOCKED:
		return "BLOCKED";
	case VS_RECLAIMING:
		return "RECLAIMING";
	case VS_RECLAIMED:
		return "RECLAIMED";
	default:
		return "ILLEGAL";
	}
}

/*
 * Print a description of a vnode (common part).
 */
static void
vprint_common(struct vnode *vp, const char *prefix,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
	int n;
	char bf[96];
	const uint8_t *cp;
	vnode_impl_t *vip;
	const char * const vnode_tags[] = { VNODE_TAGS };
	const char * const vnode_types[] = { VNODE_TYPES };
	const char vnode_flagbits[] = VNODE_FLAGBITS;

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
#define ARRAY_PRINT(idx, arr) \
    ((unsigned int)(idx) < ARRAY_SIZE(arr) ? (arr)[(idx)] : "UNKNOWN")

	vip = VNODE_TO_VIMPL(vp);

	snprintb(bf, sizeof(bf),
	    vnode_flagbits, vp->v_iflag | vp->v_vflag | vp->v_uflag);

	(*pr)("vnode %p flags %s\n", vp, bf);
	(*pr)("%stag %s(%d) type %s(%d) mount %p typedata %p\n", prefix,
	    ARRAY_PRINT(vp->v_tag, vnode_tags), vp->v_tag,
	    ARRAY_PRINT(vp->v_type, vnode_types), vp->v_type,
	    vp->v_mount, vp->v_mountedhere);
	(*pr)("%susecount %d writecount %d holdcount %d\n", prefix,
	    vrefcnt(vp), vp->v_writecount, vp->v_holdcnt);
	(*pr)("%ssize %" PRIx64 " writesize %" PRIx64 " numoutput %d\n",
	    prefix, vp->v_size, vp->v_writesize, vp->v_numoutput);
	(*pr)("%sdata %p lock %p\n", prefix, vp->v_data, &vip->vi_lock);

	(*pr)("%sstate %s key(%p %zd)", prefix, vstate_name(vip->vi_state),
	    vip->vi_key.vk_mount, vip->vi_key.vk_key_len);
	n = vip->vi_key.vk_key_len;
	cp = vip->vi_key.vk_key;
	while (n-- > 0)
		(*pr)(" %02x", *cp++);
	(*pr)("\n");
	(*pr)("%slrulisthd %p\n", prefix, vip->vi_lrulisthd);

#undef ARRAY_PRINT
#undef ARRAY_SIZE
}

/*
 * Print out a description of a vnode.
 */
void
vprint(const char *label, struct vnode *vp)
{

	if (label != NULL)
		printf("%s: ", label);
	vprint_common(vp, "\t", printf);
	if (vp->v_data != NULL) {
		printf("\t");
		VOP_PRINT(vp);
	}
}

/*
 * Given a file system name, look up the vfsops for that
 * file system, or return NULL if the file system isn't present
 * in the kernel.
 */
struct vfsops *
vfs_getopsbyname(const char *name)
{
	struct vfsops *v;

	mutex_enter(&vfs_list_lock);
	LIST_FOREACH(v, &vfs_list, vfs_list) {
		if (strcmp(v->vfs_name, name) == 0)
			break;
	}
	if (v != NULL)
		v->vfs_refcount++;
	mutex_exit(&vfs_list_lock);

	return (v);
}

void
copy_statvfs_info(struct statvfs *sbp, const struct mount *mp)
{
	const struct statvfs *mbp;

	if (sbp == (mbp = &mp->mnt_stat))
		return;

	(void)memcpy(&sbp->f_fsidx, &mbp->f_fsidx, sizeof(sbp->f_fsidx));
	sbp->f_fsid = mbp->f_fsid;
	sbp->f_owner = mbp->f_owner;
	sbp->f_flag = mbp->f_flag;
	sbp->f_syncwrites = mbp->f_syncwrites;
	sbp->f_asyncwrites = mbp->f_asyncwrites;
	sbp->f_syncreads = mbp->f_syncreads;
	sbp->f_asyncreads = mbp->f_asyncreads;
	(void)memcpy(sbp->f_spare, mbp->f_spare, sizeof(mbp->f_spare));
	(void)memcpy(sbp->f_fstypename, mbp->f_fstypename,
	    sizeof(sbp->f_fstypename));
	(void)memcpy(sbp->f_mntonname, mbp->f_mntonname,
	    sizeof(sbp->f_mntonname));
	(void)memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname,
	    sizeof(sbp->f_mntfromname));
	(void)memcpy(sbp->f_mntfromlabel, mp->mnt_stat.f_mntfromlabel,
	    sizeof(sbp->f_mntfromlabel));
	sbp->f_namemax = mbp->f_namemax;
}

int
set_statvfs_info(const char *onp, int ukon, const char *fromp, int ukfrom,
    const char *vfsname, struct mount *mp, struct lwp *l)
{
	int error;
	size_t size;
	struct statvfs *sfs = &mp->mnt_stat;
	int (*fun)(const void *, void *, size_t, size_t *);

	(void)strlcpy(mp->mnt_stat.f_fstypename, vfsname,
	    sizeof(mp->mnt_stat.f_fstypename));

	if (onp) {
		struct cwdinfo *cwdi = l->l_proc->p_cwdi;
		fun = (ukon == UIO_SYSSPACE) ? copystr : copyinstr;
		if (cwdi->cwdi_rdir != NULL) {
			size_t len;
			char *bp;
			char *path = PNBUF_GET();

			bp = path + MAXPATHLEN;
			*--bp = '\0';
			rw_enter(&cwdi->cwdi_lock, RW_READER);
			error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp,
			    path, MAXPATHLEN / 2, 0, l);
			rw_exit(&cwdi->cwdi_lock);
			if (error) {
				PNBUF_PUT(path);
				return error;
			}

			len = strlen(bp);
			if (len > sizeof(sfs->f_mntonname) - 1)
				len = sizeof(sfs->f_mntonname) - 1;
			(void)strncpy(sfs->f_mntonname, bp, len);
			PNBUF_PUT(path);

			if (len < sizeof(sfs->f_mntonname) - 1) {
				error = (*fun)(onp, &sfs->f_mntonname[len],
				    sizeof(sfs->f_mntonname) - len - 1, &size);
				if (error)
					return error;
				size += len;
			} else {
				size = len;
			}
		} else {
			error = (*fun)(onp, &sfs->f_mntonname,
			    sizeof(sfs->f_mntonname) - 1, &size);
			if (error)
				return error;
		}
		(void)memset(sfs->f_mntonname + size, 0,
		    sizeof(sfs->f_mntonname) - size);
	}

	if (fromp) {
		fun = (ukfrom == UIO_SYSSPACE) ? copystr : copyinstr;
		error = (*fun)(fromp, sfs->f_mntfromname,
		    sizeof(sfs->f_mntfromname) - 1, &size);
		if (error)
			return error;
		(void)memset(sfs->f_mntfromname + size, 0,
		    sizeof(sfs->f_mntfromname) - size);
	}
	return 0;
}

/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

int vfs_timestamp_precision __read_mostly = TSP_NSEC;

void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (vfs_timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}
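
/*
 * Example (sketch): a file system stamping a modification time goes
 * through this helper so that the precision follows the knob above;
 * "ip->i_mtime" is a hypothetical inode field:
 *
 *	struct timespec ts;
 *	vfs_timestamp(&ts);
 *	ip->i_mtime = ts;
 */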

/*
 * The purpose of this routine is to remove granularity from accmode_t,
 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
 * VADMIN and VAPPEND.
 *
 * If it returns 0, the caller is supposed to continue with the usual
 * access checks using 'accmode' as modified by this routine.  If it
 * returns a nonzero value, the caller is supposed to return that value
 * as errno.
 *
 * Note that after this routine runs, accmode may be zero.
 */
int
vfs_unixify_accmode(accmode_t *accmode)
{
	/*
	 * There is no way to specify an explicit "deny" rule using
	 * file mode or POSIX.1e ACLs.
	 */
	if (*accmode & VEXPLICIT_DENY) {
		*accmode = 0;
		return (0);
	}

	/*
	 * None of these can be translated into usual access bits.
	 * Also, the common case for NFSv4 ACLs is to not contain
	 * either of these bits.  Caller should check for VWRITE
	 * on the containing directory instead.
	 */
	if (*accmode & (VDELETE_CHILD | VDELETE))
		return (EPERM);

	if (*accmode & VADMIN_PERMS) {
		*accmode &= ~VADMIN_PERMS;
		*accmode |= VADMIN;
	}

	/*
	 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
	 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
	 */
	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);

	return (0);
}
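
/*
 * Example caller (sketch restating the contract above):
 *
 *	error = vfs_unixify_accmode(&accmode);
 *	if (error != 0)
 *		return error;
 *	if (accmode == 0)
 *		return 0;
 *	... continue with the usual VEXEC/VREAD/VWRITE/VADMIN checks ...
 */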

time_t	rootfstime;			/* recorded root fs time, if known */
void
setrootfstime(time_t t)
{
	rootfstime = t;
}

static const uint8_t vttodt_tab[] = {
	[VNON]	=	DT_UNKNOWN,
	[VREG]	=	DT_REG,
	[VDIR]	=	DT_DIR,
	[VBLK]	=	DT_BLK,
	[VCHR]	=	DT_CHR,
	[VLNK]	=	DT_LNK,
	[VSOCK]	=	DT_SOCK,
	[VFIFO]	=	DT_FIFO,
	[VBAD]	=	DT_UNKNOWN
};

uint8_t
vtype2dt(enum vtype vt)
{

	CTASSERT(VBAD == __arraycount(vttodt_tab) - 1);
	return vttodt_tab[vt];
}
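
/*
 * Example (sketch): a readdir implementation filling in struct dirent
 * translates the vnode type into the directory entry type:
 *
 *	dp->d_type = vtype2dt(vp->v_type);
 */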

int
VFS_MOUNT(struct mount *mp, const char *a, void *b, size_t *c)
{
	int mpsafe = mp->mnt_iflag & IMNT_MPSAFE;
	int error;

	/*
	 * Note: The first time through, the vfs_mount function may set
	 * IMNT_MPSAFE, so we have to cache it on entry in order to
	 * avoid leaking a kernel lock.
	 *
	 * XXX Maybe the MPSAFE bit should be set in struct vfsops and
	 * not in struct mount.
	 */
	if (!mpsafe) {
		KERNEL_LOCK(1, NULL);
	}
	error = (*(mp->mnt_op->vfs_mount))(mp, a, b, c);
	if (!mpsafe) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return error;
}

int
VFS_START(struct mount *mp, int a)
{
	int error;

	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_LOCK(1, NULL);
	}
	error = (*(mp->mnt_op->vfs_start))(mp, a);
	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return error;
}

int
VFS_UNMOUNT(struct mount *mp, int a)
{
	int error;

	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_LOCK(1, NULL);
	}
	error = (*(mp->mnt_op->vfs_unmount))(mp, a);
	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return error;
}

int
VFS_ROOT(struct mount *mp, int lktype, struct vnode **a)
{
	int error;

	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_LOCK(1, NULL);
	}
	error = (*(mp->mnt_op->vfs_root))(mp, lktype, a);
	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return error;
}

int
VFS_QUOTACTL(struct mount *mp, struct quotactl_args *args)
{
	int error;

	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_LOCK(1, NULL);
	}
	error = (*(mp->mnt_op->vfs_quotactl))(mp, args);
	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return error;
}

int
VFS_STATVFS(struct mount *mp, struct statvfs *a)
{
	int error;

	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_LOCK(1, NULL);
	}
	error = (*(mp->mnt_op->vfs_statvfs))(mp, a);
	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return error;
}

int
VFS_SYNC(struct mount *mp, int a, struct kauth_cred *b)
{
	int error;

	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_LOCK(1, NULL);
	}
	error = (*(mp->mnt_op->vfs_sync))(mp, a, b);
	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return error;
}

int
VFS_FHTOVP(struct mount *mp, struct fid *a, int b, struct vnode **c)
{
	int error;

	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_LOCK(1, NULL);
	}
	error = (*(mp->mnt_op->vfs_fhtovp))(mp, a, b, c);
	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return error;
}

int
VFS_VPTOFH(struct vnode *vp, struct fid *a, size_t *b)
{
	int error;

	if ((vp->v_vflag & VV_MPSAFE) == 0) {
		KERNEL_LOCK(1, NULL);
	}
	error = (*(vp->v_mount->mnt_op->vfs_vptofh))(vp, a, b);
	if ((vp->v_vflag & VV_MPSAFE) == 0) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return error;
}

int
VFS_SNAPSHOT(struct mount *mp, struct vnode *a, struct timespec *b)
{
	int error;

	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_LOCK(1, NULL);
	}
	error = (*(mp->mnt_op->vfs_snapshot))(mp, a, b);
	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return error;
}

int
VFS_EXTATTRCTL(struct mount *mp, int a, struct vnode *b, int c, const char *d)
{
	int error;

	KERNEL_LOCK(1, NULL);		/* XXXSMP check ffs */
	error = (*(mp->mnt_op->vfs_extattrctl))(mp, a, b, c, d);
	KERNEL_UNLOCK_ONE(NULL);	/* XXX */

	return error;
}

int
VFS_SUSPENDCTL(struct mount *mp, int a)
{
	int error;

	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_LOCK(1, NULL);
	}
	error = (*(mp->mnt_op->vfs_suspendctl))(mp, a);
	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return error;
}

#if defined(DDB) || defined(DEBUGPRINT)
static const char buf_flagbits[] = BUF_FLAGBITS;

void
vfs_buf_print(struct buf *bp, int full, void (*pr)(const char *, ...))
{
	char bf[1024];

	(*pr)("  vp %p lblkno 0x%"PRIx64" blkno 0x%"PRIx64" rawblkno 0x%"
	    PRIx64 " dev 0x%x\n",
	    bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_rawblkno, bp->b_dev);

	snprintb(bf, sizeof(bf),
	    buf_flagbits, bp->b_flags | bp->b_oflags | bp->b_cflags);
	(*pr)("  error %d flags %s\n", bp->b_error, bf);

	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n",
		  bp->b_bufsize, bp->b_bcount, bp->b_resid);
	(*pr)("  data %p saveaddr %p\n",
		  bp->b_data, bp->b_saveaddr);
	(*pr)("  iodone %p objlock %p\n", bp->b_iodone, bp->b_objlock);
}

void
vfs_vnode_print(struct vnode *vp, int full, void (*pr)(const char *, ...))
{

	uvm_object_printit(&vp->v_uobj, full, pr);
	(*pr)("\n");
	vprint_common(vp, "", pr);
	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}

void
vfs_vnode_lock_print(void *vlock, int full, void (*pr)(const char *, ...))
{
	struct mount *mp;
	vnode_impl_t *vip;

	for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp)) {
		TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) {
			if (&vip->vi_lock == vlock ||
			    VIMPL_TO_VNODE(vip)->v_interlock == vlock)
				vfs_vnode_print(VIMPL_TO_VNODE(vip), full, pr);
		}
	}
}

void
vfs_mount_print_all(int full, void (*pr)(const char *, ...))
{
	struct mount *mp;
	for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp))
		vfs_mount_print(mp, full, pr);
}

void
vfs_mount_print(struct mount *mp, int full, void (*pr)(const char *, ...))
{
	char sbuf[256];

	(*pr)("vnodecovered = %p data = %p\n",
			mp->mnt_vnodecovered, mp->mnt_data);

	(*pr)("fs_bshift %d dev_bshift = %d\n",
			mp->mnt_fs_bshift, mp->mnt_dev_bshift);

	snprintb(sbuf, sizeof(sbuf), __MNT_FLAG_BITS, mp->mnt_flag);
	(*pr)("flag = %s\n", sbuf);

	snprintb(sbuf, sizeof(sbuf), __IMNT_FLAG_BITS, mp->mnt_iflag);
	(*pr)("iflag = %s\n", sbuf);

	(*pr)("refcnt = %d updating @ %p\n", mp->mnt_refcnt, mp->mnt_updating);

	(*pr)("statvfs cache:\n");
	(*pr)("\tbsize = %lu\n", mp->mnt_stat.f_bsize);
	(*pr)("\tfrsize = %lu\n", mp->mnt_stat.f_frsize);
	(*pr)("\tiosize = %lu\n", mp->mnt_stat.f_iosize);

	(*pr)("\tblocks = %"PRIu64"\n", mp->mnt_stat.f_blocks);
	(*pr)("\tbfree = %"PRIu64"\n", mp->mnt_stat.f_bfree);
	(*pr)("\tbavail = %"PRIu64"\n", mp->mnt_stat.f_bavail);
	(*pr)("\tbresvd = %"PRIu64"\n", mp->mnt_stat.f_bresvd);

	(*pr)("\tfiles = %"PRIu64"\n", mp->mnt_stat.f_files);
	(*pr)("\tffree = %"PRIu64"\n", mp->mnt_stat.f_ffree);
	(*pr)("\tfavail = %"PRIu64"\n", mp->mnt_stat.f_favail);
	(*pr)("\tfresvd = %"PRIu64"\n", mp->mnt_stat.f_fresvd);

	(*pr)("\tf_fsidx = { 0x%"PRIx32", 0x%"PRIx32" }\n",
			mp->mnt_stat.f_fsidx.__fsid_val[0],
			mp->mnt_stat.f_fsidx.__fsid_val[1]);

	(*pr)("\towner = %"PRIu32"\n", mp->mnt_stat.f_owner);
	(*pr)("\tnamemax = %lu\n", mp->mnt_stat.f_namemax);

	snprintb(sbuf, sizeof(sbuf), __MNT_FLAG_BITS, mp->mnt_stat.f_flag);

	(*pr)("\tflag = %s\n", sbuf);
	(*pr)("\tsyncwrites = %" PRIu64 "\n", mp->mnt_stat.f_syncwrites);
	(*pr)("\tasyncwrites = %" PRIu64 "\n", mp->mnt_stat.f_asyncwrites);
	(*pr)("\tsyncreads = %" PRIu64 "\n", mp->mnt_stat.f_syncreads);
	(*pr)("\tasyncreads = %" PRIu64 "\n", mp->mnt_stat.f_asyncreads);
	(*pr)("\tfstypename = %s\n", mp->mnt_stat.f_fstypename);
	(*pr)("\tmntonname = %s\n", mp->mnt_stat.f_mntonname);
	(*pr)("\tmntfromname = %s\n", mp->mnt_stat.f_mntfromname);

	{
		int cnt = 0;
		vnode_t *vp;
		vnode_impl_t *vip;
		(*pr)("locked vnodes =");
		TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) {
			vp = VIMPL_TO_VNODE(vip);
			if (VOP_ISLOCKED(vp)) {
				if ((++cnt % 6) == 0) {
					(*pr)(" %p,\n\t", vp);
				} else {
					(*pr)(" %p,", vp);
				}
			}
		}
		(*pr)("\n");
	}

	if (full) {
		int cnt = 0;
		vnode_t *vp;
		vnode_impl_t *vip;
		(*pr)("all vnodes =");
		TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) {
			vp = VIMPL_TO_VNODE(vip);
			if (!TAILQ_NEXT(vip, vi_mntvnodes)) {
				(*pr)(" %p", vp);
			} else if ((++cnt % 6) == 0) {
				(*pr)(" %p,\n\t", vp);
			} else {
				(*pr)(" %p,", vp);
			}
		}
		(*pr)("\n");
	}
}

/*
 * List all of the locked vnodes in the system.
 */
void printlockedvnodes(void);

void
printlockedvnodes(void)
{
	struct mount *mp;
	vnode_t *vp;
	vnode_impl_t *vip;

	printf("Locked vnodes\n");
	for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp)) {
		TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) {
			vp = VIMPL_TO_VNODE(vip);
			if (VOP_ISLOCKED(vp))
				vprint(NULL, vp);
		}
	}
}

#endif /* DDB || DEBUGPRINT */