/*	$NetBSD: lfs_bio.c,v 1.122 2012/02/16 02:47:55 perseant Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.122 2012/02/16 02:47:55 perseant Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * protected by lfs_lock.
 */
int	locked_queue_count   = 0;	/* Count of locked-down buffers. */
long	locked_queue_bytes   = 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages     = 0;	/* Total number of LFS-written pages */
int	lfs_fs_pagetrip	     = 0;	/* # of pages to trip per-fs write */
int	lfs_writing	     = 0;	/* Set if already kicked off a writer
					   because of buffer space */
int	locked_queue_waiters = 0;	/* Number of processes waiting on lq */

/* Lock and condition variables for above. */
kcondvar_t	locked_queue_cv;
kcondvar_t	lfs_writing_cv;
kmutex_t	lfs_lock;
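
/*
 * Illustrative only: the counters above follow the usual discipline
 * for lfs_lock, e.g.
 *
 *	mutex_enter(&lfs_lock);
 *	locked_queue_count++;
 *	locked_queue_bytes += bp->b_bufsize;
 *	mutex_exit(&lfs_lock);
 *
 * In practice the LFS_LOCK_BUF()/LFS_UNLOCK_BUF() macros in lfs.h wrap
 * updates like this on behalf of callers.
 */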

extern int lfs_dostats;

/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

static int lfs_fits_buf(struct lfs *, int, int);
static int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
static int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int);

static int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n <= LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes <= LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}

/* ARGSUSED */
static int
lfs_reservebuf(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int n, int bytes)
{
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
	mutex_enter(&lfs_lock);
	while (!cantwait && n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		lfs_flush(fs, 0, 0);

		DLOG((DLOG_AVAIL, "lfs_reservebuf: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		++locked_queue_waiters;
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		--locked_queue_waiters;
		if (error && error != EWOULDBLOCK) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	if (n < 0 && locked_queue_waiters > 0) {
		DLOG((DLOG_AVAIL, "lfs_reservebuf: broadcast: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		cv_broadcast(&locked_queue_cv);
	}

	mutex_exit(&lfs_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc..)
 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
 * or rearrange vnodeop interface to leave vnode locking to file system
 * specific code so that each file system can have its own vnode locking and
 * vnode re-using strategies.
 */
static int
lfs_reserveavail(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	slept = 0;
	mutex_enter(&lfs_lock);
	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
	while (!cantwait && fsb > 0 &&
	       !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
		mutex_exit(&lfs_lock);

		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %d,"
			      " est_bfree = %d)\n",
			      fsb + fs->lfs_ravail + fs->lfs_favail,
			      fs->lfs_bfree, LFS_EST_BFREE(fs)));
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		lfs_wakeup_cleaner(fs);

		mutex_enter(&lfs_lock);
		/* Cleaner might have run while we were reading, check again */
		if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
			break;

		error = mtsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
				0, &lfs_lock);
		if (error) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}
#ifdef DEBUG
	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}
#endif
	fs->lfs_ravail += fsb;
	mutex_exit(&lfs_lock);

	return 0;
}

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;

	ASSERT_MAYBE_SEGLOCK(fs);
	if (vp2) {
		/* Make sure we're not in the process of reclaiming vp2 */
		mutex_enter(&lfs_lock);
		while (fs->lfs_flags & LFS_UNDIROP) {
			mtsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
			    &lfs_lock);
		}
		mutex_exit(&lfs_lock);
	}

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

#ifdef DIAGNOSTIC
	mutex_enter(&lfs_lock);
	if (fsb > 0)
		lfs_rescount++;
	else if (fsb < 0)
		lfs_rescount--;
	if (lfs_rescount < 0)
		panic("lfs_rescount");
	mutex_exit(&lfs_lock);
#endif

	/*
	 * XXX
	 * vref vnodes here so that cleaner doesn't try to reuse them.
	 * (see XXX comment in lfs_reserveavail)
	 */
	vhold(vp);
	if (vp2 != NULL) {
		vhold(vp2);
	}

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		goto done;

	/*
	 * XXX just a guess. should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2, fsb, fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

done:
	holdrele(vp);
	if (vp2 != NULL) {
		holdrele(vp2);
	}

	return error;
}
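
/*
 * Usage sketch (illustrative only): callers bracket an operation that
 * may dirty blocks with a positive reservation and a matching negative
 * one, e.g.
 *
 *	error = lfs_reserve(fs, vp, NULL, fsb);
 *	if (error)
 *		return error;
 *	...operation that dirties up to fsb frags...
 *	lfs_reserve(fs, vp, NULL, -fsb);
 *
 * A negative call never sleeps and wakes any waiters, which is why the
 * KASSERTs above only require vp to be locked when fsb >= 0.
 */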

int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int needed;

	ASSERT_NO_SEGLOCK(fs);
	needed = fsb + btofsb(fs, fs->lfs_sumsize) +
		 ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
		   1) << (fs->lfs_bshift - fs->lfs_ffshift));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %ld, avail = %ld\n",
		      (long)fsb, (long)fs->lfs_uinodes, (long)needed,
		      (long)fs->lfs_avail));
#endif
		return 0;
	}
	return 1;
}
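
/*
 * Illustrative sizing sketch (hypothetical numbers, not from any real
 * filesystem): with 10 dirty inodes packed INOPB = 32 to a block, a
 * segment table of 2 blocks, and a block-to-fragment shift of 3
 * (8 fragments per block), lfs_fits() computes
 *
 *	needed = fsb + btofsb(fs, lfs_sumsize)
 *	       + (howmany(10 + 1, 32) + 2 + 1) << 3
 *	       = fsb + btofsb(fs, lfs_sumsize) + 32 fragments
 *
 * so a request only "fits" if lfs_avail exceeds the new data plus this
 * metadata overhead.
 */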

int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	ASSERT_NO_SEGLOCK(fs);
	/* Push cleaner blocks through regardless */
	mutex_enter(&lfs_lock);
	if (LFS_SEGLOCK_HELD(fs) &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		mutex_exit(&lfs_lock);
		return 0;
	}
	mutex_exit(&lfs_lock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));
#endif

		lfs_wakeup_cleaner(fs);
#ifdef DIAGNOSTIC
		if (LFS_SEGLOCK_HELD(fs))
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}

int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	struct vnode *vp;
	int fsb;

	vp = bp->b_vp;
	fs = VFSTOUFS(vp->v_mount)->um_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT(((bp->b_oflags | bp->b_flags) & (BO_DELWRI|B_LOCKED))
	    != BO_DELWRI);

	/*
	 * Don't write *any* blocks if we're mounted read-only, or
	 * if we are "already unmounted".
	 *
	 * In particular the cleaner can't write blocks either.
	 */
	if (fs->lfs_ronly || (fs->lfs_pflags & LFS_PF_CLEAN)) {
		bp->b_oflags &= ~BO_DELWRI;
		bp->b_flags |= B_READ; /* XXX is this right? --ks */
		bp->b_error = 0;
		mutex_enter(&bufcache_lock);
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_cflags &= ~BC_BUSY;
		else
			brelsel(bp, 0);
		mutex_exit(&bufcache_lock);
		return (fs->lfs_ronly ? EROFS : 0);
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if ((bp->b_flags & B_LOCKED) == 0) {
		fsb = numfrags(fs, bp->b_bcount);

		ip = VTOI(vp);
		mutex_enter(&lfs_lock);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		mutex_exit(&lfs_lock);
		fs->lfs_avail -= fsb;

		mutex_enter(&bufcache_lock);
		mutex_enter(vp->v_interlock);
		bp->b_oflags = (bp->b_oflags | BO_DELWRI) & ~BO_DONE;
		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~B_READ;
		bp->b_error = 0;
		reassignbuf(bp, bp->b_vp);
		mutex_exit(vp->v_interlock);
	} else {
		mutex_enter(&bufcache_lock);
	}

	if (bp->b_iodone != NULL)
		bp->b_cflags &= ~BC_BUSY;
	else
		brelsel(bp, 0);
	mutex_exit(&bufcache_lock);

	return (0);
}
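
/*
 * Caller sketch (illustrative, not compiled): ordinary filesystem code
 * writes a dirty buffer through the vnode op table, which for LFS
 * resolves to lfs_bwrite() above; the cleaner instead calls
 * lfs_bwrite_ext() directly so it can pass BW_CLEAN:
 *
 *	bp = getblk(vp, lbn, size, 0, 0);
 *	...modify bp->b_data...
 *	VOP_BWRITE(bp->b_vp, bp);    (-> lfs_bwrite -> lfs_bwrite_ext(bp, 0))
 *
 * Either way the buffer ends up B_LOCKED on the LOCKED queue rather
 * than being written synchronously; the segment writer does the I/O.
 */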

/*
 * Called and return with the lfs_lock held.
 */
void
lfs_flush_fs(struct lfs *fs, int flags)
{
	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));
	if (fs->lfs_ronly)
		return;

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	fs->lfs_pdflush = 0;
	mutex_exit(&lfs_lock);
	lfs_writer_enter(fs, "fldirop");
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	lfs_writer_leave(fs);
	mutex_enter(&lfs_lock);
	fs->lfs_favail = 0; /* XXX */
}

/*
 * This routine initiates segment writes when LFS is consuming too many
 * resources.  Ideally the pageout daemon would be able to direct LFS
 * more subtly.
 * XXX We have one static count of locked buffers;
 * XXX need to think more about the multiple filesystem case.
 *
 * Called and return with lfs_lock held.
 * If fs != NULL, we hold the segment lock for fs.
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;
	struct lfs *tfs;

	KASSERT(mutex_owned(&lfs_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	/* XXX should we include SEGM_CKP here? */
	if (lfs_writing && !(flags & SEGM_SYNC)) {
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing)
		cv_wait(&lfs_writing_cv, &lfs_lock);
	lfs_writing = 1;

	mutex_exit(&lfs_lock);

	if (only_onefs) {
		KASSERT(fs != NULL);
		if (vfs_busy(fs->lfs_ivnode->v_mount, NULL))
			goto errout;
		mutex_enter(&lfs_lock);
		lfs_flush_fs(fs, flags);
		mutex_exit(&lfs_lock);
		vfs_unbusy(fs->lfs_ivnode->v_mount, false, NULL);
	} else {
		locked_fakequeue_count = 0;
		mutex_enter(&mountlist_lock);
		for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
		     mp = nmp) {
			if (vfs_busy(mp, &nmp)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				continue;
			}
			if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
			    sizeof(mp->mnt_stat.f_fstypename)) == 0) {
				tfs = VFSTOUFS(mp)->um_lfs;
				mutex_enter(&lfs_lock);
				lfs_flush_fs(tfs, flags);
				mutex_exit(&lfs_lock);
			}
			vfs_unbusy(mp, false, &nmp);
		}
		mutex_exit(&mountlist_lock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

    errout:
	mutex_enter(&lfs_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}

#define INOCOUNT(fs) howmany((fs)->lfs_uinodes, INOPB(fs))
#define INOBYTES(fs) ((fs)->lfs_uinodes * sizeof (struct ufs1_dinode))
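
/*
 * Illustrative only (hypothetical numbers): with 100 dirty inodes and
 * INOPB(fs) = 64 inodes per block, INOCOUNT(fs) = howmany(100, 64) = 2
 * inode blocks, and INOBYTES(fs) = 100 * sizeof(struct ufs1_dinode) =
 * 12800 bytes.  These estimates are added to the locked-queue totals
 * in lfs_check() below so that pending inode writes count against the
 * same limits as ordinary dirty buffers.
 */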

/*
 * make sure that we don't have too many locked buffers.
 * flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern pid_t lfs_writer_daemon;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	mutex_enter(&lfs_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		++fs->lfs_diropwait;
		mtsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
			&lfs_lock);
		--fs->lfs_diropwait;
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
		DLOG((DLOG_FLUSH, "lfs_check: lfdvc = %d, max %d\n",
		      fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	/* If there are too many pending dirops, we have to flush them. */
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		mutex_exit(&lfs_lock);
		lfs_flush_dirops(fs);
		mutex_enter(&lfs_lock);
	} else if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
	    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
	    lfs_subsys_pages > LFS_MAX_PAGES ||
	    fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		++fs->lfs_pdflush;
		wakeup(&lfs_writer_daemon);
	}

	while (locked_queue_count + INOCOUNT(fs) >= LFS_WAIT_BUFS ||
		locked_queue_bytes + INOBYTES(fs) >= LFS_WAIT_BYTES ||
		lfs_subsys_pages > LFS_WAIT_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP) {

		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		++locked_queue_waiters;
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		--locked_queue_waiters;
		if (error != EWOULDBLOCK)
			break;

		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) >= LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) >= LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	mutex_exit(&lfs_lock);
	return (error);
}

/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
	struct buf *bp;
	size_t nbytes;

	ASSERT_MAYBE_SEGLOCK(fs);
	nbytes = roundup(size, fsbtob(fs, 1));

	bp = getiobuf(NULL, true);
	if (nbytes) {
		bp->b_data = lfs_malloc(fs, nbytes, type);
		/* memset(bp->b_data, 0, nbytes); */
	}
#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif

	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_cflags = BC_BUSY | BC_NOCACHE;
	bp->b_private = fs;

	mutex_enter(&bufcache_lock);
	mutex_enter(vp->v_interlock);
	bgetvp(vp, bp);
	mutex_exit(vp->v_interlock);
	mutex_exit(&bufcache_lock);

	return (bp);
}

void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		mutex_enter(&bufcache_lock);
		mutex_enter(vp->v_interlock);
		brelvp(bp);
		mutex_exit(vp->v_interlock);
		mutex_exit(&bufcache_lock);
	}
	if (!(bp->b_cflags & BC_INVAL)) { /* BC_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	putiobuf(bp);
}
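
/*
 * Usage sketch (illustrative only): buffers created by lfs_newbuf()
 * bypass the buffer cache, so they must be released with lfs_freebuf()
 * rather than brelse().  A typical pattern looks like:
 *
 *	bp = lfs_newbuf(fs, vp, daddr, size, LFS_NB_UNKNOWN);
 *	memcpy(bp->b_data, src, size);
 *	...hand bp to the segment writer; lfs_callback ultimately
 *	   disposes of it via lfs_freebuf()...
 */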

/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, const char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;

	mutex_enter(&bufcache_lock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist) {
		KASSERT(bp->b_iodone == NULL);
		n++;
		size += bp->b_bufsize;
#ifdef DIAGNOSTIC
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	}
	if (size != *bytes) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	}
	*count = n;
	*bytes = size;
	mutex_exit(&bufcache_lock);
	return;
}

int
lfs_wait_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_WAIT_RESOURCE(active + inactive + uvmexp.free, 1);
}

int
lfs_max_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_MAX_RESOURCE(active + inactive + uvmexp.free, 1);
}
843