/*	$NetBSD: lfs_bio.c,v 1.120 2011/07/11 08:27:40 hannken Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.120 2011/07/11 08:27:40 hannken Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * protected by lfs_lock.
 */
int	locked_queue_count   = 0;	/* Count of locked-down buffers. */
long	locked_queue_bytes   = 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages     = 0;	/* Total number of LFS-written pages */
int	lfs_fs_pagetrip	     = 0;	/* # of pages to trip per-fs write */
int	lfs_writing	     = 0;	/* Set if already kicked off a writer
					   because of buffer space */

/* Lock and condition variables for above. */
kcondvar_t	locked_queue_cv;
kcondvar_t	lfs_writing_cv;
kmutex_t	lfs_lock;

extern int lfs_dostats;

/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

static int lfs_fits_buf(struct lfs *, int, int);
static int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
static int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int);

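/*
 * Return nonzero if n more buffers totalling "bytes" more bytes would
 * still fit under the LFS_WAIT_BUFS and LFS_WAIT_BYTES limits, counting
 * both the buffers actually on the locked queue and the amounts already
 * reserved via lfs_reservebuf().
 */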
static int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n <= LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes <= LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}

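/*
 * Reserve (n > 0) or release (n < 0) room on the locked-buffer queue.
 * A reservation flushes and sleeps until it fits; a release wakes any
 * waiters.  The vp and vp2 arguments are currently unused.
 */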
/* ARGSUSED */
static int
lfs_reservebuf(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int n, int bytes)
{
	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	mutex_enter(&lfs_lock);
	while (n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		lfs_flush(fs, 0, 0);

		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		if (error && error != EWOULDBLOCK) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	if (n < 0)
		cv_broadcast(&locked_queue_cv);

	mutex_exit(&lfs_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (e.g. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc.)
 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
 * or rearrange the vnodeop interface to leave vnode locking to file system
 * specific code so that each file system can have its own vnode locking and
 * vnode re-using strategies.
 */
static int
lfs_reserveavail(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;

	ASSERT_MAYBE_SEGLOCK(fs);
	slept = 0;
	mutex_enter(&lfs_lock);
	while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
		mutex_exit(&lfs_lock);
#if 0
		/*
		 * XXX ideally, we should unlock vnodes here
		 * because we might sleep a very long time.
		 */
		VOP_UNLOCK(vp);
		if (vp2 != NULL) {
			VOP_UNLOCK(vp2);
		}
#else
		/*
		 * XXX since we'll sleep waiting for the cleaner while
		 * holding the vnode lock, deadlock will occur if the
		 * cleaner tries to lock the vnode.
		 * (e.g. lfs_markv -> lfs_fastvget -> getnewvnode -> vclean)
		 */
#endif

		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %d,"
			      " est_bfree = %d)\n",
			      fsb + fs->lfs_ravail + fs->lfs_favail,
			      fs->lfs_bfree, LFS_EST_BFREE(fs)));
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		lfs_wakeup_cleaner(fs);

		mutex_enter(&lfs_lock);
		/* Cleaner might have run while we were reading, check again */
		if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
			break;

		error = mtsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
				0, &lfs_lock);
#if 0
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
		vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
#endif
		if (error) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}
#ifdef DEBUG
	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}
#endif
	fs->lfs_ravail += fsb;
	mutex_exit(&lfs_lock);

	return 0;
}

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

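/*
 * Reserve both segment space (lfs_reserveavail) and locked-buffer
 * space (lfs_reservebuf) for an operation on vp, and optionally vp2.
 * A negative fsb releases a previous reservation.  The call is a no-op
 * for dirop vnodes (IN_ADIROP) and for the vnode currently being
 * unlocked by the segment writer, since those callers cannot safely
 * wait.
 *
 * A typical caller (a sketch only; see the real callers elsewhere in
 * LFS for authoritative usage) brackets the allocating operation:
 *
 *	error = lfs_reserve(fs, vp, NULL, fsb);
 *	if (error == 0) {
 *		...operation that may allocate up to fsb blocks...
 *		lfs_reserve(fs, vp, NULL, -fsb);
 *	}
 */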
int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	if (vp2) {
		/* Make sure we're not in the process of reclaiming vp2 */
		mutex_enter(&lfs_lock);
		while (fs->lfs_flags & LFS_UNDIROP) {
			mtsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
			    &lfs_lock);
		}
		mutex_exit(&lfs_lock);
	}

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || !(VTOI(vp2)->i_flag & IN_ADIROP));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
#ifdef DIAGNOSTIC
	if (cantwait) {
		if (fsb > 0)
			lfs_rescountdirop++;
		else if (fsb < 0)
			lfs_rescountdirop--;
		if (lfs_rescountdirop < 0)
			panic("lfs_rescountdirop");
	} else {
		if (fsb > 0)
			lfs_rescount++;
		else if (fsb < 0)
			lfs_rescount--;
		if (lfs_rescount < 0)
			panic("lfs_rescount");
	}
#endif
	if (cantwait)
		return 0;

	/*
	 * XXX
	 * vhold vnodes here so that the cleaner doesn't try to reuse them.
	 * (see XXX comment in lfs_reserveavail)
	 */
	vhold(vp);
	if (vp2 != NULL) {
		vhold(vp2);
	}

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		goto done;

	/*
	 * XXX just a guess. should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2, fsb, fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

done:
	holdrele(vp);
	if (vp2 != NULL) {
		holdrele(vp2);
	}

	return error;
}

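/*
 * VOP_BWRITE entry point.  All the real work is done by
 * lfs_bwrite_ext(); an asynchronous bwrite on a writable LFS is a bug,
 * hence the DIAGNOSTIC panic below.
 */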
int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int needed;

	ASSERT_NO_SEGLOCK(fs);
	needed = fsb + btofsb(fs, fs->lfs_sumsize) +
		 ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
		   1) << (fs->lfs_bshift - fs->lfs_ffshift));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %ld, avail = %ld\n",
		      (long)fsb, (long)fs->lfs_uinodes, (long)needed,
		      (long)fs->lfs_avail));
#endif
		return 0;
	}
	return 1;
}

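/*
 * Sleep until fsb blocks would fit on disk, waking the cleaner as
 * necessary.  Writes done on behalf of the cleaner, or for a forced
 * checkpoint, are pushed through regardless, since they are part of
 * freeing space.
 */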
int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	ASSERT_NO_SEGLOCK(fs);
	/* Push cleaner blocks through regardless */
	mutex_enter(&lfs_lock);
	if (LFS_SEGLOCK_HELD(fs) &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		mutex_exit(&lfs_lock);
		return 0;
	}
	mutex_exit(&lfs_lock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));
#endif

		lfs_wakeup_cleaner(fs);
#ifdef DIAGNOSTIC
		if (LFS_SEGLOCK_HELD(fs))
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}

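/*
 * Write a block LFS-style: mark the buffer BO_DELWRI and B_LOCKED,
 * charge its space against lfs_avail, and leave it for the segment
 * writer to collect.  With BW_CLEAN the inode is marked IN_CLEANING
 * instead of IN_MODIFIED.  On a read-only or "already unmounted"
 * filesystem the buffer is discarded instead of written.
 */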
int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	struct vnode *vp;
	int fsb;

	vp = bp->b_vp;
	fs = VFSTOUFS(vp->v_mount)->um_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT(((bp->b_oflags | bp->b_flags) & (BO_DELWRI|B_LOCKED))
	    != BO_DELWRI);

	/*
	 * Don't write *any* blocks if we're mounted read-only, or
	 * if we are "already unmounted".
	 *
	 * In particular the cleaner can't write blocks either.
	 */
	if (fs->lfs_ronly || (fs->lfs_pflags & LFS_PF_CLEAN)) {
		bp->b_oflags &= ~BO_DELWRI;
		bp->b_flags |= B_READ;
		bp->b_error = 0;
		mutex_enter(&bufcache_lock);
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_cflags &= ~BC_BUSY;
		else
			brelsel(bp, 0);
		mutex_exit(&bufcache_lock);
		return (fs->lfs_ronly ? EROFS : 0);
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if ((bp->b_flags & B_LOCKED) == 0) {
		fsb = numfrags(fs, bp->b_bcount);

		ip = VTOI(vp);
		mutex_enter(&lfs_lock);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		mutex_exit(&lfs_lock);
		fs->lfs_avail -= fsb;

		mutex_enter(&bufcache_lock);
		mutex_enter(vp->v_interlock);
		bp->b_oflags = (bp->b_oflags | BO_DELWRI) & ~BO_DONE;
		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~B_READ;
		bp->b_error = 0;
		reassignbuf(bp, bp->b_vp);
		mutex_exit(vp->v_interlock);
	} else {
		mutex_enter(&bufcache_lock);
	}

	if (bp->b_iodone != NULL)
		bp->b_cflags &= ~BC_BUSY;
	else
		brelsel(bp, 0);
	mutex_exit(&bufcache_lock);

	return (0);
}

/*
 * Called and returns with the lfs_lock held.
 */
void
lfs_flush_fs(struct lfs *fs, int flags)
{
	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));
	if (fs->lfs_ronly)
		return;

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	mutex_exit(&lfs_lock);
	lfs_writer_enter(fs, "fldirop");
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	lfs_writer_leave(fs);
	mutex_enter(&lfs_lock);
	fs->lfs_favail = 0; /* XXX */
}

/*
 * This routine initiates segment writes when LFS is consuming too many
 * resources.  Ideally the pageout daemon would be able to direct LFS
 * more subtly.
 * XXX We have one static count of locked buffers;
 * XXX need to think more about the multiple filesystem case.
 *
 * Called and returns with lfs_lock held.
 * If fs != NULL, we hold the segment lock for fs.
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;
	struct lfs *tfs;

	KASSERT(mutex_owned(&lfs_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	/* XXX should we include SEGM_CKP here? */
	if (lfs_writing && !(flags & SEGM_SYNC)) {
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing)
		cv_wait(&lfs_writing_cv, &lfs_lock);
	lfs_writing = 1;

	mutex_exit(&lfs_lock);

	if (only_onefs) {
		KASSERT(fs != NULL);
		if (vfs_busy(fs->lfs_ivnode->v_mount, NULL))
			goto errout;
		mutex_enter(&lfs_lock);
		lfs_flush_fs(fs, flags);
		mutex_exit(&lfs_lock);
		vfs_unbusy(fs->lfs_ivnode->v_mount, false, NULL);
	} else {
		locked_fakequeue_count = 0;
		mutex_enter(&mountlist_lock);
		for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
		     mp = nmp) {
			if (vfs_busy(mp, &nmp)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				continue;
			}
			if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
			    sizeof(mp->mnt_stat.f_fstypename)) == 0) {
				tfs = VFSTOUFS(mp)->um_lfs;
				mutex_enter(&lfs_lock);
				lfs_flush_fs(tfs, flags);
				mutex_exit(&lfs_lock);
			}
			vfs_unbusy(mp, false, &nmp);
		}
		mutex_exit(&mountlist_lock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

    errout:
	mutex_enter(&lfs_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}

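/*
 * Pro-forma cost of flushing the dirty inodes: INOCOUNT estimates the
 * number of inode blocks, and INOBYTES the number of bytes, that the
 * lfs_uinodes dirty inodes would add to the locked queue.
 */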
#define INOCOUNT(fs) howmany((fs)->lfs_uinodes, INOPB(fs))
#define INOBYTES(fs) ((fs)->lfs_uinodes * sizeof (struct ufs1_dinode))

/*
 * Make sure that we don't have too many locked buffers.
 * Flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern pid_t lfs_writer_daemon;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	mutex_enter(&lfs_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		++fs->lfs_diropwait;
		mtsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
			&lfs_lock);
		--fs->lfs_diropwait;
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
		DLOG((DLOG_FLUSH, "lfs_check: lfdvc = %d, max %d\n",
		      fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	/* If there are too many pending dirops, we have to flush them. */
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		flags |= SEGM_CKP;
	}

	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
	    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
	    lfs_subsys_pages > LFS_MAX_PAGES ||
	    fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		++fs->lfs_pdflush;
		wakeup(&lfs_writer_daemon);
	}

	while (locked_queue_count + INOCOUNT(fs) >= LFS_WAIT_BUFS ||
		locked_queue_bytes + INOBYTES(fs) >= LFS_WAIT_BYTES ||
		lfs_subsys_pages > LFS_WAIT_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP) {

		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		if (error != EWOULDBLOCK)
			break;

		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) >= LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) >= LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	mutex_exit(&lfs_lock);
	return (error);
}

739 
740 /*
741  * Allocate a new buffer header.
742  */
743 struct buf *
744 lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
745 {
746 	struct buf *bp;
747 	size_t nbytes;
748 
749 	ASSERT_MAYBE_SEGLOCK(fs);
750 	nbytes = roundup(size, fsbtob(fs, 1));
751 
752 	bp = getiobuf(NULL, true);
753 	if (nbytes) {
754 		bp->b_data = lfs_malloc(fs, nbytes, type);
755 		/* memset(bp->b_data, 0, nbytes); */
756 	}
757 #ifdef DIAGNOSTIC
758 	if (vp == NULL)
759 		panic("vp is NULL in lfs_newbuf");
760 	if (bp == NULL)
761 		panic("bp is NULL after malloc in lfs_newbuf");
762 #endif
763 
764 	bp->b_bufsize = size;
765 	bp->b_bcount = size;
766 	bp->b_lblkno = daddr;
767 	bp->b_blkno = daddr;
768 	bp->b_error = 0;
769 	bp->b_resid = 0;
770 	bp->b_iodone = lfs_callback;
771 	bp->b_cflags = BC_BUSY | BC_NOCACHE;
772 	bp->b_private = fs;
773 
774 	mutex_enter(&bufcache_lock);
775 	mutex_enter(vp->v_interlock);
776 	bgetvp(vp, bp);
777 	mutex_exit(vp->v_interlock);
778 	mutex_exit(&bufcache_lock);
779 
780 	return (bp);
781 }
782 
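/*
 * Release a buffer allocated with lfs_newbuf(): detach it from its
 * vnode, free its data unless BC_INVAL marks it as a "fake" buffer
 * whose data is owned elsewhere, and free the header itself.
 */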
void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		mutex_enter(&bufcache_lock);
		mutex_enter(vp->v_interlock);
		brelvp(bp);
		mutex_exit(vp->v_interlock);
		mutex_exit(&bufcache_lock);
	}
	if (!(bp->b_cflags & BC_INVAL)) { /* BC_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	putiobuf(bp);
}

/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, const char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;

	mutex_enter(&bufcache_lock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist) {
		KASSERT(bp->b_iodone == NULL);
		n++;
		size += bp->b_bufsize;
#ifdef DIAGNOSTIC
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	}
	if (size != *bytes) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	}
	*count = n;
	*bytes = size;
	mutex_exit(&bufcache_lock);
	return;
}

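/*
 * Page thresholds for pagedaemon interaction, derived from the amount
 * of pageable memory: lfs_wait_pages() appears to be the level at
 * which page writers must wait (cf. LFS_WAIT_PAGES), and
 * lfs_max_pages() the level that triggers a flush (cf. LFS_MAX_PAGES).
 */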
int
lfs_wait_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_WAIT_RESOURCE(active + inactive + uvmexp.free, 1);
}

int
lfs_max_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_MAX_RESOURCE(active + inactive + uvmexp.free, 1);
}
859