/*	$NetBSD: lfs_subr.c,v 1.71 2007/10/10 20:42:35 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_subr.c	8.4 (Berkeley) 5/8/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_subr.c,v 1.71 2007/10/10 20:42:35 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/kauth.h>

#include <ufs/ufs/inode.h>
#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

#ifdef DEBUG
const char *lfs_res_names[LFS_NB_COUNT] = {
	"summary",
	"superblock",
	"file block",
	"cluster",
	"clean",
	"blkiov",
};
#endif

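/* Number of reserve blocks set aside for each type; see lfs_setup_resblks(). */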
int lfs_res_qty[LFS_NB_COUNT] = {
	LFS_N_SUMMARIES,
	LFS_N_SBLOCKS,
	LFS_N_IBLOCKS,
	LFS_N_CLUSTERS,
	LFS_N_CLEAN,
	LFS_N_BLKIOV,
};

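/*
 * Allocate the "last resort" reserve blocks used by lfs_malloc() and
 * lfs_free(), initialize the reserve hash table, and create the pools
 * for cluster, segment and block-pointer-array structures.
 */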
void
lfs_setup_resblks(struct lfs *fs)
{
	int i, j;
	int maxbpp;

	ASSERT_NO_SEGLOCK(fs);
	fs->lfs_resblk = (res_t *)malloc(LFS_N_TOTAL * sizeof(res_t), M_SEGMENT,
					  M_WAITOK);
	for (i = 0; i < LFS_N_TOTAL; i++) {
		fs->lfs_resblk[i].inuse = 0;
		fs->lfs_resblk[i].p = NULL;
	}
	for (i = 0; i < LFS_RESHASH_WIDTH; i++)
		LIST_INIT(fs->lfs_reshash + i);

	/*
	 * These types of allocations can be larger than a page,
	 * so we can't use the pool subsystem for them.
	 */
	for (i = 0, j = 0; j < LFS_N_SUMMARIES; j++, i++)
		fs->lfs_resblk[i].size = fs->lfs_sumsize;
	for (j = 0; j < LFS_N_SBLOCKS; j++, i++)
		fs->lfs_resblk[i].size = LFS_SBPAD;
	for (j = 0; j < LFS_N_IBLOCKS; j++, i++)
		fs->lfs_resblk[i].size = fs->lfs_bsize;
	for (j = 0; j < LFS_N_CLUSTERS; j++, i++)
		fs->lfs_resblk[i].size = MAXPHYS;
	for (j = 0; j < LFS_N_CLEAN; j++, i++)
		fs->lfs_resblk[i].size = MAXPHYS;
	for (j = 0; j < LFS_N_BLKIOV; j++, i++)
		fs->lfs_resblk[i].size = LFS_MARKV_MAXBLKCNT * sizeof(BLOCK_INFO);

	for (i = 0; i < LFS_N_TOTAL; i++) {
		fs->lfs_resblk[i].p = malloc(fs->lfs_resblk[i].size,
					     M_SEGMENT, M_WAITOK);
	}

	/*
	 * Initialize pools for small types (XXX is BPP small?)
	 */
	pool_init(&fs->lfs_clpool, sizeof(struct lfs_cluster), 0, 0, 0,
		"lfsclpl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&fs->lfs_segpool, sizeof(struct segment), 0, 0, 0,
		"lfssegpool", &pool_allocator_nointr, IPL_NONE);
	maxbpp = ((fs->lfs_sumsize - SEGSUM_SIZE(fs)) / sizeof(int32_t) + 2);
	maxbpp = MIN(maxbpp, segsize(fs) / fs->lfs_fsize + 2);
	pool_init(&fs->lfs_bpppool, maxbpp * sizeof(struct buf *), 0, 0, 0,
		"lfsbpppl", &pool_allocator_nointr, IPL_NONE);
}

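/*
 * Tear down what lfs_setup_resblks() created: destroy the pools and,
 * once no reserve block remains in use, free the reserve memory.
 */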
void
lfs_free_resblks(struct lfs *fs)
{
	int i;

	pool_destroy(&fs->lfs_bpppool);
	pool_destroy(&fs->lfs_segpool);
	pool_destroy(&fs->lfs_clpool);

	simple_lock(&fs->lfs_interlock);
	for (i = 0; i < LFS_N_TOTAL; i++) {
		while (fs->lfs_resblk[i].inuse)
			ltsleep(&fs->lfs_resblk, PRIBIO + 1, "lfs_free", 0,
				&fs->lfs_interlock);
		if (fs->lfs_resblk[i].p != NULL)
			free(fs->lfs_resblk[i].p, M_SEGMENT);
	}
	free(fs->lfs_resblk, M_SEGMENT);
	simple_unlock(&fs->lfs_interlock);
}

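/* Hash a reserve-block address into a bucket of the reserve hash table. */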
static unsigned int
lfs_mhash(void *vp)
{
	return (unsigned int)(((unsigned long)vp) >> 2) % LFS_RESHASH_WIDTH;
}

/*
 * Return memory of the given size for the given purpose, or use one of a
 * number of spare last-resort buffers, if malloc returns NULL.
 */
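/*
 * Illustration only: a caller obtaining and later releasing a summary-sized
 * block (the LFS_NB_* type constants are assumed to come from lfs.h) might do
 *
 *	void *sum = lfs_malloc(fs, fs->lfs_sumsize, LFS_NB_SUMMARY);
 *	...
 *	lfs_free(fs, sum, LFS_NB_SUMMARY);
 */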
void *
lfs_malloc(struct lfs *fs, size_t size, int type)
{
	struct lfs_res_blk *re;
	void *r;
	int i, s, start;
	unsigned int h;

	ASSERT_MAYBE_SEGLOCK(fs);
	r = NULL;

	/* No reserve blocks for this type; just do a waiting malloc */
	if (lfs_res_qty[type] == 0) {
		r = malloc(size, M_SEGMENT, M_WAITOK);
		return r;
	}

	/* Otherwise try a quick malloc, and if it works, great */
	if ((r = malloc(size, M_SEGMENT, M_NOWAIT)) != NULL) {
		return r;
	}

	/*
	 * If malloc returned NULL, we are forced to use one of our
	 * reserve blocks.  We have on hand at least one summary block,
	 * at least one cluster block, at least one superblock,
	 * and several indirect blocks.
	 */

	simple_lock(&fs->lfs_interlock);
	/* skip over blocks of other types */
	for (i = 0, start = 0; i < type; i++)
		start += lfs_res_qty[i];
	while (r == NULL) {
		for (i = 0; i < lfs_res_qty[type]; i++) {
			if (fs->lfs_resblk[start + i].inuse == 0) {
				re = fs->lfs_resblk + start + i;
				re->inuse = 1;
				r = re->p;
				KASSERT(re->size >= size);
				h = lfs_mhash(r);
				s = splbio();
				LIST_INSERT_HEAD(&fs->lfs_reshash[h], re, res);
				splx(s);
				simple_unlock(&fs->lfs_interlock);
				return r;
			}
		}
		DLOG((DLOG_MALLOC, "sleeping on %s (%d)\n",
		      lfs_res_names[type], lfs_res_qty[type]));
		ltsleep(&fs->lfs_resblk, PVM, "lfs_malloc", 0,
			&fs->lfs_interlock);
		DLOG((DLOG_MALLOC, "done sleeping on %s\n",
		      lfs_res_names[type]));
	}
	/* NOTREACHED */
	simple_unlock(&fs->lfs_interlock);
	return r;
}

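/*
 * Release memory obtained from lfs_malloc().  If it is one of the reserve
 * blocks, mark it available again and wake any waiters; otherwise it came
 * from malloc() and is freed directly.
 */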
void
lfs_free(struct lfs *fs, void *p, int type)
{
	int s;
	unsigned int h;
	res_t *re;
#ifdef DEBUG
	int i;
#endif

	ASSERT_MAYBE_SEGLOCK(fs);
	h = lfs_mhash(p);
	simple_lock(&fs->lfs_interlock);
	s = splbio();
	LIST_FOREACH(re, &fs->lfs_reshash[h], res) {
		if (re->p == p) {
			KASSERT(re->inuse == 1);
			LIST_REMOVE(re, res);
			re->inuse = 0;
			wakeup(&fs->lfs_resblk);
			splx(s);
			simple_unlock(&fs->lfs_interlock);
			return;
		}
	}
#ifdef DEBUG
	for (i = 0; i < LFS_N_TOTAL; i++) {
		if (fs->lfs_resblk[i].p == p)
			panic("lfs_free: inconsistent reserved block");
	}
#endif
	splx(s);
	simple_unlock(&fs->lfs_interlock);

	/*
	 * Not one of our reserve blocks, so it came straight from
	 * malloc(); free it that way.
	 */
	free(p, M_SEGMENT);
}

/*
 * lfs_seglock --
 *	Single thread the segment writer.
 */
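/*
 * Sketch of typical use (flags such as SEGM_PROT are defined in lfs.h):
 *
 *	lfs_seglock(fs, SEGM_PROT);
 *	... build and write partial segments ...
 *	lfs_segunlock(fs);
 *
 * The lock is recursive for the holding pid/lwp; pagedaemon callers get
 * EWOULDBLOCK instead of sleeping.
 */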
int
lfs_seglock(struct lfs *fs, unsigned long flags)
{
	struct segment *sp;

	simple_lock(&fs->lfs_interlock);
	if (fs->lfs_seglock) {
		if (fs->lfs_lockpid == curproc->p_pid &&
		    fs->lfs_locklwp == curlwp->l_lid) {
			simple_unlock(&fs->lfs_interlock);
			++fs->lfs_seglock;
			fs->lfs_sp->seg_flags |= flags;
			return 0;
		} else if (flags & SEGM_PAGEDAEMON) {
			simple_unlock(&fs->lfs_interlock);
			return EWOULDBLOCK;
		} else {
			while (fs->lfs_seglock) {
				(void)ltsleep(&fs->lfs_seglock, PRIBIO + 1,
					"lfs_seglock", 0, &fs->lfs_interlock);
			}
		}
	}

	fs->lfs_seglock = 1;
	fs->lfs_lockpid = curproc->p_pid;
	fs->lfs_locklwp = curlwp->l_lid;
	simple_unlock(&fs->lfs_interlock);
	fs->lfs_cleanind = 0;

#ifdef DEBUG
	LFS_ENTER_LOG("seglock", __FILE__, __LINE__, 0, flags, curproc->p_pid);
#endif
	/* Drain fragment size changes out */
	rw_enter(&fs->lfs_fraglock, RW_WRITER);

	sp = fs->lfs_sp = pool_get(&fs->lfs_segpool, PR_WAITOK);
	sp->bpp = pool_get(&fs->lfs_bpppool, PR_WAITOK);
	sp->seg_flags = flags;
	sp->vp = NULL;
	sp->seg_iocount = 0;
	(void) lfs_initseg(fs);

	/*
	 * Keep a cumulative count of the outstanding I/O operations.  If the
	 * disk drive catches up with us it could go to zero before we finish,
	 * so we artificially increment it by one until we've scheduled all of
	 * the writes we intend to do.
	 */
	simple_lock(&fs->lfs_interlock);
	++fs->lfs_iocount;
	simple_unlock(&fs->lfs_interlock);
	return 0;
}

static void lfs_unmark_dirop(struct lfs *);

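/*
 * Walk the list of vnodes dirtied by directory operations and, for those
 * with no directory op or modification still pending, clear VU_DIROP,
 * drop them from the chain and release the extra reference.
 */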
static void
lfs_unmark_dirop(struct lfs *fs)
{
	struct inode *ip, *nip;
	struct vnode *vp;
	int doit;

	ASSERT_NO_SEGLOCK(fs);
	simple_lock(&fs->lfs_interlock);
	doit = !(fs->lfs_flags & LFS_UNDIROP);
	if (doit)
		fs->lfs_flags |= LFS_UNDIROP;
	if (!doit) {
		simple_unlock(&fs->lfs_interlock);
		return;
	}

	for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) {
		nip = TAILQ_NEXT(ip, i_lfs_dchain);
		simple_unlock(&fs->lfs_interlock);
		vp = ITOV(ip);

		simple_lock(&vp->v_interlock);
		if (VOP_ISLOCKED(vp) == LK_EXCLOTHER) {
			simple_lock(&fs->lfs_interlock);
			simple_unlock(&vp->v_interlock);
			continue;
		}
		if ((VTOI(vp)->i_flag & (IN_ADIROP | IN_ALLMOD)) == 0) {
			simple_lock(&fs->lfs_interlock);
			simple_lock(&lfs_subsys_lock);
			--lfs_dirvcount;
			simple_unlock(&lfs_subsys_lock);
			--fs->lfs_dirvcount;
			vp->v_uflag &= ~VU_DIROP;
			TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
			simple_unlock(&fs->lfs_interlock);
			wakeup(&lfs_dirvcount);
			simple_unlock(&vp->v_interlock);
			simple_lock(&fs->lfs_interlock);
			fs->lfs_unlockvp = vp;
			simple_unlock(&fs->lfs_interlock);
			vrele(vp);
			simple_lock(&fs->lfs_interlock);
			fs->lfs_unlockvp = NULL;
			simple_unlock(&fs->lfs_interlock);
		} else
			simple_unlock(&vp->v_interlock);
		simple_lock(&fs->lfs_interlock);
	}

	fs->lfs_flags &= ~LFS_UNDIROP;
	simple_unlock(&fs->lfs_interlock);
	wakeup(&fs->lfs_flags);
}

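/*
 * Mark clean any segments that are empty, inactive, and recorded that
 * way in both copies of the segment usage flags.
 */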
static void
lfs_auto_segclean(struct lfs *fs)
{
	int i, error, s, waited;

	ASSERT_SEGLOCK(fs);
	/*
	 * Now that we've swapped lfs_activesb, but while we still
	 * hold the segment lock, run through the segment list marking
	 * the empty ones clean.
	 * XXX - do we really need to do them all at once?
	 */
	waited = 0;
	for (i = 0; i < fs->lfs_nseg; i++) {
		if ((fs->lfs_suflags[0][i] &
		     (SEGUSE_ACTIVE | SEGUSE_DIRTY | SEGUSE_EMPTY)) ==
		    (SEGUSE_DIRTY | SEGUSE_EMPTY) &&
		    (fs->lfs_suflags[1][i] &
		     (SEGUSE_ACTIVE | SEGUSE_DIRTY | SEGUSE_EMPTY)) ==
		    (SEGUSE_DIRTY | SEGUSE_EMPTY)) {

			/* Make sure the sb is written before we clean */
			simple_lock(&fs->lfs_interlock);
			s = splbio();
			while (waited == 0 && fs->lfs_sbactive)
				ltsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs asb",
					0, &fs->lfs_interlock);
			splx(s);
			simple_unlock(&fs->lfs_interlock);
			waited = 1;

			if ((error = lfs_do_segclean(fs, i)) != 0) {
				DLOG((DLOG_CLEAN, "lfs_auto_segclean: lfs_do_segclean returned %d for seg %d\n", error, i));
			}
		}
		fs->lfs_suflags[1 - fs->lfs_activesb][i] =
			fs->lfs_suflags[fs->lfs_activesb][i];
	}
}

/*
 * lfs_segunlock --
 *	Release the segment lock.  On the final release, clean up the
 *	partial segment and, if this was a checkpoint, write the superblocks.
 */
void
lfs_segunlock(struct lfs *fs)
{
	struct segment *sp;
	unsigned long sync, ckp;
	struct buf *bp;
	int do_unmark_dirop = 0;

	sp = fs->lfs_sp;

	simple_lock(&fs->lfs_interlock);
	LOCK_ASSERT(LFS_SEGLOCK_HELD(fs));
	if (fs->lfs_seglock == 1) {
		if ((sp->seg_flags & (SEGM_PROT | SEGM_CLEAN)) == 0 &&
		    LFS_STARVED_FOR_SEGS(fs) == 0)
			do_unmark_dirop = 1;
		simple_unlock(&fs->lfs_interlock);
		sync = sp->seg_flags & SEGM_SYNC;
		ckp = sp->seg_flags & SEGM_CKP;

		/* We should have a segment summary, and nothing else */
		KASSERT(sp->cbpp == sp->bpp + 1);

		/* Free allocated segment summary */
		fs->lfs_offset -= btofsb(fs, fs->lfs_sumsize);
		bp = *sp->bpp;
		lfs_freebuf(fs, bp);

		pool_put(&fs->lfs_bpppool, sp->bpp);
		sp->bpp = NULL;

		/*
		 * If we're not sync, we're done with sp, get rid of it.
		 * Otherwise, we keep a local copy around but free
		 * fs->lfs_sp so another process can use it (we have to
		 * wait but they don't have to wait for us).
		 */
		if (!sync)
			pool_put(&fs->lfs_segpool, sp);
		fs->lfs_sp = NULL;

		/*
		 * If the I/O count is non-zero, sleep until it reaches zero.
		 * At the moment, the user's process hangs around so we can
		 * sleep.
		 */
		simple_lock(&fs->lfs_interlock);
		if (--fs->lfs_iocount == 0) {
			LFS_DEBUG_COUNTLOCKED("lfs_segunlock");
		}
		if (fs->lfs_iocount <= 1)
			wakeup(&fs->lfs_iocount);
		simple_unlock(&fs->lfs_interlock);
		/*
		 * If we're not checkpointing, we don't have to block
		 * other processes to wait for a synchronous write
		 * to complete.
		 */
		if (!ckp) {
#ifdef DEBUG
			LFS_ENTER_LOG("segunlock_std", __FILE__, __LINE__, 0, 0, curproc->p_pid);
#endif
			simple_lock(&fs->lfs_interlock);
			--fs->lfs_seglock;
			fs->lfs_lockpid = 0;
			fs->lfs_locklwp = 0;
			simple_unlock(&fs->lfs_interlock);
			wakeup(&fs->lfs_seglock);
		}
		/*
		 * We let checkpoints happen asynchronously.  That means
		 * that during recovery, we have to roll forward between
		 * the two segments described by the first and second
		 * superblocks to make sure that the checkpoint described
		 * by a superblock completed.
		 */
		simple_lock(&fs->lfs_interlock);
		while (ckp && sync && fs->lfs_iocount)
			(void)ltsleep(&fs->lfs_iocount, PRIBIO + 1,
				      "lfs_iocount", 0, &fs->lfs_interlock);
		while (sync && sp->seg_iocount) {
			(void)ltsleep(&sp->seg_iocount, PRIBIO + 1,
				     "seg_iocount", 0, &fs->lfs_interlock);
			DLOG((DLOG_SEG, "sleeping on iocount %x == %d\n", sp, sp->seg_iocount));
		}
		simple_unlock(&fs->lfs_interlock);
		if (sync)
			pool_put(&fs->lfs_segpool, sp);

		if (ckp) {
			fs->lfs_nactive = 0;
			/* If we *know* everything's on disk, write both sbs */
			/* XXX should wait for this one	 */
			if (sync)
				lfs_writesuper(fs, fs->lfs_sboffs[fs->lfs_activesb]);
			lfs_writesuper(fs, fs->lfs_sboffs[1 - fs->lfs_activesb]);
			if (!(fs->lfs_ivnode->v_mount->mnt_iflag & IMNT_UNMOUNT)) {
				lfs_auto_segclean(fs);
				/* If sync, we can clean the remainder too */
				if (sync)
					lfs_auto_segclean(fs);
			}
			fs->lfs_activesb = 1 - fs->lfs_activesb;
#ifdef DEBUG
			LFS_ENTER_LOG("segunlock_ckp", __FILE__, __LINE__, 0, 0, curproc->p_pid);
#endif
			simple_lock(&fs->lfs_interlock);
			--fs->lfs_seglock;
			fs->lfs_lockpid = 0;
			fs->lfs_locklwp = 0;
			simple_unlock(&fs->lfs_interlock);
			wakeup(&fs->lfs_seglock);
		}
		/* Reenable fragment size changes */
		rw_exit(&fs->lfs_fraglock);
		if (do_unmark_dirop)
			lfs_unmark_dirop(fs);
	} else if (fs->lfs_seglock == 0) {
		simple_unlock(&fs->lfs_interlock);
		panic("Seglock not held");
	} else {
		--fs->lfs_seglock;
		simple_unlock(&fs->lfs_interlock);
	}
}

/*
 * Drain dirops and start writer.
 *
 * No simple_locks are held when we enter and none are held when we return.
 */
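/*
 * Sketch of the usual pairing (illustration only; the wmesg string here
 * is arbitrary):
 *
 *	lfs_writer_enter(fs, "lfswriter");
 *	... work that must exclude directory operations ...
 *	lfs_writer_leave(fs);
 */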
int
lfs_writer_enter(struct lfs *fs, const char *wmesg)
{
	int error = 0;

	ASSERT_MAYBE_SEGLOCK(fs);
	simple_lock(&fs->lfs_interlock);

	/* disallow dirops during flush */
	fs->lfs_writer++;

	while (fs->lfs_dirops > 0) {
		++fs->lfs_diropwait;
		error = ltsleep(&fs->lfs_writer, PRIBIO+1, wmesg, 0,
				&fs->lfs_interlock);
		--fs->lfs_diropwait;
	}

	if (error)
		fs->lfs_writer--;

	simple_unlock(&fs->lfs_interlock);

	return error;
}

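/*
 * Allow directory operations again; wake anyone waiting on lfs_dirops
 * if we were the last writer.
 */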
void
lfs_writer_leave(struct lfs *fs)
{
	bool dowakeup;

	ASSERT_MAYBE_SEGLOCK(fs);
	simple_lock(&fs->lfs_interlock);
	dowakeup = !(--fs->lfs_writer);
	simple_unlock(&fs->lfs_interlock);
	if (dowakeup)
		wakeup(&fs->lfs_dirops);
}

/*
 * Unlock, wait for the cleaner, then relock to where we were before.
 * To be used only at a fairly high level, to address a paucity of free
 * segments propagated back from lfs_gop_write().
 */
void
lfs_segunlock_relock(struct lfs *fs)
{
	int n = fs->lfs_seglock;
	u_int16_t seg_flags;
	CLEANERINFO *cip;
	struct buf *bp;

	if (n == 0)
		return;

	/* Write anything we've already gathered to disk */
	lfs_writeseg(fs, fs->lfs_sp);

	/* Tell cleaner */
	LFS_CLEANERINFO(cip, fs, bp);
	cip->flags |= LFS_CLEANER_MUST_CLEAN;
	LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);

	/* Save segment flags for later */
	seg_flags = fs->lfs_sp->seg_flags;

	fs->lfs_sp->seg_flags |= SEGM_PROT; /* Don't unmark dirop nodes */
	while (fs->lfs_seglock)
		lfs_segunlock(fs);

	/* Wait for the cleaner */
	lfs_wakeup_cleaner(fs);
	simple_lock(&fs->lfs_interlock);
	while (LFS_STARVED_FOR_SEGS(fs))
		ltsleep(&fs->lfs_avail, PRIBIO, "relock", 0,
			&fs->lfs_interlock);
	simple_unlock(&fs->lfs_interlock);

	/* Put the segment lock back the way it was. */
	while (n--)
		lfs_seglock(fs, seg_flags);

	/* Cleaner can relax now */
	LFS_CLEANERINFO(cip, fs, bp);
	cip->flags &= ~LFS_CLEANER_MUST_CLEAN;
	LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);

	return;
}

/*
 * Wake up the cleaner, provided that nowrap is not set.
 */
void
lfs_wakeup_cleaner(struct lfs *fs)
{
	if (fs->lfs_nowrap > 0)
		return;

	wakeup(&fs->lfs_nextseg);
	wakeup(&lfs_allclean_wakeup);
}
684