/*	$NetBSD: lfs_segment.c,v 1.288 2020/09/05 16:30:13 riastradh Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_segment.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.288 2020/09/05 16:30:13 riastradh Exp $");

#ifdef DEBUG
# define vndebug(vp, str) do {						\
	if (VTOI(vp)->i_state & IN_CLEANING)				\
		DLOG((DLOG_WVNODE, "not writing ino %d because %s (op %d)\n", \
		     VTOI(vp)->i_number, (str), op));			\
} while(0)
#else
# define vndebug(vp, str)
#endif
#define ivndebug(vp, str) \
	DLOG((DLOG_WVNODE, "ino %d: %s\n", VTOI(vp)->i_number, (str)))

#if defined(_KERNEL_OPT)
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kauth.h>
#include <sys/syslog.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_kernel.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>

MALLOC_JUSTDEFINE(M_SEGMENT, "LFS segment", "Segment for LFS");

static void lfs_super_aiodone(struct buf *);
static void lfs_cluster_aiodone(struct buf *);

/*
 * Determine if it's OK to start a partial segment within the current
 * segment, or if we need to go on to a new segment.
 */
#define	LFS_PARTIAL_FITS(fs) \
	(lfs_sb_getfsbpseg(fs) - \
	    (lfs_sb_getoffset(fs) - lfs_sb_getcurseg(fs)) > \
	lfs_sb_getfrag(fs))

/*
 * Figure out whether we should do a checkpoint write or go ahead with
 * an ordinary write.  We checkpoint if too many segments are active,
 * too few are clean, or a checkpoint was explicitly requested; writes
 * on behalf of the cleaner never checkpoint.
 */
#define LFS_SHOULD_CHECKPOINT(fs, flags) \
        ((flags & SEGM_CLEAN) == 0 &&					\
	  ((fs->lfs_nactive > LFS_MAX_ACTIVE ||				\
	    (flags & SEGM_CKP) ||					\
	    lfs_sb_getnclean(fs) < LFS_MAX_ACTIVE)))

int	 lfs_match_fake(struct lfs *, struct buf *);
void	 lfs_newseg(struct lfs *);
void	 lfs_updatemeta(struct segment *);
void	 lfs_writesuper(struct lfs *, daddr_t);
int	 lfs_writevnodes(struct lfs *fs, struct mount *mp,
	    struct segment *sp, int dirops);

static void lfs_shellsort(struct lfs *, struct buf **, union lfs_blocks *,
			  int, int);

kcondvar_t	lfs_allclean_wakeup;	/* Cleaner wakeup address. */
int	lfs_writeindir = 1;		/* whether to flush indir on non-ckp */
int	lfs_clean_vnhead = 0;		/* Allow freeing to head of vn list */
int	lfs_dirvcount = 0;		/* # active dirops */

/* Statistics Counters */
int lfs_dostats = 1;
struct lfs_stats lfs_stats;

/* op values to lfs_writevnodes */
#define	VN_REG		0	/* regular (non-dirop) vnodes */
#define	VN_DIROP	1	/* vnodes with pending dirops */
#define	VN_EMPTY	2	/* vnodes with no dirty blocks */
#define VN_CLEAN	3	/* vnodes being cleaned */

/*
 * XXX KS - Set modification time on the Ifile, so the cleaner can
 * read the fs mod time off of it.  We don't set IN_UPDATE here,
 * since we don't really need this to be flushed to disk (and in any
 * case that wouldn't happen to the Ifile until we checkpoint).
 */
void
lfs_imtime(struct lfs *fs)
{
	struct timespec ts;
	struct inode *ip;

	ASSERT_MAYBE_SEGLOCK(fs);
	vfs_timestamp(&ts);
	ip = VTOI(fs->lfs_ivnode);
	lfs_dino_setmtime(fs, ip->i_din, ts.tv_sec);
	lfs_dino_setmtimensec(fs, ip->i_din, ts.tv_nsec);
}

/*
 * Ifile and meta data blocks are not marked busy, so segment writes MUST be
 * single threaded.  Currently, there are two paths into lfs_segwrite, sync()
 * and getnewbuf().  They both mark the file system busy.  Lfs_vflush()
 * explicitly marks the file system busy.  So lfs_segwrite is safe.  I think.
 */

#define IS_FLUSHING(fs,vp)  ((fs)->lfs_flushvp == (vp))

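/*
 * Flush all dirty data associated with the given vnode to disk,
 * writing as many partial segments as necessary to hold its dirty
 * blocks and inode.  On return the vnode has no dirty buffers and
 * no writes in progress, so it is safe to reclaim or reuse.
 */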
int
lfs_vflush(struct vnode *vp)
{
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct buf *bp, *nbp, *tbp, *tnbp;
	int error;
	int flushed;
	int relock;

	ip = VTOI(vp);
	fs = VFSTOULFS(vp->v_mount)->um_lfs;
	relock = 0;

    top:
	KASSERT(mutex_owned(vp->v_interlock) == false);
	KASSERT(mutex_owned(&lfs_lock) == false);
	KASSERT(mutex_owned(&bufcache_lock) == false);
	ASSERT_NO_SEGLOCK(fs);
	if (ip->i_state & IN_CLEANING) {
		ivndebug(vp,"vflush/in_cleaning");
		mutex_enter(&lfs_lock);
		LFS_CLR_UINO(ip, IN_CLEANING);
		LFS_SET_UINO(ip, IN_MODIFIED);
		mutex_exit(&lfs_lock);

		/*
		 * Toss any cleaning buffers that have real counterparts
		 * to avoid losing new data.
		 */
		mutex_enter(vp->v_interlock);
		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (!LFS_IS_MALLOC_BUF(bp))
				continue;
			/*
			 * Look for pages matching the range covered
			 * by cleaning blocks.  It's okay if more dirty
			 * pages appear, so long as none disappear out
			 * from under us.
			 */
			if (bp->b_lblkno > 0 && vp->v_type == VREG &&
			    vp != fs->lfs_ivnode) {
				struct vm_page *pg;
				voff_t off;

				for (off = lfs_lblktosize(fs, bp->b_lblkno);
				     off < lfs_lblktosize(fs, bp->b_lblkno + 1);
				     off += PAGE_SIZE) {
					pg = uvm_pagelookup(&vp->v_uobj, off);
					if (pg == NULL)
						continue;
					if (uvm_pagegetdirty(pg)
					    == UVM_PAGE_STATUS_DIRTY ||
					    pmap_is_modified(pg)) {
						lfs_sb_addavail(fs,
							lfs_btofsb(fs,
								bp->b_bcount));
						wakeup(&fs->lfs_availsleep);
						mutex_exit(vp->v_interlock);
						lfs_freebuf(fs, bp);
						mutex_enter(vp->v_interlock);
						bp = NULL;
						break;
					}
				}
			}
			for (tbp = LIST_FIRST(&vp->v_dirtyblkhd); tbp;
			    tbp = tnbp)
			{
				tnbp = LIST_NEXT(tbp, b_vnbufs);
				if (tbp->b_vp == bp->b_vp
				   && tbp->b_lblkno == bp->b_lblkno
				   && tbp != bp)
				{
					lfs_sb_addavail(fs, lfs_btofsb(fs,
						bp->b_bcount));
					wakeup(&fs->lfs_availsleep);
					mutex_exit(vp->v_interlock);
					lfs_freebuf(fs, bp);
					mutex_enter(vp->v_interlock);
					bp = NULL;
					break;
				}
			}
		}
	} else {
		mutex_enter(vp->v_interlock);
	}

	/* If the node is being written, wait until that is done */
	while (WRITEINPROG(vp)) {
		ivndebug(vp,"vflush/writeinprog");
		cv_wait(&vp->v_cv, vp->v_interlock);
	}
	error = vdead_check(vp, VDEAD_NOWAIT);
	mutex_exit(vp->v_interlock);

	/* Protect against deadlock in vinvalbuf() */
	lfs_seglock(fs, SEGM_SYNC | ((error != 0) ? SEGM_RECLAIM : 0));
	if (error != 0) {
		fs->lfs_reclino = ip->i_number;
	}

	/* If we're supposed to flush a freed inode, just toss it */
	if (ip->i_lfs_iflags & LFSI_DELETED) {
		DLOG((DLOG_VNODE, "lfs_vflush: ino %d freed, not flushing\n",
		      ip->i_number));
		/* Drain v_numoutput */
		mutex_enter(vp->v_interlock);
		while (vp->v_numoutput > 0) {
			cv_wait(&vp->v_cv, vp->v_interlock);
		}
		KASSERT(vp->v_numoutput == 0);
		mutex_exit(vp->v_interlock);

		mutex_enter(&bufcache_lock);
		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);

			KASSERT((bp->b_flags & B_GATHERED) == 0);
			if (bp->b_oflags & BO_DELWRI) { /* XXX always true? */
				lfs_sb_addavail(fs, lfs_btofsb(fs, bp->b_bcount));
				wakeup(&fs->lfs_availsleep);
			}
			/* Copied from lfs_writeseg */
			if (bp->b_iodone != NULL) {
				mutex_exit(&bufcache_lock);
				biodone(bp);
				mutex_enter(&bufcache_lock);
			} else {
				bremfree(bp);
				LFS_UNLOCK_BUF(bp);
				mutex_enter(vp->v_interlock);
				bp->b_flags &= ~(B_READ | B_GATHERED);
				bp->b_oflags = (bp->b_oflags & ~BO_DELWRI) | BO_DONE;
				bp->b_error = 0;
				reassignbuf(bp, vp);
				mutex_exit(vp->v_interlock);
				brelse(bp, 0);
			}
		}
		mutex_exit(&bufcache_lock);
		LFS_CLR_UINO(ip, IN_CLEANING);
		LFS_CLR_UINO(ip, IN_MODIFIED | IN_ACCESSED);
		ip->i_state &= ~IN_ALLMOD;
		DLOG((DLOG_VNODE, "lfs_vflush: done not flushing ino %d\n",
		      ip->i_number));
		lfs_segunlock(fs);

		KASSERT(LIST_FIRST(&vp->v_dirtyblkhd) == NULL);

		return 0;
	}

	fs->lfs_flushvp = vp;
	if (LFS_SHOULD_CHECKPOINT(fs, fs->lfs_sp->seg_flags)) {
		error = lfs_segwrite(vp->v_mount, SEGM_CKP | SEGM_SYNC);
		fs->lfs_flushvp = NULL;
		KASSERT(fs->lfs_flushvp_fakevref == 0);
		lfs_segunlock(fs);

		/* Make sure that any pending buffers get written */
		mutex_enter(vp->v_interlock);
		while (vp->v_numoutput > 0) {
			cv_wait(&vp->v_cv, vp->v_interlock);
		}
		KASSERT(LIST_FIRST(&vp->v_dirtyblkhd) == NULL);
		KASSERT(vp->v_numoutput == 0);
		mutex_exit(vp->v_interlock);

		return error;
	}
	sp = fs->lfs_sp;

	flushed = 0;
	if (VPISEMPTY(vp)) {
		lfs_writevnodes(fs, vp->v_mount, sp, VN_EMPTY);
		++flushed;
	} else if ((ip->i_state & IN_CLEANING) &&
		  (fs->lfs_sp->seg_flags & SEGM_CLEAN)) {
		ivndebug(vp,"vflush/clean");
		lfs_writevnodes(fs, vp->v_mount, sp, VN_CLEAN);
		++flushed;
	} else if (lfs_dostats) {
		if (!VPISEMPTY(vp) || (VTOI(vp)->i_state & IN_ALLMOD))
			++lfs_stats.vflush_invoked;
		ivndebug(vp,"vflush");
	}

#ifdef DIAGNOSTIC
	if (vp->v_uflag & VU_DIROP) {
		DLOG((DLOG_VNODE, "lfs_vflush: flushing VU_DIROP\n"));
		/* panic("lfs_vflush: VU_DIROP being flushed...this can\'t happen"); */
	}
#endif

	do {
#ifdef DEBUG
		int loopcount = 0;
#endif
		do {
			if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL) {
				relock = lfs_writefile(fs, sp, vp);
				if (relock && vp != fs->lfs_ivnode) {
					/*
					 * Might have to wait for the
					 * cleaner to run; but we're
					 * still not done with this vnode.
					 * XXX we can do better than this.
					 */
					KASSERT(ip->i_number != LFS_IFILE_INUM);
					lfs_writeinode(fs, sp, ip);
					mutex_enter(&lfs_lock);
					LFS_SET_UINO(ip, IN_MODIFIED);
					mutex_exit(&lfs_lock);
					lfs_writeseg(fs, sp);
					lfs_segunlock(fs);
					lfs_segunlock_relock(fs);
					goto top;
				}
			}
			/*
			 * If we begin a new segment in the middle of writing
			 * the Ifile, it creates an inconsistent checkpoint,
			 * since the Ifile information for the new segment
			 * is not up-to-date.  Take care of this here by
			 * sending the Ifile through again in case there
			 * are newly dirtied blocks.  But wait, there's more!
			 * This second Ifile write could *also* cross a segment
			 * boundary, if the first one was large.  The second
			 * one is guaranteed to be no more than 8 blocks,
			 * though (two segment blocks and supporting indirects)
			 * so the third write *will not* cross the boundary.
			 */
			if (vp == fs->lfs_ivnode) {
				lfs_writefile(fs, sp, vp);
				lfs_writefile(fs, sp, vp);
			}
#ifdef DEBUG
			if (++loopcount > 2)
				log(LOG_NOTICE, "lfs_vflush: looping count=%d\n", loopcount);
#endif
		} while (lfs_writeinode(fs, sp, ip));
	} while (lfs_writeseg(fs, sp) && ip->i_number == LFS_IFILE_INUM);

	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	/*
	 * If we were called from somewhere that has already held the seglock
	 * (e.g., lfs_markv()), the lfs_segunlock will not wait for
	 * the write to complete because we are still locked.
	 * Since lfs_vflush() must return the vnode with no dirty buffers,
	 * we must explicitly wait, if that is the case.
	 *
	 * We compare the iocount against 1, not 0, because it is
	 * artificially incremented by lfs_seglock().
	 */
	mutex_enter(&lfs_lock);
	if (fs->lfs_seglock > 1) {
		while (fs->lfs_iocount > 1)
			(void)mtsleep(&fs->lfs_iocount, PRIBIO + 1,
				     "lfs_vflush", 0, &lfs_lock);
	}
	mutex_exit(&lfs_lock);

	lfs_segunlock(fs);

	/* Wait for these buffers to be recovered by aiodoned */
	mutex_enter(vp->v_interlock);
	while (vp->v_numoutput > 0) {
		cv_wait(&vp->v_cv, vp->v_interlock);
	}
	KASSERT(LIST_FIRST(&vp->v_dirtyblkhd) == NULL);
	KASSERT(vp->v_numoutput == 0);
	mutex_exit(vp->v_interlock);

	fs->lfs_flushvp = NULL;
	KASSERT(fs->lfs_flushvp_fakevref == 0);

	return (0);
}

struct lfs_writevnodes_ctx {
	int op;
	struct lfs *fs;
};
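
/*
 * Decide whether lfs_writevnodes should consider this vnode, based
 * on the pass requested in the context (VN_REG, VN_DIROP, VN_EMPTY
 * or VN_CLEAN) and the vnode's dirop and cleaning state.
 */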
static bool
lfs_writevnodes_selector(void *cl, struct vnode *vp)
{
	struct lfs_writevnodes_ctx *c = cl;
	struct inode *ip;
	int op = c->op;

	KASSERT(mutex_owned(vp->v_interlock));

	ip = VTOI(vp);
	if (ip == NULL || vp->v_type == VNON || ip->i_nlink <= 0)
		return false;
	if ((op == VN_DIROP && !(vp->v_uflag & VU_DIROP)) ||
	    (op != VN_DIROP && op != VN_CLEAN && (vp->v_uflag & VU_DIROP))) {
		vndebug(vp, "dirop");
		return false;
	}
	if (op == VN_EMPTY && !VPISEMPTY(vp)) {
		vndebug(vp,"empty");
		return false;
	}
	if (op == VN_CLEAN && ip->i_number != LFS_IFILE_INUM &&
	    vp != c->fs->lfs_flushvp && !(ip->i_state & IN_CLEANING)) {
		vndebug(vp,"cleaning");
		return false;
	}
	mutex_enter(&lfs_lock);
	if (vp == c->fs->lfs_unlockvp) {
		mutex_exit(&lfs_lock);
		return false;
	}
	mutex_exit(&lfs_lock);

	return true;
}

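/*
 * Write inodes, and file data as appropriate, for all vnodes on the
 * given mount point that are selected by op.  Returns 0, or EAGAIN
 * if lfs_writefile indicated that the segment lock must be dropped
 * so the cleaner can run; other errors are swallowed (see XXX below).
 */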
int
lfs_writevnodes(struct lfs *fs, struct mount *mp, struct segment *sp, int op)
{
	struct inode *ip;
	struct vnode *vp;
	struct vnode_iterator *marker;
	struct lfs_writevnodes_ctx ctx;
	int inodes_written = 0;
	int error = 0;

	/*
	 * XXX This was TAILQ_FOREACH_REVERSE on &mp->mnt_vnodelist.
	 * XXX The rationale is unclear, the initial commit had no information.
	 * XXX If the order really matters we have to sort the vnodes first.
	*/

	ASSERT_SEGLOCK(fs);
	vfs_vnode_iterator_init(mp, &marker);
	ctx.op = op;
	ctx.fs = fs;
	while ((vp = vfs_vnode_iterator_next(marker,
	    lfs_writevnodes_selector, &ctx)) != NULL) {
		ip = VTOI(vp);

		/*
		 * Write the inode/file if dirty and it's not the IFILE.
		 */
		if (((ip->i_state & IN_ALLMOD) || !VPISEMPTY(vp)) &&
		    ip->i_number != LFS_IFILE_INUM) {
			error = lfs_writefile(fs, sp, vp);
			if (error) {
				vrele(vp);
				if (error == EAGAIN) {
					/*
					 * This error from lfs_putpages
					 * indicates we need to drop
					 * the segment lock and start
					 * over after the cleaner has
					 * had a chance to run.
					 */
					lfs_writeinode(fs, sp, ip);
					lfs_writeseg(fs, sp);
					if (!VPISEMPTY(vp) &&
					    !WRITEINPROG(vp) &&
					    !(ip->i_state & IN_ALLMOD)) {
						mutex_enter(&lfs_lock);
						LFS_SET_UINO(ip, IN_MODIFIED);
						mutex_exit(&lfs_lock);
					}
					break;
				}
				error = 0; /* XXX not quite right */
				continue;
			}

			if (!VPISEMPTY(vp)) {
				if (WRITEINPROG(vp)) {
					ivndebug(vp,"writevnodes/write2");
				} else if (!(ip->i_state & IN_ALLMOD)) {
					mutex_enter(&lfs_lock);
					LFS_SET_UINO(ip, IN_MODIFIED);
					mutex_exit(&lfs_lock);
				}
			}
			(void) lfs_writeinode(fs, sp, ip);
			inodes_written++;
		}
		vrele(vp);
	}
	vfs_vnode_iterator_destroy(marker);
	return error;
}

/*
 * Write dirty blocks to disk as one or more partial segments,
 * doing a full checkpoint if the flags or the filesystem state
 * call for one.
 */
int
lfs_segwrite(struct mount *mp, int flags)
{
	struct buf *bp;
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
	SEGUSE *segusep;
	int do_ckp, did_ckp, error;
	unsigned n, segleft, maxseg, sn, i, curseg;
	int writer_set = 0;
	int dirty;
	int redo;
	SEGSUM *ssp;
	int um_error;

	fs = VFSTOULFS(mp)->um_lfs;
	ASSERT_MAYBE_SEGLOCK(fs);

	if (fs->lfs_ronly)
		return EROFS;

	lfs_imtime(fs);

	/*
	 * Allocate a segment structure and enough space to hold pointers to
	 * the maximum possible number of buffers which can be described in a
	 * single summary block.
	 */
	do_ckp = LFS_SHOULD_CHECKPOINT(fs, flags);

	/*
	 * If we know we're gonna need the writer lock, take it now to
	 * preserve the lock order lfs_writer -> lfs_seglock.
	 */
	if (do_ckp) {
		lfs_writer_enter(fs, "ckpwriter");
		writer_set = 1;
	}

	/* We can't do a partial write and checkpoint at the same time. */
	if (do_ckp)
		flags &= ~SEGM_SINGLE;

	lfs_seglock(fs, flags | (do_ckp ? SEGM_CKP : 0));
	sp = fs->lfs_sp;
	if (sp->seg_flags & (SEGM_CLEAN | SEGM_CKP))
		do_ckp = 1;

	/*
	 * If lfs_flushvp is non-NULL, we are called from lfs_vflush,
	 * in which case we have to flush *all* buffers off of this vnode.
	 * We don't care about other nodes, but write any non-dirop nodes
	 * anyway in anticipation of another getnewvnode().
	 *
	 * If we're cleaning we only write cleaning and ifile blocks, and
	 * no dirops, since otherwise we'd risk corruption in a crash.
	 */
	if (sp->seg_flags & SEGM_CLEAN)
		lfs_writevnodes(fs, mp, sp, VN_CLEAN);
	else if (!(sp->seg_flags & SEGM_FORCE_CKP)) {
		do {
			um_error = lfs_writevnodes(fs, mp, sp, VN_REG);
			if ((sp->seg_flags & SEGM_SINGLE) &&
			    lfs_sb_getcurseg(fs) != fs->lfs_startseg) {
				DLOG((DLOG_SEG, "lfs_segwrite: breaking out of segment write at daddr 0x%jx\n", (uintmax_t)lfs_sb_getoffset(fs)));
				break;
			}

			if (do_ckp ||
			    (writer_set = lfs_writer_tryenter(fs)) != 0) {
				KASSERT(writer_set);
				KASSERT(fs->lfs_writer);
				error = lfs_writevnodes(fs, mp, sp, VN_DIROP);
				if (um_error == 0)
					um_error = error;
				/*
				 * In case writevnodes errored out
				 * XXX why are we always doing this and not
				 * just on error?
				 */
				lfs_flush_dirops(fs);
				ssp = (SEGSUM *)(sp->segsum);
				lfs_ss_setflags(fs, ssp,
						lfs_ss_getflags(fs, ssp) & ~(SS_CONT));
				lfs_finalize_fs_seguse(fs);
			}
			if (do_ckp && um_error) {
				lfs_segunlock_relock(fs);
				sp = fs->lfs_sp;
			}
		} while (do_ckp && um_error != 0);
	}

	/*
	 * If we are doing a checkpoint, mark everything since the
	 * last checkpoint as no longer ACTIVE.
	 */
	if (do_ckp || fs->lfs_doifile) {
		segleft = lfs_sb_getnseg(fs);
		curseg = 0;
		for (n = 0; n < lfs_sb_getsegtabsz(fs); n++) {
			int bread_error;

			dirty = 0;
			bread_error = bread(fs->lfs_ivnode,
			    lfs_sb_getcleansz(fs) + n,
			    lfs_sb_getbsize(fs), B_MODIFY, &bp);
			if (bread_error)
				panic("lfs_segwrite: ifile read: "
				      "seguse %u: error %d\n",
				      n, bread_error);
			segusep = (SEGUSE *)bp->b_data;
			maxseg = uimin(segleft, lfs_sb_getsepb(fs));
			for (i = 0; i < maxseg; i++) {
				sn = curseg + i;
				if (sn != lfs_dtosn(fs, lfs_sb_getcurseg(fs)) &&
				    segusep->su_flags & SEGUSE_ACTIVE) {
					segusep->su_flags &= ~SEGUSE_ACTIVE;
					--fs->lfs_nactive;
					++dirty;
				}
				fs->lfs_suflags[fs->lfs_activesb][sn] =
					segusep->su_flags;
				if (lfs_sb_getversion(fs) > 1)
					++segusep;
				else
					segusep = (SEGUSE *)
						((SEGUSE_V1 *)segusep + 1);
			}

			if (dirty)
				error = LFS_BWRITE_LOG(bp); /* Ifile */
			else
				brelse(bp, 0);
			segleft -= lfs_sb_getsepb(fs);
			curseg += lfs_sb_getsepb(fs);
		}
	}

	KASSERT(LFS_SEGLOCK_HELD(fs));

	did_ckp = 0;
	if (do_ckp || fs->lfs_doifile) {
		vp = fs->lfs_ivnode;
#ifdef DEBUG
		int loopcount = 0;
#endif
		do {

			LFS_ENTER_LOG("pretend", __FILE__, __LINE__, 0, 0, curproc->p_pid);

			mutex_enter(&lfs_lock);
			fs->lfs_flags &= ~LFS_IFDIRTY;
			mutex_exit(&lfs_lock);

			ip = VTOI(vp);

			if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL) {
				/*
				 * Ifile has no pages, so we don't need
				 * to check error return here.
				 */
				lfs_writefile(fs, sp, vp);
				/*
				 * Ensure the Ifile takes the current segment
				 * into account.  See comment in lfs_vflush.
				 */
				lfs_writefile(fs, sp, vp);
				lfs_writefile(fs, sp, vp);
			}

			if (ip->i_state & IN_ALLMOD)
				++did_ckp;
#if 0
			redo = (do_ckp ? lfs_writeinode(fs, sp, ip) : 0);
#else
			redo = lfs_writeinode(fs, sp, ip);
#endif
			redo += lfs_writeseg(fs, sp);
			mutex_enter(&lfs_lock);
			redo += (fs->lfs_flags & LFS_IFDIRTY);
			mutex_exit(&lfs_lock);
#ifdef DEBUG
			if (++loopcount > 2)
				log(LOG_NOTICE, "lfs_segwrite: looping count=%d\n",
					loopcount);
#endif
		} while (redo && do_ckp);

		/*
		 * Unless we are unmounting, the Ifile may continue to have
		 * dirty blocks even after a checkpoint, due to changes to
		 * inodes' atime.  If we're checkpointing, it's "impossible"
		 * for other parts of the Ifile to be dirty after the loop
		 * above, since we hold the segment lock.
		 */
		mutex_enter(vp->v_interlock);
		if (LIST_EMPTY(&vp->v_dirtyblkhd)) {
			LFS_CLR_UINO(ip, IN_ALLMOD);
		}
#ifdef DIAGNOSTIC
		else if (do_ckp) {
			int do_panic = 0;
			LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
				if (bp->b_lblkno < lfs_sb_getcleansz(fs) +
				    lfs_sb_getsegtabsz(fs) &&
				    !(bp->b_flags & B_GATHERED)) {
					printf("ifile lbn %ld still dirty (flags %lx)\n",
						(long)bp->b_lblkno,
						(long)bp->b_flags);
					++do_panic;
				}
			}
			if (do_panic)
				panic("dirty blocks");
		}
#endif
		mutex_exit(vp->v_interlock);
	} else {
		(void) lfs_writeseg(fs, sp);
	}

	/* Note Ifile no longer needs to be written */
	fs->lfs_doifile = 0;
	if (writer_set)
		lfs_writer_leave(fs);

	/*
	 * If we didn't write the Ifile, we didn't really do anything.
	 * That means that (1) there is a checkpoint on disk and (2)
	 * nothing has changed since it was written.
	 *
	 * Take the flags off of the segment so that lfs_segunlock
	 * doesn't have to write the superblock either.
	 */
	if (do_ckp && !did_ckp) {
		sp->seg_flags &= ~SEGM_CKP;
	}

	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	lfs_segunlock(fs);
	return (0);
}

/*
 * Write the dirty blocks associated with a vnode.
 */
int
lfs_writefile(struct lfs *fs, struct segment *sp, struct vnode *vp)
{
	struct inode *ip;
	int i, frag;
	SEGSUM *ssp;
	int error;

	ASSERT_SEGLOCK(fs);
	error = 0;
	ip = VTOI(vp);

	lfs_acquire_finfo(fs, ip->i_number, ip->i_gen);

	if (vp->v_uflag & VU_DIROP) {
		ssp = (SEGSUM *)sp->segsum;
		lfs_ss_setflags(fs, ssp,
				lfs_ss_getflags(fs, ssp) | (SS_DIROP|SS_CONT));
	}

	if (sp->seg_flags & SEGM_CLEAN) {
		lfs_gather(fs, sp, vp, lfs_match_fake);
		/*
		 * For a file being flushed, we need to write *all* blocks.
		 * This means writing the cleaning blocks first, and then
		 * immediately following with any non-cleaning blocks.
		 * The same is true of the Ifile since checkpoints assume
		 * that all valid Ifile blocks are written.
		 */
		if (IS_FLUSHING(fs, vp) || vp == fs->lfs_ivnode) {
			lfs_gather(fs, sp, vp, lfs_match_data);
			/*
			 * Don't call VOP_PUTPAGES: if we're flushing,
			 * we've already done it, and the Ifile doesn't
			 * use the page cache.
			 */
		}
	} else {
		lfs_gather(fs, sp, vp, lfs_match_data);
		/*
		 * If we're flushing, we've already called VOP_PUTPAGES
		 * so don't do it again.  Otherwise, we want to write
		 * everything we've got.
		 */
		if (!IS_FLUSHING(fs, vp)) {
			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
			error = VOP_PUTPAGES(vp, 0, 0,
				PGO_CLEANIT | PGO_ALLPAGES | PGO_LOCKED);
		}
	}

	/*
	 * It may not be necessary to write the meta-data blocks at this point,
	 * as the roll-forward recovery code should be able to reconstruct the
	 * list.
	 *
	 * We have to write them anyway, though, under two conditions: (1) the
	 * vnode is being flushed (for reuse by vinvalbuf); or (2) we are
	 * checkpointing.
	 *
	 * BUT if we are cleaning, we might have indirect blocks that refer to
	 * new blocks not being written yet, in addition to fragments being
	 * moved out of a cleaned segment.  If that is the case, don't
	 * write the indirect blocks, or the finfo will have a small block
	 * in the middle of it!
	 * XXX in this case isn't the inode size wrong too?
	 */
	frag = 0;
	if (sp->seg_flags & SEGM_CLEAN) {
		for (i = 0; i < ULFS_NDADDR; i++)
			if (ip->i_lfs_fragsize[i] > 0 &&
			    ip->i_lfs_fragsize[i] < lfs_sb_getbsize(fs))
				++frag;
	}
	KASSERTMSG((frag <= 1),
	    "lfs_writefile: more than one fragment! frag=%d", frag);
	if (IS_FLUSHING(fs, vp) ||
	    (frag == 0 && (lfs_writeindir || (sp->seg_flags & SEGM_CKP)))) {
		lfs_gather(fs, sp, vp, lfs_match_indir);
		lfs_gather(fs, sp, vp, lfs_match_dindir);
		lfs_gather(fs, sp, vp, lfs_match_tindir);
	}
	lfs_release_finfo(fs);

	return error;
}

/*
 * Update segment accounting to reflect this inode's change of address.
 */
static int
lfs_update_iaddr(struct lfs *fs, struct segment *sp, struct inode *ip, daddr_t ndaddr)
{
	struct buf *bp;
	daddr_t daddr;
	IFILE *ifp;
	SEGUSE *sup;
	ino_t ino;
	int redo_ifile;
	u_int32_t sn;

	redo_ifile = 0;

	/*
	 * If updating the ifile, update the super-block.  Update the disk
	 * address and access times for this inode in the ifile.
	 */
	ino = ip->i_number;
	if (ino == LFS_IFILE_INUM) {
		daddr = lfs_sb_getidaddr(fs);
		lfs_sb_setidaddr(fs, LFS_DBTOFSB(fs, ndaddr));
	} else {
		LFS_IENTRY(ifp, fs, ino, bp);
		daddr = lfs_if_getdaddr(fs, ifp);
		lfs_if_setdaddr(fs, ifp, LFS_DBTOFSB(fs, ndaddr));
		(void)LFS_BWRITE_LOG(bp); /* Ifile */
	}

	/*
	 * If this is the Ifile and lfs_offset is set to the first block
	 * in the segment, dirty the new segment's accounting block
	 * (XXX should already be dirty?) and tell the caller to do it again.
	 */
	if (ip->i_number == LFS_IFILE_INUM) {
		sn = lfs_dtosn(fs, lfs_sb_getoffset(fs));
		if (lfs_sntod(fs, sn) + lfs_btofsb(fs, lfs_sb_getsumsize(fs)) ==
		    lfs_sb_getoffset(fs)) {
			LFS_SEGENTRY(sup, fs, sn, bp);
			KASSERT(bp->b_oflags & BO_DELWRI);
			LFS_WRITESEGENTRY(sup, fs, sn, bp);
			/* fs->lfs_flags |= LFS_IFDIRTY; */
			redo_ifile |= 1;
		}
	}

	/*
	 * The inode's last address should not be in the current partial
	 * segment, except under exceptional circumstances (lfs_writevnodes
	 * had to start over, and in the meantime more blocks were written
	 * to a vnode).	 Both inodes will be accounted to this segment
	 * in lfs_writeseg so we need to subtract the earlier version
	 * here anyway.	 The segment count can temporarily dip below
	 * zero here; keep track of how many duplicates we have in
	 * "dupino" so we don't panic below.
	 */
	if (daddr >= lfs_sb_getlastpseg(fs) && daddr <= lfs_sb_getoffset(fs)) {
		++sp->ndupino;
		DLOG((DLOG_SEG, "lfs_writeinode: last inode addr in current pseg "
		      "(ino %d daddr 0x%llx) ndupino=%d\n", ino,
		      (long long)daddr, sp->ndupino));
	}
	/*
	 * Account the inode: it no longer belongs to its former segment,
	 * though it will not belong to the new segment until that segment
	 * is actually written.
	 */
	if (daddr != LFS_UNUSED_DADDR) {
		u_int32_t oldsn = lfs_dtosn(fs, daddr);
		int ndupino __diagused =
		    (sp->seg_number == oldsn) ? sp->ndupino : 0;
		LFS_SEGENTRY(sup, fs, oldsn, bp);
		KASSERTMSG(((sup->su_nbytes + DINOSIZE(fs)*ndupino)
			>= DINOSIZE(fs)),
		    "lfs_writeinode: negative bytes "
		    "(segment %" PRIu32 " short by %d, "
		    "oldsn=%" PRIu32 ", cursn=%" PRIu32
		    ", daddr=%" PRId64 ", su_nbytes=%u, "
		    "ndupino=%d)\n",
		    lfs_dtosn(fs, daddr),
		    (int)DINOSIZE(fs) * (1 - sp->ndupino) - sup->su_nbytes,
		    oldsn, sp->seg_number, daddr,
		    (unsigned int)sup->su_nbytes,
		    sp->ndupino);
		DLOG((DLOG_SU, "seg %d -= %d for ino %d inode\n",
		      lfs_dtosn(fs, daddr), DINOSIZE(fs), ino));
		sup->su_nbytes -= DINOSIZE(fs);
		redo_ifile |=
			(ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
		if (redo_ifile) {
			mutex_enter(&lfs_lock);
			fs->lfs_flags |= LFS_IFDIRTY;
			mutex_exit(&lfs_lock);
			/* Don't double-account */
			lfs_sb_setidaddr(fs, 0x0);
		}
		LFS_WRITESEGENTRY(sup, fs, oldsn, bp); /* Ifile */
	}

	return redo_ifile;
}

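/*
 * Write the inode for ip into the current partial segment, starting
 * a new inode block (and, if necessary, a new partial segment), and
 * record the inode's new disk address in the Ifile.  Returns nonzero
 * if the Ifile must be written again.
 */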
int
lfs_writeinode(struct lfs *fs, struct segment *sp, struct inode *ip)
{
	struct buf *bp;
	union lfs_dinode *cdp;
	struct vnode *vp = ITOV(ip);
	daddr_t daddr;
	IINFO *iip;
	int i;
	int redo_ifile = 0;
	int gotblk = 0;
	int count;
	SEGSUM *ssp;

	ASSERT_SEGLOCK(fs);
	if (!(ip->i_state & IN_ALLMOD) && !(vp->v_uflag & VU_DIROP))
		return (0);

	/* Can't write ifile when writer is not set */
	KASSERT(ip->i_number != LFS_IFILE_INUM || fs->lfs_writer > 0 ||
		(sp->seg_flags & SEGM_CLEAN));

	/*
	 * If this is the Ifile, see if writing it here will generate a
	 * temporary misaccounting.  If it will, do the accounting and write
	 * the blocks, postponing the inode write until the accounting is
	 * solid.
	 */
	count = 0;
	while (vp == fs->lfs_ivnode) {
		int redo = 0;

		if (sp->idp == NULL && sp->ibp == NULL &&
		    (sp->seg_bytes_left < lfs_sb_getibsize(fs) ||
		     sp->sum_bytes_left < sizeof(int32_t))) {
			(void) lfs_writeseg(fs, sp);
			continue;
		}

		/* Look for dirty Ifile blocks */
		LIST_FOREACH(bp, &fs->lfs_ivnode->v_dirtyblkhd, b_vnbufs) {
			if (!(bp->b_flags & B_GATHERED)) {
				redo = 1;
				break;
			}
		}

		if (redo == 0)
			redo = lfs_update_iaddr(fs, sp, ip, 0x0);
		if (redo == 0)
			break;

		if (sp->idp) {
			lfs_dino_setinumber(fs, sp->idp, 0);
			sp->idp = NULL;
		}
		++count;
		if (count > 2)
			log(LOG_NOTICE, "lfs_writeinode: looping count=%d\n", count);
		lfs_writefile(fs, sp, fs->lfs_ivnode);
	}

	/* Allocate a new inode block if necessary. */
	if ((ip->i_number != LFS_IFILE_INUM || sp->idp == NULL) &&
	    sp->ibp == NULL) {
		/* Allocate a new segment if necessary. */
		if (sp->seg_bytes_left < lfs_sb_getibsize(fs) ||
		    sp->sum_bytes_left < sizeof(int32_t))
			(void) lfs_writeseg(fs, sp);

		/* Get next inode block. */
		daddr = lfs_sb_getoffset(fs);
		lfs_sb_addoffset(fs, lfs_btofsb(fs, lfs_sb_getibsize(fs)));
		sp->ibp = *sp->cbpp++ =
			getblk(VTOI(fs->lfs_ivnode)->i_devvp,
			    LFS_FSBTODB(fs, daddr), lfs_sb_getibsize(fs), 0, 0);
		gotblk++;

		/* Zero out inode numbers */
		for (i = 0; i < LFS_INOPB(fs); ++i) {
			union lfs_dinode *tmpdi;

			tmpdi = (union lfs_dinode *)((char *)sp->ibp->b_data +
						     DINOSIZE(fs) * i);
			lfs_dino_setinumber(fs, tmpdi, 0);
		}

		++sp->start_bpp;
		lfs_sb_subavail(fs, lfs_btofsb(fs, lfs_sb_getibsize(fs)));
		/* Set remaining space counters. */
		sp->seg_bytes_left -= lfs_sb_getibsize(fs);
		sp->sum_bytes_left -= sizeof(int32_t);

		/* Store the address in the segment summary. */
		iip = NTH_IINFO(fs, sp->segsum, sp->ninodes / LFS_INOPB(fs));
		lfs_ii_setblock(fs, iip, daddr);
	}

	/* Check VU_DIROP in case there is a new file with no data blocks */
	if (vp->v_uflag & VU_DIROP) {
		ssp = (SEGSUM *)sp->segsum;
		lfs_ss_setflags(fs, ssp,
				lfs_ss_getflags(fs, ssp) | (SS_DIROP|SS_CONT));
	}

	/* Update the inode times and copy the inode onto the inode page. */
	/* XXX kludge --- don't redirty the ifile just to put times on it */
	if (ip->i_number != LFS_IFILE_INUM)
		LFS_ITIMES(ip, NULL, NULL, NULL);

	/*
	 * If this is the Ifile, and we've already written the Ifile in this
	 * partial segment, just overwrite it (it's not on disk yet) and
	 * continue.
	 *
	 * XXX we know that the bp that we get the second time around has
	 * already been gathered.
	 */
	if (ip->i_number == LFS_IFILE_INUM && sp->idp) {
		lfs_copy_dinode(fs, sp->idp, ip->i_din);
		ip->i_lfs_osize = ip->i_size;
		return 0;
	}

	bp = sp->ibp;
	cdp = DINO_IN_BLOCK(fs, bp->b_data, sp->ninodes % LFS_INOPB(fs));
	lfs_copy_dinode(fs, cdp, ip->i_din);

	/*
	 * This inode is on its way to disk; clear its VU_DIROP status when
	 * the write is complete.
	 */
	if (vp->v_uflag & VU_DIROP) {
		if (!(sp->seg_flags & SEGM_CLEAN))
			ip->i_state |= IN_CDIROP;
		else {
			DLOG((DLOG_DIROP, "lfs_writeinode: not clearing dirop for cleaned ino %d\n", (int)ip->i_number));
		}
	}

	/*
	 * If cleaning, link counts and directory file sizes cannot change,
	 * since those would be directory operations---even if the file
	 * we are writing is marked VU_DIROP we should write the old values.
	 * If we're not cleaning, of course, update the values so we get
	 * current values the next time we clean.
	 */
	if (sp->seg_flags & SEGM_CLEAN) {
		if (vp->v_uflag & VU_DIROP) {
			lfs_dino_setnlink(fs, cdp, ip->i_lfs_odnlink);
			/* if (vp->v_type == VDIR) */
			lfs_dino_setsize(fs, cdp, ip->i_lfs_osize);
		}
	} else {
		ip->i_lfs_odnlink = lfs_dino_getnlink(fs, cdp);
		ip->i_lfs_osize = ip->i_size;
	}


	/* We can finish the segment accounting for truncations now */
	lfs_finalize_ino_seguse(fs, ip);

	/*
	 * If we are cleaning, ensure that we don't write UNWRITTEN disk
	 * addresses to disk; possibly change the on-disk record of
	 * the inode size, either by reverting to the previous size
	 * (in the case of cleaning) or by verifying the inode's block
	 * holdings (in the case of files being allocated as they are being
	 * written).
	 * XXX By not writing UNWRITTEN blocks, we are making the lfs_avail
	 * XXX count on disk wrong by the same amount.	We should be
	 * XXX able to "borrow" from lfs_avail and return it after the
	 * XXX Ifile is written.  See also in lfs_writeseg.
	 */

	/* Check file size based on highest allocated block */
	if (((lfs_dino_getmode(fs, ip->i_din) & LFS_IFMT) == LFS_IFREG ||
	     (lfs_dino_getmode(fs, ip->i_din) & LFS_IFMT) == LFS_IFDIR) &&
	    ip->i_size > ((ip->i_lfs_hiblk + 1) << lfs_sb_getbshift(fs))) {
		lfs_dino_setsize(fs, cdp, (ip->i_lfs_hiblk + 1) << lfs_sb_getbshift(fs));
		DLOG((DLOG_SEG, "lfs_writeinode: ino %d size %" PRId64 " -> %"
		      PRId64 "\n", (int)ip->i_number, ip->i_size, lfs_dino_getsize(fs, cdp)));
	}
	if (ip->i_lfs_effnblks != lfs_dino_getblocks(fs, ip->i_din)) {
		DLOG((DLOG_SEG, "lfs_writeinode: cleansing ino %d eff %jd != nblk %d)"
		      " at %jx\n", ip->i_number, (intmax_t)ip->i_lfs_effnblks,
		      lfs_dino_getblocks(fs, ip->i_din), (uintmax_t)lfs_sb_getoffset(fs)));
		for (i=0; i<ULFS_NDADDR; i++) {
			if (lfs_dino_getdb(fs, cdp, i) == UNWRITTEN) {
				DLOG((DLOG_SEG, "lfs_writeinode: wiping UNWRITTEN\n"));
				lfs_dino_setdb(fs, cdp, i, 0);
			}
		}
		for (i=0; i<ULFS_NIADDR; i++) {
			if (lfs_dino_getib(fs, cdp, i) == UNWRITTEN) {
				DLOG((DLOG_SEG, "lfs_writeinode: wiping UNWRITTEN\n"));
				lfs_dino_setib(fs, cdp, i, 0);
			}
		}
	}

#ifdef DIAGNOSTIC
	/*
	 * Check dinode held blocks against dinode size.
	 * This should be identical to the check in lfs_vget().
	 */
	for (i = (lfs_dino_getsize(fs, cdp) + lfs_sb_getbsize(fs) - 1) >> lfs_sb_getbshift(fs);
	     i < ULFS_NDADDR; i++) {
		KASSERT(i >= 0);
		if ((lfs_dino_getmode(fs, cdp) & LFS_IFMT) == LFS_IFLNK)
			continue;
		if (((lfs_dino_getmode(fs, cdp) & LFS_IFMT) == LFS_IFBLK ||
		     (lfs_dino_getmode(fs, cdp) & LFS_IFMT) == LFS_IFCHR) && i == 0)
			continue;
		if (lfs_dino_getdb(fs, cdp, i) != 0) {
# ifdef DEBUG
			lfs_dump_dinode(fs, cdp);
# endif
			panic("writing inconsistent inode");
		}
	}
#endif /* DIAGNOSTIC */

	if (ip->i_state & IN_CLEANING)
		LFS_CLR_UINO(ip, IN_CLEANING);
	else {
		/* XXX IN_ALLMOD */
		LFS_CLR_UINO(ip, IN_ACCESSED | IN_ACCESS | IN_CHANGE |
			     IN_UPDATE | IN_MODIFY);
		if (ip->i_lfs_effnblks == lfs_dino_getblocks(fs, ip->i_din))
			LFS_CLR_UINO(ip, IN_MODIFIED);
		else {
			DLOG((DLOG_VNODE, "lfs_writeinode: ino %d: real "
			    "blks=%d, eff=%jd\n", ip->i_number,
			    lfs_dino_getblocks(fs, ip->i_din), (intmax_t)ip->i_lfs_effnblks));
		}
	}

	if (ip->i_number == LFS_IFILE_INUM) {
		/* We know sp->idp == NULL */
		sp->idp = DINO_IN_BLOCK(fs, bp, sp->ninodes % LFS_INOPB(fs));

		/* Not dirty any more */
		mutex_enter(&lfs_lock);
		fs->lfs_flags &= ~LFS_IFDIRTY;
		mutex_exit(&lfs_lock);
	}

	if (gotblk) {
		mutex_enter(&bufcache_lock);
		LFS_LOCK_BUF(bp);
		brelsel(bp, 0);
		mutex_exit(&bufcache_lock);
	}

	/* Increment inode count in segment summary block. */

	ssp = (SEGSUM *)sp->segsum;
	lfs_ss_setninos(fs, ssp, lfs_ss_getninos(fs, ssp) + 1);

	/* If this page is full, set flag to allocate a new page. */
	if (++sp->ninodes % LFS_INOPB(fs) == 0)
		sp->ibp = NULL;

	redo_ifile = lfs_update_iaddr(fs, sp, ip, bp->b_blkno);

	KASSERT(redo_ifile == 0);
	return (redo_ifile);
}

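/*
 * Add a single buffer to the partial segment being built.  If it does
 * not fit, write out the current partial segment, begin a new FINFO
 * entry for the file and return 1 so the caller restarts its scan of
 * the buffer list; otherwise return 0.
 */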
int
lfs_gatherblock(struct segment *sp, struct buf *bp, kmutex_t *mptr)
{
	struct lfs *fs;
	int vers;
	int j, blksinblk;

	ASSERT_SEGLOCK(sp->fs);
	KASSERTMSG((sp->vp != NULL),
	    "lfs_gatherblock: Null vp in segment");

	/* If full, finish this segment. */
	fs = sp->fs;
	blksinblk = howmany(bp->b_bcount, lfs_sb_getbsize(fs));
	if (sp->sum_bytes_left < sizeof(int32_t) * blksinblk ||
	    sp->seg_bytes_left < bp->b_bcount) {
		if (mptr)
			mutex_exit(mptr);
		lfs_updatemeta(sp);

		vers = lfs_fi_getversion(fs, sp->fip);
		(void) lfs_writeseg(fs, sp);

		/* Add the current file to the segment summary. */
		lfs_acquire_finfo(fs, VTOI(sp->vp)->i_number, vers);

		if (mptr)
			mutex_enter(mptr);
		return (1);
	}

	if (bp->b_flags & B_GATHERED) {
		DLOG((DLOG_SEG, "lfs_gatherblock: already gathered! Ino %ju,"
		      " lbn %" PRId64 "\n",
		      (uintmax_t)lfs_fi_getino(fs, sp->fip), bp->b_lblkno));
		return (0);
	}

	/* Insert into the buffer list, update the FINFO block. */
	bp->b_flags |= B_GATHERED;

	*sp->cbpp++ = bp;
	for (j = 0; j < blksinblk; j++) {
		unsigned bn;

		bn = lfs_fi_getnblocks(fs, sp->fip);
		lfs_fi_setnblocks(fs, sp->fip, bn+1);
		lfs_fi_setblock(fs, sp->fip, bn, bp->b_lblkno + j);
		/* This block's accounting moves from lfs_favail to lfs_avail */
		lfs_deregister_block(sp->vp, bp->b_lblkno + j);
	}

	sp->sum_bytes_left -= sizeof(int32_t) * blksinblk;
	sp->seg_bytes_left -= bp->b_bcount;
	return (0);
}

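/*
 * Gather all the dirty buffers of the given vnode that satisfy
 * match() into the current partial segment and update the FINFO
 * and block accounting.  Returns the number of buffers gathered.
 */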
int
lfs_gather(struct lfs *fs, struct segment *sp, struct vnode *vp,
    int (*match)(struct lfs *, struct buf *))
{
	struct buf *bp, *nbp;
	int count = 0;

	ASSERT_SEGLOCK(fs);
	if (vp->v_type == VBLK)
		return 0;
	KASSERT(sp->vp == NULL);
	sp->vp = vp;
	mutex_enter(&bufcache_lock);

#ifndef LFS_NO_BACKBUF_HACK
/* This is a hack to see if ordering the blocks in LFS makes a difference. */
# define	BUF_OFFSET	\
	(((char *)&LIST_NEXT(bp, b_vnbufs)) - (char *)bp)
# define	BACK_BUF(BP)	\
	((struct buf *)(((char *)(BP)->b_vnbufs.le_prev) - BUF_OFFSET))
# define	BEG_OF_LIST	\
	((struct buf *)(((char *)&LIST_FIRST(&vp->v_dirtyblkhd)) - BUF_OFFSET))

loop:
	/* Find last buffer. */
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	     bp && LIST_NEXT(bp, b_vnbufs) != NULL;
	     bp = LIST_NEXT(bp, b_vnbufs))
		continue;

	for (; bp && bp != BEG_OF_LIST; bp = nbp) {
		nbp = BACK_BUF(bp);
#else /* LFS_NO_BACKBUF_HACK */
loop:
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
#endif /* LFS_NO_BACKBUF_HACK */
		if ((bp->b_cflags & BC_BUSY) != 0 ||
		    (bp->b_flags & B_GATHERED) != 0 || !match(fs, bp)) {
#ifdef DEBUG
			if (vp == fs->lfs_ivnode &&
			    (bp->b_cflags & BC_BUSY) != 0 &&
			    (bp->b_flags & B_GATHERED) == 0)
				log(LOG_NOTICE, "lfs_gather: ifile lbn %"
				      PRId64 " busy (%x) at 0x%jx",
				      bp->b_lblkno, bp->b_flags,
				      (uintmax_t)lfs_sb_getoffset(fs));
#endif
			continue;
		}
#ifdef DIAGNOSTIC
# ifdef LFS_USE_BC_INVAL
		if ((bp->b_cflags & BC_INVAL) != 0 && bp->b_iodone == NULL) {
			DLOG((DLOG_SEG, "lfs_gather: lbn %" PRId64
			      " is BC_INVAL\n", bp->b_lblkno));
			VOP_PRINT(bp->b_vp);
		}
# endif /* LFS_USE_BC_INVAL */
		if (!(bp->b_oflags & BO_DELWRI))
			panic("lfs_gather: bp not BO_DELWRI");
		if (!(bp->b_flags & B_LOCKED)) {
			DLOG((DLOG_SEG, "lfs_gather: lbn %" PRId64
			      " blk %" PRId64 " not B_LOCKED\n",
			      bp->b_lblkno,
			      LFS_DBTOFSB(fs, bp->b_blkno)));
			VOP_PRINT(bp->b_vp);
			panic("lfs_gather: bp not B_LOCKED");
		}
#endif
		if (lfs_gatherblock(sp, bp, &bufcache_lock)) {
			goto loop;
		}
		count++;
	}
	mutex_exit(&bufcache_lock);
	lfs_updatemeta(sp);
	KASSERT(sp->vp == vp);
	sp->vp = NULL;
	return count;
}

#if DEBUG
# define DEBUG_OOFF(n) do {						\
	if (ooff == 0) {						\
		DLOG((DLOG_SEG, "lfs_updatemeta[%d]: warning: writing " \
			"ino %d lbn %" PRId64 " at 0x%" PRIx32		\
			", was 0x0 (or %" PRId64 ")\n",			\
			(n), ip->i_number, lbn, ndaddr, daddr));	\
	}								\
} while (0)
#else
# define DEBUG_OOFF(n)
#endif

/*
 * Change the given block's address to ndaddr, finding its previous
 * location using ulfs_bmaparray().
 *
 * Account for this change in the segment table.
 *
 * Called with sp == NULL by the roll-forward code.
 */
1467 void
1468 lfs_update_single(struct lfs *fs, struct segment *sp,
1469     struct vnode *vp, daddr_t lbn, daddr_t ndaddr, int size)
1470 {
1471 	SEGUSE *sup;
1472 	struct buf *bp;
1473 	struct indir a[ULFS_NIADDR + 2], *ap;
1474 	struct inode *ip;
1475 	daddr_t daddr, ooff;
1476 	int num, error;
1477 	int bb, osize, obb;
1478 
1479 	ASSERT_SEGLOCK(fs);
1480 	KASSERT(sp == NULL || sp->vp == vp);
1481 	ip = VTOI(vp);
1482 
1483 	error = ulfs_bmaparray(vp, lbn, &daddr, a, &num, NULL, NULL);
1484 	if (error)
1485 		panic("lfs_updatemeta: ulfs_bmaparray returned %d", error);
1486 
1487 	KASSERT(daddr <= LFS_MAX_DADDR(fs));
1488 	if (daddr > 0)
1489 		daddr = LFS_DBTOFSB(fs, daddr);
1490 
1491 	bb = lfs_numfrags(fs, size);
1492 	switch (num) {
1493 	    case 0:
1494 		    ooff = lfs_dino_getdb(fs, ip->i_din, lbn);
1495 		    DEBUG_OOFF(0);
1496 		    if (ooff == UNWRITTEN)
1497 			    lfs_dino_setblocks(fs, ip->i_din,
1498 				lfs_dino_getblocks(fs, ip->i_din) + bb);
1499 		    else {
1500 			    /* possible fragment truncation or extension */
1501 			    obb = lfs_btofsb(fs, ip->i_lfs_fragsize[lbn]);
1502 			    lfs_dino_setblocks(fs, ip->i_din,
1503 				lfs_dino_getblocks(fs, ip->i_din) + (bb-obb));
1504 		    }
1505 		    lfs_dino_setdb(fs, ip->i_din, lbn, ndaddr);
1506 		    break;
1507 	    case 1:
1508 		    ooff = lfs_dino_getib(fs, ip->i_din, a[0].in_off);
1509 		    DEBUG_OOFF(1);
1510 		    if (ooff == UNWRITTEN)
1511 			    lfs_dino_setblocks(fs, ip->i_din,
1512 				lfs_dino_getblocks(fs, ip->i_din) + bb);
1513 		    lfs_dino_setib(fs, ip->i_din, a[0].in_off, ndaddr);
1514 		    break;
1515 	    default:
1516 		    ap = &a[num - 1];
1517 		    if (bread(vp, ap->in_lbn, lfs_sb_getbsize(fs),
1518 			B_MODIFY, &bp))
1519 			    panic("lfs_updatemeta: bread bno %" PRId64,
1520 				  ap->in_lbn);
1521 
1522 		    ooff = lfs_iblock_get(fs, bp->b_data, ap->in_off);
1523 		    DEBUG_OOFF(num);
1524 		    if (ooff == UNWRITTEN)
1525 			    lfs_dino_setblocks(fs, ip->i_din,
1526 				lfs_dino_getblocks(fs, ip->i_din) + bb);
1527 		    lfs_iblock_set(fs, bp->b_data, ap->in_off, ndaddr);
1528 		    (void) VOP_BWRITE(bp->b_vp, bp);
1529 	}
1530 
1531 	KASSERT(ooff == 0 || ooff == UNWRITTEN || ooff == daddr);
1532 
1533 	/* Update hiblk when extending the file */
1534 	if (lbn > ip->i_lfs_hiblk)
1535 		ip->i_lfs_hiblk = lbn;
1536 
1537 	/*
1538 	 * Though we'd rather it couldn't, this *can* happen right now
1539 	 * if cleaning blocks and regular blocks coexist.
1540 	 */
1541 	/* KASSERT(daddr < fs->lfs_lastpseg || daddr > ndaddr); */
1542 
1543 	/*
1544 	 * Update segment usage information, based on old size
1545 	 * and location.
1546 	 */
1547 	if (daddr > 0) {
1548 		u_int32_t oldsn = lfs_dtosn(fs, daddr);
1549 		int ndupino __diagused = (sp && sp->seg_number == oldsn ?
1550 		    sp->ndupino : 0);
1551 
1552 		KASSERT(oldsn < lfs_sb_getnseg(fs));
1553 		if (lbn >= 0 && lbn < ULFS_NDADDR)
1554 			osize = ip->i_lfs_fragsize[lbn];
1555 		else
1556 			osize = lfs_sb_getbsize(fs);
1557 		LFS_SEGENTRY(sup, fs, oldsn, bp);
1558 		KASSERTMSG(((sup->su_nbytes + DINOSIZE(fs)*ndupino) >= osize),
1559 		    "lfs_updatemeta: negative bytes "
1560 		    "(segment %" PRIu32 " short by %" PRId64
1561 		    ")\n"
1562 		    "lfs_updatemeta: ino %llu, lbn %" PRId64
1563 		    ", addr = 0x%" PRIx64 "\n"
1564 		    "lfs_updatemeta: ndupino=%d",
1565 		    lfs_dtosn(fs, daddr),
1566 		    (int64_t)osize - (DINOSIZE(fs) * ndupino + sup->su_nbytes),
1567 		    (unsigned long long)ip->i_number, lbn, daddr,
1568 		    ndupino);
1569 		DLOG((DLOG_SU, "seg %" PRIu32 " -= %d for ino %d lbn %" PRId64
1570 		      " db 0x%" PRIx64 "\n",
1571 		      lfs_dtosn(fs, daddr), osize,
1572 		      ip->i_number, lbn, daddr));
1573 		sup->su_nbytes -= osize;
1574 		if (!(bp->b_flags & B_GATHERED)) {
1575 			mutex_enter(&lfs_lock);
1576 			fs->lfs_flags |= LFS_IFDIRTY;
1577 			mutex_exit(&lfs_lock);
1578 		}
1579 		LFS_WRITESEGENTRY(sup, fs, oldsn, bp);
1580 	}
1581 	/*
1582 	 * Now that this block has a new address, and its old
1583 	 * segment no longer owns it, we can forget about its
1584 	 * old size.
1585 	 */
1586 	if (lbn >= 0 && lbn < ULFS_NDADDR)
1587 		ip->i_lfs_fragsize[lbn] = size;
1588 }
1589 
1590 /*
1591  * Update the metadata that points to the blocks listed in the FINFO
1592  * array.
1593  */
1594 void
1595 lfs_updatemeta(struct segment *sp)
1596 {
1597 	struct buf *sbp;
1598 	struct lfs *fs;
1599 	struct vnode *vp;
1600 	daddr_t lbn;
1601 	int i, nblocks, num;
1602 	int __diagused nblocks_orig;
1603 	int bb;
1604 	int bytesleft, size;
1605 	unsigned lastlength;
1606 	union lfs_blocks tmpptr;
1607 
1608 	fs = sp->fs;
1609 	vp = sp->vp;
1610 	ASSERT_SEGLOCK(fs);
1611 
1612 	/*
1613 	 * This used to be:
1614 	 *
1615 	 *  nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
1616 	 *
1617 	 * that is, it allowed for the possibility that start_lbp did
1618 	 * not point to the beginning of the finfo block pointer area.
1619 	 * This particular formulation is six kinds of painful in the
1620 	 * lfs64 world where we have two sizes of block pointer, so
1621 	 * unless/until everything can be cleaned up to not move
1622 	 * start_lbp around but instead use an offset, we do the
1623 	 * following:
1624 	 *    1. Get NEXT_FINFO(sp->fip). This is the same pointer as
1625 	 * &sp->fip->fi_blocks[sp->fip->fi_nblocks], just the wrong
1626 	 * type. (Ugh.)
1627 	 *    2. Cast it to void *, then assign it to a temporary
1628 	 * union lfs_blocks.
1629 	 *    3. Subtract start_lbp from that.
1630 	 *    4. Save the value of nblocks in blocks_orig so we can
1631 	 * assert below that it hasn't changed without repeating this
1632 	 * rubbish.
1633 	 *
1634 	 * XXX.
1635 	 */
1636 	lfs_blocks_fromvoid(fs, &tmpptr, (void *)NEXT_FINFO(fs, sp->fip));
1637 	nblocks = lfs_blocks_sub(fs, &tmpptr, &sp->start_lbp);
1638 	nblocks_orig = nblocks;
1639 
1640 	KASSERT(nblocks >= 0);
1641 	KASSERT(vp != NULL);
1642 	if (nblocks == 0)
1643 		return;
1644 
1645 	/*
1646 	 * This count may be high due to oversize blocks from lfs_gop_write.
1647 	 * Correct for this. (XXX we should be able to keep track of these.)
1648 	 */
1649 	for (i = 0; i < nblocks; i++) {
1650 		if (sp->start_bpp[i] == NULL) {
1651 			DLOG((DLOG_SEG, "lfs_updatemeta: nblocks = %d, not %d\n", i, nblocks));
1652 			nblocks = i;
1653 			break;
1654 		}
1655 		num = howmany(sp->start_bpp[i]->b_bcount, lfs_sb_getbsize(fs));
1656 		KASSERT(sp->start_bpp[i]->b_lblkno >= 0 || num == 1);
1657 		nblocks -= num - 1;
1658 	}
1659 
1660 #if 0
1661 	/* pre-lfs64 assertion */
1662 	KASSERT(vp->v_type == VREG ||
1663 	   nblocks == &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp);
1664 #else
1665 	KASSERT(vp->v_type == VREG || nblocks == nblocks_orig);
1666 #endif
1667 	KASSERT(nblocks == sp->cbpp - sp->start_bpp);
1668 
1669 	/*
1670 	 * Sort the blocks.
1671 	 *
1672 	 * We have to sort even if the blocks come from the
1673 	 * cleaner, because there might be other pending blocks on the
1674 	 * same inode...and if we don't sort, and there are fragments
1675 	 * present, blocks may be written in the wrong place.
1676 	 */
1677 	lfs_shellsort(fs, sp->start_bpp, &sp->start_lbp, nblocks, lfs_sb_getbsize(fs));
1678 
1679 	/*
1680 	 * Record the length of the last block in case it's a fragment.
1681 	 * If there are indirect blocks present, they sort last.  An
1682 	 * indirect block will be lfs_bsize and its presence indicates
1683 	 * that you cannot have fragments.
1684 	 *
1685 	 * XXX This last is a lie.  A cleaned fragment can coexist with
1686 	 * XXX a later indirect block.	This will continue to be
1687 	 * XXX true until lfs_markv is fixed to do everything with
1688 	 * XXX fake blocks (including fake inodes and fake indirect blocks).
1689 	 */
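	/*
	 * For example, with an 8K block size bmask is 0x1fff, so a
	 * trailing 4K fragment yields ((4096 - 1) & 0x1fff) + 1 == 4096,
	 * while a full block yields 8192.
	 */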
1690 	lastlength = ((sp->start_bpp[nblocks - 1]->b_bcount - 1) &
1691 		lfs_sb_getbmask(fs)) + 1;
1692 	lfs_fi_setlastlength(fs, sp->fip, lastlength);
1693 
1694 	/*
1695 	 * Assign disk addresses, and update references to the logical
1696 	 * block and the segment usage information.
1697 	 */
1698 	for (i = nblocks; i--; ++sp->start_bpp) {
1699 		sbp = *sp->start_bpp;
1700 		lbn = lfs_blocks_get(fs, &sp->start_lbp, 0);
1701 		KASSERT(sbp->b_lblkno == lbn);
1702 
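		/*
		 * Assign the on-disk address: the current write offset,
		 * converted from filesystem blocks to device blocks.
		 */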
1703 		sbp->b_blkno = LFS_FSBTODB(fs, lfs_sb_getoffset(fs));
1704 
1705 		/*
1706 		 * If we write a frag in the wrong place, the cleaner won't
1707 		 * be able to correctly identify its size later, and the
1708 		 * segment will be uncleanable.	 (Even worse, it will assume
1709 		 * that the indirect block that actually ends the list
1710 		 * is of a smaller size!)
1711 		 */
1712 		if ((sbp->b_bcount & lfs_sb_getbmask(fs)) && i != 0)
1713 			panic("lfs_updatemeta: fragment is not last block");
1714 
1715 		/*
1716 		 * For each subblock in this possibly oversized block,
1717 		 * update its address on disk.
1718 		 */
1719 		KASSERT(lbn >= 0 || sbp->b_bcount == lfs_sb_getbsize(fs));
1720 		KASSERT(vp == sbp->b_vp);
1721 		for (bytesleft = sbp->b_bcount; bytesleft > 0;
1722 		     bytesleft -= lfs_sb_getbsize(fs)) {
1723 			size = MIN(bytesleft, lfs_sb_getbsize(fs));
1724 			bb = lfs_numfrags(fs, size);
1725 			lbn = lfs_blocks_get(fs, &sp->start_lbp, 0);
1726 			lfs_blocks_inc(fs, &sp->start_lbp);
1727 			lfs_update_single(fs, sp, sp->vp, lbn, lfs_sb_getoffset(fs),
1728 			    size);
1729 			lfs_sb_addoffset(fs, bb);
1730 		}
1731 
1732 	}
1733 
1734 	/* This inode has been modified */
1735 	LFS_SET_UINO(VTOI(vp), IN_MODIFIED);
1736 }
1737 
1738 /*
1739  * Move lfs_offset to a segment earlier than newsn.
1740  */
1741 int
1742 lfs_rewind(struct lfs *fs, int newsn)
1743 {
1744 	int sn, osn, isdirty;
1745 	struct buf *bp;
1746 	SEGUSE *sup;
1747 
1748 	ASSERT_SEGLOCK(fs);
1749 
1750 	osn = lfs_dtosn(fs, lfs_sb_getoffset(fs));
1751 	if (osn < newsn)
1752 		return 0;
1753 
1754 	/* lfs_avail eats the remaining space in this segment */
1755 	lfs_sb_subavail(fs, lfs_sb_getfsbpseg(fs) - (lfs_sb_getoffset(fs) - lfs_sb_getcurseg(fs)));
1756 
1757 	/* Find a low-numbered segment */
1758 	for (sn = 0; sn < lfs_sb_getnseg(fs); ++sn) {
1759 		LFS_SEGENTRY(sup, fs, sn, bp);
1760 		isdirty = sup->su_flags & SEGUSE_DIRTY;
1761 		brelse(bp, 0);
1762 
1763 		if (!isdirty)
1764 			break;
1765 	}
1766 	if (sn == lfs_sb_getnseg(fs))
1767 		panic("lfs_rewind: no clean segments");
1768 	if (newsn >= 0 && sn >= newsn)
1769 		return ENOENT;
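	/* Begin writing at the start of the clean segment we found. */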
1770 	lfs_sb_setnextseg(fs, lfs_sntod(fs, sn));
1771 	lfs_newseg(fs);
1772 	lfs_sb_setoffset(fs, lfs_sb_getcurseg(fs));
1773 
1774 	return 0;
1775 }
1776 
1777 /*
1778  * Start a new partial segment.
1779  *
1780  * Return 1 when we entered to a new segment.
1781  * Return 1 if we advanced to a new segment;
1782  * otherwise, return 0.
1783 int
1784 lfs_initseg(struct lfs *fs)
1785 {
1786 	struct segment *sp = fs->lfs_sp;
1787 	SEGSUM *ssp;
1788 	struct buf *sbp;	/* buffer for SEGSUM */
1789 	int repeat = 0;		/* return value */
1790 
1791 	ASSERT_SEGLOCK(fs);
1792 	/* Advance to the next segment. */
1793 	if (!LFS_PARTIAL_FITS(fs)) {
1794 		SEGUSE *sup;
1795 		struct buf *bp;
1796 
1797 		/* lfs_avail eats the remaining space */
1798 		lfs_sb_subavail(fs, lfs_sb_getfsbpseg(fs) - (lfs_sb_getoffset(fs) -
1799 						   lfs_sb_getcurseg(fs)));
1800 		/* Wake up any cleaning procs waiting on this file system. */
1801 		lfs_wakeup_cleaner(fs);
1802 		lfs_newseg(fs);
1803 		repeat = 1;
1804 		lfs_sb_setoffset(fs, lfs_sb_getcurseg(fs));
1805 
1806 		sp->seg_number = lfs_dtosn(fs, lfs_sb_getcurseg(fs));
1807 		sp->seg_bytes_left = lfs_fsbtob(fs, lfs_sb_getfsbpseg(fs));
1808 
1809 		/*
1810 		 * If the segment contains a superblock, update the offset
1811 		 * and summary address to skip over it.
1812 		 */
1813 		LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
1814 		if (sup->su_flags & SEGUSE_SUPERBLOCK) {
1815 			lfs_sb_addoffset(fs, lfs_btofsb(fs, LFS_SBPAD));
1816 			sp->seg_bytes_left -= LFS_SBPAD;
1817 		}
1818 		brelse(bp, 0);
1819 		/* Segment zero could also contain the labelpad */
1820 		if (lfs_sb_getversion(fs) > 1 && sp->seg_number == 0 &&
1821 		    lfs_sb_gets0addr(fs) < lfs_btofsb(fs, LFS_LABELPAD)) {
1822 			lfs_sb_addoffset(fs,
1823 			    lfs_btofsb(fs, LFS_LABELPAD) - lfs_sb_gets0addr(fs));
1824 			sp->seg_bytes_left -=
1825 			    LFS_LABELPAD - lfs_fsbtob(fs, lfs_sb_gets0addr(fs));
1826 		}
1827 	} else {
1828 		sp->seg_number = lfs_dtosn(fs, lfs_sb_getcurseg(fs));
1829 		sp->seg_bytes_left = lfs_fsbtob(fs, lfs_sb_getfsbpseg(fs) -
1830 				      (lfs_sb_getoffset(fs) - lfs_sb_getcurseg(fs)));
1831 	}
1832 	lfs_sb_setlastpseg(fs, lfs_sb_getoffset(fs));
1833 
1834 	/* Record first address of this partial segment */
1835 	if (sp->seg_flags & SEGM_CLEAN) {
1836 		fs->lfs_cleanint[fs->lfs_cleanind] = lfs_sb_getoffset(fs);
1837 		if (++fs->lfs_cleanind >= LFS_MAX_CLEANIND) {
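			/*
			 * The clean-segment interval table is full; wait
			 * for outstanding writes to drain before reusing it.
			 */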
1838 			/* "1" is the artificial inc in lfs_seglock */
1839 			mutex_enter(&lfs_lock);
1840 			while (fs->lfs_iocount > 1) {
1841 				mtsleep(&fs->lfs_iocount, PRIBIO + 1,
1842 				    "lfs_initseg", 0, &lfs_lock);
1843 			}
1844 			mutex_exit(&lfs_lock);
1845 			fs->lfs_cleanind = 0;
1846 		}
1847 	}
1848 
1849 	sp->fs = fs;
1850 	sp->ibp = NULL;
1851 	sp->idp = NULL;
1852 	sp->ninodes = 0;
1853 	sp->ndupino = 0;
1854 
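	/* Start filling the block pointer array from the beginning. */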
1855 	sp->cbpp = sp->bpp;
1856 
1857 	/* Get a new buffer for SEGSUM */
1858 	sbp = lfs_newbuf(fs, VTOI(fs->lfs_ivnode)->i_devvp,
1859 	    LFS_FSBTODB(fs, lfs_sb_getoffset(fs)), lfs_sb_getsumsize(fs), LFS_NB_SUMMARY);
1860 
1861 	/* ... and enter it into the buffer list. */
1862 	*sp->cbpp = sbp;
1863 	sp->cbpp++;
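	/* Advance the write offset past the summary block. */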
1864 	lfs_sb_addoffset(fs, lfs_btofsb(fs, lfs_sb_getsumsize(fs)));
1865 
1866 	sp->start_bpp = sp->cbpp;
1867 
1868 	/* Set point to SEGSUM, initialize it. */
1869 	ssp = sp->segsum = sbp->b_data;
1870 	memset(ssp, 0, lfs_sb_getsumsize(fs));
1871 	lfs_ss_setnext(fs, ssp, lfs_sb_getnextseg(fs));
1872 	lfs_ss_setnfinfo(fs, ssp, 0);
1873 	lfs_ss_setninos(fs, ssp, 0);
1874 	lfs_ss_setmagic(fs, ssp, SS_MAGIC);
1875 
1876 	/* Set pointer to first FINFO, initialize it. */
1877 	sp->fip = SEGSUM_FINFOBASE(fs, sp->segsum);
1878 	lfs_fi_setnblocks(fs, sp->fip, 0);
1879 	lfs_fi_setlastlength(fs, sp->fip, 0);
1880 	lfs_blocks_fromfinfo(fs, &sp->start_lbp, sp->fip);
1881 
1882 	sp->seg_bytes_left -= lfs_sb_getsumsize(fs);
1883 	sp->sum_bytes_left = lfs_sb_getsumsize(fs) - SEGSUM_SIZE(fs);
1884 
1885 	return (repeat);
1886 }
1887 
1888 /*
1889  * Remove SEGUSE_INVAL from all segments.
1890  */
1891 void
1892 lfs_unset_inval_all(struct lfs *fs)
1893 {
1894 	SEGUSE *sup;
1895 	struct buf *bp;
1896 	int i;
1897 
1898 	for (i = 0; i < lfs_sb_getnseg(fs); i++) {
1899 		LFS_SEGENTRY(sup, fs, i, bp);
1900 		if (sup->su_flags & SEGUSE_INVAL) {
1901 			sup->su_flags &= ~SEGUSE_INVAL;
1902 			LFS_WRITESEGENTRY(sup, fs, i, bp);
1903 		} else
1904 			brelse(bp, 0);
1905 	}
1906 }
1907 
1908 /*
1909  * Advance to the next segment to write and select a new lfs_nextseg.
1910  */
1911 void
1912 lfs_newseg(struct lfs *fs)
1913 {
1914 	CLEANERINFO *cip;
1915 	SEGUSE *sup;
1916 	struct buf *bp;
1917 	int curseg, isdirty, sn, skip_inval;
1918 
1919 	ASSERT_SEGLOCK(fs);
1920 
1921 	/* Honor LFCNWRAPSTOP */
1922 	mutex_enter(&lfs_lock);
1923 	while (lfs_sb_getnextseg(fs) < lfs_sb_getcurseg(fs) && fs->lfs_nowrap) {
1924 		if (fs->lfs_wrappass) {
1925 			log(LOG_NOTICE, "%s: wrappass=%d\n",
1926 				lfs_sb_getfsmnt(fs), fs->lfs_wrappass);
1927 			fs->lfs_wrappass = 0;
1928 			break;
1929 		}
1930 		fs->lfs_wrapstatus = LFS_WRAP_WAITING;
1931 		wakeup(&fs->lfs_nowrap);
1932 		log(LOG_NOTICE, "%s: waiting at log wrap\n", lfs_sb_getfsmnt(fs));
1933 		mtsleep(&fs->lfs_wrappass, PVFS, "newseg", 10 * hz,
1934 			&lfs_lock);
1935 	}
1936 	fs->lfs_wrapstatus = LFS_WRAP_GOING;
1937 	mutex_exit(&lfs_lock);
1938 
1939 	LFS_SEGENTRY(sup, fs, lfs_dtosn(fs, lfs_sb_getnextseg(fs)), bp);
1940 	DLOG((DLOG_SU, "lfs_newseg: seg %d := 0 in newseg\n",
1941 	      lfs_dtosn(fs, lfs_sb_getnextseg(fs))));
1942 	sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
1943 	sup->su_nbytes = 0;
1944 	sup->su_nsums = 0;
1945 	sup->su_ninos = 0;
1946 	LFS_WRITESEGENTRY(sup, fs, lfs_dtosn(fs, lfs_sb_getnextseg(fs)), bp);
1947 
1948 	LFS_CLEANERINFO(cip, fs, bp);
1949 	lfs_ci_shiftcleantodirty(fs, cip, 1);
1950 	lfs_sb_setnclean(fs, lfs_ci_getclean(fs, cip));
1951 	LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);
1952 
1953 	lfs_sb_setlastseg(fs, lfs_sb_getcurseg(fs));
1954 	lfs_sb_setcurseg(fs, lfs_sb_getnextseg(fs));
1955 	skip_inval = 1;
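	/*
	 * Scan forward from curseg + interleave for a clean segment.
	 * On the first pass (skip_inval) segments marked SEGUSE_INVAL
	 * are treated as dirty; if the scan wraps all the way around,
	 * retry accepting INVAL segments before giving up.
	 */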
1956 	for (sn = curseg = lfs_dtosn(fs, lfs_sb_getcurseg(fs)) + lfs_sb_getinterleave(fs);;) {
1957 		sn = (sn + 1) % lfs_sb_getnseg(fs);
1958 
1959 		if (sn == curseg) {
1960 			if (skip_inval)
1961 				skip_inval = 0;
1962 			else
1963 				panic("lfs_newseg: no clean segments");
1964 		}
1965 		LFS_SEGENTRY(sup, fs, sn, bp);
1966 		isdirty = sup->su_flags & (SEGUSE_DIRTY | (skip_inval ? SEGUSE_INVAL : 0));
1967 		/* Check SEGUSE_EMPTY as we go along */
1968 		if (isdirty && sup->su_nbytes == 0 &&
1969 		    !(sup->su_flags & SEGUSE_EMPTY))
1970 			LFS_WRITESEGENTRY(sup, fs, sn, bp);
1971 		else
1972 			brelse(bp, 0);
1973 
1974 		if (!isdirty)
1975 			break;
1976 	}
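	/*
	 * If we had to fall back to accepting INVAL segments, clear
	 * SEGUSE_INVAL everywhere so those segments are reconsidered.
	 */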
1977 	if (skip_inval == 0)
1978 		lfs_unset_inval_all(fs);
1979 
1980 	++fs->lfs_nactive;
1981 	lfs_sb_setnextseg(fs, lfs_sntod(fs, sn));
1982 	if (lfs_dostats) {
1983 		++lfs_stats.segsused;
1984 	}
1985 }
1986 
1987 static struct buf *
1988 lfs_newclusterbuf(struct lfs *fs, struct vnode *vp, daddr_t addr,
1989     int n)
1990 {
1991 	struct lfs_cluster *cl;
1992 	struct buf **bpp, *bp;
1993 
1994 	ASSERT_SEGLOCK(fs);
1995 	cl = (struct lfs_cluster *)pool_get(&fs->lfs_clpool, PR_WAITOK);
1996 	bpp = (struct buf **)pool_get(&fs->lfs_bpppool, PR_WAITOK);
1997 	memset(cl, 0, sizeof(*cl));
1998 	cl->fs = fs;
1999 	cl->bpp = bpp;
2000 	cl->bufcount = 0;
2001 	cl->bufsize = 0;
2002 
2003 	/* If this segment is being written synchronously, note that */
2004 	if (fs->lfs_sp->seg_flags & SEGM_SYNC) {
2005 		cl->flags |= LFS_CL_SYNC;
2006 		cl->seg = fs->lfs_sp;
2007 		++cl->seg->seg_iocount;
2008 	}
2009 
2010 	/* Get an empty buffer header to describe this cluster I/O */
2011 	bp = getiobuf(vp, true);
2012 	bp->b_dev = NODEV;
2013 	bp->b_blkno = bp->b_lblkno = addr;
2014 	bp->b_iodone = lfs_cluster_aiodone;
2015 	bp->b_private = cl;
2016 
2017 	return bp;
2018 }
2019 
2020 int
2021 lfs_writeseg(struct lfs *fs, struct segment *sp)
2022 {
2023 	struct buf **bpp, *bp, *cbp, *newbp, *unbusybp;
2024 	SEGUSE *sup;
2025 	SEGSUM *ssp;
2026 	int i;
2027 	int do_again, nblocks, byteoffset;
2028 	size_t el_size;
2029 	struct lfs_cluster *cl;
2030 	u_short ninos;
2031 	struct vnode *devvp;
2032 	char *p = NULL;
2033 	struct vnode *vp;
2034 	unsigned ibindex, iblimit;
2035 	int changed;
2036 	u_int32_t sum;
2037 	size_t sumstart;
2038 #ifdef DEBUG
2039 	FINFO *fip;
2040 	int findex;
2041 #endif
2042 
2043 	ASSERT_SEGLOCK(fs);
2044 
2045 	ssp = (SEGSUM *)sp->segsum;
2046 
2047 	/*
2048 	 * If there are no buffers other than the segment summary to write,
2049 	 * don't do anything.  If we are at the end of a dirop sequence, however,
2050 	 * write the empty segment summary anyway, to help out the
2051 	 * roll-forward agent.
2052 	 */
2053 	if ((nblocks = sp->cbpp - sp->bpp) == 1) {
2054 		if ((lfs_ss_getflags(fs, ssp) & (SS_DIROP | SS_CONT)) != SS_DIROP)
2055 			return 0;
2056 	}
2057 
2058 	/* Note if partial segment is being written by the cleaner */
2059 	if (sp->seg_flags & SEGM_CLEAN)
2060 		lfs_ss_setflags(fs, ssp, lfs_ss_getflags(fs, ssp) | SS_CLEAN);
2061 
2062 	/* Note if we are writing to reclaim */
2063 	if (sp->seg_flags & SEGM_RECLAIM) {
2064 		lfs_ss_setflags(fs, ssp, lfs_ss_getflags(fs, ssp) | SS_RECLAIM);
2065 		lfs_ss_setreclino(fs, ssp, fs->lfs_reclino);
2066 	}
2067 
2068 	devvp = VTOI(fs->lfs_ivnode)->i_devvp;
2069 
2070 	/* Update the segment usage information. */
2071 	LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
2072 
2073 	/* Loop through all blocks, except the segment summary. */
2074 	for (bpp = sp->bpp; ++bpp < sp->cbpp; ) {
2075 		if ((*bpp)->b_vp != devvp) {
2076 			sup->su_nbytes += (*bpp)->b_bcount;
2077 			DLOG((DLOG_SU, "seg %" PRIu32 " += %ld for ino %d"
2078 			      " lbn %" PRId64 " db 0x%" PRIx64 "\n",
2079 			      sp->seg_number, (*bpp)->b_bcount,
2080 			      VTOI((*bpp)->b_vp)->i_number, (*bpp)->b_lblkno,
2081 			      (*bpp)->b_blkno));
2082 		}
2083 	}
2084 
2085 #ifdef DEBUG
2086 	/* Check for zero-length and zero-version FINFO entries. */
2087 	fip = SEGSUM_FINFOBASE(fs, ssp);
2088 	for (findex = 0; findex < lfs_ss_getnfinfo(fs, ssp); findex++) {
2089 		KDASSERT(lfs_fi_getnblocks(fs, fip) > 0);
2090 		KDASSERT(lfs_fi_getversion(fs, fip) > 0);
2091 		fip = NEXT_FINFO(fs, fip);
2092 	}
2093 #endif /* DEBUG */
2094 
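	/* Round the inode count up to whole inode blocks. */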
2095 	ninos = (lfs_ss_getninos(fs, ssp) + LFS_INOPB(fs) - 1) / LFS_INOPB(fs);
2096 	DLOG((DLOG_SU, "seg %d += %d for %d inodes\n",
2097 	      sp->seg_number,
2098 	      lfs_ss_getninos(fs, ssp) * DINOSIZE(fs),
2099 	      lfs_ss_getninos(fs, ssp)));
2100 	sup->su_nbytes += lfs_ss_getninos(fs, ssp) * DINOSIZE(fs);
2101 	/* sup->su_nbytes += lfs_sb_getsumsize(fs); */
2102 	if (lfs_sb_getversion(fs) == 1)
2103 		sup->su_olastmod = time_second;
2104 	else
2105 		sup->su_lastmod = time_second;
2106 	sup->su_ninos += ninos;
2107 	++sup->su_nsums;
2108 	lfs_sb_subavail(fs, lfs_btofsb(fs, lfs_sb_getsumsize(fs)));
2109 
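	/*
	 * If the Ifile block holding this segment's usage entry was not
	 * gathered into this partial segment, it is still dirty and the
	 * caller will need to write another partial segment.
	 */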
2110 	do_again = !(bp->b_flags & B_GATHERED);
2111 	LFS_WRITESEGENTRY(sup, fs, sp->seg_number, bp); /* Ifile */
2112 
2113 	/*
2114 	 * Mark blocks BC_BUSY, to prevent them from being changed between
2115 	 * the checksum computation and the actual write.
2116 	 *
2117 	 * If we are cleaning, check indirect blocks for UNWRITTEN, and if
2118 	 * there are any, replace them with copies that have UNASSIGNED
2119 	 * instead.
2120 	 */
2121 	mutex_enter(&bufcache_lock);
2122 	for (bpp = sp->bpp, i = nblocks - 1; i--;) {
2123 		++bpp;
2124 		bp = *bpp;
2125 		if (bp->b_iodone != NULL) {	 /* UBC or malloced buffer */
2126 			bp->b_cflags |= BC_BUSY;
2127 			continue;
2128 		}
2129 
2130 		while (bp->b_cflags & BC_BUSY) {
2131 			DLOG((DLOG_SEG, "lfs_writeseg: avoiding potential"
2132 			      " data summary corruption for ino %d, lbn %"
2133 			      PRId64 "\n",
2134 			      VTOI(bp->b_vp)->i_number, bp->b_lblkno));
2135 			bp->b_cflags |= BC_WANTED;
2136 			cv_wait(&bp->b_busy, &bufcache_lock);
2137 		}
2138 		bp->b_cflags |= BC_BUSY;
2139 		mutex_exit(&bufcache_lock);
2140 		unbusybp = NULL;
2141 
2142 		/*
2143 		 * Check and replace indirect block UNWRITTEN bogosity.
2144 		 * XXX See comment in lfs_writefile.
2145 		 */
2146 		if (bp->b_lblkno < 0 && bp->b_vp != devvp && bp->b_vp &&
2147 		   lfs_dino_getblocks(fs, VTOI(bp->b_vp)->i_din) !=
2148 		   VTOI(bp->b_vp)->i_lfs_effnblks) {
2149 			DLOG((DLOG_VNODE, "lfs_writeseg: cleansing ino %d (%jd != %d)\n",
2150 			      VTOI(bp->b_vp)->i_number,
2151 			      (intmax_t)VTOI(bp->b_vp)->i_lfs_effnblks,
2152 			      lfs_dino_getblocks(fs, VTOI(bp->b_vp)->i_din)));
2153 			/* Make a copy we'll make changes to */
2154 			newbp = lfs_newbuf(fs, bp->b_vp, bp->b_lblkno,
2155 					   bp->b_bcount, LFS_NB_IBLOCK);
2156 			newbp->b_blkno = bp->b_blkno;
2157 			memcpy(newbp->b_data, bp->b_data,
2158 			       newbp->b_bcount);
2159 
2160 			changed = 0;
2161 			iblimit = newbp->b_bcount / LFS_BLKPTRSIZE(fs);
2162 			for (ibindex = 0; ibindex < iblimit; ibindex++) {
2163 				if (lfs_iblock_get(fs, newbp->b_data, ibindex) == UNWRITTEN) {
2164 					++changed;
2165 					lfs_iblock_set(fs, newbp->b_data,
2166 						       ibindex, 0);
2167 				}
2168 			}
2169 			/*
2170 			 * Get rid of the old buffer.  Don't mark it clean,
2171 			 * though, if it still has dirty data on it.
2172 			 */
2173 			if (changed) {
2174 				DLOG((DLOG_SEG, "lfs_writeseg: replacing UNWRITTEN(%d):"
2175 				      " bp = %p newbp = %p\n", changed, bp,
2176 				      newbp));
2177 				*bpp = newbp;
2178 				bp->b_flags &= ~B_GATHERED;
2179 				bp->b_error = 0;
2180 				if (bp->b_iodone != NULL) {
2181 					DLOG((DLOG_SEG, "lfs_writeseg: "
2182 					      "indir bp should not be B_CALL\n"));
2183 					biodone(bp);
2184 					bp = NULL;
2185 				} else {
2186 					/* Still on free list, leave it there */
2187 					unbusybp = bp;
2188 					/*
2189 					 * We have to re-decrement lfs_avail
2190 					 * since this block is going to come
2191 					 * back around to us in the next
2192 					 * segment.
2193 					 */
2194 					lfs_sb_subavail(fs,
2195 					    lfs_btofsb(fs, bp->b_bcount));
2196 				}
2197 			} else {
2198 				lfs_freebuf(fs, newbp);
2199 			}
2200 		}
2201 		mutex_enter(&bufcache_lock);
2202 		if (unbusybp != NULL) {
2203 			unbusybp->b_cflags &= ~BC_BUSY;
2204 			if (unbusybp->b_cflags & BC_WANTED)
2205 				cv_broadcast(&bp->b_busy);
2206 		}
2207 	}
2208 	mutex_exit(&bufcache_lock);
2209 
2210 	/*
2211 	 * Compute checksum across data and then across summary; the first
2212 	 * block (the summary block) is skipped.  Set the create time here
2213 	 * so that it's guaranteed to be later than the inode mod times.
2214 	 */
2215 	sum = 0;
2216 	if (lfs_sb_getversion(fs) == 1)
2217 		el_size = sizeof(u_long);
2218 	else
2219 		el_size = sizeof(u_int32_t);
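	/* Only the first el_size bytes of each filesystem block are
	 * folded into the data checksum. */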
2220 	for (bpp = sp->bpp, i = nblocks - 1; i--; ) {
2221 		++bpp;
2222 		/* Loop through gop_write cluster blocks */
2223 		for (byteoffset = 0; byteoffset < (*bpp)->b_bcount;
2224 		     byteoffset += lfs_sb_getbsize(fs)) {
2225 #ifdef LFS_USE_BC_INVAL
2226 			if (((*bpp)->b_cflags & BC_INVAL) != 0 &&
2227 			    (*bpp)->b_iodone != NULL) {
				/*
				 * Cleaner buffer: its data lives in user
				 * space, so copy the checksum element in
				 * before folding it into the running sum.
				 */
				char el_buf[sizeof(u_long)];

2228 				if (copyin((void *)(*bpp)->b_saveaddr +
2229 					   byteoffset, el_buf, el_size)) {
2230 					panic("lfs_writeseg: copyin failed [1]:"
2231 						" ino %" PRIu64 " blk %" PRId64,
2232 						VTOI((*bpp)->b_vp)->i_number,
2233 						(*bpp)->b_lblkno);
2234 				}
				sum = lfs_cksum_part(el_buf, el_size, sum);
2235 			} else
2236 #endif /* LFS_USE_BC_INVAL */
2237 			{
2238 				sum = lfs_cksum_part((char *)
2239 				    (*bpp)->b_data + byteoffset, el_size, sum);
2240 			}
2241 		}
2242 	}
2243 	if (lfs_sb_getversion(fs) == 1)
2244 		lfs_ss_setocreate(fs, ssp, time_second);
2245 	else {
2246 		lfs_ss_setcreate(fs, ssp, time_second);
2247 		lfs_sb_addserial(fs, 1);
2248 		lfs_ss_setserial(fs, ssp, lfs_sb_getserial(fs));
2249 		lfs_ss_setident(fs, ssp, lfs_sb_getident(fs));
2250 	}
2251 	lfs_ss_setdatasum(fs, ssp, lfs_cksum_fold(sum));
2252 	sumstart = lfs_ss_getsumstart(fs);
2253 	lfs_ss_setsumsum(fs, ssp, cksum((char *)ssp + sumstart,
2254 	    lfs_sb_getsumsize(fs) - sumstart));
2255 
2256 	mutex_enter(&lfs_lock);
2257 	lfs_sb_subbfree(fs, (lfs_btofsb(fs, ninos * lfs_sb_getibsize(fs)) +
2258 			  lfs_btofsb(fs, lfs_sb_getsumsize(fs))));
2259 	lfs_sb_adddmeta(fs, (lfs_btofsb(fs, ninos * lfs_sb_getibsize(fs)) +
2260 			  lfs_btofsb(fs, lfs_sb_getsumsize(fs))));
2261 	mutex_exit(&lfs_lock);
2262 
2263 	/*
2264 	 * When we simply write the blocks we lose a rotation for every block
2265 	 * written.  To avoid this problem, we cluster the buffers into a
2266 	 * chunk and write the chunk.  MAXPHYS is the largest size I/O
2267 	 * devices can handle, so use that for the size of the chunks.
2268 	 *
2269 	 * Blocks that are already clusters (from GOP_WRITE), however, we
2270 	 * don't bother to copy into other clusters.
2271 	 */
2272 
2273 #define CHUNKSIZE MAXPHYS
2274 
2275 	if (devvp == NULL)
2276 		panic("devvp is NULL");
2277 	for (bpp = sp->bpp, i = nblocks; i;) {
2278 		cbp = lfs_newclusterbuf(fs, devvp, (*bpp)->b_blkno, i);
2279 		cl = cbp->b_private;
2280 
2281 		cbp->b_flags |= B_ASYNC;
2282 		cbp->b_cflags |= BC_BUSY;
2283 		cbp->b_bcount = 0;
2284 
2285 		KASSERTMSG((bpp - sp->bpp <=
2286 			(lfs_sb_getsumsize(fs) - SEGSUM_SIZE(fs))
2287 			/ sizeof(int32_t)),
2288 		    "lfs_writeseg: real bpp overwrite");
2289 		KASSERTMSG((bpp - sp->bpp <=
2290 			lfs_segsize(fs) / lfs_sb_getfsize(fs)),
2291 		    "lfs_writeseg: theoretical bpp overwrite");
2292 
2293 		/*
2294 		 * Construct the cluster.
2295 		 */
2296 		mutex_enter(&lfs_lock);
2297 		++fs->lfs_iocount;
2298 		mutex_exit(&lfs_lock);
2299 		while (i && cbp->b_bcount < CHUNKSIZE) {
2300 			bp = *bpp;
2301 
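			/*
			 * Stop filling the cluster if this buffer would
			 * overflow the chunk, or if the cluster borrows
			 * its data pointer (not malloc'ed) and so can
			 * describe only a single oversize buffer.
			 */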
2302 			if (bp->b_bcount > (CHUNKSIZE - cbp->b_bcount))
2303 				break;
2304 			if (cbp->b_bcount > 0 && !(cl->flags & LFS_CL_MALLOC))
2305 				break;
2306 
2307 			/* Clusters from GOP_WRITE are expedited */
2308 			if (bp->b_bcount > lfs_sb_getbsize(fs)) {
2309 				if (cbp->b_bcount > 0)
2310 					/* Put in its own buffer */
2311 					break;
2312 				else {
2313 					cbp->b_data = bp->b_data;
2314 				}
2315 			} else if (cbp->b_bcount == 0) {
2316 				p = cbp->b_data = lfs_malloc(fs, CHUNKSIZE,
2317 							     LFS_NB_CLUSTER);
2318 				cl->flags |= LFS_CL_MALLOC;
2319 			}
2320 			KASSERTMSG((lfs_dtosn(fs, LFS_DBTOFSB(fs, bp->b_blkno +
2321 					btodb(bp->b_bcount - 1))) ==
2322 				sp->seg_number),
2323 			    "segment overwrite: blk size %d daddr %" PRIx64
2324 			    " not in seg %d\n",
2325 			    bp->b_bcount, bp->b_blkno,
2326 			    sp->seg_number);
2327 
2328 #ifdef LFS_USE_BC_INVAL
2329 			/*
2330 			 * Fake buffers from the cleaner are marked as BC_INVAL.
2331 			 * We need to copy the data from user space rather than
2332 			 * from the buffer indicated.
2333 			 * XXX == what do I do on an error?
2334 			 */
2335 			if ((bp->b_cflags & BC_INVAL) != 0 &&
2336 			    bp->b_iodone != NULL) {
2337 				if (copyin(bp->b_saveaddr, p, bp->b_bcount))
2338 					panic("lfs_writeseg: "
2339 					    "copyin failed [2]");
2340 			} else
2341 #endif /* LFS_USE_BC_INVAL */
2342 			if (cl->flags & LFS_CL_MALLOC) {
2343 				/* copy data into our cluster. */
2344 				memcpy(p, bp->b_data, bp->b_bcount);
2345 				p += bp->b_bcount;
2346 			}
2347 
2348 			cbp->b_bcount += bp->b_bcount;
2349 			cl->bufsize += bp->b_bcount;
2350 
2351 			bp->b_flags &= ~B_READ;
2352 			bp->b_error = 0;
2353 			cl->bpp[cl->bufcount++] = bp;
2354 
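			/*
			 * Clear the delayed-write state, move the buffer to
			 * the vnode's clean list, and count the write in
			 * progress against the vnode (v_numoutput).
			 */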
2355 			vp = bp->b_vp;
2356 			mutex_enter(&bufcache_lock);
2357 			mutex_enter(vp->v_interlock);
2358 			bp->b_oflags &= ~(BO_DELWRI | BO_DONE);
2359 			reassignbuf(bp, vp);
2360 			vp->v_numoutput++;
2361 			mutex_exit(vp->v_interlock);
2362 			mutex_exit(&bufcache_lock);
2363 
2364 			bpp++;
2365 			i--;
2366 		}
2367 		if (fs->lfs_sp->seg_flags & SEGM_SYNC)
2368 			BIO_SETPRIO(cbp, BPRIO_TIMECRITICAL);
2369 		else
2370 			BIO_SETPRIO(cbp, BPRIO_TIMELIMITED);
2371 		mutex_enter(devvp->v_interlock);
2372 		devvp->v_numoutput++;
2373 		mutex_exit(devvp->v_interlock);
2374 		VOP_STRATEGY(devvp, cbp);
2375 		curlwp->l_ru.ru_oublock++;
2376 	}
2377 
2378 	if (lfs_dostats) {
2379 		++lfs_stats.psegwrites;
2380 		lfs_stats.blocktot += nblocks - 1;
2381 		if (fs->lfs_sp->seg_flags & SEGM_SYNC)
2382 			++lfs_stats.psyncwrites;
2383 		if (fs->lfs_sp->seg_flags & SEGM_CLEAN) {
2384 			++lfs_stats.pcleanwrites;
2385 			lfs_stats.cleanblocks += nblocks - 1;
2386 		}
2387 	}
2388 
2389 	return (lfs_initseg(fs) || do_again);
2390 }
2391 
2392 void
2393 lfs_writesuper(struct lfs *fs, daddr_t daddr)
2394 {
2395 	struct buf *bp;
2396 	struct vnode *devvp = VTOI(fs->lfs_ivnode)->i_devvp;
2397 
2398 	ASSERT_MAYBE_SEGLOCK(fs);
2399 	if (fs->lfs_is64) {
2400 		KASSERT(fs->lfs_dlfs_u.u_64.dlfs_magic == LFS64_MAGIC);
2401 	} else {
2402 		KASSERT(fs->lfs_dlfs_u.u_32.dlfs_magic == LFS_MAGIC);
2403 	}
2404 	/*
2405 	 * If we allowed one superblock write to begin while another is
2406 	 * still in progress, we would risk an incomplete checkpoint if we crash.
2407 	 * So, block here if a superblock write is in progress.
2408 	 */
2409 	mutex_enter(&lfs_lock);
2410 	while (fs->lfs_sbactive) {
2411 		mtsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs sb", 0,
2412 			&lfs_lock);
2413 	}
2414 	fs->lfs_sbactive = daddr;
2415 	mutex_exit(&lfs_lock);
2416 
2417 	/* Set timestamp of this version of the superblock */
2418 	if (lfs_sb_getversion(fs) == 1)
2419 		lfs_sb_setotstamp(fs, time_second);
2420 	lfs_sb_settstamp(fs, time_second);
2421 
2422 	/* The next chunk of code relies on this assumption */
2423 	CTASSERT(sizeof(struct dlfs) == sizeof(struct dlfs64));
2424 
2425 	/* Checksum the superblock and copy it into a buffer. */
2426 	lfs_sb_setcksum(fs, lfs_sb_cksum(fs));
2427 	bp = lfs_newbuf(fs, devvp,
2428 	    LFS_FSBTODB(fs, daddr), LFS_SBPAD, LFS_NB_SBLOCK);
2429 	memcpy(bp->b_data, &fs->lfs_dlfs_u, sizeof(struct dlfs));
2430 	memset((char *)bp->b_data + sizeof(struct dlfs), 0,
2431 	    LFS_SBPAD - sizeof(struct dlfs));
2432 
2433 	bp->b_cflags |= BC_BUSY;
2434 	bp->b_flags = (bp->b_flags & ~B_READ) | B_ASYNC;
2435 	bp->b_oflags &= ~(BO_DONE | BO_DELWRI);
2436 	bp->b_error = 0;
2437 	bp->b_iodone = lfs_super_aiodone;
2438 
2439 	if (fs->lfs_sp != NULL && fs->lfs_sp->seg_flags & SEGM_SYNC)
2440 		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
2441 	else
2442 		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
2443 	curlwp->l_ru.ru_oublock++;
2444 
2445 	mutex_enter(devvp->v_interlock);
2446 	devvp->v_numoutput++;
2447 	mutex_exit(devvp->v_interlock);
2448 
2449 	mutex_enter(&lfs_lock);
2450 	++fs->lfs_iocount;
2451 	mutex_exit(&lfs_lock);
2452 	VOP_STRATEGY(devvp, bp);
2453 }
2454 
2455 /*
2456  * Logical block number match routines used when traversing the dirty block
2457  * chain.
2458  */
2459 int
2460 lfs_match_fake(struct lfs *fs, struct buf *bp)
2461 {
2462 
2463 	ASSERT_SEGLOCK(fs);
2464 	return LFS_IS_MALLOC_BUF(bp);
2465 }
2466 
2467 #if 0
2468 int
2469 lfs_match_real(struct lfs *fs, struct buf *bp)
2470 {
2471 
2472 	ASSERT_SEGLOCK(fs);
2473 	return (lfs_match_data(fs, bp) && !lfs_match_fake(fs, bp));
2474 }
2475 #endif
2476 
2477 int
2478 lfs_match_data(struct lfs *fs, struct buf *bp)
2479 {
2480 
2481 	ASSERT_SEGLOCK(fs);
2482 	return (bp->b_lblkno >= 0);
2483 }
2484 
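/*
 * Indirect blocks live at negative logical block numbers; the residue
 * of (-lbn - ULFS_NDADDR) modulo LFS_NINDIR distinguishes single (0),
 * double (1), and triple (2) indirect blocks, as the three matchers
 * below rely upon.
 */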
2485 int
2486 lfs_match_indir(struct lfs *fs, struct buf *bp)
2487 {
2488 	daddr_t lbn;
2489 
2490 	ASSERT_SEGLOCK(fs);
2491 	lbn = bp->b_lblkno;
2492 	return (lbn < 0 && (-lbn - ULFS_NDADDR) % LFS_NINDIR(fs) == 0);
2493 }
2494 
2495 int
2496 lfs_match_dindir(struct lfs *fs, struct buf *bp)
2497 {
2498 	daddr_t lbn;
2499 
2500 	ASSERT_SEGLOCK(fs);
2501 	lbn = bp->b_lblkno;
2502 	return (lbn < 0 && (-lbn - ULFS_NDADDR) % LFS_NINDIR(fs) == 1);
2503 }
2504 
2505 int
2506 lfs_match_tindir(struct lfs *fs, struct buf *bp)
2507 {
2508 	daddr_t lbn;
2509 
2510 	ASSERT_SEGLOCK(fs);
2511 	lbn = bp->b_lblkno;
2512 	return (lbn < 0 && (-lbn - ULFS_NDADDR) % LFS_NINDIR(fs) == 2);
2513 }
2514 
2515 void
2516 lfs_free_aiodone(struct buf *bp)
2517 {
2518 	struct lfs *fs;
2519 
2520 	KERNEL_LOCK(1, curlwp);
2521 	fs = bp->b_private;
2522 	ASSERT_NO_SEGLOCK(fs);
2523 	lfs_freebuf(fs, bp);
2524 	KERNEL_UNLOCK_ONE(curlwp);
2525 }
2526 
2527 static void
2528 lfs_super_aiodone(struct buf *bp)
2529 {
2530 	struct lfs *fs;
2531 
2532 	KERNEL_LOCK(1, curlwp);
2533 	fs = bp->b_private;
2534 	ASSERT_NO_SEGLOCK(fs);
2535 	mutex_enter(&lfs_lock);
2536 	fs->lfs_sbactive = 0;
2537 	if (--fs->lfs_iocount <= 1)
2538 		wakeup(&fs->lfs_iocount);
2539 	wakeup(&fs->lfs_sbactive);
2540 	mutex_exit(&lfs_lock);
2541 	lfs_freebuf(fs, bp);
2542 	KERNEL_UNLOCK_ONE(curlwp);
2543 }
2544 
2545 static void
2546 lfs_cluster_aiodone(struct buf *bp)
2547 {
2548 	struct lfs_cluster *cl;
2549 	struct lfs *fs;
2550 	struct buf *tbp, *fbp;
2551 	struct vnode *vp, *devvp, *ovp;
2552 	struct inode *ip;
2553 	int error;
2554 
2555 	KERNEL_LOCK(1, curlwp);
2556 
2557 	error = bp->b_error;
2558 	cl = bp->b_private;
2559 	fs = cl->fs;
2560 	devvp = VTOI(fs->lfs_ivnode)->i_devvp;
2561 	ASSERT_NO_SEGLOCK(fs);
2562 
2563 	/* Put the pages back, and release the buffer */
2564 	while (cl->bufcount--) {
2565 		tbp = cl->bpp[cl->bufcount];
2566 		KASSERT(tbp->b_cflags & BC_BUSY);
2567 		if (error) {
2568 			tbp->b_error = error;
2569 		}
2570 
2571 		/*
2572 		 * We're done with tbp.	 If it has not been re-dirtied since
2573 		 * the cluster was written, free it.  Otherwise, keep it on
2574 		 * the locked list to be written again.
2575 		 */
2576 		vp = tbp->b_vp;
2577 
2578 		tbp->b_flags &= ~B_GATHERED;
2579 
2580 #ifdef DEBUG
2581 		if ((tbp)->b_vp == (fs)->lfs_ivnode)
2582 			LFS_ENTER_LOG("clear", __FILE__, __LINE__,
2583 			    tbp->b_lblkno, tbp->b_flags, curproc->p_pid);
2584 #endif
2585 
2586 		mutex_enter(&bufcache_lock);
2587 		if (tbp->b_iodone == NULL) {
2588 			KASSERT(tbp->b_flags & B_LOCKED);
2589 			bremfree(tbp);
2590 			if (vp) {
2591 				mutex_enter(vp->v_interlock);
2592 				reassignbuf(tbp, vp);
2593 				mutex_exit(vp->v_interlock);
2594 			}
2595 			tbp->b_flags |= B_ASYNC; /* for biodone */
2596 		}
2597 
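		/*
		 * If the buffer is still accounted as locked but was not
		 * re-dirtied while in flight, release it from the
		 * locked-buffer accounting.
		 */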
2598 		if ((tbp->b_flags & B_LOCKED) && !(tbp->b_oflags & BO_DELWRI))
2599 			LFS_UNLOCK_BUF(tbp);
2600 
2601 		if (tbp->b_oflags & BO_DONE) {
2602 			DLOG((DLOG_SEG, "blk %d biodone already (flags %lx)\n",
2603 				cl->bufcount, (long)tbp->b_flags));
2604 		}
2605 
2606 		if (tbp->b_iodone != NULL && !LFS_IS_MALLOC_BUF(tbp)) {
2607 			/*
2608 			 * A buffer from the page daemon.
2609 			 * We use the same iodone as it does,
2610 			 * so we must manually disassociate the
2611 			 * buffer from the vnode.
2612 			 */
2613 			if ((ovp = tbp->b_vp) != NULL) {
2614 				/* This is just silly */
2615 				mutex_enter(ovp->v_interlock);
2616 				brelvp(tbp);
2617 				mutex_exit(ovp->v_interlock);
2618 				tbp->b_vp = vp;
2619 				tbp->b_objlock = vp->v_interlock;
2620 			}
2621 			/* Put it back the way it was */
2622 			tbp->b_flags |= B_ASYNC;
2623 			/* Master buffers have BC_AGE */
2624 			if (tbp->b_private == tbp)
2625 				tbp->b_cflags |= BC_AGE;
2626 		}
2627 		mutex_exit(&bufcache_lock);
2628 
2629 		biodone(tbp);
2630 
2631 		/*
2632 		 * If this is the last block for this vnode, but
2633 		 * there are other blocks on its dirty list,
2634 		 * set IN_MODIFIED/IN_CLEANING depending on what
2635 		 * sort of block.  Only do this for our mount point,
2636 		 * not for, e.g., inode blocks that are attached to
2637 		 * the devvp.
2638 		 * XXX KS - Shouldn't we set *both* if both types
2639 		 * of blocks are present (traverse the dirty list?)
2640 		 */
2641 		mutex_enter(vp->v_interlock);
2642 		mutex_enter(&lfs_lock);
2643 		if (vp != devvp && vp->v_numoutput == 0 &&
2644 		    (fbp = LIST_FIRST(&vp->v_dirtyblkhd)) != NULL) {
2645 			ip = VTOI(vp);
2646 			DLOG((DLOG_SEG, "lfs_cluster_aiodone: mark ino %d\n",
2647 			       ip->i_number));
2648 			if (LFS_IS_MALLOC_BUF(fbp))
2649 				LFS_SET_UINO(ip, IN_CLEANING);
2650 			else
2651 				LFS_SET_UINO(ip, IN_MODIFIED);
2652 		}
2653 		cv_broadcast(&vp->v_cv);
2654 		mutex_exit(&lfs_lock);
2655 		mutex_exit(vp->v_interlock);
2656 	}
2657 
2658 	/* Fix up the cluster buffer, and release it */
2659 	if (cl->flags & LFS_CL_MALLOC)
2660 		lfs_free(fs, bp->b_data, LFS_NB_CLUSTER);
2661 	putiobuf(bp);
2662 
2663 	/* Note i/o done */
2664 	if (cl->flags & LFS_CL_SYNC) {
2665 		if (--cl->seg->seg_iocount == 0)
2666 			wakeup(&cl->seg->seg_iocount);
2667 	}
2668 	mutex_enter(&lfs_lock);
2669 	KASSERTMSG((fs->lfs_iocount != 0),
2670 	    "lfs_cluster_aiodone: zero iocount");
2671 	if (--fs->lfs_iocount <= 1)
2672 		wakeup(&fs->lfs_iocount);
2673 	mutex_exit(&lfs_lock);
2674 
2675 	KERNEL_UNLOCK_ONE(curlwp);
2676 
2677 	pool_put(&fs->lfs_bpppool, cl->bpp);
2678 	cl->bpp = NULL;
2679 	pool_put(&fs->lfs_clpool, cl);
2680 }
2681 
2682 /*
2683  * Shellsort (diminishing increment sort) from Data Structures and
2684  * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
2685  * see also Knuth Vol. 3, page 84.  The increments are selected from
2686  * formula (8), page 95.  Roughly O(N^3/2).
2687  */
2688 /*
2689  * This is our own private copy of shellsort because we want to sort
2690  * two parallel arrays (the array of buffer pointers and the array of
2691  * logical block numbers) simultaneously.  Note that we cast the array
2692  * of logical block numbers to unsigned in this routine so that the
2693  * negative block numbers (metadata blocks) sort AFTER the data blocks.
2694  */
2695 
2696 static void
2697 lfs_shellsort(struct lfs *fs,
2698 	      struct buf **bp_array, union lfs_blocks *lb_array,
2699 	      int nmemb, int size)
2700 {
2701 	static int __rsshell_increments[] = { 4, 1, 0 };
2702 	int incr, *incrp, t1, t2;
2703 	struct buf *bp_temp;
2704 
2705 #ifdef DEBUG
2706 	incr = 0;
2707 	for (t1 = 0; t1 < nmemb; t1++) {
2708 		for (t2 = 0; t2 * size < bp_array[t1]->b_bcount; t2++) {
2709 			if (lfs_blocks_get(fs, lb_array, incr++) != bp_array[t1]->b_lblkno + t2) {
2710 				/* dump before panic */
2711 				printf("lfs_shellsort: nmemb=%d, size=%d\n",
2712 				    nmemb, size);
2713 				incr = 0;
2714 				for (t1 = 0; t1 < nmemb; t1++) {
2715 					const struct buf *bp = bp_array[t1];
2716 
2717 					printf("bp[%d]: lbn=%" PRIu64 ", size=%"
2718 					    PRIu64 "\n", t1,
2719 					    (uint64_t)bp->b_lblkno,
2720 					    (uint64_t)bp->b_bcount);
2721 					printf("lbns:");
2722 					for (t2 = 0; t2 * size < bp->b_bcount;
2723 					    t2++) {
2724 						printf(" %jd",
2725 						    (intmax_t)lfs_blocks_get(fs, lb_array, incr++));
2726 					}
2727 					printf("\n");
2728 				}
2729 				panic("lfs_shellsort: inconsistent input");
2730 			}
2731 		}
2732 	}
2733 #endif
2734 
2735 	for (incrp = __rsshell_increments; (incr = *incrp++) != 0;)
2736 		for (t1 = incr; t1 < nmemb; ++t1)
2737 			for (t2 = t1 - incr; t2 >= 0;)
2738 				if ((u_int64_t)bp_array[t2]->b_lblkno >
2739 				    (u_int64_t)bp_array[t2 + incr]->b_lblkno) {
2740 					bp_temp = bp_array[t2];
2741 					bp_array[t2] = bp_array[t2 + incr];
2742 					bp_array[t2 + incr] = bp_temp;
2743 					t2 -= incr;
2744 				} else
2745 					break;
2746 
2747 	/* Reform the list of logical blocks */
2748 	incr = 0;
2749 	for (t1 = 0; t1 < nmemb; t1++) {
2750 		for (t2 = 0; t2 * size < bp_array[t1]->b_bcount; t2++) {
2751 			lfs_blocks_set(fs, lb_array, incr++,
2752 				       bp_array[t1]->b_lblkno + t2);
2753 		}
2754 	}
2755 }
2756 
2757 /*
2758  * Set up an FINFO entry for a new file.  The fip pointer is assumed to
2759  * point at uninitialized space.
2760  */
2761 void
2762 lfs_acquire_finfo(struct lfs *fs, ino_t ino, int vers)
2763 {
2764 	struct segment *sp = fs->lfs_sp;
2765 	SEGSUM *ssp;
2766 
2767 	KASSERT(vers > 0);
2768 
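	/*
	 * If the data region cannot hold another block, or the summary
	 * cannot hold this FINFO plus one block pointer, flush the
	 * current partial segment first.
	 */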
2769 	if (sp->seg_bytes_left < lfs_sb_getbsize(fs) ||
2770 	    sp->sum_bytes_left < FINFOSIZE(fs) + LFS_BLKPTRSIZE(fs))
2771 		(void) lfs_writeseg(fs, fs->lfs_sp);
2772 
2773 	sp->sum_bytes_left -= FINFOSIZE(fs);
2774 	ssp = (SEGSUM *)sp->segsum;
2775 	lfs_ss_setnfinfo(fs, ssp, lfs_ss_getnfinfo(fs, ssp) + 1);
2776 	lfs_fi_setnblocks(fs, sp->fip, 0);
2777 	lfs_fi_setino(fs, sp->fip, ino);
2778 	lfs_fi_setversion(fs, sp->fip, vers);
2779 }
2780 
2781 /*
2782  * Release the FINFO entry, either clearing out an unused entry or
2783  * advancing us to the next available entry.
2784  */
2785 void
2786 lfs_release_finfo(struct lfs *fs)
2787 {
2788 	struct segment *sp = fs->lfs_sp;
2789 	SEGSUM *ssp;
2790 
2791 	if (lfs_fi_getnblocks(fs, sp->fip) != 0) {
2792 		sp->fip = NEXT_FINFO(fs, sp->fip);
2793 		lfs_blocks_fromfinfo(fs, &sp->start_lbp, sp->fip);
2794 	} else {
2795 		/* XXX shouldn't this update sp->fip? */
2796 		sp->sum_bytes_left += FINFOSIZE(fs);
2797 		ssp = (SEGSUM *)sp->segsum;
2798 		lfs_ss_setnfinfo(fs, ssp, lfs_ss_getnfinfo(fs, ssp) - 1);
2799 	}
2800 }
2801