xref: /netbsd-src/sys/ufs/lfs/lfs_segment.c (revision 5e4c038a45edbc7d63b7c2daa76e29f88b64a4e3)
1 /*	$NetBSD: lfs_segment.c,v 1.78 2002/05/24 22:13:57 perseant Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Konrad E. Schroder <perseant@hhhh.org>.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *      This product includes software developed by the NetBSD
21  *      Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 /*
39  * Copyright (c) 1991, 1993
40  *	The Regents of the University of California.  All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by the University of
53  *	California, Berkeley and its contributors.
54  * 4. Neither the name of the University nor the names of its contributors
55  *    may be used to endorse or promote products derived from this software
56  *    without specific prior written permission.
57  *
58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68  * SUCH DAMAGE.
69  *
70  *	@(#)lfs_segment.c	8.10 (Berkeley) 6/10/95
71  */
72 
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.78 2002/05/24 22:13:57 perseant Exp $");
75 
76 #define ivndebug(vp,str) printf("ino %d: %s\n",VTOI(vp)->i_number,(str))
77 
78 #if defined(_KERNEL_OPT)
79 #include "opt_ddb.h"
80 #endif
81 
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/namei.h>
85 #include <sys/kernel.h>
86 #include <sys/resourcevar.h>
87 #include <sys/file.h>
88 #include <sys/stat.h>
89 #include <sys/buf.h>
90 #include <sys/proc.h>
91 #include <sys/conf.h>
92 #include <sys/vnode.h>
93 #include <sys/malloc.h>
94 #include <sys/mount.h>
95 
96 #include <miscfs/specfs/specdev.h>
97 #include <miscfs/fifofs/fifo.h>
98 
99 #include <ufs/ufs/inode.h>
100 #include <ufs/ufs/dir.h>
101 #include <ufs/ufs/ufsmount.h>
102 #include <ufs/ufs/ufs_extern.h>
103 
104 #include <ufs/lfs/lfs.h>
105 #include <ufs/lfs/lfs_extern.h>
106 
107 #include <uvm/uvm_extern.h>
108 
109 extern int count_lock_queue(void);
110 extern struct simplelock vnode_free_list_slock;		/* XXX */
111 
112 static void lfs_cluster_callback(struct buf *);
113 static struct buf **lookahead_pagemove(struct buf **, int, size_t *);
114 
115 /*
116  * Determine if it's OK to start a partial in this segment, or if we need
117  * to go on to a new segment.
118  */
119 #define	LFS_PARTIAL_FITS(fs) \
120 	((fs)->lfs_fsbpseg - ((fs)->lfs_offset - (fs)->lfs_curseg) > \
121 	fragstofsb((fs), (fs)->lfs_frag))
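/*
 * Worked example (assumed numbers, for illustration only): with
 * lfs_fsbpseg = 1024 fsb per segment and lfs_offset - lfs_curseg =
 * 1000 fsb already allocated, 24 fsb remain; if fragstofsb(fs,
 * lfs_frag) = 8 fsb per block, 24 > 8 and a partial still fits.
 * At 1020 fsb used, only 4 fsb would remain and it would not.
 */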
122 
123 void	 lfs_callback(struct buf *);
124 int	 lfs_gather(struct lfs *, struct segment *,
125 	     struct vnode *, int (*)(struct lfs *, struct buf *));
126 int	 lfs_gatherblock(struct segment *, struct buf *, int *);
127 void	 lfs_iset(struct inode *, ufs_daddr_t, time_t);
128 int	 lfs_match_fake(struct lfs *, struct buf *);
129 int	 lfs_match_data(struct lfs *, struct buf *);
130 int	 lfs_match_dindir(struct lfs *, struct buf *);
131 int	 lfs_match_indir(struct lfs *, struct buf *);
132 int	 lfs_match_tindir(struct lfs *, struct buf *);
133 void	 lfs_newseg(struct lfs *);
134 void	 lfs_shellsort(struct buf **, ufs_daddr_t *, int);
135 void	 lfs_supercallback(struct buf *);
136 void	 lfs_updatemeta(struct segment *);
137 int	 lfs_vref(struct vnode *);
138 void	 lfs_vunref(struct vnode *);
139 void	 lfs_writefile(struct lfs *, struct segment *, struct vnode *);
140 int	 lfs_writeinode(struct lfs *, struct segment *, struct inode *);
141 int	 lfs_writeseg(struct lfs *, struct segment *);
142 void	 lfs_writesuper(struct lfs *, daddr_t);
143 int	 lfs_writevnodes(struct lfs *fs, struct mount *mp,
144 	    struct segment *sp, int dirops);
145 
146 int	lfs_allclean_wakeup;		/* Cleaner wakeup address. */
147 int	lfs_writeindir = 1;             /* whether to flush indir on non-ckp */
148 int	lfs_clean_vnhead = 0;		/* Allow freeing to head of vn list */
149 int	lfs_dirvcount = 0;		/* # active dirops */
150 
151 /* Statistics Counters */
152 int lfs_dostats = 1;
153 struct lfs_stats lfs_stats;
154 
155 extern int locked_queue_count;
156 extern long locked_queue_bytes;
157 
158 /* op values to lfs_writevnodes */
159 #define	VN_REG	        0
160 #define	VN_DIROP	1
161 #define	VN_EMPTY	2
162 #define VN_CLEAN        3
163 
164 #define LFS_MAX_ACTIVE          10
165 
166 /*
167  * XXX KS - Set modification time on the Ifile, so the cleaner can
168  * read the fs mod time off of it.  We don't set IN_UPDATE here,
169  * since we don't really need this to be flushed to disk (and in any
170  * case that wouldn't happen to the Ifile until we checkpoint).
171  */
172 void
173 lfs_imtime(struct lfs *fs)
174 {
175 	struct timespec ts;
176 	struct inode *ip;
177 
178 	TIMEVAL_TO_TIMESPEC(&time, &ts);
179 	ip = VTOI(fs->lfs_ivnode);
180 	ip->i_ffs_mtime = ts.tv_sec;
181 	ip->i_ffs_mtimensec = ts.tv_nsec;
182 }
183 
184 /*
185  * Ifile and meta data blocks are not marked busy, so segment writes MUST be
186  * single threaded.  Currently, there are two paths into lfs_segwrite, sync()
187  * and getnewbuf().  They both mark the file system busy.  Lfs_vflush()
188  * explicitly marks the file system busy.  So lfs_segwrite is safe.  I think.
189  */
190 
191 #define SET_FLUSHING(fs,vp) (fs)->lfs_flushvp = (vp)
192 #define IS_FLUSHING(fs,vp)  ((fs)->lfs_flushvp == (vp))
193 #define CLR_FLUSHING(fs,vp) (fs)->lfs_flushvp = NULL
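/*
 * The flushing marker is consulted, directly or via IS_FLUSHING(),
 * by lfs_writevnodes() and lfs_writefile(), so that a vnode being
 * flushed gets *all* of its blocks written, not just the cleaning
 * or dirop subsets.
 */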
194 
195 int
196 lfs_vflush(struct vnode *vp)
197 {
198 	struct inode *ip;
199 	struct lfs *fs;
200 	struct segment *sp;
201 	struct buf *bp, *nbp, *tbp, *tnbp;
202 	int error, s;
203 
204 	ip = VTOI(vp);
205 	fs = VFSTOUFS(vp->v_mount)->um_lfs;
206 
207 	if (ip->i_flag & IN_CLEANING) {
208 #ifdef DEBUG_LFS
209 		ivndebug(vp,"vflush/in_cleaning");
210 #endif
211 		LFS_CLR_UINO(ip, IN_CLEANING);
212 		LFS_SET_UINO(ip, IN_MODIFIED);
213 
214 		/*
215 		 * Toss any cleaning buffers that have real counterparts
216 		 * to avoid losing new data
217 		 */
218 		s = splbio();
219 		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
220 			nbp = LIST_NEXT(bp, b_vnbufs);
221 			if (bp->b_flags & B_CALL) {
222 				for (tbp = LIST_FIRST(&vp->v_dirtyblkhd); tbp;
223 				    tbp = tnbp)
224 				{
225 					tnbp = LIST_NEXT(tbp, b_vnbufs);
226 					if (tbp->b_vp == bp->b_vp
227 					   && tbp->b_lblkno == bp->b_lblkno
228 					   && tbp != bp)
229 					{
230 						fs->lfs_avail += btofsb(fs, bp->b_bcount);
231 						wakeup(&fs->lfs_avail);
232 						lfs_freebuf(bp);
233 						bp = NULL;
234 						break;
235 					}
236 				}
237 			}
238 		}
239 		splx(s);
240 	}
241 
242 	/* If the node is being written, wait until that is done */
243 	s = splbio();
244 	if (WRITEINPROG(vp)) {
245 #ifdef DEBUG_LFS
246 		ivndebug(vp,"vflush/writeinprog");
247 #endif
248 		tsleep(vp, PRIBIO+1, "lfs_vw", 0);
249 	}
250 	splx(s);
251 
252 	/* Protect against VXLOCK deadlock in vinvalbuf() */
253 	lfs_seglock(fs, SEGM_SYNC);
254 
255 	/* If we're supposed to flush a freed inode, just toss it */
256 	/* XXX - seglock, so these buffers can't be gathered, right? */
257 	if (ip->i_ffs_mode == 0) {
258 		printf("lfs_vflush: ino %d is freed, not flushing\n",
259 			ip->i_number);
260 		s = splbio();
261 		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
262 			nbp = LIST_NEXT(bp, b_vnbufs);
263 			if (bp->b_flags & B_DELWRI) { /* XXX always true? */
264 				fs->lfs_avail += btofsb(fs, bp->b_bcount);
265 				wakeup(&fs->lfs_avail);
266 			}
267 			/* Copied from lfs_writeseg */
268 			if (bp->b_flags & B_CALL) {
269 				/* if B_CALL, it was created with newbuf */
270 				lfs_freebuf(bp);
271 				bp = NULL;
272 			} else {
273 				bremfree(bp);
274 				LFS_UNLOCK_BUF(bp);
275 				bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
276                                          B_GATHERED);
277 				bp->b_flags |= B_DONE;
278 				reassignbuf(bp, vp);
279 				brelse(bp);
280 			}
281 		}
282 		splx(s);
283 		LFS_CLR_UINO(ip, IN_CLEANING);
284 		LFS_CLR_UINO(ip, IN_MODIFIED | IN_ACCESSED);
285 		ip->i_flag &= ~IN_ALLMOD;
286 		printf("lfs_vflush: done not flushing ino %d\n",
287 			ip->i_number);
288 		lfs_segunlock(fs);
289 		return 0;
290 	}
291 
292 	SET_FLUSHING(fs,vp);
293 	if (fs->lfs_nactive > LFS_MAX_ACTIVE) {
294 		error = lfs_segwrite(vp->v_mount, SEGM_SYNC|SEGM_CKP);
295 		CLR_FLUSHING(fs,vp);
296 		lfs_segunlock(fs);
297 		return error;
298 	}
299 	sp = fs->lfs_sp;
300 
301 	if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
302 		lfs_writevnodes(fs, vp->v_mount, sp, VN_EMPTY);
303 	} else if ((ip->i_flag & IN_CLEANING) &&
304 		  (fs->lfs_sp->seg_flags & SEGM_CLEAN)) {
305 #ifdef DEBUG_LFS
306 		ivndebug(vp,"vflush/clean");
307 #endif
308 		lfs_writevnodes(fs, vp->v_mount, sp, VN_CLEAN);
309 	} else if (lfs_dostats) {
310 		if (LIST_FIRST(&vp->v_dirtyblkhd) || (VTOI(vp)->i_flag & IN_ALLMOD))
311 			++lfs_stats.vflush_invoked;
312 #ifdef DEBUG_LFS
313 		ivndebug(vp,"vflush");
314 #endif
315 	}
316 
317 #ifdef DIAGNOSTIC
318 	/* XXX KS This actually can happen right now, though it shouldn't(?) */
319 	if (vp->v_flag & VDIROP) {
320 		printf("lfs_vflush: flushing VDIROP, this shouldn\'t be\n");
321 		/* panic("VDIROP being flushed...this can\'t happen"); */
322 	}
323 	if (vp->v_usecount < 0) {
324 		printf("usecount=%ld\n", (long)vp->v_usecount);
325 		panic("lfs_vflush: usecount<0");
326 	}
327 #endif
328 
329 	do {
330 		do {
331 			if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL)
332 				lfs_writefile(fs, sp, vp);
333 		} while (lfs_writeinode(fs, sp, ip));
334 	} while (lfs_writeseg(fs, sp) && ip->i_number == LFS_IFILE_INUM);
335 
336 	if (lfs_dostats) {
337 		++lfs_stats.nwrites;
338 		if (sp->seg_flags & SEGM_SYNC)
339 			++lfs_stats.nsync_writes;
340 		if (sp->seg_flags & SEGM_CKP)
341 			++lfs_stats.ncheckpoints;
342 	}
343 	/*
344 	 * If we were called from somewhere that already holds the seglock
345 	 * (e.g., lfs_markv()), lfs_segunlock() will not wait for the
346 	 * write to complete, because the seglock is still held.
347 	 * Since lfs_vflush() must return the vnode with no dirty buffers,
348 	 * we must explicitly wait, if that is the case.
349 	 *
350 	 * We compare the iocount against 1, not 0, because it is
351 	 * artificially incremented by lfs_seglock().
352 	 */
353 	if (fs->lfs_seglock > 1) {
354 		s = splbio();
355 		while (fs->lfs_iocount > 1)
356 			(void)tsleep(&fs->lfs_iocount, PRIBIO + 1,
357 				     "lfs_vflush", 0);
358 		splx(s);
359 	}
360 	lfs_segunlock(fs);
361 
362 	CLR_FLUSHING(fs,vp);
363 	return (0);
364 }
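/*
 * Caller's contract, sketched for illustration (the snippet below is
 * hypothetical, not a quote of any caller): on successful return the
 * vnode has no dirty buffers left, so it is safe to reclaim:
 *
 *	if ((error = lfs_vflush(vp)) != 0)
 *		return (error);
 *	... vp->v_dirtyblkhd is now empty; vinvalbuf() may proceed ...
 */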
365 
366 #ifdef DEBUG_LFS_VERBOSE
367 # define vndebug(vp,str) if (VTOI(vp)->i_flag & IN_CLEANING) printf("not writing ino %d because %s (op %d)\n",VTOI(vp)->i_number,(str),op)
368 #else
369 # define vndebug(vp,str)
370 #endif
371 
372 int
373 lfs_writevnodes(struct lfs *fs, struct mount *mp, struct segment *sp, int op)
374 {
375 	struct inode *ip;
376 	struct vnode *vp, *nvp;
377 	int inodes_written = 0, only_cleaning;
378 	int needs_unlock;
379 
380 #ifndef LFS_NO_BACKVP_HACK
381 	/* BEGIN HACK */
382 #define	VN_OFFSET	(((caddr_t)&LIST_NEXT(vp, v_mntvnodes)) - (caddr_t)vp)
383 #define	BACK_VP(VP)	((struct vnode *)(((caddr_t)(VP)->v_mntvnodes.le_prev) - VN_OFFSET))
384 #define	BEG_OF_VLIST	((struct vnode *)(((caddr_t)&(LIST_FIRST(&mp->mnt_vnodelist))) - VN_OFFSET))
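/*
 * How the hack works: le_prev points at the previous element's
 * v_mntvnodes.le_next field (or at the list head's first pointer for
 * the front element).  Subtracting VN_OFFSET, the byte offset of that
 * field within a struct vnode, recovers the containing vnode, so
 * BACK_VP() walks the mount's vnode list tail-to-head.
 */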
385 
386 	/* Find last vnode. */
387  loop:	for (vp = LIST_FIRST(&mp->mnt_vnodelist);
388 	     vp && LIST_NEXT(vp, v_mntvnodes) != NULL;
389 	     vp = LIST_NEXT(vp, v_mntvnodes));
390 	for (; vp && vp != BEG_OF_VLIST; vp = nvp) {
391 		nvp = BACK_VP(vp);
392 #else
393 	loop:
394 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
395 		nvp = LIST_NEXT(vp, v_mntvnodes);
396 #endif
397 		/*
398 		 * If the vnode that we are about to sync is no longer
399 		 * associated with this mount point, start over.
400 		 */
401 		if (vp->v_mount != mp) {
402 			printf("lfs_writevnodes: starting over\n");
403 			goto loop;
404 		}
405 
406 		ip = VTOI(vp);
407 		if ((op == VN_DIROP && !(vp->v_flag & VDIROP)) ||
408 		    (op != VN_DIROP && op != VN_CLEAN && (vp->v_flag & VDIROP))) {
409 			vndebug(vp,"dirop");
410 			continue;
411 		}
412 
413 		if (op == VN_EMPTY && LIST_FIRST(&vp->v_dirtyblkhd)) {
414 			vndebug(vp,"empty");
415 			continue;
416 		}
417 
418 		if (vp->v_type == VNON) {
419 			continue;
420 		}
421 
422 		if (op == VN_CLEAN && ip->i_number != LFS_IFILE_INUM
423 		   && vp != fs->lfs_flushvp
424 		   && !(ip->i_flag & IN_CLEANING)) {
425 			vndebug(vp,"cleaning");
426 			continue;
427 		}
428 
429 		if (lfs_vref(vp)) {
430 			vndebug(vp,"vref");
431 			continue;
432 		}
433 
434 		needs_unlock = 0;
435 		if (VOP_ISLOCKED(vp)) {
436 			if (vp != fs->lfs_ivnode &&
437 			    vp->v_lock.lk_lockholder != curproc->p_pid) {
438 #ifdef DEBUG_LFS
439 				printf("lfs_writevnodes: not writing ino %d,"
440 				       " locked by pid %d\n",
441 				       VTOI(vp)->i_number,
442 				       vp->v_lock.lk_lockholder);
443 #endif
444 				lfs_vunref(vp);
445 				continue;
446 			}
447 		} else if (vp != fs->lfs_ivnode) {
448 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
449 			needs_unlock = 1;
450 		}
451 
452 		only_cleaning = 0;
453 		/*
454 		 * Write the inode/file if dirty and it's not the IFILE.
455 		 */
456 		if ((ip->i_flag & IN_ALLMOD) ||
457 		     (LIST_FIRST(&vp->v_dirtyblkhd) != NULL))
458 		{
459 			only_cleaning = ((ip->i_flag & IN_ALLMOD) == IN_CLEANING);
460 
461 			if (ip->i_number != LFS_IFILE_INUM
462 			   && LIST_FIRST(&vp->v_dirtyblkhd) != NULL)
463 			{
464 				lfs_writefile(fs, sp, vp);
465 			}
466 			if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL) {
467 				if (WRITEINPROG(vp)) {
468 #ifdef DEBUG_LFS
469 					ivndebug(vp,"writevnodes/write2");
470 #endif
471 				} else if (!(ip->i_flag & IN_ALLMOD)) {
472 #ifdef DEBUG_LFS
473 					printf("<%d>",ip->i_number);
474 #endif
475 					LFS_SET_UINO(ip, IN_MODIFIED);
476 				}
477 			}
478 			(void) lfs_writeinode(fs, sp, ip);
479 			inodes_written++;
480 		}
481 
482 		if (needs_unlock)
483 			VOP_UNLOCK(vp, 0);
484 
485 		if (lfs_clean_vnhead && only_cleaning)
486 			lfs_vunref_head(vp);
487 		else
488 			lfs_vunref(vp);
489 	}
490 	return inodes_written;
491 }
492 
493 /*
494  * Do a checkpoint.
495  */
496 int
497 lfs_segwrite(struct mount *mp, int flags)
498 {
499 	struct buf *bp;
500 	struct inode *ip;
501 	struct lfs *fs;
502 	struct segment *sp;
503 	struct vnode *vp;
504 	SEGUSE *segusep;
505 	ufs_daddr_t ibno;
506 	int do_ckp, did_ckp, error, i;
507 	int writer_set = 0;
508 	int dirty;
509 	int redo;
510 
511 	fs = VFSTOUFS(mp)->um_lfs;
512 
513 	if (fs->lfs_ronly)
514 		return EROFS;
515 
516 	lfs_imtime(fs);
517 
518 	/* printf("lfs_segwrite: ifile flags are 0x%lx\n",
519 	       (long)(VTOI(fs->lfs_ivnode)->i_flag)); */
520 
521 #if 0
522 	/*
523 	 * If we are not the cleaner, and there is no space available,
524 	 * wait until the cleaner writes.
525 	 */
526 	if (!(flags & SEGM_CLEAN) && !(fs->lfs_seglock && fs->lfs_sp &&
527 				      (fs->lfs_sp->seg_flags & SEGM_CLEAN)))
528 	{
529 		while (fs->lfs_avail <= 0) {
530 			LFS_CLEANERINFO(cip, fs, bp);
531 			LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
532 
533 			wakeup(&lfs_allclean_wakeup);
534 			wakeup(&fs->lfs_nextseg);
535 			error = tsleep(&fs->lfs_avail, PRIBIO + 1, "lfs_av2",
536 				       0);
537 			if (error) {
538 				return (error);
539 			}
540 		}
541 	}
542 #endif
543 	/*
544 	 * Allocate a segment structure and enough space to hold pointers to
545 	 * the maximum possible number of buffers which can be described in a
546 	 * single summary block.
547 	 */
548 	do_ckp = (flags & SEGM_CKP) || fs->lfs_nactive > LFS_MAX_ACTIVE;
549 	lfs_seglock(fs, flags | (do_ckp ? SEGM_CKP : 0));
550 	sp = fs->lfs_sp;
551 
552 	/*
553 	 * If lfs_flushvp is non-NULL, we are called from lfs_vflush,
554 	 * in which case we have to flush *all* buffers off of this vnode.
555 	 * We don't care about other nodes, but write any non-dirop nodes
556 	 * anyway in anticipation of another getnewvnode().
557 	 *
558 	 * If we're cleaning we only write cleaning and ifile blocks, and
559 	 * no dirops, since otherwise we'd risk corruption in a crash.
560 	 */
561 	if (sp->seg_flags & SEGM_CLEAN)
562 		lfs_writevnodes(fs, mp, sp, VN_CLEAN);
563 	else {
564 		lfs_writevnodes(fs, mp, sp, VN_REG);
565 		if (!fs->lfs_dirops || !fs->lfs_flushvp) {
566 			while (fs->lfs_dirops)
567 				if ((error = tsleep(&fs->lfs_writer, PRIBIO + 1,
568 						"lfs writer", 0)))
569 				{
570 					/* XXX why not segunlock? */
571 					free(sp->bpp, M_SEGMENT);
572 					sp->bpp = NULL;
573 					free(sp, M_SEGMENT);
574 					fs->lfs_sp = NULL;
575 					return (error);
576 				}
577 			fs->lfs_writer++;
578 			writer_set = 1;
579 			lfs_writevnodes(fs, mp, sp, VN_DIROP);
580 			((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
581 		}
582 	}
583 
584 	/*
585 	 * If we are doing a checkpoint, mark everything since the
586 	 * last checkpoint as no longer ACTIVE.
587 	 */
588 	if (do_ckp) {
589 		for (ibno = fs->lfs_cleansz + fs->lfs_segtabsz;
590 		     --ibno >= fs->lfs_cleansz; ) {
591 			dirty = 0;
592 			if (bread(fs->lfs_ivnode, ibno, fs->lfs_bsize, NOCRED, &bp))
593 				panic("lfs_segwrite: ifile read");
594 
595 			segusep = (SEGUSE *)bp->b_data;
596 			for (i = fs->lfs_sepb; i--;) {
597 				if (segusep->su_flags & SEGUSE_ACTIVE) {
598 					segusep->su_flags &= ~SEGUSE_ACTIVE;
599 					++dirty;
600 				}
601 				if (fs->lfs_version > 1)
602 					++segusep;
603 				else
604 					segusep = (SEGUSE *)
605 						((SEGUSE_V1 *)segusep + 1);
606 			}
607 
608 			/* But the current segment is still ACTIVE */
609 			segusep = (SEGUSE *)bp->b_data;
610 			if (dtosn(fs, fs->lfs_curseg) / fs->lfs_sepb ==
611 			    (ibno-fs->lfs_cleansz)) {
612 				if (fs->lfs_version > 1)
613 					segusep[dtosn(fs, fs->lfs_curseg) %
614 					     fs->lfs_sepb].su_flags |=
615 						     SEGUSE_ACTIVE;
616 				else
617 					((SEGUSE *)
618 					 ((SEGUSE_V1 *)(bp->b_data) +
619 					  (dtosn(fs, fs->lfs_curseg) %
620 					   fs->lfs_sepb)))->su_flags
621 						   |= SEGUSE_ACTIVE;
622 				--dirty;
623 			}
624 			if (dirty)
625 				error = LFS_BWRITE_LOG(bp); /* Ifile */
626 			else
627 				brelse(bp);
628 		}
629 	}
630 
631 	did_ckp = 0;
632 	if (do_ckp || fs->lfs_doifile) {
633 		do {
634 			vp = fs->lfs_ivnode;
635 
636 			vget(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
637 #ifdef DEBUG
638 			LFS_ENTER_LOG("pretend", __FILE__, __LINE__, 0, 0);
639 #endif
640 			fs->lfs_flags &= ~LFS_IFDIRTY;
641 
642 			ip = VTOI(vp);
643 			/* if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL) */
644 				lfs_writefile(fs, sp, vp);
645 			if (ip->i_flag & IN_ALLMOD)
646 				++did_ckp;
647 			redo = lfs_writeinode(fs, sp, ip);
648 
649 			vput(vp);
650 			redo += lfs_writeseg(fs, sp);
651 			redo += (fs->lfs_flags & LFS_IFDIRTY);
652 		} while (redo && do_ckp);
653 
654 		/* The ifile should now be all clear */
655 		if (do_ckp && LIST_FIRST(&vp->v_dirtyblkhd)) {
656 			struct buf *bp;
657 			int s, warned = 0, dopanic = 0;
658 			s = splbio();
659 			for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = LIST_NEXT(bp, b_vnbufs)) {
660 				if (!(bp->b_flags & B_GATHERED)) {
661 					if (!warned)
662 						printf("lfs_segwrite: ifile still has dirty blocks?!\n");
663 					++dopanic;
664 					++warned;
665 					printf("bp=%p, lbn %d, flags 0x%lx\n",
666 						bp, bp->b_lblkno, bp->b_flags);
667 				}
668 			}
669 			if (dopanic)
670 				panic("dirty blocks");
671 			splx(s);
672 		}
673 		LFS_CLR_UINO(ip, IN_ALLMOD);
674 	} else {
675 		(void) lfs_writeseg(fs, sp);
676 	}
677 
678 	/*
679 	 * If the I/O count is non-zero, sleep until it reaches zero.
680 	 * At the moment, the user's process hangs around so we can
681 	 * sleep.
682 	 */
683 	fs->lfs_doifile = 0;
684 	if (writer_set && --fs->lfs_writer == 0)
685 		wakeup(&fs->lfs_dirops);
686 
687 	/*
688 	 * If we didn't write the Ifile, we didn't really do anything.
689 	 * That means that (1) there is a checkpoint on disk and (2)
690 	 * nothing has changed since it was written.
691 	 *
692 	 * Take the flags off of the segment so that lfs_segunlock
693 	 * doesn't have to write the superblock either.
694 	 */
695 	if (did_ckp == 0) {
696 		sp->seg_flags &= ~(SEGM_SYNC|SEGM_CKP);
697 		/* if (do_ckp) printf("lfs_segwrite: no checkpoint\n"); */
698 	}
699 
700 	if (lfs_dostats) {
701 		++lfs_stats.nwrites;
702 		if (sp->seg_flags & SEGM_SYNC)
703 			++lfs_stats.nsync_writes;
704 		if (sp->seg_flags & SEGM_CKP)
705 			++lfs_stats.ncheckpoints;
706 	}
707 	lfs_segunlock(fs);
708 	return (0);
709 }
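/*
 * Illustrative call (a sketch, mirroring what lfs_vflush() does above
 * when too many segments are active): a synchronous checkpoint of an
 * LFS mount is requested with
 *
 *	error = lfs_segwrite(mp, SEGM_CKP | SEGM_SYNC);
 *
 * whereas SEGM_CLEAN restricts the write to cleaner-supplied blocks
 * and the Ifile.
 */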
710 
711 /*
712  * Write the dirty blocks associated with a vnode.
713  */
714 void
715 lfs_writefile(struct lfs *fs, struct segment *sp, struct vnode *vp)
716 {
717 	struct buf *bp;
718 	struct finfo *fip;
719 	IFILE *ifp;
720 
721 
722 	if (sp->seg_bytes_left < fs->lfs_bsize ||
723 	    sp->sum_bytes_left < sizeof(struct finfo))
724 		(void) lfs_writeseg(fs, sp);
725 
726 	sp->sum_bytes_left -= sizeof(struct finfo) - sizeof(ufs_daddr_t);
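	/*
	 * Note that struct finfo already contains one block entry
	 * (fi_blocks[0]), so a new FINFO header costs the structure
	 * size less one ufs_daddr_t of summary space; each gathered
	 * block later pays for its own entry.
	 */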
727 	++((SEGSUM *)(sp->segsum))->ss_nfinfo;
728 
729 	if (vp->v_flag & VDIROP)
730 		((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);
731 
732 	fip = sp->fip;
733 	fip->fi_nblocks = 0;
734 	fip->fi_ino = VTOI(vp)->i_number;
735 	LFS_IENTRY(ifp, fs, fip->fi_ino, bp);
736 	fip->fi_version = ifp->if_version;
737 	brelse(bp);
738 
739 	if (sp->seg_flags & SEGM_CLEAN) {
740 		lfs_gather(fs, sp, vp, lfs_match_fake);
741 		/*
742 		 * For a file being flushed, we need to write *all* blocks.
743 		 * This means writing the cleaning blocks first, and then
744 		 * immediately following with any non-cleaning blocks.
745 		 * The same is true of the Ifile since checkpoints assume
746 		 * that all valid Ifile blocks are written.
747 		 */
748 	   	if (IS_FLUSHING(fs,vp) || VTOI(vp)->i_number == LFS_IFILE_INUM)
749 			lfs_gather(fs, sp, vp, lfs_match_data);
750 	} else
751 		lfs_gather(fs, sp, vp, lfs_match_data);
752 
753 	/*
754 	 * It may not be necessary to write the meta-data blocks at this point,
755 	 * as the roll-forward recovery code should be able to reconstruct the
756 	 * list.
757 	 *
758 	 * We have to write them anyway, though, under two conditions: (1) the
759 	 * vnode is being flushed (for reuse by vinvalbuf); or (2) we are
760 	 * checkpointing.
761 	 */
762 	if (lfs_writeindir
763 	   || IS_FLUSHING(fs,vp)
764 	   || (sp->seg_flags & SEGM_CKP))
765 	{
766 		lfs_gather(fs, sp, vp, lfs_match_indir);
767 		lfs_gather(fs, sp, vp, lfs_match_dindir);
768 		lfs_gather(fs, sp, vp, lfs_match_tindir);
769 	}
770 	fip = sp->fip;
771 	if (fip->fi_nblocks != 0) {
772 		sp->fip = (FINFO*)((caddr_t)fip + sizeof(struct finfo) +
773 				   sizeof(ufs_daddr_t) * (fip->fi_nblocks-1));
774 		sp->start_lbp = &sp->fip->fi_blocks[0];
775 	} else {
776 		sp->sum_bytes_left += sizeof(FINFO) - sizeof(ufs_daddr_t);
777 		--((SEGSUM *)(sp->segsum))->ss_nfinfo;
778 	}
779 }
780 
781 int
782 lfs_writeinode(struct lfs *fs, struct segment *sp, struct inode *ip)
783 {
784 	struct buf *bp, *ibp;
785 	struct dinode *cdp;
786 	IFILE *ifp;
787 	SEGUSE *sup;
788 	ufs_daddr_t daddr;
789 	daddr_t *daddrp;
790 	ino_t ino;
791 	int error, i, ndx, fsb = 0;
792 	int redo_ifile = 0;
793 	struct timespec ts;
794 	int gotblk = 0;
795 
796 	if (!(ip->i_flag & IN_ALLMOD))
797 		return (0);
798 
799 	/* Allocate a new inode block if necessary. */
800 	if ((ip->i_number != LFS_IFILE_INUM || sp->idp == NULL) && sp->ibp == NULL) {
801 		/* Allocate a new segment if necessary. */
802 		if (sp->seg_bytes_left < fs->lfs_ibsize ||
803 		    sp->sum_bytes_left < sizeof(ufs_daddr_t))
804 			(void) lfs_writeseg(fs, sp);
805 
806 		/* Get next inode block. */
807 		daddr = fs->lfs_offset;
808 		fs->lfs_offset += btofsb(fs, fs->lfs_ibsize);
809 		sp->ibp = *sp->cbpp++ =
810 			getblk(VTOI(fs->lfs_ivnode)->i_devvp, fsbtodb(fs, daddr),
811 			       fs->lfs_ibsize, 0, 0);
812 		gotblk++;
813 
814 		/* Zero out inode numbers */
815 		for (i = 0; i < INOPB(fs); ++i)
816 			((struct dinode *)sp->ibp->b_data)[i].di_inumber = 0;
817 
818 		++sp->start_bpp;
819 		fs->lfs_avail -= btofsb(fs, fs->lfs_ibsize);
820 		/* Set remaining space counters. */
821 		sp->seg_bytes_left -= fs->lfs_ibsize;
822 		sp->sum_bytes_left -= sizeof(ufs_daddr_t);
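		/*
		 * Inode-block disk addresses are kept in a downward-
		 * growing array at the tail of the summary block; ndx
		 * is this block's slot, counted back from the end.
		 */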
823 		ndx = fs->lfs_sumsize / sizeof(ufs_daddr_t) -
824 			sp->ninodes / INOPB(fs) - 1;
825 		((ufs_daddr_t *)(sp->segsum))[ndx] = daddr;
826 	}
827 
828 	/* Update the inode times and copy the inode onto the inode page. */
829 	TIMEVAL_TO_TIMESPEC(&time, &ts);
830 	/* XXX kludge --- don't redirty the ifile just to put times on it */
831 	if (ip->i_number != LFS_IFILE_INUM)
832 		LFS_ITIMES(ip, &ts, &ts, &ts);
833 
834 	/*
835 	 * If this is the Ifile, and we've already written the Ifile in this
836 	 * partial segment, just overwrite it (it's not on disk yet) and
837 	 * continue.
838 	 *
839 	 * XXX we know that the bp that we get the second time around has
840 	 * already been gathered.
841 	 */
842 	if (ip->i_number == LFS_IFILE_INUM && sp->idp) {
843 		*(sp->idp) = ip->i_din.ffs_din;
844 		return 0;
845 	}
846 
847 	bp = sp->ibp;
848 	cdp = ((struct dinode *)bp->b_data) + (sp->ninodes % INOPB(fs));
849 	*cdp = ip->i_din.ffs_din;
850 #ifdef LFS_IFILE_FRAG_ADDRESSING
851 	if (fs->lfs_version > 1)
852 		fsb = (sp->ninodes % INOPB(fs)) / INOPF(fs);
853 #endif
854 
855 	/*
856 	 * If we are cleaning, ensure that we don't write UNWRITTEN disk
857 	 * addresses to disk.
858 	 */
859 	if (ip->i_lfs_effnblks != ip->i_ffs_blocks) {
860 #ifdef DEBUG_LFS
861 		printf("lfs_writeinode: cleansing ino %d (%d != %d)\n",
862 		       ip->i_number, ip->i_lfs_effnblks, ip->i_ffs_blocks);
863 #endif
864 		for (daddrp = cdp->di_db; daddrp < cdp->di_ib + NIADDR;
865 		     daddrp++) {
866 			if (*daddrp == UNWRITTEN) {
867 #ifdef DEBUG_LFS
868 				printf("lfs_writeinode: wiping UNWRITTEN\n");
869 #endif
870 				*daddrp = 0;
871 			}
872 		}
873 	}
874 
875 	if (ip->i_flag & IN_CLEANING)
876 		LFS_CLR_UINO(ip, IN_CLEANING);
877 	else {
878 		/* XXX IN_ALLMOD */
879 		LFS_CLR_UINO(ip, IN_ACCESSED | IN_ACCESS | IN_CHANGE |
880 			     IN_UPDATE);
881 		if (ip->i_lfs_effnblks == ip->i_ffs_blocks)
882 			LFS_CLR_UINO(ip, IN_MODIFIED);
883 #ifdef DEBUG_LFS
884 		else
885 			printf("lfs_writeinode: ino %d: real blks=%d, "
886 			       "eff=%d\n", ip->i_number, ip->i_ffs_blocks,
887 			       ip->i_lfs_effnblks);
888 #endif
889 	}
890 
891 	if (ip->i_number == LFS_IFILE_INUM) /* We know sp->idp == NULL */
892 		sp->idp = ((struct dinode *)bp->b_data) +
893 			(sp->ninodes % INOPB(fs));
894 	if (gotblk) {
895 		LFS_LOCK_BUF(bp);
896 		brelse(bp);
897 	}
898 
899 	/* Increment inode count in segment summary block. */
900 	++((SEGSUM *)(sp->segsum))->ss_ninos;
901 
902 	/* If this page is full, set flag to allocate a new page. */
903 	if (++sp->ninodes % INOPB(fs) == 0)
904 		sp->ibp = NULL;
905 
906 	/*
907 	 * If updating the ifile, update the super-block.  Update the disk
908 	 * address and access times for this inode in the ifile.
909 	 */
910 	ino = ip->i_number;
911 	if (ino == LFS_IFILE_INUM) {
912 		daddr = fs->lfs_idaddr;
913 		fs->lfs_idaddr = dbtofsb(fs, bp->b_blkno);
914 	} else {
915 		LFS_IENTRY(ifp, fs, ino, ibp);
916 		daddr = ifp->if_daddr;
917 		ifp->if_daddr = dbtofsb(fs, bp->b_blkno) + fsb;
918 #ifdef LFS_DEBUG_NEXTFREE
919 		if (ino > 3 && ifp->if_nextfree) {
920 			vprint("lfs_writeinode",ITOV(ip));
921 			printf("lfs_writeinode: updating free ino %d\n",
922 				ip->i_number);
923 		}
924 #endif
925 		error = LFS_BWRITE_LOG(ibp); /* Ifile */
926 	}
927 
928 	/*
929 	 * Account the inode: it no longer belongs to its former segment,
930 	 * though it will not belong to the new segment until that segment
931 	 * is actually written.
932 	 */
933 #ifdef DEBUG
934 	/*
935 	 * The inode's last address should not be in the current partial
936 	 * segment, except under exceptional circumstances (lfs_writevnodes
937 	 * had to start over, and in the meantime more blocks were written
938 	 * to a vnode).  Although the previous inode won't be accounted in
939 	 * su_nbytes until lfs_writeseg, this shouldn't be a problem as we
940 	 * have more data blocks in the current partial segment.
941 	 */
942 	if (daddr >= fs->lfs_lastpseg && daddr <= dbtofsb(fs, bp->b_blkno))
943 		printf("lfs_writeinode: last inode addr in current pseg "
944 		       "(ino %d daddr 0x%x)\n", ino, daddr);
945 #endif
946 	if (daddr != LFS_UNUSED_DADDR) {
947 		LFS_SEGENTRY(sup, fs, dtosn(fs, daddr), bp);
948 #ifdef DIAGNOSTIC
949 		if (sup->su_nbytes < DINODE_SIZE) {
950 			printf("lfs_writeinode: negative bytes "
951 			       "(segment %d short by %d)\n",
952 			       dtosn(fs, daddr),
953 			       (int)DINODE_SIZE - sup->su_nbytes);
954 			panic("lfs_writeinode: negative bytes");
955 			sup->su_nbytes = DINODE_SIZE;
956 		}
957 #endif
958 #ifdef DEBUG_SU_NBYTES
959 		printf("seg %d -= %d for ino %d inode\n",
960 		       dtosn(fs, daddr), DINODE_SIZE, ino);
961 #endif
962 		sup->su_nbytes -= DINODE_SIZE;
963 		redo_ifile =
964 			(ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
965 		if (redo_ifile)
966 			fs->lfs_flags |= LFS_IFDIRTY;
967 		error = LFS_BWRITE_LOG(bp); /* Ifile */
968 	}
969 	return (redo_ifile);
970 }
971 
972 int
973 lfs_gatherblock(struct segment *sp, struct buf *bp, int *sptr)
974 {
975 	struct lfs *fs;
976 	int version;
977 
978 	/*
979 	 * If full, finish this segment.  We may be doing I/O, so
980 	 * release and reacquire the splbio().
981 	 */
982 #ifdef DIAGNOSTIC
983 	if (sp->vp == NULL)
984 		panic ("lfs_gatherblock: Null vp in segment");
985 #endif
986 	fs = sp->fs;
987 	if (sp->sum_bytes_left < sizeof(ufs_daddr_t) ||
988 	    sp->seg_bytes_left < bp->b_bcount) {
989 		if (sptr)
990 			splx(*sptr);
991 		lfs_updatemeta(sp);
992 
993 		version = sp->fip->fi_version;
994 		(void) lfs_writeseg(fs, sp);
995 
996 		sp->fip->fi_version = version;
997 		sp->fip->fi_ino = VTOI(sp->vp)->i_number;
998 		/* Add the current file to the segment summary. */
999 		++((SEGSUM *)(sp->segsum))->ss_nfinfo;
1000 		sp->sum_bytes_left -=
1001 			sizeof(struct finfo) - sizeof(ufs_daddr_t);
1002 
1003 		if (sptr)
1004 			*sptr = splbio();
1005 		return (1);
1006 	}
1007 
1008 #ifdef DEBUG
1009 	if (bp->b_flags & B_GATHERED) {
1010 		printf("lfs_gatherblock: already gathered! Ino %d, lbn %d\n",
1011 		       sp->fip->fi_ino, bp->b_lblkno);
1012 		return (0);
1013 	}
1014 #endif
1015 	/* Insert into the buffer list, update the FINFO block. */
1016 	bp->b_flags |= B_GATHERED;
1017 	bp->b_flags &= ~B_DONE;
1018 
1019 	*sp->cbpp++ = bp;
1020 	sp->fip->fi_blocks[sp->fip->fi_nblocks++] = bp->b_lblkno;
1021 
1022 	sp->sum_bytes_left -= sizeof(ufs_daddr_t);
1023 	sp->seg_bytes_left -= bp->b_bcount;
1024 	return (0);
1025 }
1026 
1027 int
1028 lfs_gather(struct lfs *fs, struct segment *sp, struct vnode *vp, int (*match)(struct lfs *, struct buf *))
1029 {
1030 	struct buf *bp, *nbp;
1031 	int s, count = 0;
1032 
1033 	sp->vp = vp;
1034 	s = splbio();
1035 
1036 #ifndef LFS_NO_BACKBUF_HACK
1037 /* This is a hack to see if ordering the blocks in LFS makes a difference. */
1038 # define	BUF_OFFSET	(((caddr_t)&LIST_NEXT(bp, b_vnbufs)) - (caddr_t)bp)
1039 # define	BACK_BUF(BP)	((struct buf *)(((caddr_t)(BP)->b_vnbufs.le_prev) - BUF_OFFSET))
1040 # define	BEG_OF_LIST	((struct buf *)(((caddr_t)&LIST_FIRST(&vp->v_dirtyblkhd)) - BUF_OFFSET))
1041 /* Find last buffer. */
1042 loop:	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp && LIST_NEXT(bp, b_vnbufs) != NULL;
1043 	    bp = LIST_NEXT(bp, b_vnbufs));
1044 	for (; bp && bp != BEG_OF_LIST; bp = nbp) {
1045 		nbp = BACK_BUF(bp);
1046 #else /* LFS_NO_BACKBUF_HACK */
1047 loop:	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1048 		nbp = LIST_NEXT(bp, b_vnbufs);
1049 #endif /* LFS_NO_BACKBUF_HACK */
1050 		if ((bp->b_flags & (B_BUSY|B_GATHERED)) || !match(fs, bp)) {
1051 #ifdef DEBUG_LFS
1052 			if (vp == fs->lfs_ivnode && (bp->b_flags & (B_BUSY|B_GATHERED)) == B_BUSY)
1053 				printf("(%d:%lx)", bp->b_lblkno, bp->b_flags);
1054 #endif
1055 			continue;
1056 		}
1057 		if (vp->v_type == VBLK) {
1058 			/* For block devices, just write the blocks. */
1059 			/* XXX Do we really need to even do this? */
1060 #ifdef DEBUG_LFS
1061 			if (count == 0)
1062 				printf("BLK(");
1063 			printf(".");
1064 #endif
1065 			/* Get the block before bwrite, so we don't corrupt the free list */
1066 			bp->b_flags |= B_BUSY;
1067 			bremfree(bp);
1068 			bwrite(bp);
1069 		} else {
1070 #ifdef DIAGNOSTIC
1071 			if ((bp->b_flags & (B_CALL|B_INVAL)) == B_INVAL) {
1072 				printf("lfs_gather: lbn %d is B_INVAL\n",
1073 					bp->b_lblkno);
1074 				VOP_PRINT(bp->b_vp);
1075 			}
1076 			if (!(bp->b_flags & B_DELWRI))
1077 				panic("lfs_gather: bp not B_DELWRI");
1078 			if (!(bp->b_flags & B_LOCKED)) {
1079 				printf("lfs_gather: lbn %d blk %d"
1080 				       " not B_LOCKED\n", bp->b_lblkno,
1081 				       dbtofsb(fs, bp->b_blkno));
1082 				VOP_PRINT(bp->b_vp);
1083 				panic("lfs_gather: bp not B_LOCKED");
1084 			}
1085 #endif
1086 			if (lfs_gatherblock(sp, bp, &s)) {
1087 				goto loop;
1088 			}
1089 		}
1090 		count++;
1091 	}
1092 	splx(s);
1093 #ifdef DEBUG_LFS
1094 	if (vp->v_type == VBLK && count)
1095 		printf(")\n");
1096 #endif
1097 	lfs_updatemeta(sp);
1098 	sp->vp = NULL;
1099 	return count;
1100 }
1101 
1102 /*
1103  * Update the metadata that points to the blocks listed in the FINFO
1104  * array.
1105  */
1106 void
1107 lfs_updatemeta(struct segment *sp)
1108 {
1109 	SEGUSE *sup;
1110 	struct buf *bp;
1111 	struct lfs *fs;
1112 	struct vnode *vp;
1113 	struct indir a[NIADDR + 2], *ap;
1114 	struct inode *ip;
1115 	ufs_daddr_t daddr, lbn, off;
1116 	daddr_t ooff;
1117 	int error, i, nblocks, num;
1118 	int bb;
1119 
1120 	vp = sp->vp;
1121 	nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
1122 	if (nblocks < 0)
1123 		panic("This is a bad thing\n");
1124 	if (vp == NULL || nblocks == 0)
1125 		return;
1126 
1127 	/* Sort the blocks. */
1128 	/*
1129 	 * XXX KS - We have to sort even if the blocks come from the
1130 	 * cleaner, because there might be other pending blocks on the
1131 	 * same inode...and if we don't sort, and there are fragments
1132 	 * present, blocks may be written in the wrong place.
1133 	 */
1134 	/* if (!(sp->seg_flags & SEGM_CLEAN)) */
1135 	lfs_shellsort(sp->start_bpp, sp->start_lbp, nblocks);
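	/*
	 * lfs_shellsort() co-sorts the two parallel arrays, the buffer
	 * pointers in start_bpp and their logical block numbers in
	 * start_lbp, keyed on lbn, so disk addresses are handed out in
	 * ascending logical-block order below.
	 */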
1136 
1137 	/*
1138 	 * Record the length of the last block in case it's a fragment.
1139 	 * If there are indirect blocks present, they sort last.  An
1140 	 * indirect block will be lfs_bsize and its presence indicates
1141 	 * that you cannot have fragments.
1142 	 */
1143 	sp->fip->fi_lastlength = sp->start_bpp[nblocks - 1]->b_bcount;
1144 
1145 	/*
1146 	 * Assign disk addresses, and update references to the logical
1147 	 * block and the segment usage information.
1148 	 */
1149 	fs = sp->fs;
1150 	for (i = nblocks; i--; ++sp->start_bpp) {
1151 		lbn = *sp->start_lbp++;
1152 
1153 		(*sp->start_bpp)->b_blkno = fsbtodb(fs, fs->lfs_offset);
1154 		off = fs->lfs_offset;
1155 		if ((*sp->start_bpp)->b_blkno == (*sp->start_bpp)->b_lblkno) {
1156 			printf("lfs_updatemeta: ino %d blk %d"
1157 			       " has same lbn and daddr\n",
1158 			       VTOI(vp)->i_number, off);
1159 		}
1160 #ifdef DIAGNOSTIC
1161 		if ((*sp->start_bpp)->b_bcount < fs->lfs_bsize && i != 0)
1162 			panic("lfs_updatemeta: fragment is not last block\n");
1163 #endif
1164 		bb = fragstofsb(fs, numfrags(fs, (*sp->start_bpp)->b_bcount));
1165 		fs->lfs_offset += bb;
1166 		error = ufs_bmaparray(vp, lbn, &daddr, a, &num, NULL);
1167 		if (daddr > 0)
1168 			daddr = dbtofsb(fs, daddr);
1169 		if (error)
1170 			panic("lfs_updatemeta: ufs_bmaparray %d", error);
1171 		ip = VTOI(vp);
1172 		switch (num) {
1173 		case 0:
1174 			ooff = ip->i_ffs_db[lbn];
1175 #ifdef DEBUG
1176 			if (ooff == 0) {
1177 				printf("lfs_updatemeta[1]: warning: writing "
1178 				       "ino %d lbn %d at 0x%x, was 0x0\n",
1179 				       ip->i_number, lbn, off);
1180 			}
1181 #endif
1182 			if (ooff == UNWRITTEN)
1183 				ip->i_ffs_blocks += bb;
1184 			ip->i_ffs_db[lbn] = off;
1185 			break;
1186 		case 1:
1187 			ooff = ip->i_ffs_ib[a[0].in_off];
1188 #ifdef DEBUG
1189 			if (ooff == 0) {
1190 				printf("lfs_updatemeta[2]: warning: writing "
1191 				       "ino %d lbn %d at 0x%x, was 0x0\n",
1192 				       ip->i_number, lbn, off);
1193 			}
1194 #endif
1195 			if (ooff == UNWRITTEN)
1196 				ip->i_ffs_blocks += bb;
1197 			ip->i_ffs_ib[a[0].in_off] = off;
1198 			break;
1199 		default:
1200 			ap = &a[num - 1];
1201 			if (bread(vp, ap->in_lbn, fs->lfs_bsize, NOCRED, &bp))
1202 				panic("lfs_updatemeta: bread bno %d",
1203 				      ap->in_lbn);
1204 
1205 			ooff = ((ufs_daddr_t *)bp->b_data)[ap->in_off];
1206 #ifdef DEBUG
1207 			if (ooff == 0) {
1208 				printf("lfs_updatemeta[3]: warning: writing "
1209 				       "ino %d lbn %d at 0x%x, was 0x0\n",
1210 				       ip->i_number, lbn, off);
1211 			}
1212 #endif
1213 			if (ooff == UNWRITTEN)
1214 				ip->i_ffs_blocks += bb;
1215 			((ufs_daddr_t *)bp->b_data)[ap->in_off] = off;
1216 			(void) VOP_BWRITE(bp);
1217 		}
1218 #ifdef DEBUG
1219 		if (daddr >= fs->lfs_lastpseg && daddr <= off) {
1220 			printf("lfs_updatemeta: ino %d, lbn %d, addr = %x "
1221 			       "in same pseg\n", VTOI(sp->vp)->i_number,
1222 			       (*sp->start_bpp)->b_lblkno, daddr);
1223 		}
1224 #endif
1225 		/* Update segment usage information. */
1226 		if (daddr > 0) {
1227 			LFS_SEGENTRY(sup, fs, dtosn(fs, daddr), bp);
1228 #ifdef DIAGNOSTIC
1229 			if (sup->su_nbytes < (*sp->start_bpp)->b_bcount) {
1230 				/* XXX -- Change to a panic. */
1231 				printf("lfs_updatemeta: negative bytes "
1232 				       "(segment %d short by %ld)\n",
1233 				       dtosn(fs, daddr),
1234 				       (*sp->start_bpp)->b_bcount -
1235 				       sup->su_nbytes);
1236 				printf("lfs_updatemeta: ino %d, lbn %d, "
1237 				       "addr = 0x%x\n", VTOI(sp->vp)->i_number,
1238 				       (*sp->start_bpp)->b_lblkno, daddr);
1239 				panic("lfs_updatemeta: negative bytes");
1240 				sup->su_nbytes = (*sp->start_bpp)->b_bcount;
1241 			}
1242 #endif
1243 #ifdef DEBUG_SU_NBYTES
1244 			printf("seg %d -= %ld for ino %d lbn %d db 0x%x\n",
1245 			       dtosn(fs, daddr), (*sp->start_bpp)->b_bcount,
1246 			       VTOI(sp->vp)->i_number,
1247 			       (*sp->start_bpp)->b_lblkno, daddr);
1248 #endif
1249 			sup->su_nbytes -= (*sp->start_bpp)->b_bcount;
1250 			if (!(bp->b_flags & B_GATHERED))
1251 				fs->lfs_flags |= LFS_IFDIRTY;
1252 			error = LFS_BWRITE_LOG(bp); /* Ifile */
1253 		}
1254 	}
1255 }
1256 
1257 /*
1258  * Start a new segment.
1259  */
1260 int
1261 lfs_initseg(struct lfs *fs)
1262 {
1263 	struct segment *sp;
1264 	SEGUSE *sup;
1265 	SEGSUM *ssp;
1266 	struct buf *bp, *sbp;
1267 	int repeat;
1268 
1269 	sp = fs->lfs_sp;
1270 
1271 	repeat = 0;
1272 	/* Advance to the next segment. */
1273 	if (!LFS_PARTIAL_FITS(fs)) {
1274 		/* lfs_avail eats the remaining space */
1275 		fs->lfs_avail -= fs->lfs_fsbpseg - (fs->lfs_offset -
1276 						   fs->lfs_curseg);
1277 		/* Wake up any cleaning procs waiting on this file system. */
1278 		wakeup(&lfs_allclean_wakeup);
1279 		wakeup(&fs->lfs_nextseg);
1280 		lfs_newseg(fs);
1281 		repeat = 1;
1282 		fs->lfs_offset = fs->lfs_curseg;
1283 		sp->seg_number = dtosn(fs, fs->lfs_curseg);
1284 		sp->seg_bytes_left = fsbtob(fs, fs->lfs_fsbpseg);
1285 		/*
1286 		 * If the segment contains a superblock, update the offset
1287 		 * and summary address to skip over it.
1288 		 */
1289 		LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
1290 		if (sup->su_flags & SEGUSE_SUPERBLOCK) {
1291 			fs->lfs_offset += btofsb(fs, LFS_SBPAD);
1292 			sp->seg_bytes_left -= LFS_SBPAD;
1293 		}
1294 		brelse(bp);
1295 		/* Segment zero could also contain the labelpad */
1296 		if (fs->lfs_version > 1 && sp->seg_number == 0 &&
1297 		    fs->lfs_start < btofsb(fs, LFS_LABELPAD)) {
1298 			fs->lfs_offset += btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
1299 			sp->seg_bytes_left -= LFS_LABELPAD - fsbtob(fs, fs->lfs_start);
1300 		}
1301 	} else {
1302 		sp->seg_number = dtosn(fs, fs->lfs_curseg);
1303 		sp->seg_bytes_left = fsbtob(fs, fs->lfs_fsbpseg -
1304 				      (fs->lfs_offset - fs->lfs_curseg));
1305 	}
1306 	fs->lfs_lastpseg = fs->lfs_offset;
1307 
1308 	sp->fs = fs;
1309 	sp->ibp = NULL;
1310 	sp->idp = NULL;
1311 	sp->ninodes = 0;
1312 
1313 	/* Get a new buffer for SEGSUM and enter it into the buffer list. */
1314 	sp->cbpp = sp->bpp;
1315 #ifdef LFS_MALLOC_SUMMARY
1316 	sbp = *sp->cbpp = lfs_newbuf(fs, VTOI(fs->lfs_ivnode)->i_devvp,
1317 				     fsbtodb(fs, fs->lfs_offset), fs->lfs_sumsize);
1318   	sp->segsum = (*sp->cbpp)->b_data;
1319 #else
1320 	sbp = *sp->cbpp = getblk(VTOI(fs->lfs_ivnode)->i_devvp,
1321 				 fsbtodb(fs, fs->lfs_offset), NBPG, 0, 0);
1322 	memset(sbp->b_data, 0x5a, NBPG);
1323 	sp->segsum = (*sp->cbpp)->b_data + NBPG - fs->lfs_sumsize;
1324 #endif
1325 	bzero(sp->segsum, fs->lfs_sumsize);
1326 	sp->start_bpp = ++sp->cbpp;
1327 	fs->lfs_offset += btofsb(fs, fs->lfs_sumsize);
1328 
1329 	/* Set point to SEGSUM, initialize it. */
1330 	ssp = sp->segsum;
1331 	ssp->ss_next = fs->lfs_nextseg;
1332 	ssp->ss_nfinfo = ssp->ss_ninos = 0;
1333 	ssp->ss_magic = SS_MAGIC;
1334 
1335 	/* Set pointer to first FINFO, initialize it. */
1336 	sp->fip = (struct finfo *)((caddr_t)sp->segsum + SEGSUM_SIZE(fs));
1337 	sp->fip->fi_nblocks = 0;
1338 	sp->start_lbp = &sp->fip->fi_blocks[0];
1339 	sp->fip->fi_lastlength = 0;
1340 
1341 	sp->seg_bytes_left -= fs->lfs_sumsize;
1342 	sp->sum_bytes_left = fs->lfs_sumsize - SEGSUM_SIZE(fs);
1343 
1344 #ifndef LFS_MALLOC_SUMMARY
1345 	LFS_LOCK_BUF(sbp);
1346 	brelse(sbp);
1347 #endif
1348 	return (repeat);
1349 }
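/*
 * Sketch of a fresh partial segment as laid out by lfs_initseg():
 *
 *	+--------+-----------+--------------------------------+
 *	| SEGSUM | FINFO ... | data blocks / inode blocks ... |
 *	+--------+-----------+--------------------------------+
 *
 * The summary occupies lfs_sumsize bytes; sum_bytes_left tracks the
 * room left in it for FINFOs and inode addresses, while
 * seg_bytes_left tracks the data space left in the segment proper.
 */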
1350 
1351 /*
1352  * Return the next segment to write.
1353  */
1354 void
1355 lfs_newseg(struct lfs *fs)
1356 {
1357 	CLEANERINFO *cip;
1358 	SEGUSE *sup;
1359 	struct buf *bp;
1360 	int curseg, isdirty, sn;
1361 
1362 	LFS_SEGENTRY(sup, fs, dtosn(fs, fs->lfs_nextseg), bp);
1363 #ifdef DEBUG_SU_NBYTES
1364 	printf("lfs_newseg: seg %d := 0 in newseg\n",   /* XXXDEBUG */
1365 	       dtosn(fs, fs->lfs_nextseg)); /* XXXDEBUG */
1366 #endif
1367 	sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
1368 	sup->su_nbytes = 0;
1369 	sup->su_nsums = 0;
1370 	sup->su_ninos = 0;
1371 	(void) LFS_BWRITE_LOG(bp); /* Ifile */
1372 
1373 	LFS_CLEANERINFO(cip, fs, bp);
1374 	--cip->clean;
1375 	++cip->dirty;
1376 	fs->lfs_nclean = cip->clean;
1377 	LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);
1378 
1379 	fs->lfs_lastseg = fs->lfs_curseg;
1380 	fs->lfs_curseg = fs->lfs_nextseg;
1381 	for (sn = curseg = dtosn(fs, fs->lfs_curseg) + fs->lfs_interleave;;) {
1382 		sn = (sn + 1) % fs->lfs_nseg;
1383 		if (sn == curseg)
1384 			panic("lfs_nextseg: no clean segments");
1385 		LFS_SEGENTRY(sup, fs, sn, bp);
1386 		isdirty = sup->su_flags & SEGUSE_DIRTY;
1387 		brelse(bp);
1388 		if (!isdirty)
1389 			break;
1390 	}
1391 
1392 	++fs->lfs_nactive;
1393 	fs->lfs_nextseg = sntod(fs, sn);
1394 	if (lfs_dostats) {
1395 		++lfs_stats.segsused;
1396 	}
1397 }
1398 
1399 static struct buf **
1400 lookahead_pagemove(struct buf **bpp, int nblocks, size_t *size)
1401 {
1402 	size_t maxsize;
1403 #ifndef LFS_NO_PAGEMOVE
1404 	struct buf *bp;
1405 #endif
1406 
1407 	maxsize = *size;
1408 	*size = 0;
1409 #ifdef LFS_NO_PAGEMOVE
1410 	return bpp;
1411 #else
1412 	while((bp = *bpp) != NULL && *size < maxsize && nblocks--) {
1413 		if(bp->b_flags & B_CALL)
1414 			return bpp;
1415 		if(bp->b_bcount % NBPG)
1416 			return bpp;
1417 		*size += bp->b_bcount;
1418 		++bpp;
1419 	}
1420 	return NULL;
1421 #endif
1422 }
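/*
 * A buffer qualifies for pagemove clustering only if it is not a
 * cleaner buffer (B_CALL) and b_bcount is a whole number of pages.
 * The first disqualifying buffer ends the run and its slot is
 * returned, so the caller falls back to malloc/copy from that point;
 * NULL means the entire run qualified.
 */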
1423 
1424 #define BQUEUES 4 /* XXX */
1425 #define BQ_EMPTY 3 /* XXX */
1426 extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
1427 
1428 #define	BUFHASH(dvp, lbn)	\
1429 	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
1430 extern LIST_HEAD(bufhashhdr, buf) invalhash;
1431 /*
1432  * Insq/Remq for the buffer hash lists.
1433  */
1434 #define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
1435 #define	bremhash(bp)		LIST_REMOVE(bp, b_hash)
1436 
1437 static struct buf *
1438 lfs_newclusterbuf(struct lfs *fs, struct vnode *vp, daddr_t addr, int n)
1439 {
1440 	struct lfs_cluster *cl;
1441 	struct buf **bpp, *bp;
1442 	int s;
1443 
1444 	cl = (struct lfs_cluster *)malloc(sizeof(*cl), M_SEGMENT, M_WAITOK);
1445 	bpp = (struct buf **)malloc(n*sizeof(*bpp), M_SEGMENT, M_WAITOK);
1446 	memset(cl,0,sizeof(*cl));
1447 	cl->fs = fs;
1448 	cl->bpp = bpp;
1449 	cl->bufcount = 0;
1450 	cl->bufsize = 0;
1451 
1452 	/* Get an empty buffer header, or maybe one with something on it */
1453 	s = splbio();
1454 	if((bp = bufqueues[BQ_EMPTY].tqh_first) != NULL) {
1455 		bremfree(bp);
1456 		/* clear out various other fields */
1457 		bp->b_flags = B_BUSY;
1458 		bp->b_dev = NODEV;
1459 		bp->b_blkno = bp->b_lblkno = 0;
1460 		bp->b_error = 0;
1461 		bp->b_resid = 0;
1462 		bp->b_bcount = 0;
1463 
1464 		/* nuke any credentials we were holding */
1465 		/* XXXXXX */
1466 
1467 		bremhash(bp);
1468 
1469 		/* disassociate us from our vnode, if we had one... */
1470 		if (bp->b_vp)
1471 			brelvp(bp);
1472 	}
1473 	splx(s);
1474 	while (!bp)
1475 		bp = getnewbuf(0, 0);
1476 	s = splbio();
1477 	bgetvp(vp, bp);
1478 	binshash(bp,&invalhash);
1479 	splx(s);
1480 	bp->b_bcount = 0;
1481 	bp->b_blkno = bp->b_lblkno = addr;
1482 
1483 	bp->b_flags |= B_CALL;
1484 	bp->b_iodone = lfs_cluster_callback;
1485 	cl->saveaddr = bp->b_saveaddr; /* XXX is this ever used? */
1486 	bp->b_saveaddr = (caddr_t)cl;
1487 
1488 	return bp;
1489 }
1490 
1491 int
1492 lfs_writeseg(struct lfs *fs, struct segment *sp)
1493 {
1494 	struct buf **bpp, *bp, *cbp, *newbp, **pmlastbpp;
1495 	SEGUSE *sup;
1496 	SEGSUM *ssp;
1497 	dev_t i_dev;
1498 	char *datap, *dp;
1499 	int do_again, i, nblocks, s;
1500 	size_t el_size;
1501  	struct lfs_cluster *cl;
1502 	int (*strategy)(void *);
1503 	struct vop_strategy_args vop_strategy_a;
1504 	u_short ninos;
1505 	struct vnode *devvp;
1506 	char *p;
1507 	struct vnode *vp;
1508 	struct inode *ip;
1509 	size_t pmsize;
1510 	int use_pagemove;
1511 	daddr_t pseg_daddr;
1512 	daddr_t *daddrp;
1513 	int changed;
1514 #if defined(DEBUG) && defined(LFS_PROPELLER)
1515 	static int propeller;
1516 	char propstring[4] = "-\\|/";
1517 
1518 	printf("%c\b",propstring[propeller++]);
1519 	if (propeller == 4)
1520 		propeller = 0;
1521 #endif
1522 	pseg_daddr = (*(sp->bpp))->b_blkno;
1523 
1524 	/*
1525 	 * If there are no buffers other than the segment summary to write
1526 	 * and it is not a checkpoint, don't do anything.  On a checkpoint,
1527 	 * even if there aren't any buffers, you need to write the superblock.
1528 	 */
1529 	if ((nblocks = sp->cbpp - sp->bpp) == 1)
1530 		return (0);
1531 
1532 	i_dev = VTOI(fs->lfs_ivnode)->i_dev;
1533 	devvp = VTOI(fs->lfs_ivnode)->i_devvp;
1534 
1535 	/* Update the segment usage information. */
1536 	LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
1537 
1538 	/* Loop through all blocks, except the segment summary. */
1539 	for (bpp = sp->bpp; ++bpp < sp->cbpp; ) {
1540 		if ((*bpp)->b_vp != devvp) {
1541 			sup->su_nbytes += (*bpp)->b_bcount;
1542 #ifdef DEBUG_SU_NBYTES
1543 		printf("seg %d += %ld for ino %d lbn %d db 0x%x\n",
1544 		       sp->seg_number, (*bpp)->b_bcount,
1545 		       VTOI((*bpp)->b_vp)->i_number,
1546 		       (*bpp)->b_lblkno, (*bpp)->b_blkno);
1547 #endif
1548 		}
1549 	}
1550 
1551 	ssp = (SEGSUM *)sp->segsum;
1552 
1553 	ninos = (ssp->ss_ninos + INOPB(fs) - 1) / INOPB(fs);
1554 #ifdef DEBUG_SU_NBYTES
1555 	printf("seg %d += %d for %d inodes\n",   /* XXXDEBUG */
1556 	       sp->seg_number, ssp->ss_ninos * DINODE_SIZE,
1557 	       ssp->ss_ninos);
1558 #endif
1559 	sup->su_nbytes += ssp->ss_ninos * DINODE_SIZE;
1560 	/* sup->su_nbytes += fs->lfs_sumsize; */
1561 	if (fs->lfs_version == 1)
1562 		sup->su_olastmod = time.tv_sec;
1563 	else
1564 		sup->su_lastmod = time.tv_sec;
1565 	sup->su_ninos += ninos;
1566 	++sup->su_nsums;
1567 	fs->lfs_dmeta += (btofsb(fs, fs->lfs_sumsize) + btofsb(fs, ninos *
1568 							 fs->lfs_ibsize));
1569 	fs->lfs_avail -= btofsb(fs, fs->lfs_sumsize);
1570 
1571 	do_again = !(bp->b_flags & B_GATHERED);
1572 	(void)LFS_BWRITE_LOG(bp); /* Ifile */
1573 	/*
1574 	 * Mark blocks B_BUSY, to prevent them from being changed between
1575 	 * the checksum computation and the actual write.
1576 	 *
1577 	 * If we are cleaning, check indirect blocks for UNWRITTEN, and if
1578 	 * there are any, replace them with copies that have UNASSIGNED
1579 	 * instead.
1580 	 */
1581 	for (bpp = sp->bpp, i = nblocks - 1; i--;) {
1582 		++bpp;
1583 		if ((*bpp)->b_flags & B_CALL)
1584 			continue;
1585 		bp = *bpp;
1586 	    again:
1587 		s = splbio();
1588 		if (bp->b_flags & B_BUSY) {
1589 #ifdef DEBUG
1590 			printf("lfs_writeseg: avoiding potential data "
1591 			       "summary corruption for ino %d, lbn %d\n",
1592 			       VTOI(bp->b_vp)->i_number, bp->b_lblkno);
1593 #endif
1594 			bp->b_flags |= B_WANTED;
1595 			tsleep(bp, (PRIBIO + 1), "lfs_writeseg", 0);
1596 			splx(s);
1597 			goto again;
1598 		}
1599 		bp->b_flags |= B_BUSY;
1600 		splx(s);
1601 		/* Check and replace indirect block UNWRITTEN bogosity */
1602 		if (bp->b_lblkno < 0 && bp->b_vp != devvp && bp->b_vp &&
1603 		   VTOI(bp->b_vp)->i_ffs_blocks !=
1604 		   VTOI(bp->b_vp)->i_lfs_effnblks) {
1605 #ifdef DEBUG_LFS
1606 			printf("lfs_writeseg: cleansing ino %d (%d != %d)\n",
1607 			       VTOI(bp->b_vp)->i_number,
1608 			       VTOI(bp->b_vp)->i_lfs_effnblks,
1609 			       VTOI(bp->b_vp)->i_ffs_blocks);
1610 #endif
1611 			/* Make a copy we'll make changes to */
1612 			newbp = lfs_newbuf(fs, bp->b_vp, bp->b_lblkno,
1613 					   bp->b_bcount);
1614 			newbp->b_blkno = bp->b_blkno;
1615 			memcpy(newbp->b_data, bp->b_data,
1616 			       newbp->b_bcount);
1617 			*bpp = newbp;
1618 
1619 			changed = 0;
1620 			for (daddrp = (daddr_t *)(newbp->b_data);
1621 			     daddrp < (daddr_t *)(newbp->b_data +
1622 						  newbp->b_bcount); daddrp++) {
1623 				if (*daddrp == UNWRITTEN) {
1624 					++changed;
1625 #ifdef DEBUG_LFS
1626 					printf("lfs_writeseg: replacing UNWRITTEN\n");
1627 #endif
1628 					*daddrp = 0;
1629 				}
1630 			}
1631 			/*
1632 			 * Get rid of the old buffer.  Don't mark it clean,
1633 			 * though, if it still has dirty data on it.
1634 			 */
1635 			if (changed) {
1636 				bp->b_flags &= ~(B_ERROR | B_GATHERED);
1637 				if (bp->b_flags & B_CALL) {
1638 					lfs_freebuf(bp);
1639 					bp = NULL;
1640 				} else {
1641 					/* Still on free list, leave it there */
1642 					s = splbio();
1643 					bp->b_flags &= ~B_BUSY;
1644 					if (bp->b_flags & B_WANTED)
1645 						wakeup(bp);
1646 				 	splx(s);
1647 					/*
1648 					 * We have to re-decrement lfs_avail
1649 					 * since this block is going to come
1650 					 * back around to us in the next
1651 					 * segment.
1652 					 */
1653 					fs->lfs_avail -= btofsb(fs, bp->b_bcount);
1654 				}
1655 			} else {
1656 				bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
1657 						 B_GATHERED);
1658 				if (bp->b_flags & B_CALL) {
1659 					lfs_freebuf(bp);
1660 					bp = NULL;
1661 				} else {
1662 					bremfree(bp);
1663 					bp->b_flags |= B_DONE;
1664 					s = splbio();
1665 					reassignbuf(bp, bp->b_vp);
1666 					splx(s);
1667 					LFS_UNLOCK_BUF(bp);
1668 					brelse(bp);
1669 				}
1670 			}
1671 
1672 		}
1673 	}
1674 	/*
1675 	 * Compute checksum across data and then across summary; the first
1676 	 * block (the summary block) is skipped.  Set the create time here
1677 	 * so that it's guaranteed to be later than the inode mod times.
1678 	 *
1679 	 * XXX
1680 	 * Fix this to do it inline, instead of malloc/copy.
1681 	 */
1682 	if (fs->lfs_version == 1)
1683 		el_size = sizeof(u_long);
1684 	else
1685 		el_size = sizeof(u_int32_t);
1686 	datap = dp = malloc(nblocks * el_size, M_SEGMENT, M_WAITOK);
1687 	for (bpp = sp->bpp, i = nblocks - 1; i--;) {
1688 		if (((*++bpp)->b_flags & (B_CALL|B_INVAL)) == (B_CALL|B_INVAL)) {
1689 			if (copyin((*bpp)->b_saveaddr, dp, el_size))
1690 				panic("lfs_writeseg: copyin failed [1]: "
1691 				      "ino %d blk %d",
1692 				      VTOI((*bpp)->b_vp)->i_number,
1693 				      (*bpp)->b_lblkno);
1694 		} else
1695 			memcpy(dp, (*bpp)->b_data, el_size);
1696 		dp += el_size;
1697 	}
1698 	if (fs->lfs_version == 1)
1699 		ssp->ss_ocreate = time.tv_sec;
1700 	else {
1701 		ssp->ss_create = time.tv_sec;
1702 		ssp->ss_serial = ++fs->lfs_serial;
1703 		ssp->ss_ident  = fs->lfs_ident;
1704 	}
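	/*
	 * Checksum scheme (see the two cksum() calls below): ss_datasum
	 * covers only the first el_size bytes of each data block, as
	 * sampled into datap above; ss_sumsum covers the summary from
	 * ss_datasum to the end, so it seals the data checksum as well.
	 */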
1705 #ifndef LFS_MALLOC_SUMMARY
1706 	/* Set the summary block busy too */
1707 	(*(sp->bpp))->b_flags |= B_BUSY;
1708 #endif
1709 	ssp->ss_datasum = cksum(datap, (nblocks - 1) * el_size);
1710 	ssp->ss_sumsum =
1711 	    cksum(&ssp->ss_datasum, fs->lfs_sumsize - sizeof(ssp->ss_sumsum));
1712 	free(datap, M_SEGMENT);
1713 	datap = dp = NULL;
1714 #ifdef DIAGNOSTIC
1715 	if (fs->lfs_bfree < btofsb(fs, ninos * fs->lfs_ibsize) + btofsb(fs, fs->lfs_sumsize))
1716 		panic("lfs_writeseg: No diskspace for summary");
1717 #endif
1718 	fs->lfs_bfree -= (btofsb(fs, ninos * fs->lfs_ibsize) +
1719 			  btofsb(fs, fs->lfs_sumsize));
1720 
1721 	strategy = devvp->v_op[VOFFSET(vop_strategy)];
1722 
1723 	/*
1724   	 * When we simply write the blocks we lose a rotation for every block
1725 	 * written.  To avoid this problem, we use pagemove to cluster
1726 	 * the buffers into a chunk and write the chunk.  CHUNKSIZE is the
1727   	 * largest size I/O devices can handle.
1728   	 *
1729 	 * XXX - right now MAXPHYS is only 64k; could it be larger?
1730 	 */
1731 
1732 #define CHUNKSIZE MAXPHYS
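/*
 * Worked example (assumed sizes): with MAXPHYS = 64k and 8k file
 * system blocks, a cluster carries at most eight blocks, so writing
 * a partial segment of 64 blocks issues roughly 8 device I/Os
 * instead of 64.
 */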
1733 
1734 	if (devvp == NULL)
1735 		panic("devvp is NULL");
1736 	for (bpp = sp->bpp, i = nblocks; i;) {
1737 		cbp = lfs_newclusterbuf(fs, devvp, (*bpp)->b_blkno, i);
1738 		cl = (struct lfs_cluster *)cbp->b_saveaddr;
1739 
1740 		cbp->b_dev = i_dev;
1741 		cbp->b_flags |= B_ASYNC | B_BUSY;
1742 		cbp->b_bcount = 0;
1743 
1744 		/*
1745 		 * Find out if we can use pagemove to build the cluster,
1746 		 * or if we are stuck using malloc/copy.  If this is the
1747 		 * first cluster, set the shift flag (see below).
1748 		 */
1749 		pmsize = CHUNKSIZE;
1750 		use_pagemove = 0;
1751 		if (bpp == sp->bpp) {
1752 			/* Summary blocks have to get special treatment */
1753 			pmlastbpp = lookahead_pagemove(bpp + 1, i - 1, &pmsize);
1754 			if (pmsize >= CHUNKSIZE - fs->lfs_sumsize ||
1755 			   pmlastbpp == NULL) {
1756 				use_pagemove = 1;
1757 				cl->flags |= LFS_CL_SHIFT;
1758 			} else {
1759 				/*
1760 				 * If we're not using pagemove, we have
1761 				 * to copy the summary down to the bottom
1762 				 * end of the block.
1763 				 */
1764 #ifndef LFS_MALLOC_SUMMARY
1765 				memcpy((*bpp)->b_data, (*bpp)->b_data +
1766 				       NBPG - fs->lfs_sumsize,
1767 				       fs->lfs_sumsize);
1768 #endif /* LFS_MALLOC_SUMMARY */
1769 			}
1770 		} else {
1771 			pmlastbpp = lookahead_pagemove(bpp, i, &pmsize);
1772 			if (pmsize >= CHUNKSIZE || pmlastbpp == NULL) {
1773 				use_pagemove = 1;
1774 			}
1775 		}
1776 		if (use_pagemove == 0) {
1777 			cl->flags |= LFS_CL_MALLOC;
1778 			cl->olddata = cbp->b_data;
1779 			cbp->b_data = malloc(CHUNKSIZE, M_SEGMENT, M_WAITOK);
1780 		}
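		/*
		 * (The tradeoff: pagemove() remaps the buffers' pages into
		 * the cluster, avoiding a copy, but only works in page-sized
		 * units; the malloc path copies each buffer into the chunk
		 * and restores everything in lfs_cluster_callback().)
		 */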
1781 		if (dtosn(fs, dbtofsb(fs, (*bpp)->b_blkno + btodb((*bpp)->b_bcount - 1))) !=
1782 		if(dtosn(fs, dbtofsb(fs, (*bpp)->b_blkno + btodb((*bpp)->b_bcount - 1))) !=
1783 		   dtosn(fs, dbtofsb(fs, cbp->b_blkno))) {
1784 			printf("block at %x (%d), cbp at %x (%d)\n",
1785 				(*bpp)->b_blkno, dtosn(fs, dbtofsb(fs, (*bpp)->b_blkno)),
1786 			       cbp->b_blkno, dtosn(fs, dbtofsb(fs, cbp->b_blkno)));
1787 			panic("lfs_writeseg: Segment overwrite");
1788 		}
1789 #endif
1790 
1791 		/*
1792 		 * Construct the cluster.
1793 		 */
1794 		s = splbio();
1795 		while (fs->lfs_iocount >= LFS_THROTTLE) {
1796 #ifdef DEBUG_LFS
1797 			printf("[%d]", fs->lfs_iocount);
1798 #endif
1799 			tsleep(&fs->lfs_iocount, PRIBIO+1, "lfs_throttle", 0);
1800 		}
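		/*
		 * (The matching wakeups are issued by lfs_supercallback()
		 * and lfs_cluster_callback() once lfs_iocount drops back
		 * below LFS_THROTTLE.)
		 */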
1801 		++fs->lfs_iocount;
1802 
1803 		for (p = cbp->b_data; i && cbp->b_bcount < CHUNKSIZE; i--) {
1804 			bp = *bpp;
1805 
1806 			if (bp->b_bcount > (CHUNKSIZE - cbp->b_bcount))
1807 				break;
1808 
1809 			/*
1810 			 * Fake buffers from the cleaner are marked as B_INVAL.
1811 			 * We need to copy the data from user space rather than
1812 			 * from the buffer indicated.
1813 			 * XXX - what do I do on an error?
1814 			 */
1815 			if ((bp->b_flags & (B_CALL|B_INVAL)) == (B_CALL|B_INVAL)) {
1816 				if (copyin(bp->b_saveaddr, p, bp->b_bcount))
1817 					panic("lfs_writeseg: copyin failed [2]");
1818 			} else if (use_pagemove) {
1819 				pagemove(bp->b_data, p, bp->b_bcount);
1820 				cbp->b_bufsize += bp->b_bcount;
1821 				bp->b_bufsize -= bp->b_bcount;
1822 			} else {
1823 				memcpy(p, bp->b_data, bp->b_bcount);
1824 				/* printf("copy in %p\n", bp->b_data); */
1825 			}
1826 
1827 			/*
1828 			 * XXX If we are *not* shifting, the summary
1829 			 * block is only fs->lfs_sumsize.  Otherwise,
1830 			 * it is NBPG but shifted.
1831 			 */
1832 			if (bpp == sp->bpp && !(cl->flags & LFS_CL_SHIFT)) {
1833 				p += fs->lfs_sumsize;
1834 				cbp->b_bcount += fs->lfs_sumsize;
1835 				cl->bufsize += fs->lfs_sumsize;
1836 			} else {
1837 				p += bp->b_bcount;
1838 				cbp->b_bcount += bp->b_bcount;
1839 				cl->bufsize += bp->b_bcount;
1840 			}
1841 			bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI | B_DONE);
1842 			cl->bpp[cl->bufcount++] = bp;
1843 			vp = bp->b_vp;
1844 			++vp->v_numoutput;
1845 
1846 			/*
1847 			 * Although it cannot be freed for reuse before the
1848 			 * cluster is written to disk, this buffer does not
1849 			 * need to be held busy.  Therefore we unbusy it,
1850 			 * while leaving it on the locked list.  It will
1851 			 * be freed or requeued by the callback depending
1852 			 * on whether it has had B_DELWRI set again in the
1853 			 * meantime.
1854 			 *
1855 			 * If we are using pagemove, we have to hold the block
1856 			 * busy to prevent its contents from changing before
1857 			 * it hits the disk, and invalidating the checksum.
1858 			 */
1859 			bp->b_flags &= ~(B_DELWRI | B_READ | B_ERROR);
1860 #ifdef LFS_MNOBUSY
1861 			if (cl->flags & LFS_CL_MALLOC) {
1862 				if (!(bp->b_flags & B_CALL))
1863 					brelse(bp); /* Still B_LOCKED */
1864 			}
1865 #endif
1866 			bpp++;
1867 
1868 			/*
1869 			 * If this is the last block for this vnode, but
1870 			 * there are other blocks on its dirty list,
1871 			 * set IN_MODIFIED/IN_CLEANING depending on what
1872 			 * sort of block.  Only do this for our mount point,
1873 			 * not for, e.g., inode blocks that are attached to
1874 			 * the devvp.
1875 			 * XXX KS - Shouldn't we set *both* if both types
1876 			 * of blocks are present (traverse the dirty list?)
1877 			 */
1878 			if ((i == 1 ||
1879 			     (i > 1 && vp && *bpp && (*bpp)->b_vp != vp)) &&
1880 			    (bp = LIST_FIRST(&vp->v_dirtyblkhd)) != NULL &&
1881 			    vp->v_mount == fs->lfs_ivnode->v_mount)
1882 			{
1883 				ip = VTOI(vp);
1884 #ifdef DEBUG_LFS
1885 				printf("lfs_writeseg: marking ino %d\n",
1886 				       ip->i_number);
1887 #endif
1888 				if (bp->b_flags & B_CALL)
1889 					LFS_SET_UINO(ip, IN_CLEANING);
1890 				else
1891 					LFS_SET_UINO(ip, IN_MODIFIED);
1892 			}
1893 			wakeup(vp);
1894 		}
1895 		++cbp->b_vp->v_numoutput;
1896 		splx(s);
1897 		/*
1898 		 * In order to include the summary in a clustered block,
1899 		 * it may be necessary to shift the block forward, since
1900 		 * summary blocks are in general smaller than the units
1901 		 * pagemove() can address.  After the write, the shift is
1902 		 * undone in lfs_cluster_callback() before disassembly.
1903 		 */
1904 		if (cl->flags & LFS_CL_SHIFT) {
1905 			cbp->b_data += (NBPG - fs->lfs_sumsize);
1906 			cbp->b_bcount -= (NBPG - fs->lfs_sumsize);
1907 		}
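		/*
		 * (Worked numbers, assuming NBPG = 4096 and a 512-byte
		 * summary: the cluster start advances by 3584 bytes here,
		 * and the callback walks it back by the same amount.)
		 */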
1908 		vop_strategy_a.a_desc = VDESC(vop_strategy);
1909 		vop_strategy_a.a_bp = cbp;
1910 		(strategy)(&vop_strategy_a);
1911 	}
1912 
1913 	if (lfs_dostats) {
1914 		++lfs_stats.psegwrites;
1915 		lfs_stats.blocktot += nblocks - 1;
1916 		if (fs->lfs_sp->seg_flags & SEGM_SYNC)
1917 			++lfs_stats.psyncwrites;
1918 		if (fs->lfs_sp->seg_flags & SEGM_CLEAN) {
1919 			++lfs_stats.pcleanwrites;
1920 			lfs_stats.cleanblocks += nblocks - 1;
1921 		}
1922 	}
1923 	return (lfs_initseg(fs) || do_again);
1924 }
1925 
1926 void
1927 lfs_writesuper(struct lfs *fs, daddr_t daddr)
1928 {
1929 	struct buf *bp;
1930 	dev_t i_dev;
1931 	int (*strategy)(void *);
1932 	int s;
1933 	struct vop_strategy_args vop_strategy_a;
1934 
1935 	/*
1936 	 * If one superblock write were allowed to start while another
1937 	 * is still in progress, a crash could leave no complete
1938 	 * checkpoint.  So, block here if a superblock write is in progress.
1939 	 */
1940 	s = splbio();
1941 	while (fs->lfs_sbactive) {
1942 		tsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs sb", 0);
1943 	}
1944 	fs->lfs_sbactive = daddr;
1945 	splx(s);
1946 	i_dev = VTOI(fs->lfs_ivnode)->i_dev;
1947 	strategy = VTOI(fs->lfs_ivnode)->i_devvp->v_op[VOFFSET(vop_strategy)];
1948 
1949 	/* Set timestamp of this version of the superblock */
1950 	if (fs->lfs_version == 1)
1951 		fs->lfs_otstamp = time.tv_sec;
1952 	fs->lfs_tstamp = time.tv_sec;
1953 
1954 	/* Checksum the superblock and copy it into a buffer. */
1955 	fs->lfs_cksum = lfs_sb_cksum(&(fs->lfs_dlfs));
1956 	bp = lfs_newbuf(fs, VTOI(fs->lfs_ivnode)->i_devvp, fsbtodb(fs, daddr), LFS_SBPAD);
1957 	*(struct dlfs *)bp->b_data = fs->lfs_dlfs;
1958 
1959 	bp->b_dev = i_dev;
1960 	bp->b_flags |= B_BUSY | B_CALL | B_ASYNC;
1961 	bp->b_flags &= ~(B_DONE | B_ERROR | B_READ | B_DELWRI);
1962 	bp->b_iodone = lfs_supercallback;
1963 	/* XXX KS - same nasty hack as above */
1964 	bp->b_saveaddr = (caddr_t)fs;
1965 
1966 	vop_strategy_a.a_desc = VDESC(vop_strategy);
1967 	vop_strategy_a.a_bp = bp;
1968 	s = splbio();
1969 	++bp->b_vp->v_numoutput;
1970 	++fs->lfs_iocount;
1971 	splx(s);
1972 	(strategy)(&vop_strategy_a);
1973 }
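/*
 * (A checkpoint typically calls lfs_writesuper() for more than one of the
 * superblock locations recorded in fs->lfs_sboffs[]; the lfs_sbactive gate
 * above keeps those writes from overlapping, so a crash cannot tear more
 * than one copy at once.)
 */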
1974 
1975 /*
1976  * Logical block number match routines used when traversing the dirty block
1977  * chain.
1978  */
1979 int
1980 lfs_match_fake(struct lfs *fs, struct buf *bp)
1981 {
1982 	return (bp->b_flags & B_CALL);
1983 }
1984 
1985 int
1986 lfs_match_data(struct lfs *fs, struct buf *bp)
1987 {
1988 	return (bp->b_lblkno >= 0);
1989 }
1990 
1991 int
1992 lfs_match_indir(struct lfs *fs, struct buf *bp)
1993 {
1994 	int lbn;
1995 
1996 	lbn = bp->b_lblkno;
1997 	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 0);
1998 }
1999 
2000 int
2001 lfs_match_dindir(struct lfs *fs, struct buf *bp)
2002 {
2003 	int lbn;
2004 
2005 	lbn = bp->b_lblkno;
2006 	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 1);
2007 }
2008 
2009 int
2010 lfs_match_tindir(struct lfs *fs, struct buf *bp)
2011 {
2012 	int lbn;
2013 
2014 	lbn = bp->b_lblkno;
2015 	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 2);
2016 }
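/*
 * A worked example of the lbn numbering these predicates assume
 * (with, say, NDADDR = 12 and NINDIR(fs) = 2048): lbn -12 gives
 * (12 - 12) % 2048 == 0, a single indirect block; lbn -13 gives 1,
 * a double indirect; lbn -14 gives 2, a triple indirect.  Data
 * blocks always have lbn >= 0.
 */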
2017 
2018 /*
2019  * XXX - The only buffers that will hit these functions are segment
2020  * write blocks, segment summaries, or superblocks.
2021  *
2022  * All of the above are created by lfs_newbuf, and so do not need to be
2023  * released via brelse.
2024  */
2025 void
2026 lfs_callback(struct buf *bp)
2027 {
2028 	/* struct lfs *fs; */
2029 	/* fs = (struct lfs *)bp->b_saveaddr; */
2030 	lfs_freebuf(bp);
2031 }
2032 
2033 void
2034 lfs_supercallback(struct buf *bp)
2035 {
2036 	struct lfs *fs;
2037 
2038 	fs = (struct lfs *)bp->b_saveaddr;
2039 	fs->lfs_sbactive = 0;
2040 	wakeup(&fs->lfs_sbactive);
2041 	if (--fs->lfs_iocount < LFS_THROTTLE)
2042 		wakeup(&fs->lfs_iocount);
2043 	lfs_freebuf(bp);
2044 }
2045 
2046 static void
2047 lfs_cluster_callback(struct buf *bp)
2048 {
2049 	struct lfs_cluster *cl;
2050 	struct lfs *fs;
2051 	struct buf *tbp;
2052 	struct vnode *vp;
2053 	int error = 0;
2054 	char *cp;
2055 	extern int locked_queue_count;
2056 	extern long locked_queue_bytes;
2057 
2058 	if (bp->b_flags & B_ERROR)
2059 		error = bp->b_error;
2060 
2061 	cl = (struct lfs_cluster *)bp->b_saveaddr;
2062 	fs = cl->fs;
2063 	bp->b_saveaddr = cl->saveaddr;
2064 
2065 	/* If shifted, shift back now */
2066 	if (cl->flags & LFS_CL_SHIFT) {
2067 		bp->b_data -= (NBPG - fs->lfs_sumsize);
2068 		bp->b_bcount += (NBPG - fs->lfs_sumsize);
2069 	}
2070 
2071 	cp = (char *)bp->b_data + cl->bufsize;
2072 	/* Put the pages back, and release the buffer */
2073 	while (cl->bufcount--) {
2074 		tbp = cl->bpp[cl->bufcount];
2075 		if (!(cl->flags & LFS_CL_MALLOC)) {
2076 			cp -= tbp->b_bcount;
2077 			/* printf("pm(%p,%p,%lx)", cp, tbp->b_data, tbp->b_bcount); */
2078 			pagemove(cp, tbp->b_data, tbp->b_bcount);
2079 			bp->b_bufsize -= tbp->b_bcount;
2080 			tbp->b_bufsize += tbp->b_bcount;
2081 		}
2082 		if (error) {
2083 			tbp->b_flags |= B_ERROR;
2084 			tbp->b_error = error;
2085 		}
2086 
2087 		/*
2088 		 * We're done with tbp.  If it has not been re-dirtied since
2089 		 * the cluster was written, free it.  Otherwise, keep it on
2090 		 * the locked list to be written again.
2091 		 */
2092 		if ((tbp->b_flags & (B_LOCKED | B_DELWRI)) == B_LOCKED)
2093 			LFS_UNLOCK_BUF(tbp);
2094 		tbp->b_flags &= ~B_GATHERED;
2095 
2096 		LFS_BCLEAN_LOG(fs, tbp);
2097 
2098 		vp = tbp->b_vp;
2099 		/* Segment summary for a shifted cluster */
2100 		if (!cl->bufcount && (cl->flags & LFS_CL_SHIFT))
2101 			tbp->b_flags |= B_INVAL;
2102 		if (!(tbp->b_flags & B_CALL)) {
2103 			bremfree(tbp);
2104 			if (vp)
2105 				reassignbuf(tbp, vp);
2106 			tbp->b_flags |= B_ASYNC; /* for biodone */
2107 		}
2108 #ifdef DIAGNOSTIC
2109 		if (tbp->b_flags & B_DONE) {
2110 			printf("blk %d biodone already (flags %lx)\n",
2111 				cl->bufcount, (long)tbp->b_flags);
2112 		}
2113 #endif
2114 		if (tbp->b_flags & (B_BUSY | B_CALL)) {
2115 			/*
2116 			 * Prevent vp from being moved between hold list
2117 			 * and free list by giving it an extra hold,
2118 			 * and then inline HOLDRELE, minus the TAILQ
2119 			 * manipulation.
2120 			 *
2121 			 * lfs_vunref() will put the vnode back on the
2122 			 * appropriate free list the next time it is
2123 			 * called (in thread context).
2124 			 */
2125 			if (vp)
2126 				VHOLD(vp);
2127 			biodone(tbp);
2128 			if (vp) {
2129 				simple_lock(&vp->v_interlock);
2130 				if (vp->v_holdcnt <= 0)
2131 					panic("lfs_cluster_callback: "
2132 					      "holdcnt vp %p", vp);
2133 				vp->v_holdcnt--;
2134 				simple_unlock(&vp->v_interlock);
2135 			}
2136 		}
2137 	}
2138 
2139 	/* Fix up the cluster buffer, and release it */
2140 	if (!(cl->flags & LFS_CL_MALLOC) && bp->b_bufsize) {
2141 		/* printf("PM(%p,%p,%lx)", (char *)bp->b_data + bp->b_bcount,
2142 			 (char *)bp->b_data, bp->b_bufsize); */
2143 		pagemove((char *)bp->b_data + bp->b_bcount,
2144 			 (char *)bp->b_data, bp->b_bufsize);
2145 	}
2146 	if (cl->flags & LFS_CL_MALLOC) {
2147 		free(bp->b_data, M_SEGMENT);
2148 		bp->b_data = cl->olddata;
2149 	}
2150 	bp->b_bcount = 0;
2151 	bp->b_iodone = NULL;
2152 	bp->b_flags &= ~B_DELWRI;
2153 	bp->b_flags |= B_DONE;
2154 	reassignbuf(bp, bp->b_vp);
2155 	brelse(bp);
2156 
2157 	free(cl->bpp, M_SEGMENT);
2158 	free(cl, M_SEGMENT);
2159 
2160 #ifdef DIAGNOSTIC
2161 	if (fs->lfs_iocount == 0)
2162 		panic("lfs_cluster_callback: zero iocount");
2163 #endif
2164 	if (--fs->lfs_iocount < LFS_THROTTLE)
2165 		wakeup(&fs->lfs_iocount);
2166 #if 0
2167 	if (fs->lfs_iocount == 0) {
2168 		/*
2169 		 * XXX - do we really want to do this in a callback?
2170 		 *
2171 		 * Vinvalbuf can move locked buffers off the locked queue
2172 		 * and we have no way of knowing about this.  So, after
2173 		 * doing a big write, we recalculate how many buffers are
2174 		 * really still left on the locked queue.
2175 		 */
2176 		lfs_countlocked(&locked_queue_count, &locked_queue_bytes, "lfs_cluster_callback");
2177 		wakeup(&locked_queue_count);
2178 	}
2179 #endif
2180 }
2181 
2182 /*
2183  * Shellsort (diminishing increment sort) from Data Structures and
2184  * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
2185  * see also Knuth Vol. 3, page 84.  The increments are selected from
2186  * formula (8), page 95.  Roughly O(N^3/2).
2187  */
2188 /*
2189  * This is our own private copy of shellsort because we want to sort
2190  * two parallel arrays (the array of buffer pointers and the array of
2191  * logical block numbers) simultaneously.  Note that we cast the array
2192  * of logical block numbers to a unsigned in this routine so that the
2193  * of logical block numbers to unsigned in this routine so that the
2194  * negative block numbers (metadata blocks) sort AFTER the data blocks.
2195 
2196 void
2197 lfs_shellsort(struct buf **bp_array, ufs_daddr_t *lb_array, int nmemb)
2198 {
2199 	static int __rsshell_increments[] = { 4, 1, 0 };
2200 	int incr, *incrp, t1, t2;
2201 	struct buf *bp_temp;
2202 	u_long lb_temp;
2203 
2204 	for (incrp = __rsshell_increments; (incr = *incrp++) != 0;)
2205 		for (t1 = incr; t1 < nmemb; ++t1)
2206 			for (t2 = t1 - incr; t2 >= 0;)
2207 				if ((u_long)lb_array[t2] > (u_long)lb_array[t2 + incr]) {
2208 					lb_temp = lb_array[t2];
2209 					lb_array[t2] = lb_array[t2 + incr];
2210 					lb_array[t2 + incr] = lb_temp;
2211 					bp_temp = bp_array[t2];
2212 					bp_array[t2] = bp_array[t2 + incr];
2213 					bp_array[t2 + incr] = bp_temp;
2214 					t2 -= incr;
2215 				} else
2216 					break;
2217 }
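/*
 * A small illustration with hypothetical values: sorting lbns
 * { 5, -12, 0, -13, 3 } as unsigned yields { 0, 3, 5, -13, -12 },
 * because 0xfffffff3 (-13) compares below 0xfffffff4 (-12); bp_array
 * is permuted in lockstep, so bp_array[i] still matches lb_array[i].
 */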
2218 
2219 /*
2220  * Check VXLOCK.  Return 1 if the vnode is locked.  Otherwise, vget it.
2221  */
2222 int
2223 lfs_vref(struct vnode *vp)
2224 {
2225 	/*
2226 	 * If we return 1 here during a flush, we risk vinvalbuf() not
2227 	 * being able to flush all of the pages from this vnode, which
2228 	 * will cause it to panic.  So, return 0 if a flush is in progress.
2229 	 */
2230 	if (vp->v_flag & VXLOCK) {
2231 		if (IS_FLUSHING(VTOI(vp)->i_lfs,vp)) {
2232 			return 0;
2233 		}
2234 		return (1);
2235 	}
2236 	return (vget(vp, 0));
2237 }
2238 
2239 /*
2240  * This is vrele except that we do not want to VOP_INACTIVE this vnode. We
2241  * inline vrele here to avoid the vn_lock and VOP_INACTIVE call at the end.
2242  */
2243 void
2244 lfs_vunref(struct vnode *vp)
2245 {
2246 	/*
2247 	 * Analogous to lfs_vref, if the node is flushing, fake it.
2248 	 */
2249 	if ((vp->v_flag & VXLOCK) && IS_FLUSHING(VTOI(vp)->i_lfs,vp)) {
2250 		return;
2251 	}
2252 
2253 	simple_lock(&vp->v_interlock);
2254 #ifdef DIAGNOSTIC
2255 	if (vp->v_usecount <= 0) {
2256 		printf("lfs_vunref: inum is %d\n", VTOI(vp)->i_number);
2257 		printf("lfs_vunref: flags are 0x%lx\n", (u_long)vp->v_flag);
2258 		printf("lfs_vunref: usecount = %ld\n", (long)vp->v_usecount);
2259 		panic("lfs_vunref: v_usecount <= 0");
2260 	}
2261 #endif
2262 	vp->v_usecount--;
2263 	if (vp->v_usecount > 0) {
2264 		simple_unlock(&vp->v_interlock);
2265 		return;
2266 	}
2267 	/*
2268 	 * insert at tail of LRU list
2269 	 */
2270 	simple_lock(&vnode_free_list_slock);
2271 	if (vp->v_holdcnt > 0)
2272 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
2273 	else
2274 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
2275 	simple_unlock(&vnode_free_list_slock);
2276 	simple_unlock(&vp->v_interlock);
2277 }
2278 
2279 /*
2280  * We use this when we have vnodes that were loaded in solely for cleaning.
2281  * There is no reason to believe that these vnodes will be referenced again
2282  * soon, since the cleaning process is unrelated to normal filesystem
2283  * activity.  Putting cleaned vnodes at the tail of the list has the effect
2284  * of flushing the vnode LRU.  So, put vnodes that were loaded only for
2285  * cleaning at the head of the list, instead.
2286  */
2287 void
2288 lfs_vunref_head(struct vnode *vp)
2289 {
2290 	simple_lock(&vp->v_interlock);
2291 #ifdef DIAGNOSTIC
2292 	if (vp->v_usecount == 0) {
2293 		panic("lfs_vunref_head: v_usecount = 0");
2294 	}
2295 #endif
2296 	vp->v_usecount--;
2297 	if (vp->v_usecount > 0) {
2298 		simple_unlock(&vp->v_interlock);
2299 		return;
2300 	}
2301 	/*
2302 	 * insert at head of LRU list
2303 	 */
2304 	simple_lock(&vnode_free_list_slock);
2305 	if (vp->v_holdcnt > 0)
2306 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
2307 	else
2308 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2309 	simple_unlock(&vnode_free_list_slock);
2310 	simple_unlock(&vp->v_interlock);
2311 }
2312 
2313