xref: /netbsd-src/sys/kern/vfs_wapbl.c (revision c4974f83fa410048ee2b7444e30091152d054c44)
1 /*	$NetBSD: vfs_wapbl.c,v 1.117 2024/12/07 15:10:42 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2003, 2008, 2009 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Wasabi Systems, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * This implements file system independent write-ahead logging (WAPBL).
34  */
35 
36 #define WAPBL_INTERNAL
37 
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.117 2024/12/07 15:10:42 riastradh Exp $");
40 
41 #include <sys/param.h>
42 #include <sys/types.h>
43 
44 #include <sys/bitops.h>
45 #include <sys/time.h>
46 #include <sys/wapbl.h>
47 #include <sys/wapbl_replay.h>
48 
49 #ifdef _KERNEL
50 
51 #include <sys/atomic.h>
52 #include <sys/conf.h>
53 #include <sys/evcnt.h>
54 #include <sys/file.h>
55 #include <sys/kauth.h>
56 #include <sys/kernel.h>
57 #include <sys/module.h>
58 #include <sys/mount.h>
59 #include <sys/mutex.h>
60 #include <sys/namei.h>
61 #include <sys/proc.h>
62 #include <sys/resourcevar.h>
63 #include <sys/sdt.h>
64 #include <sys/sysctl.h>
65 #include <sys/uio.h>
66 #include <sys/vnode.h>
67 
68 #include <miscfs/specfs/specdev.h>
69 
70 #define	wapbl_alloc(s)		kmem_alloc((s), KM_SLEEP)
71 #define	wapbl_free(a, s)	kmem_free((a), (s))
72 #define	wapbl_calloc(n, s)	kmem_zalloc((n)*(s), KM_SLEEP)
73 
74 static int wapbl_flush_disk_cache = 1;
75 static int wapbl_verbose_commit = 0;
76 static int wapbl_allow_dpofua = 0;	/* switched off by default for now */
77 static int wapbl_journal_iobufs = 4;
78 
79 static inline size_t wapbl_space_free(size_t, off_t, off_t);
80 
81 #else /* !_KERNEL */
82 
83 #include <assert.h>
84 #include <errno.h>
85 #include <stdbool.h>
86 #include <stdio.h>
87 #include <stdlib.h>
88 #include <string.h>
89 
90 #define	KDASSERT(x)		assert(x)
91 #define	KASSERT(x)		assert(x)
92 #define	wapbl_alloc(s)		malloc(s)
93 #define	wapbl_free(a, s)	free(a)
94 #define	wapbl_calloc(n, s)	calloc((n), (s))
95 
96 #define	SET_ERROR(E)		(E)
97 
98 #endif /* !_KERNEL */
99 
100 /*
101  * INTERNAL DATA STRUCTURES
102  */
103 
104 /*
105  * This structure holds per-mount log information.
106  *
107  * Legend:	a = atomic access only
108  *		r = read-only after init
109  *		l = rwlock held
110  *		m = mutex held
111  *		lm = rwlock held writing or mutex held
112  *		u = unlocked access ok
113  *		b = bufcache_lock held
114  */
115 LIST_HEAD(wapbl_ino_head, wapbl_ino);
116 struct wapbl {
117 	struct vnode *wl_logvp;	/* r:	log here */
118 	struct vnode *wl_devvp;	/* r:	log on this device */
119 	struct mount *wl_mount;	/* r:	mountpoint wl is associated with */
120 	daddr_t wl_logpbn;	/* r:	Physical block number of start of log */
121 	int wl_log_dev_bshift;	/* r:	logarithm of device block size of log
122 					device */
123 	int wl_fs_dev_bshift;	/* r:	logarithm of device block size of
124 					filesystem device */
125 
126 	unsigned wl_lock_count;	/* m:	Count of transactions in progress */
127 
128 	size_t wl_circ_size;	/* r:	Number of bytes in buffer of log */
129 	size_t wl_circ_off;	/* r:	Number of bytes reserved at start */
130 
131 	size_t wl_bufcount_max;	/* r:	Number of buffers reserved for log */
132 	size_t wl_bufbytes_max;	/* r:	Number of buf bytes reserved for log */
133 
134 	off_t wl_head;		/* l:	Byte offset of log head */
135 	off_t wl_tail;		/* l:	Byte offset of log tail */
136 	/*
137 	 * WAPBL log layout, stored on wl_devvp at wl_logpbn:
138 	 *
139 	 *  ___________________ wl_circ_size __________________
140 	 * /                                                   \
141 	 * +---------+---------+-------+--------------+--------+
142 	 * [ commit0 | commit1 | CCWCW | EEEEEEEEEEEE | CCCWCW ]
143 	 * +---------+---------+-------+--------------+--------+
144 	 *       wl_circ_off --^       ^-- wl_head    ^-- wl_tail
145 	 *
146 	 * commit0 and commit1 are commit headers.  A commit header has
147 	 * a generation number, indicating which of the two headers is
148 	 * more recent, and an assignment of head and tail pointers.
149 	 * The rest is a circular queue of log records, starting at
150 	 * the byte offset wl_circ_off.
151 	 *
152 	 * E marks empty space for records.
153 	 * W marks records for block writes issued but waiting.
154 	 * C marks completed records.
155 	 *
156 	 * wapbl_flush writes new records to empty `E' spaces after
157 	 * wl_head from the current transaction in memory.
158 	 *
159 	 * wapbl_truncate advances wl_tail past any completed `C'
160 	 * records, freeing them up for use.
161 	 *
162 	 * head == tail == 0 means log is empty.
163 	 * head == tail != 0 means log is full.
164 	 *
165 	 * See assertions in wapbl_advance() for other boundary
166 	 * conditions.
167 	 *
168 	 * Only wapbl_flush moves the head, except when wapbl_truncate
169 	 * sets it to 0 to indicate that the log is empty.
170 	 *
171 	 * Only wapbl_truncate moves the tail, except when wapbl_flush
172 	 * sets it to wl_circ_off to indicate that the log is full.
173 	 */
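	/*
	 * Worked example of the conventions above (hypothetical numbers,
	 * for illustration only): with wl_circ_off = 1024 and
	 * wl_circ_size = 8192, an empty log has head == tail == 0.
	 * Flushing 2048 bytes of records sets tail = 1024 (wl_circ_off)
	 * and head = 3072.  Once those records are on disk and their
	 * metadata writes complete, wapbl_truncate() may advance the tail
	 * by 2048; head == tail would mean "full", so both are reset to 0,
	 * marking the log empty again.
	 */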
174 
175 	struct wapbl_wc_header *wl_wc_header;	/* l	*/
176 	void *wl_wc_scratch;	/* l:	scratch space (XXX: why?!?) */
177 
178 	kmutex_t wl_mtx;	/* u:	short-term lock */
179 	krwlock_t wl_rwlock;	/* u:	File system transaction lock */
180 
181 	/*
182 	 * Must be held while accessing
183 	 * wl_count or wl_bufs or head or tail
184 	 */
185 
186 #if _KERNEL
187 	/*
188 	 * Callback called from within the flush routine to flush any extra
189 	 * bits.  Note that flush may be skipped without calling this if
190 	 * there are no outstanding buffers in the transaction.
191 	 */
192 	wapbl_flush_fn_t wl_flush;	/* r	*/
193 	wapbl_flush_fn_t wl_flush_abort;/* r	*/
194 
195 	/* Event counters */
196 	char wl_ev_group[EVCNT_STRING_MAX];	/* r	*/
197 	struct evcnt wl_ev_commit;		/* l	*/
198 	struct evcnt wl_ev_journalwrite;	/* l	*/
199 	struct evcnt wl_ev_jbufs_bio_nowait;	/* l	*/
200 	struct evcnt wl_ev_metawrite;		/* lm	*/
201 	struct evcnt wl_ev_cacheflush;		/* l	*/
202 #endif
203 
204 	size_t wl_bufbytes;	/* m:	Byte count of pages in wl_bufs */
205 	size_t wl_bufcount;	/* m:	Count of buffers in wl_bufs */
206 	size_t wl_bcount;	/* m:	Total bcount of wl_bufs */
207 
208 	TAILQ_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */
209 
210 	kcondvar_t wl_reclaimable_cv;	/* m (obviously) */
211 	size_t wl_reclaimable_bytes; /* m:	Amount of space available for
212 						reclamation by truncate */
213 	int wl_error_count;	/* m:	# of wl_entries with errors */
214 	size_t wl_reserved_bytes; /* never truncate log smaller than this */
215 
216 #ifdef WAPBL_DEBUG_BUFBYTES
217 	size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */
218 #endif
219 
220 #if _KERNEL
221 	int wl_brperjblock;	/* r Block records per journal block */
222 #endif
223 
224 	TAILQ_HEAD(, wapbl_dealloc) wl_dealloclist;	/* lm:	list head */
225 	int wl_dealloccnt;				/* lm:	total count */
226 	int wl_dealloclim;				/* r:	max count */
227 
228 	/* hashtable of inode numbers for allocated but unlinked inodes */
229 	/* synch ??? */
230 	struct wapbl_ino_head *wl_inohash;
231 	u_long wl_inohashmask;
232 	int wl_inohashcnt;
233 
234 	SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* m: On disk transaction
235 						   accounting */
236 
237 	/* buffers for wapbl_buffered_write() */
238 	TAILQ_HEAD(, buf) wl_iobufs;		/* l: Free or filling bufs */
239 	TAILQ_HEAD(, buf) wl_iobufs_busy;	/* l: In-transit bufs */
240 
241 	int wl_dkcache;		/* r:	disk cache flags */
242 #define WAPBL_USE_FUA(wl)	\
243 		(wapbl_allow_dpofua && ISSET((wl)->wl_dkcache, DKCACHE_FUA))
244 #define WAPBL_JFLAGS(wl)	\
245 		(WAPBL_USE_FUA(wl) ? (wl)->wl_jwrite_flags : 0)
246 #define WAPBL_JDATA_FLAGS(wl)	\
247 		(WAPBL_JFLAGS(wl) & B_MEDIA_DPO)	/* only DPO */
248 	int wl_jwrite_flags;	/* r:	journal write flags */
249 };
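/*
 * Summary of the FUA/DPO macros above (describing the code as written,
 * for illustration): when wapbl_allow_dpofua is set and the device
 * reports DKCACHE_FUA, WAPBL_JFLAGS() yields wl_jwrite_flags, which
 * wapbl_dkcache_init() fills with B_MEDIA_FUA and, if supported,
 * B_MEDIA_DPO.  WAPBL_JDATA_FLAGS() masks that down to B_MEDIA_DPO
 * only; journal record data written through wapbl_circ_write() uses
 * these weaker flags, while the stronger flags are intended for writes
 * that must be stable, e.g. the commit header.
 */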
250 
251 #ifdef WAPBL_DEBUG_PRINT
252 int wapbl_debug_print = WAPBL_DEBUG_PRINT;
253 #endif
254 
255 /****************************************************************/
256 #ifdef _KERNEL
257 
258 #ifdef WAPBL_DEBUG
259 struct wapbl *wapbl_debug_wl;
260 #endif
261 
262 static int wapbl_write_commit(struct wapbl *, off_t, off_t);
263 static int wapbl_write_blocks(struct wapbl *, off_t *);
264 static int wapbl_write_revocations(struct wapbl *, off_t *);
265 static int wapbl_write_inodes(struct wapbl *, off_t *);
266 #endif /* _KERNEL */
267 
268 static int wapbl_replay_process(struct wapbl_replay *, off_t, off_t);
269 
270 static inline size_t wapbl_space_used(size_t, off_t, off_t);
271 
272 #ifdef _KERNEL
273 
274 static struct pool wapbl_entry_pool;
275 static struct pool wapbl_dealloc_pool;
276 
277 #define	WAPBL_INODETRK_SIZE 83
278 static int wapbl_ino_pool_refcount;
279 static struct pool wapbl_ino_pool;
280 struct wapbl_ino {
281 	LIST_ENTRY(wapbl_ino) wi_hash;
282 	ino_t wi_ino;
283 	mode_t wi_mode;
284 };
285 
286 static void wapbl_inodetrk_init(struct wapbl *wl, u_int size);
287 static void wapbl_inodetrk_free(struct wapbl *wl);
288 static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino);
289 
290 static size_t wapbl_transaction_len(struct wapbl *wl);
291 static inline size_t wapbl_transaction_inodes_len(struct wapbl *wl);
292 
293 static void wapbl_deallocation_free(struct wapbl *, struct wapbl_dealloc *,
294     bool);
295 
296 static void wapbl_evcnt_init(struct wapbl *);
297 static void wapbl_evcnt_free(struct wapbl *);
298 
299 static void wapbl_dkcache_init(struct wapbl *);
300 
301 #if 0
302 int wapbl_replay_verify(struct wapbl_replay *, struct vnode *);
303 #endif
304 
305 static int wapbl_replay_isopen1(struct wapbl_replay *);
306 
307 const struct wapbl_ops wapbl_ops = {
308 	.wo_wapbl_discard	= wapbl_discard,
309 	.wo_wapbl_replay_isopen	= wapbl_replay_isopen1,
310 	.wo_wapbl_replay_can_read = wapbl_replay_can_read,
311 	.wo_wapbl_replay_read	= wapbl_replay_read,
312 	.wo_wapbl_add_buf	= wapbl_add_buf,
313 	.wo_wapbl_remove_buf	= wapbl_remove_buf,
314 	.wo_wapbl_resize_buf	= wapbl_resize_buf,
315 	.wo_wapbl_begin		= wapbl_begin,
316 	.wo_wapbl_end		= wapbl_end,
317 	.wo_wapbl_junlock_assert= wapbl_junlock_assert,
318 	.wo_wapbl_jlock_assert	= wapbl_jlock_assert,
319 
320 	/* XXX: the following is only used to say "this is a wapbl buf" */
321 	.wo_wapbl_biodone	= wapbl_biodone,
322 };
323 
324 SYSCTL_SETUP(wapbl_sysctl_init, "wapbl sysctl")
325 {
326 	int rv;
327 	const struct sysctlnode *rnode, *cnode;
328 
329 	rv = sysctl_createv(clog, 0, NULL, &rnode,
330 	    CTLFLAG_PERMANENT,
331 	    CTLTYPE_NODE, "wapbl",
332 	    SYSCTL_DESCR("WAPBL journaling options"),
333 	    NULL, 0, NULL, 0,
334 	    CTL_VFS, CTL_CREATE, CTL_EOL);
335 	if (rv)
336 		return;
337 
338 	rv = sysctl_createv(clog, 0, &rnode, &cnode,
339 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
340 	    CTLTYPE_INT, "flush_disk_cache",
341 	    SYSCTL_DESCR("flush disk cache"),
342 	    NULL, 0, &wapbl_flush_disk_cache, 0,
343 	    CTL_CREATE, CTL_EOL);
344 	if (rv)
345 		return;
346 
347 	rv = sysctl_createv(clog, 0, &rnode, &cnode,
348 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
349 	    CTLTYPE_INT, "verbose_commit",
350 	    SYSCTL_DESCR("show time and size of wapbl log commits"),
351 	    NULL, 0, &wapbl_verbose_commit, 0,
352 	    CTL_CREATE, CTL_EOL);
353 	if (rv)
354 		return;
355 
356 	rv = sysctl_createv(clog, 0, &rnode, &cnode,
357 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
358 	    CTLTYPE_INT, "allow_dpofua",
359 	    SYSCTL_DESCR("allow use of FUA/DPO instead of cache flush"
360 		" if available"),
361 	    NULL, 0, &wapbl_allow_dpofua, 0,
362 	    CTL_CREATE, CTL_EOL);
363 	if (rv)
364 		return;
365 
366 	rv = sysctl_createv(clog, 0, &rnode, &cnode,
367 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
368 	    CTLTYPE_INT, "journal_iobufs",
369 	    SYSCTL_DESCR("count of bufs used for journal I/O"
370 		" (max async count)"),
371 	    NULL, 0, &wapbl_journal_iobufs, 0,
372 	    CTL_CREATE, CTL_EOL);
373 	if (rv)
374 		return;
375 
376 	return;
377 }
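/*
 * The knobs created above live under the vfs.wapbl sysctl node and can
 * be tuned at run time, for example (illustrative usage):
 *
 *	sysctl -w vfs.wapbl.verbose_commit=1
 *	sysctl -w vfs.wapbl.journal_iobufs=8
 */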
378 
379 static void
380 wapbl_init(void)
381 {
382 
383 	pool_init(&wapbl_entry_pool, sizeof(struct wapbl_entry), 0, 0, 0,
384 	    "wapblentrypl", &pool_allocator_kmem, IPL_VM);
385 	pool_init(&wapbl_dealloc_pool, sizeof(struct wapbl_dealloc), 0, 0, 0,
386 	    "wapbldealloc", &pool_allocator_nointr, IPL_NONE);
387 }
388 
389 static int
390 wapbl_fini(void)
391 {
392 
393 	pool_destroy(&wapbl_dealloc_pool);
394 	pool_destroy(&wapbl_entry_pool);
395 
396 	return 0;
397 }
398 
399 static void
400 wapbl_evcnt_init(struct wapbl *wl)
401 {
402 
403 	snprintf(wl->wl_ev_group, sizeof(wl->wl_ev_group),
404 	    "wapbl fsid 0x%x/0x%x",
405 	    wl->wl_mount->mnt_stat.f_fsidx.__fsid_val[0],
406 	    wl->wl_mount->mnt_stat.f_fsidx.__fsid_val[1]);
407 
408 	evcnt_attach_dynamic(&wl->wl_ev_commit, EVCNT_TYPE_MISC,
409 	    NULL, wl->wl_ev_group, "commit");
410 	evcnt_attach_dynamic(&wl->wl_ev_journalwrite, EVCNT_TYPE_MISC,
411 	    NULL, wl->wl_ev_group, "journal write total");
412 	evcnt_attach_dynamic(&wl->wl_ev_jbufs_bio_nowait, EVCNT_TYPE_MISC,
413 	    NULL, wl->wl_ev_group, "journal write finished async");
414 	evcnt_attach_dynamic(&wl->wl_ev_metawrite, EVCNT_TYPE_MISC,
415 	    NULL, wl->wl_ev_group, "metadata async write");
416 	evcnt_attach_dynamic(&wl->wl_ev_cacheflush, EVCNT_TYPE_MISC,
417 	    NULL, wl->wl_ev_group, "cache flush");
418 }
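/*
 * The counters attached above are visible through the usual event
 * counter mechanism, e.g. "vmstat -e" shows per-mount lines grouped
 * under the "wapbl fsid 0x.../0x..." group name built in wl_ev_group
 * (the exact fsid values depend on the mount).
 */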
419 
420 static void
421 wapbl_evcnt_free(struct wapbl *wl)
422 {
423 
424 	evcnt_detach(&wl->wl_ev_commit);
425 	evcnt_detach(&wl->wl_ev_journalwrite);
426 	evcnt_detach(&wl->wl_ev_jbufs_bio_nowait);
427 	evcnt_detach(&wl->wl_ev_metawrite);
428 	evcnt_detach(&wl->wl_ev_cacheflush);
429 }
430 
431 static void
432 wapbl_dkcache_init(struct wapbl *wl)
433 {
434 	int error;
435 
436 	/* Get disk cache flags */
437 	error = VOP_IOCTL(wl->wl_devvp, DIOCGCACHE, &wl->wl_dkcache,
438 	    FWRITE, FSCRED);
439 	if (error) {
440 		/* behave as if there was a write cache */
441 		wl->wl_dkcache = DKCACHE_WRITE;
442 	}
443 
444 	/* Use FUA instead of cache flush if available */
445 	if (ISSET(wl->wl_dkcache, DKCACHE_FUA))
446 		wl->wl_jwrite_flags |= B_MEDIA_FUA;
447 
448 	/* Use DPO for journal writes if available */
449 	if (ISSET(wl->wl_dkcache, DKCACHE_DPO))
450 		wl->wl_jwrite_flags |= B_MEDIA_DPO;
451 }
452 
453 static int
454 wapbl_start_flush_inodes(struct wapbl *wl, struct wapbl_replay *wr)
455 {
456 	int error, i;
457 
458 	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
459 	    ("wapbl_start: reusing log with %d inodes\n", wr->wr_inodescnt));
460 
461 	/*
462 	 * It's only valid to reuse the replay log if it's
463 	 * the same as the new log we just opened.
464 	 */
465 	KDASSERT(!wapbl_replay_isopen(wr));
466 	KASSERT(wl->wl_devvp->v_type == VBLK);
467 	KASSERT(wr->wr_devvp->v_type == VBLK);
468 	KASSERT(wl->wl_devvp->v_rdev == wr->wr_devvp->v_rdev);
469 	KASSERT(wl->wl_logpbn == wr->wr_logpbn);
470 	KASSERT(wl->wl_circ_size == wr->wr_circ_size);
471 	KASSERT(wl->wl_circ_off == wr->wr_circ_off);
472 	KASSERT(wl->wl_log_dev_bshift == wr->wr_log_dev_bshift);
473 	KASSERT(wl->wl_fs_dev_bshift == wr->wr_fs_dev_bshift);
474 
475 	wl->wl_wc_header->wc_generation = wr->wr_generation + 1;
476 
477 	for (i = 0; i < wr->wr_inodescnt; i++)
478 		wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber,
479 		    wr->wr_inodes[i].wr_imode);
480 
481 	/* Make sure new transaction won't overwrite old inodes list */
482 	KDASSERT(wapbl_transaction_len(wl) <=
483 	    wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead,
484 		wr->wr_inodestail));
485 
486 	wl->wl_head = wl->wl_tail = wr->wr_inodeshead;
487 	wl->wl_reclaimable_bytes = wl->wl_reserved_bytes =
488 	    wapbl_transaction_len(wl);
489 
490 	error = wapbl_write_inodes(wl, &wl->wl_head);
491 	if (error)
492 		return error;
493 
494 	KASSERT(wl->wl_head != wl->wl_tail);
495 	KASSERT(wl->wl_head != 0);
496 
497 	return 0;
498 }
499 
500 int
501 wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp,
502     daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr,
503     wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn)
504 {
505 	struct wapbl *wl;
506 	struct vnode *devvp;
507 	daddr_t logpbn;
508 	int error;
509 	int log_dev_bshift = ilog2(blksize);
510 	int fs_dev_bshift = log_dev_bshift;
511 	int run;
512 
513 	WAPBL_PRINTF(WAPBL_PRINT_OPEN,
514 	    ("wapbl_start: vp=%p off=%"PRId64" count=%zu blksize=%zu\n",
515 		vp, off, count, blksize));
516 
517 	if (log_dev_bshift > fs_dev_bshift) {
518 		WAPBL_PRINTF(WAPBL_PRINT_OPEN,
519 		    ("wapbl: log device's block size cannot be larger "
520 			"than filesystem's\n"));
521 		/*
522 		 * Not currently implemented, although it could be if
523 		 * needed someday.
524 		 */
525 		return SET_ERROR(ENOSYS);
526 	}
527 
528 	if (off < 0)
529 		return SET_ERROR(EINVAL);
530 
531 	if (blksize < DEV_BSIZE)
532 		return SET_ERROR(EINVAL);
533 	if (blksize % DEV_BSIZE)
534 		return SET_ERROR(EINVAL);
535 
536 	/* XXXTODO: verify that the full load is writable */
537 
538 	/*
539 	 * XXX check for minimum log size
540 	 * minimum is governed by minimum amount of space
541 	 * to complete a transaction. (probably truncate)
542 	 */
543 	/* XXX for now pick something minimal */
544 	if ((count * blksize) < MAXPHYS) {
545 		return SET_ERROR(ENOSPC);
546 	}
547 
548 	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) {
549 		return error;
550 	}
551 
552 	wl = wapbl_calloc(1, sizeof(*wl));
553 	rw_init(&wl->wl_rwlock);
554 	mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE);
555 	cv_init(&wl->wl_reclaimable_cv, "wapblrec");
556 	TAILQ_INIT(&wl->wl_bufs);
557 	SIMPLEQ_INIT(&wl->wl_entries);
558 
559 	wl->wl_logvp = vp;
560 	wl->wl_devvp = devvp;
561 	wl->wl_mount = mp;
562 	wl->wl_logpbn = logpbn;
563 	wl->wl_log_dev_bshift = log_dev_bshift;
564 	wl->wl_fs_dev_bshift = fs_dev_bshift;
565 
566 	wl->wl_flush = flushfn;
567 	wl->wl_flush_abort = flushabortfn;
568 
569 	/* Reserve two log device blocks for the commit headers */
570 	wl->wl_circ_off = 2<<wl->wl_log_dev_bshift;
571 	wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off);
572 	/* truncate the log usage to a multiple of the log device block size */
573 	wl->wl_circ_size >>= wl->wl_log_dev_bshift;
574 	wl->wl_circ_size <<= wl->wl_log_dev_bshift;
575 
576 	/*
577 	 * wl_bufbytes_max limits the size of the in memory transaction space.
578 	 * - Since buffers are allocated and accounted for in units of
579 	 *   PAGE_SIZE it is required to be a multiple of PAGE_SIZE
580 	 *   (i.e. 1<<PAGE_SHIFT)
581 	 * - Since the log device has to be written in units of
582 	 *   1<<wl_log_dev_bshift it is required to be a multiple of
583 	 *   1<<wl_log_dev_bshift.
584 	 * - Since the file system will provide data in units of 1<<wl_fs_dev_bshift,
585 	 *   it is convenient for it to be a multiple of 1<<wl_fs_dev_bshift.
586 	 * Therefore it must be a multiple of the least common multiple of those
587 	 * three quantities.  Fortunately, all of those quantities are
588 	 * guaranteed to be a power of two, and the least common multiple of
589 	 * a set of numbers which are all powers of two is simply the maximum
590 	 * of those numbers.  Finally, rounding down to a larger power of two
591 	 * preserves multiples of any smaller one.  So we can do
592 	 * the following operations to size wl_bufbytes_max:
593 	 */
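	/*
	 * Illustrative example (hypothetical numbers): with PAGE_SHIFT = 12
	 * and both device block shifts equal to 9, the largest constraint
	 * is PAGE_SIZE (4096), so the shifts below round wl_bufbytes_max
	 * down to a multiple of 4096, e.g. 1000000 bytes becomes 999424.
	 */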
594 
595 	/* XXX fix actual number of pages reserved per filesystem. */
596 	wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2);
597 
598 	/* Round wl_bufbytes_max down to a multiple of the largest constraint */
599 	wl->wl_bufbytes_max >>= PAGE_SHIFT;
600 	wl->wl_bufbytes_max <<= PAGE_SHIFT;
601 	wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift;
602 	wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift;
603 	wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift;
604 	wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift;
605 
606 	/* XXX maybe use filesystem fragment size instead of 1024 */
607 	/* XXX fix actual number of buffers reserved per filesystem. */
608 	wl->wl_bufcount_max = (buf_nbuf() / 2) * 1024;
609 
610 	wl->wl_brperjblock = ((1<<wl->wl_log_dev_bshift)
611 	    - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
612 	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);
613 	KASSERT(wl->wl_brperjblock > 0);
614 
615 	/* XXX tie this into resource estimation */
616 	wl->wl_dealloclim = wl->wl_bufbytes_max / mp->mnt_stat.f_bsize / 2;
617 	TAILQ_INIT(&wl->wl_dealloclist);
618 
619 	wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE);
620 
621 	wapbl_evcnt_init(wl);
622 
623 	wapbl_dkcache_init(wl);
624 
625 	/* Initialize the commit header */
626 	{
627 		struct wapbl_wc_header *wc;
628 		size_t len = 1 << wl->wl_log_dev_bshift;
629 		wc = wapbl_calloc(1, len);
630 		wc->wc_type = WAPBL_WC_HEADER;
631 		wc->wc_len = len;
632 		wc->wc_circ_off = wl->wl_circ_off;
633 		wc->wc_circ_size = wl->wl_circ_size;
634 		/* XXX wc->wc_fsid */
635 		wc->wc_log_dev_bshift = wl->wl_log_dev_bshift;
636 		wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift;
637 		wl->wl_wc_header = wc;
638 		wl->wl_wc_scratch = wapbl_alloc(len);
639 	}
640 
641 	TAILQ_INIT(&wl->wl_iobufs);
642 	TAILQ_INIT(&wl->wl_iobufs_busy);
643 	for (int i = 0; i < wapbl_journal_iobufs; i++) {
644 		struct buf *bp;
645 
646 		if ((bp = geteblk(MAXPHYS)) == NULL)
647 			goto errout;
648 
649 		mutex_enter(&bufcache_lock);
650 		mutex_enter(devvp->v_interlock);
651 		bgetvp(devvp, bp);
652 		mutex_exit(devvp->v_interlock);
653 		mutex_exit(&bufcache_lock);
654 
655 		bp->b_dev = devvp->v_rdev;
656 
657 		TAILQ_INSERT_TAIL(&wl->wl_iobufs, bp, b_wapbllist);
658 	}
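	/*
	 * With the default wapbl_journal_iobufs of 4, up to four
	 * MAXPHYS-sized journal writes (typically 64 KiB each, but
	 * machine-dependent) can be in flight before wapbl_buffered_write()
	 * has to wait for one of them in wapbl_buffered_flush().
	 */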
659 
660 	/*
661 	 * if there was an existing set of unlinked but
662 	 * allocated inodes, preserve it in the new
663 	 * log.
664 	 */
665 	if (wr && wr->wr_inodescnt) {
666 		error = wapbl_start_flush_inodes(wl, wr);
667 		if (error)
668 			goto errout;
669 	}
670 
671 	error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail);
672 	if (error) {
673 		goto errout;
674 	}
675 
676 	*wlp = wl;
677 #if defined(WAPBL_DEBUG)
678 	wapbl_debug_wl = wl;
679 #endif
680 
681 	return 0;
682 errout:
683 	wapbl_discard(wl);
684 	wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
685 	wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
686 	while (!TAILQ_EMPTY(&wl->wl_iobufs)) {
687 		struct buf *bp;
688 
689 		bp = TAILQ_FIRST(&wl->wl_iobufs);
690 		TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);
691 		brelse(bp, BC_INVAL);
692 	}
693 	wapbl_inodetrk_free(wl);
694 	wapbl_free(wl, sizeof(*wl));
695 
696 	return error;
697 }
698 
699 /*
700  * Like wapbl_flush, except that it discards the current transaction
701  * completely instead of writing it to the log.
702  */
703 
704 void
705 wapbl_discard(struct wapbl *wl)
706 {
707 	struct wapbl_entry *we;
708 	struct wapbl_dealloc *wd;
709 	struct buf *bp;
710 	int i;
711 
712 	/*
713 	 * XXX we may consider using upgrade here
714 	 * if we want to call flush from inside a transaction
715 	 */
716 	rw_enter(&wl->wl_rwlock, RW_WRITER);
717 	wl->wl_flush(wl->wl_mount, TAILQ_FIRST(&wl->wl_dealloclist));
718 
719 #ifdef WAPBL_DEBUG_PRINT
720 	{
721 		pid_t pid = -1;
722 		lwpid_t lid = -1;
723 		if (curproc)
724 			pid = curproc->p_pid;
725 		if (curlwp)
726 			lid = curlwp->l_lid;
727 #ifdef WAPBL_DEBUG_BUFBYTES
728 		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
729 		    ("wapbl_discard: thread %d.%d discarding "
730 			"transaction\n"
731 			"\tbufcount=%zu bufbytes=%zu bcount=%zu "
732 			"deallocs=%d inodes=%d\n"
733 			"\terrcnt = %u, reclaimable=%zu reserved=%zu "
734 			"unsynced=%zu\n",
735 			pid, lid,
736 			wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
737 			wl->wl_dealloccnt, wl->wl_inohashcnt,
738 			wl->wl_error_count, wl->wl_reclaimable_bytes,
739 			wl->wl_reserved_bytes,
740 			wl->wl_unsynced_bufbytes));
741 		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
742 			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
743 			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
744 				"error = %d, unsynced = %zu\n",
745 				we->we_bufcount, we->we_reclaimable_bytes,
746 				we->we_error, we->we_unsynced_bufbytes));
747 		}
748 #else /* !WAPBL_DEBUG_BUFBYTES */
749 		WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
750 		    ("wapbl_discard: thread %d.%d discarding transaction\n"
751 			"\tbufcount=%zu bufbytes=%zu bcount=%zu "
752 			"deallocs=%d inodes=%d\n"
753 			"\terrcnt = %u, reclaimable=%zu reserved=%zu\n",
754 			pid, lid,
755 			wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
756 			wl->wl_dealloccnt, wl->wl_inohashcnt,
757 			wl->wl_error_count, wl->wl_reclaimable_bytes,
758 			wl->wl_reserved_bytes));
759 		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
760 			WAPBL_PRINTF(WAPBL_PRINT_DISCARD,
761 			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
762 				"error = %d\n",
763 				we->we_bufcount, we->we_reclaimable_bytes,
764 				we->we_error));
765 		}
766 #endif /* !WAPBL_DEBUG_BUFBYTES */
767 	}
768 #endif /* WAPBL_DEBUG_PRINT */
769 
770 	for (i = 0; i <= wl->wl_inohashmask; i++) {
771 		struct wapbl_ino_head *wih;
772 		struct wapbl_ino *wi;
773 
774 		wih = &wl->wl_inohash[i];
775 		while ((wi = LIST_FIRST(wih)) != NULL) {
776 			LIST_REMOVE(wi, wi_hash);
777 			pool_put(&wapbl_ino_pool, wi);
778 			KASSERT(wl->wl_inohashcnt > 0);
779 			wl->wl_inohashcnt--;
780 		}
781 	}
782 
783 	/*
784 	 * clean buffer list
785 	 */
786 	mutex_enter(&bufcache_lock);
787 	mutex_enter(&wl->wl_mtx);
788 	while ((bp = TAILQ_FIRST(&wl->wl_bufs)) != NULL) {
789 		if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) {
790 			KASSERT(bp->b_flags & B_LOCKED);
791 			KASSERT(bp->b_oflags & BO_DELWRI);
792 			/*
793 			 * Buffer is already on BQ_LOCKED queue.
794 			 * The buffer will be unlocked and
795 			 * removed from the transaction in brelsel()
796 			 */
797 			mutex_exit(&wl->wl_mtx);
798 			bremfree(bp);
799 			brelsel(bp, BC_INVAL);
800 			mutex_enter(&wl->wl_mtx);
801 		}
802 	}
803 
804 	/*
805 	 * Remove references to this wl from wl_entries and free any entries
806 	 * that no longer have buffers; the rest will be freed in
807 	 * wapbl_biodone() once their last buffer completes.
808 	 */
809 	while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) {
810 		SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
811 		/* XXX should we be accumulating wl_error_count
812 		 * and increasing reclaimable bytes ? */
813 		we->we_wapbl = NULL;
814 		if (we->we_bufcount == 0) {
815 #ifdef WAPBL_DEBUG_BUFBYTES
816 			KASSERT(we->we_unsynced_bufbytes == 0);
817 #endif
818 			pool_put(&wapbl_entry_pool, we);
819 		}
820 	}
821 
822 	mutex_exit(&wl->wl_mtx);
823 	mutex_exit(&bufcache_lock);
824 
825 	/* Discard list of deallocs */
826 	while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL)
827 		wapbl_deallocation_free(wl, wd, true);
828 
829 	/* XXX should we clear wl_reserved_bytes? */
830 
831 	KASSERT(wl->wl_bufbytes == 0);
832 	KASSERT(wl->wl_bcount == 0);
833 	KASSERT(wl->wl_bufcount == 0);
834 	KASSERT(TAILQ_EMPTY(&wl->wl_bufs));
835 	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
836 	KASSERT(wl->wl_inohashcnt == 0);
837 	KASSERT(TAILQ_EMPTY(&wl->wl_dealloclist));
838 	KASSERT(wl->wl_dealloccnt == 0);
839 
840 	rw_exit(&wl->wl_rwlock);
841 }
842 
843 int
844 wapbl_stop(struct wapbl *wl, int force)
845 {
846 	int error;
847 
848 	WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n"));
849 	error = wapbl_flush(wl, 1);
850 	if (error) {
851 		if (force)
852 			wapbl_discard(wl);
853 		else
854 			return error;
855 	}
856 
857 	/* Unlinked inodes persist after a flush */
858 	if (wl->wl_inohashcnt) {
859 		if (force) {
860 			wapbl_discard(wl);
861 		} else {
862 			return SET_ERROR(EBUSY);
863 		}
864 	}
865 
866 	KASSERT(wl->wl_bufbytes == 0);
867 	KASSERT(wl->wl_bcount == 0);
868 	KASSERT(wl->wl_bufcount == 0);
869 	KASSERT(TAILQ_EMPTY(&wl->wl_bufs));
870 	KASSERT(wl->wl_dealloccnt == 0);
871 	KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries));
872 	KASSERT(wl->wl_inohashcnt == 0);
873 	KASSERT(TAILQ_EMPTY(&wl->wl_dealloclist));
874 	KASSERT(wl->wl_dealloccnt == 0);
875 	KASSERT(TAILQ_EMPTY(&wl->wl_iobufs_busy));
876 
877 	wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len);
878 	wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len);
879 	while (!TAILQ_EMPTY(&wl->wl_iobufs)) {
880 		struct buf *bp;
881 
882 		bp = TAILQ_FIRST(&wl->wl_iobufs);
883 		TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);
884 		brelse(bp, BC_INVAL);
885 	}
886 	wapbl_inodetrk_free(wl);
887 
888 	wapbl_evcnt_free(wl);
889 
890 	cv_destroy(&wl->wl_reclaimable_cv);
891 	mutex_destroy(&wl->wl_mtx);
892 	rw_destroy(&wl->wl_rwlock);
893 	wapbl_free(wl, sizeof(*wl));
894 
895 	return 0;
896 }
897 
898 /****************************************************************/
899 /*
900  * Unbuffered disk I/O
901  */
902 
903 static void
904 wapbl_doio_accounting(struct vnode *devvp, int flags)
905 {
906 	struct pstats *pstats = curlwp->l_proc->p_stats;
907 
908 	if ((flags & (B_WRITE | B_READ)) == B_WRITE) {
909 		mutex_enter(devvp->v_interlock);
910 		devvp->v_numoutput++;
911 		mutex_exit(devvp->v_interlock);
912 		pstats->p_ru.ru_oublock++;
913 	} else {
914 		pstats->p_ru.ru_inblock++;
915 	}
916 
917 }
918 
919 static int
920 wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags)
921 {
922 	struct buf *bp;
923 	int error;
924 
925 	KASSERT(devvp->v_type == VBLK);
926 
927 	wapbl_doio_accounting(devvp, flags);
928 
929 	bp = getiobuf(devvp, true);
930 	bp->b_flags = flags;
931 	bp->b_cflags |= BC_BUSY;	/* mandatory, asserted by biowait() */
932 	bp->b_dev = devvp->v_rdev;
933 	bp->b_data = data;
934 	bp->b_bufsize = bp->b_resid = bp->b_bcount = len;
935 	bp->b_blkno = pbn;
936 	BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
937 
938 	WAPBL_PRINTF(WAPBL_PRINT_IO,
939 	    ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%"PRIx64"\n",
940 		BUF_ISWRITE(bp) ? "write" : "read", bp->b_bcount,
941 		bp->b_blkno, bp->b_dev));
942 
943 	VOP_STRATEGY(devvp, bp);
944 
945 	error = biowait(bp);
946 	putiobuf(bp);
947 
948 	if (error) {
949 		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
950 		    ("wapbl_doio: %s %zu bytes at block %" PRId64
951 			" on dev 0x%"PRIx64" failed with error %d\n",
952 			(((flags & (B_WRITE | B_READ)) == B_WRITE) ?
953 			    "write" : "read"),
954 			len, pbn, devvp->v_rdev, error));
955 	}
956 
957 	return error;
958 }
959 
960 /*
961  * wapbl_write(data, len, devvp, pbn)
962  *
963  *	Synchronously write len bytes from data to physical block pbn
964  *	on devvp.
965  */
966 int
967 wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
968 {
969 
970 	return wapbl_doio(data, len, devvp, pbn, B_WRITE);
971 }
972 
973 /*
974  * wapbl_read(data, len, devvp, pbn)
975  *
976  *	Synchronously read len bytes into data from physical block pbn
977  *	on devvp.
978  */
979 int
980 wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn)
981 {
982 
983 	return wapbl_doio(data, len, devvp, pbn, B_READ);
984 }
985 
986 /****************************************************************/
987 /*
988  * Buffered disk writes -- try to coalesce writes and emit
989  * MAXPHYS-aligned blocks.
990  */
991 
992 /*
993  * wapbl_buffered_write_async(wl, bp)
994  *
995  *	Send buffer for asynchronous write.
996  */
997 static void
998 wapbl_buffered_write_async(struct wapbl *wl, struct buf *bp)
999 {
1000 
1001 	wapbl_doio_accounting(wl->wl_devvp, bp->b_flags);
1002 
1003 	KASSERT(TAILQ_FIRST(&wl->wl_iobufs) == bp);
1004 	TAILQ_REMOVE(&wl->wl_iobufs, bp, b_wapbllist);
1005 
1006 	bp->b_flags |= B_WRITE;
1007 	bp->b_cflags |= BC_BUSY;	/* mandatory, asserted by biowait() */
1008 	bp->b_oflags = 0;
1009 	bp->b_bcount = bp->b_resid;
1010 	BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
1011 
1012 	VOP_STRATEGY(wl->wl_devvp, bp);
1013 
1014 	wl->wl_ev_journalwrite.ev_count++;
1015 
1016 	TAILQ_INSERT_TAIL(&wl->wl_iobufs_busy, bp, b_wapbllist);
1017 }
1018 
1019 /*
1020  * wapbl_buffered_flush(wl, full)
1021  *
1022  *	Flush writes from wapbl_buffered_write(): all if full, else at least one.
1023  */
1024 static int
1025 wapbl_buffered_flush(struct wapbl *wl, bool full)
1026 {
1027 	int error = 0;
1028 	struct buf *bp, *bnext;
1029 	bool only_done = true, found = false;
1030 
1031 	/* if there is an outstanding buffered write, send it now */
1032 	if ((bp = TAILQ_FIRST(&wl->wl_iobufs)) && bp->b_resid > 0)
1033 		wapbl_buffered_write_async(wl, bp);
1034 
1035 	/* wait for I/O to complete */
1036 again:
1037 	TAILQ_FOREACH_SAFE(bp, &wl->wl_iobufs_busy, b_wapbllist, bnext) {
1038 		if (!full && only_done) {
1039 			/* skip unfinished */
1040 			if (!ISSET(bp->b_oflags, BO_DONE))
1041 				continue;
1042 		}
1043 
1044 		if (ISSET(bp->b_oflags, BO_DONE))
1045 			wl->wl_ev_jbufs_bio_nowait.ev_count++;
1046 
1047 		TAILQ_REMOVE(&wl->wl_iobufs_busy, bp, b_wapbllist);
1048 		error = biowait(bp);
1049 
1050 		/* reset for reuse */
1051 		bp->b_blkno = bp->b_resid = bp->b_flags = 0;
1052 		TAILQ_INSERT_TAIL(&wl->wl_iobufs, bp, b_wapbllist);
1053 		found = true;
1054 
1055 		if (!full)
1056 			break;
1057 	}
1058 
1059 	if (!found && only_done && !TAILQ_EMPTY(&wl->wl_iobufs_busy)) {
1060 		only_done = false;
1061 		goto again;
1062 	}
1063 
1064 	return error;
1065 }
1066 
1067 /*
1068  * wapbl_buffered_write(data, len, wl, pbn)
1069  *
1070  *	Write len bytes from data to physical block pbn on
1071  *	wl->wl_devvp.  The write may not complete until
1072  *	wapbl_buffered_flush.
1073  */
1074 static int
1075 wapbl_buffered_write(void *data, size_t len, struct wapbl *wl, daddr_t pbn,
1076     int bflags)
1077 {
1078 	size_t resid;
1079 	struct buf *bp;
1080 
1081 again:
1082 	bp = TAILQ_FIRST(&wl->wl_iobufs);
1083 
1084 	if (bp == NULL) {
1085 		/* No more buffers, wait for any previous I/O to finish. */
1086 		wapbl_buffered_flush(wl, false);
1087 
1088 		bp = TAILQ_FIRST(&wl->wl_iobufs);
1089 		KASSERT(bp != NULL);
1090 	}
1091 
1092 	/*
1093 	 * If not adjacent to the buffered data, flush first.  The disk
1094 	 * block address is always valid for a non-empty buffer.
1095 	 */
1096 	if ((bp->b_resid > 0 && pbn != bp->b_blkno + btodb(bp->b_resid))) {
1097 		wapbl_buffered_write_async(wl, bp);
1098 		goto again;
1099 	}
1100 
1101 	/*
1102 	 * If this write goes to an empty buffer we have to
1103 	 * save the disk block address first.
1104 	 */
1105 	if (bp->b_blkno == 0) {
1106 		bp->b_blkno = pbn;
1107 		bp->b_flags |= bflags;
1108 	}
1109 
1110 	/*
1111 	 * Remaining space until this buffer ends on a buffer size boundary.
1112 	 *
1113 	 * It cannot become less than or equal to zero, since the buffer
1114 	 * would already have been flushed by the previous call in that case.
1115 	 */
1116 	resid = bp->b_bufsize - dbtob(bp->b_blkno % btodb(bp->b_bufsize)) -
1117 	    bp->b_resid;
1118 	KASSERT(resid > 0);
1119 	KASSERT(dbtob(btodb(resid)) == resid);
1120 
1121 	if (len < resid)
1122 		resid = len;
1123 
1124 	memcpy((uint8_t *)bp->b_data + bp->b_resid, data, resid);
1125 	bp->b_resid += resid;
1126 
1127 	if (len >= resid) {
1128 		/* Just filled the buf, or data did not fit */
1129 		wapbl_buffered_write_async(wl, bp);
1130 
1131 		data = (uint8_t *)data + resid;
1132 		len -= resid;
1133 		pbn += btodb(resid);
1134 
1135 		if (len > 0)
1136 			goto again;
1137 	}
1138 
1139 	return 0;
1140 }
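/*
 * Coalescing example for wapbl_buffered_write() (illustrative numbers):
 * with 512-byte device blocks, writing 8 KiB at pbn 100 and then 8 KiB
 * at pbn 116 appends both into the same iobuf, since
 * 116 == 100 + btodb(8192).  A write at a non-adjacent pbn, or one that
 * fills the iobuf, triggers wapbl_buffered_write_async() and the
 * accumulation starts over in the next free iobuf.
 */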
1141 
1142 /*
1143  * wapbl_circ_write(wl, data, len, offp)
1144  *
1145  *	Write len bytes from data to the circular queue of wl, starting
1146  *	at linear byte offset *offp, and returning the new linear byte
1147  *	offset in *offp.
1148  *
1149  *	If the starting linear byte offset precedes wl->wl_circ_off,
1150  *	the write instead begins at wl->wl_circ_off.  XXX WTF?  This
1151  *	should be a KASSERT, not a conditional.
1152  *
1153  *	The write is buffered in wl and must be flushed with
1154  *	wapbl_buffered_flush before it will be submitted to the disk.
1155  */
1156 static int
1157 wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp)
1158 {
1159 	size_t slen;
1160 	off_t off = *offp;
1161 	int error;
1162 	daddr_t pbn;
1163 
1164 	KDASSERT(((len >> wl->wl_log_dev_bshift) << wl->wl_log_dev_bshift) ==
1165 	    len);
1166 
1167 	if (off < wl->wl_circ_off)
1168 		off = wl->wl_circ_off;
1169 	slen = wl->wl_circ_off + wl->wl_circ_size - off;
1170 	if (slen < len) {
1171 		pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
1172 #ifdef _KERNEL
1173 		pbn = btodb(pbn << wl->wl_log_dev_bshift);
1174 #endif
1175 		error = wapbl_buffered_write(data, slen, wl, pbn,
1176 		    WAPBL_JDATA_FLAGS(wl));
1177 		if (error)
1178 			return error;
1179 		data = (uint8_t *)data + slen;
1180 		len -= slen;
1181 		off = wl->wl_circ_off;
1182 	}
1183 	pbn = wl->wl_logpbn + (off >> wl->wl_log_dev_bshift);
1184 #ifdef _KERNEL
1185 	pbn = btodb(pbn << wl->wl_log_dev_bshift);
1186 #endif
1187 	error = wapbl_buffered_write(data, len, wl, pbn,
1188 	    WAPBL_JDATA_FLAGS(wl));
1189 	if (error)
1190 		return error;
1191 	off += len;
1192 	if (off >= wl->wl_circ_off + wl->wl_circ_size)
1193 		off = wl->wl_circ_off;
1194 	*offp = off;
1195 	return 0;
1196 }
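/*
 * Wrap-around example for wapbl_circ_write() (illustrative numbers):
 * with wl_circ_off = 1024 and wl_circ_size = 8192, writing 2048 bytes
 * starting at *offp = 8704 first writes the 512 bytes that fit before
 * the end of the circular area, then wraps and writes the remaining
 * 1536 bytes at offset 1024, leaving *offp = 2560.
 */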
1197 
1198 /****************************************************************/
1199 /*
1200  * WAPBL transactions: entering, adding/removing bufs, and exiting
1201  */
1202 
1203 int
1204 wapbl_begin(struct wapbl *wl, const char *file, int line)
1205 {
1206 	int doflush;
1207 	unsigned lockcount;
1208 
1209 	KDASSERT(wl);
1210 
1211 	/*
1212 	 * XXX this needs to be made much more sophisticated.
1213 	 * perhaps each wapbl_begin could reserve a specified
1214 	 * number of buffers and bytes.
1215 	 */
1216 	mutex_enter(&wl->wl_mtx);
1217 	lockcount = wl->wl_lock_count;
1218 	doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) >
1219 		wl->wl_bufbytes_max / 2) ||
1220 	    ((wl->wl_bufcount + (lockcount * 10)) >
1221 		wl->wl_bufcount_max / 2) ||
1222 	    (wapbl_transaction_len(wl) > wl->wl_circ_size / 2) ||
1223 	    (wl->wl_dealloccnt >= (wl->wl_dealloclim / 2));
1224 	mutex_exit(&wl->wl_mtx);
1225 
1226 	if (doflush) {
1227 		WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1228 		    ("force flush lockcnt=%d bufbytes=%zu "
1229 			"(max=%zu) bufcount=%zu (max=%zu) "
1230 			"dealloccnt %d (lim=%d)\n",
1231 			lockcount, wl->wl_bufbytes,
1232 			wl->wl_bufbytes_max, wl->wl_bufcount,
1233 			wl->wl_bufcount_max,
1234 			wl->wl_dealloccnt, wl->wl_dealloclim));
1235 	}
1236 
1237 	if (doflush) {
1238 		int error = wapbl_flush(wl, 0);
1239 		if (error)
1240 			return error;
1241 	}
1242 
1243 	rw_enter(&wl->wl_rwlock, RW_READER);
1244 	mutex_enter(&wl->wl_mtx);
1245 	wl->wl_lock_count++;
1246 	mutex_exit(&wl->wl_mtx);
1247 
1248 #if defined(WAPBL_DEBUG_PRINT)
1249 	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
1250 	    ("wapbl_begin thread %d.%d with bufcount=%zu "
1251 		"bufbytes=%zu bcount=%zu at %s:%d\n",
1252 		curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1253 		wl->wl_bufbytes, wl->wl_bcount, file, line));
1254 #endif
1255 
1256 	return 0;
1257 }
1258 
1259 void
1260 wapbl_end(struct wapbl *wl)
1261 {
1262 
1263 #if defined(WAPBL_DEBUG_PRINT)
1264 	WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION,
1265 	    ("wapbl_end thread %d.%d with bufcount=%zu "
1266 		"bufbytes=%zu bcount=%zu\n",
1267 		curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1268 		wl->wl_bufbytes, wl->wl_bcount));
1269 #endif
1270 
1271 	/*
1272 	 * XXX this could be handled more gracefully, perhaps place
1273 	 * only a partial transaction in the log and allow the
1274 	 * remaining to flush without the protection of the journal.
1275 	 */
1276 	KASSERTMSG((wapbl_transaction_len(wl) <=
1277 		(wl->wl_circ_size - wl->wl_reserved_bytes)),
1278 	    "wapbl_end: current transaction too big to flush");
1279 
1280 	mutex_enter(&wl->wl_mtx);
1281 	KASSERT(wl->wl_lock_count > 0);
1282 	wl->wl_lock_count--;
1283 	mutex_exit(&wl->wl_mtx);
1284 
1285 	rw_exit(&wl->wl_rwlock);
1286 }
1287 
1288 void
1289 wapbl_add_buf(struct wapbl *wl, struct buf * bp)
1290 {
1291 
1292 	KASSERT(bp->b_cflags & BC_BUSY);
1293 	KASSERT(bp->b_vp);
1294 
1295 	wapbl_jlock_assert(wl);
1296 
1297 #if 0
1298 	/*
1299 	 * XXX this might be an issue for swapfiles.
1300 	 * see uvm_swap.c:1702
1301 	 *
1302 	 * XXX2 why require it then?  leap of semantics?
1303 	 */
1304 	KASSERT((bp->b_cflags & BC_NOCACHE) == 0);
1305 #endif
1306 
1307 	mutex_enter(&wl->wl_mtx);
1308 	if (bp->b_flags & B_LOCKED) {
1309 		TAILQ_REMOVE(&wl->wl_bufs, bp, b_wapbllist);
1310 		WAPBL_PRINTF(WAPBL_PRINT_BUFFER2,
1311 		    ("wapbl_add_buf thread %d.%d re-adding buf %p "
1312 			"with %d bytes %d bcount\n",
1313 			curproc->p_pid, curlwp->l_lid, bp,
1314 			bp->b_bufsize, bp->b_bcount));
1315 	} else {
1316 		/* unlocked but dirty buffers shouldn't exist */
1317 		KASSERT(!(bp->b_oflags & BO_DELWRI));
1318 		wl->wl_bufbytes += bp->b_bufsize;
1319 		wl->wl_bcount += bp->b_bcount;
1320 		wl->wl_bufcount++;
1321 		WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
1322 		    ("wapbl_add_buf thread %d.%d adding buf %p "
1323 			"with %d bytes %d bcount\n",
1324 			curproc->p_pid, curlwp->l_lid, bp,
1325 			bp->b_bufsize, bp->b_bcount));
1326 	}
1327 	TAILQ_INSERT_TAIL(&wl->wl_bufs, bp, b_wapbllist);
1328 	mutex_exit(&wl->wl_mtx);
1329 
1330 	bp->b_flags |= B_LOCKED;
1331 }
1332 
1333 static void
1334 wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp)
1335 {
1336 
1337 	KASSERT(mutex_owned(&wl->wl_mtx));
1338 	KASSERT(bp->b_cflags & BC_BUSY);
1339 	wapbl_jlock_assert(wl);
1340 
1341 #if 0
1342 	/*
1343 	 * XXX this might be an issue for swapfiles.
1344 	 * see uvm_swap.c:1725
1345 	 *
1346 	 * XXXdeux: see above
1347 	 */
1348 	KASSERT((bp->b_flags & BC_NOCACHE) == 0);
1349 #endif
1350 	KASSERT(bp->b_flags & B_LOCKED);
1351 
1352 	WAPBL_PRINTF(WAPBL_PRINT_BUFFER,
1353 	    ("wapbl_remove_buf thread %d.%d removing buf %p with "
1354 		"%d bytes %d bcount\n",
1355 		curproc->p_pid, curlwp->l_lid, bp,
1356 		bp->b_bufsize, bp->b_bcount));
1357 
1358 	KASSERT(wl->wl_bufbytes >= bp->b_bufsize);
1359 	wl->wl_bufbytes -= bp->b_bufsize;
1360 	KASSERT(wl->wl_bcount >= bp->b_bcount);
1361 	wl->wl_bcount -= bp->b_bcount;
1362 	KASSERT(wl->wl_bufcount > 0);
1363 	wl->wl_bufcount--;
1364 	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1365 	KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1366 	TAILQ_REMOVE(&wl->wl_bufs, bp, b_wapbllist);
1367 
1368 	bp->b_flags &= ~B_LOCKED;
1369 }
1370 
1371 /* called from brelsel() in vfs_bio among other places */
1372 void
1373 wapbl_remove_buf(struct wapbl * wl, struct buf *bp)
1374 {
1375 
1376 	mutex_enter(&wl->wl_mtx);
1377 	wapbl_remove_buf_locked(wl, bp);
1378 	mutex_exit(&wl->wl_mtx);
1379 }
1380 
1381 void
1382 wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt)
1383 {
1384 
1385 	KASSERT(bp->b_cflags & BC_BUSY);
1386 
1387 	/*
1388 	 * XXX: why does this depend on B_LOCKED?  otherwise the buf
1389 	 * is not for a transaction?  if so, why is this called in the
1390 	 * first place?
1391 	 */
1392 	if (bp->b_flags & B_LOCKED) {
1393 		mutex_enter(&wl->wl_mtx);
1394 		wl->wl_bufbytes += bp->b_bufsize - oldsz;
1395 		wl->wl_bcount += bp->b_bcount - oldcnt;
1396 		mutex_exit(&wl->wl_mtx);
1397 	}
1398 }
1399 
1400 #endif /* _KERNEL */
1401 
1402 /****************************************************************/
1403 /* Some utility inlines */
1404 
1405 /*
1406  * wapbl_space_used(avail, head, tail)
1407  *
1408  *	Number of bytes used in a circular queue of avail total bytes,
1409  *	from tail to head.
1410  */
1411 static inline size_t
1412 wapbl_space_used(size_t avail, off_t head, off_t tail)
1413 {
1414 
1415 	if (tail == 0) {
1416 		KASSERT(head == 0);
1417 		return 0;
1418 	}
1419 	return ((head + (avail - 1) - tail) % avail) + 1;
1420 }
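/*
 * Example (illustrative numbers): with avail = 8192, tail = 1024 and
 * head = 3072, wapbl_space_used() returns
 * ((3072 + 8191 - 1024) % 8192) + 1 = 2048.  The -1/+1 adjustment makes
 * the full case (head == tail != 0) come out as avail rather than 0,
 * matching the "head == tail != 0 means full" convention above.
 */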
1421 
1422 #ifdef _KERNEL
1423 /*
1424  * wapbl_advance(size, off, oldoff, delta)
1425  *
1426  *	Given a byte offset oldoff into a circular queue of size bytes
1427  *	starting at off, return a new byte offset oldoff + delta into
1428  *	the circular queue.
1429  */
1430 static inline off_t
1431 wapbl_advance(size_t size, size_t off, off_t oldoff, size_t delta)
1432 {
1433 	off_t newoff;
1434 
1435 	/* Define acceptable ranges for inputs. */
1436 	KASSERT(delta <= (size_t)size);
1437 	KASSERT(oldoff == 0 || (size_t)oldoff >= off);
1438 	KASSERT(oldoff < (off_t)(size + off));
1439 
1440 	if (oldoff == 0 && delta != 0)
1441 		newoff = off + delta;
1442 	else if (oldoff + delta < size + off)
1443 		newoff = oldoff + delta;
1444 	else
1445 		newoff = (oldoff + delta) - size;
1446 
1447 	/* Note some interesting axioms */
1448 	KASSERT(delta != 0 || newoff == oldoff);
1449 	KASSERT(delta == 0 || newoff != 0);
1450 	KASSERT(delta != size || newoff == oldoff);
1451 
1452 	/* Define acceptable ranges for output. */
1453 	KASSERT(newoff == 0 || (size_t)newoff >= off);
1454 	KASSERT((size_t)newoff < size + off);
1455 	return newoff;
1456 }
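/*
 * Examples (illustrative numbers, size = 8192, off = 1024):
 * wapbl_advance(8192, 1024, 0, 2048) == 3072, since advancing from the
 * "empty" offset 0 starts counting at off; and
 * wapbl_advance(8192, 1024, 8704, 1024) == 1536, since the offset wraps
 * past size + off == 9216 back into the circular region.
 */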
1457 
1458 /*
1459  * wapbl_space_free(avail, head, tail)
1460  *
1461  *	Number of bytes free in a circular queue of avail total bytes,
1462  *	in which everything from tail to head is used.
1463  */
1464 static inline size_t
1465 wapbl_space_free(size_t avail, off_t head, off_t tail)
1466 {
1467 
1468 	return avail - wapbl_space_used(avail, head, tail);
1469 }
1470 
1471 /*
1472  * wapbl_advance_head(size, off, delta, headp, tailp)
1473  *
1474  *	In a circular queue of size bytes starting at off, given the
1475  *	old head and tail offsets *headp and *tailp, store the new head
1476  *	and tail offsets in *headp and *tailp resulting from adding
1477  *	delta bytes of data to the head.
1478  */
1479 static inline void
1480 wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp,
1481     off_t *tailp)
1482 {
1483 	off_t head = *headp;
1484 	off_t tail = *tailp;
1485 
1486 	KASSERT(delta <= wapbl_space_free(size, head, tail));
1487 	head = wapbl_advance(size, off, head, delta);
1488 	if (tail == 0 && head != 0)
1489 		tail = off;
1490 	*headp = head;
1491 	*tailp = tail;
1492 }
1493 
1494 /*
1495  * wapbl_advance_tail(size, off, delta, headp, tailp)
1496  *
1497  *	In a circular queue of size bytes starting at off, given the
1498  *	old head and tail offsets *headp and *tailp, store the new head
1499  *	and tail offsets in *headp and *tailp resulting from removing
1500  *	delta bytes of data from the tail.
1501  */
1502 static inline void
1503 wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp,
1504     off_t *tailp)
1505 {
1506 	off_t head = *headp;
1507 	off_t tail = *tailp;
1508 
1509 	KASSERT(delta <= wapbl_space_used(size, head, tail));
1510 	tail = wapbl_advance(size, off, tail, delta);
1511 	if (head == tail) {
1512 		head = tail = 0;
1513 	}
1514 	*headp = head;
1515 	*tailp = tail;
1516 }
1517 
1518 
1519 /****************************************************************/
1520 
1521 /*
1522  * wapbl_truncate(wl, minfree)
1523  *
1524  *	Wait until at least minfree bytes are available in the log.
1525  *
1526  *	If it was necessary to wait for writes to complete,
1527  *	advance the circular queue tail to reflect the new write
1528  *	completions and issue a write commit to the log.
1529  *
1530  *	=> Caller must hold wl->wl_rwlock writer lock.
1531  */
1532 static int
1533 wapbl_truncate(struct wapbl *wl, size_t minfree)
1534 {
1535 	size_t delta;
1536 	size_t avail;
1537 	off_t head;
1538 	off_t tail;
1539 	int error = 0;
1540 
1541 	KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes));
1542 	KASSERT(rw_write_held(&wl->wl_rwlock));
1543 
1544 	mutex_enter(&wl->wl_mtx);
1545 
1546 	/*
1547 	 * First check to see if we have to do a commit
1548 	 * at all.
1549 	 */
1550 	avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail);
1551 	if (minfree < avail) {
1552 		mutex_exit(&wl->wl_mtx);
1553 		return 0;
1554 	}
1555 	minfree -= avail;
1556 	while (wl->wl_error_count == 0 &&
1557 	    wl->wl_reclaimable_bytes < minfree) {
1558 		WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1559 		    ("wapbl_truncate: sleeping on %p"
1560 			" wl=%p bytes=%zd minfree=%zd\n",
1561 			&wl->wl_reclaimable_bytes,
1562 			wl, wl->wl_reclaimable_bytes, minfree));
1563 		cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx);
1564 	}
1565 	if (wl->wl_reclaimable_bytes < minfree) {
1566 		KASSERT(wl->wl_error_count);
1567 		/* XXX maybe get actual error from buffer instead someday? */
1568 		error = SET_ERROR(EIO);
1569 	}
1570 	head = wl->wl_head;
1571 	tail = wl->wl_tail;
1572 	delta = wl->wl_reclaimable_bytes;
1573 
1574 	/* If all of the entries are flushed, then be sure to keep
1575 	 * the reserved bytes reserved.  Watch out for discarded transactions,
1576 	 * which could leave more bytes reserved than are reclaimable.
1577 	 */
1578 	if (SIMPLEQ_EMPTY(&wl->wl_entries) && delta >= wl->wl_reserved_bytes) {
1579 		delta -= wl->wl_reserved_bytes;
1580 	}
1581 	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head,
1582 	    &tail);
1583 	KDASSERT(wl->wl_reserved_bytes <=
1584 	    wapbl_space_used(wl->wl_circ_size, head, tail));
1585 	mutex_exit(&wl->wl_mtx);
1586 
1587 	if (error)
1588 		return error;
1589 
1590 	/*
1591 	 * This is where head, tail and delta are unprotected from races
1592 	 * against this routine itself or against flush.  This is ok since
1593 	 * we only call this routine from inside flush itself.
1594 	 *
1595 	 * XXX: how can it race against itself when accessed only
1596 	 * from behind the write-locked rwlock?
1597 	 */
1598 	error = wapbl_write_commit(wl, head, tail);
1599 	if (error)
1600 		return error;
1601 
1602 	wl->wl_head = head;
1603 	wl->wl_tail = tail;
1604 
1605 	mutex_enter(&wl->wl_mtx);
1606 	KASSERT(wl->wl_reclaimable_bytes >= delta);
1607 	wl->wl_reclaimable_bytes -= delta;
1608 	mutex_exit(&wl->wl_mtx);
1609 	WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE,
1610 	    ("wapbl_truncate thread %d.%d truncating %zu bytes\n",
1611 		curproc->p_pid, curlwp->l_lid, delta));
1612 
1613 	return 0;
1614 }
1615 
1616 /****************************************************************/
1617 
1618 void
1619 wapbl_biodone(struct buf *bp)
1620 {
1621 	struct wapbl_entry *we = bp->b_private;
1622 	struct wapbl *wl;
1623 #ifdef WAPBL_DEBUG_BUFBYTES
1624 	const int bufsize = bp->b_bufsize;
1625 #endif
1626 
1627 	mutex_enter(&bufcache_lock);
1628 	wl = we->we_wapbl;
1629 	mutex_exit(&bufcache_lock);
1630 
1631 	/*
1632 	 * Handle possible flushing of buffers after the log has been
1633 	 * decommissioned.
1634 	 */
1635 	if (!wl) {
1636 		KASSERT(we->we_bufcount > 0);
1637 		we->we_bufcount--;
1638 #ifdef WAPBL_DEBUG_BUFBYTES
1639 		KASSERT(we->we_unsynced_bufbytes >= bufsize);
1640 		we->we_unsynced_bufbytes -= bufsize;
1641 #endif
1642 
1643 		if (we->we_bufcount == 0) {
1644 #ifdef WAPBL_DEBUG_BUFBYTES
1645 			KASSERT(we->we_unsynced_bufbytes == 0);
1646 #endif
1647 			pool_put(&wapbl_entry_pool, we);
1648 		}
1649 
1650 		brelse(bp, 0);
1651 		return;
1652 	}
1653 
1654 #ifdef ohbother
1655 	KDASSERT(bp->b_oflags & BO_DONE);
1656 	KDASSERT(!(bp->b_oflags & BO_DELWRI));
1657 	KDASSERT(bp->b_flags & B_ASYNC);
1658 	KDASSERT(bp->b_cflags & BC_BUSY);
1659 	KDASSERT(!(bp->b_flags & B_LOCKED));
1660 	KDASSERT(!(bp->b_flags & B_READ));
1661 	KDASSERT(!(bp->b_cflags & BC_INVAL));
1662 	KDASSERT(!(bp->b_cflags & BC_NOCACHE));
1663 #endif
1664 
1665 	if (bp->b_error) {
1666 		/*
1667 		 * If an error occurs, it would be nice to leave the buffer
1668 		 * as a delayed write on the LRU queue so that we can retry
1669 		 * it later. But buffercache(9) can't handle dirty buffer
1670 		 * reuse, so just mark the log permanently errored out.
1671 		 */
1672 		mutex_enter(&wl->wl_mtx);
1673 		if (wl->wl_error_count == 0) {
1674 			wl->wl_error_count++;
1675 			cv_broadcast(&wl->wl_reclaimable_cv);
1676 		}
1677 		mutex_exit(&wl->wl_mtx);
1678 	}
1679 
1680 	/*
1681 	 * Make sure that the buf doesn't retain the media flags, so that
1682 	 * e.g. wapbl_allow_dpofua has immediate effect on any following I/O.
1683 	 * The flags will be set again if needed by another I/O.
1684 	 */
1685 	bp->b_flags &= ~B_MEDIA_FLAGS;
1686 
1687 	/*
1688 	 * Release the buffer here. wapbl_flush() may wait for the
1689 	 * log to become empty and we better unbusy the buffer before
1690 	 * wapbl_flush() returns.
1691 	 */
1692 	brelse(bp, 0);
1693 
1694 	mutex_enter(&wl->wl_mtx);
1695 
1696 	KASSERT(we->we_bufcount > 0);
1697 	we->we_bufcount--;
1698 #ifdef WAPBL_DEBUG_BUFBYTES
1699 	KASSERT(we->we_unsynced_bufbytes >= bufsize);
1700 	we->we_unsynced_bufbytes -= bufsize;
1701 	KASSERT(wl->wl_unsynced_bufbytes >= bufsize);
1702 	wl->wl_unsynced_bufbytes -= bufsize;
1703 #endif
1704 	wl->wl_ev_metawrite.ev_count++;
1705 
1706 	/*
1707 	 * If the current transaction can be reclaimed, start
1708 	 * at the beginning and reclaim any consecutive reclaimable
1709 	 * transactions.  If we successfully reclaim anything,
1710 	 * then wakeup anyone waiting for the reclaim.
1711 	 */
1712 	if (we->we_bufcount == 0) {
1713 		size_t delta = 0;
1714 		int errcnt = 0;
1715 #ifdef WAPBL_DEBUG_BUFBYTES
1716 		KDASSERT(we->we_unsynced_bufbytes == 0);
1717 #endif
1718 		/*
1719 		 * clear any posted error, since the buffer it came from
1720 		 * has been successfully flushed by now
1721 		 */
1722 		while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) &&
1723 		    we->we_bufcount == 0) {
1724 			delta += we->we_reclaimable_bytes;
1725 			if (we->we_error)
1726 				errcnt++;
1727 			SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries);
1728 			pool_put(&wapbl_entry_pool, we);
1729 		}
1730 
1731 		if (delta) {
1732 			wl->wl_reclaimable_bytes += delta;
1733 			KASSERT(wl->wl_error_count >= errcnt);
1734 			wl->wl_error_count -= errcnt;
1735 			cv_broadcast(&wl->wl_reclaimable_cv);
1736 		}
1737 	}
1738 
1739 	mutex_exit(&wl->wl_mtx);
1740 }
1741 
1742 /*
1743  * wapbl_flush(wl, wait)
1744  *
1745  *	Flush pending block writes, deallocations, and inodes from
1746  *	the current transaction in memory to the log on disk:
1747  *
1748  *	1. Call the file system's wl_flush callback to flush any
1749  *	   per-file-system pending updates.
1750  *	2. Wait for enough space in the log for the current transaction.
1751  *	3. Synchronously write the new log records, advancing the
1752  *	   circular queue head.
1753  *	4. Issue the pending block writes asynchronously, now that they
1754  *	   are recorded in the log and can be replayed after crash.
1755  *	5. If wait is true, wait for all writes to complete and for the
1756  *	   log to become empty.
1757  *
1758  *	On failure, call the file system's wl_flush_abort callback.
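 *
 *	Callers in this file use waitfor=0 for the opportunistic flush done
 *	from wapbl_begin() when the in-memory transaction grows large, and
 *	waitfor=1 from wapbl_stop() when the log must drain completely
 *	(a summary of the call sites above, for illustration).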
1759  */
1760 int
1761 wapbl_flush(struct wapbl *wl, int waitfor)
1762 {
1763 	struct buf *bp;
1764 	struct wapbl_entry *we;
1765 	off_t off;
1766 	off_t head;
1767 	off_t tail;
1768 	size_t delta = 0;
1769 	size_t flushsize;
1770 	size_t reserved;
1771 	int error = 0;
1772 
1773 	/*
1774 	 * Do a quick check to see if a full flush can be skipped.
1775 	 * This assumes that the flush callback does not need to be called
1776 	 * unless there are other outstanding bufs.
1777 	 */
1778 	if (!waitfor) {
1779 		size_t nbufs;
1780 		mutex_enter(&wl->wl_mtx);	/* XXX need mutex here to
1781 						   protect the KASSERTS */
1782 		nbufs = wl->wl_bufcount;
1783 		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0));
1784 		KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0));
1785 		mutex_exit(&wl->wl_mtx);
1786 		if (nbufs == 0)
1787 			return 0;
1788 	}
1789 
1790 	/*
1791 	 * XXX we may consider using LK_UPGRADE here
1792 	 * if we want to call flush from inside a transaction
1793 	 */
1794 	rw_enter(&wl->wl_rwlock, RW_WRITER);
1795 	wl->wl_flush(wl->wl_mount, TAILQ_FIRST(&wl->wl_dealloclist));
1796 
1797 	/*
1798 	 * Now that we are exclusively locked and the file system has
1799 	 * issued any deferred block writes for this transaction, check
1800 	 * whether there are any blocks to write to the log.  If not,
1801 	 * skip waiting for space or writing any log entries.
1802 	 *
1803 	 * XXX Shouldn't this also check wl_dealloccnt and
1804 	 * wl_inohashcnt?  Perhaps wl_dealloccnt doesn't matter if the
1805 	 * file system didn't produce any blocks as a consequence of
1806 	 * it, but the same does not seem to be true of wl_inohashcnt.
1807 	 */
1808 	if (wl->wl_bufcount == 0) {
1809 		goto wait_out;
1810 	}
1811 
1812 #if 0
1813 	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1814 	    ("wapbl_flush thread %d.%d flushing entries with "
1815 		"bufcount=%zu bufbytes=%zu\n",
1816 		curproc->p_pid, curlwp->l_lid, wl->wl_bufcount,
1817 		wl->wl_bufbytes));
1818 #endif
1819 
1820 	/* Calculate amount of space needed to flush */
1821 	flushsize = wapbl_transaction_len(wl);
1822 	if (wapbl_verbose_commit) {
1823 		struct timespec ts;
1824 		getnanotime(&ts);
1825 		printf("%s: %lld.%09ld this transaction = %zu bytes\n",
1826 		    __func__, (long long)ts.tv_sec,
1827 		    (long)ts.tv_nsec, flushsize);
1828 	}
1829 
1830 	if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) {
1831 		/*
1832 		 * XXX this could be handled more gracefully, perhaps place
1833 		 * only a partial transaction in the log and allow the
1834 		 * remaining to flush without the protection of the journal.
1835 		 */
1836 		panic("wapbl_flush: current transaction too big to flush");
1837 	}
1838 
1839 	error = wapbl_truncate(wl, flushsize);
1840 	if (error)
1841 		goto out;
1842 
1843 	off = wl->wl_head;
1844 	KASSERT(off == 0 || off >= wl->wl_circ_off);
1845 	KASSERT(off == 0 || off < wl->wl_circ_off + wl->wl_circ_size);
1846 	error = wapbl_write_blocks(wl, &off);
1847 	if (error)
1848 		goto out;
1849 	error = wapbl_write_revocations(wl, &off);
1850 	if (error)
1851 		goto out;
1852 	error = wapbl_write_inodes(wl, &off);
1853 	if (error)
1854 		goto out;
1855 
1856 	reserved = 0;
1857 	if (wl->wl_inohashcnt)
1858 		reserved = wapbl_transaction_inodes_len(wl);
1859 
1860 	head = wl->wl_head;
1861 	tail = wl->wl_tail;
1862 
1863 	wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize,
1864 	    &head, &tail);
1865 
1866 	KASSERTMSG(head == off,
1867 	    "lost head! head=%"PRIdMAX" tail=%" PRIdMAX
1868 	    " off=%"PRIdMAX" flush=%zu",
1869 	    (intmax_t)head, (intmax_t)tail, (intmax_t)off,
1870 	    flushsize);
1871 
1872 	/* Opportunistically move the tail forward if we can */
1873 	mutex_enter(&wl->wl_mtx);
1874 	delta = wl->wl_reclaimable_bytes;
1875 	mutex_exit(&wl->wl_mtx);
1876 	wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta,
1877 	    &head, &tail);
1878 
1879 	error = wapbl_write_commit(wl, head, tail);
1880 	if (error)
1881 		goto out;
1882 
1883 	we = pool_get(&wapbl_entry_pool, PR_WAITOK);
1884 
1885 #ifdef WAPBL_DEBUG_BUFBYTES
1886 	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1887 	    ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1888 		" unsynced=%zu"
1889 		"\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1890 		"inodes=%d\n",
1891 		curproc->p_pid, curlwp->l_lid, flushsize, delta,
1892 		wapbl_space_used(wl->wl_circ_size, head, tail),
1893 		wl->wl_unsynced_bufbytes, wl->wl_bufcount,
1894 		wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt,
1895 		wl->wl_inohashcnt));
1896 #else
1897 	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1898 	    ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu"
1899 		"\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d "
1900 		"inodes=%d\n",
1901 		curproc->p_pid, curlwp->l_lid, flushsize, delta,
1902 		wapbl_space_used(wl->wl_circ_size, head, tail),
1903 		wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
1904 		wl->wl_dealloccnt, wl->wl_inohashcnt));
1905 #endif
1906 
1907 
1908 	mutex_enter(&bufcache_lock);
1909 	mutex_enter(&wl->wl_mtx);
1910 
1911 	wl->wl_reserved_bytes = reserved;
1912 	wl->wl_head = head;
1913 	wl->wl_tail = tail;
1914 	KASSERT(wl->wl_reclaimable_bytes >= delta);
1915 	wl->wl_reclaimable_bytes -= delta;
1916 	KDASSERT(wl->wl_dealloccnt == 0);
1917 #ifdef WAPBL_DEBUG_BUFBYTES
1918 	wl->wl_unsynced_bufbytes += wl->wl_bufbytes;
1919 #endif
1920 
1921 	we->we_wapbl = wl;
1922 	we->we_bufcount = wl->wl_bufcount;
1923 #ifdef WAPBL_DEBUG_BUFBYTES
1924 	we->we_unsynced_bufbytes = wl->wl_bufbytes;
1925 #endif
1926 	we->we_reclaimable_bytes = flushsize;
1927 	we->we_error = 0;
1928 	SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries);
1929 
1930 	/*
1931 	 * This flushes bufs in the order in which they were queued, so
1932 	 * the LRU order is preserved.
1933 	 */
1934 	while ((bp = TAILQ_FIRST(&wl->wl_bufs)) != NULL) {
1935 		if (bbusy(bp, 0, 0, &wl->wl_mtx)) {
1936 			continue;
1937 		}
1938 		bp->b_iodone = wapbl_biodone;
1939 		bp->b_private = we;
1940 
1941 		bremfree(bp);
1942 		wapbl_remove_buf_locked(wl, bp);
1943 		mutex_exit(&wl->wl_mtx);
1944 		mutex_exit(&bufcache_lock);
1945 		bawrite(bp);
1946 		mutex_enter(&bufcache_lock);
1947 		mutex_enter(&wl->wl_mtx);
1948 	}
1949 	mutex_exit(&wl->wl_mtx);
1950 	mutex_exit(&bufcache_lock);
1951 
1952 #if 0
1953 	WAPBL_PRINTF(WAPBL_PRINT_FLUSH,
1954 	    ("wapbl_flush thread %d.%d done flushing entries...\n",
1955 		curproc->p_pid, curlwp->l_lid));
1956 #endif
1957 
1958 wait_out:
1959 
1960 	/*
1961 	 * If the waitfor flag is set, don't return until everything is
1962 	 * fully flushed and the on-disk log is empty.
1963 	 */
1964 	if (waitfor) {
1965 		error = wapbl_truncate(wl, wl->wl_circ_size -
1966 		    wl->wl_reserved_bytes);
1967 	}
1968 
1969 out:
1970 	if (error) {
1971 		wl->wl_flush_abort(wl->wl_mount,
1972 		    TAILQ_FIRST(&wl->wl_dealloclist));
1973 	}
1974 
1975 #ifdef WAPBL_DEBUG_PRINT
1976 	if (error) {
1977 		pid_t pid = -1;
1978 		lwpid_t lid = -1;
1979 		if (curproc)
1980 			pid = curproc->p_pid;
1981 		if (curlwp)
1982 			lid = curlwp->l_lid;
1983 		mutex_enter(&wl->wl_mtx);
1984 #ifdef WAPBL_DEBUG_BUFBYTES
1985 		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1986 		    ("wapbl_flush: thread %d.%d aborted flush: "
1987 			"error = %d\n"
1988 			"\tbufcount=%zu bufbytes=%zu bcount=%zu "
1989 			"deallocs=%d inodes=%d\n"
1990 			"\terrcnt = %d, reclaimable=%zu reserved=%zu "
1991 			"unsynced=%zu\n",
1992 			pid, lid, error, wl->wl_bufcount,
1993 			wl->wl_bufbytes, wl->wl_bcount,
1994 			wl->wl_dealloccnt, wl->wl_inohashcnt,
1995 			wl->wl_error_count, wl->wl_reclaimable_bytes,
1996 			wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes));
1997 		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
1998 			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
1999 			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
2000 				"error = %d, unsynced = %zu\n",
2001 				we->we_bufcount, we->we_reclaimable_bytes,
2002 				we->we_error, we->we_unsynced_bufbytes));
2003 		}
2004 #else
2005 		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2006 		    ("wapbl_flush: thread %d.%d aborted flush: "
2007 			"error = %d\n"
2008 			"\tbufcount=%zu bufbytes=%zu bcount=%zu "
2009 			"deallocs=%d inodes=%d\n"
2010 			"\terrcnt = %d, reclaimable=%zu reserved=%zu\n",
2011 			pid, lid, error, wl->wl_bufcount,
2012 			wl->wl_bufbytes, wl->wl_bcount,
2013 			wl->wl_dealloccnt, wl->wl_inohashcnt,
2014 			wl->wl_error_count, wl->wl_reclaimable_bytes,
2015 			wl->wl_reserved_bytes));
2016 		SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
2017 			WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2018 			    ("\tentry: bufcount = %zu, reclaimable = %zu, "
2019 				"error = %d\n", we->we_bufcount,
2020 				we->we_reclaimable_bytes, we->we_error));
2021 		}
2022 #endif
2023 		mutex_exit(&wl->wl_mtx);
2024 	}
2025 #endif
2026 
2027 	rw_exit(&wl->wl_rwlock);
2028 	return error;
2029 }
2030 
2031 /****************************************************************/
2032 
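/*
 * wapbl_jlock_assert(wl)
 *
 *	Assert that the caller holds the journal lock, wl->wl_rwlock,
 *	either shared or exclusive.
 */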
2033 void
2034 wapbl_jlock_assert(struct wapbl *wl)
2035 {
2036 
2037 	KASSERT(rw_lock_held(&wl->wl_rwlock));
2038 }
2039 
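/*
 * wapbl_junlock_assert(wl)
 *
 *	Assert that the caller does not hold the journal lock
 *	exclusively.
 */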
2040 void
2041 wapbl_junlock_assert(struct wapbl *wl)
2042 {
2043 
2044 	KASSERT(!rw_write_held(&wl->wl_rwlock));
2045 }
2046 
2047 /****************************************************************/
2048 
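/*
 * wapbl_print(wl, full, pr)
 *
 *	Print a summary of the state of wl with the printf-like
 *	function pr.  If full is set, also dump the queued bufs, the
 *	pending deallocations, the registered inodes, and the free and
 *	busy journal I/O buffers.
 */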
2049 /* locks missing */
2050 void
2051 wapbl_print(struct wapbl *wl, int full, void (*pr)(const char *, ...))
2052 {
2053 	struct buf *bp;
2054 	struct wapbl_entry *we;
2055 	(*pr)("wapbl %p", wl);
2056 	(*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n",
2057 	    wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn);
2058 	(*pr)("circ = %zu, header = %zu,"
2059 	    " head = %"PRIdMAX" tail = %"PRIdMAX"\n",
2060 	    wl->wl_circ_size, wl->wl_circ_off,
2061 	    (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail);
2062 	(*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n",
2063 	    wl->wl_fs_dev_bshift, wl->wl_log_dev_bshift);
2064 #ifdef WAPBL_DEBUG_BUFBYTES
2065 	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
2066 	    "reserved = %zu errcnt = %d unsynced = %zu\n",
2067 	    wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount,
2068 	    wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
2069 	    wl->wl_error_count, wl->wl_unsynced_bufbytes);
2070 #else
2071 	(*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu "
2072 	    "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes,
2073 	    wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes,
2074 	    wl->wl_error_count);
2075 #endif
2076 	(*pr)("\tdealloccnt = %d, dealloclim = %d\n",
2077 	    wl->wl_dealloccnt, wl->wl_dealloclim);
2078 	(*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n",
2079 	    wl->wl_inohashcnt, wl->wl_inohashmask);
2080 	(*pr)("entries:\n");
2081 	SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) {
2082 #ifdef WAPBL_DEBUG_BUFBYTES
2083 		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, "
2084 		    "unsynced = %zu\n",
2085 		    we->we_bufcount, we->we_reclaimable_bytes,
2086 		    we->we_error, we->we_unsynced_bufbytes);
2087 #else
2088 		(*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n",
2089 		    we->we_bufcount, we->we_reclaimable_bytes, we->we_error);
2090 #endif
2091 	}
2092 	if (full) {
2093 		int cnt = 0;
2094 		(*pr)("bufs =");
2095 		TAILQ_FOREACH(bp, &wl->wl_bufs, b_wapbllist) {
2096 			if (!TAILQ_NEXT(bp, b_wapbllist)) {
2097 				(*pr)(" %p", bp);
2098 			} else if ((++cnt % 6) == 0) {
2099 				(*pr)(" %p,\n\t", bp);
2100 			} else {
2101 				(*pr)(" %p,", bp);
2102 			}
2103 		}
2104 		(*pr)("\n");
2105 
2106 		(*pr)("dealloced blks = ");
2107 		{
2108 			struct wapbl_dealloc *wd;
2109 			cnt = 0;
2110 			TAILQ_FOREACH(wd, &wl->wl_dealloclist, wd_entries) {
2111 				(*pr)(" %"PRId64":%d,",
2112 				    wd->wd_blkno,
2113 				    wd->wd_len);
2114 				if ((++cnt % 4) == 0) {
2115 					(*pr)("\n\t");
2116 				}
2117 			}
2118 		}
2119 		(*pr)("\n");
2120 
2121 		(*pr)("registered inodes = ");
2122 		{
2123 			int i;
2124 			cnt = 0;
2125 			for (i = 0; i <= wl->wl_inohashmask; i++) {
2126 				struct wapbl_ino_head *wih;
2127 				struct wapbl_ino *wi;
2128 
2129 				wih = &wl->wl_inohash[i];
2130 				LIST_FOREACH(wi, wih, wi_hash) {
2131 					if (wi->wi_ino == 0)
2132 						continue;
2133 					(*pr)(" %"PRIu64"/0%06"PRIo32",",
2134 					    wi->wi_ino, wi->wi_mode);
2135 					if ((++cnt % 4) == 0) {
2136 						(*pr)("\n\t");
2137 					}
2138 				}
2139 			}
2140 			(*pr)("\n");
2141 		}
2142 
2143 		(*pr)("iobufs free =");
2144 		TAILQ_FOREACH(bp, &wl->wl_iobufs, b_wapbllist) {
2145 			if (!TAILQ_NEXT(bp, b_wapbllist)) {
2146 				(*pr)(" %p", bp);
2147 			} else if ((++cnt % 6) == 0) {
2148 				(*pr)(" %p,\n\t", bp);
2149 			} else {
2150 				(*pr)(" %p,", bp);
2151 			}
2152 		}
2153 		(*pr)("\n");
2154 
2155 		(*pr)("iobufs busy =");
2156 		TAILQ_FOREACH(bp, &wl->wl_iobufs_busy, b_wapbllist) {
2157 			if (!TAILQ_NEXT(bp, b_wapbllist)) {
2158 				(*pr)(" %p", bp);
2159 			} else if ((++cnt % 6) == 0) {
2160 				(*pr)(" %p,\n\t", bp);
2161 			} else {
2162 				(*pr)(" %p,", bp);
2163 			}
2164 		}
2165 		(*pr)("\n");
2166 	}
2167 }
2168 
2169 #if defined(WAPBL_DEBUG) || defined(DDB)
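/*
 * wapbl_dump(wl)
 *
 *	Print the full state of wl with printf, or of wapbl_debug_wl
 *	if wl is NULL and WAPBL_DEBUG is defined.  Compiled only when
 *	WAPBL_DEBUG or DDB is defined.
 */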
2170 void
2171 wapbl_dump(struct wapbl *wl)
2172 {
2173 #if defined(WAPBL_DEBUG)
2174 	if (!wl)
2175 		wl = wapbl_debug_wl;
2176 #endif
2177 	if (!wl)
2178 		return;
2179 	wapbl_print(wl, 1, printf);
2180 }
2181 #endif
2182 
2183 /****************************************************************/
2184 
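/*
 * wapbl_register_deallocation(wl, blk, len, force, cookiep)
 *
 *	Record a pending deallocation of the extent of length len at
 *	blk in the current transaction, to be written to the log as a
 *	revocation on the next flush.  If the per-transaction limit on
 *	deallocations has been reached, fail with EAGAIN, unless force
 *	is set, in which case the limit is exceeded with a warning.
 *	If cookiep is nonnull, store a cookie in *cookiep that can
 *	later be passed to wapbl_unregister_deallocation.
 */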
2185 int
2186 wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len, bool force,
2187     void **cookiep)
2188 {
2189 	struct wapbl_dealloc *wd;
2190 	int error = 0;
2191 
2192 	wapbl_jlock_assert(wl);
2193 
2194 	mutex_enter(&wl->wl_mtx);
2195 
2196 	if (__predict_false(wl->wl_dealloccnt >= wl->wl_dealloclim)) {
2197 		if (!force) {
2198 			error = SET_ERROR(EAGAIN);
2199 			goto out;
2200 		}
2201 
2202 		/*
2203 		 * Forced registration should only be used when:
2204 		 * 1) the caller can't cope with failure; and
2205 		 * 2) the path is triggered only a small, bounded number
2206 		 *    of times per transaction.
2207 		 * If these conditions are not met and the path is triggered
2208 		 * many times, this could overflow the maximum transaction
2209 		 * size and panic later.
2210 		 */
2211 		printf("%s: forced dealloc registration over limit:"
2212 		    " %d >= %d\n",
2213 		    wl->wl_mount->mnt_stat.f_mntonname,
2214 		    wl->wl_dealloccnt, wl->wl_dealloclim);
2215 	}
2216 
2217 	wl->wl_dealloccnt++;
2218 	mutex_exit(&wl->wl_mtx);
2219 
2220 	wd = pool_get(&wapbl_dealloc_pool, PR_WAITOK);
2221 	wd->wd_blkno = blk;
2222 	wd->wd_len = len;
2223 
2224 	mutex_enter(&wl->wl_mtx);
2225 	TAILQ_INSERT_TAIL(&wl->wl_dealloclist, wd, wd_entries);
2226 
2227 	if (cookiep)
2228 		*cookiep = wd;
2229 
2230 out:
2231 	mutex_exit(&wl->wl_mtx);
2232 
2233 	WAPBL_PRINTF(WAPBL_PRINT_ALLOC,
2234 	    ("wapbl_register_deallocation: blk=%"PRId64" len=%d error=%d\n",
2235 		blk, len, error));
2236 
2237 	return error;
2238 }
2239 
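/*
 * wapbl_deallocation_free(wl, wd, locked)
 *
 *	Remove wd from the pending deallocation list and return it to
 *	the pool.  If locked is false, take wl->wl_mtx around the list
 *	manipulation; otherwise the caller must already hold
 *	wl->wl_rwlock or wl->wl_mtx.
 */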
2240 static void
2241 wapbl_deallocation_free(struct wapbl *wl, struct wapbl_dealloc *wd,
2242 	bool locked)
2243 {
2244 
2245 	KASSERT(!locked
2246 	    || rw_lock_held(&wl->wl_rwlock) || mutex_owned(&wl->wl_mtx));
2247 
2248 	if (!locked)
2249 		mutex_enter(&wl->wl_mtx);
2250 
2251 	TAILQ_REMOVE(&wl->wl_dealloclist, wd, wd_entries);
2252 	wl->wl_dealloccnt--;
2253 
2254 	if (!locked)
2255 		mutex_exit(&wl->wl_mtx);
2256 
2257 	pool_put(&wapbl_dealloc_pool, wd);
2258 }
2259 
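/*
 * wapbl_unregister_deallocation(wl, cookie)
 *
 *	Withdraw the pending deallocation identified by cookie, as
 *	returned by wapbl_register_deallocation.
 */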
2260 void
2261 wapbl_unregister_deallocation(struct wapbl *wl, void *cookie)
2262 {
2263 
2264 	KASSERT(cookie != NULL);
2265 	wapbl_deallocation_free(wl, cookie, false);
2266 }
2267 
2268 /****************************************************************/
2269 
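/*
 * wapbl_inodetrk_init(wl, size)
 *
 *	Initialize the hash table used to track registered inodes,
 *	sized for about size entries, and create the shared
 *	wapbl_ino_pool on first use.
 */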
2270 static void
2271 wapbl_inodetrk_init(struct wapbl *wl, u_int size)
2272 {
2273 
2274 	wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask);
2275 	if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) {
2276 		pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0,
2277 		    "wapblinopl", &pool_allocator_nointr, IPL_NONE);
2278 	}
2279 }
2280 
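/*
 * wapbl_inodetrk_free(wl)
 *
 *	Free the registered-inode hash table, which must be empty, and
 *	destroy the shared wapbl_ino_pool when the last reference is
 *	dropped.
 */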
2281 static void
2282 wapbl_inodetrk_free(struct wapbl *wl)
2283 {
2284 
2285 	/* XXX this KASSERT needs locking/mutex analysis */
2286 	KASSERT(wl->wl_inohashcnt == 0);
2287 	hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask);
2288 	membar_release();
2289 	if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) {
2290 		membar_acquire();
2291 		pool_destroy(&wapbl_ino_pool);
2292 	}
2293 }
2294 
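/*
 * wapbl_inodetrk_get(wl, ino)
 *
 *	Return the tracking record for inode number ino, or NULL if
 *	ino is not registered.  Caller must hold wl->wl_mtx.
 */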
2295 static struct wapbl_ino *
2296 wapbl_inodetrk_get(struct wapbl *wl, ino_t ino)
2297 {
2298 	struct wapbl_ino_head *wih;
2299 	struct wapbl_ino *wi;
2300 
2301 	KASSERT(mutex_owned(&wl->wl_mtx));
2302 
2303 	wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2304 	LIST_FOREACH(wi, wih, wi_hash) {
2305 		if (ino == wi->wi_ino)
2306 			return wi;
2307 	}
2308 	return 0;
2309 }
2310 
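/*
 * wapbl_register_inode(wl, ino, mode)
 *
 *	Add inode number ino, with mode mode, to the set of inodes
 *	tracked by wl; the set is written to the log by each commit.
 *	Registering an already-registered inode is a no-op.
 */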
2311 void
2312 wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2313 {
2314 	struct wapbl_ino_head *wih;
2315 	struct wapbl_ino *wi;
2316 
2317 	wi = pool_get(&wapbl_ino_pool, PR_WAITOK);
2318 
2319 	mutex_enter(&wl->wl_mtx);
2320 	if (wapbl_inodetrk_get(wl, ino) == NULL) {
2321 		wi->wi_ino = ino;
2322 		wi->wi_mode = mode;
2323 		wih = &wl->wl_inohash[ino & wl->wl_inohashmask];
2324 		LIST_INSERT_HEAD(wih, wi, wi_hash);
2325 		wl->wl_inohashcnt++;
2326 		WAPBL_PRINTF(WAPBL_PRINT_INODE,
2327 		    ("wapbl_register_inode: ino=%"PRId64"\n", ino));
2328 		mutex_exit(&wl->wl_mtx);
2329 	} else {
2330 		mutex_exit(&wl->wl_mtx);
2331 		pool_put(&wapbl_ino_pool, wi);
2332 	}
2333 }
2334 
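/*
 * wapbl_unregister_inode(wl, ino, mode)
 *
 *	Remove inode number ino from the set of inodes tracked by wl,
 *	if it is present.  The mode argument is ignored.
 */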
2335 void
2336 wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode)
2337 {
2338 	struct wapbl_ino *wi;
2339 
2340 	mutex_enter(&wl->wl_mtx);
2341 	wi = wapbl_inodetrk_get(wl, ino);
2342 	if (wi) {
2343 		WAPBL_PRINTF(WAPBL_PRINT_INODE,
2344 		    ("wapbl_unregister_inode: ino=%"PRId64"\n", ino));
2345 		KASSERT(wl->wl_inohashcnt > 0);
2346 		wl->wl_inohashcnt--;
2347 		LIST_REMOVE(wi, wi_hash);
2348 		mutex_exit(&wl->wl_mtx);
2349 
2350 		pool_put(&wapbl_ino_pool, wi);
2351 	} else {
2352 		mutex_exit(&wl->wl_mtx);
2353 	}
2354 }
2355 
2356 /****************************************************************/
2357 
2358 /*
2359  * wapbl_transaction_inodes_len(wl)
2360  *
2361  *	Calculate the number of bytes required for inode registration
2362  *	log records in wl.
2363  */
2364 static inline size_t
2365 wapbl_transaction_inodes_len(struct wapbl *wl)
2366 {
2367 	int blocklen = 1<<wl->wl_log_dev_bshift;
2368 	int iph;
2369 
2370 	/* Calculate number of inodes described in an inodelist header */
2371 	iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2372 	    sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2373 
2374 	KASSERT(iph > 0);
2375 
2376 	return MAX(1, howmany(wl->wl_inohashcnt, iph)) * blocklen;
2377 }
2378 
2379 
2380 /*
2381  * wapbl_transaction_len(wl)
2382  *
2383  *	Calculate number of bytes required for all log records in wl.
2384  */
2385 static size_t
2386 wapbl_transaction_len(struct wapbl *wl)
2387 {
2388 	int blocklen = 1<<wl->wl_log_dev_bshift;
2389 	size_t len;
2390 
2391 	/* Calculate number of blocks described in a blocklist header */
2392 	len = wl->wl_bcount;
2393 	len += howmany(wl->wl_bufcount, wl->wl_brperjblock) * blocklen;
2394 	len += howmany(wl->wl_dealloccnt, wl->wl_brperjblock) * blocklen;
2395 	len += wapbl_transaction_inodes_len(wl);
2396 
2397 	return len;
2398 }
2399 
2400 /*
2401  * wapbl_cache_sync(wl, msg)
2402  *
2403  *	Issue DIOCCACHESYNC to wl->wl_devvp.
2404  *
2405  *	If sysctl(vfs.wapbl.verbose_commit) >= 2, print a message
2406  *	including msg about the duration of the cache sync.
2407  */
2408 static int
2409 wapbl_cache_sync(struct wapbl *wl, const char *msg)
2410 {
2411 	const bool verbose = wapbl_verbose_commit >= 2;
2412 	struct bintime start_time;
2413 	int force = 1;
2414 	int error;
2415 
2416 	/* Skip full cache sync if disabled */
2417 	if (!wapbl_flush_disk_cache) {
2418 		return 0;
2419 	}
2420 	if (verbose) {
2421 		bintime(&start_time);
2422 	}
2423 	error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force,
2424 	    FWRITE, FSCRED);
2425 	if (error) {
2426 		WAPBL_PRINTF(WAPBL_PRINT_ERROR,
2427 		    ("wapbl_cache_sync: DIOCCACHESYNC on dev 0x%jx "
2428 			"returned %d\n", (uintmax_t)wl->wl_devvp->v_rdev,
2429 			error));
2430 	}
2431 	if (verbose) {
2432 		struct bintime d;
2433 		struct timespec ts;
2434 
2435 		bintime(&d);
2436 		bintime_sub(&d, &start_time);
2437 		bintime2timespec(&d, &ts);
2438 		printf("wapbl_cache_sync: %s: dev 0x%jx %ju.%09lu\n",
2439 		    msg, (uintmax_t)wl->wl_devvp->v_rdev,
2440 		    (uintmax_t)ts.tv_sec, ts.tv_nsec);
2441 	}
2442 
2443 	wl->wl_ev_cacheflush.ev_count++;
2444 
2445 	return error;
2446 }
2447 
2448 /*
2449  * wapbl_write_commit(wl, head, tail)
2450  *
2451  *	Issue a disk cache sync to wait for all pending writes to the
2452  *	log to complete, and then synchronously commit the current
2453  *	circular queue head and tail to the log, in the next of two
2454  *	locations for commit headers on disk.
2455  *
2456  *	Increment the generation number.  If the generation number
2457  *	rolls over to zero, then a subsequent commit would appear to
2458  *	have an older generation than this one -- in that case, issue a
2459  *	duplicate commit to avoid this.
2460  *
2461  *	=> Caller must have exclusive access to wl, either by holding
2462  *	wl->wl_rwlock for writer or by being wapbl_start before anyone
2463  *	else has seen wl.
2464  */
2465 static int
2466 wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail)
2467 {
2468 	struct wapbl_wc_header *wc = wl->wl_wc_header;
2469 	struct timespec ts;
2470 	int error;
2471 	daddr_t pbn;
2472 
2473 	error = wapbl_buffered_flush(wl, true);
2474 	if (error)
2475 		return error;
2476 	/*
2477 	 * Flush the disk cache to ensure that blocks we've written are
2478 	 * actually written to stable storage before the commit header.
2479 	 * This flushes to disk not only the journal blocks, but also all
2480 	 * metadata blocks written asynchronously since the previous commit.
2481 	 *
2482 	 * XXX Calculate a checksum here; instead we do this for now.
2483 	 */
2484 	wapbl_cache_sync(wl, "1");
2485 
2486 	wc->wc_head = head;
2487 	wc->wc_tail = tail;
2488 	wc->wc_checksum = 0;
2489 	wc->wc_version = 1;
2490 	getnanotime(&ts);
2491 	wc->wc_time = ts.tv_sec;
2492 	wc->wc_timensec = ts.tv_nsec;
2493 
2494 	WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2495 	    ("wapbl_write_commit: head = %"PRIdMAX "tail = %"PRIdMAX"\n",
2496 		(intmax_t)head, (intmax_t)tail));
2497 
2498 	/*
2499 	 * Write the commit header.
2500 	 *
2501 	 * XXX If the generation number will roll over, first zero the
2502 	 * second commit header before trying to write both headers.
2503 	 */
2504 
2505 	pbn = wl->wl_logpbn + (wc->wc_generation % 2);
2506 #ifdef _KERNEL
2507 	pbn = btodb(pbn << wc->wc_log_dev_bshift);
2508 #endif
2509 	error = wapbl_buffered_write(wc, wc->wc_len, wl, pbn,
2510 	    WAPBL_JFLAGS(wl));
2511 	if (error)
2512 		return error;
2513 	error = wapbl_buffered_flush(wl, true);
2514 	if (error)
2515 		return error;
2516 
2517 	/*
2518 	 * Flush the disk cache to ensure that the commit header is actually
2519 	 * written before the metadata blocks.  The commit block is written
2520 	 * with FUA when enabled, in which case this flush is not needed.
2521 	 */
2522 	if (!WAPBL_USE_FUA(wl))
2523 		wapbl_cache_sync(wl, "2");
2524 
2525 	/*
2526 	 * If the generation number was zero, write it out a second time.
2527 	 * This handles initialization and generation number rollover.
2528 	 */
2529 	if (wc->wc_generation++ == 0) {
2530 		error = wapbl_write_commit(wl, head, tail);
2531 		/*
2532 		 * This panic could be removed if we did the zeroing
2533 		 * mentioned above and were certain to roll back the
2534 		 * generation number on failure.
2535 		 */
2536 		if (error) {
2537 			panic("wapbl_write_commit: error writing duplicate "
2538 			    "log header: %d", error);
2539 		}
2540 	}
2541 
2542 	wl->wl_ev_commit.ev_count++;
2543 
2544 	return 0;
2545 }
2546 
2547 /*
2548  * wapbl_write_blocks(wl, offp)
2549  *
2550  *	Write all pending physical blocks in the current transaction
2551  *	from wapbl_add_buf to the log on disk, adding to the circular
2552  *	queue head at byte offset *offp, and returning the new head's
2553  *	byte offset in *offp.
2554  */
2555 static int
2556 wapbl_write_blocks(struct wapbl *wl, off_t *offp)
2557 {
2558 	struct wapbl_wc_blocklist *wc =
2559 	    (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2560 	int blocklen = 1<<wl->wl_log_dev_bshift;
2561 	struct buf *bp;
2562 	off_t off = *offp;
2563 	int error;
2564 	size_t padding;
2565 
2566 	KASSERT(rw_write_held(&wl->wl_rwlock));
2567 
2568 	bp = TAILQ_FIRST(&wl->wl_bufs);
2569 
2570 	while (bp) {
2571 		int cnt;
2572 		struct buf *obp = bp;
2573 
2574 		KASSERT(bp->b_flags & B_LOCKED);
2575 
2576 		wc->wc_type = WAPBL_WC_BLOCKS;
2577 		wc->wc_len = blocklen;
2578 		wc->wc_blkcount = 0;
2579 		wc->wc_unused = 0;
2580 		while (bp && wc->wc_blkcount < wl->wl_brperjblock) {
2581 			/*
2582 			 * Make sure all the physical block numbers are up to
2583 			 * date.  If this is not always true on a given
2584 			 * filesystem, then VOP_BMAP must be called.  We
2585 			 * could call VOP_BMAP here, or else in the filesystem
2586 			 * specific flush callback, although neither of those
2587 			 * solutions allow us to take the vnode lock.  If a
2588 			 * filesystem requires that we must take the vnode lock
2589 			 * to call VOP_BMAP, then we can probably do it in
2590 			 * bwrite when the vnode lock should already be held
2591 			 * by the invoking code.
2592 			 */
2593 			KASSERT(bp->b_vp->v_type == VBLK ||
2594 			    bp->b_blkno != bp->b_lblkno);
2595 			KASSERT(bp->b_blkno > 0);
2596 
2597 			wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
2598 			wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
2599 			wc->wc_len += bp->b_bcount;
2600 			wc->wc_blkcount++;
2601 			bp = TAILQ_NEXT(bp, b_wapbllist);
2602 		}
2603 		if (wc->wc_len % blocklen != 0) {
2604 			padding = blocklen - wc->wc_len % blocklen;
2605 			wc->wc_len += padding;
2606 		} else {
2607 			padding = 0;
2608 		}
2609 
2610 		WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2611 		    ("wapbl_write_blocks:"
2612 			" len = %u (padding %zu) off = %"PRIdMAX"\n",
2613 			wc->wc_len, padding, (intmax_t)off));
2614 
2615 		error = wapbl_circ_write(wl, wc, blocklen, &off);
2616 		if (error)
2617 			return error;
2618 		bp = obp;
2619 		cnt = 0;
2620 		while (bp && cnt++ < wl->wl_brperjblock) {
2621 			error = wapbl_circ_write(wl, bp->b_data,
2622 			    bp->b_bcount, &off);
2623 			if (error)
2624 				return error;
2625 			bp = TAILQ_NEXT(bp, b_wapbllist);
2626 		}
2627 		if (padding) {
2628 			void *zero;
2629 
2630 			zero = wapbl_alloc(padding);
2631 			memset(zero, 0, padding);
2632 			error = wapbl_circ_write(wl, zero, padding, &off);
2633 			wapbl_free(zero, padding);
2634 			if (error)
2635 				return error;
2636 		}
2637 	}
2638 	*offp = off;
2639 	return 0;
2640 }
2641 
2642 /*
2643  * wapbl_write_revocations(wl, offp)
2644  *
2645  *	Write all pending deallocations in the current transaction from
2646  *	wapbl_register_deallocation to the log on disk, adding to the
2647  *	circular queue's head at byte offset *offp, and returning the
2648  *	new head's byte offset in *offp.
2649  */
2650 static int
2651 wapbl_write_revocations(struct wapbl *wl, off_t *offp)
2652 {
2653 	struct wapbl_wc_blocklist *wc =
2654 	    (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
2655 	struct wapbl_dealloc *wd, *lwd;
2656 	int blocklen = 1<<wl->wl_log_dev_bshift;
2657 	off_t off = *offp;
2658 	int error;
2659 
2660 	KASSERT(rw_write_held(&wl->wl_rwlock));
2661 
2662 	if (wl->wl_dealloccnt == 0)
2663 		return 0;
2664 
2665 	while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL) {
2666 		wc->wc_type = WAPBL_WC_REVOCATIONS;
2667 		wc->wc_len = blocklen;
2668 		wc->wc_blkcount = 0;
2669 		wc->wc_unused = 0;
2670 		while (wd && wc->wc_blkcount < wl->wl_brperjblock) {
2671 			wc->wc_blocks[wc->wc_blkcount].wc_daddr =
2672 			    wd->wd_blkno;
2673 			wc->wc_blocks[wc->wc_blkcount].wc_dlen =
2674 			    wd->wd_len;
2675 			wc->wc_blkcount++;
2676 
2677 			wd = TAILQ_NEXT(wd, wd_entries);
2678 		}
2679 		WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2680 		    ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
2681 			wc->wc_len, (intmax_t)off));
2682 		error = wapbl_circ_write(wl, wc, blocklen, &off);
2683 		if (error)
2684 			return error;
2685 
2686 		/* free all successfully written deallocs */
2687 		lwd = wd;
2688 		while ((wd = TAILQ_FIRST(&wl->wl_dealloclist)) != NULL) {
2689 			if (wd == lwd)
2690 				break;
2691 			wapbl_deallocation_free(wl, wd, true);
2692 		}
2693 	}
2694 	*offp = off;
2695 	return 0;
2696 }
2697 
2698 /*
2699  * wapbl_write_inodes(wl, offp)
2700  *
2701  *	Write all pending inode allocations in the current transaction
2702  *	from wapbl_register_inode to the log on disk, adding to the
2703  *	circular queue's head at byte offset *offp and returning the
2704  *	new head's byte offset in *offp.
2705  */
2706 static int
2707 wapbl_write_inodes(struct wapbl *wl, off_t *offp)
2708 {
2709 	struct wapbl_wc_inodelist *wc =
2710 	    (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
2711 	int i;
2712 	int blocklen = 1 << wl->wl_log_dev_bshift;
2713 	off_t off = *offp;
2714 	int error;
2715 
2716 	struct wapbl_ino_head *wih;
2717 	struct wapbl_ino *wi;
2718 	int iph;
2719 
2720 	iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
2721 	    sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);
2722 
2723 	i = 0;
2724 	wih = &wl->wl_inohash[0];
2725 	wi = 0;
2726 	do {
2727 		wc->wc_type = WAPBL_WC_INODES;
2728 		wc->wc_len = blocklen;
2729 		wc->wc_inocnt = 0;
2730 		wc->wc_clear = (i == 0);
2731 		while (i < wl->wl_inohashcnt && wc->wc_inocnt < iph) {
2732 			while (!wi) {
2733 				KASSERT((wih - &wl->wl_inohash[0])
2734 				    <= wl->wl_inohashmask);
2735 				wi = LIST_FIRST(wih++);
2736 			}
2737 			wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
2738 			wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
2739 			wc->wc_inocnt++;
2740 			i++;
2741 			wi = LIST_NEXT(wi, wi_hash);
2742 		}
2743 		WAPBL_PRINTF(WAPBL_PRINT_WRITE,
2744 		    ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
2745 			wc->wc_len, (intmax_t)off));
2746 		error = wapbl_circ_write(wl, wc, blocklen, &off);
2747 		if (error)
2748 			return error;
2749 	} while (i < wl->wl_inohashcnt);
2750 
2751 	*offp = off;
2752 	return 0;
2753 }
2754 
2755 #endif /* _KERNEL */
2756 
2757 /****************************************************************/
2758 
2759 struct wapbl_blk {
2760 	LIST_ENTRY(wapbl_blk) wb_hash;
2761 	daddr_t wb_blk;
2762 	off_t wb_off; /* Offset of this block in the log */
2763 };
2764 #define	WAPBL_BLKPOOL_MIN 83
2765 
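/*
 * wapbl_blkhash_init(wr, size)
 *
 *	Initialize the block hash table used during replay to remember
 *	where in the log the most recent copy of each block lives,
 *	sized for about size entries but no fewer than
 *	WAPBL_BLKPOOL_MIN.
 */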
2766 static void
2767 wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
2768 {
2769 
2770 	if (size < WAPBL_BLKPOOL_MIN)
2771 		size = WAPBL_BLKPOOL_MIN;
2772 	KASSERT(wr->wr_blkhash == 0);
2773 #ifdef _KERNEL
2774 	wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
2775 #else /* ! _KERNEL */
2776 	/* Manually implement hashinit */
2777 	{
2778 		unsigned long i, hashsize;
2779 
2780 		for (hashsize = 1; hashsize < size; hashsize <<= 1)
2781 			continue;
2782 		wr->wr_blkhash = wapbl_alloc(hashsize *
2783 		    sizeof(*wr->wr_blkhash));
2784 		for (i = 0; i < hashsize; i++)
2785 			LIST_INIT(&wr->wr_blkhash[i]);
2786 		wr->wr_blkhashmask = hashsize - 1;
2787 	}
2788 #endif /* ! _KERNEL */
2789 }
2790 
2791 static void
2792 wapbl_blkhash_free(struct wapbl_replay *wr)
2793 {
2794 
2795 	KASSERT(wr->wr_blkhashcnt == 0);
2796 #ifdef _KERNEL
2797 	hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
2798 #else /* ! _KERNEL */
2799 	wapbl_free(wr->wr_blkhash,
2800 	    (wr->wr_blkhashmask + 1) * sizeof(*wr->wr_blkhash));
2801 #endif /* ! _KERNEL */
2802 }
2803 
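/*
 * wapbl_blkhash_get(wr, blk)
 *
 *	Return the hash table entry for block blk, or NULL if the
 *	block has no copy recorded in the log.
 */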
2804 static struct wapbl_blk *
2805 wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
2806 {
2807 	struct wapbl_blk_head *wbh;
2808 	struct wapbl_blk *wb;
2809 
2810 	wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2811 	LIST_FOREACH(wb, wbh, wb_hash) {
2812 		if (blk == wb->wb_blk)
2813 			return wb;
2814 	}
2815 	return 0;
2816 }
2817 
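/*
 * wapbl_blkhash_ins(wr, blk, off)
 *
 *	Record that the most recent copy of block blk lives at offset
 *	off in the log, replacing any previous entry for blk.
 */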
2818 static void
2819 wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
2820 {
2821 	struct wapbl_blk_head *wbh;
2822 	struct wapbl_blk *wb;
2823 
2824 	wb = wapbl_blkhash_get(wr, blk);
2825 	if (wb) {
2826 		KASSERT(wb->wb_blk == blk);
2827 		wb->wb_off = off;
2828 	} else {
2829 		wb = wapbl_alloc(sizeof(*wb));
2830 		wb->wb_blk = blk;
2831 		wb->wb_off = off;
2832 		wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
2833 		LIST_INSERT_HEAD(wbh, wb, wb_hash);
2834 		wr->wr_blkhashcnt++;
2835 	}
2836 }
2837 
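/*
 * wapbl_blkhash_rem(wr, blk)
 *
 *	Drop any entry for block blk from the hash table, e.g. because
 *	the block has been revoked.
 */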
2838 static void
2839 wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
2840 {
2841 	struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
2842 
2843 	if (wb) {
2844 		KASSERT(wr->wr_blkhashcnt > 0);
2845 		wr->wr_blkhashcnt--;
2846 		LIST_REMOVE(wb, wb_hash);
2847 		wapbl_free(wb, sizeof(*wb));
2848 	}
2849 }
2850 
2851 static void
2852 wapbl_blkhash_clear(struct wapbl_replay *wr)
2853 {
2854 	unsigned long i;
2855 
2856 	for (i = 0; i <= wr->wr_blkhashmask; i++) {
2857 		struct wapbl_blk *wb;
2858 
2859 		while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
2860 			KASSERT(wr->wr_blkhashcnt > 0);
2861 			wr->wr_blkhashcnt--;
2862 			LIST_REMOVE(wb, wb_hash);
2863 			wapbl_free(wb, sizeof(*wb));
2864 		}
2865 	}
2866 	KASSERT(wr->wr_blkhashcnt == 0);
2867 }
2868 
2869 /****************************************************************/
2870 
2871 /*
2872  * wapbl_circ_read(wr, data, len, offp)
2873  *
2874  *	Read len bytes into data from the circular queue of wr,
2875  *	starting at the linear byte offset *offp, and returning the new
2876  *	linear byte offset in *offp.
2877  *
2878  *	If the starting linear byte offset precedes wr->wr_circ_off,
2879  *	the read instead begins at wr->wr_circ_off.  XXX WTF?  This
2880  *	should be a KASSERT, not a conditional.
2881  */
2882 static int
2883 wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
2884 {
2885 	size_t slen;
2886 	off_t off = *offp;
2887 	int error;
2888 	daddr_t pbn;
2889 
2890 	KASSERT(((len >> wr->wr_log_dev_bshift) << wr->wr_log_dev_bshift) ==
2891 	    len);
2892 
2893 	if (off < wr->wr_circ_off)
2894 		off = wr->wr_circ_off;
2895 	slen = wr->wr_circ_off + wr->wr_circ_size - off;
2896 	if (slen < len) {
2897 		pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2898 #ifdef _KERNEL
2899 		pbn = btodb(pbn << wr->wr_log_dev_bshift);
2900 #endif
2901 		error = wapbl_read(data, slen, wr->wr_devvp, pbn);
2902 		if (error)
2903 			return error;
2904 		data = (uint8_t *)data + slen;
2905 		len -= slen;
2906 		off = wr->wr_circ_off;
2907 	}
2908 	pbn = wr->wr_logpbn + (off >> wr->wr_log_dev_bshift);
2909 #ifdef _KERNEL
2910 	pbn = btodb(pbn << wr->wr_log_dev_bshift);
2911 #endif
2912 	error = wapbl_read(data, len, wr->wr_devvp, pbn);
2913 	if (error)
2914 		return error;
2915 	off += len;
2916 	if (off >= wr->wr_circ_off + wr->wr_circ_size)
2917 		off = wr->wr_circ_off;
2918 	*offp = off;
2919 	return 0;
2920 }
2921 
2922 /*
2923  * wapbl_circ_advance(wr, len, offp)
2924  *
2925  *	Compute the linear byte offset of the circular queue of wr that
2926  *	is len bytes past *offp, and store it in *offp.
2927  *
2928  *	This is as if wapbl_circ_read, but without actually reading
2929  *	anything.
2930  *
2931  *	If the starting linear byte offset precedes wr->wr_circ_off, it
2932  *	is taken to be wr->wr_circ_off instead.  XXX WTF?  This should
2933  *	be a KASSERT, not a conditional.
2934  */
2935 static void
2936 wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
2937 {
2938 	size_t slen;
2939 	off_t off = *offp;
2940 
2941 	KASSERT(((len >> wr->wr_log_dev_bshift) << wr->wr_log_dev_bshift) ==
2942 	    len);
2943 
2944 	if (off < wr->wr_circ_off)
2945 		off = wr->wr_circ_off;
2946 	slen = wr->wr_circ_off + wr->wr_circ_size - off;
2947 	if (slen < len) {
2948 		len -= slen;
2949 		off = wr->wr_circ_off;
2950 	}
2951 	off += len;
2952 	if (off >= wr->wr_circ_off + wr->wr_circ_size)
2953 		off = wr->wr_circ_off;
2954 	*offp = off;
2955 }
2956 
2957 /****************************************************************/
2958 
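/*
 * wapbl_replay_start(wrp, vp, off, count, blksize)
 *
 *	Open the journal at block off of vp (count blocks of size
 *	blksize) for replay: read the two commit headers, pick the one
 *	with the newer generation, scan the log between its tail and
 *	head, and build the block and inode tables needed for replay.
 *	On success, return the replay state in *wrp.
 */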
2959 int
2960 wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
2961     daddr_t off, size_t count, size_t blksize)
2962 {
2963 	struct wapbl_replay *wr;
2964 	int error;
2965 	struct vnode *devvp;
2966 	daddr_t logpbn;
2967 	uint8_t *scratch;
2968 	struct wapbl_wc_header *wch;
2969 	struct wapbl_wc_header *wch2;
2970 	/* Use this until we read the actual log header */
2971 	int log_dev_bshift = ilog2(blksize);
2972 	size_t used;
2973 	daddr_t pbn;
2974 
2975 	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
2976 	    ("wapbl_replay_start: vp=%p off=%"PRId64" count=%zu blksize=%zu\n",
2977 		vp, off, count, blksize));
2978 
2979 	if (off < 0)
2980 		return SET_ERROR(EINVAL);
2981 
2982 	if (blksize < DEV_BSIZE)
2983 		return SET_ERROR(EINVAL);
2984 	if (blksize % DEV_BSIZE)
2985 		return SET_ERROR(EINVAL);
2986 
2987 #ifdef _KERNEL
2988 #if 0
2989 	/* XXX vp->v_size isn't reliably set for VBLK devices,
2990 	 * especially root.  However, we might still want to verify
2991 	 * that the full load is readable */
2992 	if ((off + count) * blksize > vp->v_size)
2993 		return SET_ERROR(EINVAL);
2994 #endif
2995 	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
2996 		return error;
2997 	}
2998 #else /* ! _KERNEL */
2999 	devvp = vp;
3000 	logpbn = off;
3001 #endif /* ! _KERNEL */
3002 
3003 	scratch = wapbl_alloc(MAXBSIZE);
3004 
3005 	pbn = logpbn;
3006 #ifdef _KERNEL
3007 	pbn = btodb(pbn << log_dev_bshift);
3008 #endif
3009 	error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, pbn);
3010 	if (error)
3011 		goto errout;
3012 
3013 	wch = (struct wapbl_wc_header *)scratch;
3014 	wch2 =
3015 	    (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
3016 	/* XXX verify checksums and magic numbers */
3017 	if (wch->wc_type != WAPBL_WC_HEADER) {
3018 		printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
3019 		error = SET_ERROR(EFTYPE);
3020 		goto errout;
3021 	}
3022 
3023 	if (wch2->wc_generation > wch->wc_generation)
3024 		wch = wch2;
3025 
3026 	wr = wapbl_calloc(1, sizeof(*wr));
3027 
3028 	wr->wr_logvp = vp;
3029 	wr->wr_devvp = devvp;
3030 	wr->wr_logpbn = logpbn;
3031 
3032 	wr->wr_scratch = scratch;
3033 
3034 	wr->wr_log_dev_bshift = wch->wc_log_dev_bshift;
3035 	wr->wr_fs_dev_bshift = wch->wc_fs_dev_bshift;
3036 	wr->wr_circ_off = wch->wc_circ_off;
3037 	wr->wr_circ_size = wch->wc_circ_size;
3038 	wr->wr_generation = wch->wc_generation;
3039 
3040 	used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);
3041 
3042 	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
3043 	    ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
3044 		" len=%"PRId64" used=%zu\n",
3045 		wch->wc_head, wch->wc_tail, wch->wc_circ_off,
3046 		wch->wc_circ_size, used));
3047 
3048 	wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));
3049 
3050 	error = wapbl_replay_process(wr, wch->wc_head, wch->wc_tail);
3051 	if (error) {
3052 		wapbl_replay_stop(wr);
3053 		wapbl_replay_free(wr);
3054 		return error;
3055 	}
3056 
3057 	*wrp = wr;
3058 	return 0;
3059 
3060 errout:
3061 	wapbl_free(scratch, MAXBSIZE);
3062 	return error;
3063 }
3064 
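/*
 * wapbl_replay_stop(wr)
 *
 *	Release the scratch buffer and the block hash table of wr,
 *	after which wr is no longer open.  Does nothing if wr is not
 *	open.
 */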
3065 void
3066 wapbl_replay_stop(struct wapbl_replay *wr)
3067 {
3068 
3069 	if (!wapbl_replay_isopen(wr))
3070 		return;
3071 
3072 	WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));
3073 
3074 	wapbl_free(wr->wr_scratch, MAXBSIZE);
3075 	wr->wr_scratch = NULL;
3076 
3077 	wr->wr_logvp = NULL;
3078 
3079 	wapbl_blkhash_clear(wr);
3080 	wapbl_blkhash_free(wr);
3081 }
3082 
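/*
 * wapbl_replay_free(wr)
 *
 *	Free the replay state wr, including any saved inode records.
 *	wr must not be open.
 */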
3083 void
3084 wapbl_replay_free(struct wapbl_replay *wr)
3085 {
3086 
3087 	KDASSERT(!wapbl_replay_isopen(wr));
3088 
3089 	if (wr->wr_inodes) {
3090 		wapbl_free(wr->wr_inodes,
3091 		    wr->wr_inodescnt * sizeof(wr->wr_inodes[0]));
3092 	}
3093 	wapbl_free(wr, sizeof(*wr));
3094 }
3095 
3096 #ifdef _KERNEL
3097 int
3098 wapbl_replay_isopen1(struct wapbl_replay *wr)
3099 {
3100 
3101 	return wapbl_replay_isopen(wr);
3102 }
3103 #endif
3104 
3105 /*
3106  * Calculate the disk address for the i'th block in the wc_blocklist,
3107  * offset by j blocks of size blen.
3108  *
3109  * wc_daddr is always a kernel disk address in DEV_BSIZE units that
3110  * was written to the journal.
3111  *
3112  * The kernel needs that address plus the offset in DEV_BSIZE units.
3113  *
3114  * Userland needs that address plus the offset in blen units.
3115  *
3116  */
3117 static daddr_t
3118 wapbl_block_daddr(struct wapbl_wc_blocklist *wc, int i, int j, int blen)
3119 {
3120 	daddr_t pbn;
3121 
3122 #ifdef _KERNEL
3123 	pbn = wc->wc_blocks[i].wc_daddr + btodb(j * blen);
3124 #else
3125 	pbn = dbtob(wc->wc_blocks[i].wc_daddr) / blen + j;
3126 #endif
3127 
3128 	return pbn;
3129 }
3130 
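/*
 * wapbl_replay_process_blocks(wr, offp)
 *
 *	For the blocklist record in the scratch buffer, enter each
 *	physical block into the block hash table with its offset in
 *	the log, advancing *offp past the block data.
 */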
3131 static void
3132 wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
3133 {
3134 	struct wapbl_wc_blocklist *wc =
3135 	    (struct wapbl_wc_blocklist *)wr->wr_scratch;
3136 	int fsblklen = 1 << wr->wr_fs_dev_bshift;
3137 	int i, j, n;
3138 
3139 	for (i = 0; i < wc->wc_blkcount; i++) {
3140 		/*
3141 		 * Enter each physical block into the hashtable independently.
3142 		 */
3143 		n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
3144 		for (j = 0; j < n; j++) {
3145 			wapbl_blkhash_ins(wr,
3146 			    wapbl_block_daddr(wc, i, j, fsblklen),
3147 			    *offp);
3148 			wapbl_circ_advance(wr, fsblklen, offp);
3149 		}
3150 	}
3151 }
3152 
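/*
 * wapbl_replay_process_revocations(wr)
 *
 *	For the revocation record in the scratch buffer, remove each
 *	named block from the block hash table so it will not be
 *	replayed.
 */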
3153 static void
3154 wapbl_replay_process_revocations(struct wapbl_replay *wr)
3155 {
3156 	struct wapbl_wc_blocklist *wc =
3157 	    (struct wapbl_wc_blocklist *)wr->wr_scratch;
3158 	int fsblklen = 1 << wr->wr_fs_dev_bshift;
3159 	int i, j, n;
3160 
3161 	for (i = 0; i < wc->wc_blkcount; i++) {
3162 		/*
3163 		 * Remove any blocks found from the hashtable.
3164 		 */
3165 		n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
3166 		for (j = 0; j < n; j++) {
3167 			wapbl_blkhash_rem(wr, wapbl_block_daddr(wc, i, j,
3168 				fsblklen));
3169 		}
3170 	}
3171 }
3172 
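/*
 * wapbl_replay_process_inodes(wr, oldoff, newoff)
 *
 *	Append the inodes in the inodelist record in the scratch
 *	buffer to the replay inode table, first clearing the table if
 *	the record has wc_clear set.  oldoff and newoff are the log
 *	offsets just before and after the record.
 */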
3173 static void
3174 wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff,
3175     off_t newoff)
3176 {
3177 	struct wapbl_wc_inodelist *wc =
3178 	    (struct wapbl_wc_inodelist *)wr->wr_scratch;
3179 	void *new_inodes;
3180 	const size_t oldsize = wr->wr_inodescnt * sizeof(wr->wr_inodes[0]);
3181 
3182 	KASSERT(sizeof(wr->wr_inodes[0]) == sizeof(wc->wc_inodes[0]));
3183 
3184 	/*
3185 	 * Keep track of where we found this so the location won't be
3186 	 * overwritten.
3187 	 */
3188 	if (wc->wc_clear) {
3189 		wr->wr_inodestail = oldoff;
3190 		wr->wr_inodescnt = 0;
3191 		if (wr->wr_inodes != NULL) {
3192 			wapbl_free(wr->wr_inodes, oldsize);
3193 			wr->wr_inodes = NULL;
3194 		}
3195 	}
3196 	wr->wr_inodeshead = newoff;
3197 	if (wc->wc_inocnt == 0)
3198 		return;
3199 
3200 	new_inodes = wapbl_alloc((wr->wr_inodescnt + wc->wc_inocnt) *
3201 	    sizeof(wr->wr_inodes[0]));
3202 	if (wr->wr_inodes != NULL) {
3203 		memcpy(new_inodes, wr->wr_inodes, oldsize);
3204 		wapbl_free(wr->wr_inodes, oldsize);
3205 	}
3206 	wr->wr_inodes = new_inodes;
3207 	memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
3208 	    wc->wc_inocnt * sizeof(wr->wr_inodes[0]));
3209 	wr->wr_inodescnt += wc->wc_inocnt;
3210 }
3211 
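/*
 * wapbl_replay_process(wr, head, tail)
 *
 *	Walk the log from tail to head, dispatching each record to the
 *	matching wapbl_replay_process_* routine.  Fail with EFTYPE on
 *	an unknown record type or an inconsistent record length.
 */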
3212 static int
3213 wapbl_replay_process(struct wapbl_replay *wr, off_t head, off_t tail)
3214 {
3215 	off_t off;
3216 	int error;
3217 
3218 	int logblklen = 1 << wr->wr_log_dev_bshift;
3219 
3220 	wapbl_blkhash_clear(wr);
3221 
3222 	off = tail;
3223 	while (off != head) {
3224 		struct wapbl_wc_null *wcn;
3225 		off_t saveoff = off;
3226 		error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
3227 		if (error)
3228 			goto errout;
3229 		wcn = (struct wapbl_wc_null *)wr->wr_scratch;
3230 		switch (wcn->wc_type) {
3231 		case WAPBL_WC_BLOCKS:
3232 			wapbl_replay_process_blocks(wr, &off);
3233 			break;
3234 
3235 		case WAPBL_WC_REVOCATIONS:
3236 			wapbl_replay_process_revocations(wr);
3237 			break;
3238 
3239 		case WAPBL_WC_INODES:
3240 			wapbl_replay_process_inodes(wr, saveoff, off);
3241 			break;
3242 
3243 		default:
3244 			printf("Unrecognized wapbl type: 0x%08x\n",
3245 			    wcn->wc_type);
3246 			error = SET_ERROR(EFTYPE);
3247 			goto errout;
3248 		}
3249 		wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3250 		if (off != saveoff) {
3251 			printf("wapbl_replay: corrupted records\n");
3252 			error = SET_ERROR(EFTYPE);
3253 			goto errout;
3254 		}
3255 	}
3256 	return 0;
3257 
3258 errout:
3259 	wapbl_blkhash_clear(wr);
3260 	return error;
3261 }
3262 
3263 #if 0
3264 int
3265 wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
3266 {
3267 	off_t off;
3268 	int mismatchcnt = 0;
3269 	int logblklen = 1 << wr->wr_log_dev_bshift;
3270 	int fsblklen = 1 << wr->wr_fs_dev_bshift;
3271 	void *scratch1 = wapbl_alloc(MAXBSIZE);
3272 	void *scratch2 = wapbl_alloc(MAXBSIZE);
3273 	int error = 0;
3274 
3275 	KDASSERT(wapbl_replay_isopen(wr));
3276 
3277 	off = wch->wc_tail;
3278 	while (off != wch->wc_head) {
3279 		struct wapbl_wc_null *wcn;
3280 #ifdef DEBUG
3281 		off_t saveoff = off;
3282 #endif
3283 		error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
3284 		if (error)
3285 			goto out;
3286 		wcn = (struct wapbl_wc_null *)wr->wr_scratch;
3287 		switch (wcn->wc_type) {
3288 		case WAPBL_WC_BLOCKS: {
3289 			struct wapbl_wc_blocklist *wc =
3290 			    (struct wapbl_wc_blocklist *)wr->wr_scratch;
3291 			int i;
3292 			for (i = 0; i < wc->wc_blkcount; i++) {
3293 				int foundcnt = 0;
3294 				int dirtycnt = 0;
3295 				int j, n;
3296 				/*
3297 				 * Check each physical block against the
3298 				 * hashtable independently.
3299 				 */
3300 				n = wc->wc_blocks[i].wc_dlen >>
3301 				    wch->wc_fs_dev_bshift;
3302 				for (j = 0; j < n; j++) {
3303 					struct wapbl_blk *wb =
3304 					    wapbl_blkhash_get(wr,
3305 						wapbl_block_daddr(wc, i, j,
3306 						    fsblklen));
3307 					if (wb && wb->wb_off == off) {
3308 						foundcnt++;
3309 						error =
3310 						    wapbl_circ_read(wr,
3311 							scratch1, fsblklen,
3312 							&off);
3313 						if (error)
3314 							goto out;
3315 						error =
3316 						    wapbl_read(scratch2,
3317 							fsblklen, fsdevvp,
3318 							wb->wb_blk);
3319 						if (error)
3320 							goto out;
3321 						if (memcmp(scratch1,
3322 							scratch2,
3323 							fsblklen)) {
3324 							printf("wapbl_verify:"
3325 							    " mismatch block"
3326 							    " %"PRId64
3327 							    " at off"
3328 							    " %"PRIdMAX"\n",
3329 							    wb->wb_blk,
3330 							    (intmax_t)off);
3331 							dirtycnt++;
3332 							mismatchcnt++;
3333 						}
3334 					} else {
3335 						wapbl_circ_advance(wr,
3336 						    fsblklen, &off);
3337 					}
3338 				}
3339 #if 0
3340 				/*
3341 				 * If all of the blocks in an entry
3342 				 * are clean, then remove all of its
3343 				 * blocks from the hashtable since they
3344 				 * never will need replay.
3345 				 */
3346 				if (foundcnt != 0 && dirtycnt == 0) {
3347 					off = saveoff;
3348 					wapbl_circ_advance(wr, logblklen,
3349 					    &off);
3350 					for (j = 0; j < n; j++) {
3351 						struct wapbl_blk *wb =
3352 						    wapbl_blkhash_get(wr,
3353 							wapbl_block_daddr(wc,
3354 							    i, j, fsblklen));
3355 						if (wb &&
3356 						    (wb->wb_off == off)) {
3357 							wapbl_blkhash_rem(wr,
3358 							    wb->wb_blk);
3359 						}
3360 						wapbl_circ_advance(wr,
3361 						    fsblklen, &off);
3362 					}
3363 				}
3364 #endif
3365 			}
3366 		}
3367 			break;
3368 		case WAPBL_WC_REVOCATIONS:
3369 		case WAPBL_WC_INODES:
3370 			break;
3371 		default:
3372 			KASSERT(0);
3373 		}
3374 #ifdef DEBUG
3375 		wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
3376 		KASSERT(off == saveoff);
3377 #endif
3378 	}
3379 out:
3380 	wapbl_free(scratch1, MAXBSIZE);
3381 	wapbl_free(scratch2, MAXBSIZE);
3382 	if (!error && mismatchcnt)
3383 		error = SET_ERROR(EFTYPE);
3384 	return error;
3385 }
3386 #endif
3387 
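/*
 * wapbl_replay_write(wr, fsdevvp)
 *
 *	Write every block recorded in the replay block hash table from
 *	its most recent copy in the log to its home location on
 *	fsdevvp.
 */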
3388 int
3389 wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
3390 {
3391 	struct wapbl_blk *wb;
3392 	size_t i;
3393 	off_t off;
3394 	void *scratch;
3395 	int error = 0;
3396 	int fsblklen = 1 << wr->wr_fs_dev_bshift;
3397 
3398 	KDASSERT(wapbl_replay_isopen(wr));
3399 
3400 	scratch = wapbl_alloc(MAXBSIZE);
3401 
3402 	for (i = 0; i <= wr->wr_blkhashmask; ++i) {
3403 		LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
3404 			off = wb->wb_off;
3405 			error = wapbl_circ_read(wr, scratch, fsblklen, &off);
3406 			if (error)
3407 				break;
3408 			error = wapbl_write(scratch, fsblklen, fsdevvp,
3409 			    wb->wb_blk);
3410 			if (error)
3411 				break;
3412 		}
3413 	}
3414 
3415 	wapbl_free(scratch, MAXBSIZE);
3416 	return error;
3417 }
3418 
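/*
 * wapbl_replay_can_read(wr, blk, len)
 *
 *	Return nonzero if the log holds a copy of a block in the range
 *	of len bytes starting at blk, i.e. if reads of that range
 *	should consult the log.
 */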
3419 int
3420 wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
3421 {
3422 	int fsblklen = 1 << wr->wr_fs_dev_bshift;
3423 
3424 	KDASSERT(wapbl_replay_isopen(wr));
3425 	KASSERT((len % fsblklen) == 0);
3426 
3427 	while (len != 0) {
3428 		struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3429 		if (wb)
3430 			return 1;
3431 		len -= fsblklen;
3432 	}
3433 	return 0;
3434 }
3435 
3436 int
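/*
 * wapbl_replay_read(wr, data, blk, len)
 *
 *	Copy into data the most recently logged contents of the blocks
 *	in the range of len bytes starting at blk; blocks with no copy
 *	in the log are left untouched in data.
 */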
3437 wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
3438 {
3439 	int fsblklen = 1 << wr->wr_fs_dev_bshift;
3440 
3441 	KDASSERT(wapbl_replay_isopen(wr));
3442 
3443 	KASSERT((len % fsblklen) == 0);
3444 
3445 	while (len != 0) {
3446 		struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
3447 		if (wb) {
3448 			off_t off = wb->wb_off;
3449 			int error;
3450 			error = wapbl_circ_read(wr, data, fsblklen, &off);
3451 			if (error)
3452 				return error;
3453 		}
3454 		data = (uint8_t *)data + fsblklen;
3455 		len -= fsblklen;
3456 		blk++;
3457 	}
3458 	return 0;
3459 }
3460 
3461 #ifdef _KERNEL
3462 
3463 MODULE(MODULE_CLASS_VFS, wapbl, NULL);
3464 
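/*
 * wapbl_modcmd(cmd, arg)
 *
 *	Module control entry point: initialize wapbl on load, tear it
 *	down on unload.
 */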
3465 static int
3466 wapbl_modcmd(modcmd_t cmd, void *arg)
3467 {
3468 
3469 	switch (cmd) {
3470 	case MODULE_CMD_INIT:
3471 		wapbl_init();
3472 		return 0;
3473 	case MODULE_CMD_FINI:
3474 		return wapbl_fini();
3475 	default:
3476 		return SET_ERROR(ENOTTY);
3477 	}
3478 }
3479 
3480 #endif /* _KERNEL */
3481