xref: /dflybsd-src/sys/vfs/hammer/hammer_io.c (revision a9656fbcd49c376aba5e04370d8b0f1fa96e063c)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.55 2008/09/15 17:02:49 dillon Exp $
35  */
36 /*
37  * IO Primitives and buffer cache management
38  *
39  * All major data-tracking structures in HAMMER contain a struct hammer_io
40  * which is used to manage their backing store.  We use filesystem buffers
41  * for backing store and we leave them passively associated with their
42  * HAMMER structures.
43  *
44  * If the kernel tries to destroy a passively associated buf which we cannot
45  * yet let go we set B_LOCKED in the buffer and then actively release it
46  * later when we can.
47  */
48 
49 #include "hammer.h"
50 #include <sys/fcntl.h>
51 #include <sys/nlookup.h>
52 #include <sys/buf.h>
53 #include <sys/buf2.h>
54 
55 static void hammer_io_modify(hammer_io_t io, int count);
56 static void hammer_io_deallocate(struct buf *bp);
57 #if 0
58 static void hammer_io_direct_read_complete(struct bio *nbio);
59 #endif
60 static void hammer_io_direct_write_complete(struct bio *nbio);
61 static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
62 static void hammer_io_set_modlist(struct hammer_io *io);
63 static void hammer_io_flush_mark(hammer_volume_t volume);
64 
65 
66 /*
67  * Initialize a new, already-zero'd hammer_io structure, or reinitialize
68  * an existing hammer_io structure which may have switched to another type.
69  */
70 void
71 hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
72 {
73 	io->volume = volume;
74 	io->hmp = volume->io.hmp;
75 	io->type = type;
76 }
77 
78 /*
79  * Determine if an io can be clustered for the storage cdev.  We have to
80  * be careful to avoid creating overlapping buffers.
81  *
82  * (1) Any clustering is limited to within a largeblock, since going into
83  *     an adjacent largeblock will change the zone.
84  *
85  * (2) The large-data zone can contain mixed buffer sizes.  Other zones
86  *     contain only HAMMER_BUFSIZE-sized buffers (16K).
87  */
88 static int
89 hammer_io_clusterable(hammer_io_t io, hammer_off_t *limitp)
90 {
91 	hammer_buffer_t buffer;
92 	hammer_off_t eoz;
93 
94 	/*
95 	 * Can't cluster non-hammer_buffer_t's
96 	 */
97 	if (io->type != HAMMER_STRUCTURE_DATA_BUFFER &&
98 	    io->type != HAMMER_STRUCTURE_META_BUFFER &&
99 	    io->type != HAMMER_STRUCTURE_UNDO_BUFFER) {
100 		return(0);
101 	}
102 
103 	/*
104 	 * We cannot cluster the large-data zone.  This primarily targets
105 	 * the reblocker.  The normal file handling code will still cluster
106 	 * file reads via file vnodes.
107 	 */
108 	buffer = (void *)io;
109 	if ((buffer->zoneX_offset & HAMMER_OFF_ZONE_MASK) ==
110 	    HAMMER_ZONE_LARGE_DATA) {
111 		return(0);
112 	}
113 
114 	/*
115 	 * Do not allow the cluster operation to cross a largeblock
116 	 * boundary.
117 	 */
118 	eoz = (io->offset + HAMMER_LARGEBLOCK_SIZE64 - 1) &
119 		~HAMMER_LARGEBLOCK_MASK64;
120 	if (*limitp > eoz)
121 		*limitp = eoz;
122 	return(1);
123 }
124 
125 /*
126  * Helper routine to disassociate a buffer cache buffer from an I/O
127  * structure.  The buffer is unlocked and marked appropriately for reclamation.
128  *
129  * The io may have 0 or 1 references depending on who called us.  The
130  * caller is responsible for dealing with the refs.
131  *
132  * This call can only be made when no action is required on the buffer.
133  *
134  * The caller must own the buffer and the IO must indicate that the
135  * structure no longer owns it (io.released != 0).
136  */
137 static void
138 hammer_io_disassociate(hammer_io_structure_t iou)
139 {
140 	struct buf *bp = iou->io.bp;
141 
142 	KKASSERT(iou->io.released);
143 	KKASSERT(iou->io.modified == 0);
144 	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
145 	buf_dep_init(bp);
146 	iou->io.bp = NULL;
147 
148 	/*
149 	 * If the buffer was locked someone wanted to get rid of it.
150 	 */
151 	if (bp->b_flags & B_LOCKED) {
152 		--hammer_count_io_locked;
153 		bp->b_flags &= ~B_LOCKED;
154 	}
155 	if (iou->io.reclaim) {
156 		bp->b_flags |= B_NOCACHE|B_RELBUF;
157 		iou->io.reclaim = 0;
158 	}
159 
160 	switch(iou->io.type) {
161 	case HAMMER_STRUCTURE_VOLUME:
162 		iou->volume.ondisk = NULL;
163 		break;
164 	case HAMMER_STRUCTURE_DATA_BUFFER:
165 	case HAMMER_STRUCTURE_META_BUFFER:
166 	case HAMMER_STRUCTURE_UNDO_BUFFER:
167 		iou->buffer.ondisk = NULL;
168 		break;
169 	case HAMMER_STRUCTURE_DUMMY:
170 		panic("hammer_io_disassociate: bad io type");
171 		break;
172 	}
173 }
174 
175 /*
176  * Wait for any physical IO to complete
177  *
178  * XXX we aren't interlocked against a spinlock or anything so there
179  *     is a small window in the interlock / io->running == 0 test.
180  */
181 void
182 hammer_io_wait(hammer_io_t io)
183 {
184 	if (io->running) {
185 		for (;;) {
186 			io->waiting = 1;
187 			tsleep_interlock(io, 0);
188 			if (io->running == 0)
189 				break;
190 			tsleep(io, PINTERLOCKED, "hmrflw", hz);
191 			if (io->running == 0)
192 				break;
193 		}
194 	}
195 }
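
/*
 * (The tsleep_interlock()/PINTERLOCKED pairing above registers the sleep
 *  channel before io->running is re-tested, so a wakeup issued by
 *  hammer_io_complete(), which clears io->running and then wakeup()s the
 *  io when io->waiting is set, is not lost between the re-test and the
 *  tsleep() call.)
 */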
196 
197 /*
198  * Wait for all currently queued HAMMER-initiated I/Os to complete.
199  *
200  * This is not supposed to count direct I/Os but some can leak
201  * through (for non-full-sized direct I/Os).
202  */
203 void
204 hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
205 {
206 	struct hammer_io iodummy;
207 	hammer_io_t io;
208 
209 	/*
210 	 * Degenerate case, no I/O is running
211 	 */
212 	crit_enter();
213 	if (TAILQ_EMPTY(&hmp->iorun_list)) {
214 		crit_exit();
215 		if (doflush)
216 			hammer_io_flush_sync(hmp);
217 		return;
218 	}
219 	bzero(&iodummy, sizeof(iodummy));
220 	iodummy.type = HAMMER_STRUCTURE_DUMMY;
221 
222 	/*
223 	 * Add placemarker and then wait until it becomes the head of
224 	 * the list.
225 	 */
226 	TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry);
227 	while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) {
228 		tsleep(&iodummy, 0, ident, 0);
229 	}
230 
231 	/*
232 	 * Chain in case several placemarkers are present.
233 	 */
234 	TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry);
235 	io = TAILQ_FIRST(&hmp->iorun_list);
236 	if (io && io->type == HAMMER_STRUCTURE_DUMMY)
237 		wakeup(io);
238 	crit_exit();
239 
240 	if (doflush)
241 		hammer_io_flush_sync(hmp);
242 }
243 
244 /*
245  * Clear a flagged error condition on an I/O buffer.  The caller must hold
246  * its own ref on the buffer.
247  */
248 void
249 hammer_io_clear_error(struct hammer_io *io)
250 {
251 	if (io->ioerror) {
252 		io->ioerror = 0;
253 		hammer_rel(&io->lock);
254 		KKASSERT(hammer_isactive(&io->lock));
255 	}
256 }
257 
258 /*
259  * This is an advisory function only which tells the buffer cache
260  * the bp is not a meta-data buffer, even though it is backed by
261  * a block device.
262  *
263  * This is used by HAMMER's reblocking code to avoid trying to
264  * swapcache the filesystem's data when it is read or written
265  * by the reblocking code.
266  */
267 void
268 hammer_io_notmeta(hammer_buffer_t buffer)
269 {
270 	buffer->io.bp->b_flags |= B_NOTMETA;
271 }
272 
273 
274 #define HAMMER_MAXRA	4
275 
276 /*
277  * Load bp for a HAMMER structure.  The io must be exclusively locked by
278  * the caller.
279  *
280  * This routine is mostly used on meta-data and small-data blocks.  Generally
281  * speaking HAMMER assumes some locality of reference and will cluster
282  * a 64K read.
283  *
284  * Note that the clustering which occurs here is clustering within the
285  * block device... typically meta-data and small-file data.  Regular
286  * file clustering is different and handled in hammer_vnops.c
287  */
288 int
289 hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
290 {
291 	struct buf *bp;
292 	int   error;
293 
294 	if ((bp = io->bp) == NULL) {
295 		hammer_count_io_running_read += io->bytes;
296 		if (hammer_cluster_enable &&
297 		    hammer_io_clusterable(io, &limit)) {
298 			error = cluster_read(devvp, limit,
299 					     io->offset, io->bytes,
300 					     HAMMER_CLUSTER_SIZE,
301 					     HAMMER_CLUSTER_SIZE,
302 					     &io->bp);
303 		} else {
304 			error = bread(devvp, io->offset, io->bytes, &io->bp);
305 		}
306 		hammer_stats_disk_read += io->bytes;
307 		hammer_count_io_running_read -= io->bytes;
308 
309 		/*
310 		 * The code generally assumes b_ops/b_dep has been set-up,
311 		 * even if we error out here.
312 		 */
313 		bp = io->bp;
314 		bp->b_ops = &hammer_bioops;
315 		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
316 		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
317 		BUF_KERNPROC(bp);
318 		KKASSERT(io->modified == 0);
319 		KKASSERT(io->running == 0);
320 		KKASSERT(io->waiting == 0);
321 		io->released = 0;	/* we hold an active lock on bp */
322 	} else {
323 		error = 0;
324 	}
325 	return(error);
326 }
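
/*
 * Typical caller pattern (a sketch; the buffer-load path in
 * hammer_ondisk.c is assumed to look roughly like this):
 *
 *	error = hammer_io_read(volume->devvp, &buffer->io, limit);
 *	if (error == 0)
 *		buffer->ondisk = (void *)buffer->io.bp->b_data;
 *
 * On success the caller owns the bp (io->released == 0) and normally
 * hands it back via hammer_io_release() when finished with it.
 */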
327 
328 /*
329  * Similar to hammer_io_read() but returns a zero'd out buffer instead.
330  * Must be called with the IO exclusively locked.
331  *
332  * vfs_bio_clrbuf() is kinda nasty; enforce serialization against background
333  * I/O by forcing the buffer to not be in a released state before calling
334  * it.
335  *
336  * This function will also mark the IO as modified but it will not
337  * increment the modify_refs count.
338  */
339 int
340 hammer_io_new(struct vnode *devvp, struct hammer_io *io)
341 {
342 	struct buf *bp;
343 
344 	if ((bp = io->bp) == NULL) {
345 		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
346 		bp = io->bp;
347 		bp->b_ops = &hammer_bioops;
348 		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
349 		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
350 		io->released = 0;
351 		KKASSERT(io->running == 0);
352 		io->waiting = 0;
353 		BUF_KERNPROC(bp);
354 	} else {
355 		if (io->released) {
356 			regetblk(bp);
357 			BUF_KERNPROC(bp);
358 			io->released = 0;
359 		}
360 	}
361 	hammer_io_modify(io, 0);
362 	vfs_bio_clrbuf(bp);
363 	return(0);
364 }
365 
366 /*
367  * Advance the activity count on the underlying buffer because
368  * HAMMER does not getblk/brelse on every access.
369  */
370 void
371 hammer_io_advance(struct hammer_io *io)
372 {
373 	if (io->bp)
374 		buf_act_advance(io->bp);
375 }
376 
377 /*
378  * Remove potential device level aliases against buffers managed by high level
379  * vnodes.  Aliases can also be created due to mixed buffer sizes or via
380  * direct access to the backing store device.
381  *
382  * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
383  * does not exist its backing VM pages might, and we have to invalidate
384  * those as well or a getblk() will reinstate them.
385  *
386  * Buffer cache buffers associated with hammer_buffers cannot be
387  * invalidated.
388  */
389 int
390 hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
391 {
392 	hammer_io_structure_t iou;
393 	hammer_off_t phys_offset;
394 	struct buf *bp;
395 	int error;
396 
397 	phys_offset = volume->ondisk->vol_buf_beg +
398 		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
399 	crit_enter();
400 	if ((bp = findblk(volume->devvp, phys_offset, FINDBLK_TEST)) != NULL)
401 		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
402 	else
403 		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
404 	if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
405 #if 0
406 		hammer_ref(&iou->io.lock);
407 		hammer_io_clear_modify(&iou->io, 1);
408 		bundirty(bp);
409 		iou->io.released = 0;
410 		BUF_KERNPROC(bp);
411 		iou->io.reclaim = 1;
412 		iou->io.waitdep = 1;
413 		KKASSERT(hammer_isactive(&iou->io.lock) == 1);
414 		hammer_rel_buffer(&iou->buffer, 0);
415 		/*hammer_io_deallocate(bp);*/
416 #endif
417 		bqrelse(bp);
418 		error = EAGAIN;
419 	} else {
420 		KKASSERT((bp->b_flags & B_LOCKED) == 0);
421 		bundirty(bp);
422 		bp->b_flags |= B_NOCACHE|B_RELBUF;
423 		brelse(bp);
424 		error = 0;
425 	}
426 	crit_exit();
427 	return(error);
428 }
429 
430 /*
431  * This routine is called on the last reference to a hammer structure.
432  * The io must be interlocked with a refcount of zero.  The hammer structure
433  * will remain interlocked on return.
434  *
435  * This routine may return a non-NULL bp to the caller for disposal.
436  * The caller typically brelse()'s the bp.
437  *
438  * The bp may or may not still be passively associated with the IO.  It
439  * will remain passively associated if it is unreleasable (e.g. a modified
440  * meta-data buffer).
441  *
442  * The only requirement here is that modified meta-data and volume-header
443  * buffers may NOT be disassociated from the IO structure, and consequently
444  * we also leave such buffers actively associated with the IO if they already
445  * are (since the kernel can't do anything with them anyway).  Only the
446  * flusher is allowed to write such buffers out.  Modified pure-data and
447  * undo buffers are returned to the kernel but left passively associated
448  * so we can track when the kernel writes the bp out.
449  */
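/*
 * In short (a summary of the logic below):
 *
 * - flush/reclaim requested and the io ends up clean and idle: the bp
 *   is disassociated and returned to the caller.
 * - io still modified: data/undo buffers are bdwrite()n if we still own
 *   them and left passively associated, meta-data/volume buffers are
 *   left dirty for the flusher; NULL is returned.
 * - io clean and we still own the bp: ownership is released to the
 *   kernel (disassociating first if B_LOCKED or reclaim is set) and the
 *   bp is returned.
 * - io clean and already released: if no I/O is running the bp's LRU
 *   position is refreshed (disassociating if B_LOCKED or reclaim) and
 *   it is returned, otherwise NULL is returned.
 */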
450 struct buf *
451 hammer_io_release(struct hammer_io *io, int flush)
452 {
453 	union hammer_io_structure *iou = (void *)io;
454 	struct buf *bp;
455 
456 	if ((bp = io->bp) == NULL)
457 		return(NULL);
458 
459 	/*
460 	 * Try to flush a dirty IO to disk if asked to by the
461 	 * caller or if the kernel tried to flush the buffer in the past.
462 	 *
463 	 * Kernel-initiated flushes are only allowed for pure-data buffers.
464  * Meta-data and volume buffers can only be flushed explicitly
465 	 * by HAMMER.
466 	 */
467 	if (io->modified) {
468 		if (flush) {
469 			hammer_io_flush(io, 0);
470 		} else if (bp->b_flags & B_LOCKED) {
471 			switch(io->type) {
472 			case HAMMER_STRUCTURE_DATA_BUFFER:
473 				hammer_io_flush(io, 0);
474 				break;
475 			case HAMMER_STRUCTURE_UNDO_BUFFER:
476 				hammer_io_flush(io, hammer_undo_reclaim(io));
477 				break;
478 			default:
479 				break;
480 			}
481 		} /* else no explicit request to flush the buffer */
482 	}
483 
484 	/*
485 	 * Wait for the IO to complete if asked to.  This occurs when
486 	 * the buffer must be disposed of definitively during an umount
487 	 * or buffer invalidation.
488 	 */
489 	if (io->waitdep && io->running) {
490 		hammer_io_wait(io);
491 	}
492 
493 	/*
494 	 * Return control of the buffer to the kernel (with the proviso
495 	 * that our bioops can override kernel decisions with regard to
496 	 * the buffer).
497 	 */
498 	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
499 		/*
500 		 * Always disassociate the bp if an explicit flush
501 		 * was requested and the IO completed with no error
502 		 * (so unmount can really clean up the structure).
503 		 */
504 		if (io->released) {
505 			regetblk(bp);
506 			BUF_KERNPROC(bp);
507 		} else {
508 			io->released = 1;
509 		}
510 		hammer_io_disassociate((hammer_io_structure_t)io);
511 		/* return the bp */
512 	} else if (io->modified) {
513 		/*
514 		 * Only certain IO types can be released to the kernel if
515 		 * the buffer has been modified.
516 		 *
517 		 * volume and meta-data IO types may only be explicitly
518 		 * flushed by HAMMER.
519 		 */
520 		switch(io->type) {
521 		case HAMMER_STRUCTURE_DATA_BUFFER:
522 		case HAMMER_STRUCTURE_UNDO_BUFFER:
523 			if (io->released == 0) {
524 				io->released = 1;
525 				bdwrite(bp);
526 			}
527 			break;
528 		default:
529 			break;
530 		}
531 		bp = NULL;	/* bp left associated */
532 	} else if (io->released == 0) {
533 		/*
534 		 * Clean buffers can be generally released to the kernel.
535 		 * We leave the bp passively associated with the HAMMER
536 		 * structure and use bioops to disconnect it later on
537 		 * if the kernel wants to discard the buffer.
538 		 *
539 		 * We can steal the structure's ownership of the bp.
540 		 */
541 		io->released = 1;
542 		if (bp->b_flags & B_LOCKED) {
543 			hammer_io_disassociate(iou);
544 			/* return the bp */
545 		} else {
546 			if (io->reclaim) {
547 				hammer_io_disassociate(iou);
548 				/* return the bp */
549 			} else {
550 				/* return the bp (bp passively associated) */
551 			}
552 		}
553 	} else {
554 		/*
555 		 * A released buffer is passively associated with our
556 		 * hammer_io structure.  The kernel cannot destroy it
557 		 * without making a bioops call.  If the kernel (B_LOCKED)
558 		 * or we (reclaim) requested that the buffer be destroyed
559 		 * we destroy it, otherwise we do a quick get/release to
560 		 * reset its position in the kernel's LRU list.
561 		 *
562 		 * Leaving the buffer passively associated allows us to
563 		 * use the kernel's LRU buffer flushing mechanisms rather
564 		 * then rolling our own.
565 		 * than rolling our own.
566 		 * XXX there are two ways of doing this.  We can re-acquire
567 		 * and passively release to reset the LRU, or not.
568 		 */
569 		if (io->running == 0) {
570 			regetblk(bp);
571 			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
572 				hammer_io_disassociate(iou);
573 				/* return the bp */
574 			} else {
575 				/* return the bp (bp passively associated) */
576 			}
577 		} else {
578 			/*
579 			 * bp is left passively associated but we do not
580 			 * try to reacquire it.  Interactions with the io
581 			 * structure will occur on completion of the bp's
582 			 * I/O.
583 			 */
584 			bp = NULL;
585 		}
586 	}
587 	return(bp);
588 }
589 
590 /*
591  * This routine is called with a locked IO when a flush is desired and
592  * no other references to the structure exist other than ours.  This
593  * routine is ONLY called when HAMMER believes it is safe to flush a
594  * potentially modified buffer out.
595  */
596 void
597 hammer_io_flush(struct hammer_io *io, int reclaim)
598 {
599 	struct buf *bp;
600 
601 	/*
602 	 * Degenerate case - nothing to flush if nothing is dirty.
603 	 */
604 	if (io->modified == 0) {
605 		return;
606 	}
607 
608 	KKASSERT(io->bp);
609 	KKASSERT(io->modify_refs <= 0);
610 
611 	/*
612 	 * Acquire ownership of the bp, particularly before we clear our
613 	 * modified flag.
614 	 *
615 	 * We are going to bawrite() this bp.  Don't leave a window where
616 	 * io->released is set, we actually own the bp rather than our
617 	 * buffer.
618 	 */
619 	bp = io->bp;
620 	if (io->released) {
621 		regetblk(bp);
622 		/* BUF_KERNPROC(io->bp); */
623 		/* io->released = 0; */
624 		KKASSERT(io->released);
625 		KKASSERT(io->bp == bp);
626 	}
627 	io->released = 1;
628 
629 	if (reclaim) {
630 		io->reclaim = 1;
631 		if ((bp->b_flags & B_LOCKED) == 0) {
632 			bp->b_flags |= B_LOCKED;
633 			++hammer_count_io_locked;
634 		}
635 	}
636 
637 	/*
638 	 * Acquire exclusive access to the bp and then clear the modified
639 	 * state of the buffer prior to issuing I/O to interlock any
640 	 * modifications made while the I/O is in progress.  This shouldn't
641 	 * happen anyway but losing data would be worse.  The modified bit
642 	 * will be rechecked after the IO completes.
643 	 *
644 	 * NOTE: This call also finalizes the buffer's content (inval == 0).
645 	 *
646 	 * This is only legal when lock.refs == 1 (otherwise we might clear
647 	 * the modified bit while there are still users of the cluster
648 	 * modifying the data).
649 	 *
650 	 * Do this before potentially blocking so any attempt to modify the
651 	 * ondisk while we are blocked blocks waiting for us.
652 	 */
653 	hammer_ref(&io->lock);
654 	hammer_io_clear_modify(io, 0);
655 	hammer_rel(&io->lock);
656 
657 	if (hammer_debug_io & 0x0002)
658 		kprintf("hammer io_write %016jx\n", bp->b_bio1.bio_offset);
659 
660 	/*
661 	 * Transfer ownership to the kernel and initiate I/O.
662 	 */
663 	io->running = 1;
664 	io->hmp->io_running_space += io->bytes;
665 	TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
666 	hammer_count_io_running_write += io->bytes;
667 	bawrite(bp);
668 	hammer_io_flush_mark(io->volume);
669 }
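
/*
 * The accounting set up above (io->running, io_running_space,
 * hammer_count_io_running_write, and the iorun_list entry) is unwound
 * by hammer_io_complete() when the bawrite() finishes.  Callers that
 * need to throttle against this backlog use hammer_io_limit_backlog().
 */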
670 
671 /************************************************************************
672  *				BUFFER DIRTYING				*
673  ************************************************************************
674  *
675  * These routines deal with dependencies created when IO buffers get
676  * modified.  The caller must call hammer_modify_*() on a referenced
677  * HAMMER structure prior to modifying its on-disk data.
678  *
679  * Any intent to modify an IO buffer acquires the related bp and imposes
680  * various write ordering dependancies.
681  */
682 
683 /*
684  * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
685  * are locked until the flusher can deal with them; pure data buffers
686  * can be written out.
687  */
688 static
689 void
690 hammer_io_modify(hammer_io_t io, int count)
691 {
692 	/*
693 	 * io->modify_refs must be >= 0
694 	 */
695 	while (io->modify_refs < 0) {
696 		io->waitmod = 1;
697 		tsleep(io, 0, "hmrmod", 0);
698 	}
699 
700 	/*
701 	 * Shortcut if nothing to do.
702 	 */
703 	KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL);
704 	io->modify_refs += count;
705 	if (io->modified && io->released == 0)
706 		return;
707 
708 	hammer_lock_ex(&io->lock);
709 	if (io->modified == 0) {
710 		hammer_io_set_modlist(io);
711 		io->modified = 1;
712 	}
713 	if (io->released) {
714 		regetblk(io->bp);
715 		BUF_KERNPROC(io->bp);
716 		io->released = 0;
717 		KKASSERT(io->modified != 0);
718 	}
719 	hammer_unlock(&io->lock);
720 }
721 
722 static __inline
723 void
724 hammer_io_modify_done(hammer_io_t io)
725 {
726 	KKASSERT(io->modify_refs > 0);
727 	--io->modify_refs;
728 	if (io->modify_refs == 0 && io->waitmod) {
729 		io->waitmod = 0;
730 		wakeup(io);
731 	}
732 }
733 
734 void
735 hammer_io_write_interlock(hammer_io_t io)
736 {
737 	while (io->modify_refs != 0) {
738 		io->waitmod = 1;
739 		tsleep(io, 0, "hmrmod", 0);
740 	}
741 	io->modify_refs = -1;
742 }
743 
744 void
745 hammer_io_done_interlock(hammer_io_t io)
746 {
747 	KKASSERT(io->modify_refs == -1);
748 	io->modify_refs = 0;
749 	if (io->waitmod) {
750 		io->waitmod = 0;
751 		wakeup(io);
752 	}
753 }
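
/*
 * io->modify_refs convention used above: a positive count means one or
 * more hammer_modify_*() brackets are in progress, 0 means idle, and -1
 * means the write interlock is held.  Typical interlock usage (sketch):
 *
 *	hammer_io_write_interlock(io);
 *	... stage the write while modifications are excluded ...
 *	hammer_io_done_interlock(io);
 */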
754 
755 /*
756  * Caller intends to modify a volume's ondisk structure.
757  *
758  * This is only allowed if we are the flusher or we have a ref on the
759  * sync_lock.
760  */
761 void
762 hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
763 		     void *base, int len)
764 {
765 	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
766 
767 	hammer_io_modify(&volume->io, 1);
768 	if (len) {
769 		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
770 		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
771 		hammer_generate_undo(trans,
772 			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
773 			 base, len);
774 	}
775 }
776 
777 /*
778  * Caller intends to modify a buffer's ondisk structure.
779  *
780  * This is only allowed if we are the flusher or we have a ref on the
781  * sync_lock.
782  */
783 void
784 hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
785 		     void *base, int len)
786 {
787 	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
788 
789 	hammer_io_modify(&buffer->io, 1);
790 	if (len) {
791 		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
792 		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
793 		hammer_generate_undo(trans,
794 				     buffer->zone2_offset + rel_offset,
795 				     base, len);
796 	}
797 }
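
/*
 * Typical modification bracket (a sketch; 'ondisk' and 'field' are
 * illustrative only):
 *
 *	hammer_modify_buffer(trans, buffer, &ondisk->field,
 *			     sizeof(ondisk->field));
 *	ondisk->field = new_value;
 *	hammer_modify_buffer_done(buffer);
 *
 * The undo record covering the range is generated before the caller
 * changes the data, which is what allows crash recovery to roll the
 * media back to a consistent state.
 */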
798 
799 void
800 hammer_modify_volume_done(hammer_volume_t volume)
801 {
802 	hammer_io_modify_done(&volume->io);
803 }
804 
805 void
806 hammer_modify_buffer_done(hammer_buffer_t buffer)
807 {
808 	hammer_io_modify_done(&buffer->io);
809 }
810 
811 /*
812  * Mark an entity as not being dirty any more and finalize any
813  * delayed adjustments to the buffer.
814  *
815  * Delayed adjustments are an important performance enhancement, allowing
816  * us to avoid recalculating B-Tree node CRCs over and over again when
817  * making bulk-modifications to the B-Tree.
818  *
819  * If inval is non-zero delayed adjustments are ignored.
820  *
821  * This routine may dereference related btree nodes and cause the
822  * buffer to be dereferenced.  The caller must own a reference on io.
823  */
824 void
825 hammer_io_clear_modify(struct hammer_io *io, int inval)
826 {
827 	if (io->modified == 0)
828 		return;
829 
830 	/*
831 	 * Take us off the mod-list and clear the modified bit.
832 	 */
833 	KKASSERT(io->mod_list != NULL);
834 	if (io->mod_list == &io->hmp->volu_list ||
835 	    io->mod_list == &io->hmp->meta_list) {
836 		io->hmp->locked_dirty_space -= io->bytes;
837 		hammer_count_dirtybufspace -= io->bytes;
838 	}
839 	TAILQ_REMOVE(io->mod_list, io, mod_entry);
840 	io->mod_list = NULL;
841 	io->modified = 0;
842 
843 	/*
844 	 * If this bit is not set there are no delayed adjustments.
845 	 */
846 	if (io->gencrc == 0)
847 		return;
848 	io->gencrc = 0;
849 
850 	/*
851 	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
852 	 * on the node (& underlying buffer).  Release the node after clearing
853 	 * the flag.
854 	 */
855 	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
856 		hammer_buffer_t buffer = (void *)io;
857 		hammer_node_t node;
858 
859 restart:
860 		TAILQ_FOREACH(node, &buffer->clist, entry) {
861 			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
862 				continue;
863 			node->flags &= ~HAMMER_NODE_NEEDSCRC;
864 			KKASSERT(node->ondisk);
865 			if (inval == 0)
866 				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
867 			hammer_rel_node(node);
868 			goto restart;
869 		}
870 	}
871 	/* caller must still have ref on io */
872 	KKASSERT(hammer_isactive(&io->lock));
873 }
874 
875 /*
876  * Clear the IO's modify list.  Even though the IO is no longer modified
877  * it may still be on the lose_list.  This routine is called just before
878  * the governing hammer_buffer is destroyed.
879  */
880 void
881 hammer_io_clear_modlist(struct hammer_io *io)
882 {
883 	KKASSERT(io->modified == 0);
884 	if (io->mod_list) {
885 		crit_enter();	/* biodone race against list */
886 		KKASSERT(io->mod_list == &io->hmp->lose_list);
887 		TAILQ_REMOVE(io->mod_list, io, mod_entry);
888 		io->mod_list = NULL;
889 		crit_exit();
890 	}
891 }
892 
893 static void
894 hammer_io_set_modlist(struct hammer_io *io)
895 {
896 	struct hammer_mount *hmp = io->hmp;
897 
898 	KKASSERT(io->mod_list == NULL);
899 
900 	switch(io->type) {
901 	case HAMMER_STRUCTURE_VOLUME:
902 		io->mod_list = &hmp->volu_list;
903 		hmp->locked_dirty_space += io->bytes;
904 		hammer_count_dirtybufspace += io->bytes;
905 		break;
906 	case HAMMER_STRUCTURE_META_BUFFER:
907 		io->mod_list = &hmp->meta_list;
908 		hmp->locked_dirty_space += io->bytes;
909 		hammer_count_dirtybufspace += io->bytes;
910 		break;
911 	case HAMMER_STRUCTURE_UNDO_BUFFER:
912 		io->mod_list = &hmp->undo_list;
913 		break;
914 	case HAMMER_STRUCTURE_DATA_BUFFER:
915 		io->mod_list = &hmp->data_list;
916 		break;
917 	case HAMMER_STRUCTURE_DUMMY:
918 		panic("hammer_io_set_modlist: bad io type");
919 		break;
920 	}
921 	TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
922 }
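
/*
 * Note that only volume and meta-data buffers are charged against
 * locked_dirty_space / hammer_count_dirtybufspace here;
 * hammer_io_clear_modify() reverses the accounting when the io is
 * removed from its mod list.
 */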
923 
924 /************************************************************************
925  *				HAMMER_BIOOPS				*
926  ************************************************************************
927  *
928  */
929 
930 /*
931  * Pre-IO initiation kernel callback - cluster build only
932  */
933 static void
934 hammer_io_start(struct buf *bp)
935 {
936 }
937 
938 /*
939  * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
940  *
941  * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
942  * may also be set if we were marking a cluster header open.  Only remove
943  * our dependency if the modified bit is clear.
944  */
945 static void
946 hammer_io_complete(struct buf *bp)
947 {
948 	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);
949 	struct hammer_mount *hmp = iou->io.hmp;
950 	struct hammer_io *ionext;
951 
952 	KKASSERT(iou->io.released == 1);
953 
954 	/*
955 	 * Deal with people waiting for I/O to drain
956 	 */
957 	if (iou->io.running) {
958 		/*
959 		 * Deal with critical write errors.  Once a critical error
960 		 * has been flagged in hmp the UNDO FIFO will not be updated.
961 		 * That way crash recover will give us a consistent
962 		 * That way crash recovery will give us a consistent
963 		 *
964 		 * Because of this we can throw away failed UNDO buffers.  If
965 		 * we throw away META or DATA buffers we risk corrupting
966 		 * the now read-only version of the filesystem visible to
967 		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
968 		 * by the kernel and ref the io so it doesn't get thrown
969 		 * away.
970 		 */
971 		if (bp->b_flags & B_ERROR) {
972 			hammer_critical_error(hmp, NULL, bp->b_error,
973 					      "while flushing meta-data");
974 			switch(iou->io.type) {
975 			case HAMMER_STRUCTURE_UNDO_BUFFER:
976 				break;
977 			default:
978 				if (iou->io.ioerror == 0) {
979 					iou->io.ioerror = 1;
980 					hammer_ref(&iou->io.lock);
981 				}
982 				break;
983 			}
984 			bp->b_flags &= ~B_ERROR;
985 			bundirty(bp);
986 #if 0
987 			hammer_io_set_modlist(&iou->io);
988 			iou->io.modified = 1;
989 #endif
990 		}
991 		hammer_stats_disk_write += iou->io.bytes;
992 		hammer_count_io_running_write -= iou->io.bytes;
993 		hmp->io_running_space -= iou->io.bytes;
994 		if (hmp->io_running_wakeup &&
995 		    hmp->io_running_space < hammer_limit_running_io / 2) {
996 		    hmp->io_running_wakeup = 0;
997 		    wakeup(&hmp->io_running_wakeup);
998 		}
999 		KKASSERT(hmp->io_running_space >= 0);
1000 		iou->io.running = 0;
1001 
1002 		/*
1003 		 * Remove from iorun list and wakeup any multi-io waiter(s).
1004 		 */
1005 		if (TAILQ_FIRST(&hmp->iorun_list) == &iou->io) {
1006 			ionext = TAILQ_NEXT(&iou->io, iorun_entry);
1007 			if (ionext && ionext->type == HAMMER_STRUCTURE_DUMMY)
1008 				wakeup(ionext);
1009 		}
1010 		TAILQ_REMOVE(&hmp->iorun_list, &iou->io, iorun_entry);
1011 	} else {
1012 		hammer_stats_disk_read += iou->io.bytes;
1013 	}
1014 
1015 	if (iou->io.waiting) {
1016 		iou->io.waiting = 0;
1017 		wakeup(iou);
1018 	}
1019 
1020 	/*
1021 	 * If B_LOCKED is set someone wanted to deallocate the bp at some
1022 	 * point, try to do it now.  The operation will fail if there are
1023 	 * refs or if hammer_io_deallocate() is unable to gain the
1024 	 * interlock.
1025 	 */
1026 	if (bp->b_flags & B_LOCKED) {
1027 		--hammer_count_io_locked;
1028 		bp->b_flags &= ~B_LOCKED;
1029 		hammer_io_deallocate(bp);
1030 		/* structure may be dead now */
1031 	}
1032 }
1033 
1034 /*
1035  * Callback from kernel when it wishes to deallocate a passively
1036  * associated structure.  This mostly occurs with clean buffers
1037  * but it may be possible for a holding structure to be marked dirty
1038  * while its buffer is passively associated.  The caller owns the bp.
1039  *
1040  * If we cannot disassociate we set B_LOCKED to prevent the buffer
1041  * from getting reused.
1042  *
1043  * WARNING: Because this can be called directly by getnewbuf we cannot
1044  * recurse into the tree.  If a bp cannot be immediately disassociated
1045  * our only recourse is to set B_LOCKED.
1046  *
1047  * WARNING: This may be called from an interrupt via hammer_io_complete()
1048  */
1049 static void
1050 hammer_io_deallocate(struct buf *bp)
1051 {
1052 	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);
1053 
1054 	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
1055 	if (hammer_try_interlock_norefs(&iou->io.lock) == 0) {
1056 		/*
1057 		 * We cannot safely disassociate a bp from a referenced
1058 		 * or interlocked HAMMER structure.
1059 		 */
1060 		bp->b_flags |= B_LOCKED;
1061 		++hammer_count_io_locked;
1062 	} else if (iou->io.modified) {
1063 		/*
1064 		 * It is not legal to disassociate a modified buffer.  This
1065 		 * case really shouldn't ever occur.
1066 		 */
1067 		bp->b_flags |= B_LOCKED;
1068 		++hammer_count_io_locked;
1069 		hammer_put_interlock(&iou->io.lock, 0);
1070 	} else {
1071 		/*
1072 		 * Disassociate the BP.  If the io has no refs left we
1073 		 * have to add it to the loose list.
1074 		 */
1075 		hammer_io_disassociate(iou);
1076 		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
1077 			KKASSERT(iou->io.bp == NULL);
1078 			KKASSERT(iou->io.mod_list == NULL);
1079 			crit_enter();	/* biodone race against list */
1080 			iou->io.mod_list = &iou->io.hmp->lose_list;
1081 			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
1082 			crit_exit();
1083 		}
1084 		hammer_put_interlock(&iou->io.lock, 1);
1085 	}
1086 }
1087 
1088 static int
1089 hammer_io_fsync(struct vnode *vp)
1090 {
1091 	return(0);
1092 }
1093 
1094 /*
1095  * NOTE: will not be called unless we tell the kernel about the
1096  * bioops.  Unused... we use the mount's VFS_SYNC instead.
1097  */
1098 static int
1099 hammer_io_sync(struct mount *mp)
1100 {
1101 	return(0);
1102 }
1103 
1104 static void
1105 hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
1106 {
1107 }
1108 
1109 /*
1110  * I/O pre-check for reading and writing.  HAMMER only uses this for
1111  * B_CACHE buffers so checkread just shouldn't happen, but if it does
1112  * allow it.
1113  *
1114  * Writing is a different case.  We don't want the kernel to try to write
1115  * out a buffer that HAMMER may be modifying passively or which has a
1116  * dependancy.  In addition, kernel-demanded writes can only proceed for
1117  * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
1118  * buffer types can only be explicitly written by the flusher.
1119  *
1120  * checkwrite will only be called for bdwrite()n buffers.  If we return
1121  * success the kernel is guaranteed to initiate the buffer write.
1122  */
1123 static int
1124 hammer_io_checkread(struct buf *bp)
1125 {
1126 	return(0);
1127 }
1128 
1129 static int
1130 hammer_io_checkwrite(struct buf *bp)
1131 {
1132 	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);
1133 
1134 	/*
1135 	 * This shouldn't happen under normal operation.
1136 	 */
1137 	if (io->type == HAMMER_STRUCTURE_VOLUME ||
1138 	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
1139 		if (!panicstr)
1140 			panic("hammer_io_checkwrite: illegal buffer");
1141 		if ((bp->b_flags & B_LOCKED) == 0) {
1142 			bp->b_flags |= B_LOCKED;
1143 			++hammer_count_io_locked;
1144 		}
1145 		return(1);
1146 	}
1147 
1148 	/*
1149 	 * We can only clear the modified bit if the IO is not currently
1150 	 * undergoing modification.  Otherwise we may miss changes.
1151 	 *
1152 	 * Only data and undo buffers can reach here.  These buffers do
1153 	 * not have terminal crc functions but we temporarily reference
1154 	 * the IO anyway, just in case.
1155 	 */
1156 	if (io->modify_refs == 0 && io->modified) {
1157 		hammer_ref(&io->lock);
1158 		hammer_io_clear_modify(io, 0);
1159 		hammer_rel(&io->lock);
1160 	} else if (io->modified) {
1161 		KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
1162 	}
1163 
1164 	/*
1165 	 * The kernel is going to start the IO, set io->running.
1166 	 */
1167 	KKASSERT(io->running == 0);
1168 	io->running = 1;
1169 	io->hmp->io_running_space += io->bytes;
1170 	TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
1171 	hammer_count_io_running_write += io->bytes;
1172 	return(0);
1173 }
1174 
1175 /*
1176  * Return non-zero if we wish to delay the kernel's attempt to flush
1177  * this buffer to disk.
1178  */
1179 static int
1180 hammer_io_countdeps(struct buf *bp, int n)
1181 {
1182 	return(0);
1183 }
1184 
1185 struct bio_ops hammer_bioops = {
1186 	.io_start	= hammer_io_start,
1187 	.io_complete	= hammer_io_complete,
1188 	.io_deallocate	= hammer_io_deallocate,
1189 	.io_fsync	= hammer_io_fsync,
1190 	.io_sync	= hammer_io_sync,
1191 	.io_movedeps	= hammer_io_movedeps,
1192 	.io_countdeps	= hammer_io_countdeps,
1193 	.io_checkread	= hammer_io_checkread,
1194 	.io_checkwrite	= hammer_io_checkwrite,
1195 };
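
/*
 * These callbacks only apply to buffers HAMMER has explicitly hooked:
 * hammer_io_read()/hammer_io_new() set bp->b_ops = &hammer_bioops and
 * link the io's worklist onto bp->b_dep, which is how the kernel finds
 * its way back to the owning hammer_io structure.
 */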
1196 
1197 /************************************************************************
1198  *				DIRECT IO OPS 				*
1199  ************************************************************************
1200  *
1201  * These functions operate directly on the buffer cache buffer associated
1202  * with a front-end vnode rather than a back-end device vnode.
1203  */
1204 
1205 /*
1206  * Read a buffer associated with a front-end vnode directly from the
1207  * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
1208  * we validate the CRC.
1209  *
1210  * We must check for the presence of a HAMMER buffer to handle the case
1211  * where the reblocker has rewritten the data (which it does via the HAMMER
1212  * buffer system, not via the high-level vnode buffer cache), but not yet
1213  * committed the buffer to the media.
1214  */
1215 int
1216 hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
1217 		      hammer_btree_leaf_elm_t leaf)
1218 {
1219 	hammer_off_t buf_offset;
1220 	hammer_off_t zone2_offset;
1221 	hammer_volume_t volume;
1222 	struct buf *bp;
1223 	struct bio *nbio;
1224 	int vol_no;
1225 	int error;
1226 
1227 	buf_offset = bio->bio_offset;
1228 	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
1229 		 HAMMER_ZONE_LARGE_DATA);
1230 
1231 	/*
1232 	 * The buffer cache may have an aliased buffer (the reblocker can
1233 	 * write them).  If it does we have to sync any dirty data before
1234 	 * we can build our direct-read.  This is a non-critical code path.
1235 	 */
1236 	bp = bio->bio_buf;
1237 	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);
1238 
1239 	/*
1240 	 * Resolve to a zone-2 offset.  The conversion just requires
1241 	 * munging the top 4 bits but we want to abstract it anyway
1242 	 * so the blockmap code can verify the zone assignment.
1243 	 */
1244 	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1245 	if (error)
1246 		goto done;
1247 	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
1248 		 HAMMER_ZONE_RAW_BUFFER);
1249 
1250 	/*
1251 	 * Resolve volume and raw-offset for 3rd level bio.  The
1252 	 * offset will be specific to the volume.
1253 	 */
1254 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
1255 	volume = hammer_get_volume(hmp, vol_no, &error);
1256 	if (error == 0 && zone2_offset >= volume->maxbuf_off)
1257 		error = EIO;
1258 
1259 	if (error == 0) {
1260 		/*
1261 		 * 3rd level bio
1262 		 */
1263 		nbio = push_bio(bio);
1264 		nbio->bio_offset = volume->ondisk->vol_buf_beg +
1265 				   (zone2_offset & HAMMER_OFF_SHORT_MASK);
1266 #if 0
1267 		/*
1268 		 * XXX disabled - our CRC check doesn't work if the OS
1269 		 * does bogus_page replacement on the direct-read.
1270 		 */
1271 		if (leaf && hammer_verify_data) {
1272 			nbio->bio_done = hammer_io_direct_read_complete;
1273 			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
1274 		}
1275 #endif
1276 		hammer_stats_disk_read += bp->b_bufsize;
1277 		vn_strategy(volume->devvp, nbio);
1278 	}
1279 	hammer_rel_volume(volume, 0);
1280 done:
1281 	if (error) {
1282 		kprintf("hammer_direct_read: failed @ %016llx\n",
1283 			(long long)zone2_offset);
1284 		bp->b_error = error;
1285 		bp->b_flags |= B_ERROR;
1286 		biodone(bio);
1287 	}
1288 	return(error);
1289 }
1290 
1291 #if 0
1292 /*
1293  * On completion of the BIO this callback must check the data CRC
1294  * and chain to the previous bio.
1295  */
1296 static
1297 void
1298 hammer_io_direct_read_complete(struct bio *nbio)
1299 {
1300 	struct bio *obio;
1301 	struct buf *bp;
1302 	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;
1303 
1304 	bp = nbio->bio_buf;
1305 	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
1306 		kprintf("HAMMER: data_crc error @%016llx/%d\n",
1307 			nbio->bio_offset, bp->b_bufsize);
1308 		if (hammer_debug_critical)
1309 			Debugger("data_crc on read");
1310 		bp->b_flags |= B_ERROR;
1311 		bp->b_error = EIO;
1312 	}
1313 	obio = pop_bio(nbio);
1314 	biodone(obio);
1315 }
1316 #endif
1317 
1318 /*
1319  * Write a buffer associated with a front-end vnode directly to the
1320  * disk media.  The bio may be issued asynchronously.
1321  *
1322  * The BIO is associated with the specified record and RECF_DIRECT_IO
1323  * is set.  The record is added to its object.
1324  */
1325 int
1326 hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
1327 		       hammer_record_t record)
1328 {
1329 	hammer_btree_leaf_elm_t leaf = &record->leaf;
1330 	hammer_off_t buf_offset;
1331 	hammer_off_t zone2_offset;
1332 	hammer_volume_t volume;
1333 	hammer_buffer_t buffer;
1334 	struct buf *bp;
1335 	struct bio *nbio;
1336 	char *ptr;
1337 	int vol_no;
1338 	int error;
1339 
1340 	buf_offset = leaf->data_offset;
1341 
1342 	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
1343 	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);
1344 
1345 	/*
1346 	 * Issue or execute the I/O.  The new memory record must replace
1347 	 * the old one before the I/O completes, otherwise a reacquisition of
1348 	 * the buffer will load the old media data instead of the new.
1349 	 */
1350 	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
1351 	    leaf->data_len >= HAMMER_BUFSIZE) {
1352 		/*
1353 		 * We are using the vnode's bio to write directly to the
1354 		 * media, any hammer_buffer at the same zone-X offset will
1355 		 * now have stale data.
1356 		 */
1357 		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1358 		vol_no = HAMMER_VOL_DECODE(zone2_offset);
1359 		volume = hammer_get_volume(hmp, vol_no, &error);
1360 
1361 		if (error == 0 && zone2_offset >= volume->maxbuf_off)
1362 			error = EIO;
1363 		if (error == 0) {
1364 			bp = bio->bio_buf;
1365 			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
1366 			/*
1367 			hammer_del_buffers(hmp, buf_offset,
1368 					   zone2_offset, bp->b_bufsize);
1369 			*/
1370 
1371 			/*
1372 			 * Second level bio - cached zone2 offset.
1373 			 *
1374 			 * (We can put our bio_done function in either the
1375 			 *  2nd or 3rd level).
1376 			 */
1377 			nbio = push_bio(bio);
1378 			nbio->bio_offset = zone2_offset;
1379 			nbio->bio_done = hammer_io_direct_write_complete;
1380 			nbio->bio_caller_info1.ptr = record;
1381 			record->zone2_offset = zone2_offset;
1382 			record->flags |= HAMMER_RECF_DIRECT_IO |
1383 					 HAMMER_RECF_DIRECT_INVAL;
1384 
1385 			/*
1386 			 * Third level bio - raw offset specific to the
1387 			 * correct volume.
1388 			 */
1389 			zone2_offset &= HAMMER_OFF_SHORT_MASK;
1390 			nbio = push_bio(nbio);
1391 			nbio->bio_offset = volume->ondisk->vol_buf_beg +
1392 					   zone2_offset;
1393 			hammer_stats_disk_write += bp->b_bufsize;
1394 			hammer_ip_replace_bulk(hmp, record);
1395 			vn_strategy(volume->devvp, nbio);
1396 			hammer_io_flush_mark(volume);
1397 		}
1398 		hammer_rel_volume(volume, 0);
1399 	} else {
1400 		/*
1401 		 * Must fit in a standard HAMMER buffer.  In this case all
1402 		 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
1403 		 * does not need to be set up.
1404 		 */
1405 		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
1406 		buffer = NULL;
1407 		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
1408 		if (error == 0) {
1409 			bp = bio->bio_buf;
1410 			bp->b_flags |= B_AGE;
1411 			hammer_io_modify(&buffer->io, 1);
1412 			bcopy(bp->b_data, ptr, leaf->data_len);
1413 			hammer_io_modify_done(&buffer->io);
1414 			hammer_rel_buffer(buffer, 0);
1415 			bp->b_resid = 0;
1416 			hammer_ip_replace_bulk(hmp, record);
1417 			biodone(bio);
1418 		}
1419 	}
1420 	if (error) {
1421 		/*
1422 		 * Major suckage occurred.  Also note:  The record was
1423 		 * never added to the tree so we do not have to worry
1424 		 * about the backend.
1425 		 */
1426 		kprintf("hammer_direct_write: failed @ %016llx\n",
1427 			(long long)leaf->data_offset);
1428 		bp = bio->bio_buf;
1429 		bp->b_resid = 0;
1430 		bp->b_error = EIO;
1431 		bp->b_flags |= B_ERROR;
1432 		biodone(bio);
1433 		record->flags |= HAMMER_RECF_DELETED_FE;
1434 		hammer_rel_mem_record(record);
1435 	}
1436 	return(error);
1437 }
1438 
1439 /*
1440  * On completion of the BIO this callback must disconnect
1441  * it from the hammer_record and chain to the previous bio.
1442  *
1443  * An I/O error forces the mount to read-only.  Data buffers
1444  * are not B_LOCKED like meta-data buffers are, so we have to
1445  * throw the buffer away to prevent the kernel from retrying.
1446  */
1447 static
1448 void
1449 hammer_io_direct_write_complete(struct bio *nbio)
1450 {
1451 	struct bio *obio;
1452 	struct buf *bp;
1453 	hammer_record_t record = nbio->bio_caller_info1.ptr;
1454 
1455 	bp = nbio->bio_buf;
1456 	obio = pop_bio(nbio);
1457 	if (bp->b_flags & B_ERROR) {
1458 		hammer_critical_error(record->ip->hmp, record->ip,
1459 				      bp->b_error,
1460 				      "while writing bulk data");
1461 		bp->b_flags |= B_INVAL;
1462 	}
1463 	biodone(obio);
1464 
1465 	KKASSERT(record != NULL);
1466 	KKASSERT(record->flags & HAMMER_RECF_DIRECT_IO);
1467 	if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
1468 		record->flags &= ~(HAMMER_RECF_DIRECT_IO |
1469 				   HAMMER_RECF_DIRECT_WAIT);
1470 		/* record can disappear once DIRECT_IO flag is cleared */
1471 		wakeup(&record->flags);
1472 	} else {
1473 		record->flags &= ~HAMMER_RECF_DIRECT_IO;
1474 		/* record can disappear once DIRECT_IO flag is cleared */
1475 	}
1476 }
1477 
1478 
1479 /*
1480  * This is called before a record is either committed to the B-Tree
1481  * or destroyed, to resolve any associated direct-IO.
1482  *
1483  * (1) We must wait for any direct-IO related to the record to complete.
1484  *
1485  * (2) We must remove any buffer cache aliases for data accessed via
1486  *     leaf->data_offset or zone2_offset so non-direct-IO consumers
1487  *     (the mirroring and reblocking code) do not see stale data.
1488  */
1489 void
1490 hammer_io_direct_wait(hammer_record_t record)
1491 {
1492 	/*
1493 	 * Wait for I/O to complete
1494 	 */
1495 	if (record->flags & HAMMER_RECF_DIRECT_IO) {
1496 		crit_enter();
1497 		while (record->flags & HAMMER_RECF_DIRECT_IO) {
1498 			record->flags |= HAMMER_RECF_DIRECT_WAIT;
1499 			tsleep(&record->flags, 0, "hmdiow", 0);
1500 		}
1501 		crit_exit();
1502 	}
1503 
1504 	/*
1505 	 * Invalidate any related buffer cache aliases associated with the
1506 	 * backing device.  This is needed because the buffer cache buffer
1507 	 * for file data is associated with the file vnode, not the backing
1508 	 * device vnode.
1509 	 *
1510 	 * XXX I do not think this case can occur any more now that
1511 	 * reservations ensure that all such buffers are removed before
1512 	 * an area can be reused.
1513 	 */
1514 	if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
1515 		KKASSERT(record->leaf.data_offset);
1516 		hammer_del_buffers(record->ip->hmp, record->leaf.data_offset,
1517 				   record->zone2_offset, record->leaf.data_len,
1518 				   1);
1519 		record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
1520 	}
1521 }
1522 
1523 /*
1524  * This is called to remove the second-level cached zone-2 offset from
1525  * frontend buffer cache buffers, now stale due to a data relocation.
1526  * These offsets are generated by cluster_read() via VOP_BMAP, or directly
1527  * by hammer_vop_strategy_read().
1528  *
1529  * This is rather nasty because here we have something like the reblocker
1530  * scanning the raw B-Tree with no held references on anything, really,
1531  * other than a shared lock on the B-Tree node, and we have to access the
1532  * frontend's buffer cache to check for and clean out the association.
1533  * Specifically, if the reblocker is moving data on the disk, these cached
1534  * offsets will become invalid.
1535  *
1536  * Only data record types associated with the large-data zone are subject
1537  * to direct-io and need to be checked.
1538  *
1539  */
1540 void
1541 hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
1542 {
1543 	struct hammer_inode_info iinfo;
1544 	int zone;
1545 
1546 	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
1547 		return;
1548 	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
1549 	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
1550 		return;
1551 	iinfo.obj_id = leaf->base.obj_id;
1552 	iinfo.obj_asof = 0;	/* unused */
1553 	iinfo.obj_localization = leaf->base.localization &
1554 				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
1555 	iinfo.u.leaf = leaf;
1556 	hammer_scan_inode_snapshots(hmp, &iinfo,
1557 				    hammer_io_direct_uncache_callback,
1558 				    leaf);
1559 }
1560 
1561 static int
1562 hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
1563 {
1564 	hammer_inode_info_t iinfo = data;
1565 	hammer_off_t data_offset;
1566 	hammer_off_t file_offset;
1567 	struct vnode *vp;
1568 	struct buf *bp;
1569 	int blksize;
1570 
1571 	if (ip->vp == NULL)
1572 		return(0);
1573 	data_offset = iinfo->u.leaf->data_offset;
1574 	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
1575 	blksize = iinfo->u.leaf->data_len;
1576 	KKASSERT((blksize & HAMMER_BUFMASK) == 0);
1577 
1578 	hammer_ref(&ip->lock);
1579 	if (hammer_get_vnode(ip, &vp) == 0) {
1580 		if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
1581 		    bp->b_bio2.bio_offset != NOOFFSET) {
1582 			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
1583 			bp->b_bio2.bio_offset = NOOFFSET;
1584 			brelse(bp);
1585 		}
1586 		vput(vp);
1587 	}
1588 	hammer_rel_inode(ip, 0);
1589 	return(0);
1590 }
1591 
1592 
1593 /*
1594  * This function is called when writes may have occurred on the volume,
1595  * indicating that the device may be holding cached writes.
1596  */
1597 static void
1598 hammer_io_flush_mark(hammer_volume_t volume)
1599 {
1600 	volume->vol_flags |= HAMMER_VOLF_NEEDFLUSH;
1601 }
1602 
1603 /*
1604  * This function ensures that the device has flushed any cached writes out.
1605  */
1606 void
1607 hammer_io_flush_sync(hammer_mount_t hmp)
1608 {
1609 	hammer_volume_t volume;
1610 	struct buf *bp_base = NULL;
1611 	struct buf *bp;
1612 
1613 	RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
1614 		if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
1615 			volume->vol_flags &= ~HAMMER_VOLF_NEEDFLUSH;
1616 			bp = getpbuf(NULL);
1617 			bp->b_bio1.bio_offset = 0;
1618 			bp->b_bufsize = 0;
1619 			bp->b_bcount = 0;
1620 			bp->b_cmd = BUF_CMD_FLUSH;
1621 			bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
1622 			bp->b_bio1.bio_done = biodone_sync;
1623 			bp->b_bio1.bio_flags |= BIO_SYNC;
1624 			bp_base = bp;
1625 			vn_strategy(volume->devvp, &bp->b_bio1);
1626 		}
1627 	}
1628 	while ((bp = bp_base) != NULL) {
1629 		bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
1630 		biowait(&bp->b_bio1, "hmrFLS");
1631 		relpbuf(bp, NULL);
1632 	}
1633 }
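
/*
 * Each BUF_CMD_FLUSH pbuf issued above asks the underlying device to
 * flush its write cache.  The pbufs are chained through
 * bio_caller_info1.cluster_head so all volumes can be flushed in
 * parallel and then biowait()ed in turn.
 */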
1634 
1635 /*
1636  * Limit the amount of backlog which we allow to build up
1637  */
1638 void
1639 hammer_io_limit_backlog(hammer_mount_t hmp)
1640 {
1641 	while (hmp->io_running_space > hammer_limit_running_io) {
1642 		hmp->io_running_wakeup = 1;
1643 		tsleep(&hmp->io_running_wakeup, 0, "hmiolm", hz / 10);
1644 	}
1645 }
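
/*
 * The matching wakeup is issued from hammer_io_complete() once
 * io_running_space drops below half of hammer_limit_running_io.
 */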
1646