/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.51 2008/07/18 00:19:53 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go of, we set B_LOCKED in the buffer and actively release it
 * later when we can.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
#if 0
static void hammer_io_direct_read_complete(struct bio *nbio);
#endif
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
	io->hmp = hmp;
	io->type = type;
}
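
/*
 * Illustrative usage sketch (not a call site in this file): a caller
 * embedding a hammer_io in a higher-level structure wires it up along
 * these lines before any hammer_io_read()/hammer_io_new() call.  The
 * allocation and offset values shown are hypothetical; see
 * hammer_get_buffer() in hammer_ondisk.c for the real setup.
 *
 *	buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK | M_ZERO);
 *	hammer_io_init(&buffer->io, hmp, HAMMER_STRUCTURE_DATA_BUFFER);
 *	buffer->io.offset = volume->ondisk->vol_buf_beg +
 *			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
 *	buffer->io.bytes = HAMMER_BUFSIZE;
 */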

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The buffer is unlocked and marked appropriately for
 * reclamation.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * The caller must own the buffer and the IO must indicate that the
 * structure no longer owns it (io.released != 0).
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.released);
	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
	}
	if (iou->io.reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		iou->io.reclaim = 0;
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}

/*
 * Wait for any physical IO to complete
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}

/*
 * Wait for all hammer_io-initiated write I/O's to complete.  This is not
 * supposed to count direct I/O's but some can leak through (for
 * non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident)
{
	crit_enter();
	while (hmp->io_running_space)
		tsleep(&hmp->io_running_space, 0, ident, 0);
	crit_exit();
}

#define HAMMER_MAXRA	4

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking, HAMMER assumes some locality of reference and will cluster
 * a 64K read.
 *
 * Note that clustering occurs at the device layer, not the logical layer.
 * If the buffers do not apply to the current operation, they may apply to
 * some other operation.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		hammer_count_io_running_read += io->bytes;
		if (hammer_cluster_enable) {
			error = cluster_read(devvp, limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_BUFS, &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
		hammer_stats_disk_read += io->bytes;
		hammer_count_io_running_read -= io->bytes;

		/*
		 * The code generally assumes b_ops/b_dep has been set up,
		 * even if we error out here.
		 */
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		BUF_KERNPROC(bp);
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
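
/*
 * A minimal call sketch, assuming the caller holds an exclusive lock on
 * the io and has filled in io->offset and io->bytes (the limit argument
 * bounds read-ahead clustering at the end of the volume; compare
 * hammer_load_buffer() in hammer_ondisk.c):
 *
 *	error = hammer_io_read(volume->devvp, &buffer->io,
 *			       volume->maxraw_off);
 *	if (error == 0)
 *		buffer->ondisk = (void *)buffer->io.bp->b_data;
 */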

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty; enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}
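
/*
 * Sketch of the new-vs-read choice made by callers (compare
 * hammer_load_buffer(); the isnew flag shown here is illustrative):
 * a caller about to overwrite the entire buffer asks for a zero'd
 * buffer and skips the media read entirely.
 *
 *	if (isnew)
 *		error = hammer_io_new(volume->devvp, &buffer->io);
 *	else
 *		error = hammer_io_read(volume->devvp, &buffer->io,
 *				       volume->maxraw_off);
 */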

/*
 * Remove potential device-level aliases against buffers managed by
 * high-level vnodes.
 */
void
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_structure_t iou;
	hammer_off_t phys_offset;
	struct buf *bp;

	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
	crit_enter();
	if ((bp = findblk(volume->devvp, phys_offset)) != NULL) {
		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
		if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
			hammer_io_clear_modify(&iou->io, 1);
			bundirty(bp);
			iou->io.reclaim = 1;
			hammer_io_deallocate(bp);
		} else {
			KKASSERT((bp->b_flags & B_LOCKED) == 0);
			bundirty(bp);
			bp->b_flags |= B_NOCACHE|B_RELBUF;
		}
		brelse(bp);
	}
	crit_exit();
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually interlocked with io.loading and io.refs must be 1.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * Disposal simply means the caller finishes decrementing the ref-count
 * on the IO structure then brelse()'s the bp.  The bp may or may not
 * still be passively associated with the IO.
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return(NULL);

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regard to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
		} else {
			io->released = 1;
		}
		hammer_io_disassociate((hammer_io_structure_t)io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can generally be released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(iou);
			/* return the bp */
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		} else {
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
			bp = NULL;
		}
	}
	return(bp);
}
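
/*
 * Outcome summary for hammer_io_release(), restating the branches above:
 *
 *	flush/reclaim, clean and idle	 bp disassociated and returned
 *	modified DATA/UNDO, not released bdwrite()n; NULL returned
 *	modified META/VOLUME		 left dirty for the flusher; NULL
 *	clean, not yet released		 released passively; bp returned
 *					 (disassociated on B_LOCKED/reclaim)
 *	already released, idle		 LRU reset via regetblk(); bp
 *					 returned (or disassociated)
 *	already released, running	 left alone; NULL returned
 */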

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_io_clear_modify(io, 0);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	bawrite(bp);
}
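
/*
 * A simplified sketch of how the flusher drives this routine, assuming
 * it holds the only reference (the real loop lives in hammer_flusher.c
 * and differs in detail):
 *
 *	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
 *		KKASSERT(io->modify_refs == 0);
 *		hammer_ref(&io->lock);
 *		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
 *		hammer_io_flush(io);
 *		hammer_rel_buffer((hammer_buffer_t)io, 0);
 *	}
 */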

/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them; pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		hammer_io_set_modlist(io);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}

void
hammer_io_write_interlock(hammer_io_t io)
{
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
}

void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
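
/*
 * Sketch of the interlock pairing (illustrative; the real call sites
 * are in the strategy-write path): modify_refs == -1 excludes
 * hammer_io_modify() callers while data is being copied or written
 * directly into the buffer.
 *
 *	hammer_io_write_interlock(io);
 *	... write or copy data covered by this io ...
 *	hammer_io_done_interlock(io);
 */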

/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT(trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			 base, len);
	}
}

/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT(trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}
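
/*
 * Typical modify/done bracketing, sketched (the field chosen is
 * illustrative; any ondisk range works).  The modify call generates
 * UNDO for the range before the caller overwrites it:
 *
 *	hammer_modify_volume(trans, volume,
 *			     &volume->ondisk->vol0_stat_freebigblocks,
 *			     sizeof(volume->ondisk->vol0_stat_freebigblocks));
 *	--volume->ondisk->vol0_stat_freebigblocks;
 *	hammer_modify_volume_done(volume);
 */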

/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	if (io->modified == 0)
		return;

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		io->hmp->locked_dirty_space -= io->bytes;
		hammer_count_dirtybufspace -= io->bytes;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = (void *)io;
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1,
							  HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
			goto restart;
		}
	}
}

/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	KKASSERT(io->modified == 0);
	if (io->mod_list) {
		crit_enter();	/* biodone race against list */
		KKASSERT(io->mod_list == &io->hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		crit_exit();
	}
}

static void
hammer_io_set_modlist(struct hammer_io *io)
{
	struct hammer_mount *hmp = io->hmp;

	KKASSERT(io->mod_list == NULL);

	switch(io->type) {
	case HAMMER_STRUCTURE_VOLUME:
		io->mod_list = &hmp->volu_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_META_BUFFER:
		io->mod_list = &hmp->meta_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		io->mod_list = &hmp->undo_list;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
		io->mod_list = &hmp->data_list;
		break;
	}
	TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
}
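
/*
 * Note, restating the switch above: only VOLUME and META buffers are
 * charged against locked_dirty_space, because only those types stay
 * pinned until the flusher runs; UNDO and DATA buffers may be written
 * back by the kernel at any time and so are not charged.
 */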

/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (iou->io.running) {
		/*
		 * Deal with critical write errors.  Once a critical error
		 * has been flagged in hmp the UNDO FIFO will not be updated.
		 * That way crash recovery will give us a consistent
		 * filesystem.
		 *
		 * Because of this we can throw away failed UNDO buffers.  If
		 * we throw away META or DATA buffers we risk corrupting
		 * the now read-only version of the filesystem visible to
		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
		 * by the kernel and ref the io so it doesn't get thrown
		 * away.
		 */
		if (bp->b_flags & B_ERROR) {
			hammer_critical_error(iou->io.hmp, NULL, bp->b_error,
					      "while flushing meta-data");
			switch(iou->io.type) {
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				break;
			default:
				if (iou->io.ioerror == 0) {
					iou->io.ioerror = 1;
					if (iou->io.lock.refs == 0)
						++hammer_count_refedbufs;
					hammer_ref(&iou->io.lock);
				}
				break;
			}
			bp->b_flags &= ~B_ERROR;
			bundirty(bp);
#if 0
			hammer_io_set_modlist(&iou->io);
			iou->io.modified = 1;
#endif
		}
		hammer_stats_disk_write += iou->io.bytes;
		hammer_count_io_running_write -= iou->io.bytes;
		iou->io.hmp->io_running_space -= iou->io.bytes;
		if (iou->io.hmp->io_running_space == 0)
			wakeup(&iou->io.hmp->io_running_space);
		KKASSERT(iou->io.hmp->io_running_space >= 0);
		iou->io.running = 0;
	} else {
		hammer_stats_disk_read += iou->io.bytes;
	}

	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, do it now if refs has become zero.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		++hammer_count_io_locked;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou);
		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.bp == NULL);
			KKASSERT(iou->io.mod_list == NULL);
			crit_enter();	/* biodone race against list */
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
			crit_exit();
		}
	}
}

static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does,
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 */
	if (io->modify_refs == 0 && io->modified)
		hammer_io_clear_modify(io, 0);

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}

struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};

/************************************************************************
 *				DIRECT IO OPS				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */

/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * A second-level bio, already resolved to a zone-2 offset (typically by
 * the BMAP code or by a previous hammer_io_direct_write()), is passed in.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error)
		goto done;
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);

	/*
	 * Resolve volume and raw-offset for 3rd level bio.  The
	 * offset will be specific to the volume.
	 */
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	if (error == 0) {
		zone2_offset &= HAMMER_OFF_SHORT_MASK;

		nbio = push_bio(bio);
		nbio->bio_offset = volume->ondisk->vol_buf_beg +
				   zone2_offset;
#if 0
		/*
		 * XXX disabled - our CRC check doesn't work if the OS
		 * does bogus_page replacement on the direct-read.
		 */
		if (leaf && hammer_verify_data) {
			nbio->bio_done = hammer_io_direct_read_complete;
			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
		}
#endif
		hammer_stats_disk_read += bp->b_bufsize;
		vn_strategy(volume->devvp, nbio);
	}
	hammer_rel_volume(volume, 0);
done:
	if (error) {
		kprintf("hammer_direct_read: failed @ %016llx\n",
			zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}
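
/*
 * Shape of the bio stack built above, for reference (levels are pushed
 * with push_bio() and unwound on completion):
 *
 *	bio	front-end vnode bio; bio_offset is a zone-X
 *		(large-data) offset
 *	nbio	device-level bio; bio_offset is vol_buf_beg plus the
 *		zone-2 offset masked down to a raw volume offset
 */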

#if 0
/*
 * On completion of the BIO this callback must check the data CRC
 * and chain to the previous bio.
 */
static
void
hammer_io_direct_read_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;

	bp = nbio->bio_buf;
	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
		kprintf("HAMMER: data_crc error @%016llx/%d\n",
			nbio->bio_offset, bp->b_bufsize);
		if (hammer_debug_debug)
			Debugger("");
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}
	obio = pop_bio(nbio);
	biodone(obio);
}
#endif

/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 *
 * The BIO is associated with the specified record and RECF_DIRECT_IO
 * is set.
 */
int
hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
		       struct bio *bio)
{
	hammer_btree_leaf_elm_t leaf = &record->leaf;
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	struct buf *bp;
	struct bio *nbio;
	char *ptr;
	int vol_no;
	int error;

	buf_offset = leaf->data_offset;

	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
	    leaf->data_len >= HAMMER_BUFSIZE) {
		/*
		 * We are using the vnode's bio to write directly to the
		 * media; any hammer_buffer at the same zone-X offset will
		 * now have stale data.
		 */
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);

		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			bp = bio->bio_buf;
			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
			hammer_del_buffers(hmp, buf_offset,
					   zone2_offset, bp->b_bufsize);

			/*
			 * Second level bio - cached zone2 offset.
			 *
			 * (We can put our bio_done function in either the
			 *  2nd or 3rd level).
			 */
			nbio = push_bio(bio);
			nbio->bio_offset = zone2_offset;
			nbio->bio_done = hammer_io_direct_write_complete;
			nbio->bio_caller_info1.ptr = record;
			record->flags |= HAMMER_RECF_DIRECT_IO;

			/*
			 * Third level bio - raw offset specific to the
			 * correct volume.
			 */
			zone2_offset &= HAMMER_OFF_SHORT_MASK;
			nbio = push_bio(nbio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   zone2_offset;
			hammer_stats_disk_write += bp->b_bufsize;
			vn_strategy(volume->devvp, nbio);
		}
		hammer_rel_volume(volume, 0);
	} else {
		/*
		 * Must fit in a standard HAMMER buffer.  In this case all
		 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
		 * does not need to be set up.
		 */
		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) &
			  ~HAMMER_BUFMASK64) == 0);
		buffer = NULL;
		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
		if (error == 0) {
			bp = bio->bio_buf;
			bp->b_flags |= B_AGE;
			hammer_io_modify(&buffer->io, 1);
			bcopy(bp->b_data, ptr, leaf->data_len);
			hammer_io_modify_done(&buffer->io);
			hammer_rel_buffer(buffer, 0);
			bp->b_resid = 0;
			biodone(bio);
		}
	}
	if (error) {
		kprintf("hammer_direct_write: failed @ %016llx\n",
			leaf->data_offset);
		bp = bio->bio_buf;
		bp->b_resid = 0;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}
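
/*
 * Decision recap, restating the test above: a write takes the direct
 * (multi-level bio) path only when it begins on a HAMMER buffer
 * boundary and covers at least one full HAMMER_BUFSIZE buffer.
 * Anything smaller is copied into a HAMMER buffer via hammer_bread()
 * and reaches the media through the flusher instead.
 */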

/*
 * On completion of the BIO this callback must disconnect
 * it from the hammer_record and chain to the previous bio.
 *
 * An I/O error forces the mount to read-only.  Data buffers
 * are not B_LOCKED like meta-data buffers are, so we have to
 * throw the buffer away to prevent the kernel from retrying.
 */
static
void
hammer_io_direct_write_complete(struct bio *nbio)
{
	struct bio *obio;
	hammer_record_t record = nbio->bio_caller_info1.ptr;

	obio = pop_bio(nbio);
	if (obio->bio_buf->b_flags & B_ERROR) {
		hammer_critical_error(record->ip->hmp, record->ip,
				      obio->bio_buf->b_error,
				      "while writing bulk data");
		obio->bio_buf->b_flags |= B_INVAL;
	}
	biodone(obio);
	KKASSERT(record != NULL && (record->flags & HAMMER_RECF_DIRECT_IO));
	record->flags &= ~HAMMER_RECF_DIRECT_IO;
	if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
		record->flags &= ~HAMMER_RECF_DIRECT_WAIT;
		wakeup(&record->flags);
	}
}

/*
 * This is called before a record is either committed to the B-Tree
 * or destroyed, to resolve any associated direct-IO.  We must
 * ensure that the data is available on-media to other consumers
 * such as the reblocker or mirroring code.
 *
 * Note that other consumers might access the data via the block
 * device's buffer cache and not the high-level vnode's buffer cache.
 */
void
hammer_io_direct_wait(hammer_record_t record)
{
	crit_enter();
	while (record->flags & HAMMER_RECF_DIRECT_IO) {
		record->flags |= HAMMER_RECF_DIRECT_WAIT;
		tsleep(&record->flags, 0, "hmdiow", 0);
	}
	crit_exit();
}

/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 */
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
	struct hammer_inode_info iinfo;
	int zone;

	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
		return;
	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
		return;
	iinfo.obj_id = leaf->base.obj_id;
	iinfo.obj_asof = 0;	/* unused */
	iinfo.obj_localization = leaf->base.localization &
				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
	iinfo.u.leaf = leaf;
	hammer_scan_inode_snapshots(hmp, &iinfo,
				    hammer_io_direct_uncache_callback,
				    leaf);
}

static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t iinfo = data;
	hammer_off_t data_offset;
	hammer_off_t file_offset;
	struct vnode *vp;
	struct buf *bp;
	int blksize;

	if (ip->vp == NULL)
		return(0);
	data_offset = iinfo->u.leaf->data_offset;
	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
	blksize = iinfo->u.leaf->data_len;
	KKASSERT((blksize & HAMMER_BUFMASK) == 0);

	hammer_ref(&ip->lock);
	if (hammer_get_vnode(ip, &vp) == 0) {
		if ((bp = findblk(ip->vp, file_offset)) != NULL &&
		    bp->b_bio2.bio_offset != NOOFFSET) {
			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
			bp->b_bio2.bio_offset = NOOFFSET;
			brelse(bp);
		}
		vput(vp);
	}
	hammer_rel_inode(ip, 0);
	return(0);
}