/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.53 2008/08/06 15:38:58 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.  A sketch of the typical lifecycle follows.
 */
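
/*
 * Illustrative sketch only (not compiled): the typical lifecycle of a
 * hammer_io-backed structure as used by the rest of HAMMER.  Error
 * handling is elided, the embedding structure is hypothetical, and any
 * bp handed back by hammer_io_release() must be disposed of via brelse().
 *
 *	hammer_io_init(&buffer->io, hmp, HAMMER_STRUCTURE_DATA_BUFFER);
 *	error = hammer_io_read(volume->devvp, &buffer->io, limit);
 *	... access buffer->ondisk ...
 *	bp = hammer_io_release(&buffer->io, 0);
 *	if (bp)
 *		brelse(bp);
 */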

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
#if 0
static void hammer_io_direct_read_complete(struct bio *nbio);
#endif
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
	io->hmp = hmp;
	io->type = type;
}

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The buffer is unlocked and marked appropriate for reclamation.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * The caller must own the buffer and the IO must indicate that the
 * structure no longer owns it (io.released != 0).
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.released);
	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
	}
	if (iou->io.reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		iou->io.reclaim = 0;
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}

/*
 * Wait for any physical IO to complete
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
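		/*
		 * Re-arm the interlock and re-test io->running around
		 * each sleep so a wakeup posted between the test and the
		 * tsleep() is not lost.
		 */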
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}

/*
 * Wait for all hammer_io-initiated write I/O's to complete.  This is not
 * supposed to count direct I/O's but some can leak through (for
 * non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident)
{
	crit_enter();
	while (hmp->io_running_space)
		tsleep(&hmp->io_running_space, 0, ident, 0);
	crit_exit();
}

#define HAMMER_MAXRA	4

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster
 * a 64K read.
 *
 * Note that clustering occurs at the device layer, not the logical layer.
 * If the buffers do not apply to the current operation they may apply to
 * some other.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int   error;

	if ((bp = io->bp) == NULL) {
		hammer_count_io_running_read += io->bytes;
		if (hammer_cluster_enable) {
			error = cluster_read(devvp, limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_BUFS, &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
		hammer_stats_disk_read += io->bytes;
		hammer_count_io_running_read -= io->bytes;

		/*
		 * The code generally assumes b_ops/b_dep has been set-up,
		 * even if we error out here.
		 */
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		BUF_KERNPROC(bp);
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty; enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}

/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.  Aliases can also be created due to mixed buffer sizes.
 *
 * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
 * does not exist its backing VM pages might, and we have to invalidate
 * those as well or a getblk() will reinstate them.
 */
void
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_structure_t iou;
	hammer_off_t phys_offset;
	struct buf *bp;

	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
	crit_enter();
	if ((bp = findblk(volume->devvp, phys_offset)) != NULL)
		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
	else
		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
	if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
		hammer_ref(&iou->io.lock);
		hammer_io_clear_modify(&iou->io, 1);
		bundirty(bp);
		iou->io.reclaim = 1;
		iou->io.waitdep = 1;
		KKASSERT(iou->io.lock.refs == 1);
		hammer_rel_buffer(&iou->buffer, 0);
		/*hammer_io_deallocate(bp);*/
	} else {
		KKASSERT((bp->b_flags & B_LOCKED) == 0);
		bundirty(bp);
		bp->b_flags |= B_NOCACHE|B_RELBUF;
	}
	brelse(bp);
	crit_exit();
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually interlocked with io.loading and io.refs must be 1.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * Disposal simply means the caller finishes decrementing the ref-count on
 * the IO structure then brelse()'s the bp.  The bp may or may not still be
 * passively associated with the IO.
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return(NULL);

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.  This occurs when
	 * the buffer must be disposed of definitively during an umount
	 * or buffer invalidation.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
		} else {
			io->released = 1;
		}
		hammer_io_disassociate((hammer_io_structure_t)io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(iou);
			/* return the bp */
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		} else {
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
			bp = NULL;
		}
	}
	return(bp);
}

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set; we actually own the bp rather than our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_ref(&io->lock);
	hammer_io_clear_modify(io, 0);
	hammer_unref(&io->lock);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	bawrite(bp);
}

/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.  A usage sketch follows.
 */
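
/*
 * Illustrative sketch only (not compiled): the modify protocol as seen
 * by a caller.  The ondisk field being modified is hypothetical.
 *
 *	hammer_modify_buffer(trans, buffer, &buffer->ondisk->field,
 *			     sizeof(buffer->ondisk->field));
 *	buffer->ondisk->field = new_value;
 *	hammer_modify_buffer_done(buffer);
 *
 * A zero len skips UNDO generation but still dirties the buffer.
 */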

/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them; pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		hammer_io_set_modlist(io);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}

void
hammer_io_write_interlock(hammer_io_t io)
{
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
}

void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
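
/*
 * Illustrative pairing only (not compiled): a writer uses the interlock
 * above to exclude hammer_io_modify() callers for the duration of an
 * operation, parking modify_refs at -1.
 *
 *	hammer_io_write_interlock(&buffer->io);
 *	... perform the interlocked operation ...
 *	hammer_io_done_interlock(&buffer->io);
 */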

/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			 base, len);
	}
}

/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}

/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 *
 * This routine may dereference related btree nodes and cause the
 * buffer to be dereferenced.  The caller must own a reference on io.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	if (io->modified == 0)
		return;

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		io->hmp->locked_dirty_space -= io->bytes;
		hammer_count_dirtybufspace -= io->bytes;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = (void *)io;
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
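			/*
			 * The CRC covers the node contents starting
			 * just past the crc field itself.
			 */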
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1,
							  HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
			goto restart;
		}
	}
	/* caller must still have ref on io */
	KKASSERT(io->lock.refs > 0);
}

/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	KKASSERT(io->modified == 0);
	if (io->mod_list) {
		crit_enter();	/* biodone race against list */
		KKASSERT(io->mod_list == &io->hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		crit_exit();
	}
}

static void
hammer_io_set_modlist(struct hammer_io *io)
{
	struct hammer_mount *hmp = io->hmp;

	KKASSERT(io->mod_list == NULL);

	switch(io->type) {
	case HAMMER_STRUCTURE_VOLUME:
		io->mod_list = &hmp->volu_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_META_BUFFER:
		io->mod_list = &hmp->meta_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		io->mod_list = &hmp->undo_list;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
		io->mod_list = &hmp->data_list;
		break;
	}
	TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
}

/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (iou->io.running) {
		/*
		 * Deal with critical write errors.  Once a critical error
		 * has been flagged in hmp the UNDO FIFO will not be updated.
		 * That way crash recovery will give us a consistent
		 * filesystem.
		 *
		 * Because of this we can throw away failed UNDO buffers.  If
		 * we throw away META or DATA buffers we risk corrupting
		 * the now read-only version of the filesystem visible to
		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
		 * by the kernel and ref the io so it doesn't get thrown
		 * away.
		 */
		if (bp->b_flags & B_ERROR) {
			hammer_critical_error(iou->io.hmp, NULL, bp->b_error,
					      "while flushing meta-data");
			switch(iou->io.type) {
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				break;
			default:
				if (iou->io.ioerror == 0) {
					iou->io.ioerror = 1;
					if (iou->io.lock.refs == 0)
						++hammer_count_refedbufs;
					hammer_ref(&iou->io.lock);
				}
				break;
			}
			bp->b_flags &= ~B_ERROR;
			bundirty(bp);
#if 0
			hammer_io_set_modlist(&iou->io);
			iou->io.modified = 1;
#endif
		}
		hammer_stats_disk_write += iou->io.bytes;
		hammer_count_io_running_write -= iou->io.bytes;
		iou->io.hmp->io_running_space -= iou->io.bytes;
		if (iou->io.hmp->io_running_space == 0)
			wakeup(&iou->io.hmp->io_running_space);
		KKASSERT(iou->io.hmp->io_running_space >= 0);
		iou->io.running = 0;
	} else {
		hammer_stats_disk_read += iou->io.bytes;
	}

	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, do it now if refs has become zero.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		++hammer_count_io_locked;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou);
		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.bp == NULL);
			KKASSERT(iou->io.mod_list == NULL);
			crit_enter();	/* biodone race against list */
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
			crit_exit();
		}
	}
}

static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 *
	 * Only data and undo buffers can reach here.  These buffers do
	 * not have terminal crc functions but we temporarily reference
	 * the IO anyway, just in case.
	 */
	if (io->modify_refs == 0 && io->modified) {
		hammer_ref(&io->lock);
		hammer_io_clear_modify(io, 0);
		hammer_unref(&io->lock);
	} else if (io->modified) {
		KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
	}

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}

struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};

/************************************************************************
 *				DIRECT IO OPS 				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */
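
/*
 * Rough sketch of the bio layering used below (as named by the code's
 * own comments; the exact level numbering is an assumption):
 *
 *	level 1: frontend bio carrying the zone-X (large-data) offset
 *	level 2: pushed bio caching the zone-2 raw-buffer offset
 *	level 3: pushed bio carrying the volume-relative byte offset
 *		 handed to the device via vn_strategy()
 */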

/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error)
		goto done;
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);

	/*
	 * Resolve volume and raw-offset for 3rd level bio.  The
	 * offset will be specific to the volume.
	 */
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	if (error == 0) {
		/*
		 * 3rd level bio
		 */
		nbio = push_bio(bio);
		nbio->bio_offset = volume->ondisk->vol_buf_beg +
				   (zone2_offset & HAMMER_OFF_SHORT_MASK);
#if 0
		/*
		 * XXX disabled - our CRC check doesn't work if the OS
		 * does bogus_page replacement on the direct-read.
		 */
		if (leaf && hammer_verify_data) {
			nbio->bio_done = hammer_io_direct_read_complete;
			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
		}
#endif
		hammer_stats_disk_read += bp->b_bufsize;
		vn_strategy(volume->devvp, nbio);
	}
	hammer_rel_volume(volume, 0);
done:
	if (error) {
		kprintf("hammer_direct_read: failed @ %016llx\n",
			zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}

#if 0
/*
 * On completion of the BIO this callback must check the data CRC
 * and chain to the previous bio.
 */
static
void
hammer_io_direct_read_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;

	bp = nbio->bio_buf;
	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
		kprintf("HAMMER: data_crc error @%016llx/%d\n",
			nbio->bio_offset, bp->b_bufsize);
		if (hammer_debug_debug)
			Debugger("");
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}
	obio = pop_bio(nbio);
	biodone(obio);
}
#endif

/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 *
 * The BIO is associated with the specified record and RECF_DIRECT_IO
 * is set.  The record is added to its object.
 */
int
hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
		       struct bio *bio)
{
	hammer_btree_leaf_elm_t leaf = &record->leaf;
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	struct buf *bp;
	struct bio *nbio;
	char *ptr;
	int vol_no;
	int error;

	buf_offset = leaf->data_offset;

	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
	    leaf->data_len >= HAMMER_BUFSIZE) {
		/*
		 * We are using the vnode's bio to write directly to the
		 * media; any hammer_buffer at the same zone-X offset will
		 * now have stale data.
		 */
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);

		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			bp = bio->bio_buf;
			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
			/*
			hammer_del_buffers(hmp, buf_offset,
					   zone2_offset, bp->b_bufsize);
			*/

			/*
			 * Second level bio - cached zone2 offset.
			 *
			 * (We can put our bio_done function in either the
			 *  2nd or 3rd level).
			 */
			nbio = push_bio(bio);
			nbio->bio_offset = zone2_offset;
			nbio->bio_done = hammer_io_direct_write_complete;
			nbio->bio_caller_info1.ptr = record;
			record->zone2_offset = zone2_offset;
			record->flags |= HAMMER_RECF_DIRECT_IO |
					 HAMMER_RECF_DIRECT_INVAL;

			/*
			 * Third level bio - raw offset specific to the
			 * correct volume.
			 */
			zone2_offset &= HAMMER_OFF_SHORT_MASK;
			nbio = push_bio(nbio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   zone2_offset;
			hammer_stats_disk_write += bp->b_bufsize;
			vn_strategy(volume->devvp, nbio);
		}
		hammer_rel_volume(volume, 0);
	} else {
		/*
		 * Must fit in a standard HAMMER buffer.  In this case all
		 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
		 * does not need to be set-up.
		 */
		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) &
			  ~HAMMER_BUFMASK64) == 0);
		buffer = NULL;
		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
		if (error == 0) {
			bp = bio->bio_buf;
			bp->b_flags |= B_AGE;
			hammer_io_modify(&buffer->io, 1);
			bcopy(bp->b_data, ptr, leaf->data_len);
			hammer_io_modify_done(&buffer->io);
			hammer_rel_buffer(buffer, 0);
			bp->b_resid = 0;
			biodone(bio);
		}
	}
	if (error == 0) {
		/*
		 * The record is all setup now, add it.  Potential conflicts
		 * have already been dealt with.
		 */
		error = hammer_mem_add(record);
		KKASSERT(error == 0);
	} else {
		/*
		 * Major suckage occurred.
		 */
		kprintf("hammer_direct_write: failed @ %016llx\n",
			leaf->data_offset);
		bp = bio->bio_buf;
		bp->b_resid = 0;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		record->flags |= HAMMER_RECF_DELETED_FE;
		hammer_rel_mem_record(record);
	}
	return(error);
}

/*
 * On completion of the BIO this callback must disconnect
 * it from the hammer_record and chain to the previous bio.
 *
 * An I/O error forces the mount to read-only.  Data buffers
 * are not B_LOCKED like meta-data buffers are, so we have to
 * throw the buffer away to prevent the kernel from retrying.
 */
static
void
hammer_io_direct_write_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	hammer_record_t record = nbio->bio_caller_info1.ptr;

	bp = nbio->bio_buf;
	obio = pop_bio(nbio);
	if (bp->b_flags & B_ERROR) {
		hammer_critical_error(record->ip->hmp, record->ip,
				      bp->b_error,
				      "while writing bulk data");
		bp->b_flags |= B_INVAL;
	}
	biodone(obio);

	KKASSERT(record != NULL);
	KKASSERT(record->flags & HAMMER_RECF_DIRECT_IO);
	record->flags &= ~HAMMER_RECF_DIRECT_IO;
	if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
		record->flags &= ~HAMMER_RECF_DIRECT_WAIT;
		wakeup(&record->flags);
	}
}

/*
 * This is called before a record is either committed to the B-Tree
 * or destroyed, to resolve any associated direct-IO.
 *
 * (1) We must wait for any direct-IO related to the record to complete.
 *
 * (2) We must remove any buffer cache aliases for data accessed via
 *     leaf->data_offset or zone2_offset so non-direct-IO consumers
 *     (the mirroring and reblocking code) do not see stale data.
 */
void
hammer_io_direct_wait(hammer_record_t record)
{
	/*
	 * Wait for I/O to complete
	 */
	if (record->flags & HAMMER_RECF_DIRECT_IO) {
		crit_enter();
		while (record->flags & HAMMER_RECF_DIRECT_IO) {
			record->flags |= HAMMER_RECF_DIRECT_WAIT;
			tsleep(&record->flags, 0, "hmdiow", 0);
		}
		crit_exit();
	}

	/*
	 * Invalidate any related buffer cache aliases.
	 */
	if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
		KKASSERT(record->leaf.data_offset);
		hammer_del_buffers(record->ip->hmp,
				   record->leaf.data_offset,
				   record->zone2_offset,
				   record->leaf.data_len);
		record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
	}
}

/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 */
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
	struct hammer_inode_info iinfo;
	int zone;

	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
		return;
	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
		return;
	iinfo.obj_id = leaf->base.obj_id;
	iinfo.obj_asof = 0;	/* unused */
	iinfo.obj_localization = leaf->base.localization &
				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
	iinfo.u.leaf = leaf;
	hammer_scan_inode_snapshots(hmp, &iinfo,
				    hammer_io_direct_uncache_callback,
				    leaf);
}

static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t iinfo = data;
	hammer_off_t data_offset;
	hammer_off_t file_offset;
	struct vnode *vp;
	struct buf *bp;
	int blksize;

	if (ip->vp == NULL)
		return(0);
	data_offset = iinfo->u.leaf->data_offset;
	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
	blksize = iinfo->u.leaf->data_len;
	KKASSERT((blksize & HAMMER_BUFMASK) == 0);

	hammer_ref(&ip->lock);
	if (hammer_get_vnode(ip, &vp) == 0) {
		if ((bp = findblk(ip->vp, file_offset)) != NULL &&
		    bp->b_bio2.bio_offset != NOOFFSET) {
			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
			bp->b_bio2.bio_offset = NOOFFSET;
			brelse(bp);
		}
		vput(vp);
	}
	hammer_rel_inode(ip, 0);
	return(0);
}
1393