xref: /dflybsd-src/sys/vfs/hammer2/hammer2_chain.c (revision 7a27faded6bccbda68815bb82b3be97f731409b9)
1 /*
2  * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 /*
36  * This subsystem implements most of the core support functions for
37  * the hammer2_chain and hammer2_chain_core structures.
38  *
39  * Chains represent the filesystem media topology in-memory.  Any given
40  * chain can represent an inode, indirect block, data, or other types
41  * of blocks.
42  *
43  * This module provides APIs for direct and indirect block searches,
44  * iterations, recursions, creation, deletion, replication, and snapshot
45  * views (used by the flush and snapshot code).
46  *
47  * Generally speaking any modification made to a chain must propagate all
48  * the way back to the volume header, issuing copy-on-write updates to the
49  * blockref tables all the way up.  Any chain except the volume header itself
50  * can be flushed to disk at any time, in any order.  None of it matters
51  * until we get to the point where we want to synchronize the volume header
52  * (see the flush code).
53  *
54  * The chain structure supports snapshot views in time, which are primarily
55  * used until the related data and meta-data is flushed to allow the
56  * filesystem to make snapshots without requiring it to first flush,
57  * and to allow the filesystem flush and modify the filesystem concurrently
58  * with minimal or no stalls.
59  */
60 #include <sys/cdefs.h>
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/types.h>
64 #include <sys/lock.h>
65 #include <sys/kern_syscall.h>
66 #include <sys/uuid.h>
67 
68 #include "hammer2.h"
69 
70 static int hammer2_indirect_optimize;	/* XXX SYSCTL */
71 
72 static hammer2_chain_t *hammer2_chain_create_indirect(
73 		hammer2_trans_t *trans, hammer2_chain_t *parent,
74 		hammer2_key_t key, int keybits, int for_type, int *errorp);
75 static void adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
76 
77 /*
78  * We use a red-black tree to guarantee safe lookups under shared locks.
79  *
80  * Chains can be overloaded onto the same index, creating a different
81  * view of a blockref table based on a transaction id.  The RBTREE
82  * deconflicts the view by sub-sorting on delete_tid.
83  *
84  * NOTE: Any 'current' chain which is not yet deleted will have a
85  *	 delete_tid of HAMMER2_MAX_TID (0xFFF....FFFLLU).
86  */
87 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
88 
89 int
90 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
91 {
92 	if (chain1->index < chain2->index)
93 		return(-1);
94 	if (chain1->index > chain2->index)
95 		return(1);
96 	if (chain1->delete_tid < chain2->delete_tid)
97 		return(-1);
98 	if (chain1->delete_tid > chain2->delete_tid)
99 		return(1);
100 	return(0);
101 }
102 
103 static __inline
104 int
105 hammer2_isclusterable(hammer2_chain_t *chain)
106 {
107 	if (hammer2_cluster_enable) {
108 		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
109 		    chain->bref.type == HAMMER2_BREF_TYPE_INODE ||
110 		    chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
111 			return(1);
112 		}
113 	}
114 	return(0);
115 }
116 
117 /*
118  * Recursively set the SUBMODIFIED flag up to the root starting at chain's
119  * parent.  SUBMODIFIED is not set in chain itself.
120  *
121  * This function only operates on current-time transactions and is not
122  * used during flushes.  Instead, the flush code manages the flag itself.
123  */
void
hammer2_chain_setsubmod(hammer2_trans_t *trans, hammer2_chain_t *chain)
{
	hammer2_chain_core_t *above;

	/*
	 * Flush transactions manage SUBMODIFIED themselves, do nothing
	 * here for them.
	 */
	if (trans->flags & HAMMER2_TRANS_ISFLUSH)
		return;

	/*
	 * Walk upward one core at a time.  For each core, flag a parent
	 * chain under the core spinlock, skipping parents for which
	 * refactor_test is true (NOTE(review): presumably these are
	 * deleted/refactored duplicates — confirm against
	 * hammer2_chain_refactor_test), then continue from that parent.
	 */
	while ((above = chain->above) != NULL) {
		spin_lock(&above->cst.spin);
		chain = above->first_parent;
		while (hammer2_chain_refactor_test(chain, 1))
			chain = chain->next_parent;
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_SUBMODIFIED);
		spin_unlock(&above->cst.spin);
	}
}
140 
141 /*
142  * Allocate a new disconnected chain element representing the specified
143  * bref.  chain->refs is set to 1 and the passed bref is copied to
144  * chain->bref.  chain->bytes is derived from the bref.
145  *
146  * chain->core is NOT allocated and the media data and bp pointers are left
147  * NULL.  The caller must call chain_core_alloc() to allocate or associate
148  * a core with the chain.
149  *
150  * NOTE: Returns a referenced but unlocked (because there is no core) chain.
151  */
hammer2_chain_t *
hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_trans_t *trans,
		    hammer2_blockref_t *bref)
{
	hammer2_chain_t *chain;
	/* byte size is encoded as a radix in the low bits of data_off */
	u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);

	/*
	 * Construct the appropriate system structure.
	 */
	switch(bref->type) {
	case HAMMER2_BREF_TYPE_INODE:
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		chain = kmalloc(sizeof(*chain), hmp->mchain, M_WAITOK | M_ZERO);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
	case HAMMER2_BREF_TYPE_FREEMAP:
		/*
		 * Volume/freemap headers are embedded in the mount
		 * structure and must never be allocated via this path.
		 */
		chain = NULL;
		panic("hammer2_chain_alloc volume type illegal for op");
	default:
		chain = NULL;
		panic("hammer2_chain_alloc: unrecognized blockref type: %d",
		      bref->type);
	}

	/*
	 * Initialize the chain.  delete_tid = MAX_TID marks a live
	 * (not-yet-deleted) chain; modify_tid is inherited from the
	 * transaction when one is supplied.
	 */
	chain->hmp = hmp;
	chain->bref = *bref;
	chain->index = -1;		/* not yet assigned */
	chain->bytes = bytes;
	chain->refs = 1;
	chain->flags = HAMMER2_CHAIN_ALLOCATED;
	chain->delete_tid = HAMMER2_MAX_TID;
	if (trans)
		chain->modify_tid = trans->sync_tid;

	return (chain);
}
192 
193 /*
194  * Associate an existing core with the chain or allocate a new core.
195  *
196  * The core is not locked.  No additional refs on the chain are made.
197  */
void
hammer2_chain_core_alloc(hammer2_chain_t *chain, hammer2_chain_core_t *core)
{
	hammer2_chain_t **scanp;

	KKASSERT(chain->core == NULL);
	KKASSERT(chain->next_parent == NULL);

	if (core == NULL) {
		/*
		 * Allocate a fresh core; this chain becomes its sole
		 * (first) parent.
		 */
		core = kmalloc(sizeof(*core), chain->hmp->mchain,
			       M_WAITOK | M_ZERO);
		RB_INIT(&core->rbtree);
		core->sharecnt = 1;
		chain->core = core;
		ccms_cst_init(&core->cst, chain);
		core->first_parent = chain;
	} else {
		/*
		 * Share an existing core.  Append the chain to the tail
		 * of the core's singly-linked parent list under the core
		 * spinlock.  Being linked via a predecessor's
		 * next_parent field holds an implied ref on the chain.
		 */
		atomic_add_int(&core->sharecnt, 1);
		chain->core = core;
		spin_lock(&core->cst.spin);
		if (core->first_parent == NULL) {
			core->first_parent = chain;
		} else {
			scanp = &core->first_parent;
			while (*scanp)
				scanp = &(*scanp)->next_parent;
			*scanp = chain;
			hammer2_chain_ref(chain);	/* next_parent link */
		}
		spin_unlock(&core->cst.spin);
	}
}
230 
231 /*
232  * Add a reference to a chain element, preventing its destruction.
233  */
void
hammer2_chain_ref(hammer2_chain_t *chain)
{
	/* lockless; pairs with the cmpset-based hammer2_chain_drop() */
	atomic_add_int(&chain->refs, 1);
}
239 
240 /*
241  * Drop the caller's reference to the chain.  When the ref count drops to
242  * zero this function will disassociate the chain from its parent and
243  * deallocate it, then recursively drop the parent using the implied ref
244  * from the chain's chain->parent.
245  *
246  * WARNING! Just because we are able to deallocate a chain doesn't mean
247  *	    that chain->core->rbtree is empty.  There can still be a sharecnt
248  *	    on chain->core and RBTREE entries that refer to different parents.
249  */
250 static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_chain_t *chain);
251 
void
hammer2_chain_drop(hammer2_chain_t *chain)
{
	u_int refs;
	u_int need = 0;

#if 1
	/*
	 * Sanity check: the MOVED and MODIFIED flags each hold an
	 * implied ref, so the caller's ref must be in excess of those.
	 */
	if (chain->flags & HAMMER2_CHAIN_MOVED)
		++need;
	if (chain->flags & HAMMER2_CHAIN_MODIFIED)
		++need;
	KKASSERT(chain->refs > need);
#endif

	/*
	 * A 1->0 transition is handled by lastdrop(), which may hand
	 * back another chain to drop (recursion converted to iteration
	 * here, or the same chain again if its atomic op raced).  Any
	 * other transition is a simple decrement, retried on cmpset
	 * failure.
	 */
	while (chain) {
		refs = chain->refs;
		cpu_ccfence();
		KKASSERT(refs > 0);

		if (refs == 1) {
			chain = hammer2_chain_lastdrop(chain);
		} else {
			if (atomic_cmpset_int(&chain->refs, refs, refs - 1))
				break;
			/* retry the same chain */
		}
	}
}
280 
281 /*
282  * Safe handling of the 1->0 transition on chain.  Returns a chain for
283  * recursive drop or NULL, possibly returning the same chain if the atomic
284  * op fails.
285  *
286  * The cst spinlock is allowed nest child-to-parent (not parent-to-child).
287  */
static
hammer2_chain_t *
hammer2_chain_lastdrop(hammer2_chain_t *chain)
{
	hammer2_mount_t *hmp;
	hammer2_chain_core_t *above;
	hammer2_chain_core_t *core;
	hammer2_chain_t *rdrop1;
	hammer2_chain_t *rdrop2;

	/*
	 * Spinlock the core and check to see if it is empty.  If it is
	 * not empty we leave chain intact with refs == 0.
	 */
	if ((core = chain->core) != NULL) {
		spin_lock(&core->cst.spin);
		if (RB_ROOT(&core->rbtree)) {
			if (atomic_cmpset_int(&chain->refs, 1, 0)) {
				/* 1->0 transition successful */
				spin_unlock(&core->cst.spin);
				return(NULL);
			} else {
				/* 1->0 transition failed, retry */
				spin_unlock(&core->cst.spin);
				return(chain);
			}
		}
		/* core rbtree empty: fall through, core spinlock held */
	}

	hmp = chain->hmp;
	rdrop1 = NULL;
	rdrop2 = NULL;

	/*
	 * Spinlock the parent and try to drop the last ref.  On success
	 * remove chain from its parent.
	 *
	 * NOTE: nesting above->cst.spin inside core->cst.spin here is
	 *	 the allowed child-to-parent direction (see header
	 *	 comment).
	 */
	if ((above = chain->above) != NULL) {
		spin_lock(&above->cst.spin);
		if (!atomic_cmpset_int(&chain->refs, 1, 0)) {
			/* 1->0 transition failed */
			spin_unlock(&above->cst.spin);
			if (core)
				spin_unlock(&core->cst.spin);
			return(chain);
			/* stop */
		}

		/*
		 * 1->0 transition successful
		 */
		KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
		RB_REMOVE(hammer2_chain_tree, &above->rbtree, chain);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
		chain->above = NULL;

		/*
		 * Calculate a chain to return for a recursive drop.
		 *
		 * XXX this needs help, we have a potential deep-recursion
		 * problem which we try to address but sometimes we wind up
		 * with two elements that have to be dropped.
		 *
		 * If the chain has an associated core with refs at 0
		 * the chain must be the first in the core's linked list
		 * by definition, and we will recursively drop the ref
		 * implied by the chain->next_parent field.
		 *
		 * Otherwise if the rbtree containing chain is empty we try
		 * to recursively drop our parent (only the first one could
		 * possibly have refs == 0 since the rest are linked via
		 * next_parent).
		 *
		 * Otherwise we try to recursively drop a sibling.
		 */
		if (chain->next_parent) {
			KKASSERT(core != NULL);
			rdrop1 = chain->next_parent;
		}
		if (RB_EMPTY(&above->rbtree)) {
			rdrop2 = above->first_parent;
			/*
			 * Only claim the parent if we can atomically
			 * revive it from refs 0 -> 1 ourselves.
			 */
			if (rdrop2 == NULL || rdrop2->refs ||
			    atomic_cmpset_int(&rdrop2->refs, 0, 1) == 0) {
				rdrop2 = NULL;
			}
		} else {
			rdrop2 = RB_ROOT(&above->rbtree);
			if (atomic_cmpset_int(&rdrop2->refs, 0, 1) == 0)
				rdrop2 = NULL;
		}
		spin_unlock(&above->cst.spin);
		above = NULL;	/* safety */
	} else {
		if (chain->next_parent) {
			KKASSERT(core != NULL);
			rdrop1 = chain->next_parent;
		}
	}

	/*
	 * We still have the core spinlock (if core is non-NULL).  The
	 * above spinlock is gone.
	 */
	if (core) {
		KKASSERT(core->first_parent == chain);
		if (chain->next_parent) {
			/* parent should already be set */
			KKASSERT(rdrop1 == chain->next_parent);
		}
		/* unlink chain from the core's parent list */
		core->first_parent = chain->next_parent;
		chain->next_parent = NULL;
		chain->core = NULL;

		if (atomic_fetchadd_int(&core->sharecnt, -1) == 1) {
			/*
			 * On the 1->0 transition of core we can destroy
			 * it.
			 */
			spin_unlock(&core->cst.spin);
			KKASSERT(core->cst.count == 0);
			KKASSERT(core->cst.upgrade == 0);
			kfree(core, hmp->mchain);
		} else {
			spin_unlock(&core->cst.spin);
		}
		core = NULL;	/* safety */
	}

	/*
	 * All spin locks are gone, finish freeing stuff.
	 */
	KKASSERT((chain->flags & (HAMMER2_CHAIN_MOVED |
				  HAMMER2_CHAIN_MODIFIED)) == 0);

	/*
	 * Release any embedded (copied) data.  Only inode and freemap
	 * leaf chains hold kmalloc'd embedded data; other types either
	 * point into a (already released) device buffer or into the
	 * mount structure.
	 */
	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
	case HAMMER2_BREF_TYPE_FREEMAP:
		chain->data = NULL;
		break;
	case HAMMER2_BREF_TYPE_INODE:
		if (chain->data) {
			kfree(chain->data, hmp->mchain);
			chain->data = NULL;
		}
		break;
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		if (chain->data) {
			kfree(chain->data, hmp->mchain);
			chain->data = NULL;
		}
		break;
	default:
		KKASSERT(chain->data == NULL);
		break;
	}

	KKASSERT(chain->bp == NULL);
	chain->hmp = NULL;

	if (chain->flags & HAMMER2_CHAIN_ALLOCATED) {
		chain->flags &= ~HAMMER2_CHAIN_ALLOCATED;
		kfree(chain, hmp->mchain);
	}

	/*
	 * Hand at most one chain back for iterative dropping; if both
	 * exist, recurse on one and return the other.
	 */
	if (rdrop1 && rdrop2) {
		hammer2_chain_drop(rdrop1);
		return(rdrop2);
	} else if (rdrop1)
		return(rdrop1);
	else
		return(rdrop2);
}
459 
460 /*
461  * Ref and lock a chain element, acquiring its data with I/O if necessary,
462  * and specify how you would like the data to be resolved.
463  *
464  * Returns 0 on success or an error code if the data could not be acquired.
465  * The chain element is locked on return regardless of whether an error
466  * occurred or not.
467  *
468  * The lock is allowed to recurse, multiple locking ops will aggregate
469  * the requested resolve types.  Once data is assigned it will not be
470  * removed until the last unlock.
471  *
472  * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
473  *			   (typically used to avoid device/logical buffer
474  *			    aliasing for data)
475  *
476  * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
477  *			   the INITIAL-create state (indirect blocks only).
478  *
479  *			   Do not resolve data elements for DATA chains.
480  *			   (typically used to avoid device/logical buffer
481  *			    aliasing for data)
482  *
483  * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
484  *
485  * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise
486  *			   it will be locked exclusive.
487  *
488  * NOTE: Embedded elements (volume header, inodes) are always resolved
489  *	 regardless.
490  *
491  * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
492  *	 element will instantiate and zero its buffer, and flush it on
493  *	 release.
494  *
495  * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
496  *	 so as not to instantiate a device buffer, which could alias against
497  *	 a logical file buffer.  However, if ALWAYS is specified the
498  *	 device buffer will be instantiated anyway.
499  *
500  * WARNING! If data must be fetched a shared lock will temporarily be
501  *	    upgraded to exclusive.  However, a deadlock can occur if
502  *	    the caller owns more than one shared lock.
503  */
int
hammer2_chain_lock(hammer2_chain_t *chain, int how)
{
	hammer2_mount_t *hmp;
	hammer2_chain_core_t *core;
	hammer2_blockref_t *bref;
	hammer2_off_t pbase;	/* physical buffer base offset */
	hammer2_off_t pmask;	/* physical buffer size mask */
	hammer2_off_t peof;	/* clustered-read end offset */
	ccms_state_t ostate;
	size_t boff;		/* chain data offset within the buffer */
	size_t psize;		/* physical buffer size */
	int error;
	char *bdata;

	/*
	 * Ref and lock the element.  Recursive locks are allowed.
	 */
	if ((how & HAMMER2_RESOLVE_NOREF) == 0)
		hammer2_chain_ref(chain);
	atomic_add_int(&chain->lockcnt, 1);

	hmp = chain->hmp;
	KKASSERT(hmp != NULL);

	/*
	 * Get the appropriate lock.
	 */
	core = chain->core;
	if (how & HAMMER2_RESOLVE_SHARED)
		ccms_thread_lock(&core->cst, CCMS_STATE_SHARED);
	else
		ccms_thread_lock(&core->cst, CCMS_STATE_EXCLUSIVE);

	/*
	 * If we already have a valid data pointer no further action is
	 * necessary.
	 */
	if (chain->data)
		return (0);

	/*
	 * Do we have to resolve the data?
	 */
	switch(how & HAMMER2_RESOLVE_MASK) {
	case HAMMER2_RESOLVE_NEVER:
		return(0);
	case HAMMER2_RESOLVE_MAYBE:
		if (chain->flags & HAMMER2_CHAIN_INITIAL)
			return(0);
		if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
			return(0);
#if 0
		if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE)
			return(0);
#endif
		if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF)
			return(0);
		/* fall through */
	case HAMMER2_RESOLVE_ALWAYS:
		break;
	}

	/*
	 * Upgrade to an exclusive lock so we can safely manipulate the
	 * buffer cache.  If another thread got to it before us we
	 * can just return.
	 */
	ostate = ccms_thread_lock_upgrade(&core->cst);
	if (chain->data) {
		ccms_thread_lock_downgrade(&core->cst, ostate);
		return (0);
	}

	/*
	 * We must resolve to a device buffer, either by issuing I/O or
	 * by creating a zero-fill element.  We do not mark the buffer
	 * dirty when creating a zero-fill element (the hammer2_chain_modify()
	 * API must still be used to do that).
	 *
	 * The device buffer is variable-sized in powers of 2 down
	 * to HAMMER2_MIN_ALLOC (typically 1K).  A 64K physical storage
	 * chunk always contains buffers of the same size. (XXX)
	 *
	 * The minimum physical IO size may be larger than the variable
	 * block size.
	 */
	bref = &chain->bref;

	psize = hammer2_devblksize(chain->bytes);
	pmask = (hammer2_off_t)psize - 1;
	pbase = bref->data_off & ~pmask;
	boff = bref->data_off & (HAMMER2_OFF_MASK & pmask);
	KKASSERT(pbase != 0);
	peof = (pbase + HAMMER2_SEGMASK64) & ~HAMMER2_SEGMASK64;

	/*
	 * The getblk() optimization can only be used on newly created
	 * elements if the physical block size matches the request,
	 * since INITIAL chains have no media backing to read.
	 */
	if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
	    chain->bytes == psize) {
		chain->bp = getblk(hmp->devvp, pbase, psize, 0, 0);
		error = 0;
	} else if (hammer2_isclusterable(chain)) {
		error = cluster_read(hmp->devvp, peof, pbase, psize,
				     psize, HAMMER2_PBUFSIZE*4,
				     &chain->bp);
		adjreadcounter(&chain->bref, chain->bytes);
	} else {
		error = bread(hmp->devvp, pbase, psize, &chain->bp);
		adjreadcounter(&chain->bref, chain->bytes);
	}

	if (error) {
		kprintf("hammer2_chain_get: I/O error %016jx: %d\n",
			(intmax_t)pbase, error);
		bqrelse(chain->bp);
		chain->bp = NULL;
		ccms_thread_lock_downgrade(&core->cst, ostate);
		return (error);
	}

	/*
	 * Zero the data area if the chain is in the INITIAL-create state.
	 * Mark the buffer for bdwrite().  This clears the INITIAL state
	 * but does not mark the chain modified.
	 */
	bdata = (char *)chain->bp->b_data + boff;
	if (chain->flags & HAMMER2_CHAIN_INITIAL) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		bzero(bdata, chain->bytes);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
	}

	/*
	 * Setup the data pointer, either pointing it to an embedded data
	 * structure and copying the data from the buffer, or pointing it
	 * into the buffer.
	 *
	 * The buffer is not retained when copying to an embedded data
	 * structure in order to avoid potential deadlocks or recursions
	 * on the same physical buffer.
	 */
	switch (bref->type) {
	case HAMMER2_BREF_TYPE_VOLUME:
	case HAMMER2_BREF_TYPE_FREEMAP:
		/*
		 * Copy data from bp to embedded buffer
		 */
		panic("hammer2_chain_lock: called on unresolved volume header");
#if 0
		/* NOT YET */
		KKASSERT(pbase == 0);
		KKASSERT(chain->bytes == HAMMER2_PBUFSIZE);
		bcopy(bdata, &hmp->voldata, chain->bytes);
		chain->data = (void *)&hmp->voldata;
		bqrelse(chain->bp);
		chain->bp = NULL;
#endif
		break;
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Copy data from bp to embedded buffer, do not retain the
		 * device buffer.
		 */
		KKASSERT(chain->bytes == sizeof(chain->data->ipdata));
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
		chain->data = kmalloc(sizeof(chain->data->ipdata),
				      hmp->mchain, M_WAITOK | M_ZERO);
		bcopy(bdata, &chain->data->ipdata, chain->bytes);
		bqrelse(chain->bp);
		chain->bp = NULL;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/*
		 * Same embedded-copy treatment as inodes.
		 */
		KKASSERT(chain->bytes == sizeof(chain->data->bmdata));
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
		chain->data = kmalloc(sizeof(chain->data->bmdata),
				      hmp->mchain, M_WAITOK | M_ZERO);
		bcopy(bdata, &chain->data->bmdata, chain->bytes);
		bqrelse(chain->bp);
		chain->bp = NULL;
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	default:
		/*
		 * Point data at the device buffer and leave bp intact.
		 */
		chain->data = (void *)bdata;
		break;
	}

	/*
	 * Make sure the bp is not specifically owned by this thread before
	 * restoring to a possibly shared lock, so another hammer2 thread
	 * can release it.
	 */
	if (chain->bp)
		BUF_KERNPROC(chain->bp);
	ccms_thread_lock_downgrade(&core->cst, ostate);
	return (0);
}
708 
709 /*
710  * Asynchronously read the device buffer (dbp) and execute the specified
711  * callback.  The caller should pass-in a locked chain (shared lock is ok).
712  * The function is responsible for unlocking the chain and for disposing
713  * of dbp.
714  *
715  * NOTE!  A NULL dbp (but non-NULL data) will be passed to the function
716  *	  if the dbp is integrated into the chain, because we do not want
717  *	  the caller to dispose of dbp in that situation.
718  */
719 static void hammer2_chain_load_async_callback(struct bio *bio);
720 
void
hammer2_chain_load_async(hammer2_chain_t *chain,
	void (*func)(hammer2_chain_t *, struct buf *, char *, void *),
	void *arg)
{
	hammer2_cbinfo_t *cbinfo;
	hammer2_mount_t *hmp;
	hammer2_blockref_t *bref;
	hammer2_off_t pbase;	/* physical buffer base offset */
	hammer2_off_t pmask;	/* physical buffer size mask */
	hammer2_off_t peof;	/* clustered-read end offset */
	struct buf *dbp;
	size_t boff;		/* chain data offset within the buffer */
	size_t psize;		/* physical buffer size */
	char *bdata;

	/*
	 * Fast path: data already resolved; invoke the callback
	 * synchronously with a NULL dbp (see NOTE above).
	 */
	if (chain->data) {
		func(chain, NULL, (char *)chain->data, arg);
		return;
	}

	/*
	 * We must resolve to a device buffer, either by issuing I/O or
	 * by creating a zero-fill element.  We do not mark the buffer
	 * dirty when creating a zero-fill element (the hammer2_chain_modify()
	 * API must still be used to do that).
	 *
	 * The device buffer is variable-sized in powers of 2 down
	 * to HAMMER2_MIN_ALLOC (typically 1K).  A 64K physical storage
	 * chunk always contains buffers of the same size. (XXX)
	 *
	 * The minimum physical IO size may be larger than the variable
	 * block size.
	 */
	bref = &chain->bref;

	psize = hammer2_devblksize(chain->bytes);
	pmask = (hammer2_off_t)psize - 1;
	pbase = bref->data_off & ~pmask;
	boff = bref->data_off & (HAMMER2_OFF_MASK & pmask);
	KKASSERT(pbase != 0);
	peof = (pbase + HAMMER2_SEGMASK64) & ~HAMMER2_SEGMASK64;

	hmp = chain->hmp;

	/*
	 * The getblk() optimization can only be used on newly created
	 * elements if the physical block size matches the request.
	 * No media read is needed; zero-fill and call back synchronously.
	 */
	if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
	    chain->bytes == psize) {
		dbp = getblk(hmp->devvp, pbase, psize, 0, 0);
		/*atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);*/
		bdata = (char *)dbp->b_data + boff;
		bzero(bdata, chain->bytes);
		/*atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);*/
		func(chain, dbp, bdata, arg);
		bqrelse(dbp);
		return;
	}

	/*
	 * Issue the asynchronous clustered read; the callback info is
	 * freed by hammer2_chain_load_async_callback().
	 */
	adjreadcounter(&chain->bref, chain->bytes);
	cbinfo = kmalloc(sizeof(*cbinfo), hmp->mchain, M_INTWAIT | M_ZERO);
	cbinfo->chain = chain;
	cbinfo->func = func;
	cbinfo->arg = arg;
	cbinfo->boff = boff;

	cluster_readcb(hmp->devvp, peof, pbase, psize,
		HAMMER2_PBUFSIZE*4, HAMMER2_PBUFSIZE*4,
		hammer2_chain_load_async_callback, cbinfo);
}
793 
794 static void
795 hammer2_chain_load_async_callback(struct bio *bio)
796 {
797 	hammer2_cbinfo_t *cbinfo;
798 	hammer2_mount_t *hmp;
799 	struct buf *dbp;
800 	char *data;
801 
802 	/*
803 	 * Nobody is waiting for bio/dbp to complete, we are
804 	 * responsible for handling the biowait() equivalent
805 	 * on dbp which means clearing BIO_DONE and BIO_SYNC
806 	 * and calling bpdone() if it hasn't already been called
807 	 * to restore any covered holes in the buffer's backing
808 	 * store.
809 	 */
810 	dbp = bio->bio_buf;
811 	if ((bio->bio_flags & BIO_DONE) == 0)
812 		bpdone(dbp, 0);
813 	bio->bio_flags &= ~(BIO_DONE | BIO_SYNC);
814 
815 	/*
816 	 * Extract the auxillary info and issue the callback.
817 	 * Finish up with the dbp after it returns.
818 	 */
819 	cbinfo = bio->bio_caller_info1.ptr;
820 	/*ccms_thread_lock_setown(cbinfo->chain->core);*/
821 	data = dbp->b_data + cbinfo->boff;
822 	hmp = cbinfo->chain->hmp;
823 
824 	cbinfo = bio->bio_caller_info1.ptr;
825 	if (cbinfo->chain->flags & HAMMER2_CHAIN_INITIAL)
826 		bzero(data, cbinfo->chain->bytes);
827 	cbinfo->func(cbinfo->chain, dbp, data, cbinfo->arg);
828 	/* cbinfo->chain is stale now */
829 	bqrelse(dbp);
830 	kfree(cbinfo, hmp->mchain);
831 }
832 
833 /*
834  * Unlock and deref a chain element.
835  *
836  * On the last lock release any non-embedded data (chain->bp) will be
837  * retired.
838  */
839 void
840 hammer2_chain_unlock(hammer2_chain_t *chain)
841 {
842 	hammer2_chain_core_t *core = chain->core;
843 	ccms_state_t ostate;
844 	long *counterp;
845 	u_int lockcnt;
846 
847 	/*
848 	 * The core->cst lock can be shared across several chains so we
849 	 * need to track the per-chain lockcnt separately.
850 	 *
851 	 * If multiple locks are present (or being attempted) on this
852 	 * particular chain we can just unlock, drop refs, and return.
853 	 *
854 	 * Otherwise fall-through on the 1->0 transition.
855 	 */
856 	for (;;) {
857 		lockcnt = chain->lockcnt;
858 		KKASSERT(lockcnt > 0);
859 		cpu_ccfence();
860 		if (lockcnt > 1) {
861 			if (atomic_cmpset_int(&chain->lockcnt,
862 					      lockcnt, lockcnt - 1)) {
863 				ccms_thread_unlock(&core->cst);
864 				hammer2_chain_drop(chain);
865 				return;
866 			}
867 		} else {
868 			if (atomic_cmpset_int(&chain->lockcnt, 1, 0))
869 				break;
870 		}
871 		/* retry */
872 	}
873 
874 	/*
875 	 * On the 1->0 transition we upgrade the core lock (if necessary)
876 	 * to exclusive for terminal processing.  If after upgrading we find
877 	 * that lockcnt is non-zero, another thread is racing us and will
878 	 * handle the unload for us later on, so just cleanup and return
879 	 * leaving the data/bp intact
880 	 *
881 	 * Otherwise if lockcnt is still 0 it is possible for it to become
882 	 * non-zero and race, but since we hold the core->cst lock
883 	 * exclusively all that will happen is that the chain will be
884 	 * reloaded after we unload it.
885 	 */
886 	ostate = ccms_thread_lock_upgrade(&core->cst);
887 	if (chain->lockcnt) {
888 		ccms_thread_unlock_upgraded(&core->cst, ostate);
889 		hammer2_chain_drop(chain);
890 		return;
891 	}
892 
893 	/*
894 	 * Shortcut the case if the data is embedded or not resolved.
895 	 *
896 	 * Do NOT NULL out chain->data (e.g. inode data), it might be
897 	 * dirty.
898 	 *
899 	 * The DIRTYBP flag is non-applicable in this situation and can
900 	 * be cleared to keep the flags state clean.
901 	 */
902 	if (chain->bp == NULL) {
903 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
904 		ccms_thread_unlock_upgraded(&core->cst, ostate);
905 		hammer2_chain_drop(chain);
906 		return;
907 	}
908 
909 	/*
910 	 * Statistics
911 	 */
912 	if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) == 0) {
913 		;
914 	} else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
915 		switch(chain->bref.type) {
916 		case HAMMER2_BREF_TYPE_DATA:
917 			counterp = &hammer2_ioa_file_write;
918 			break;
919 		case HAMMER2_BREF_TYPE_INODE:
920 			counterp = &hammer2_ioa_meta_write;
921 			break;
922 		case HAMMER2_BREF_TYPE_INDIRECT:
923 			counterp = &hammer2_ioa_indr_write;
924 			break;
925 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
926 		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
927 			counterp = &hammer2_ioa_fmap_write;
928 			break;
929 		default:
930 			counterp = &hammer2_ioa_volu_write;
931 			break;
932 		}
933 		*counterp += chain->bytes;
934 	} else {
935 		switch(chain->bref.type) {
936 		case HAMMER2_BREF_TYPE_DATA:
937 			counterp = &hammer2_iod_file_write;
938 			break;
939 		case HAMMER2_BREF_TYPE_INODE:
940 			counterp = &hammer2_iod_meta_write;
941 			break;
942 		case HAMMER2_BREF_TYPE_INDIRECT:
943 			counterp = &hammer2_iod_indr_write;
944 			break;
945 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
946 		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
947 			counterp = &hammer2_iod_fmap_write;
948 			break;
949 		default:
950 			counterp = &hammer2_iod_volu_write;
951 			break;
952 		}
953 		*counterp += chain->bytes;
954 	}
955 
956 	/*
957 	 * Clean out the bp.
958 	 *
959 	 * If a device buffer was used for data be sure to destroy the
960 	 * buffer when we are done to avoid aliases (XXX what about the
961 	 * underlying VM pages?).
962 	 *
963 	 * NOTE: Freemap leaf's use reserved blocks and thus no aliasing
964 	 *	 is possible.
965 	 */
966 #if 0
967 	/*
968 	 * XXX our primary cache is now the block device, not
969 	 * the logical file. don't release the buffer.
970 	 */
971 	if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
972 		chain->bp->b_flags |= B_RELBUF;
973 #endif
974 
	/*
	 * The DIRTYBP flag tracks whether we have to bdwrite() the buffer
	 * or not.  The flag will get re-set when chain_modify() is called,
	 * even if MODIFIED is already set, allowing the OS to retire the
	 * buffer independent of a hammer2 flush.
	 */
981 	chain->data = NULL;
982 	if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
983 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
984 		if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
985 			atomic_clear_int(&chain->flags,
986 					 HAMMER2_CHAIN_IOFLUSH);
987 			chain->bp->b_flags |= B_RELBUF;
988 			cluster_awrite(chain->bp);
989 		} else {
990 			chain->bp->b_flags |= B_CLUSTEROK;
991 			bdwrite(chain->bp);
992 		}
993 	} else {
994 		if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
995 			atomic_clear_int(&chain->flags,
996 					 HAMMER2_CHAIN_IOFLUSH);
997 			chain->bp->b_flags |= B_RELBUF;
998 			brelse(chain->bp);
999 		} else {
1000 			/* bp might still be dirty */
1001 			bqrelse(chain->bp);
1002 		}
1003 	}
1004 	chain->bp = NULL;
1005 	ccms_thread_unlock_upgraded(&core->cst, ostate);
1006 	hammer2_chain_drop(chain);
1007 }
1008 
1009 /*
1010  * Resize the chain's physical storage allocation in-place.  This may
1011  * replace the passed-in chain with a new chain.
1012  *
1013  * Chains can be resized smaller without reallocating the storage.
1014  * Resizing larger will reallocate the storage.
1015  *
1016  * Must be passed an exclusively locked parent and chain, returns a new
1017  * exclusively locked chain at the same index and unlocks the old chain.
1018  * Flushes the buffer if necessary.
1019  *
1020  * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
1021  * to avoid instantiating a device buffer that conflicts with the vnode
1022  * data buffer.  That is, the passed-in bp is a logical buffer, whereas
1023  * any chain-oriented bp would be a device buffer.
1024  *
1025  * XXX flags currently ignored, uses chain->bp to detect data/no-data.
1026  * XXX return error if cannot resize.
1027  */
void
hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
		     struct buf *bp,
		     hammer2_chain_t *parent, hammer2_chain_t **chainp,
		     int nradix, int flags)
{
	hammer2_mount_t *hmp;
	hammer2_chain_t *chain;
	hammer2_off_t pbase;
	size_t obytes;		/* old allocation size in bytes */
	size_t nbytes;		/* new allocation size (1 << nradix) */
	size_t bbytes;
	int boff;

	chain = *chainp;
	hmp = chain->hmp;

	/*
	 * Only data and indirect blocks can be resized for now.
	 * (The volu root, inodes, and freemap elements use a fixed size).
	 */
	KKASSERT(chain != &hmp->vchain);
	KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
		 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);

	/*
	 * Nothing to do if the element is already the proper size
	 */
	obytes = chain->bytes;
	nbytes = 1U << nradix;
	if (obytes == nbytes)
		return;

	/*
	 * Delete the old chain and duplicate it at the same (parent, index),
	 * returning a new chain.  This allows the old chain to still be
	 * used by the flush code.  Duplication occurs in-place.
	 *
	 * The parent does not have to be locked for the delete/duplicate call,
	 * but is in this particular code path.
	 *
	 * NOTE: If we are not crossing a synchronization point the
	 *	 duplication code will simply reuse the existing chain
	 *	 structure.
	 */
	hammer2_chain_delete_duplicate(trans, &chain, 0);

	/*
	 * Set MODIFIED and add a chain ref to prevent destruction.  Both
	 * modified flags share the same ref.  (duplicated chains do not
	 * start out MODIFIED unless possibly if the duplication code
	 * decided to reuse the existing chain as-is).
	 *
	 * If the chain is already marked MODIFIED then we can safely
	 * return the previous allocation to the pool without having to
	 * worry about snapshots.  XXX check flush synchronization.
	 */
	if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_chain_ref(chain);
	}

	/*
	 * Relocate the block, even if making it smaller (because different
	 * block sizes may be in different regions).
	 */
	hammer2_freemap_alloc(trans, chain->hmp, &chain->bref, nbytes);
	chain->bytes = nbytes;
	/*ip->delta_dcount += (ssize_t)(nbytes - obytes);*/ /* XXX atomic */

	/*
	 * The device buffer may be larger than the allocation size.
	 *
	 * NOTE(review): bbytes/pbase/boff are computed here but not used
	 *		 below (the chain->bp == NULL assertion short-circuits
	 *		 any buffer work) — likely vestigial; confirm before
	 *		 removing.
	 */
	bbytes = hammer2_devblksize(chain->bytes);
	pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
	boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);

	/*
	 * For now just support it on DATA chains (and not on indirect
	 * blocks).
	 */
	KKASSERT(chain->bp == NULL);

	/*
	 * Make sure the chain is marked MOVED and SUBMOD is set in the
	 * parent(s) so the adjustments are picked up by flush.
	 */
	if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
		hammer2_chain_ref(chain);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
	}
	hammer2_chain_setsubmod(trans, chain);
	*chainp = chain;
}
1122 
1123 /*
1124  * Set a chain modified, making it read-write and duplicating it if necessary.
1125  * This function will assign a new physical block to the chain if necessary
1126  *
1127  * Duplication of already-modified chains is possible when the modification
1128  * crosses a flush synchronization boundary.
1129  *
1130  * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
1131  *		     level or the COW operation will not work.
1132  *
1133  * Data blocks	   - The chain is usually locked RESOLVE_NEVER so as not to
1134  *		     run the data through the device buffers.
1135  *
1136  * This function may return a different chain than was passed, in which case
1137  * the old chain will be unlocked and the new chain will be locked.
1138  *
1139  * ip->chain may be adjusted by hammer2_chain_modify_ip().
1140  */
1141 hammer2_inode_data_t *
1142 hammer2_chain_modify_ip(hammer2_trans_t *trans, hammer2_inode_t *ip,
1143 			hammer2_chain_t **chainp, int flags)
1144 {
1145 	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1146 	hammer2_chain_modify(trans, chainp, flags);
1147 	if (ip->chain != *chainp)
1148 		hammer2_inode_repoint(ip, NULL, *chainp);
1149 	return(&ip->chain->data->ipdata);
1150 }
1151 
/*
 * Mark (*chainp) modified: handles delete/duplicate across flush
 * synchronization points, (re)allocates backing storage, and performs
 * the copy-on-write into a device buffer for non-embedded chain types.
 * May replace *chainp.
 */
void
hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t **chainp,
		     int flags)
{
	hammer2_mount_t *hmp;
	hammer2_chain_t *chain;
	hammer2_off_t pbase;
	hammer2_off_t pmask;
	hammer2_off_t peof;
	hammer2_tid_t flush_tid;
	struct buf *nbp;
	int error;
	int wasinitial;
	size_t psize;
	size_t boff;
	void *bdata;

	/*
	 * Data must be resolved if already assigned unless explicitly
	 * flagged otherwise.
	 *
	 * The lock/unlock cycle forces chain->data to be instantiated
	 * (RESOLVE_ALWAYS resolves the media data).
	 */
	chain = *chainp;
	hmp = chain->hmp;
	if (chain->data == NULL && (flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
	    (chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX)) {
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_unlock(chain);
	}

	/*
	 * data is not optional for freemap chains (we must always be sure
	 * to copy the data on COW storage allocations).
	 */
	if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
	    chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
		KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) ||
			 (flags & HAMMER2_MODIFY_OPTDATA) == 0);
	}

	/*
	 * If the chain is already marked MODIFIED we can usually just
	 * return.  However, if a modified chain is modified again in
	 * a synchronization-point-crossing manner we have to issue a
	 * delete/duplicate on the chain to avoid flush interference.
	 */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		/*
		 * Which flush_tid do we need to check?  If the chain is
		 * related to the freemap we have to use the freemap flush
		 * tid (free_flush_tid), otherwise we use the normal filesystem
		 * flush tid (topo_flush_tid).  The two flush domains are
		 * almost completely independent of each other.
		 */
		if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
		    chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
			flush_tid = hmp->topo_flush_tid; /* XXX */
			goto skipxx;	/* XXX */
		} else {
			flush_tid = hmp->topo_flush_tid;
		}

		/*
		 * Main tests
		 */
		if (chain->modify_tid <= flush_tid &&
		    trans->sync_tid > flush_tid) {
			/*
			 * Modifications cross synchronization point,
			 * requires delete-duplicate.
			 */
			KKASSERT((flags & HAMMER2_MODIFY_ASSERTNOCOPY) == 0);
			hammer2_chain_delete_duplicate(trans, chainp, 0);
			chain = *chainp;
			/* fall through using duplicate */
		}
skipxx: /* XXX */
		/*
		 * Quick return path, set DIRTYBP to ensure that
		 * the later retirement of bp will write it out.
		 *
		 * quick return path also needs the modify_tid
		 * logic.
		 */
		if (chain->bp)
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
		if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
			chain->bref.modify_tid = trans->sync_tid;
		chain->modify_tid = trans->sync_tid;
		return;
	}

	/*
	 * modify_tid is only updated for primary modifications, not for
	 * propagated brefs.  mirror_tid will be updated regardless during
	 * the flush, no need to set it here.
	 */
	if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
		chain->bref.modify_tid = trans->sync_tid;

	/*
	 * Set MODIFIED and add a chain ref to prevent destruction.  Both
	 * modified flags share the same ref.
	 */
	if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		hammer2_chain_ref(chain);
	}

	/*
	 * Adjust chain->modify_tid so the flusher knows when the
	 * modification occurred.
	 */
	chain->modify_tid = trans->sync_tid;

	/*
	 * The modification or re-modification requires an allocation and
	 * possible COW.
	 *
	 * We normally always allocate new storage here.  If storage exists
	 * and MODIFY_NOREALLOC is passed in, we do not allocate new storage.
	 * (vchain and fchain use embedded/fixed storage and are never
	 * reallocated.)
	 */
	if (chain != &hmp->vchain &&
	    chain != &hmp->fchain &&
	    ((chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0 ||
	     (flags & HAMMER2_MODIFY_NOREALLOC) == 0)
	) {
		hammer2_freemap_alloc(trans, chain->hmp,
				      &chain->bref, chain->bytes);
		/* XXX failed allocation */
	}

	/*
	 * Do not COW if OPTDATA is set.  INITIAL flag remains unchanged.
	 * (OPTDATA does not prevent [re]allocation of storage, only the
	 * related copy-on-write op).
	 */
	if (flags & HAMMER2_MODIFY_OPTDATA)
		goto skip2;

	/*
	 * Clearing the INITIAL flag (for indirect blocks) indicates that
	 * we've processed the uninitialized storage allocation.
	 *
	 * If this flag is already clear we are likely in a copy-on-write
	 * situation but we have to be sure NOT to bzero the storage if
	 * no data is present.
	 */
	if (chain->flags & HAMMER2_CHAIN_INITIAL) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		wasinitial = 1;
	} else {
		wasinitial = 0;
	}

#if 0
	/*
	 * We currently should never instantiate a device buffer for a
	 * file data chain.  (We definitely can for a freemap chain).
	 *
	 * XXX we can now do this
	 */
	KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_DATA);
#endif

	/*
	 * Instantiate data buffer and possibly execute COW operation
	 */
	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
	case HAMMER2_BREF_TYPE_FREEMAP:
	case HAMMER2_BREF_TYPE_INODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/*
		 * The data is embedded, no copy-on-write operation is
		 * needed.
		 */
		KKASSERT(chain->bp == NULL);
		break;
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		/*
		 * Perform the copy-on-write operation
		 */
		KKASSERT(chain != &hmp->vchain && chain != &hmp->fchain);

		/* device block base/offset containing this allocation */
		psize = hammer2_devblksize(chain->bytes);
		pmask = (hammer2_off_t)psize - 1;
		pbase = chain->bref.data_off & ~pmask;
		boff = chain->bref.data_off & (HAMMER2_OFF_MASK & pmask);
		KKASSERT(pbase != 0);
		peof = (pbase + HAMMER2_SEGMASK64) & ~HAMMER2_SEGMASK64;

		/*
		 * The getblk() optimization can only be used if the
		 * chain element size matches the physical block size.
		 */
		if (chain->bp && chain->bp->b_loffset == pbase) {
			nbp = chain->bp;
			error = 0;
		} else if (chain->bytes == psize) {
			nbp = getblk(hmp->devvp, pbase, psize, 0, 0);
			error = 0;
		} else if (hammer2_isclusterable(chain)) {
			error = cluster_read(hmp->devvp, peof, pbase, psize,
					     psize, HAMMER2_PBUFSIZE*4,
					     &nbp);
			adjreadcounter(&chain->bref, chain->bytes);
		} else {
			error = bread(hmp->devvp, pbase, psize, &nbp);
			adjreadcounter(&chain->bref, chain->bytes);
		}
		KKASSERT(error == 0);
		bdata = (char *)nbp->b_data + boff;

		/*
		 * Copy or zero-fill on write depending on whether
		 * chain->data exists or not.  Retire the existing bp
		 * based on the DIRTYBP flag.  Set the DIRTYBP flag to
		 * indicate that retirement of nbp should use bdwrite().
		 */
		if (chain->data) {
			KKASSERT(chain->bp != NULL);
			if (chain->data != bdata) {
				bcopy(chain->data, bdata, chain->bytes);
			}
		} else if (wasinitial) {
			bzero(bdata, chain->bytes);
		} else {
			/*
			 * We have a problem.  We were asked to COW but
			 * we don't have any data to COW with!
			 */
			panic("hammer2_chain_modify: having a COW %p\n",
			      chain);
		}
		if (chain->bp != nbp) {
			if (chain->bp) {
				if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
					chain->bp->b_flags |= B_CLUSTEROK;
					bdwrite(chain->bp);
				} else {
					chain->bp->b_flags |= B_RELBUF;
					brelse(chain->bp);
				}
			}
			chain->bp = nbp;
			BUF_KERNPROC(chain->bp);
		}
		chain->data = bdata;
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
		break;
	default:
		panic("hammer2_chain_modify: illegal non-embedded type %d",
		      chain->bref.type);
		break;

	}
skip2:
	/* propagate SUBMODIFIED up the parent chain(s) for the flusher */
	hammer2_chain_setsubmod(trans, chain);
}
1413 
1414 /*
1415  * Mark the volume as having been modified.  This short-cut version
1416  * does not have to lock the volume's chain, which allows the ioctl
1417  * code to make adjustments to connections without deadlocking.  XXX
1418  *
1419  * No ref is made on vchain when flagging it MODIFIED.
1420  */
void
hammer2_modify_volume(hammer2_mount_t *hmp)
{
	hammer2_voldata_lock(hmp);
	/* second arg 1 requests the modified/MODIFIED handling on unlock */
	hammer2_voldata_unlock(hmp, 1);
}
1427 
1428 /*
1429  * Locate an in-memory chain.  The parent must be locked.  The in-memory
1430  * chain is returned with a reference and without a lock, or NULL
1431  * if not found.
1432  *
1433  * This function returns the chain at the specified index with the highest
1434  * delete_tid.  The caller must check whether the chain is flagged
 * CHAIN_DELETED or not.  However, because chains can be removed
 * from memory we must ALSO check that DELETED chains are not flushed.  A
1437  * DELETED chain which has been flushed must be ignored (the caller must
1438  * check the parent's blockref array).
1439  *
1440  * NOTE: If no chain is found the caller usually must check the on-media
1441  *	 array to determine if a blockref exists at the index.
1442  */
/*
 * Scan state for hammer2_chain_find_locked()'s RB_SCAN: tracks the
 * candidate chain at ->index with the highest delete_tid.
 */
struct hammer2_chain_find_info {
	hammer2_chain_t *best;		/* best matching chain so far */
	hammer2_tid_t	delete_tid;	/* highest delete_tid seen so far */
	int index;			/* blockref index being searched */
};
1448 
1449 static
1450 int
1451 hammer2_chain_find_cmp(hammer2_chain_t *child, void *data)
1452 {
1453 	struct hammer2_chain_find_info *info = data;
1454 
1455 	if (child->index < info->index)
1456 		return(-1);
1457 	if (child->index > info->index)
1458 		return(1);
1459 	return(0);
1460 }
1461 
1462 static
1463 int
1464 hammer2_chain_find_callback(hammer2_chain_t *child, void *data)
1465 {
1466 	struct hammer2_chain_find_info *info = data;
1467 
1468 	if (info->delete_tid < child->delete_tid) {
1469 		info->delete_tid = child->delete_tid;
1470 		info->best = child;
1471 	}
1472 	return(0);
1473 }
1474 
1475 static
1476 hammer2_chain_t *
1477 hammer2_chain_find_locked(hammer2_chain_t *parent, int index)
1478 {
1479 	struct hammer2_chain_find_info info;
1480 	hammer2_chain_t *child;
1481 
1482 	info.index = index;
1483 	info.delete_tid = 0;
1484 	info.best = NULL;
1485 
1486 	RB_SCAN(hammer2_chain_tree, &parent->core->rbtree,
1487 		hammer2_chain_find_cmp, hammer2_chain_find_callback,
1488 		&info);
1489 	child = info.best;
1490 
1491 	return (child);
1492 }
1493 
1494 hammer2_chain_t *
1495 hammer2_chain_find(hammer2_chain_t *parent, int index)
1496 {
1497 	hammer2_chain_t *child;
1498 
1499 	spin_lock(&parent->core->cst.spin);
1500 	child = hammer2_chain_find_locked(parent, index);
1501 	if (child)
1502 		hammer2_chain_ref(child);
1503 	spin_unlock(&parent->core->cst.spin);
1504 
1505 	return (child);
1506 }
1507 
1508 /*
1509  * Return a locked chain structure with all associated data acquired.
1510  * (if LOOKUP_NOLOCK is requested the returned chain is only referenced).
1511  *
1512  * Caller must hold the parent locked shared or exclusive since we may
1513  * need the parent's bref array to find our block.
1514  *
1515  * The returned child is locked as requested.  If NOLOCK, the returned
1516  * child is still at least referenced.
1517  */
hammer2_chain_t *
hammer2_chain_get(hammer2_chain_t *parent, int index, int flags)
{
	hammer2_blockref_t *bref;
	hammer2_mount_t *hmp = parent->hmp;
	hammer2_chain_core_t *above = parent->core;
	hammer2_chain_t *chain;
	hammer2_chain_t dummy;		/* stack key for the RB_FIND lookup */
	int how;

	/*
	 * Figure out how to lock.  MAYBE can be used to optimize
	 * the initial-create state for indirect blocks.
	 */
	if (flags & HAMMER2_LOOKUP_ALWAYS)
		how = HAMMER2_RESOLVE_ALWAYS;
	else if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK))
		how = HAMMER2_RESOLVE_NEVER;
	else
		how = HAMMER2_RESOLVE_MAYBE;
	if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
		how |= HAMMER2_RESOLVE_SHARED;

retry:
	/*
	 * First see if we have a (possibly modified) chain element cached
	 * for this (parent, index).  Acquire the data if necessary.
	 *
	 * If chain->data is non-NULL the chain should already be marked
	 * modified.
	 */
	dummy.flags = 0;
	dummy.index = index;
	dummy.delete_tid = HAMMER2_MAX_TID;
	spin_lock(&above->cst.spin);
	chain = RB_FIND(hammer2_chain_tree, &above->rbtree, &dummy);
	if (chain) {
		hammer2_chain_ref(chain);
		spin_unlock(&above->cst.spin);
		if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0)
			hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
		return(chain);
	}
	spin_unlock(&above->cst.spin);

	/*
	 * The parent chain must not be in the INITIAL state.
	 */
	if (parent->flags & HAMMER2_CHAIN_INITIAL) {
		panic("hammer2_chain_get: Missing bref(1)");
		/* NOT REACHED */
	}

	/*
	 * No RBTREE entry found, lookup the bref and issue I/O (switch on
	 * the parent's bref to determine where and how big the array is).
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
		bref = &parent->data->ipdata.u.blockset.blockref[index];
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		KKASSERT(parent->data != NULL);
		KKASSERT(index >= 0 &&
			 index < parent->bytes / sizeof(hammer2_blockref_t));
		bref = &parent->data->npdata[index];
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
		bref = &hmp->voldata.sroot_blockset.blockref[index];
		break;
	case HAMMER2_BREF_TYPE_FREEMAP:
		KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
		bref = &hmp->voldata.freemap_blockset.blockref[index];
		break;
	default:
		bref = NULL;
		panic("hammer2_chain_get: unrecognized blockref type: %d",
		      parent->bref.type);
	}
	if (bref->type == 0) {
		panic("hammer2_chain_get: Missing bref(2)");
		/* NOT REACHED */
	}

	/*
	 * Allocate a chain structure representing the existing media
	 * entry.  Resulting chain has one ref and is not locked.
	 *
	 * The locking operation we do later will issue I/O to read it.
	 */
	chain = hammer2_chain_alloc(hmp, NULL, bref);
	hammer2_chain_core_alloc(chain, NULL);	/* ref'd chain returned */

	/*
	 * Link the chain into its parent.  A spinlock is required to safely
	 * access the RBTREE, and it is possible to collide with another
	 * hammer2_chain_get() operation because the caller might only hold
	 * a shared lock on the parent.
	 *
	 * On collision the loser drops its chain and retries, picking up
	 * the winner's entry via the RB_FIND above.
	 */
	KKASSERT(parent->refs > 0);
	spin_lock(&above->cst.spin);
	chain->above = above;
	chain->index = index;
	if (RB_INSERT(hammer2_chain_tree, &above->rbtree, chain)) {
		chain->above = NULL;
		chain->index = -1;
		spin_unlock(&above->cst.spin);
		hammer2_chain_drop(chain);
		goto retry;
	}
	atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
	spin_unlock(&above->cst.spin);

	/*
	 * Our new chain is referenced but NOT locked.  Lock the chain
	 * below.  The locking operation also resolves its data.
	 *
	 * If NOLOCK is set the release will release the one-and-only lock.
	 */
	if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0) {
		hammer2_chain_lock(chain, how);	/* recursive lock */
		hammer2_chain_drop(chain);	/* excess ref */
	}
	return (chain);
}
1646 
1647 /*
1648  * Lookup initialization/completion API
1649  */
1650 hammer2_chain_t *
1651 hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags)
1652 {
1653 	if (flags & HAMMER2_LOOKUP_SHARED) {
1654 		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
1655 					   HAMMER2_RESOLVE_SHARED);
1656 	} else {
1657 		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
1658 	}
1659 	return (parent);
1660 }
1661 
1662 void
1663 hammer2_chain_lookup_done(hammer2_chain_t *parent)
1664 {
1665 	if (parent)
1666 		hammer2_chain_unlock(parent);
1667 }
1668 
/*
 * Replace (*parentp) with a live parent one level up, unlocking the old
 * parent and locking the new one.  The ref taken under the spinlock keeps
 * the new parent alive across the lock handoff; RESOLVE_NOREF converts
 * that ref into the lock.
 */
static
hammer2_chain_t *
hammer2_chain_getparent(hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *oparent;
	hammer2_chain_t *nparent;
	hammer2_chain_core_t *above;

	oparent = *parentp;
	above = oparent->above;

	spin_lock(&above->cst.spin);
	nparent = above->first_parent;
	/* skip parents that fail the refactor test (see refactor_test) */
	while (hammer2_chain_refactor_test(nparent, 1))
		nparent = nparent->next_parent;
	hammer2_chain_ref(nparent);	/* protect nparent, use in lock */
	spin_unlock(&above->cst.spin);

	hammer2_chain_unlock(oparent);
	hammer2_chain_lock(nparent, how | HAMMER2_RESOLVE_NOREF);
	*parentp = nparent;

	return (nparent);
}
1693 
1694 /*
1695  * Locate any key between key_beg and key_end inclusive.  (*parentp)
1696  * typically points to an inode but can also point to a related indirect
1697  * block and this function will recurse upwards and find the inode again.
1698  *
1699  * WARNING!  THIS DOES NOT RETURN KEYS IN LOGICAL KEY ORDER!  ANY KEY
1700  *	     WITHIN THE RANGE CAN BE RETURNED.  HOWEVER, AN ITERATION
1701  *	     WHICH PICKS UP WHERE WE LEFT OFF WILL CONTINUE THE SCAN
1702  *	     AND ALL IN-RANGE KEYS WILL EVENTUALLY BE RETURNED (NOT
1703  *	     NECESSARILY IN ORDER).
1704  *
1705  * (*parentp) must be exclusively locked and referenced and can be an inode
1706  * or an existing indirect block within the inode.
1707  *
1708  * On return (*parentp) will be modified to point at the deepest parent chain
1709  * element encountered during the search, as a helper for an insertion or
1710  * deletion.   The new (*parentp) will be locked and referenced and the old
1711  * will be unlocked and dereferenced (no change if they are both the same).
1712  *
1713  * The matching chain will be returned exclusively locked.  If NOLOCK is
1714  * requested the chain will be returned only referenced.
1715  *
1716  * NULL is returned if no match was found, but (*parentp) will still
1717  * potentially be adjusted.
1718  *
1719  * This function will also recurse up the chain if the key is not within the
1720  * current parent's range.  (*parentp) can never be set to NULL.  An iteration
1721  * can simply allow (*parentp) to float inside the loop.
1722  *
1723  * NOTE!  chain->data is not always resolved.  By default it will not be
1724  *	  resolved for BREF_TYPE_DATA, FREEMAP_NODE, or FREEMAP_LEAF.  Use
1725  *	  HAMMER2_LOOKUP_ALWAYS to force resolution (but be careful w/
1726  *	  BREF_TYPE_DATA as the device buffer can alias the logical file
1727  *	  buffer).
1728  */
hammer2_chain_t *
hammer2_chain_lookup(hammer2_chain_t **parentp,
		     hammer2_key_t key_beg, hammer2_key_t key_end,
		     int flags)
{
	hammer2_mount_t *hmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_chain_t *tmp;
	hammer2_blockref_t *base;	/* on-media blockref array */
	hammer2_blockref_t *bref;	/* candidate blockref */
	hammer2_key_t scan_beg;
	hammer2_key_t scan_end;
	int count = 0;
	int i;
	int how_always = HAMMER2_RESOLVE_ALWAYS;
	int how_maybe = HAMMER2_RESOLVE_MAYBE;

	if (flags & HAMMER2_LOOKUP_ALWAYS)
		how_maybe = how_always;

	if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
		how_maybe |= HAMMER2_RESOLVE_SHARED;
		how_always |= HAMMER2_RESOLVE_SHARED;
	}

	/*
	 * Recurse (*parentp) upward if necessary until the parent completely
	 * encloses the key range or we hit the inode.
	 */
	parent = *parentp;
	hmp = parent->hmp;

	while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
	       parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
		scan_beg = parent->bref.key;
		scan_end = scan_beg +
			   ((hammer2_key_t)1 << parent->bref.keybits) - 1;
		if (key_beg >= scan_beg && key_end <= scan_end)
			break;
		parent = hammer2_chain_getparent(parentp, how_maybe);
	}

again:
	/*
	 * Locate the blockref array.  Currently we do a fully associative
	 * search through the array.
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Special shortcut for embedded data returns the inode
		 * itself.  Callers must detect this condition and access
		 * the embedded data (the strategy code does this for us).
		 *
		 * This is only applicable to regular files and softlinks.
		 */
		if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
			if (flags & HAMMER2_LOOKUP_NOLOCK)
				hammer2_chain_ref(parent);
			else
				hammer2_chain_lock(parent, how_always);
			return (parent);
		}
		base = &parent->data->ipdata.u.blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Handle MATCHIND on the parent
		 */
		if (flags & HAMMER2_LOOKUP_MATCHIND) {
			scan_beg = parent->bref.key;
			scan_end = scan_beg +
			       ((hammer2_key_t)1 << parent->bref.keybits) - 1;
			if (key_beg == scan_beg && key_end == scan_end) {
				chain = parent;
				hammer2_chain_lock(chain, how_maybe);
				goto done;
			}
		}
		/*
		 * Optimize indirect blocks in the INITIAL state to avoid
		 * I/O.
		 */
		if (parent->flags & HAMMER2_CHAIN_INITIAL) {
			base = NULL;
		} else {
			if (parent->data == NULL)
				panic("parent->data is NULL");
			base = &parent->data->npdata[0];
		}
		count = parent->bytes / sizeof(hammer2_blockref_t);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		base = &hmp->voldata.sroot_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP:
		base = &hmp->voldata.freemap_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	default:
		panic("hammer2_chain_lookup: unrecognized blockref type: %d",
		      parent->bref.type);
		base = NULL;	/* safety */
		count = 0;	/* safety */
	}

	/*
	 * If the element and key overlap we use the element.
	 *
	 * In-memory chains shadow the media array: an in-memory chain at
	 * index i takes precedence over base[i].
	 *
	 * NOTE! Deleted elements are effectively invisible.  Deletions
	 *	 proactively clear the parent bref to the deleted child
	 *	 so we do not try to shadow here to avoid parent updates
	 *	 (which would be difficult since multiple deleted elements
	 *	 might represent different flush synchronization points).
	 */
	bref = NULL;
	scan_beg = 0;	/* avoid compiler warning */
	scan_end = 0;	/* avoid compiler warning */

	for (i = 0; i < count; ++i) {
		tmp = hammer2_chain_find(parent, i);
		if (tmp) {
			if (tmp->flags & HAMMER2_CHAIN_DELETED) {
				hammer2_chain_drop(tmp);
				continue;
			}
			bref = &tmp->bref;
			KKASSERT(bref->type != 0);
		} else if (base == NULL || base[i].type == 0) {
			continue;
		} else {
			bref = &base[i];
		}
		scan_beg = bref->key;
		scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
		if (tmp)
			hammer2_chain_drop(tmp);
		if (key_beg <= scan_end && key_end >= scan_beg)
			break;
	}
	if (i == count) {
		/* no overlap at this level; point lookups fail here */
		if (key_beg == key_end)
			return (NULL);
		return (hammer2_chain_next(parentp, NULL,
					   key_beg, key_end, flags));
	}

	/*
	 * Acquire the new chain element.  If the chain element is an
	 * indirect block we must search recursively.
	 *
	 * It is possible for the tmp chain above to be removed from
	 * the RBTREE but the parent lock ensures it would not have been
	 * destroyed from the media, so the chain_get() code will simply
	 * reload it from the media in that case.
	 */
	chain = hammer2_chain_get(parent, i, flags);
	if (chain == NULL)
		return (NULL);

	/*
	 * If the chain element is an indirect block it becomes the new
	 * parent and we loop on it.
	 *
	 * The parent always has to be locked with at least RESOLVE_MAYBE
	 * so we can access its data.  It might need a fixup if the caller
	 * passed incompatible flags.  Be careful not to cause a deadlock
	 * as a data-load requires an exclusive lock.
	 *
	 * If HAMMER2_LOOKUP_MATCHIND is set and the indirect block's key
	 * range is within the requested key range we return the indirect
	 * block and do NOT loop.  This is usually only used to acquire
	 * freemap nodes.
	 */
	if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
	    chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
		hammer2_chain_unlock(parent);
		*parentp = parent = chain;
		if (flags & HAMMER2_LOOKUP_NOLOCK) {
			hammer2_chain_lock(chain,
					   how_maybe |
					   HAMMER2_RESOLVE_NOREF);
		} else if ((flags & HAMMER2_LOOKUP_NODATA) &&
			   chain->data == NULL) {
			/* relock to resolve the indirect block's data */
			hammer2_chain_ref(chain);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(chain,
					   how_maybe |
					   HAMMER2_RESOLVE_NOREF);
		}
		goto again;
	}
done:
	/*
	 * All done, return the chain
	 */
	return (chain);
}
1931 
1932 /*
1933  * After having issued a lookup we can iterate all matching keys.
1934  *
 * If chain is non-NULL we continue the iteration from just after its index.
1936  *
1937  * If chain is NULL we assume the parent was exhausted and continue the
1938  * iteration at the next parent.
1939  *
1940  * parent must be locked on entry and remains locked throughout.  chain's
1941  * lock status must match flags.  Chain is always at least referenced.
1942  *
 * NOTE: The MATCHIND flag IS tested below when deciding whether to return
 *	 an indirect block whose key range lies within the requested range
 *	 instead of recursing into it (same semantics as lookup).
1944  */
hammer2_chain_t *
hammer2_chain_next(hammer2_chain_t **parentp, hammer2_chain_t *chain,
		   hammer2_key_t key_beg, hammer2_key_t key_end,
		   int flags)
{
	hammer2_mount_t *hmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *tmp;
	hammer2_blockref_t *base;
	hammer2_blockref_t *bref;
	hammer2_key_t scan_beg;
	hammer2_key_t scan_end;
	int i;
	int how_maybe = HAMMER2_RESOLVE_MAYBE;
	int count;

	/*
	 * Shared or no-lock iterations must acquire any new parent locks
	 * shared as well to avoid excluding other shared iterators.
	 */
	if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
		how_maybe |= HAMMER2_RESOLVE_SHARED;

	parent = *parentp;
	hmp = parent->hmp;

again:
	/*
	 * Calculate the next index and recalculate the parent if necessary.
	 */
	if (chain) {
		/*
		 * Continue iteration within current parent.  If not NULL
		 * the passed-in chain may or may not be locked, based on
		 * the LOOKUP_NOLOCK flag (passed in as returned from lookup
		 * or a prior next).
		 */
		i = chain->index + 1;
		if (flags & HAMMER2_LOOKUP_NOLOCK)
			hammer2_chain_drop(chain);
		else
			hammer2_chain_unlock(chain);

		/*
		 * Any scan where the lookup returned degenerate data embedded
		 * in the inode has an invalid index and must terminate.
		 */
		if (chain == parent)
			return(NULL);
		chain = NULL;
	} else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
		   parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
		/*
		 * We reached the end of the iteration.
		 */
		return (NULL);
	} else {
		/*
		 * Continue iteration with next parent unless the current
		 * parent covers the range.
		 */
		scan_beg = parent->bref.key;
		scan_end = scan_beg +
			    ((hammer2_key_t)1 << parent->bref.keybits) - 1;
		if (key_beg >= scan_beg && key_end <= scan_end)
			return (NULL);

		/*
		 * Pop up one level: resume the scan in the grandparent at
		 * the slot following the exhausted parent's index.
		 */
		i = parent->index + 1;
		parent = hammer2_chain_getparent(parentp, how_maybe);
	}

again2:
	/*
	 * Locate the blockref array.  Currently we do a fully associative
	 * search through the array.
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		base = &parent->data->ipdata.u.blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		if (parent->flags & HAMMER2_CHAIN_INITIAL) {
			/*
			 * Freshly created indirect blocks have no media
			 * array yet; only in-memory children exist.
			 */
			base = NULL;
		} else {
			KKASSERT(parent->data != NULL);
			base = &parent->data->npdata[0];
		}
		count = parent->bytes / sizeof(hammer2_blockref_t);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		base = &hmp->voldata.sroot_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	case HAMMER2_BREF_TYPE_FREEMAP:
		base = &hmp->voldata.freemap_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	default:
		panic("hammer2_chain_next: unrecognized blockref type: %d",
		      parent->bref.type);
		base = NULL;	/* safety */
		count = 0;	/* safety */
		break;
	}
	KKASSERT(i <= count);

	/*
	 * Look for the key.  If we are unable to find a match and an exact
	 * match was requested we return NULL.  If a range was requested we
	 * run hammer2_chain_next() to iterate.
	 *
	 * NOTE! Deleted elements are effectively invisible.  Deletions
	 *	 proactively clear the parent bref to the deleted child
	 *	 so we do not try to shadow here to avoid parent updates
	 *	 (which would be difficult since multiple deleted elements
	 *	 might represent different flush synchronization points).
	 */
	bref = NULL;
	scan_beg = 0;	/* avoid compiler warning */
	scan_end = 0;	/* avoid compiler warning */

	while (i < count) {
		/*
		 * In-memory chains override any media blockref at the same
		 * index; fall back to base[] only when no chain exists.
		 */
		tmp = hammer2_chain_find(parent, i);
		if (tmp) {
			if (tmp->flags & HAMMER2_CHAIN_DELETED) {
				hammer2_chain_drop(tmp);
				++i;
				continue;
			}
			bref = &tmp->bref;
		} else if (base == NULL || base[i].type == 0) {
			++i;
			continue;
		}
		else {
			bref = &base[i];
		}
		scan_beg = bref->key;
		scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
		if (tmp)
			hammer2_chain_drop(tmp);
		if (key_beg <= scan_end && key_end >= scan_beg)
			break;
		++i;
	}

	/*
	 * If we couldn't find a match recurse up a parent to continue the
	 * search.
	 */
	if (i == count)
		goto again;

	/*
	 * Acquire the new chain element.  If the chain element is an
	 * indirect block we must search recursively.
	 */
	chain = hammer2_chain_get(parent, i, flags);
	if (chain == NULL)
		return (NULL);

	/*
	 * If the chain element is an indirect block it becomes the new
	 * parent and we loop on it.
	 *
	 * The parent always has to be locked with at least RESOLVE_MAYBE
	 * so we can access its data.  It might need a fixup if the caller
	 * passed incompatible flags.  Be careful not to cause a deadlock
	 * as a data-load requires an exclusive lock.
	 *
	 * If HAMMER2_LOOKUP_MATCHIND is set and the indirect block's key
	 * range is within the requested key range we return the indirect
	 * block and do NOT loop.  This is usually only used to acquire
	 * freemap nodes.
	 */
	if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
	    chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
		if ((flags & HAMMER2_LOOKUP_MATCHIND) == 0 ||
		    key_beg > scan_beg || key_end < scan_end) {
			hammer2_chain_unlock(parent);
			*parentp = parent = chain;
			chain = NULL;
			if (flags & HAMMER2_LOOKUP_NOLOCK) {
				/*
				 * chain_get(NOLOCK) left only a ref; the
				 * new parent needs a real lock.  NOREF eats
				 * the surplus reference.
				 */
				hammer2_chain_lock(parent,
						   how_maybe |
						   HAMMER2_RESOLVE_NOREF);
			} else if ((flags & HAMMER2_LOOKUP_NODATA) &&
				   parent->data == NULL) {
				/*
				 * Relock to resolve data; ref/unlock first
				 * so the chain cannot be destroyed while we
				 * upgrade.
				 */
				hammer2_chain_ref(parent);
				hammer2_chain_unlock(parent);
				hammer2_chain_lock(parent,
						   how_maybe |
						   HAMMER2_RESOLVE_NOREF);
			}
			i = 0;
			goto again2;
		}
	}

	/*
	 * All done, return chain
	 */
	return (chain);
}
2146 
2147 /*
2148  * Loop on parent's children, issuing the callback for each child.
2149  *
2150  * Uses LOOKUP flags.
2151  */
int
hammer2_chain_iterate(hammer2_chain_t *parent,
		      int (*callback)(hammer2_chain_t *parent,
				      hammer2_chain_t **chainp,
				      void *arg),
		      void *arg, int flags)
{
	hammer2_chain_t *chain;
	hammer2_blockref_t *base;
	int count;
	int i;
	int res;

	/*
	 * Scan the children (if any).  res holds the last callback result;
	 * a negative result terminates the iteration early.
	 */
	res = 0;
	i = 0;
	for (;;) {
		/*
		 * Calculate the blockref array on each loop in order
		 * to allow the callback to temporarily unlock/relock
		 * the parent.
		 */
		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			base = &parent->data->ipdata.u.blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			if (parent->flags & HAMMER2_CHAIN_INITIAL) {
				/*
				 * Fresh indirect block, no media array yet.
				 */
				base = NULL;
			} else {
				KKASSERT(parent->data != NULL);
				base = &parent->data->npdata[0];
			}
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			base = &parent->hmp->voldata.sroot_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			base = &parent->hmp->voldata.freemap_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		default:
			/*
			 * The function allows calls on non-recursive
			 * chains and will effectively be a nop() in that
			 * case.
			 */
			base = NULL;
			count = 0;
			break;
		}

		/*
		 * Loop termination
		 */
		if (i >= count)
			break;

		/*
		 * Lookup the child, properly overloading any elements
		 * held in memory.
		 *
		 * NOTE: Deleted elements cover any underlying base[] entry
		 *	 (which might not have been zero'd out yet).
		 *
		 * NOTE: The fact that there can be multiple stacked
		 *	 deleted elements at the same index is hidden
		 *	 by hammer2_chain_find().
		 */
		chain = hammer2_chain_find(parent, i);
		if (chain) {
			if (chain->flags & HAMMER2_CHAIN_DELETED) {
				hammer2_chain_drop(chain);
				++i;
				continue;
			}
		} else if (base == NULL || base[i].type == 0) {
			++i;
			continue;
		}
		/*
		 * Drop the find() ref; chain_get() below re-acquires the
		 * child with the lock/ref state requested by flags.
		 */
		if (chain)
			hammer2_chain_drop(chain);
		chain = hammer2_chain_get(parent, i, flags);
		if (chain) {
			res = callback(parent, &chain, arg);
			/*
			 * The callback may have disposed of chain (set it
			 * to NULL); otherwise release it per NOLOCK.
			 */
			if (chain) {
				if (flags & HAMMER2_LOOKUP_NOLOCK)
					hammer2_chain_drop(chain);
				else
					hammer2_chain_unlock(chain);
			}
			if (res < 0)
				break;
		}
		++i;
	}
	return res;
}
2256 
2257 /*
2258  * Create and return a new hammer2 system memory structure of the specified
2259  * key, type and size and insert it under (*parentp).  This is a full
2260  * insertion, based on the supplied key/keybits, and may involve creating
2261  * indirect blocks and moving other chains around via delete/duplicate.
2262  *
2263  * (*parentp) must be exclusive locked and may be replaced on return
2264  * depending on how much work the function had to do.
2265  *
2266  * (*chainp) usually starts out NULL and returns the newly created chain,
2267  * but if the caller desires the caller may allocate a disconnected chain
2268  * and pass it in instead.  (It is also possible for the caller to use
2269  * chain_duplicate() to create a disconnected chain, manipulate it, then
2270  * pass it into this function to insert it).
2271  *
2272  * This function should NOT be used to insert INDIRECT blocks.  It is
2273  * typically used to create/insert inodes and data blocks.
2274  *
2275  * Caller must pass-in an exclusively locked parent the new chain is to
2276  * be inserted under, and optionally pass-in a disconnected, exclusively
2277  * locked chain to insert (else we create a new chain).  The function will
2278  * adjust (*parentp) as necessary, create or connect the chain, and
2279  * return an exclusively locked chain in *chainp.
2280  */
2281 int
2282 hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
2283 		     hammer2_chain_t **chainp,
2284 		     hammer2_key_t key, int keybits, int type, size_t bytes)
2285 {
2286 	hammer2_mount_t *hmp;
2287 	hammer2_chain_t *chain;
2288 	hammer2_chain_t *child;
2289 	hammer2_chain_t *parent = *parentp;
2290 	hammer2_chain_core_t *above;
2291 	hammer2_blockref_t dummy;
2292 	hammer2_blockref_t *base;
2293 	int allocated = 0;
2294 	int error = 0;
2295 	int count;
2296 	int i;
2297 
2298 	above = parent->core;
2299 	KKASSERT(ccms_thread_lock_owned(&above->cst));
2300 	hmp = parent->hmp;
2301 	chain = *chainp;
2302 
2303 	if (chain == NULL) {
2304 		/*
2305 		 * First allocate media space and construct the dummy bref,
2306 		 * then allocate the in-memory chain structure.  Set the
2307 		 * INITIAL flag for fresh chains.
2308 		 */
2309 		bzero(&dummy, sizeof(dummy));
2310 		dummy.type = type;
2311 		dummy.key = key;
2312 		dummy.keybits = keybits;
2313 		dummy.data_off = hammer2_getradix(bytes);
2314 		dummy.methods = parent->bref.methods;
2315 		chain = hammer2_chain_alloc(hmp, trans, &dummy);
2316 		hammer2_chain_core_alloc(chain, NULL);
2317 
2318 		atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
2319 
2320 		/*
2321 		 * Lock the chain manually, chain_lock will load the chain
2322 		 * which we do NOT want to do.  (note: chain->refs is set
2323 		 * to 1 by chain_alloc() for us, but lockcnt is not).
2324 		 */
2325 		chain->lockcnt = 1;
2326 		ccms_thread_lock(&chain->core->cst, CCMS_STATE_EXCLUSIVE);
2327 		allocated = 1;
2328 
2329 		/*
2330 		 * We do NOT set INITIAL here (yet).  INITIAL is only
2331 		 * used for indirect blocks.
2332 		 *
2333 		 * Recalculate bytes to reflect the actual media block
2334 		 * allocation.
2335 		 */
2336 		bytes = (hammer2_off_t)1 <<
2337 			(int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2338 		chain->bytes = bytes;
2339 
2340 		switch(type) {
2341 		case HAMMER2_BREF_TYPE_VOLUME:
2342 		case HAMMER2_BREF_TYPE_FREEMAP:
2343 			panic("hammer2_chain_create: called with volume type");
2344 			break;
2345 		case HAMMER2_BREF_TYPE_INODE:
2346 			KKASSERT(bytes == HAMMER2_INODE_BYTES);
2347 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
2348 			chain->data = kmalloc(sizeof(chain->data->ipdata),
2349 					      hmp->mchain, M_WAITOK | M_ZERO);
2350 			break;
2351 		case HAMMER2_BREF_TYPE_INDIRECT:
2352 			panic("hammer2_chain_create: cannot be used to"
2353 			      "create indirect block");
2354 			break;
2355 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2356 			panic("hammer2_chain_create: cannot be used to"
2357 			      "create freemap root or node");
2358 			break;
2359 		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2360 			KKASSERT(bytes == sizeof(chain->data->bmdata));
2361 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
2362 			chain->data = kmalloc(sizeof(chain->data->bmdata),
2363 					      hmp->mchain, M_WAITOK | M_ZERO);
2364 			break;
2365 		case HAMMER2_BREF_TYPE_DATA:
2366 		default:
2367 			/* leave chain->data NULL */
2368 			KKASSERT(chain->data == NULL);
2369 			break;
2370 		}
2371 	} else {
2372 		/*
2373 		 * Potentially update the existing chain's key/keybits.
2374 		 *
2375 		 * Do NOT mess with the current state of the INITIAL flag.
2376 		 */
2377 		chain->bref.key = key;
2378 		chain->bref.keybits = keybits;
2379 		KKASSERT(chain->above == NULL);
2380 	}
2381 
2382 again:
2383 	above = parent->core;
2384 
2385 	/*
2386 	 * Locate a free blockref in the parent's array
2387 	 */
2388 	switch(parent->bref.type) {
2389 	case HAMMER2_BREF_TYPE_INODE:
2390 		KKASSERT((parent->data->ipdata.op_flags &
2391 			  HAMMER2_OPFLAG_DIRECTDATA) == 0);
2392 		KKASSERT(parent->data != NULL);
2393 		base = &parent->data->ipdata.u.blockset.blockref[0];
2394 		count = HAMMER2_SET_COUNT;
2395 		break;
2396 	case HAMMER2_BREF_TYPE_INDIRECT:
2397 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2398 		if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2399 			base = NULL;
2400 		} else {
2401 			KKASSERT(parent->data != NULL);
2402 			base = &parent->data->npdata[0];
2403 		}
2404 		count = parent->bytes / sizeof(hammer2_blockref_t);
2405 		break;
2406 	case HAMMER2_BREF_TYPE_VOLUME:
2407 		KKASSERT(parent->data != NULL);
2408 		base = &hmp->voldata.sroot_blockset.blockref[0];
2409 		count = HAMMER2_SET_COUNT;
2410 		break;
2411 	case HAMMER2_BREF_TYPE_FREEMAP:
2412 		KKASSERT(parent->data != NULL);
2413 		base = &hmp->voldata.freemap_blockset.blockref[0];
2414 		count = HAMMER2_SET_COUNT;
2415 		break;
2416 	default:
2417 		panic("hammer2_chain_create: unrecognized blockref type: %d",
2418 		      parent->bref.type);
2419 		count = 0;
2420 		break;
2421 	}
2422 
2423 	/*
2424 	 * Scan for an unallocated bref, also skipping any slots occupied
2425 	 * by in-memory chain elements that may not yet have been updated
2426 	 * in the parent's bref array.
2427 	 *
2428 	 * We don't have to hold the spinlock to save an empty slot as
2429 	 * new slots can only transition from empty if the parent is
2430 	 * locked exclusively.
2431 	 */
2432 	spin_lock(&above->cst.spin);
2433 	for (i = 0; i < count; ++i) {
2434 		child = hammer2_chain_find_locked(parent, i);
2435 		if (child) {
2436 			if (child->flags & HAMMER2_CHAIN_DELETED)
2437 				break;
2438 			continue;
2439 		}
2440 		if (base == NULL)
2441 			break;
2442 		if (base[i].type == 0)
2443 			break;
2444 	}
2445 	spin_unlock(&above->cst.spin);
2446 
2447 	/*
2448 	 * If no free blockref could be found we must create an indirect
2449 	 * block and move a number of blockrefs into it.  With the parent
2450 	 * locked we can safely lock each child in order to move it without
2451 	 * causing a deadlock.
2452 	 *
2453 	 * This may return the new indirect block or the old parent depending
2454 	 * on where the key falls.  NULL is returned on error.
2455 	 */
2456 	if (i == count) {
2457 		hammer2_chain_t *nparent;
2458 
2459 		nparent = hammer2_chain_create_indirect(trans, parent,
2460 							key, keybits,
2461 							type, &error);
2462 		if (nparent == NULL) {
2463 			if (allocated)
2464 				hammer2_chain_drop(chain);
2465 			chain = NULL;
2466 			goto done;
2467 		}
2468 		if (parent != nparent) {
2469 			hammer2_chain_unlock(parent);
2470 			parent = *parentp = nparent;
2471 		}
2472 		goto again;
2473 	}
2474 
2475 	/*
2476 	 * Link the chain into its parent.  Later on we will have to set
2477 	 * the MOVED bit in situations where we don't mark the new chain
2478 	 * as being modified.
2479 	 */
2480 	if (chain->above != NULL)
2481 		panic("hammer2: hammer2_chain_create: chain already connected");
2482 	KKASSERT(chain->above == NULL);
2483 	KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
2484 
2485 	chain->above = above;
2486 	chain->index = i;
2487 	spin_lock(&above->cst.spin);
2488 	if (RB_INSERT(hammer2_chain_tree, &above->rbtree, chain))
2489 		panic("hammer2_chain_create: collision");
2490 	atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
2491 	spin_unlock(&above->cst.spin);
2492 
2493 	if (allocated) {
2494 		/*
2495 		 * Mark the newly created chain modified.
2496 		 *
2497 		 * Device buffers are not instantiated for DATA elements
2498 		 * as these are handled by logical buffers.
2499 		 *
2500 		 * Indirect and freemap node indirect blocks are handled
2501 		 * by hammer2_chain_create_indirect() and not by this
2502 		 * function.
2503 		 *
2504 		 * Data for all other bref types is expected to be
2505 		 * instantiated (INODE, LEAF).
2506 		 */
2507 		switch(chain->bref.type) {
2508 		case HAMMER2_BREF_TYPE_DATA:
2509 			hammer2_chain_modify(trans, &chain,
2510 					     HAMMER2_MODIFY_OPTDATA |
2511 					     HAMMER2_MODIFY_ASSERTNOCOPY);
2512 			break;
2513 		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2514 		case HAMMER2_BREF_TYPE_INODE:
2515 			hammer2_chain_modify(trans, &chain,
2516 					     HAMMER2_MODIFY_ASSERTNOCOPY);
2517 			break;
2518 		default:
2519 			/*
2520 			 * Remaining types are not supported by this function.
2521 			 * In particular, INDIRECT and LEAF_NODE types are
2522 			 * handled by create_indirect().
2523 			 */
2524 			panic("hammer2_chain_create: bad type: %d",
2525 			      chain->bref.type);
2526 			/* NOT REACHED */
2527 			break;
2528 		}
2529 	} else {
2530 		/*
2531 		 * When reconnecting a chain we must set MOVED and setsubmod
2532 		 * so the flush recognizes that it must update the bref in
2533 		 * the parent.
2534 		 */
2535 		if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2536 			hammer2_chain_ref(chain);
2537 			atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2538 		}
2539 		hammer2_chain_setsubmod(trans, chain);
2540 	}
2541 
2542 done:
2543 	*chainp = chain;
2544 
2545 	return (error);
2546 }
2547 
2548 /*
2549  * Replace (*chainp) with a duplicate.  The original *chainp is unlocked
2550  * and the replacement will be returned locked.  Both the original and the
2551  * new chain will share the same RBTREE (have the same chain->core), with
2552  * the new chain becoming the 'current' chain (meaning it is the first in
2553  * the linked list at core->chain_first).
2554  *
2555  * If (parent, i) then the new duplicated chain is inserted under the parent
2556  * at the specified index (the parent must not have a ref at that index).
2557  *
2558  * If (NULL, -1) then the new duplicated chain is not inserted anywhere,
2559  * similar to if it had just been chain_alloc()'d (suitable for passing into
2560  * hammer2_chain_create() after this function returns).
2561  *
2562  * NOTE! Duplication is used in order to retain the original topology to
2563  *	 support flush synchronization points.  Both the original and the
2564  *	 new chain will have the same transaction id and thus the operation
2565  *	 appears atomic w/regards to media flushes.
2566  */
2567 static void hammer2_chain_dup_fixup(hammer2_chain_t *ochain,
2568 				    hammer2_chain_t *nchain);
2569 
2570 void
2571 hammer2_chain_duplicate(hammer2_trans_t *trans, hammer2_chain_t *parent, int i,
2572 			hammer2_chain_t **chainp, hammer2_blockref_t *bref)
2573 {
2574 	hammer2_mount_t *hmp;
2575 	hammer2_blockref_t *base;
2576 	hammer2_chain_t *ochain;
2577 	hammer2_chain_t *nchain;
2578 	hammer2_chain_t *scan;
2579 	hammer2_chain_core_t *above;
2580 	size_t bytes;
2581 	int count;
2582 	int oflags;
2583 	void *odata;
2584 
2585 	/*
2586 	 * First create a duplicate of the chain structure, associating
2587 	 * it with the same core, making it the same size, pointing it
2588 	 * to the same bref (the same media block).
2589 	 */
2590 	ochain = *chainp;
2591 	hmp = ochain->hmp;
2592 	if (bref == NULL)
2593 		bref = &ochain->bref;
2594 	nchain = hammer2_chain_alloc(hmp, trans, bref);
2595 	hammer2_chain_core_alloc(nchain, ochain->core);
2596 	bytes = (hammer2_off_t)1 <<
2597 		(int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
2598 	nchain->bytes = bytes;
2599 	nchain->modify_tid = ochain->modify_tid;
2600 
2601 	hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER);
2602 	hammer2_chain_dup_fixup(ochain, nchain);
2603 
2604 	/*
2605 	 * If parent is not NULL, insert into the parent at the requested
2606 	 * index.  The newly duplicated chain must be marked MOVED and
2607 	 * SUBMODIFIED set in its parent(s).
2608 	 *
2609 	 * Having both chains locked is extremely important for atomicy.
2610 	 */
2611 	if (parent) {
2612 		/*
2613 		 * Locate a free blockref in the parent's array
2614 		 */
2615 		above = parent->core;
2616 		KKASSERT(ccms_thread_lock_owned(&above->cst));
2617 
2618 		switch(parent->bref.type) {
2619 		case HAMMER2_BREF_TYPE_INODE:
2620 			KKASSERT((parent->data->ipdata.op_flags &
2621 				  HAMMER2_OPFLAG_DIRECTDATA) == 0);
2622 			KKASSERT(parent->data != NULL);
2623 			base = &parent->data->ipdata.u.blockset.blockref[0];
2624 			count = HAMMER2_SET_COUNT;
2625 			break;
2626 		case HAMMER2_BREF_TYPE_INDIRECT:
2627 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2628 			if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2629 				base = NULL;
2630 			} else {
2631 				KKASSERT(parent->data != NULL);
2632 				base = &parent->data->npdata[0];
2633 			}
2634 			count = parent->bytes / sizeof(hammer2_blockref_t);
2635 			break;
2636 		case HAMMER2_BREF_TYPE_VOLUME:
2637 			KKASSERT(parent->data != NULL);
2638 			base = &hmp->voldata.sroot_blockset.blockref[0];
2639 			count = HAMMER2_SET_COUNT;
2640 			break;
2641 		case HAMMER2_BREF_TYPE_FREEMAP:
2642 			KKASSERT(parent->data != NULL);
2643 			base = &hmp->voldata.freemap_blockset.blockref[0];
2644 			count = HAMMER2_SET_COUNT;
2645 			break;
2646 		default:
2647 			panic("hammer2_chain_create: unrecognized "
2648 			      "blockref type: %d",
2649 			      parent->bref.type);
2650 			count = 0;
2651 			break;
2652 		}
2653 		KKASSERT(i >= 0 && i < count);
2654 
2655 		KKASSERT((nchain->flags & HAMMER2_CHAIN_DELETED) == 0);
2656 		KKASSERT(parent->refs > 0);
2657 
2658 		spin_lock(&above->cst.spin);
2659 		nchain->above = above;
2660 		nchain->index = i;
2661 		scan = hammer2_chain_find_locked(parent, i);
2662 		KKASSERT(base == NULL || base[i].type == 0 ||
2663 			 scan == NULL ||
2664 			 (scan->flags & HAMMER2_CHAIN_DELETED));
2665 		if (RB_INSERT(hammer2_chain_tree, &above->rbtree,
2666 			      nchain)) {
2667 			panic("hammer2_chain_duplicate: collision");
2668 		}
2669 		atomic_set_int(&nchain->flags, HAMMER2_CHAIN_ONRBTREE);
2670 		spin_unlock(&above->cst.spin);
2671 
2672 		if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2673 			hammer2_chain_ref(nchain);
2674 			atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2675 		}
2676 		hammer2_chain_setsubmod(trans, nchain);
2677 	}
2678 
2679 	/*
2680 	 * We have to unlock ochain to flush any dirty data, asserting the
2681 	 * case (data == NULL) to catch any extra locks that might have been
2682 	 * present, then transfer state to nchain.
2683 	 */
2684 	oflags = ochain->flags;
2685 	odata = ochain->data;
2686 	hammer2_chain_unlock(ochain);
2687 	KKASSERT((ochain->flags & HAMMER2_CHAIN_EMBEDDED) ||
2688 		 ochain->data == NULL);
2689 
2690 	if (oflags & HAMMER2_CHAIN_INITIAL)
2691 		atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL);
2692 
2693 	/*
2694 	 * WARNING!  We should never resolve DATA to device buffers
2695 	 *	     (XXX allow it if the caller did?), and since
2696 	 *	     we currently do not have the logical buffer cache
2697 	 *	     buffer in-hand to fix its cached physical offset
2698 	 *	     we also force the modify code to not COW it. XXX
2699 	 */
2700 	if (oflags & HAMMER2_CHAIN_MODIFIED) {
2701 		if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2702 			hammer2_chain_modify(trans, &nchain,
2703 					     HAMMER2_MODIFY_OPTDATA |
2704 					     HAMMER2_MODIFY_NOREALLOC |
2705 					     HAMMER2_MODIFY_ASSERTNOCOPY);
2706 		} else if (oflags & HAMMER2_CHAIN_INITIAL) {
2707 			hammer2_chain_modify(trans, &nchain,
2708 					     HAMMER2_MODIFY_OPTDATA |
2709 					     HAMMER2_MODIFY_ASSERTNOCOPY);
2710 		} else {
2711 			hammer2_chain_modify(trans, &nchain,
2712 					     HAMMER2_MODIFY_ASSERTNOCOPY);
2713 		}
2714 		hammer2_chain_drop(nchain);
2715 	} else {
2716 		if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2717 			hammer2_chain_drop(nchain);
2718 		} else if (oflags & HAMMER2_CHAIN_INITIAL) {
2719 			hammer2_chain_drop(nchain);
2720 		} else {
2721 			hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS |
2722 						   HAMMER2_RESOLVE_NOREF);
2723 			hammer2_chain_unlock(nchain);
2724 		}
2725 	}
2726 	atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2727 	*chainp = nchain;
2728 }
2729 
2730 #if 0
2731 		/*
2732 		 * When the chain is in the INITIAL state we must still
2733 		 * ensure that a block has been assigned so MOVED processing
2734 		 * works as expected.
2735 		 */
2736 		KKASSERT (nchain->bref.type != HAMMER2_BREF_TYPE_DATA);
2737 		hammer2_chain_modify(trans, &nchain,
2738 				     HAMMER2_MODIFY_OPTDATA |
2739 				     HAMMER2_MODIFY_ASSERTNOCOPY);
2740 
2741 
2742 	hammer2_chain_lock(nchain, HAMMER2_RESOLVE_MAYBE |
2743 				   HAMMER2_RESOLVE_NOREF); /* eat excess ref */
2744 	hammer2_chain_unlock(nchain);
2745 #endif
2746 
2747 /*
2748  * Special in-place delete-duplicate sequence which does not require a
2749  * locked parent.  (*chainp) is marked DELETED and atomically replaced
2750  * with a duplicate.  Atomicy is at the very-fine spin-lock level in
2751  * order to ensure that lookups do not race us.
2752  */
void
hammer2_chain_delete_duplicate(hammer2_trans_t *trans, hammer2_chain_t **chainp,
			       int flags)
{
	hammer2_mount_t *hmp;
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	hammer2_chain_core_t *above;
	size_t bytes;
	int oflags;
	void *odata;

	/*
	 * First create a duplicate of the chain structure
	 */
	ochain = *chainp;
	hmp = ochain->hmp;
	nchain = hammer2_chain_alloc(hmp, trans, &ochain->bref);    /* 1 ref */
	if (flags & HAMMER2_DELDUP_RECORE)
		hammer2_chain_core_alloc(nchain, NULL);
	else
		hammer2_chain_core_alloc(nchain, ochain->core);
	above = ochain->above;

	bytes = (hammer2_off_t)1 <<
		(int)(ochain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
	nchain->bytes = bytes;
	nchain->modify_tid = ochain->modify_tid;
	/* carry accumulated quota-style counters over to the replacement */
	nchain->data_count += ochain->data_count;
	nchain->inode_count += ochain->inode_count;

	/*
	 * Lock nchain and insert into ochain's core hierarchy, marking
	 * ochain DELETED at the same time.  Having both chains locked
	 * is extremely important for atomicity.
	 */
	hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER);
	hammer2_chain_dup_fixup(ochain, nchain);
	/* extra ref still present from original allocation */

	nchain->index = ochain->index;

	/*
	 * All state transitions below occur under the single core spinlock
	 * so concurrent lookups see either the old or the new chain, never
	 * an intermediate state.
	 */
	spin_lock(&above->cst.spin);
	atomic_set_int(&nchain->flags, HAMMER2_CHAIN_ONRBTREE);
	ochain->delete_tid = trans->sync_tid;
	nchain->above = above;
	atomic_set_int(&ochain->flags, HAMMER2_CHAIN_DELETED);
	if ((ochain->flags & HAMMER2_CHAIN_MOVED) == 0) {
		hammer2_chain_ref(ochain);
		atomic_set_int(&ochain->flags, HAMMER2_CHAIN_MOVED);
	}
	if (RB_INSERT(hammer2_chain_tree, &above->rbtree, nchain)) {
		panic("hammer2_chain_delete_duplicate: collision");
	}
	spin_unlock(&above->cst.spin);

	/*
	 * We have to unlock ochain to flush any dirty data, asserting the
	 * case (data == NULL) to catch any extra locks that might have been
	 * present, then transfer state to nchain.
	 */
	oflags = ochain->flags;
	odata = ochain->data;
	hammer2_chain_unlock(ochain);	/* replacing ochain */
	KKASSERT(ochain->bref.type == HAMMER2_BREF_TYPE_INODE ||
		 ochain->data == NULL);

	if (oflags & HAMMER2_CHAIN_INITIAL)
		atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL);

	/*
	 * WARNING!  We should never resolve DATA to device buffers
	 *	     (XXX allow it if the caller did?), and since
	 *	     we currently do not have the logical buffer cache
	 *	     buffer in-hand to fix its cached physical offset
	 *	     we also force the modify code to not COW it. XXX
	 */
	if (oflags & HAMMER2_CHAIN_MODIFIED) {
		if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
			hammer2_chain_modify(trans, &nchain,
					     HAMMER2_MODIFY_OPTDATA |
					     HAMMER2_MODIFY_NOREALLOC |
					     HAMMER2_MODIFY_ASSERTNOCOPY);
		} else if (oflags & HAMMER2_CHAIN_INITIAL) {
			hammer2_chain_modify(trans, &nchain,
					     HAMMER2_MODIFY_OPTDATA |
					     HAMMER2_MODIFY_ASSERTNOCOPY);
		} else {
			hammer2_chain_modify(trans, &nchain,
					     HAMMER2_MODIFY_ASSERTNOCOPY);
		}
		/* eat the surplus allocation ref */
		hammer2_chain_drop(nchain);
	} else {
		if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
			hammer2_chain_drop(nchain);
		} else if (oflags & HAMMER2_CHAIN_INITIAL) {
			hammer2_chain_drop(nchain);
		} else {
			/* resolve data; NOREF eats the surplus ref */
			hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS |
						   HAMMER2_RESOLVE_NOREF);
			hammer2_chain_unlock(nchain);
		}
	}

	/*
	 * Unconditionally set the MOVED and SUBMODIFIED bit to force
	 * update of parent bref and indirect blockrefs during flush.
	 */
	if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
		atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
		hammer2_chain_ref(nchain);
	}
	atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SUBMODIFIED);
	hammer2_chain_setsubmod(trans, nchain);
	*chainp = nchain;
}
2869 
2870 /*
2871  * Helper function to fixup inodes.  The caller procedure stack may hold
2872  * multiple locks on ochain if it represents an inode, preventing our
2873  * unlock from retiring its state to the buffer cache.
2874  *
2875  * In this situation any attempt to access the buffer cache could result
2876  * either in stale data or a deadlock.  Work around the problem by copying
2877  * the embedded data directly.
2878  */
2879 static
2880 void
2881 hammer2_chain_dup_fixup(hammer2_chain_t *ochain, hammer2_chain_t *nchain)
2882 {
2883 	if (ochain->data == NULL)
2884 		return;
2885 	switch(ochain->bref.type) {
2886 	case HAMMER2_BREF_TYPE_INODE:
2887 		KKASSERT(nchain->data == NULL);
2888 		atomic_set_int(&nchain->flags, HAMMER2_CHAIN_EMBEDDED);
2889 		nchain->data = kmalloc(sizeof(nchain->data->ipdata),
2890 				       ochain->hmp->mchain, M_WAITOK | M_ZERO);
2891 		nchain->data->ipdata = ochain->data->ipdata;
2892 		break;
2893 	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2894 		KKASSERT(nchain->data == NULL);
2895 		atomic_set_int(&nchain->flags, HAMMER2_CHAIN_EMBEDDED);
2896 		nchain->data = kmalloc(sizeof(nchain->data->bmdata),
2897 				       ochain->hmp->mchain, M_WAITOK | M_ZERO);
2898 		bcopy(ochain->data->bmdata,
2899 		      nchain->data->bmdata,
2900 		      sizeof(nchain->data->bmdata));
2901 		break;
2902 	default:
2903 		break;
2904 	}
2905 }
2906 
2907 /*
2908  * Create a snapshot of the specified {parent, chain} with the specified
2909  * label.
2910  *
2911  * (a) We create a duplicate connected to the super-root as the specified
2912  *     label.
2913  *
2914  * (b) We issue a restricted flush using the current transaction on the
2915  *     duplicate.
2916  *
2917  * (c) We disconnect and reallocate the duplicate's core.
2918  */
2919 int
2920 hammer2_chain_snapshot(hammer2_trans_t *trans, hammer2_inode_t *ip,
2921 		       hammer2_ioc_pfs_t *pfs)
2922 {
2923 	hammer2_cluster_t *cluster;
2924 	hammer2_mount_t *hmp;
2925 	hammer2_chain_t *chain;
2926 	hammer2_chain_t *nchain;
2927 	hammer2_chain_t *parent;
2928 	hammer2_inode_data_t *ipdata;
2929 	size_t name_len;
2930 	hammer2_key_t lhc;
2931 	int error;
2932 
2933 	name_len = strlen(pfs->name);
2934 	lhc = hammer2_dirhash(pfs->name, name_len);
2935 	cluster = ip->pmp->mount_cluster;
2936 	hmp = ip->chain->hmp;
2937 	KKASSERT(hmp == cluster->hmp);	/* XXX */
2938 
2939 	/*
2940 	 * Create disconnected duplicate
2941 	 */
2942 	KKASSERT((trans->flags & HAMMER2_TRANS_RESTRICTED) == 0);
2943 	nchain = ip->chain;
2944 	hammer2_chain_lock(nchain, HAMMER2_RESOLVE_MAYBE);
2945 	hammer2_chain_duplicate(trans, NULL, -1, &nchain, NULL);
2946 	atomic_set_int(&nchain->flags, HAMMER2_CHAIN_RECYCLE |
2947 				       HAMMER2_CHAIN_SNAPSHOT);
2948 
2949 	/*
2950 	 * Create named entry in the super-root.
2951 	 */
2952         parent = hammer2_chain_lookup_init(hmp->schain, 0);
2953 	error = 0;
2954 	while (error == 0) {
2955 		chain = hammer2_chain_lookup(&parent, lhc, lhc, 0);
2956 		if (chain == NULL)
2957 			break;
2958 		if ((lhc & HAMMER2_DIRHASH_LOMASK) == HAMMER2_DIRHASH_LOMASK)
2959 			error = ENOSPC;
2960 		hammer2_chain_unlock(chain);
2961 		chain = NULL;
2962 		++lhc;
2963 	}
2964 	hammer2_chain_create(trans, &parent, &nchain, lhc, 0,
2965 			     HAMMER2_BREF_TYPE_INODE,
2966 			     HAMMER2_INODE_BYTES);
2967 	hammer2_chain_modify(trans, &nchain, HAMMER2_MODIFY_ASSERTNOCOPY);
2968 	hammer2_chain_lookup_done(parent);
2969 	parent = NULL;	/* safety */
2970 
2971 	/*
2972 	 * Name fixup
2973 	 */
2974 	ipdata = &nchain->data->ipdata;
2975 	ipdata->name_key = lhc;
2976 	ipdata->name_len = name_len;
2977 	ksnprintf(ipdata->filename, sizeof(ipdata->filename), "%s", pfs->name);
2978 
2979 	/*
2980 	 * Set PFS type, generate a unique filesystem id, and generate
2981 	 * a cluster id.  Use the same clid when snapshotting a PFS root,
2982 	 * which theoretically allows the snapshot to be used as part of
2983 	 * the same cluster (perhaps as a cache).
2984 	 */
2985 	ipdata->pfs_type = HAMMER2_PFSTYPE_SNAPSHOT;
2986 	kern_uuidgen(&ipdata->pfs_fsid, 1);
2987 	if (ip->chain == cluster->rchain)
2988 		ipdata->pfs_clid = ip->chain->data->ipdata.pfs_clid;
2989 	else
2990 		kern_uuidgen(&ipdata->pfs_clid, 1);
2991 
2992 	/*
2993 	 * Issue a restricted flush of the snapshot.  This is a synchronous
2994 	 * operation.
2995 	 */
2996 	trans->flags |= HAMMER2_TRANS_RESTRICTED;
2997 	kprintf("SNAPSHOTA\n");
2998 	tsleep(trans, 0, "snapslp", hz*4);
2999 	kprintf("SNAPSHOTB\n");
3000 	hammer2_chain_flush(trans, nchain);
3001 	trans->flags &= ~HAMMER2_TRANS_RESTRICTED;
3002 
3003 #if 0
3004 	/*
3005 	 * Remove the link b/c nchain is a snapshot and snapshots don't
3006 	 * follow CHAIN_DELETED semantics ?
3007 	 */
3008 	chain = ip->chain;
3009 
3010 
3011 	KKASSERT(chain->duplink == nchain);
3012 	KKASSERT(chain->core == nchain->core);
3013 	KKASSERT(nchain->refs >= 2);
3014 	chain->duplink = nchain->duplink;
3015 	atomic_clear_int(&nchain->flags, HAMMER2_CHAIN_DUPTARGET);
3016 	hammer2_chain_drop(nchain);
3017 #endif
3018 
3019 	kprintf("snapshot %s nchain->refs %d nchain->flags %08x\n",
3020 		pfs->name, nchain->refs, nchain->flags);
3021 	hammer2_chain_unlock(nchain);
3022 
3023 	return (error);
3024 }
3025 
3026 /*
3027  * Create an indirect block that covers one or more of the elements in the
3028  * current parent.  Either returns the existing parent with no locking or
3029  * ref changes or returns the new indirect block locked and referenced
3030  * and leaving the original parent lock/ref intact as well.
3031  *
3032  * If an error occurs, NULL is returned and *errorp is set to the error.
3033  *
3034  * The returned chain depends on where the specified key falls.
3035  *
 * The key/keybits for the indirect mode only needs to follow four rules:
3037  *
3038  * (1) That all elements underneath it fit within its key space and
3039  *
3040  * (2) That all elements outside it are outside its key space.
3041  *
3042  * (3) When creating the new indirect block any elements in the current
3043  *     parent that fit within the new indirect block's keyspace must be
3044  *     moved into the new indirect block.
3045  *
3046  * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
 *     keyspace than the current parent, but lookup/iteration rules will
3048  *     ensure (and must ensure) that rule (2) for all parents leading up
3049  *     to the nearest inode or the root volume header is adhered to.  This
3050  *     is accomplished by always recursing through matching keyspaces in
3051  *     the hammer2_chain_lookup() and hammer2_chain_next() API.
3052  *
3053  * The current implementation calculates the current worst-case keyspace by
3054  * iterating the current parent and then divides it into two halves, choosing
3055  * whichever half has the most elements (not necessarily the half containing
3056  * the requested key).
3057  *
3058  * We can also opt to use the half with the least number of elements.  This
3059  * causes lower-numbered keys (aka logical file offsets) to recurse through
3060  * fewer indirect blocks and higher-numbered keys to recurse through more.
3061  * This also has the risk of not moving enough elements to the new indirect
3062  * block and being forced to create several indirect blocks before the element
3063  * can be inserted.
3064  *
3065  * Must be called with an exclusively locked parent.
3066  */
3067 static int hammer2_chain_indkey_freemap(hammer2_chain_t *parent,
3068 				hammer2_key_t *keyp, int keybits,
3069 				hammer2_blockref_t *base, int count);
3070 static int hammer2_chain_indkey_normal(hammer2_chain_t *parent,
3071 				hammer2_key_t *keyp, int keybits,
3072 				hammer2_blockref_t *base, int count);
static
hammer2_chain_t *
hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent,
			      hammer2_key_t create_key, int create_bits,
			      int for_type, int *errorp)
{
	hammer2_mount_t *hmp;
	hammer2_chain_core_t *above;
	hammer2_chain_core_t *icore;
	hammer2_blockref_t *base;
	hammer2_blockref_t *bref;
	hammer2_chain_t *chain;
	hammer2_chain_t *child;
	hammer2_chain_t *ichain;
	hammer2_chain_t dummy;
	hammer2_key_t key = create_key;
	int keybits = create_bits;
	int count;
	int nbytes;
	int i;

	/*
	 * Calculate the base blockref pointer or NULL if the chain
	 * is known to be empty.  We need to calculate the array count
	 * for RB lookups either way.
	 */
	hmp = parent->hmp;
	*errorp = 0;
	KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
	above = parent->core;

	/*hammer2_chain_modify(trans, &parent, HAMMER2_MODIFY_OPTDATA);*/
	if (parent->flags & HAMMER2_CHAIN_INITIAL) {
		/*
		 * INITIAL parent has no media backing yet; only the
		 * in-memory children are relevant (base stays NULL).
		 */
		base = NULL;

		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			count = HAMMER2_SET_COUNT;
			break;
		default:
			panic("hammer2_chain_create_indirect: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
			count = 0;
			break;
		}
	} else {
		/*
		 * Locate the embedded media blockref array for the
		 * parent's type.
		 */
		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			base = &parent->data->ipdata.u.blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			base = &parent->data->npdata[0];
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			base = &hmp->voldata.sroot_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			base = &hmp->voldata.freemap_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		default:
			panic("hammer2_chain_create_indirect: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
			count = 0;
			break;
		}
	}

	/*
	 * dummy used in later chain allocation (no longer used for lookups).
	 */
	bzero(&dummy, sizeof(dummy));
	dummy.delete_tid = HAMMER2_MAX_TID;

	/*
	 * When creating an indirect block for a freemap node or leaf
	 * the key/keybits must be fitted to static radix levels because
	 * particular radix levels use particular reserved blocks in the
	 * related zone.
	 *
	 * This routine calculates the key/radix of the indirect block
	 * we need to create, and whether it is on the high-side or the
	 * low-side.
	 */
	if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
	    for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
		keybits = hammer2_chain_indkey_freemap(parent, &key, keybits,
						       base, count);
	} else {
		keybits = hammer2_chain_indkey_normal(parent, &key, keybits,
						      base, count);
	}

	/*
	 * Normalize the key for the radix being represented, keeping the
	 * high bits and throwing away the low bits.
	 */
	key &= ~(((hammer2_key_t)1 << keybits) - 1);

	/*
	 * How big should our new indirect block be?  It has to be at least
	 * as large as its parent.
	 */
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
		nbytes = HAMMER2_IND_BYTES_MIN;
	else
		nbytes = HAMMER2_IND_BYTES_MAX;
	if (nbytes < count * sizeof(hammer2_blockref_t))
		nbytes = count * sizeof(hammer2_blockref_t);

	/*
	 * Ok, create our new indirect block
	 */
	if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
	    for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
		dummy.bref.type = HAMMER2_BREF_TYPE_FREEMAP_NODE;
	} else {
		dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
	}
	dummy.bref.key = key;
	dummy.bref.keybits = keybits;
	dummy.bref.data_off = hammer2_getradix(nbytes);
	dummy.bref.methods = parent->bref.methods;

	ichain = hammer2_chain_alloc(hmp, trans, &dummy.bref);
	atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
	hammer2_chain_core_alloc(ichain, NULL);
	icore = ichain->core;
	hammer2_chain_lock(ichain, HAMMER2_RESOLVE_MAYBE);
	hammer2_chain_drop(ichain);	/* excess ref from alloc */

	/*
	 * We have to mark it modified to allocate its block, but use
	 * OPTDATA to allow it to remain in the INITIAL state.  Otherwise
	 * it won't be acted upon by the flush code.
	 *
	 * XXX leave the node unmodified, depend on the SUBMODIFIED
	 * flush to assign and modify parent blocks.
	 */
	hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);

	/*
	 * Iterate the original parent and move the matching brefs into
	 * the new indirect block.
	 *
	 * At the same time locate an empty slot (or what will become an
	 * empty slot) and assign the new indirect block to that slot.
	 *
	 * XXX handle flushes.
	 */
	spin_lock(&above->cst.spin);
	for (i = 0; i < count; ++i) {
		/*
		 * For keying purposes access the bref from the media or
		 * from our in-memory cache.  In cases where the in-memory
		 * cache overrides the media the keyrefs will be the same
		 * anyway so we can avoid checking the cache when the media
		 * has a key.
		 */
		child = hammer2_chain_find_locked(parent, i);
		if (child) {
			if (child->flags & HAMMER2_CHAIN_DELETED) {
				/* deleted slot can hold the new block */
				if (ichain->index < 0)
					ichain->index = i;
				continue;
			}
			bref = &child->bref;
		} else if (base && base[i].type) {
			bref = &base[i];
		} else {
			/* empty slot can hold the new block */
			if (ichain->index < 0)
				ichain->index = i;
			continue;
		}

		/*
		 * Skip keys that are not within the key/radix of the new
		 * indirect block.  They stay in the parent.
		 */
		if ((~(((hammer2_key_t)1 << keybits) - 1) &
		    (key ^ bref->key)) != 0) {
			continue;
		}

		/*
		 * This element is being moved from the parent, its slot
		 * is available for our new indirect block.
		 */
		if (ichain->index < 0)
			ichain->index = i;

		/*
		 * Load the new indirect block by acquiring or allocating
		 * the related chain entries, then move them to the new
		 * parent (ichain) by deleting them from their old location
		 * and inserting a duplicate of the chain and any modified
		 * sub-chain in the new location.
		 *
		 * We must set MOVED in the chain being duplicated and
		 * SUBMODIFIED in the parent(s) so the flush code knows
		 * what is going on.  The latter is done after the loop.
		 *
		 * WARNING! above->cst.spin must be held when parent is
		 *	    modified, even though we own the full blown lock,
		 *	    to deal with setsubmod and rename races.
		 *	    (XXX remove this req).
		 */
		spin_unlock(&above->cst.spin);
		chain = hammer2_chain_get(parent, i, HAMMER2_LOOKUP_NODATA);
		hammer2_chain_delete(trans, chain, HAMMER2_DELETE_WILLDUP);
		hammer2_chain_duplicate(trans, ichain, i, &chain, NULL);
		hammer2_chain_unlock(chain);
		KKASSERT(parent->refs > 0);
		chain = NULL;
		spin_lock(&above->cst.spin);
	}
	spin_unlock(&above->cst.spin);

	/*
	 * Insert the new indirect block into the parent now that we've
	 * cleared out some entries in the parent.  We calculated a good
	 * insertion index in the loop above (ichain->index).
	 *
	 * We don't have to set MOVED here because we mark ichain modified
	 * down below (so the normal modified -> flush -> set-moved sequence
	 * applies).
	 *
	 * The insertion shouldn't race as this is a completely new block
	 * and the parent is locked.
	 */
	if (ichain->index < 0)
		kprintf("indirect parent %p count %d key %016jx/%d\n",
			parent, count, (intmax_t)key, keybits);
	KKASSERT(ichain->index >= 0);
	KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
	spin_lock(&above->cst.spin);
	if (RB_INSERT(hammer2_chain_tree, &above->rbtree, ichain))
		panic("hammer2_chain_create_indirect: ichain insertion");
	atomic_set_int(&ichain->flags, HAMMER2_CHAIN_ONRBTREE);
	ichain->above = above;
	spin_unlock(&above->cst.spin);

	/*
	 * Mark the new indirect block modified after insertion, which
	 * will propagate up through parent all the way to the root and
	 * also allocate the physical block in ichain for our caller,
	 * and assign ichain->data to a pre-zero'd space (because there
	 * is not prior data to copy into it).
	 *
	 * We have to set SUBMODIFIED in ichain's flags manually so the
	 * flusher knows it has to recurse through it to get to all of
	 * our moved blocks, then call setsubmod() to set the bit
	 * recursively.
	 */
	/*hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);*/
	atomic_set_int(&ichain->flags, HAMMER2_CHAIN_SUBMODIFIED);
	hammer2_chain_setsubmod(trans, ichain);

	/*
	 * Figure out what to return.
	 */
	if (~(((hammer2_key_t)1 << keybits) - 1) &
		   (create_key ^ key)) {
		/*
		 * Key being created is outside the key range,
		 * return the original parent.
		 */
		hammer2_chain_unlock(ichain);
	} else {
		/*
		 * Otherwise it's in the range, return the new parent.
		 * (leave both the new and old parent locked).
		 */
		parent = ichain;
	}

	return(parent);
}
3367 
3368 /*
3369  * Calculate the keybits and highside/lowside of the freemap node the
3370  * caller is creating.
3371  *
3372  * This routine will specify the next higher-level freemap key/radix
3373  * representing the lowest-ordered set.  By doing so, eventually all
3374  * low-ordered sets will be moved one level down.
3375  *
3376  * We have to be careful here because the freemap reserves a limited
3377  * number of blocks for a limited number of levels.  So we can't just
3378  * push indiscriminately.
3379  */
3380 int
3381 hammer2_chain_indkey_freemap(hammer2_chain_t *parent, hammer2_key_t *keyp,
3382 			     int keybits, hammer2_blockref_t *base, int count)
3383 {
3384 	hammer2_chain_core_t *above;
3385 	hammer2_chain_t *child;
3386 	hammer2_blockref_t *bref;
3387 	hammer2_key_t key;
3388 	int locount;
3389 	int hicount;
3390 	int i;
3391 
3392 	key = *keyp;
3393 	above = parent->core;
3394 	locount = 0;
3395 	hicount = 0;
3396 	keybits = 64;
3397 
3398 	/*
3399 	 * Calculate the range of keys in the array being careful to skip
3400 	 * slots which are overridden with a deletion.
3401 	 */
3402 	spin_lock(&above->cst.spin);
3403 	for (i = 0; i < count; ++i) {
3404 		child = hammer2_chain_find_locked(parent, i);
3405 		if (child) {
3406 			if (child->flags & HAMMER2_CHAIN_DELETED)
3407 				continue;
3408 			bref = &child->bref;
3409 		} else if (base && base[i].type) {
3410 			bref = &base[i];
3411 		} else {
3412 			continue;
3413 		}
3414 
3415 		if (keybits > bref->keybits) {
3416 			key = bref->key;
3417 			keybits = bref->keybits;
3418 		} else if (keybits == bref->keybits && bref->key < key) {
3419 			key = bref->key;
3420 		}
3421 	}
3422 	spin_unlock(&above->cst.spin);
3423 
3424 	/*
3425 	 * Return the keybits for a higher-level FREEMAP_NODE covering
3426 	 * this node.
3427 	 */
3428 	switch(keybits) {
3429 	case HAMMER2_FREEMAP_LEVEL0_RADIX:
3430 		keybits = HAMMER2_FREEMAP_LEVEL1_RADIX;
3431 		break;
3432 	case HAMMER2_FREEMAP_LEVEL1_RADIX:
3433 		keybits = HAMMER2_FREEMAP_LEVEL2_RADIX;
3434 		break;
3435 	case HAMMER2_FREEMAP_LEVEL2_RADIX:
3436 		keybits = HAMMER2_FREEMAP_LEVEL3_RADIX;
3437 		break;
3438 	case HAMMER2_FREEMAP_LEVEL3_RADIX:
3439 		keybits = HAMMER2_FREEMAP_LEVEL4_RADIX;
3440 		break;
3441 	case HAMMER2_FREEMAP_LEVEL4_RADIX:
3442 		panic("hammer2_chain_indkey_freemap: level too high");
3443 		break;
3444 	default:
3445 		panic("hammer2_chain_indkey_freemap: bad radix");
3446 		break;
3447 	}
3448 	*keyp = key;
3449 
3450 	return (keybits);
3451 }
3452 
3453 /*
3454  * Calculate the keybits and highside/lowside of the indirect block the
3455  * caller is creating.
3456  */
static int
hammer2_chain_indkey_normal(hammer2_chain_t *parent, hammer2_key_t *keyp,
			    int keybits, hammer2_blockref_t *base, int count)
{
	hammer2_chain_core_t *above;
	hammer2_chain_t *child;
	hammer2_blockref_t *bref;
	hammer2_key_t key;
	int nkeybits;
	int locount;		/* # of live elements in the low half */
	int hicount;		/* # of live elements in the high half */
	int i;

	key = *keyp;
	above = parent->core;
	locount = 0;
	hicount = 0;

	/*
	 * Calculate the range of keys in the array being careful to skip
	 * slots which are overridden with a deletion.  Once the scan
	 * completes we will cut the key range in half and shift half the
	 * range into the new indirect block.
	 */
	spin_lock(&above->cst.spin);
	for (i = 0; i < count; ++i) {
		/* in-memory children override the media blockrefs */
		child = hammer2_chain_find_locked(parent, i);
		if (child) {
			if (child->flags & HAMMER2_CHAIN_DELETED)
				continue;
			bref = &child->bref;
		} else if (base && base[i].type) {
			bref = &base[i];
		} else {
			continue;
		}

		/*
		 * Expand our calculated key range (key, keybits) to fit
		 * the scanned key.  nkeybits represents the full range
		 * that we will later cut in half (two halves @ nkeybits - 1).
		 */
		nkeybits = keybits;
		if (nkeybits < bref->keybits) {
			/* radix > 64 indicates a corrupt bref */
			if (bref->keybits > 64) {
				kprintf("bad bref index %d chain %p bref %p\n",
					i, child, bref);
				Debugger("fubar");
			}
			nkeybits = bref->keybits;
		}
		/* widen until the scanned key falls inside the range */
		while (nkeybits < 64 &&
		       (~(((hammer2_key_t)1 << nkeybits) - 1) &
		        (key ^ bref->key)) != 0) {
			++nkeybits;
		}

		/*
		 * If the new key range is larger we have to determine
		 * which side of the new key range the existing keys fall
		 * under by checking the high bit, then collapsing the
		 * locount into the hicount or vise-versa.
		 */
		if (keybits != nkeybits) {
			if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
				hicount += locount;
				locount = 0;
			} else {
				locount += hicount;
				hicount = 0;
			}
			keybits = nkeybits;
		}

		/*
		 * The newly scanned key will be in the lower half or the
		 * higher half of the (new) key range.
		 */
		if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
			++hicount;
		else
			++locount;
	}
	spin_unlock(&above->cst.spin);
	bref = NULL;	/* now invalid (safety) */

	/*
	 * Adjust keybits to represent half of the full range calculated
	 * above (radix 63 max)
	 */
	--keybits;

	/*
	 * Select whichever half contains the most elements.  Theoretically
	 * we can select either side as long as it contains at least one
	 * element (in order to ensure that a free slot is present to hold
	 * the indirect block).
	 */
	if (hammer2_indirect_optimize) {
		/*
		 * Insert node for least number of keys, this will arrange
		 * the first few blocks of a large file or the first few
		 * inodes in a directory with fewer indirect blocks when
		 * created linearly.
		 */
		if (hicount < locount && hicount != 0)
			key |= (hammer2_key_t)1 << keybits;
		else
			key &= ~(hammer2_key_t)1 << keybits;
	} else {
		/*
		 * Insert node for most number of keys, best for heavily
		 * fragmented files.
		 */
		if (hicount > locount)
			key |= (hammer2_key_t)1 << keybits;
		else
			key &= ~(hammer2_key_t)1 << keybits;
	}
	*keyp = key;

	return (keybits);
}
3580 
3581 /*
3582  * Sets CHAIN_DELETED and CHAIN_MOVED in the chain being deleted and
3583  * set chain->delete_tid.
3584  *
3585  * This function does NOT generate a modification to the parent.  It
3586  * would be nearly impossible to figure out which parent to modify anyway.
3587  * Such modifications are handled by the flush code and are properly merged
3588  * using the flush synchronization point.
3589  *
3590  * The find/get code will properly overload the RBTREE check on top of
3591  * the bref check to detect deleted entries.
3592  *
3593  * This function is NOT recursive.  Any entity already pushed into the
3594  * chain (such as an inode) may still need visibility into its contents,
3595  * as well as the ability to read and modify the contents.  For example,
3596  * for an unlinked file which is still open.
3597  *
3598  * NOTE: This function does NOT set chain->modify_tid, allowing future
3599  *	 code to distinguish between live and deleted chains by testing
3600  *	 sync_tid.
3601  *
3602  * NOTE: Deletions normally do not occur in the middle of a duplication
3603  *	 chain but we use a trick for hardlink migration that refactors
3604  *	 the originating inode without deleting it, so we make no assumptions
3605  *	 here.
3606  */
void
hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *chain, int flags)
{
	/* caller must hold the chain's own core lock */
	KKASSERT(ccms_thread_lock_owned(&chain->core->cst));

	/*
	 * Nothing to do if already marked.
	 */
	if (chain->flags & HAMMER2_CHAIN_DELETED)
		return;

	/*
	 * We must set MOVED along with DELETED for the flush code to
	 * recognize the operation and properly disconnect the chain
	 * in-memory.
	 *
	 * The setting of DELETED causes finds, lookups, and _next iterations
	 * to no longer recognize the chain.  RB_SCAN()s will still have
	 * visibility (needed for flush serialization points).
	 *
	 * We need the spinlock on the core whose RBTREE contains chain
	 * to protect against races.
	 */
	spin_lock(&chain->above->cst.spin);
	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
	if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
		/* MOVED carries its own ref on the chain */
		hammer2_chain_ref(chain);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
	}
	chain->delete_tid = trans->sync_tid;
	spin_unlock(&chain->above->cst.spin);

	/*
	 * Mark the underlying block as possibly being free unless WILLDUP
	 * is set.  Duplication can occur in many situations, particularly
	 * when chains are moved to indirect blocks.
	 */
	if ((flags & HAMMER2_DELETE_WILLDUP) == 0)
		hammer2_freemap_free(trans, chain->hmp, &chain->bref, 0);
	hammer2_chain_setsubmod(trans, chain);
}
3648 
/*
 * Passively wait on a chain for roughly one tick.  Relies on the tsleep
 * timeout rather than an explicit wakeup here; callers are expected to
 * re-test their condition after this returns.
 */
void
hammer2_chain_wait(hammer2_chain_t *chain)
{
	tsleep(chain, 0, "chnflw", 1);
}
3654 
3655 static
3656 void
3657 adjreadcounter(hammer2_blockref_t *bref, size_t bytes)
3658 {
3659 	long *counterp;
3660 
3661 	switch(bref->type) {
3662 	case HAMMER2_BREF_TYPE_DATA:
3663 		counterp = &hammer2_iod_file_read;
3664 		break;
3665 	case HAMMER2_BREF_TYPE_INODE:
3666 		counterp = &hammer2_iod_meta_read;
3667 		break;
3668 	case HAMMER2_BREF_TYPE_INDIRECT:
3669 		counterp = &hammer2_iod_indr_read;
3670 		break;
3671 	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
3672 	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
3673 		counterp = &hammer2_iod_fmap_read;
3674 		break;
3675 	default:
3676 		counterp = &hammer2_iod_volu_read;
3677 		break;
3678 	}
3679 	*counterp += bytes;
3680 }
3681