xref: /dflybsd-src/sys/vfs/hammer2/hammer2_flush.c (revision f1e3af6c0d9fb009456c1ad7a10c323acbf7022f)
/*
 * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 */
struct hammer2_flush_info {
	hammer2_mount_t	*hmp;
	hammer2_chain_t *parent;
	hammer2_trans_t	*trans;
	int		depth;
	int		diddeferral;
	struct flush_deferral_list flush_list;
	hammer2_tid_t	sync_tid;	/* flush synchronization point */
	hammer2_tid_t	mirror_tid;	/* collect mirror TID updates */
};

typedef struct hammer2_flush_info hammer2_flush_info_t;

static void hammer2_chain_flush_core(hammer2_flush_info_t *info,
				hammer2_chain_t *chain);
static int hammer2_chain_flush_scan1(hammer2_chain_t *child, void *data);
static int hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data);

/*
 * Transaction support functions for writing to the filesystem.
 *
 * Initializing a new transaction allocates a transaction ID.  We
 * don't bother marking the volume header MODIFIED.  Instead, the volume
 * will be synchronized at a later time as part of a larger flush sequence.
 *
 * Non-flush transactions can typically run concurrently.  However, if
 * there are non-flush transactions both before AND after a flush trans,
 * the transactions after the flush stall until the ones before it finish.
 *
 * Non-flush transactions occurring after a flush trans can run concurrently
 * with that flush.  They only have to wait for transactions prior to the
 * flush trans to complete before they unstall.
 *
 * WARNING! Modifications to the root volume cannot dup the root volume
 *	    header to handle synchronization points, so alloc_tid can
 *	    wind up (harmlessly) more advanced on flush.
 *
 * WARNING! Operations which might call inode_duplicate()/chain_duplicate()
 *	    depend heavily on having a unique sync_tid to avoid duplication
 *	    collisions (which key off of delete_tid).
 */
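/*
 * Illustrative timeline (a sketch added for clarity, not code from the
 * original file): transactions enter hmp->transq in sync_tid order.
 *
 *	transq head -> T1   T2   F3   T4   T5
 *
 * F3 (a flush) must wait for T1 and T2 to complete before it runs.  T4
 * and T5 may run concurrently with F3, but only after T1 and T2 finish,
 * i.e. once F3 reaches the head of the queue.
 */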
void
hammer2_trans_init(hammer2_mount_t *hmp, hammer2_trans_t *trans, int flags)
{
	hammer2_trans_t *scan;

	bzero(trans, sizeof(*trans));
	trans->hmp = hmp;

	hammer2_voldata_lock(hmp);
	trans->sync_tid = hmp->voldata.alloc_tid++;
	trans->flags = flags;
	trans->td = curthread;
	TAILQ_INSERT_TAIL(&hmp->transq, trans, entry);

	if (flags & HAMMER2_TRANS_ISFLUSH) {
		/*
		 * If we are a flush we have to wait for all transactions
		 * prior to our flush synchronization point to complete
		 * before we can start our flush.
		 */
		++hmp->flushcnt;
		if (hmp->curflush == NULL) {
			hmp->curflush = trans;
			hmp->flush_tid = trans->sync_tid;
		}
		while (TAILQ_FIRST(&hmp->transq) != trans) {
			lksleep(&trans->sync_tid, &hmp->voldatalk,
				0, "h2syncw", hz);
		}

		/*
		 * Once we become the running flush we can wakeup anyone
		 * who blocked on us.
		 */
		scan = trans;
		while ((scan = TAILQ_NEXT(scan, entry)) != NULL) {
			if (scan->flags & HAMMER2_TRANS_ISFLUSH)
				break;
			if (scan->blocked == 0)
				break;
			scan->blocked = 0;
			wakeup(&scan->blocked);
		}
	} else {
		/*
		 * If we are not a flush but our sync_tid is after a
		 * stalled flush, we have to wait until that flush unstalls
		 * (that is, all transactions prior to that flush complete),
		 * but then we can run concurrently with that flush.
		 *
		 * (flushcnt check only good as pre-condition, otherwise it
		 *  may represent elements queued after us after we block).
		 */
		if (hmp->flushcnt > 1 ||
		    (hmp->curflush &&
		     TAILQ_FIRST(&hmp->transq) != hmp->curflush)) {
			trans->blocked = 1;
			while (trans->blocked) {
				lksleep(&trans->blocked, &hmp->voldatalk,
					0, "h2trans", hz);
			}
		}
	}
	hammer2_voldata_unlock(hmp, 0);
}

void
hammer2_trans_done(hammer2_trans_t *trans)
{
	hammer2_mount_t *hmp = trans->hmp;
	hammer2_trans_t *scan;

	hammer2_voldata_lock(hmp);
	TAILQ_REMOVE(&hmp->transq, trans, entry);
	if (trans->flags & HAMMER2_TRANS_ISFLUSH) {
		/*
		 * If we were a flush we have to adjust curflush to the
		 * next flush.
		 *
		 * flush_tid is used to partition copy-on-write operations
		 * (mostly duplicate-on-modify ops), which is what allows
		 * us to execute a flush concurrent with modifying operations
		 * with higher TIDs.
		 */
		--hmp->flushcnt;
		if (hmp->flushcnt) {
			TAILQ_FOREACH(scan, &hmp->transq, entry) {
				if (scan->flags & HAMMER2_TRANS_ISFLUSH)
					break;
			}
			KKASSERT(scan);
			hmp->curflush = scan;
			hmp->flush_tid = scan->sync_tid;
		} else {
			/*
			 * Theoretically we don't have to clear flush_tid
			 * here since the flush will have synchronized
			 * all operations <= flush_tid already.  But for
			 * now zero it.
			 */
			hmp->curflush = NULL;
			hmp->flush_tid = 0;
		}
	} else {
		/*
		 * If we are not a flush but a flush is now at the head
		 * of the queue and we were previously blocking it,
		 * we can now unblock it.
		 */
		if (hmp->flushcnt &&
		    (scan = TAILQ_FIRST(&hmp->transq)) != NULL &&
		    trans->sync_tid < scan->sync_tid &&
		    (scan->flags & HAMMER2_TRANS_ISFLUSH)) {
			wakeup(&scan->sync_tid);
		}
	}
	hammer2_voldata_unlock(hmp, 0);

	trans->hmp = NULL;
}
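
/*
 * Usage sketch (illustrative only): a typical modifying operation
 * brackets its work with a transaction, while the filesystem sync path
 * passes HAMMER2_TRANS_ISFLUSH:
 *
 *	hammer2_trans_t trans;
 *
 *	hammer2_trans_init(hmp, &trans, 0);
 *	...modifications stamped with trans.sync_tid...
 *	hammer2_trans_done(&trans);
 */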

/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point (sync_tid), propagating parent chain modifications
 * and mirror_tid updates back up as needed.  Since we are recursing downward
 * we do not have to deal with the complexities of multi-homed chains (chains
 * with multiple parents).
 *
 * Caller must have interlocked against any non-flush-related modifying
 * operations in progress whose modify_tid values are less than or equal
 * to the passed sync_tid.
 *
 * Caller must have already vetted synchronization points to ensure they
 * are properly flushed.  Only snapshots and cluster flushes can create
 * these sorts of synchronization points.
 *
 * This routine can be called from several places but the most important
 * is from the hammer2_vop_reclaim() function.  We want to try to completely
 * clean out the inode structure to prevent disconnected inodes from
 * building up and blowing out the kmalloc pool.  However, it is not actually
 * necessary to flush reclaimed inodes to maintain HAMMER2's crash recovery
 * capability.
 *
 * chain is locked on call and will remain locked on return.  If a flush
 * occurred, the chain's MOVED bit will be set indicating that its parent
 * (which is not part of the flush) should be updated.
 */
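/*
 * Illustrative call pattern (a sketch, not code from this file): the
 * syncer typically flushes the volume root within a flush transaction,
 * roughly:
 *
 *	hammer2_trans_init(hmp, &trans, HAMMER2_TRANS_ISFLUSH);
 *	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
 *	hammer2_chain_flush(&trans, &hmp->vchain);
 *	hammer2_chain_unlock(&hmp->vchain);
 *	hammer2_trans_done(&trans);
 */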
void
hammer2_chain_flush(hammer2_trans_t *trans, hammer2_chain_t *chain)
{
	hammer2_chain_t *scan;
	hammer2_chain_core_t *core;
	hammer2_flush_info_t info;

	/*
	 * Execute the recursive flush and handle deferrals.
	 *
	 * Chains can be ridiculously long (thousands deep), so to
	 * avoid blowing out the kernel stack the recursive flush has a
	 * depth limit.  Elements at the limit are placed on a list
	 * for re-execution after the stack has been popped.
	 */
	bzero(&info, sizeof(info));
	TAILQ_INIT(&info.flush_list);
	info.hmp = trans->hmp;
	info.trans = trans;
	info.sync_tid = trans->sync_tid;
	info.mirror_tid = 0;

	core = chain->core;

	for (;;) {
		/*
		 * Unwind deep recursions which had been deferred.  This
		 * can leave MOVED set for these chains, which will be
		 * handled when we [re]flush chain after the unwind.
		 */
		while ((scan = TAILQ_FIRST(&info.flush_list)) != NULL) {
			KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
			TAILQ_REMOVE(&info.flush_list, scan, flush_node);
			atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED);

			/*
			 * Now that we've popped back up we can do a secondary
			 * recursion on the deferred elements.
			 */
			if (hammer2_debug & 0x0040)
				kprintf("deferred flush %p\n", scan);
			hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
			hammer2_chain_flush(trans, scan);
			hammer2_chain_unlock(scan);
			hammer2_chain_drop(scan);	/* ref from deferral */
		}

		/*
		 * Flush pass1 on root.
		 */
		info.diddeferral = 0;
		hammer2_chain_flush_core(&info, chain);
#if FLUSH_DEBUG
		kprintf("flush_core_done parent=<base> chain=%p.%d %08x\n",
			chain, chain->bref.type, chain->flags);
#endif

		/*
		 * Only loop if deep recursions have been deferred.
		 */
		if (TAILQ_EMPTY(&info.flush_list))
			break;
	}
}

/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and remains locked on return.  This function is keyed off of
 * the SUBMODIFIED bit but must make fine-grained choices based on the
 * synchronization point we are flushing to.
 *
 * If the flush accomplished any work chain will be flagged MOVED
 * indicating a copy-on-write propagation back up is required.
 * Deep sub-nodes may also have been entered onto the deferral list.
 * MOVED is never set on the volume root.
 *
 * NOTE: modify_tid is different from MODIFIED.  modify_tid is updated
 *	 only when a chain is specifically modified, and not updated
 *	 for copy-on-write propagations.  MODIFIED is set on any modification
 *	 including copy-on-write propagations.
 */
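/*
 * Flag lifecycle summary (descriptive, inferred from the code below):
 * a chain is dirtied with MODIFIED (which holds a ref), the flush
 * writes it out, clears MODIFIED, and transfers the ref to the MOVED
 * flag.  Scan2 then propagates the new bref into the parent's block
 * table and clears MOVED once every potential parent has caught up.
 */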
static void
hammer2_chain_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain)
{
	hammer2_mount_t *hmp;
	hammer2_blockref_t *bref;
	hammer2_off_t pbase;
	hammer2_tid_t saved_sync;
	hammer2_trans_t *trans = info->trans;
	hammer2_chain_core_t *core;
	size_t bbytes;
	size_t boff;
	char *bdata;
	struct buf *bp;
	int error;
	int wasmodified;
	int diddeferral = 0;

	hmp = info->hmp;

#if FLUSH_DEBUG
	if (info->parent)
		kprintf("flush_core %p->%p.%d %08x (%s)\n",
			info->parent, chain, chain->bref.type,
			chain->flags,
			((chain->bref.type == HAMMER2_BREF_TYPE_INODE) ?
				chain->data->ipdata.filename : "?"));
	else
		kprintf("flush_core NULL->%p.%d %08x (%s)\n",
			chain, chain->bref.type,
			chain->flags,
			((chain->bref.type == HAMMER2_BREF_TYPE_INODE) ?
				chain->data->ipdata.filename : "?"));
#endif
	/*
	 * Ignore chains modified beyond the current flush point.  These
	 * will be treated as if they did not exist.
	 */
	if (chain->modify_tid > info->sync_tid)
		return;

	/*
	 * Deleted chains which have not been destroyed must be retained,
	 * and we probably have to recurse to clean-up any sub-trees.
	 * However, restricted flushes can stop processing here because
	 * the chain cleanup will be handled by a later normal flush.
	 *
	 * The MODIFIED bit can likely be cleared in this situation and we
	 * will do so later on in this procedure.
	 */
	if (chain->delete_tid <= info->sync_tid) {
		if (trans->flags & HAMMER2_TRANS_RESTRICTED)
			return;
	}

	saved_sync = info->sync_tid;
	core = chain->core;

	/*
	 * If SUBMODIFIED is set we recurse the flush and adjust the
	 * blockrefs accordingly.
	 *
	 * NOTE: Looping on SUBMODIFIED can prevent a flush from ever
	 *	 finishing in the face of filesystem activity.
	 */
	if (chain->flags & HAMMER2_CHAIN_SUBMODIFIED) {
		hammer2_chain_t *saved_parent;
		hammer2_tid_t saved_mirror;

		/*
		 * Clear SUBMODIFIED to catch races.  Note that any child
		 * with MODIFIED, DELETED, or MOVED set during Scan2, after
		 * it processes the child, will cause SUBMODIFIED to be
		 * re-set.  If a child still has to be flushed, SUBMODIFIED
		 * will wind up being set again (for next time), but this
		 * does not stop us from synchronizing block updates which
		 * occurred.
		 *
		 * We don't want to set our chain to MODIFIED gratuitously.
		 *
		 * We need an extra ref on chain because we are going to
		 * release its lock temporarily in our child loop.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_SUBMODIFIED);
		hammer2_chain_ref(chain);

		/*
		 * Run two passes.  The first pass handles MODIFIED and
		 * SUBMODIFIED chains and recurses while the second pass
		 * handles MOVED chains on the way back up.
		 *
		 * If the stack gets too deep we defer scan1, but must
		 * be sure to still run scan2 if on the next loop the
		 * deferred chain has been flushed and now needs MOVED
		 * handling on the way back up.
		 *
		 * Scan1 is recursive.
		 *
		 * NOTE: The act of handling a modified/submodified chain can
		 *	 cause the MOVED flag to be set.  It can also be set
		 *	 via hammer2_chain_delete() and in other situations.
		 *
		 * NOTE: RB_SCAN() must be used instead of RB_FOREACH()
		 *	 because children can be physically removed during
		 *	 the scan.
		 */
		saved_parent = info->parent;
		saved_mirror = info->mirror_tid;
		info->parent = chain;
		info->mirror_tid = chain->bref.mirror_tid;

		if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
			if ((chain->flags & HAMMER2_CHAIN_DEFERRED) == 0) {
				hammer2_chain_ref(chain);
				TAILQ_INSERT_TAIL(&info->flush_list,
						  chain, flush_node);
				atomic_set_int(&chain->flags,
					       HAMMER2_CHAIN_DEFERRED);
			}
			diddeferral = 1;
		} else {
			info->diddeferral = 0;
			spin_lock(&core->cst.spin);
			RB_SCAN(hammer2_chain_tree, &chain->core->rbtree,
				NULL, hammer2_chain_flush_scan1, info);
			spin_unlock(&core->cst.spin);
			diddeferral += info->diddeferral;
		}

		/*
		 * Handle successfully flushed children who are in the MOVED
		 * state on the way back up the recursion.  This can have
		 * the side-effect of clearing MOVED.
		 *
		 * We execute this even if there were deferrals to try to
		 * keep the chain topology cleaner.
		 *
		 * Scan2 is non-recursive.
		 */
		if (diddeferral) {
			atomic_set_int(&chain->flags,
				       HAMMER2_CHAIN_SUBMODIFIED);
		} else {
#if FLUSH_DEBUG
			kprintf("scan2_start parent %p %08x\n",
				chain, chain->flags);
#endif
			spin_lock(&core->cst.spin);
			RB_SCAN(hammer2_chain_tree, &core->rbtree,
				NULL, hammer2_chain_flush_scan2, info);
			spin_unlock(&core->cst.spin);
#if FLUSH_DEBUG
			kprintf("scan2_stop  parent %p %08x\n",
				chain, chain->flags);
#endif
		}
		chain->bref.mirror_tid = info->mirror_tid;
		info->mirror_tid = saved_mirror;
		info->parent = saved_parent;
		hammer2_chain_drop(chain);
	}

	/*
	 * Restore sync_tid in case it was restricted by a delete/duplicate.
	 */
	info->sync_tid = saved_sync;

	/*
	 * Rollup diddeferral for caller.  Note direct assignment, not +=.
	 */
	info->diddeferral = diddeferral;

	/*
	 * Do not flush chain if there were any deferrals.  It will be
	 * retried later after the deferrals are independently handled.
	 */
	if (diddeferral) {
		if (hammer2_debug & 0x0008) {
			kprintf("%*.*s} %p/%d %04x (deferred)\n",
				info->depth, info->depth, "",
				chain, chain->refs, chain->flags);
		}
		return;
	}

	/*
	 * If we encounter a deleted chain within our flush we can clear
	 * the MODIFIED bit and avoid flushing it whether it has been
	 * destroyed or not.
	 */
	if (chain->delete_tid <= info->sync_tid) {
		if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
			if (chain->bp)
				chain->bp->b_flags |= B_INVAL|B_RELBUF;
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
			hammer2_chain_drop(chain);
		}
		return;
	}
#if 0
	if ((chain->flags & HAMMER2_CHAIN_DESTROYED) &&
	    (chain->flags & HAMMER2_CHAIN_DELETED) &&
	    (trans->flags & HAMMER2_TRANS_RESTRICTED) == 0) {
		/*
		 * Throw-away the MODIFIED flag
		 */
		if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
			if (chain->bp)
				chain->bp->b_flags |= B_INVAL|B_RELBUF;
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
			hammer2_chain_drop(chain);
		}
		return;
	}
#endif

	/*
	 * A degenerate flush might not have flushed anything and thus not
	 * processed modified blocks on the way back up.  Detect the case.
	 *
	 * Note that MOVED can be set without MODIFIED being set due to
	 * a deletion, in which case it is handled by Scan2 later on.
	 *
	 * Both bits can be set along with DELETED due to a deletion if
	 * data was modified within the synchronization zone and the chain
	 * was then deleted beyond the zone, in which case we still have
	 * to flush for synchronization point consistency.  Otherwise though
	 * DELETED and MODIFIED are treated as separate flags.
	 */
	if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0)
		return;

	/*
	 * Issue flush.
	 *
	 * A DESTROYED node that reaches this point must be flushed for
	 * synchronization point consistency.
	 */

	/*
	 * Update mirror_tid, clear MODIFIED, and set MOVED.
	 *
	 * The caller will update the parent's reference to this chain
	 * by testing MOVED as long as the modification was in-bounds.
	 *
	 * MOVED is never set on the volume root as there is no parent
	 * to adjust.
	 */
	if (chain->bref.mirror_tid < info->sync_tid)
		chain->bref.mirror_tid = info->sync_tid;
	wasmodified = (chain->flags & HAMMER2_CHAIN_MODIFIED) != 0;
	atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
	if (chain == &hmp->vchain)
		kprintf("(FLUSHED VOLUME HEADER)\n");

	if ((chain->flags & HAMMER2_CHAIN_MOVED) ||
	    chain == &hmp->vchain) {
		/*
		 * Drop the ref from the MODIFIED bit we cleared.
		 */
		if (wasmodified)
			hammer2_chain_drop(chain);
	} else {
		/*
		 * If we were MODIFIED we inherit the ref from clearing
		 * that bit, otherwise we need another ref.
		 */
		if (wasmodified == 0)
			hammer2_chain_ref(chain);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
	}

	/*
	 * If this is part of a recursive flush we can go ahead and write
	 * out the buffer cache buffer and pass a new bref back up the chain
	 * via the MOVED bit.
	 *
	 * Volume headers are NOT flushed here as they require special
	 * processing.
	 */
	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
		/*
		 * The volume header is flushed manually by the syncer, not
		 * here.  All we do is adjust the crc's.
		 */
		KKASSERT(chain->data != NULL);
		KKASSERT(chain->bp == NULL);
		kprintf("volume header mirror_tid %jd\n",
			hmp->voldata.mirror_tid);

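		/*
		 * Ordering note (inferred from the layout macros): the two
		 * section CRCs are computed first and the whole-header CRC
		 * last, since the ICRCVH range presumably covers the
		 * icrc_sects[] fields themselves.
		 */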
		hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1] =
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRC1_OFF,
				HAMMER2_VOLUME_ICRC1_SIZE);
		hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0] =
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRC0_OFF,
				HAMMER2_VOLUME_ICRC0_SIZE);
		hmp->voldata.icrc_volheader =
			hammer2_icrc32(
				(char *)&hmp->voldata +
				 HAMMER2_VOLUME_ICRCVH_OFF,
				HAMMER2_VOLUME_ICRCVH_SIZE);
		hmp->volsync = hmp->voldata;
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
		break;
	case HAMMER2_BREF_TYPE_DATA:
		/*
		 * Data elements have already been flushed via the logical
		 * file buffer cache.  Their hash was set in the bref by
		 * the vop_write code.
		 *
		 * Make sure any device buffer(s) have been flushed out here
		 * (there aren't usually any to flush).
		 */
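		/*
		 * Descriptive note: pbase is the buffer-aligned device
		 * offset and boff is this element's byte offset within
		 * that buffer; HAMMER2_OFF_MASK strips the size-radix
		 * bits stored in the low bits of data_off.
		 */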
		bbytes = chain->bytes;
		pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
		boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);

		bp = getblk(hmp->devvp, pbase, bbytes, GETBLK_NOWAIT, 0);
		if (bp) {
			if ((bp->b_flags & (B_CACHE | B_DIRTY)) ==
			    (B_CACHE | B_DIRTY)) {
				cluster_awrite(bp);
			} else {
				bp->b_flags |= B_RELBUF;
				brelse(bp);
			}
		}
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Indirect blocks may be in an INITIAL state.  Use the
		 * chain_lock() call to ensure that the buffer has been
		 * instantiated (even though it is already locked the buffer
		 * might not have been instantiated).
		 *
		 * Only write the buffer out if it is dirty; it is possible
		 * the operating system had already written out the buffer.
		 */
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
		KKASSERT(chain->bp != NULL);

		bp = chain->bp;
		if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) ||
		    (bp->b_flags & B_DIRTY)) {
			bdwrite(chain->bp);
		} else {
			brelse(chain->bp);
		}
		chain->bp = NULL;
		chain->data = NULL;
		hammer2_chain_unlock(chain);
		break;
	default:
		/*
		 * Embedded elements have to be flushed out.
		 */
		KKASSERT(chain->data != NULL);
		KKASSERT(chain->bp == NULL);
		bref = &chain->bref;

		KKASSERT((bref->data_off & HAMMER2_OFF_MASK) != 0);
		KKASSERT(HAMMER2_DEC_CHECK(chain->bref.methods) ==
			 HAMMER2_CHECK_ISCSI32);

		if (chain->bp == NULL) {
			/*
			 * The data is embedded; we have to acquire the
			 * buffer cache buffer and copy the data into it.
			 */
			if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
				bbytes = HAMMER2_MINIOSIZE;
			pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
			boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);

			/*
			 * The getblk() optimization can only be used if the
			 * physical block size matches the request.
			 */
			if (chain->bytes == bbytes) {
				bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
				error = 0;
			} else {
				error = bread(hmp->devvp, pbase, bbytes, &bp);
				KKASSERT(error == 0);
			}
			bdata = (char *)bp->b_data + boff;

			/*
			 * Copy the data to the buffer, mark the buffer
			 * dirty, and convert the chain to unmodified.
			 */
			bcopy(chain->data, bdata, chain->bytes);
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
			bp = NULL;
			chain->bref.check.iscsi32.value =
				hammer2_icrc32(chain->data, chain->bytes);
			if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
				++hammer2_iod_meta_write;
			else
				++hammer2_iod_indr_write;
		} else {
			chain->bref.check.iscsi32.value =
				hammer2_icrc32(chain->data, chain->bytes);
		}
	}
}

/*
 * Flush helper scan1 (recursive)
 *
 * Flushes the children of the caller's chain (parent) and updates
 * the blockref, restricted by sync_tid.
 *
 * Ripouts during the loop should not cause any problems.  Because we are
 * flushing to a synchronization point, modification races will occur after
 * sync_tid and do not have to be flushed anyway.
 *
 * It is also ok if the parent is chain_duplicate()'d while unlocked because
 * the delete/duplication will install a delete_tid that is still larger than
 * our current sync_tid.
 */
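/*
 * Locking note (descriptive): scan1 is called with the parent's core
 * spinlock held by RB_SCAN().  It drops that spinlock and the parent's
 * lock around the recursion and re-acquires both before returning,
 * which is why the MODIFIED/SUBMODIFIED flags and modify_tid are
 * re-checked after the child has been locked.
 */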
static int
hammer2_chain_flush_scan1(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_trans_t *trans = info->trans;
	hammer2_chain_t *parent = info->parent;
	/*hammer2_mount_t *hmp = info->hmp;*/
	int diddeferral;

	/*
	 * We should only need to recurse if SUBMODIFIED is set, but as
	 * a safety we also recurse if MODIFIED is set.
	 *
	 * Return early if neither bit is set.  We must re-assert the
	 * SUBMODIFIED flag in the parent if any child covered by the
	 * parent (via delete_tid) is skipped.
	 */
	if ((child->flags & (HAMMER2_CHAIN_MODIFIED |
			     HAMMER2_CHAIN_SUBMODIFIED)) == 0) {
		return (0);
	}
	if (child->modify_tid > trans->sync_tid) {
		if (parent->delete_tid > trans->sync_tid) {
			atomic_set_int(&parent->flags,
				       HAMMER2_CHAIN_SUBMODIFIED);
		}
		return (0);
	}

	hammer2_chain_ref(child);
	spin_unlock(&parent->core->cst.spin);

	/*
	 * The caller has added a ref to the parent so we can temporarily
	 * unlock it in order to lock the child.  Re-check the flags before
	 * continuing.
	 */
	hammer2_chain_unlock(parent);
	hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE);

	if ((child->flags & (HAMMER2_CHAIN_MODIFIED |
			     HAMMER2_CHAIN_SUBMODIFIED)) == 0) {
		hammer2_chain_unlock(child);
		hammer2_chain_drop(child);
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
		spin_lock(&parent->core->cst.spin);
		return (0);
	}
	if (child->modify_tid > trans->sync_tid) {
		hammer2_chain_unlock(child);
		hammer2_chain_drop(child);
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);
		spin_lock(&parent->core->cst.spin);
		if (parent->delete_tid > trans->sync_tid) {
			atomic_set_int(&parent->flags,
				       HAMMER2_CHAIN_SUBMODIFIED);
		}
		return (0);
	}

	/*
	 * The DESTROYED flag can only be initially set on an unreferenced
	 * deleted inode and will propagate downward via the mechanism below.
	 * Such inode chains have been deleted for good and should no longer
	 * be subject to delete/duplication.
	 *
	 * This optimization allows the inode reclaim (destroy unlinked file
	 * on vnode reclamation after last close) to be flagged by simply
	 * setting HAMMER2_CHAIN_DESTROYED at the top level, which then
	 * causes the chains to be terminated and related buffers to be
	 * invalidated and not flushed out.
	 *
	 * We have to be careful not to propagate the DESTROYED flag if
	 * the destruction occurred after our flush sync_tid.
	 */
	if ((parent->flags & HAMMER2_CHAIN_DESTROYED) &&
	    (child->flags & HAMMER2_CHAIN_DELETED) &&
	    (child->flags & HAMMER2_CHAIN_DESTROYED) == 0) {
		atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROYED |
					      HAMMER2_CHAIN_SUBMODIFIED);
	}

	/*
	 * Recurse and collect deferral data.
	 */
	diddeferral = info->diddeferral;
	++info->depth;
	hammer2_chain_flush_core(info, child);
#if FLUSH_DEBUG
	kprintf("flush_core_done parent=%p flags=%08x child=%p.%d %08x\n",
		parent, parent->flags, child, child->bref.type, child->flags);
#endif
	--info->depth;
	info->diddeferral += diddeferral;

	if (child->flags & HAMMER2_CHAIN_SUBMODIFIED)
		atomic_set_int(&parent->flags, HAMMER2_CHAIN_SUBMODIFIED);

	hammer2_chain_unlock(child);
	hammer2_chain_drop(child);

	hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE);

	spin_lock(&parent->core->cst.spin);
	return (0);
}

/*
 * Flush helper scan2 (non-recursive)
 *
 * This pass on a chain's children propagates any MOVED or DELETED
 * elements back up the chain towards the root after those elements have
 * been fully flushed.  Unlike scan1, this function is NOT recursive and
 * the parent remains locked across the entire scan.
 *
 * NOTE!  We must re-set SUBMODIFIED on the parent(s) as appropriate, and
 *	  due to the above conditions it is possible to do this and still
 *	  have some children flagged MOVED depending on the synchronization.
 *
 * NOTE!  A deletion is a visibility issue; there can still be references to
 *	  deleted elements (for example, to an unlinked file which is still
 *	  open), and there can also be multiple chains pointing to the same
 *	  bref where some are deleted and some are not (for example due to
 *	  a rename).   So a chain marked for deletion is basically considered
 *	  to be live until it is explicitly destroyed or until its ref-count
 *	  reaches zero (also implying that MOVED and MODIFIED are clear).
 */
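/*
 * In outline (a descriptive summary of the code below): scan2 skips
 * children created beyond the flush point or lacking MOVED, locates
 * the parent's blockref array (inode, indirect block, or volume root),
 * deletes or updates the child's entry in-place, propagates mirror_tid
 * upward, and finally clears MOVED once no potential parent still has
 * SUBMODIFIED set.
 */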
static int
hammer2_chain_flush_scan2(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;
	hammer2_chain_core_t *above = child->above;
	hammer2_mount_t *hmp = info->hmp;
	hammer2_trans_t *trans = info->trans;
	hammer2_blockref_t *base;
	int count;

	/*
	 * Inodes with stale children that have been converted to DIRECTDATA
	 * mode (file extension or hardlink conversion typically) need to be
	 * skipped right now before we start messing with a non-existent
	 * block table.
	 */
#if 0
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
	    (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA)) {
#if FLUSH_DEBUG
		kprintf("B");
#endif
		goto finalize;
	}
#endif

	/*
	 * Ignore children created after our flush point, treating them as
	 * if they did not exist.  These children will not cause the parent
	 * to be updated.
	 *
	 * When we encounter such children and the parent chain has not been
	 * deleted, delete/duplicated, or delete/duplicated-for-move, then
	 * the parent may be used to funnel through several flush points.
	 * We must re-set the SUBMODIFIED flag in the parent to ensure that
	 * those flushes have visibility.  A simple test of delete_tid suffices
	 * to determine if the parent spans beyond our current flush.
	 */
	if (child->modify_tid > trans->sync_tid) {
#if FLUSH_DEBUG
		kprintf("E");
#endif
		goto finalize;
	}

	/*
	 * Ignore children which have not changed.  The parent's block table
	 * is already correct.
	 */
	if ((child->flags & HAMMER2_CHAIN_MOVED) == 0) {
#if FLUSH_DEBUG
		kprintf("D");
#endif
		goto finalize;
	}

	hammer2_chain_ref(child);
	spin_unlock(&above->cst.spin);

	/*
	 * The MOVED bit implies an additional reference which prevents
	 * the child from being destroyed out from under our operation
	 * so we can lock the child safely without worrying about it
	 * getting ripped up (?).
	 *
	 * We can only update parents where child->parent matches.  The
	 * child->parent link will migrate along the chain but the flush
	 * order must be enforced absolutely.  Parents reflushed after the
	 * child has passed them by should skip due to the modify_tid test.
	 */
	hammer2_chain_lock(child, HAMMER2_RESOLVE_NEVER);

	/*
	 * The parent's blockref to the child must be deleted or updated.
	 *
	 * This point is not reached on successful DESTROYED optimizations
	 * but can be reached on recursive deletions and restricted flushes.
	 *
	 * Because flushes are ordered we do not have to make a
	 * modify/duplicate of indirect blocks.  That is, the flush
	 * code does not have to kmalloc or duplicate anything.  We
	 * can adjust the indirect block table in-place and reuse the
	 * chain.  It IS possible that the chain has already been duplicated
	 * or may wind up being duplicated on-the-fly by modifying code
	 * on the frontend.  We simply use the original and ignore such
	 * chains.  However, it does mean we can't clear the MOVED bit.
	 *
	 * XXX recursive deletions not optimized.
	 */
	hammer2_chain_modify(trans, &parent,
			     HAMMER2_MODIFY_NO_MODIFY_TID |
			     HAMMER2_MODIFY_ASSERTNOCOPY);

	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * XXX Should assert that OPFLAG_DIRECTDATA is 0 once we
		 * properly duplicate the inode headers and do proper flush
		 * range checks (all the children should be beyond the flush
		 * point).  For now just don't sync the non-applicable
		 * children.
		 *
		 * XXX Can also occur due to hardlink consolidation.  We
		 * set OPFLAG_DIRECTDATA to prevent the indirect and data
		 * blocks from syncing to the hardlink pointer.
		 */
#if 0
		KKASSERT((parent->data->ipdata.op_flags &
			  HAMMER2_OPFLAG_DIRECTDATA) == 0);
#endif
#if 0
		if (parent->data->ipdata.op_flags &
		    HAMMER2_OPFLAG_DIRECTDATA) {
			base = NULL;
		} else
#endif
		{
			base = &parent->data->ipdata.u.blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
		}
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
		if (parent->data) {
			base = &parent->data->npdata.blockref[0];
		} else {
			base = NULL;
			KKASSERT(child->flags & HAMMER2_CHAIN_DELETED);
		}
		count = parent->bytes / sizeof(hammer2_blockref_t);
		break;
	case HAMMER2_BREF_TYPE_VOLUME:
		base = &hmp->voldata.sroot_blockset.blockref[0];
		count = HAMMER2_SET_COUNT;
		break;
	default:
		base = NULL;
		count = 0;
		panic("hammer2_chain_flush_scan2: "
		      "unrecognized blockref type: %d",
		      parent->bref.type);
	}

	/*
	 * Update the parent's blockref table and propagate mirror_tid.
	 *
	 * NOTE! Children with modify_tid's beyond our flush point are
	 *	 considered to not exist for the purposes of updating the
	 *	 parent's blockref array.
	 *
	 * NOTE! Updates to a parent's blockref table do not adjust the
	 *	 parent's bref.modify_tid, only its bref.mirror_tid.
	 */
	KKASSERT(child->index >= 0);
	if (child->delete_tid <= trans->sync_tid) {
		if (base) {
			KKASSERT(child->index < count);
			bzero(&base[child->index], sizeof(child->bref));
			if (info->mirror_tid < child->delete_tid)
				info->mirror_tid = child->delete_tid;
		}
	} else {
		if (base) {
			KKASSERT(child->index < count);
			base[child->index] = child->bref;
			if (info->mirror_tid < child->modify_tid)
				info->mirror_tid = child->modify_tid;
		}
	}

	if (info->mirror_tid < child->bref.mirror_tid) {
		info->mirror_tid = child->bref.mirror_tid;
	}
	if (parent->bref.type == HAMMER2_BREF_TYPE_VOLUME &&
	    hmp->voldata.mirror_tid < child->bref.mirror_tid) {
		hmp->voldata.mirror_tid = child->bref.mirror_tid;
	}
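
	/*
	 * (Descriptive note: info->mirror_tid now carries the highest TID
	 * seen among the processed children; flush_core folds it back
	 * into the parent's bref.mirror_tid once the scan completes.)
	 */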

	/*
	 * When can we safely clear the MOVED flag?  Flushes down duplicate
	 * paths can occur out of order, for example if an inode is moved
	 * as part of a hardlink consolidation or if an inode is moved into
	 * an indirect block indexed before the inode.
	 *
	 * Only clear MOVED once all possible parents have been flushed.
	 */
	if (child->flags & HAMMER2_CHAIN_MOVED) {
		hammer2_chain_t *scan;
		int ok = 1;

		spin_lock(&above->cst.spin);
		for (scan = above->first_parent;
		     scan;
		     scan = scan->next_parent) {
			/*
			 * XXX weird code also checked at the top of scan2,
			 *     I would like to fix this by detaching the core
			 *     on initial hardlink consolidation (1->2 nlinks).
			 */
#if 0
			if (scan->bref.type == HAMMER2_BREF_TYPE_INODE &&
			    (scan->data->ipdata.op_flags &
			     HAMMER2_OPFLAG_DIRECTDATA)) {
				continue;
			}
#endif
			if (scan->flags & HAMMER2_CHAIN_SUBMODIFIED) {
				ok = 0;
				break;
			}
		}
		spin_unlock(&above->cst.spin);
		if (ok) {
			atomic_clear_int(&child->flags, HAMMER2_CHAIN_MOVED);
			hammer2_chain_drop(child);	/* flag */
		}
	}

	/*
	 * Unlock the child.  This can wind up dropping the child's
	 * last ref, removing it from the parent's RB tree, and deallocating
	 * the structure.  The RB_SCAN() our caller is doing handles the
	 * situation.
	 */
	hammer2_chain_unlock(child);
	hammer2_chain_drop(child);
	spin_lock(&above->cst.spin);
#if FLUSH_DEBUG
	kprintf("F");
#endif

	/*
	 * The parent cleared SUBMODIFIED prior to the scan.  If the child
	 * still requires a flush (possibly due to being outside the current
	 * synchronization zone), we must re-set SUBMODIFIED on the way back
	 * up.
	 */
finalize:
#if FLUSH_DEBUG
	kprintf("G child %p %08x\n", child, child->flags);
#endif
	return (0);
}
1118