/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static int	hammer_unload_inode(struct hammer_inode *ip);
static void	hammer_free_inode(hammer_inode_t ip);
static void	hammer_flush_inode_core(hammer_inode_t ip,
					hammer_flush_group_t flg, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int	hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
					hammer_flush_group_t flg);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record,
					int depth, hammer_flush_group_t flg);
static void	hammer_inode_wakereclaims(hammer_inode_t ip, int dowake);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
	u_int32_t localization = *(u_int32_t *)data;
	if (ip->obj_localization > localization)
		return(1);
	if (ip->obj_localization < localization)
		return(-1);
	return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
	if (p1->localization < p2->localization)
		return(-1);
	if (p1->localization > p2->localization)
		return(1);
	return(0);
}


RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
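
/*
 * Illustrative sketch (not compiled): the RB_GENERATE* macros above emit
 * the lookup and scan functions used throughout this file.  A cached
 * inode is located by its (localization, obj_id, asof) triplet, which is
 * exactly the ordering implemented by the comparison routines.  The
 * helper name below is hypothetical; the call mirrors the lookup done
 * in hammer_get_inode().
 */
#if 0
static hammer_inode_t
example_lookup_cached_inode(hammer_mount_t hmp, int64_t obj_id,
			    hammer_tid_t asof, u_int32_t localization)
{
	struct hammer_inode_info iinfo;

	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
	return(hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root,
						 &iinfo));
}
#endif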

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	hammer_inode_unloadable_check(ip, 0);
	if (ip->ino_data.nlinks == 0) {
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		vrecycle(ap->a_vp);
	}
	return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaiming;
			++hmp->inode_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
		}
		hammer_rel_inode(ip, 1);
	}
	return(0);
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;
	u_int8_t obj_type;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;

			obj_type = ip->ino_data.obj_type;
			vp->v_type = hammer_get_vnode_type(obj_type);

			hammer_inode_wakereclaims(ip, 0);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots can be accessed via
			 * non-root filesystem paths and setting VROOT may
			 * confuse the namecache.  Set VPFSROOT instead.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == hmp->asof) {
				if (ip->obj_localization == 0)
					vp->v_flag |= VROOT;
				else
					vp->v_flag |= VPFSROOT;
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG)
				vinitvmio(vp, ip->ino_data.size);
			break;
		}

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp)
				break;
			vput(vp);
		}
	}
	*vpp = vp;
	return(error);
}
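
/*
 * Illustrative sketch (not compiled): typical frontend use of
 * hammer_get_vnode().  The caller holds a reference on the inode but no
 * lock; on success *vpp is a locked, referenced vnode which is later
 * released with vput().  The helper name is hypothetical and the error
 * handling is abbreviated.
 */
#if 0
static int
example_inode_to_vnode(hammer_inode_t ip)
{
	struct vnode *vp;
	int error;

	error = hammer_get_vnode(ip, &vp);	/* ip stays referenced */
	if (error == 0) {
		/* ... operate on the locked, referenced vnode ... */
		vput(vp);			/* unlock and release */
	}
	return(error);
}
#endif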

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
		            int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}
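
/*
 * Illustrative sketch (not compiled): driving the snapshot scan.  The
 * callback runs for every cached inode matching (localization, obj_id)
 * regardless of asof, per hammer_inode_info_cmp_all_history() above.
 * The callback body is a hypothetical no-op stand-in for real work such
 * as direct-io invalidation.
 */
#if 0
static int
example_snapshot_callback(hammer_inode_t ip, void *data __unused)
{
	/* e.g. invalidate cached buffers associated with ip */
	return(0);			/* continue the scan */
}

static void
example_scan_snapshots(hammer_mount_t hmp, int64_t obj_id,
		       u_int32_t localization)
{
	struct hammer_inode_info iinfo;

	iinfo.obj_id = obj_id;
	iinfo.obj_asof = 0;		/* not examined by this scan */
	iinfo.obj_localization = localization;
	hammer_scan_inode_snapshots(hmp, &iinfo,
				    example_snapshot_callback, NULL);
}
#endif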

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_node_cache *cachep;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;


	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a dummy inode we return a failure so dounlink
	 * (which does another lookup) doesn't try to mess with the
	 * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
	 * to ref dummy inodes.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 *
	 * We cache recent inode lookups in this directory in dip->cache[2].
	 * If we can't find it we assume the inode we are looking for is
	 * close to the directory inode.
	 */
retry:
	cachep = NULL;
	if (dip) {
		if (dip->cache[2].node)
			cachep = &dip->cache[2];
		else
			cachep = &dip->cache[0];
	}
	hammer_init_cursor(trans, &cursor, cachep, NULL);
	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;

	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * We might have something in the governing directory from
		 * scan optimizations (see the strategy code in
		 * hammer_vnops.c).
		 *
		 * We update dip->cache[2], if possible, with the location
		 * of the object inode for future directory shortcuts.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip) {
			if (dip->cache[3].node) {
				hammer_cache_node(&ip->cache[1],
						  dip->cache[3].node);
			}
			hammer_cache_node(&dip->cache[2], cursor.node);
		}

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting save_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->save_trunc_off = ip->ino_data.size;

		/*
		 * Locate and assign the pseudofs management structure to
		 * the inode.
		 */
		if (dip && dip->obj_localization == ip->obj_localization) {
			ip->pfsm = dip->pfsm;
			hammer_ref(&ip->pfsm->lock);
		} else {
			ip->pfsm = hammer_load_pseudofs(trans,
							ip->obj_localization,
							errorp);
			*errorp = 0;	/* ignore ENOENT */
		}
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}

		hammer_free_inode(ip);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return (ip);
}

/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a non-fake inode we return an error.  Only fake
	 * inodes can be returned by this routine.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	*errorp = 0;
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Populate the dummy inode.  Leave everything zero'd out.
	 *
	 * (ip->ino_leaf and ip->ino_data)
	 *
	 * Make the dummy inode a FIFO object which most copy programs
	 * will properly ignore.
	 */
	ip->save_trunc_off = ip->ino_data.size;
	ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

	/*
	 * Locate and assign the pseudofs management structure to
	 * the inode.
	 */
	if (dip && dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
	} else {
		ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
						errorp);
		*errorp = 0;	/* ignore ENOENT */
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 *
	 * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			goto loop;
		}
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hammer_free_inode(ip);
		ip = NULL;
	}
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
		  hammer_tid_t asof, u_int32_t localization)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;

	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY)
			ip = NULL;
		else
			hammer_ref(&ip->lock);
	}
	return(ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred,
		    hammer_inode_t dip, const char *name, int namelen,
		    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	int error;
	int64_t namekey;
	u_int32_t dummy;

	hmp = trans->hmp;

	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	trans->flags |= HAMMER_TRANSF_NEWINODE;

	if (pfsm) {
		KKASSERT(pfsm->localization != 0);
		ip->obj_id = HAMMER_OBJID_ROOT;
		ip->obj_localization = pfsm->localization;
	} else {
		KKASSERT(dip != NULL);
		namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
		ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
		ip->obj_localization = dip->obj_localization;
	}

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;

	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	/* ip->save_trunc_off = 0; (already zero) */
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	if (dip) {
		ip->ino_data.uflags = dip->ino_data.uflags &
				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	}

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * If we are running version 2 or greater directory entries are
	 * inode-localized instead of data-localized.
	 */
	if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
		if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
			ip->ino_data.cap_flags |=
				HAMMER_INODE_CAP_DIR_LOCAL_INO;
		}
	}

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid.
	 */
	if (dip)
		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
	/*
	 * The parent_obj_localization field only applies to pseudo-fs roots.
	 * XXX this is no longer applicable, PFSs are no longer directly
	 * tied into the parent's directory structure.
	 */
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
	    ip->obj_id == HAMMER_OBJID_ROOT) {
		ip->ino_data.ext.obj.parent_obj_localization =
						dip->obj_localization;
	}
#endif

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	if (dip) {
		xuid = hammer_to_unix_xid(&dip->ino_data.uid);
		xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
					     xuid, cred, &vap->va_mode);
	} else {
		xuid = 0;
	}
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else if (dip)
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);

	if (pfsm) {
		ip->pfsm = pfsm;
		hammer_ref(&pfsm->lock);
		error = 0;
	} else if (dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
		error = 0;
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						&error);
		error = 0;	/* ignore ENOENT */
	}

	if (error) {
		hammer_free_inode(ip);
		ip = NULL;
	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		panic("hammer_create_inode: duplicate obj_id %llx",
		      (long long)ip->obj_id);
		/* not reached */
		hammer_free_inode(ip);
	}
	*ipp = ip;
	return(error);
}

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
	struct hammer_mount *hmp;

	hmp = ip->hmp;
	KKASSERT(ip->lock.refs == 1);
	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_uncache_node(&ip->cache[2]);
	hammer_uncache_node(&ip->cache[3]);
	hammer_inode_wakereclaims(ip, 1);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--hmp->count_inodes;
	if (ip->pfsm) {
		hammer_rel_pseudofs(hmp, ip->pfsm);
		ip->pfsm = NULL;
	}
	kfree(ip, hmp->m_inodes);
	ip = NULL;
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
		     u_int32_t localization, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_cursor cursor;
	int bytes;

retry:
	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
	if (pfsm) {
		hammer_ref(&pfsm->lock);
		*errorp = 0;
		return(pfsm);
	}

	/*
	 * PFS records are stored in the root inode (not the PFS root inode,
	 * but the real root).  Avoid an infinite recursion if loading
	 * the PFS for the real root.
	 */
	if (localization) {
		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
				      HAMMER_MAX_TID,
				      HAMMER_DEF_LOCALIZATION, 0, errorp);
	} else {
		ip = NULL;
	}

	pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
	pfsm->localization = localization;
	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	if (ip)
		*errorp = hammer_ip_lookup(&cursor);
	else
		*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == 0) {
		*errorp = hammer_ip_resolve_data(&cursor);
		if (*errorp == 0) {
			if (cursor.data->pfsd.mirror_flags &
			    HAMMER_PFSD_DELETED) {
				*errorp = ENOENT;
			} else {
				bytes = cursor.leaf->data_len;
				if (bytes > sizeof(pfsm->pfsd))
					bytes = sizeof(pfsm->pfsd);
				bcopy(cursor.data, &pfsm->pfsd, bytes);
			}
		}
	}
	hammer_done_cursor(&cursor);

	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_ref(&pfsm->lock);
	if (ip)
		hammer_rel_inode(ip, 0);
	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
		kfree(pfsm, hmp->m_misc);
		goto retry;
	}
	return(pfsm);
}
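
/*
 * Illustrative sketch (not compiled): the usual ref/rel pairing for the
 * PFS management structure.  hammer_load_pseudofs() always returns a
 * referenced pfsm (never NULL, even on error); the reference is dropped
 * with hammer_rel_pseudofs().  The surrounding declarations are assumed
 * for the example.
 */
#if 0
	hammer_pseudofs_inmem_t pfsm;
	int error;

	pfsm = hammer_load_pseudofs(trans, localization, &error);
	/* ... consult pfsm->pfsd ... */
	hammer_rel_pseudofs(trans->hmp, pfsm);
#endif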

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	hammer_inode_t ip;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = pfsm->localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	/*
	 * Replace any in-memory version of the record.
	 */
	error = hammer_ip_lookup(&cursor);
	if (error == 0 && hammer_cursor_inmem(&cursor)) {
		record = cursor.iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor.deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor.deadlk_rec = record;
			error = EDEADLK;
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	}

	/*
	 * Allocate replacement general record.  The backend flush will
	 * delete any on-disk version of the record.
	 */
	if (error == 0 || error == ENOENT) {
		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
		record->type = HAMMER_MEM_RECORD_GENERAL;

		record->leaf.base.localization = ip->obj_localization +
						 HAMMER_LOCALIZE_MISC;
		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
		record->leaf.base.key = pfsm->localization;
		record->leaf.data_len = sizeof(pfsm->pfsd);
		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
		error = hammer_ip_add_record(trans, record);
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	hammer_rel_inode(ip, 0);
	return(error);
}

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
		       hammer_pseudofs_inmem_t pfsm)
{
	hammer_inode_t ip;
	struct vattr vap;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      pfsm->localization, 0, &error);
	if (ip == NULL) {
		vattr_null(&vap);
		vap.va_mode = 0755;
		vap.va_type = VDIR;
		error = hammer_create_inode(trans, &vap, cred,
					    NULL, NULL, 0,
					    pfsm, &ip);
		if (error == 0) {
			++ip->ino_data.nlinks;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (ip)
		hammer_rel_inode(ip, 0);
	return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static
int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
	int res;

	hammer_ref(&ip->lock);
	if (ip->lock.refs == 2 && ip->vp)
		vclean_unlocked(ip->vp);
	if (ip->lock.refs == 1 && ip->vp == NULL)
		res = 0;
	else
		res = -1;	/* stop, someone is using the inode */
	hammer_rel_inode(ip, 0);
	return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
	int res;
	int try;

	for (try = res = 0; try < 4; ++try) {
		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
					   hammer_inode_pfs_cmp,
					   hammer_unload_pseudofs_callback,
					   &localization);
		if (res == 0 && try > 1)
			break;
		hammer_flusher_sync(trans->hmp);
	}
	if (res != 0)
		res = ENOTEMPTY;
	return(res);
}


/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
	hammer_unref(&pfsm->lock);
	if (pfsm->lock.refs == 0) {
		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
		kfree(pfsm, hmp->m_misc);
	}
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;
	int redirty;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization +
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error == 0) {
				ip->flags |= HAMMER_INODE_DELONDISK;
			}
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media.  We clean-up
		 * the state before releasing so we do not have to set-up
		 * a flush_group.
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;

		/*
		 * If this flag is set we cannot sync the new file size
		 * because we haven't finished related truncations.  The
		 * inode will be flushed in another flush group to finish
		 * the job.
		 */
		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
		    ip->sync_ino_data.size != ip->ino_data.size) {
			redirty = 1;
			ip->sync_ino_data.size = ip->ino_data.size;
		} else {
			redirty = 0;
		}

		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}

		/*
		 * Note:  The record was never on the inode's record tree
		 * so just wave our hands importantly and destroy it.
		 */
		record->flags |= HAMMER_RECF_COMMITTED;
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flush_state = HAMMER_FST_IDLE;
		++ip->rec_generation;
		hammer_rel_mem_record(record);

		/*
		 * Finish up.
		 */
		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if (redirty)
				ip->sync_flags |= HAMMER_INODE_DDIRTY;

			/*
			 * Root volume count of inodes
			 */
			hammer_sync_lock_sh(trans);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
			hammer_sync_unlock(trans);
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}

/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
				     HAMMER_ITIMES_BASE(&cursor->data->inode),
				     HAMMER_ITIMES_BYTES);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
					     NULL, 0);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}

/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	/*hammer_mount_t hmp = ip->hmp;*/

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (ip->lock.refs == 1) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				hammer_flush_inode(ip, 0);
			} else if (ip->lock.refs == 1) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(ip->lock.refs >= 1);
			if (ip->lock.refs > 1) {
				hammer_unref(&ip->lock);
				break;
			}
		}
	}
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(ip->lock.lockcount == 0);
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}

/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
	hammer_record_t rec;

	/*
	 * Get rid of the inodes in-memory records, regardless of their
	 * state, and clear the mod-mask.
	 */
	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
		rec->target_ip = NULL;
		if (rec->flush_state == HAMMER_FST_SETUP)
			rec->flush_state = HAMMER_FST_IDLE;
	}
	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
		if (rec->flush_state == HAMMER_FST_FLUSH)
			--rec->flush_group->refs;
		else
			hammer_ref(&rec->lock);
		KKASSERT(rec->lock.refs == 1);
		rec->flush_state = HAMMER_FST_IDLE;
		rec->flush_group = NULL;
		rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
		rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
		++ip->rec_generation;
		hammer_rel_mem_record(rec);
	}
	ip->flags &= ~HAMMER_INODE_MODMASK;
	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
	KKASSERT(ip->vp == NULL);

	/*
	 * Remove the inode from any flush group, force it idle.  FLUSH
	 * and SETUP states have an inode ref.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_FLUSH:
		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
		--ip->flush_group->refs;
		ip->flush_group = NULL;
		/* fall through */
	case HAMMER_FST_SETUP:
		hammer_unref(&ip->lock);
		ip->flush_state = HAMMER_FST_IDLE;
		/* fall through */
	case HAMMER_FST_IDLE:
		break;
	}

	/*
	 * There shouldn't be any associated vnode.  The unload needs at
	 * least one ref, if we do have a vp steal its ip ref.
	 */
	if (ip->vp) {
		kprintf("hammer_destroy_inode_callback: Unexpected "
			"vnode association ip %p vp %p\n", ip, ip->vp);
		ip->vp->v_data = NULL;
		ip->vp = NULL;
	} else {
		hammer_ref(&ip->lock);
	}
	hammer_unload_inode(ip);
	return(0);
}

/*
 * Called on mount -u when switching from RW to RO or vice versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
	/*
	 * ronly of 0 or 2 does not trigger assertion.
	 * 2 is a special error state
	 */
	KKASSERT(ip->hmp->ronly != 1 ||
		  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			    HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	ip->flags |= flags;
}
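
/*
 * Illustrative sketch (not compiled): the usual modification pattern.
 * The frontend changes fields in ip->ino_data first and then calls
 * hammer_modify_inode() with the matching flags so the flusher knows
 * what must be synchronized (compare hammer_mkroot_pseudofs() above).
 */
#if 0
	ip->ino_data.mtime = trans->time;
	hammer_modify_inode(ip, HAMMER_INODE_MTIME);

	++ip->ino_data.nlinks;
	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
#endif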

/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependency, then it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_mount_t hmp;
	hammer_flush_group_t flg;
	int good;

	/*
	 * next_flush_group is the first flush group we can place the inode
	 * in.  It may be NULL.  If it becomes full we append a new flush
	 * group and make that the next_flush_group.
	 */
	hmp = ip->hmp;
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->running == 0);
		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
			break;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		hammer_flusher_async(ip->hmp, flg);
	}
	if (flg == NULL) {
		flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
		hmp->next_flush_group = flg;
		TAILQ_INIT(&flg->flush_list);
		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
	}

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 *
	 * If we have a parent dependency we must still fall through
	 * so we can run it.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP &&
		    TAILQ_EMPTY(&ip->target_list)) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		if (ip->flush_state == HAMMER_FST_IDLE)
			return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flg, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip, 0, flg);

		if (good >= 0) {
			/*
			 * We can continue if good >= 0.  Determine how
			 * many records under our inode can be flushed (and
			 * mark them).
			 */
			hammer_flush_inode_core(ip, flg, flags);
		} else {
			/*
			 * Parent has no connectivity, tell it to flush
			 * us as soon as it does.
			 *
			 * The REFLUSH flag is also needed to trigger
			 * dependency wakeups.
			 */
			ip->flags |= HAMMER_INODE_CONN_DOWN |
				     HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 *
		 * The REFLUSH flag is also needed to trigger
		 * dependency wakeups.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp, flg);
		}
		break;
	}
}
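
/*
 * Illustrative sketch (not compiled): requesting a flush.  A plain
 * hammer_flush_inode(ip, 0) just queues the inode to the current flush
 * group, as hammer_vop_inactive() does above; passing
 * HAMMER_FLUSH_SIGNAL additionally signals the flusher to run the
 * group asynchronously.
 */
#if 0
	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
#endif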
1680 
1681 /*
1682  * Scan ip->target_list, which is a list of records owned by PARENTS to our
1683  * ip which reference our ip.
1684  *
1685  * XXX This is a huge mess of recursive code, but not one bit of it blocks
1686  *     so for now do not ref/deref the structures.  Note that if we use the
1687  *     ref/rel code later, the rel CAN block.
1688  */
1689 static int
1690 hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
1691 			   hammer_flush_group_t flg)
1692 {
1693 	hammer_record_t depend;
1694 	int good;
1695 	int r;
1696 
1697 	/*
1698 	 * If we hit our recursion limit and we have parent dependencies
1699 	 * We cannot continue.  Returning < 0 will cause us to be flagged
1700 	 * for reflush.  Returning -2 cuts off additional dependency checks
1701 	 * because they are likely to also hit the depth limit.
1702 	 *
1703 	 * We cannot return < 0 if there are no dependencies or there might
1704 	 * not be anything to wakeup (ip).
1705 	 */
1706 	if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
1707 		kprintf("HAMMER Warning: depth limit reached on "
1708 			"setup recursion, inode %p %016llx\n",
1709 			ip, (long long)ip->obj_id);
1710 		return(-2);
1711 	}
1712 
1713 	/*
1714 	 * Scan dependencies
1715 	 */
1716 	good = 0;
1717 	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
1718 		r = hammer_setup_parent_inodes_helper(depend, depth, flg);
1719 		KKASSERT(depend->target_ip == ip);
1720 		if (r < 0 && good == 0)
1721 			good = -1;
1722 		if (r > 0)
1723 			good = 1;
1724 
1725 		/*
1726 		 * If we failed due to the recursion depth limit then stop
1727 		 * now.
1728 		 */
1729 		if (r == -2)
1730 			break;
1731 	}
1732 	return(good);
1733 }
1734 
1735 /*
1736  * This helper function takes a record representing the dependancy between
1737  * the parent inode and child inode.
1738  *
1739  * record->ip		= parent inode
1740  * record->target_ip	= child inode
1741  *
1742  * We are asked to recurse upwards and convert the record from SETUP
1743  * to FLUSH if possible.
1744  *
1745  * Return 1 if the record gives us connectivity
1746  *
1747  * Return 0 if the record is not relevant
1748  *
1749  * Return -1 if we can't resolve the dependancy and there is no connectivity.
1750  */
1751 static int
1752 hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
1753 				  hammer_flush_group_t flg)
1754 {
1755 	hammer_mount_t hmp;
1756 	hammer_inode_t pip;
1757 	int good;
1758 
1759 	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1760 	pip = record->ip;
1761 	hmp = pip->hmp;
1762 
1763 	/*
1764 	 * If the record is already flushing, is it in our flush group?
1765 	 *
1766 	 * If it is in our flush group but it is a general record or a
1767 	 * delete-on-disk, it does not improve our connectivity (return 0),
1768 	 * and if the target inode is not trying to destroy itself we can't
1769 	 * allow the operation yet anyway (the second return -1).
1770 	 */
1771 	if (record->flush_state == HAMMER_FST_FLUSH) {
1772 		/*
1773 		 * If not in our flush group ask the parent to reflush
1774 		 * us as soon as possible.
1775 		 */
1776 		if (record->flush_group != flg) {
1777 			pip->flags |= HAMMER_INODE_REFLUSH;
1778 			record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1779 			return(-1);
1780 		}
1781 
1782 		/*
1783 		 * If in our flush group everything is already set up,
1784 		 * just return whether the record will improve our
1785 		 * visibility or not.
1786 		 */
1787 		if (record->type == HAMMER_MEM_RECORD_ADD)
1788 			return(1);
1789 		return(0);
1790 	}
1791 
1792 	/*
1793 	 * It must be a setup record.  Try to resolve the setup dependencies
1794 	 * by recursing upwards so we can place ip on the flush list.
1795 	 *
1796 	 * Limit ourselves to 20 levels of recursion to avoid blowing out
1797 	 * the kernel stack.  If we hit the recursion limit we can't flush
1798 	 * until the parent flushes.  The parent will flush independently
1799 	 * on its own and ultimately a deep recursion will be resolved.
1800 	 */
1801 	KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1802 
1803 	good = hammer_setup_parent_inodes(pip, depth + 1, flg);
1804 
1805 	/*
1806 	 * If good < 0 the parent has no connectivity and we cannot safely
1807 	 * flush the directory entry, which also means we can't flush our
1808 	 * ip.  Flag us for downward recursion once the parent's
1809 	 * connectivity is resolved.  Flag the parent for [re]flush or it
1810 	 * may not check for downward recursions.
1811 	 */
1812 	if (good < 0) {
1813 		pip->flags |= HAMMER_INODE_REFLUSH;
1814 		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1815 		return(good);
1816 	}
1817 
1818 	/*
1819 	 * We are go, place the parent inode in a flushing state so we can
1820 	 * place its record in a flushing state.  Note that the parent
1821 	 * may already be flushing.  The record must be in the same flush
1822 	 * group as the parent.
1823 	 */
1824 	if (pip->flush_state != HAMMER_FST_FLUSH)
1825 		hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
1826 	KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
1827 	KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1828 
1829 #if 0
1830 	if (record->type == HAMMER_MEM_RECORD_DEL &&
1831 	    (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
1832 		/*
1833 		 * Regardless of flushing state we cannot sync this path if the
1834 		 * record represents a delete-on-disk but the target inode
1835 		 * is not ready to sync its own deletion.
1836 		 *
1837 		 * XXX need to count effective nlinks to determine whether
1838 		 * the flush is ok, otherwise removing a hardlink will
1839 		 * just leave the DEL record to rot.
1840 		 */
1841 		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
1842 		return(-1);
1843 	} else
1844 #endif
1845 	if (pip->flush_group == flg) {
1846 		/*
1847 		 * Because we have not calculated nlinks yet we can just
1848 		 * set records to the flush state if the parent is in
1849 		 * the same flush group as we are.
1850 		 */
1851 		record->flush_state = HAMMER_FST_FLUSH;
1852 		record->flush_group = flg;
1853 		++record->flush_group->refs;
1854 		hammer_ref(&record->lock);
1855 
1856 		/*
1857 		 * A general directory-add contributes to our visibility.
1858 		 *
1859 		 * Otherwise it is probably a directory-delete or
1860 		 * delete-on-disk record and does not contribute to our
1861 	 * visibility (but we can still flush it).
1862 		 */
1863 		if (record->type == HAMMER_MEM_RECORD_ADD)
1864 			return(1);
1865 		return(0);
1866 	} else {
1867 		/*
1868 		 * If the parent is not in our flush group we cannot
1869 		 * flush this record yet, there is no visibility.
1870 		 * We tell the parent to reflush and mark ourselves
1871 		 * so the parent knows it should flush us too.
1872 		 */
1873 		pip->flags |= HAMMER_INODE_REFLUSH;
1874 		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1875 		return(-1);
1876 	}
1877 }
1878 
1879 /*
1880  * This is the core routine placing an inode into the FST_FLUSH state.
1881  */
1882 static void
1883 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
1884 {
1885 	int go_count;
1886 
1887 	/*
1888 	 * Set flush state and prevent the flusher from cycling into
1889 	 * the next flush group.  Do not place the ip on the list yet.
1890 	 * An inode leaving the idle state gets an extra reference.
1891 	 */
1892 	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
1893 	if (ip->flush_state == HAMMER_FST_IDLE)
1894 		hammer_ref(&ip->lock);
1895 	ip->flush_state = HAMMER_FST_FLUSH;
1896 	ip->flush_group = flg;
1897 	++ip->hmp->flusher.group_lock;
1898 	++ip->hmp->count_iqueued;
1899 	++hammer_count_iqueued;
1900 	++flg->total_count;
1901 
1902 	/*
1903 	 * If the flush group reaches the autoflush limit we want to signal
1904 	 * the flusher.  This is particularly important for remove()s.
1905 	 */
1906 	if (flg->total_count == hammer_autoflush)
1907 		flags |= HAMMER_FLUSH_SIGNAL;
1908 
1909 	/*
1910 	 * We need to be able to vfsync/truncate from the backend.
1911 	 */
1912 	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
1913 	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
1914 		ip->flags |= HAMMER_INODE_VHELD;
1915 		vref(ip->vp);
1916 	}
1917 
1918 	/*
1919 	 * Figure out how many in-memory records we can actually flush
1920 	 * (not including inode meta-data, buffers, etc).
1921 	 */
1922 	KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
1923 	if (flags & HAMMER_FLUSH_RECURSION) {
1924 		/*
1925 		 * If this is an upwards recursion we do not want to
1926 		 * recurse down again!
1927 		 */
1928 		go_count = 1;
1929 #if 0
1930 	} else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
1931 		/*
1932 		 * No new records are added if we must complete a flush
1933 		 * from a previous cycle, but we do have to move the records
1934 		 * from the previous cycle to the current one.
1935 		 */
1936 #if 0
1937 		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1938 				   hammer_syncgrp_child_callback, NULL);
1939 #endif
1940 		go_count = 1;
1941 #endif
1942 	} else {
1943 		/*
1944 		 * Normal flush, scan records and bring them into the flush.
1945 		 * Directory adds and deletes are usually skipped (they are
1946 		 * grouped with the related inode rather than with the
1947 		 * directory).
1948 		 *
1949 		 * go_count can be negative, which means the scan aborted
1950 		 * due to the flush group being over-full and we should
1951 		 * flush what we have.
1952 		 */
1953 		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1954 				   hammer_setup_child_callback, NULL);
1955 	}
1956 
1957 	/*
1958 	 * This is a more involved test that includes go_count.  If we
1959 	 * can't flush, flag the inode and return.  If go_count is 0 we
1960 	 * were unable to flush any records in our rec_tree and
1961 	 * must ignore the XDIRTY flag.
1962 	 */
1963 	if (go_count == 0) {
1964 		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
1965 			--ip->hmp->count_iqueued;
1966 			--hammer_count_iqueued;
1967 
1968 			--flg->total_count;
1969 			ip->flush_state = HAMMER_FST_SETUP;
1970 			ip->flush_group = NULL;
1971 			if (ip->flags & HAMMER_INODE_VHELD) {
1972 				ip->flags &= ~HAMMER_INODE_VHELD;
1973 				vrele(ip->vp);
1974 			}
1975 
1976 			/*
1977 			 * REFLUSH is needed to trigger dependancy wakeups
1978 			 * when an inode is in SETUP.
1979 			 */
1980 			ip->flags |= HAMMER_INODE_REFLUSH;
1981 			if (flags & HAMMER_FLUSH_SIGNAL) {
1982 				ip->flags |= HAMMER_INODE_RESIGNAL;
1983 				hammer_flusher_async(ip->hmp, flg);
1984 			}
1985 			if (--ip->hmp->flusher.group_lock == 0)
1986 				wakeup(&ip->hmp->flusher.group_lock);
1987 			return;
1988 		}
1989 	}
1990 
1991 	/*
1992 	 * Snapshot the state of the inode for the backend flusher.
1993 	 *
1994 	 * We continue to retain save_trunc_off even when all truncations
1995 	 * have been resolved as an optimization to determine if we can
1996 	 * skip the B-Tree lookup for overwrite deletions.
1997 	 *
1998 	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
1999 	 * and stays in ip->flags.  Once set, it stays set until the
2000 	 * inode is destroyed.
2001 	 */
2002 	if (ip->flags & HAMMER_INODE_TRUNCATED) {
2003 		KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
2004 		ip->sync_trunc_off = ip->trunc_off;
2005 		ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
2006 		ip->flags &= ~HAMMER_INODE_TRUNCATED;
2007 		ip->sync_flags |= HAMMER_INODE_TRUNCATED;
2008 
2009 		/*
2010 		 * The save_trunc_off used to cache whether the B-Tree
2011 		 * holds any records past that point is not used until
2012 		 * after the truncation has succeeded, so we can safely
2013 		 * set it now.
2014 		 */
2015 		if (ip->save_trunc_off > ip->sync_trunc_off)
2016 			ip->save_trunc_off = ip->sync_trunc_off;
2017 	}
2018 	ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
2019 			   ~HAMMER_INODE_TRUNCATED);
2020 	ip->sync_ino_leaf = ip->ino_leaf;
2021 	ip->sync_ino_data = ip->ino_data;
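	/*
	 * NOTE: The expression below parses as
	 * ((~HAMMER_INODE_MODMASK) | HAMMER_INODE_TRUNCATED), i.e. it
	 * clears the modified-state bits from the frontend copy while
	 * leaving the TRUNCATED bit to the truncation interlock above.
	 */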
2022 	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
2023 #ifdef DEBUG_TRUNCATE
2024 	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
2025 		kprintf("truncateS %016llx\n", (long long)ip->sync_trunc_off);
2026 #endif
2027 
2028 	/*
2029 	 * The flusher list inherits our inode and reference.
2030 	 */
2031 	KKASSERT(flg->running == 0);
2032 	TAILQ_INSERT_TAIL(&flg->flush_list, ip, flush_entry);
2033 	if (--ip->hmp->flusher.group_lock == 0)
2034 		wakeup(&ip->hmp->flusher.group_lock);
2035 
2036 	if (flags & HAMMER_FLUSH_SIGNAL) {
2037 		hammer_flusher_async(ip->hmp, flg);
2038 	}
2039 }
2040 
2041 /*
2042  * Callback for scan of ip->rec_tree.  Try to include each record in our
2043  * flush.  ip->flush_group has been set but the inode has not yet been
2044  * moved into a flushing state.
2045  *
2046  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
2047  * both inodes.
2048  *
2049  * We return 1 for any record placed or found in FST_FLUSH, which prevents
2050  * the caller from shortcutting the flush.
2051  */
2052 static int
2053 hammer_setup_child_callback(hammer_record_t rec, void *data)
2054 {
2055 	hammer_flush_group_t flg;
2056 	hammer_inode_t target_ip;
2057 	hammer_inode_t ip;
2058 	int r;
2059 
2060 	/*
2061 	 * Records deleted or committed by the backend are ignored.
2062 	 * Note that the flush detects deleted frontend records at
2063 	 * multiple points to deal with races.  This is just the first
2064 	 * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
2065 	 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
2066 	 * messes up link-count calculations.
2067 	 *
2068 	 * NOTE: Don't get confused between record deletion and, say,
2069 	 * directory entry deletion.  The deletion of a directory entry
2070 	 * which is on-media has nothing to do with the record deletion
2071 	 * flags.
2072 	 */
2073 	if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
2074 			  HAMMER_RECF_COMMITTED)) {
2075 		if (rec->flush_state == HAMMER_FST_FLUSH) {
2076 			KKASSERT(rec->flush_group == rec->ip->flush_group);
2077 			r = 1;
2078 		} else {
2079 			r = 0;
2080 		}
2081 		return(r);
2082 	}
2083 
2084 	/*
2085 	 * If the record is in an idle state it has no dependencies and
2086 	 * can be flushed.
2087 	 */
2088 	ip = rec->ip;
2089 	flg = ip->flush_group;
2090 	r = 0;
2091 
2092 	switch(rec->flush_state) {
2093 	case HAMMER_FST_IDLE:
2094 		/*
2095 		 * The record has no setup dependency, we can flush it.
2096 		 */
2097 		KKASSERT(rec->target_ip == NULL);
2098 		rec->flush_state = HAMMER_FST_FLUSH;
2099 		rec->flush_group = flg;
2100 		++flg->refs;
2101 		hammer_ref(&rec->lock);
2102 		r = 1;
2103 		break;
2104 	case HAMMER_FST_SETUP:
2105 		/*
2106 		 * The record has a setup dependency.  These are typically
2107 		 * directory entry adds and deletes.  Such entries will be
2108 		 * flushed when their inodes are flushed so we do not
2109 		 * usually have to add them to the flush here.  However,
2110 		 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2111 		 * it is asking us to flush this record (and it).
2112 		 */
2113 		target_ip = rec->target_ip;
2114 		KKASSERT(target_ip != NULL);
2115 		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
2116 
2117 		/*
2118 		 * If the target IP is already flushing in our group
2119 		 * we could associate the record, but target_ip has
2120 		 * already synced ino_data to sync_ino_data and we
2121 		 * would also have to adjust nlinks.  Plus there are
2122 		 * ordering issues for adds and deletes.
2123 		 *
2124 		 * Reflush downward if this is an ADD, and upward if
2125 		 * this is a DEL.
2126 		 */
2127 		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
2128 			if (rec->type == HAMMER_MEM_RECORD_ADD)
2129 				ip->flags |= HAMMER_INODE_REFLUSH;
2130 			else
2131 				target_ip->flags |= HAMMER_INODE_REFLUSH;
2132 			break;
2133 		}
2134 
2135 		/*
2136 		 * Target IP is not yet flushing.  This can get complex
2137 		 * because we have to be careful about the recursion.
2138 		 *
2139 		 * Directories create an issue for us in that if a flush
2140 		 * of a directory is requested the expectation is to flush
2141 		 * any pending directory entries, but this will cause the
2142 		 * related inodes to recursively flush as well.  We can't
2143 		 * really defer the operation, so just pull in as many as
2144 		 * we can and let a later reflush pick up the rest.
2145 		 */
2146 #if 0
2147 		if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
2148 		    (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
2149 			/*
2150 			 * We aren't reclaiming and the target ip was not
2151 			 * previously prevented from flushing due to this
2152 			 * record dependency.  Do not flush this record.
2153 			 */
2154 			/*r = 0;*/
2155 		} else
2156 #endif
2157 		if (flg->total_count + flg->refs >
2158 			   ip->hmp->undo_rec_limit) {
2159 			/*
2160 			 * Our flush group is over-full and we risk blowing
2161 			 * out the UNDO FIFO.  Stop the scan, flush what we
2162 			 * have, then reflush the directory.
2163 			 *
2164 			 * The directory may be forced through multiple
2165 			 * flush groups before it can be completely
2166 			 * flushed.
2167 			 */
2168 			ip->flags |= HAMMER_INODE_RESIGNAL |
2169 				     HAMMER_INODE_REFLUSH;
2170 			r = -1;
2171 		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
2172 			/*
2173 			 * If the target IP is not flushing we can force
2174 			 * it to flush, even if it is unable to write out
2175 			 * any of its own records, we have at least one in
2176 			 * hand that we CAN deal with.
2177 			 */
2178 			rec->flush_state = HAMMER_FST_FLUSH;
2179 			rec->flush_group = flg;
2180 			++flg->refs;
2181 			hammer_ref(&rec->lock);
2182 			hammer_flush_inode_core(target_ip, flg,
2183 						HAMMER_FLUSH_RECURSION);
2184 			r = 1;
2185 		} else {
2186 			/*
2187 			 * General or delete-on-disk record.
2188 			 *
2189 			 * XXX this needs help.  If a delete-on-disk we could
2190 			 * disconnect the target.  If the target has its own
2191 			 * dependencies they really need to be flushed.
2192 			 *
2193 			 * XXX
2194 			 */
2195 			rec->flush_state = HAMMER_FST_FLUSH;
2196 			rec->flush_group = flg;
2197 			++flg->refs;
2198 			hammer_ref(&rec->lock);
2199 			hammer_flush_inode_core(target_ip, flg,
2200 						HAMMER_FLUSH_RECURSION);
2201 			r = 1;
2202 		}
2203 		break;
2204 	case HAMMER_FST_FLUSH:
2205 		/*
2206 		 * The flush_group should already match.
2207 		 */
2208 		KKASSERT(rec->flush_group == flg);
2209 		r = 1;
2210 		break;
2211 	}
2212 	return(r);
2213 }
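
/*
 * Hedged sketch of how the callback return values combine: as used
 * above, RB_SCAN accumulates the non-negative callback returns into
 * its result and a negative return aborts the scan, which is how
 * go_count in hammer_flush_inode_core() can come back negative.  The
 * helper below is hypothetical, for illustration only.
 */
#if 0
static int
example_count_flushable(hammer_inode_t ip)
{
	/*
	 * Returns the number of records placed in (or already in)
	 * FST_FLUSH, or a negative value if the scan aborted because
	 * the flush group over-filled.
	 */
	return (RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
			hammer_setup_child_callback, NULL));
}
#endif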
2214 
2215 #if 0
2216 /*
2217  * This version just moves records already in a flush state to the new
2218  * flush group and that is it.
2219  */
2220 static int
2221 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
2222 {
2223 	hammer_inode_t ip = rec->ip;
2224 
2225 	switch(rec->flush_state) {
2226 	case HAMMER_FST_FLUSH:
2227 		KKASSERT(rec->flush_group == ip->flush_group);
2228 		break;
2229 	default:
2230 		break;
2231 	}
2232 	return(0);
2233 }
2234 #endif
2235 
2236 /*
2237  * Wait for a previously queued flush to complete.
2238  *
2239  * If a critical error occurred we don't try to wait.
2240  */
2241 void
2242 hammer_wait_inode(hammer_inode_t ip)
2243 {
2244 	hammer_flush_group_t flg;
2245 
2246 	flg = NULL;
2247 	if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2248 		while (ip->flush_state != HAMMER_FST_IDLE &&
2249 		       (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2250 			if (ip->flush_state == HAMMER_FST_SETUP)
2251 				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2252 			if (ip->flush_state != HAMMER_FST_IDLE) {
2253 				ip->flags |= HAMMER_INODE_FLUSHW;
2254 				tsleep(&ip->flags, 0, "hmrwin", 0);
2255 			}
2256 		}
2257 	}
2258 }
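
/*
 * NOTE: The FLUSHW handshake above pairs with hammer_flush_inode_done(),
 * which clears HAMMER_INODE_FLUSHW and wakeup()s &ip->flags once the
 * backend finishes the flush.
 */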
2259 
2260 /*
2261  * Called by the backend code when a flush has been completed.
2262  * The inode has already been removed from the flush list.
2263  *
2264  * A pipelined flush can occur, in which case we must re-enter the
2265  * inode on the list and re-copy its fields.
2266  */
2267 void
2268 hammer_flush_inode_done(hammer_inode_t ip, int error)
2269 {
2270 	hammer_mount_t hmp;
2271 	int dorel;
2272 
2273 	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2274 
2275 	hmp = ip->hmp;
2276 
2277 	/*
2278 	 * Auto-reflush if the backend could not completely flush
2279 	 * the inode.  This fixes a case where a deferred buffer flush
2280 	 * could cause fsync to return early.
2281 	 */
2282 	if (ip->sync_flags & HAMMER_INODE_MODMASK)
2283 		ip->flags |= HAMMER_INODE_REFLUSH;
2284 
2285 	/*
2286 	 * Merge left-over flags back into the frontend and fix the state.
2287 	 * Incomplete truncations are retained by the backend.
2288 	 */
2289 	ip->error = error;
2290 	ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2291 	ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2292 
2293 	/*
2294 	 * The backend may have adjusted nlinks, so if the adjusted nlinks
2295 	 * does not match the frontend, set the frontend's DDIRTY flag again.
2296 	 */
2297 	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2298 		ip->flags |= HAMMER_INODE_DDIRTY;
2299 
2300 	/*
2301 	 * Fix up the dirty buffer status.
2302 	 */
2303 	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2304 		ip->flags |= HAMMER_INODE_BUFS;
2305 	}
2306 
2307 	/*
2308 	 * Re-set the XDIRTY flag if some of the inode's in-memory records
2309 	 * could not be flushed.
2310 	 */
2311 	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2312 		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2313 		 (!RB_EMPTY(&ip->rec_tree) &&
2314 		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2315 
2316 	/*
2317 	 * Do not lose track of inodes which no longer have vnode
2318 	 * associations, otherwise they may never get flushed again.
2319 	 *
2320 	 * The reflush flag can be set superfluously, causing extra pain
2321 	 * for no reason.  If the inode is no longer modified it no longer
2322 	 * needs to be flushed.
2323 	 */
2324 	if (ip->flags & HAMMER_INODE_MODMASK) {
2325 		if (ip->vp == NULL)
2326 			ip->flags |= HAMMER_INODE_REFLUSH;
2327 	} else {
2328 		ip->flags &= ~HAMMER_INODE_REFLUSH;
2329 	}
2330 
2331 	/*
2332 	 * Adjust the flush state.
2333 	 */
2334 	if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2335 		/*
2336 		 * We were unable to flush out all our records, leave the
2337 		 * inode in a flush state and in the current flush group.
2338 		 * The flush group will be re-run.
2339 		 *
2340 		 * This occurs if the UNDO block gets too full or there is
2341 		 * too much dirty meta-data; it allows the flusher to
2342 		 * finalize the UNDO block and then re-flush.
2343 		 */
2344 		ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2345 		dorel = 0;
2346 	} else {
2347 		/*
2348 		 * Remove from the flush_group
2349 		 */
2350 		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
2351 		ip->flush_group = NULL;
2352 
2353 		/*
2354 		 * Clean up the vnode ref and tracking counts.
2355 		 */
2356 		if (ip->flags & HAMMER_INODE_VHELD) {
2357 			ip->flags &= ~HAMMER_INODE_VHELD;
2358 			vrele(ip->vp);
2359 		}
2360 		--hmp->count_iqueued;
2361 		--hammer_count_iqueued;
2362 
2363 		/*
2364 		 * And adjust the state.
2365 		 */
2366 		if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2367 			ip->flush_state = HAMMER_FST_IDLE;
2368 			dorel = 1;
2369 		} else {
2370 			ip->flush_state = HAMMER_FST_SETUP;
2371 			dorel = 0;
2372 		}
2373 
2374 		/*
2375 		 * If the frontend is waiting for a flush to complete,
2376 		 * wake it up.
2377 		 */
2378 		if (ip->flags & HAMMER_INODE_FLUSHW) {
2379 			ip->flags &= ~HAMMER_INODE_FLUSHW;
2380 			wakeup(&ip->flags);
2381 		}
2382 
2383 		/*
2384 		 * If the frontend made more changes and requested another
2385 		 * flush, then try to get it running.
2386 		 *
2387 		 * Reflushes are aborted when the inode is errored out.
2388 		 */
2389 		if (ip->flags & HAMMER_INODE_REFLUSH) {
2390 			ip->flags &= ~HAMMER_INODE_REFLUSH;
2391 			if (ip->flags & HAMMER_INODE_RESIGNAL) {
2392 				ip->flags &= ~HAMMER_INODE_RESIGNAL;
2393 				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2394 			} else {
2395 				hammer_flush_inode(ip, 0);
2396 			}
2397 		}
2398 	}
2399 
2400 	/*
2401 	 * If we have no parent dependencies we can clear CONN_DOWN
2402 	 */
2403 	if (TAILQ_EMPTY(&ip->target_list))
2404 		ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2405 
2406 	/*
2407 	 * If the inode is now clean drop the space reservation.
2408 	 */
2409 	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2410 	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
2411 		ip->flags &= ~HAMMER_INODE_RSV_INODES;
2412 		--hmp->rsv_inodes;
2413 	}
2414 
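	/*
	 * If dorel is set the inode returned to FST_IDLE above; the
	 * release below balances the extra reference taken by
	 * hammer_flush_inode_core() when the inode left the idle state.
	 */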
2415 	if (dorel)
2416 		hammer_rel_inode(ip, 0);
2417 }
2418 
2419 /*
2420  * Called from hammer_sync_inode() to synchronize in-memory records
2421  * to the media.
2422  */
2423 static int
2424 hammer_sync_record_callback(hammer_record_t record, void *data)
2425 {
2426 	hammer_cursor_t cursor = data;
2427 	hammer_transaction_t trans = cursor->trans;
2428 	hammer_mount_t hmp = trans->hmp;
2429 	int error;
2430 
2431 	/*
2432 	 * Skip records that do not belong to the current flush.
2433 	 */
2434 	++hammer_stats_record_iterations;
2435 	if (record->flush_state != HAMMER_FST_FLUSH)
2436 		return(0);
2437 
2438 #if 1
2439 	if (record->flush_group != record->ip->flush_group) {
2440 		kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group, record->ip->flush_group);
2441 		Debugger("blah2");
2442 		return(0);
2443 	}
2444 #endif
2445 	KKASSERT(record->flush_group == record->ip->flush_group);
2446 
2447 	/*
2448 	 * Interlock the record using the BE flag.  Once BE is set the
2449 	 * frontend cannot change the state of FE.
2450 	 *
2451 	 * NOTE: If FE is set prior to us setting BE we still sync the
2452 	 * record out, but the flush completion code converts it to
2453 	 * a delete-on-disk record instead of destroying it.
2454 	 */
2455 	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2456 	record->flags |= HAMMER_RECF_INTERLOCK_BE;
2457 
2458 	/*
2459 	 * The backend has already disposed of the record.
2460 	 */
2461 	if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
2462 		error = 0;
2463 		goto done;
2464 	}
2465 
2466 	/*
2467 	 * If the whole inode is being deleted all on-disk records will
2468 	 * be deleted very soon, we can't sync any new records to disk
2469 	 * because they will be deleted in the same transaction they were
2470 	 * created in (delete_tid == create_tid), which will assert.
2471 	 *
2472 	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
2473 	 * that we currently panic on.
2474 	 */
2475 	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2476 		switch(record->type) {
2477 		case HAMMER_MEM_RECORD_DATA:
2478 			/*
2479 			 * We don't have to do anything, if the record was
2480 			 * committed the space will have been accounted for
2481 			 * in the blockmap.
2482 			 */
2483 			/* fall through */
2484 		case HAMMER_MEM_RECORD_GENERAL:
2485 			/*
2486 			 * Set deleted-by-backend flag.  Do not set the
2487 			 * backend committed flag, because we are throwing
2488 			 * the record away.
2489 			 */
2490 			record->flags |= HAMMER_RECF_DELETED_BE;
2491 			++record->ip->rec_generation;
2492 			error = 0;
2493 			goto done;
2494 		case HAMMER_MEM_RECORD_ADD:
2495 			panic("hammer_sync_record_callback: illegal add "
2496 			      "during inode deletion record %p", record);
2497 			break; /* NOT REACHED */
2498 		case HAMMER_MEM_RECORD_INODE:
2499 			panic("hammer_sync_record_callback: attempt to "
2500 			      "sync inode record %p?", record);
2501 			break; /* NOT REACHED */
2502 		case HAMMER_MEM_RECORD_DEL:
2503 			/*
2504 			 * Follow through and issue the on-disk deletion
2505 			 */
2506 			break;
2507 		}
2508 	}
2509 
2510 	/*
2511 	 * If DELETED_FE is set special handling is needed for directory
2512 	 * entries.  Dependent pieces related to the directory entry may
2513 	 * have already been synced to disk.  If this occurs we have to
2514 	 * sync the directory entry and then change the in-memory record
2515 	 * from an ADD to a DELETE to cover the fact that it's been
2516 	 * deleted by the frontend.
2517 	 *
2518 	 * A directory delete covering record (MEM_RECORD_DEL) can never
2519 	 * be deleted by the frontend.
2520 	 *
2521 	 * Any other record type (aka DATA) can be deleted by the frontend.
2522 	 * XXX At the moment the flusher must skip it because there may
2523 	 * be another data record in the flush group for the same block,
2524 	 * meaning that some frontend data changes can leak into the backend's
2525 	 * synchronization point.
2526 	 */
2527 	if (record->flags & HAMMER_RECF_DELETED_FE) {
2528 		if (record->type == HAMMER_MEM_RECORD_ADD) {
2529 			/*
2530 			 * Convert a front-end deleted directory-add to
2531 			 * a directory-delete entry later.
2532 			 */
2533 			record->flags |= HAMMER_RECF_CONVERT_DELETE;
2534 		} else {
2535 			/*
2536 			 * Dispose of the record (race case).  Mark as
2537 			 * deleted by backend (and not committed).
2538 			 */
2539 			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2540 			record->flags |= HAMMER_RECF_DELETED_BE;
2541 			++record->ip->rec_generation;
2542 			error = 0;
2543 			goto done;
2544 		}
2545 	}
2546 
2547 	/*
2548 	 * Assign the create_tid for new records.  Deletions already
2549 	 * have the record's entire key properly set up.
2550 	 */
2551 	if (record->type != HAMMER_MEM_RECORD_DEL) {
2552 		record->leaf.base.create_tid = trans->tid;
2553 		record->leaf.create_ts = trans->time32;
2554 	}
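
	/*
	 * Write the record out, tearing the cursor down and retrying
	 * from scratch whenever the B-Tree layer returns EDEADLK so
	 * the lock ordering conflict can resolve.
	 */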
2555 	for (;;) {
2556 		error = hammer_ip_sync_record_cursor(cursor, record);
2557 		if (error != EDEADLK)
2558 			break;
2559 		hammer_done_cursor(cursor);
2560 		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2561 					   record->ip);
2562 		if (error)
2563 			break;
2564 	}
2565 	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2566 
2567 	if (error)
2568 		error = -error;
2569 done:
2570 	hammer_flush_record_done(record, error);
2571 
2572 	/*
2573 	 * Do partial finalization if we have built up too many dirty
2574 	 * buffers.  Otherwise a buffer cache deadlock can occur when
2575 	 * doing things like creating tens of thousands of tiny files.
2576 	 *
2577 	 * We must release our cursor lock to avoid a 3-way deadlock
2578 	 * due to the exclusive sync lock the finalizer must get.
2579 	 */
2580 	if (hammer_flusher_meta_limit(hmp)) {
2581 		hammer_unlock_cursor(cursor);
2582 		hammer_flusher_finalize(trans, 0);
2583 		hammer_lock_cursor(cursor);
2584 	}
2585 
2586 	return(error);
2587 }
2588 
2589 /*
2590  * Backend function called by the flusher to sync an inode to media.
2591  */
2592 int
2593 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2594 {
2595 	struct hammer_cursor cursor;
2596 	hammer_node_t tmp_node;
2597 	hammer_record_t depend;
2598 	hammer_record_t next;
2599 	int error, tmp_error;
2600 	u_int64_t nlinks;
2601 
2602 	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2603 		return(0);
2604 
2605 	error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2606 	if (error)
2607 		goto done;
2608 
2609 	/*
2610 	 * Any directory records referencing this inode which are not in
2611 	 * our current flush group must adjust our nlink count for the
2612 	 * purposes of synchronization to disk.
2613 	 *
2614 	 * Records which are in our flush group can be unlinked from our
2615 	 * inode now, potentially allowing the inode to be physically
2616 	 * deleted.
2617 	 *
2618 	 * This cannot block.
2619 	 */
2620 	nlinks = ip->ino_data.nlinks;
2621 	next = TAILQ_FIRST(&ip->target_list);
2622 	while ((depend = next) != NULL) {
2623 		next = TAILQ_NEXT(depend, target_entry);
2624 		if (depend->flush_state == HAMMER_FST_FLUSH &&
2625 		    depend->flush_group == ip->flush_group) {
2626 			/*
2627 			 * If this is an ADD that was deleted by the frontend
2628 			 * the frontend nlinks count will have already been
2629 			 * decremented, but the backend is going to sync its
2630 			 * directory entry and must account for it.  The
2631 			 * record will be converted to a delete-on-disk when
2632 			 * it gets synced.
2633 			 *
2634 			 * If the ADD was not deleted by the frontend we
2635 			 * can remove the dependency from our target_list.
2636 			 */
2637 			if (depend->flags & HAMMER_RECF_DELETED_FE) {
2638 				++nlinks;
2639 			} else {
2640 				TAILQ_REMOVE(&ip->target_list, depend,
2641 					     target_entry);
2642 				depend->target_ip = NULL;
2643 			}
2644 		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2645 			/*
2646 			 * Not part of our flush group and not deleted by
2647 			 * the front-end, adjust the link count synced to
2648 			 * the media (undo what the frontend did when it
2649 			 * queued the record).
2650 			 */
2651 			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2652 			switch(depend->type) {
2653 			case HAMMER_MEM_RECORD_ADD:
2654 				--nlinks;
2655 				break;
2656 			case HAMMER_MEM_RECORD_DEL:
2657 				++nlinks;
2658 				break;
2659 			default:
2660 				break;
2661 			}
2662 		}
2663 	}
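
	/*
	 * Example (descriptive): if the frontend created a hard link,
	 * ino_data.nlinks was already bumped to 2, but when that ADD
	 * record sits in a later flush group the media copy must be
	 * synced with nlinks == 1 until the directory entry itself is
	 * flushed; the --nlinks above undoes the frontend's increment.
	 */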
2664 
2665 	/*
2666 	 * Set dirty if we had to modify the link count.
2667 	 */
2668 	if (ip->sync_ino_data.nlinks != nlinks) {
2669 		KKASSERT((int64_t)nlinks >= 0);
2670 		ip->sync_ino_data.nlinks = nlinks;
2671 		ip->sync_flags |= HAMMER_INODE_DDIRTY;
2672 	}
2673 
2674 	/*
2675 	 * If there is a truncation queued, destroy any data past the (aligned)
2676 	 * truncation point.  Userland will have dealt with the buffer
2677 	 * containing the truncation point for us.
2678 	 *
2679 	 * We don't flush pending frontend data buffers until after we've
2680 	 * dealt with the truncation.
2681 	 */
2682 	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2683 		/*
2684 		 * Interlock trunc_off.  The VOP front-end may continue to
2685 		 * make adjustments to it while we are blocked.
2686 		 */
2687 		off_t trunc_off;
2688 		off_t aligned_trunc_off;
2689 		int blkmask;
2690 
2691 		trunc_off = ip->sync_trunc_off;
2692 		blkmask = hammer_blocksize(trunc_off) - 1;
2693 		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
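
		/*
		 * e.g. (hypothetical values): with a 16KB block size
		 * blkmask == 0x3fff, so trunc_off == 0x104200 aligns to
		 * (0x104200 + 0x3fff) & ~0x3fff == 0x108000, the first
		 * whole block at or beyond the truncation point.
		 */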
2694 
2695 		/*
2696 		 * Delete any whole blocks on-media.  The front-end has
2697 		 * already cleaned out any partial block and made it
2698 		 * pending.  The front-end may have updated trunc_off
2699 		 * while we were blocked so we only use sync_trunc_off.
2700 		 *
2701 		 * This operation can blow out the buffer cache, EWOULDBLOCK
2702 		 * means we were unable to complete the deletion.  The
2703 		 * deletion will update sync_trunc_off in that case.
2704 		 */
2705 		error = hammer_ip_delete_range(&cursor, ip,
2706 						aligned_trunc_off,
2707 						0x7FFFFFFFFFFFFFFFLL, 2);
2708 		if (error == EWOULDBLOCK) {
2709 			ip->flags |= HAMMER_INODE_WOULDBLOCK;
2710 			error = 0;
2711 			goto defer_buffer_flush;
2712 		}
2713 
2714 		if (error)
2715 			goto done;
2716 
2717 		/*
2718 		 * Clear the truncation flag on the backend after we have
2719 		 * completed the deletions.  Backend data is now good again
2720 		 * (including new records we are about to sync, below).
2721 		 *
2722 		 * Leave sync_trunc_off intact.  As we write additional
2723 		 * records the backend will update sync_trunc_off.  This
2724 		 * tells the backend whether it can skip the overwrite
2725 		 * test.  This should work properly even when the backend
2726 		 * writes full blocks where the truncation point straddles
2727 		 * the block because the comparison is against the base
2728 		 * offset of the record.
2729 		 */
2730 		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2731 		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
2732 	} else {
2733 		error = 0;
2734 	}
2735 
2736 	/*
2737 	 * Now sync related records.  These will typically be directory
2738 	 * entries, records tracking direct-writes, or delete-on-disk records.
2739 	 */
2740 	if (error == 0) {
2741 		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2742 				    hammer_sync_record_callback, &cursor);
2743 		if (tmp_error < 0)
2744 			tmp_error = -tmp_error;
2745 		if (tmp_error)
2746 			error = tmp_error;
2747 	}
2748 	hammer_cache_node(&ip->cache[1], cursor.node);
2749 
2750 	/*
2751 	 * Re-seek for inode update, assuming our cache hasn't been ripped
2752 	 * out from under us.
2753 	 */
2754 	if (error == 0) {
2755 		tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
2756 		if (tmp_node) {
2757 			hammer_cursor_downgrade(&cursor);
2758 			hammer_lock_sh(&tmp_node->lock);
2759 			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
2760 				hammer_cursor_seek(&cursor, tmp_node, 0);
2761 			hammer_unlock(&tmp_node->lock);
2762 			hammer_rel_node(tmp_node);
2763 		}
2764 		error = 0;
2765 	}
2766 
2767 	/*
2768 	 * If we are deleting the inode the frontend had better not have
2769 	 * any active references on elements making up the inode.
2770 	 *
2771 	 * The call to hammer_ip_delete_clean() cleans up auxiliary records
2772 	 * but not DB or DATA records.  Those must have already been deleted
2773 	 * by the normal truncation mechanic.
2774 	 */
2775 	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
2776 	    RB_EMPTY(&ip->rec_tree) &&
2777 	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
2778 	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
2779 		int count1 = 0;
2780 
2781 		error = hammer_ip_delete_clean(&cursor, ip, &count1);
2782 		if (error == 0) {
2783 			ip->flags |= HAMMER_INODE_DELETED;
2784 			ip->sync_flags &= ~HAMMER_INODE_DELETING;
2785 			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2786 			KKASSERT(RB_EMPTY(&ip->rec_tree));
2787 
2788 			/*
2789 			 * Set delete_tid in both the frontend and backend
2790 			 * copy of the inode record.  The DELETED flag handles
2791 			 * this, do not set DDIRTY.
2792 			 */
2793 			ip->ino_leaf.base.delete_tid = trans->tid;
2794 			ip->sync_ino_leaf.base.delete_tid = trans->tid;
2795 			ip->ino_leaf.delete_ts = trans->time32;
2796 			ip->sync_ino_leaf.delete_ts = trans->time32;
2797 
2798 
2799 			/*
2800 			 * Adjust the inode count in the volume header
2801 			 */
2802 			hammer_sync_lock_sh(trans);
2803 			if (ip->flags & HAMMER_INODE_ONDISK) {
2804 				hammer_modify_volume_field(trans,
2805 							   trans->rootvol,
2806 							   vol0_stat_inodes);
2807 				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
2808 				hammer_modify_volume_done(trans->rootvol);
2809 			}
2810 			hammer_sync_unlock(trans);
2811 		}
2812 	}
2813 
2814 	if (error)
2815 		goto done;
2816 	ip->sync_flags &= ~HAMMER_INODE_BUFS;
2817 
2818 defer_buffer_flush:
2819 	/*
2820 	 * Now update the inode's on-disk inode-data and/or on-disk record.
2821 	 * DELETED and ONDISK are managed only in ip->flags.
2822 	 *
2823 	 * In the case of a deferred buffer flush we still update the on-disk
2824 	 * inode to satisfy visibility requirements if there happen to be
2825 	 * directory dependencies.
2826 	 */
2827 	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
2828 	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
2829 		/*
2830 		 * If deleted and on-disk, don't set any additional flags.
2831 		 * The delete flag takes care of things.
2832 		 *
2833 		 * Clear flags which may have been set by the frontend.
2834 		 */
2835 		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2836 				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2837 				    HAMMER_INODE_DELETING);
2838 		break;
2839 	case HAMMER_INODE_DELETED:
2840 		/*
2841 		 * Take care of the case where a deleted inode was never
2842 		 * flushed to the disk in the first place.
2843 		 *
2844 		 * Clear flags which may have been set by the frontend.
2845 		 */
2846 		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2847 				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2848 				    HAMMER_INODE_DELETING);
2849 		while (RB_ROOT(&ip->rec_tree)) {
2850 			hammer_record_t record = RB_ROOT(&ip->rec_tree);
2851 			hammer_ref(&record->lock);
2852 			KKASSERT(record->lock.refs == 1);
2853 			record->flags |= HAMMER_RECF_DELETED_BE;
2854 			++record->ip->rec_generation;
2855 			hammer_rel_mem_record(record);
2856 		}
2857 		break;
2858 	case HAMMER_INODE_ONDISK:
2859 		/*
2860 		 * If already on-disk, do not set any additional flags.
2861 		 */
2862 		break;
2863 	default:
2864 		/*
2865 		 * If not on-disk and not deleted, set DDIRTY to force
2866 		 * an initial record to be written.
2867 		 *
2868 		 * Also set the create_tid in both the frontend and backend
2869 		 * copy of the inode record.
2870 		 */
2871 		ip->ino_leaf.base.create_tid = trans->tid;
2872 		ip->ino_leaf.create_ts = trans->time32;
2873 		ip->sync_ino_leaf.base.create_tid = trans->tid;
2874 		ip->sync_ino_leaf.create_ts = trans->time32;
2875 		ip->sync_flags |= HAMMER_INODE_DDIRTY;
2876 		break;
2877 	}
2878 
2879 	/*
2880 	 * If DDIRTY is set, write out a new record.  If the inode
2881 	 * is already on-disk the old record is marked as deleted.
2882 	 *
2883 	 * If DELETED is set hammer_update_inode() will delete the existing
2884 	 * record without writing out a new one.
2885 	 *
2886 	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
2887 	 */
2888 	if (ip->flags & HAMMER_INODE_DELETED) {
2889 		error = hammer_update_inode(&cursor, ip);
2890 	} else
2891 	if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
2892 	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
2893 		error = hammer_update_itimes(&cursor, ip);
2894 	} else
2895 	if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
2896 		error = hammer_update_inode(&cursor, ip);
2897 	}
2898 done:
2899 	if (error) {
2900 		hammer_critical_error(ip->hmp, ip, error,
2901 				      "while syncing inode");
2902 	}
2903 	hammer_done_cursor(&cursor);
2904 	return(error);
2905 }
2906 
2907 /*
2908  * This routine is called when the OS is no longer actively referencing
2909  * the inode (but might still be keeping it cached), or when releasing
2910  * the last reference to an inode.
2911  *
2912  * At this point if the inode's nlinks count is zero we want to destroy
2913  * it, which may mean destroying it on-media too.
2914  */
2915 void
2916 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
2917 {
2918 	struct vnode *vp;
2919 
2920 	/*
2921 	 * Set the DELETING flag when the link count drops to 0 and the
2922 	 * OS no longer has any opens on the inode.
2923 	 *
2924 	 * The backend will clear DELETING (a mod flag) and set DELETED
2925 	 * (a state flag) when it is actually able to perform the
2926 	 * operation.
2927 	 *
2928 	 * Don't reflag the deletion if the flusher is currently syncing
2929 	 * one that was already flagged.  A previously set DELETING flag
2930 	 * may bounce around flags and sync_flags until the operation is
2931 	 * completely done.
2932 	 */
2933 	if (ip->ino_data.nlinks == 0 &&
2934 	    ((ip->flags | ip->sync_flags) & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
2935 		ip->flags |= HAMMER_INODE_DELETING;
2936 		ip->flags |= HAMMER_INODE_TRUNCATED;
2937 		ip->trunc_off = 0;
2938 		vp = NULL;
2939 		if (getvp) {
2940 			if (hammer_get_vnode(ip, &vp) != 0)
2941 				return;
2942 		}
2943 
2944 		/*
2945 		 * Final cleanup
2946 		 */
2947 		if (ip->vp) {
2948 			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
2949 			vnode_pager_setsize(ip->vp, 0);
2950 		}
2951 		if (getvp) {
2952 			vput(vp);
2953 		}
2954 	}
2955 }
2956 
2957 /*
2958  * After potentially resolving a dependency the inode is tested
2959  * to determine whether it needs to be reflushed.
2960  */
2961 void
2962 hammer_test_inode(hammer_inode_t ip)
2963 {
2964 	if (ip->flags & HAMMER_INODE_REFLUSH) {
2965 		ip->flags &= ~HAMMER_INODE_REFLUSH;
2966 		hammer_ref(&ip->lock);
2967 		if (ip->flags & HAMMER_INODE_RESIGNAL) {
2968 			ip->flags &= ~HAMMER_INODE_RESIGNAL;
2969 			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2970 		} else {
2971 			hammer_flush_inode(ip, 0);
2972 		}
2973 		hammer_rel_inode(ip, 0);
2974 	}
2975 }
2976 
2977 /*
2978  * Clear the RECLAIM flag on an inode.  This occurs when the inode is
2979  * reassociated with a vp or just before it gets freed.
2980  *
2981  * Pipeline wakeups to threads blocked due to an excessive number of
2982  * detached inodes.  The reclaim count generates a bit of negative
2983  * feedback.
2984  */
2985 static void
2986 hammer_inode_wakereclaims(hammer_inode_t ip, int dowake)
2987 {
2988 	struct hammer_reclaim *reclaim;
2989 	hammer_mount_t hmp = ip->hmp;
2990 
2991 	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
2992 		return;
2993 
2994 	--hammer_count_reclaiming;
2995 	--hmp->inode_reclaims;
2996 	ip->flags &= ~HAMMER_INODE_RECLAIM;
2997 
2998 	if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT || dowake) {
2999 		reclaim = TAILQ_FIRST(&hmp->reclaim_list);
3000 		if (reclaim && reclaim->count > 0 && --reclaim->count == 0) {
3001 			TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
3002 			wakeup(reclaim);
3003 		}
3004 	}
3005 }
3006 
3007 /*
3008  * Setup our reclaim pipeline.  We only let so many detached (and dirty)
3009  * inodes build up before we start blocking.
3010  *
3011  * When we block we don't care *which* inode has finished reclaiming,
3012  * as long as one does.  This is somewhat heuristic... we also put a
3013  * cap on how long we are willing to wait.
3014  */
3015 void
3016 hammer_inode_waitreclaims(hammer_mount_t hmp)
3017 {
3018 	struct hammer_reclaim reclaim;
3019 	int delay;
3020 
3021 	if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT)
3022 		return;
3023 	delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
3024 		(HAMMER_RECLAIM_WAIT * 3) + 1;
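	/*
	 * Example (hypothetical numbers): with HAMMER_RECLAIM_WAIT at
	 * 4000 and hz == 100, a backlog of 10000 reclaiming inodes
	 * gives (10000 - 4000) * 100 / 12000 + 1 == 51 ticks, about
	 * half a second; the penalty scales linearly with the backlog.
	 * The count of 2 queued below means two inodes must finish
	 * reclaiming before hammer_inode_wakereclaims() ends the sleep
	 * early.
	 */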
3025 	if (delay > 0) {
3026 		reclaim.count = 2;
3027 		TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
3028 		tsleep(&reclaim, 0, "hmrrcm", delay);
3029 		if (reclaim.count > 0)
3030 			TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
3031 	}
3032 }
3033 
3034 /*
3035  * A larger than normal backlog of inodes is sitting in the flusher,
3036  * enforce a general slowdown to let it catch up.  This routine is only
3037  * called on completion of a non-flusher-related transaction which
3038  * performed B-Tree node I/O.
3039  *
3040  * It is possible for the flusher to stall in a continuous load.
3041  * blogbench -i1000 -o seems to do a good job generating this sort of load.
3042  * If the flusher is unable to catch up the inode count can bloat until
3043  * we run out of kvm.
3044  *
3045  * This is a bit of a hack.
3046  */
3047 void
3048 hammer_inode_waithard(hammer_mount_t hmp)
3049 {
3050 	/*
3051 	 * Hysteresis, to avoid flapping in and out of recovery mode.
3052 	 */
3053 	if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
3054 		if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT / 2 &&
3055 		    hmp->count_iqueued < hmp->count_inodes / 20) {
3056 			hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
3057 			return;
3058 		}
3059 	} else {
3060 		if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT ||
3061 		    hmp->count_iqueued < hmp->count_inodes / 10) {
3062 			return;
3063 		}
3064 		hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
3065 	}
3066 
3067 	/*
3068 	 * Block for one flush cycle.
3069 	 */
3070 	hammer_flusher_wait_next(hmp);
3071 }
3072 
3073