/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>

static int	hammer_unload_inode(struct hammer_inode *ip);
static void	hammer_free_inode(hammer_inode_t ip);
static void	hammer_flush_inode_core(hammer_inode_t ip,
					hammer_flush_group_t flg, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int	hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
					hammer_flush_group_t flg);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record,
					int depth, hammer_flush_group_t flg);
static void	hammer_inode_wakereclaims(hammer_inode_t ip);
static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp,
					pid_t pid);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}
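
/*
 * NOTE: The compare order above (obj_localization, then obj_id, then
 * obj_asof, most significant first) must stay in sync with
 * hammer_inode_info_cmp() below, which performs the same comparison
 * from a lookup-info structure for the special LOOKUP_INFO variant.
 */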

int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->redo_fifo_start < ip2->redo_fifo_start)
		return(-1);
	if (ip1->redo_fifo_start > ip2->redo_fifo_start)
		return(1);
	return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
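 *
 * Because asof is the least significant compare field, all snapshots
 * of a given object sort as one contiguous run in the RB tree, which
 * is what allows a single RB_SCAN to visit every one of them.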
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
	u_int32_t localization = *(u_int32_t *)data;
	if (ip->obj_localization > localization)
		return(1);
	if (ip->obj_localization < localization)
		return(-1);
	return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
	if (p1->localization < p2->localization)
		return(-1);
	if (p1->localization > p2->localization)
		return(1);
	return(0);
}


RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
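
/*
 * NOTE: RB_GENERATE_XLOOKUP emits the _RB_LOOKUP_INFO() variant used by
 * hammer_get_inode() and hammer_find_inode(), while RB_GENERATE2 emits
 * a direct RB_LOOKUP() keyed on the localization field, used by
 * hammer_load_pseudofs().
 */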

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	if (ip->ino_data.nlinks == 0) {
		get_mplock();
		hammer_inode_unloadable_check(ip, 0);
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		vrecycle(ap->a_vp);
		rel_mplock();
	}
	return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regard to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		hammer_lock_ex(&ip->lock);
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaiming;
			++hmp->inode_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
		}
		hammer_unlock(&ip->lock);
		hammer_rel_inode(ip, 1);
	}
	return(0);
}
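
/*
 * NOTE: The RECLAIM accounting bumped above is expected to be unwound
 * by hammer_inode_wakereclaims() (called from hammer_get_vnode() and
 * hammer_free_inode()) once the inode is reassociated with a vnode or
 * destroyed.
 */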

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;
	u_int8_t obj_type;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;

			obj_type = ip->ino_data.obj_type;
			vp->v_type = hammer_get_vnode_type(obj_type);

			hammer_inode_wakereclaims(ip);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			case HAMMER_OBJTYPE_REGFILE:
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots can be accessed via
			 * non-root filesystem paths and setting VROOT may
			 * confuse the namecache.  Set VPFSROOT instead.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == hmp->asof) {
				if (ip->obj_localization == 0)
					vsetflags(vp, VROOT);
				else
					vsetflags(vp, VPFSROOT);
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG) {
				vinitvmio(vp, ip->ino_data.size,
					  hammer_blocksize(ip->ino_data.size),
					  hammer_blockoff(ip->ino_data.size));
			}
			break;
		}

		/*
		 * Interlock vnode clearing.  This does not prevent the
		 * vnode from going into a reclaimed state but it does
		 * prevent it from being destroyed or reused so the vget()
		 * will properly fail.
		 */
		hammer_lock_ex(&ip->lock);
		if ((vp = ip->vp) == NULL) {
			hammer_unlock(&ip->lock);
			continue;
		}
		vhold_interlocked(vp);
		hammer_unlock(&ip->lock);

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp) {
				vdrop(vp);
				break;
			}
			vput(vp);
		}
		vdrop(vp);
	}
	*vpp = vp;
	return(error);
}

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
		            int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_node_cache *cachep;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;


	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a dummy inode we return a failure so dounlink
	 * (which does another lookup) doesn't try to mess with the
	 * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
	 * to ref dummy inodes.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
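	/*
	 * NOTE: The maximum 64-bit offset assigned above acts as the
	 * 'no truncation pending' sentinel for the trunc_off fields.
	 */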
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 *
	 * We cache recent inode lookups in this directory in dip->cache[2].
	 * If we can't find it we assume the inode we are looking for is
	 * close to the directory inode.
	 */
retry:
	cachep = NULL;
	if (dip) {
		if (dip->cache[2].node)
			cachep = &dip->cache[2];
		else
			cachep = &dip->cache[0];
	}
	hammer_init_cursor(trans, &cursor, cachep, NULL);
	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;

	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * We might have something in the governing directory from
		 * scan optimizations (see the strategy code in
		 * hammer_vnops.c).
		 *
		 * We update dip->cache[2], if possible, with the location
		 * of the object inode for future directory shortcuts.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip) {
			if (dip->cache[3].node) {
				hammer_cache_node(&ip->cache[1],
						  dip->cache[3].node);
			}
			hammer_cache_node(&dip->cache[2], cursor.node);
		}

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting save_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->save_trunc_off = ip->ino_data.size;

		/*
		 * Locate and assign the pseudofs management structure to
		 * the inode.
		 */
		if (dip && dip->obj_localization == ip->obj_localization) {
			ip->pfsm = dip->pfsm;
			hammer_ref(&ip->pfsm->lock);
		} else {
			ip->pfsm = hammer_load_pseudofs(trans,
							ip->obj_localization,
							errorp);
			*errorp = 0;	/* ignore ENOENT */
		}
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}

		hammer_free_inode(ip);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);

	/*
	 * NEWINODE is only set if the inode becomes dirty later,
	 * setting it here just leads to unnecessary stalls.
	 *
	 * trans->flags |= HAMMER_TRANSF_NEWINODE;
	 */
	return (ip);
}

/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a non-fake inode we return an error.  Only fake
	 * inodes can be returned by this routine.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	*errorp = 0;
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Populate the dummy inode.  Leave everything zero'd out.
	 *
	 * (ip->ino_leaf and ip->ino_data)
	 *
	 * Make the dummy inode a FIFO object which most copy programs
	 * will properly ignore.
	 */
	ip->save_trunc_off = ip->ino_data.size;
	ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

	/*
	 * Locate and assign the pseudofs management structure to
	 * the inode.
	 */
	if (dip && dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
	} else {
		ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
						errorp);
		*errorp = 0;	/* ignore ENOENT */
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 *
	 * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			goto loop;
		}
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hammer_free_inode(ip);
		ip = NULL;
	}
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
		  hammer_tid_t asof, u_int32_t localization)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;

	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY)
			ip = NULL;
		else
			hammer_ref(&ip->lock);
	}
	return(ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred,
		    hammer_inode_t dip, const char *name, int namelen,
		    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	int error;
	int64_t namekey;
	u_int32_t dummy;

	hmp = trans->hmp;

	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	trans->flags |= HAMMER_TRANSF_NEWINODE;

	if (pfsm) {
		KKASSERT(pfsm->localization != 0);
		ip->obj_id = HAMMER_OBJID_ROOT;
		ip->obj_localization = pfsm->localization;
	} else {
		KKASSERT(dip != NULL);
		namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
		ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
		ip->obj_localization = dip->obj_localization;
	}

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;

	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	/* ip->save_trunc_off = 0; (already zero) */
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	if (dip) {
		ip->ino_data.uflags = dip->ino_data.uflags &
				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	}

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * If we are running version 2 or greater directory entries are
	 * inode-localized instead of data-localized.
	 */
	if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
		if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
			ip->ino_data.cap_flags |=
				HAMMER_INODE_CAP_DIR_LOCAL_INO;
		}
	}

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid.
	 */
	if (dip)
		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
	/*
	 * The parent_obj_localization field only applies to pseudo-fs roots.
	 * XXX this is no longer applicable, PFSs are no longer directly
	 * tied into the parent's directory structure.
	 */
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
	    ip->obj_id == HAMMER_OBJID_ROOT) {
		ip->ino_data.ext.obj.parent_obj_localization =
						dip->obj_localization;
	}
#endif

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	if (dip) {
		xuid = hammer_to_unix_xid(&dip->ino_data.uid);
		xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
					     xuid, cred, &vap->va_mode);
	} else {
		xuid = 0;
	}
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else if (dip)
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);

	if (pfsm) {
		ip->pfsm = pfsm;
		hammer_ref(&pfsm->lock);
		error = 0;
	} else if (dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
		error = 0;
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						&error);
		error = 0;	/* ignore ENOENT */
	}

	if (error) {
		hammer_free_inode(ip);
		ip = NULL;
	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		panic("hammer_create_inode: duplicate obj_id %llx",
		      (long long)ip->obj_id);
		/* not reached */
		hammer_free_inode(ip);
	}
	*ipp = ip;
	return(error);
}

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
	struct hammer_mount *hmp;

	hmp = ip->hmp;
	KKASSERT(hammer_oneref(&ip->lock));
	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_uncache_node(&ip->cache[2]);
	hammer_uncache_node(&ip->cache[3]);
	hammer_inode_wakereclaims(ip);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--hmp->count_inodes;
	if (ip->pfsm) {
		hammer_rel_pseudofs(hmp, ip->pfsm);
		ip->pfsm = NULL;
	}
	kfree(ip, hmp->m_inodes);
	ip = NULL;
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
		     u_int32_t localization, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_cursor cursor;
	int bytes;

retry:
	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
	if (pfsm) {
		hammer_ref(&pfsm->lock);
		*errorp = 0;
		return(pfsm);
	}

	/*
	 * PFS records are stored in the root inode (not the PFS root inode,
	 * but the real root).  Avoid an infinite recursion if loading
	 * the PFS for the real root.
	 */
	if (localization) {
		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
				      HAMMER_MAX_TID,
				      HAMMER_DEF_LOCALIZATION, 0, errorp);
	} else {
		ip = NULL;
	}

	pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
	pfsm->localization = localization;
	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	if (ip)
		*errorp = hammer_ip_lookup(&cursor);
	else
		*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == 0) {
		*errorp = hammer_ip_resolve_data(&cursor);
		if (*errorp == 0) {
			if (cursor.data->pfsd.mirror_flags &
			    HAMMER_PFSD_DELETED) {
				*errorp = ENOENT;
			} else {
				bytes = cursor.leaf->data_len;
				if (bytes > sizeof(pfsm->pfsd))
					bytes = sizeof(pfsm->pfsd);
				bcopy(cursor.data, &pfsm->pfsd, bytes);
			}
		}
	}
	hammer_done_cursor(&cursor);

	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_ref(&pfsm->lock);
	if (ip)
		hammer_rel_inode(ip, 0);
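	/*
	 * If another thread raced us and inserted a pfsm for the same
	 * localization first, throw ours away and loop back to pick
	 * theirs up via the RB_LOOKUP at the top.
	 */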
	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
		kfree(pfsm, hmp->m_misc);
		goto retry;
	}
	return(pfsm);
}

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	hammer_inode_t ip;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = pfsm->localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	/*
	 * Replace any in-memory version of the record.
	 */
	error = hammer_ip_lookup(&cursor);
	if (error == 0 && hammer_cursor_inmem(&cursor)) {
		record = cursor.iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor.deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor.deadlk_rec = record;
			error = EDEADLK;
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	}

	/*
	 * Allocate replacement general record.  The backend flush will
	 * delete any on-disk version of the record.
	 */
	if (error == 0 || error == ENOENT) {
		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
		record->type = HAMMER_MEM_RECORD_GENERAL;

		record->leaf.base.localization = ip->obj_localization +
						 HAMMER_LOCALIZE_MISC;
		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
		record->leaf.base.key = pfsm->localization;
		record->leaf.data_len = sizeof(pfsm->pfsd);
		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
		error = hammer_ip_add_record(trans, record);
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	hammer_rel_inode(ip, 0);
	return(error);
}

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
		       hammer_pseudofs_inmem_t pfsm)
{
	hammer_inode_t ip;
	struct vattr vap;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      pfsm->localization, 0, &error);
	if (ip == NULL) {
		vattr_null(&vap);
		vap.va_mode = 0755;
		vap.va_type = VDIR;
		error = hammer_create_inode(trans, &vap, cred,
					    NULL, NULL, 0,
					    pfsm, &ip);
		if (error == 0) {
			++ip->ino_data.nlinks;
			hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (ip)
		hammer_rel_inode(ip, 0);
	return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static
int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
	int res;

	hammer_ref(&ip->lock);
	if (hammer_isactive(&ip->lock) == 2 && ip->vp)
		vclean_unlocked(ip->vp);
	if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL)
		res = 0;
	else
		res = -1;	/* stop, someone is using the inode */
	hammer_rel_inode(ip, 0);
	return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
	int res;
	int try;

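	/*
	 * A pass can fail transiently while inodes are still being
	 * flushed, so retry up to four times with a full flusher sync
	 * between passes before giving up and returning ENOTEMPTY.
	 */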
	for (try = res = 0; try < 4; ++try) {
		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
					   hammer_inode_pfs_cmp,
					   hammer_unload_pseudofs_callback,
					   &localization);
		if (res == 0 && try > 1)
			break;
		hammer_flusher_sync(trans->hmp);
	}
	if (res != 0)
		res = ENOTEMPTY;
	return(res);
}


/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
	hammer_rel(&pfsm->lock);
	if (hammer_norefs(&pfsm->lock)) {
		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
		kfree(pfsm, hmp->m_misc);
	}
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;
	int redirty;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization +
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error == 0) {
				ip->flags |= HAMMER_INODE_DELONDISK;
			}
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media.  We clean
		 * up the state before releasing so we do not have to
		 * set up a flush_group.
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;

		/*
		 * If this flag is set we cannot sync the new file size
		 * because we haven't finished related truncations.  The
		 * inode will be flushed in another flush group to finish
		 * the job.
		 */
		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
		    ip->sync_ino_data.size != ip->ino_data.size) {
			redirty = 1;
			ip->sync_ino_data.size = ip->ino_data.size;
		} else {
			redirty = 0;
		}

		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}

		/*
		 * Note:  The record was never on the inode's record tree
		 * so just wave our hands importantly and destroy it.
		 */
		record->flags |= HAMMER_RECF_COMMITTED;
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flush_state = HAMMER_FST_IDLE;
		++ip->rec_generation;
		hammer_rel_mem_record(record);

		/*
		 * Finish up.
		 */
		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_SDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if (redirty)
				ip->sync_flags |= HAMMER_INODE_DDIRTY;

			/*
			 * Root volume count of inodes
			 */
			hammer_sync_lock_sh(trans);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
			hammer_sync_unlock(trans);
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_SDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}

/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
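 *
 * Because atime is not CRC-protected, a crash that loses an un-UNDO'd
 * atime update at worst leaves the previous atime in place; it cannot
 * produce an integrity error.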
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
				     HAMMER_ITIMES_BASE(&cursor->data->inode),
				     HAMMER_ITIMES_BYTES);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
					     NULL, 0);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}

/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	/*hammer_mount_t hmp = ip->hmp;*/

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (hammer_oneref(&ip->lock)) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				hammer_flush_inode(ip, 0);
			} else if (hammer_oneref(&ip->lock)) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(hammer_isactive(&ip->lock) >= 1);
			if (hammer_isactive(&ip->lock) > 1) {
				hammer_rel(&ip->lock);
				break;
			}
		}
	}
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(hammer_oneref(&ip->lock),
		("hammer_unload_inode: %d refs\n", hammer_isactive(&ip->lock)));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(hammer_notlocked(&ip->lock));
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	if (ip->flags & HAMMER_INODE_RDIRTY) {
		RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
		ip->flags &= ~HAMMER_INODE_RDIRTY;
	}
	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}

/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
	hammer_record_t rec;

	/*
	 * Get rid of the inodes in-memory records, regardless of their
	 * state, and clear the mod-mask.
	 */
	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
		rec->target_ip = NULL;
		if (rec->flush_state == HAMMER_FST_SETUP)
			rec->flush_state = HAMMER_FST_IDLE;
	}
	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
		if (rec->flush_state == HAMMER_FST_FLUSH)
			--rec->flush_group->refs;
		else
			hammer_ref(&rec->lock);
		KKASSERT(hammer_oneref(&rec->lock));
		rec->flush_state = HAMMER_FST_IDLE;
		rec->flush_group = NULL;
		rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
		rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
		++ip->rec_generation;
		hammer_rel_mem_record(rec);
	}
	ip->flags &= ~HAMMER_INODE_MODMASK;
	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
	KKASSERT(ip->vp == NULL);

	/*
	 * Remove the inode from any flush group, force it idle.  FLUSH
	 * and SETUP states have an inode ref.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_FLUSH:
		RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
		--ip->flush_group->refs;
		ip->flush_group = NULL;
		/* fall through */
	case HAMMER_FST_SETUP:
		hammer_rel(&ip->lock);
		ip->flush_state = HAMMER_FST_IDLE;
		/* fall through */
	case HAMMER_FST_IDLE:
		break;
	}

	/*
	 * There shouldn't be any associated vnode.  The unload needs at
	 * least one ref; if we do have a vp, steal its ip ref.
	 */
	if (ip->vp) {
		kprintf("hammer_destroy_inode_callback: Unexpected "
			"vnode association ip %p vp %p\n", ip, ip->vp);
		ip->vp->v_data = NULL;
		ip->vp = NULL;
	} else {
		hammer_ref(&ip->lock);
	}
	hammer_unload_inode(ip);
	return(0);
}

/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
 *			and not including size changes due to write-append
 *			(but other size changes are included).
 * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
 *			write-append.
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
{
	/*
	 * A ronly value of 0 or 2 does not trigger the assertion
	 * (2 is a special error state).
	 */
	KKASSERT(ip->hmp->ronly != 1 ||
		  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			    HAMMER_INODE_SDIRTY |
			    HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}
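
	/*
	 * NOTE: rsv_inodes above reserves flusher resources for the
	 * inode's eventual media update; the reservation is presumably
	 * released once the inode is synced or torn down (see the
	 * RSV_INODES unwinding in hammer_get_inode()).
	 */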

	/*
	 * Set the NEWINODE flag in the transaction if the inode
	 * transitions to a dirty state.  This is used to track
	 * the load on the inode cache.
	 */
	if (trans &&
	    (ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (flags & HAMMER_INODE_MODMASK)) {
		trans->flags |= HAMMER_TRANSF_NEWINODE;
	}

	ip->flags |= flags;
}

/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependency, then it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_mount_t hmp;
	hammer_flush_group_t flg;
	int good;

	/*
	 * next_flush_group is the first flush group we can place the inode
	 * in.  It may be NULL.  If it becomes full we append a new flush
	 * group and make that the next_flush_group.
	 */
	hmp = ip->hmp;
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->running == 0);
		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
			break;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		hammer_flusher_async(ip->hmp, flg);
	}
	if (flg == NULL) {
		flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
		hmp->next_flush_group = flg;
		RB_INIT(&flg->flush_tree);
		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
	}

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 *
	 * If we have a parent dependency we must still fall through
	 * so we can run it.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP &&
		    TAILQ_EMPTY(&ip->target_list)) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		if (ip->flush_state == HAMMER_FST_IDLE)
			return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flg, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip, 0, flg);

		if (good >= 0) {
			/*
			 * We can continue if good >= 0.  Determine how
			 * many records under our inode can be flushed (and
			 * mark them).
			 */
			hammer_flush_inode_core(ip, flg, flags);
		} else {
			/*
			 * Parent has no connectivity, tell it to flush
			 * us as soon as it does.
			 *
			 * The REFLUSH flag is also needed to trigger
			 * dependency wakeups.
			 */
			ip->flags |= HAMMER_INODE_CONN_DOWN |
				     HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 *
		 * The REFLUSH flag is also needed to trigger
		 * dependency wakeups.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp, flg);
		}
		break;
	}
}
1748 
1749 /*
1750  * Scan ip->target_list, which is a list of records owned by PARENTS to our
1751  * ip which reference our ip.
1752  *
1753  * XXX This is a huge mess of recursive code, but not one bit of it blocks
1754  *     so for now do not ref/deref the structures.  Note that if we use the
1755  *     ref/rel code later, the rel CAN block.
1756  */
1757 static int
1758 hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
1759 			   hammer_flush_group_t flg)
1760 {
1761 	hammer_record_t depend;
1762 	int good;
1763 	int r;
1764 
1765 	/*
1766 	 * If we hit our recursion limit and we have parent dependencies
1767 	 * We cannot continue.  Returning < 0 will cause us to be flagged
1768 	 * for reflush.  Returning -2 cuts off additional dependency checks
1769 	 * because they are likely to also hit the depth limit.
1770 	 *
1771 	 * We cannot return < 0 if there are no dependencies, as there might
1772 	 * not be anything to wake up (ip).
1773 	 */
1774 	if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
1775 		kprintf("HAMMER Warning: depth limit reached on "
1776 			"setup recursion, inode %p %016llx\n",
1777 			ip, (long long)ip->obj_id);
1778 		return(-2);
1779 	}
1780 
1781 	/*
1782 	 * Scan dependencies
1783 	 */
1784 	good = 0;
1785 	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
1786 		r = hammer_setup_parent_inodes_helper(depend, depth, flg);
1787 		KKASSERT(depend->target_ip == ip);
1788 		if (r < 0 && good == 0)
1789 			good = -1;
1790 		if (r > 0)
1791 			good = 1;
1792 
1793 		/*
1794 		 * If we failed due to the recursion depth limit then stop
1795 		 * now.
1796 		 */
1797 		if (r == -2)
1798 			break;
1799 	}
1800 	return(good);
1801 }
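
/*
 * Worked example of the aggregation above (illustrative): if the helper
 * returns 0, -1, 1 for three dependencies the final result is 1, because
 * any r > 0 latches good at 1 and it is never downgraded; for 0, -1, 0
 * the result is -1.  An r of -2 terminates the loop early but the
 * accumulated -1/0/1 value is still what the caller sees.
 */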
1802 
1803 /*
1804  * This helper function takes a record representing the dependency between
1805  * the parent inode and child inode.
1806  *
1807  * record->ip		= parent inode
1808  * record->target_ip	= child inode
1809  *
1810  * We are asked to recurse upwards and convert the record from SETUP
1811  * to FLUSH if possible.
1812  *
1813  * Return 1 if the record gives us connectivity
1814  *
1815  * Return 0 if the record is not relevant
1816  *
1817  * Return -1 if we can't resolve the dependency and there is no connectivity.
1818  */
1819 static int
1820 hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
1821 				  hammer_flush_group_t flg)
1822 {
1823 	hammer_mount_t hmp;
1824 	hammer_inode_t pip;
1825 	int good;
1826 
1827 	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1828 	pip = record->ip;
1829 	hmp = pip->hmp;
1830 
1831 	/*
1832 	 * If the record is already flushing, is it in our flush group?
1833 	 *
1834 	 * If it is in our flush group but it is a general record or a
1835 	 * delete-on-disk, it does not improve our connectivity (return 0),
1836 	 * and if the target inode is not trying to destroy itself we can't
1837 	 * allow the operation yet anyway (the second return -1).
1838 	 */
1839 	if (record->flush_state == HAMMER_FST_FLUSH) {
1840 		/*
1841 		 * If not in our flush group ask the parent to reflush
1842 		 * us as soon as possible.
1843 		 */
1844 		if (record->flush_group != flg) {
1845 			pip->flags |= HAMMER_INODE_REFLUSH;
1846 			record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1847 			return(-1);
1848 		}
1849 
1850 		/*
1851 		 * If in our flush group everything is already set up,
1852 		 * just return whether the record will improve our
1853 		 * visibility or not.
1854 		 */
1855 		if (record->type == HAMMER_MEM_RECORD_ADD)
1856 			return(1);
1857 		return(0);
1858 	}
1859 
1860 	/*
1861 	 * It must be a setup record.  Try to resolve the setup dependencies
1862 	 * by recursing upwards so we can place ip on the flush list.
1863 	 *
1864 	 * Limit ourselves to 20 levels of recursion to avoid blowing out
1865 	 * the kernel stack.  If we hit the recursion limit we can't flush
1866 	 * until the parent flushes.  The parent will flush independently
1867 	 * on its own and ultimately a deep recursion will be resolved.
1868 	 */
1869 	KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1870 
1871 	good = hammer_setup_parent_inodes(pip, depth + 1, flg);
1872 
1873 	/*
1874 	 * If good < 0 the parent has no connectivity and we cannot safely
1875 	 * flush the directory entry, which also means we can't flush our
1876 	 * ip.  Flag us for downward recursion once the parent's
1877 	 * connectivity is resolved.  Flag the parent for [re]flush or it
1878 	 * may not check for downward recursions.
1879 	 */
1880 	if (good < 0) {
1881 		pip->flags |= HAMMER_INODE_REFLUSH;
1882 		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1883 		return(good);
1884 	}
1885 
1886 	/*
1887 	 * We are go, place the parent inode in a flushing state so we can
1888 	 * place its record in a flushing state.  Note that the parent
1889 	 * may already be flushing.  The record must be in the same flush
1890 	 * group as the parent.
1891 	 */
1892 	if (pip->flush_state != HAMMER_FST_FLUSH)
1893 		hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
1894 	KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
1895 
1896 	/*
1897 	 * It is possible for a rename to create a loop in the recursion
1898 	 * and revisit a record.  This will result in the record being
1899 	 * placed in a flush state unexpectedly.  This check deals with
1900 	 * the case.
1901 	 */
1902 	if (record->flush_state == HAMMER_FST_FLUSH) {
1903 		if (record->type == HAMMER_MEM_RECORD_ADD)
1904 			return(1);
1905 		return(0);
1906 	}
1907 
1908 	KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1909 
1910 #if 0
1911 	if (record->type == HAMMER_MEM_RECORD_DEL &&
1912 	    (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
1913 		/*
1914 		 * Regardless of flushing state we cannot sync this path if the
1915 		 * record represents a delete-on-disk but the target inode
1916 		 * is not ready to sync its own deletion.
1917 		 *
1918 		 * XXX need to count effective nlinks to determine whether
1919 		 * the flush is ok, otherwise removing a hardlink will
1920 		 * just leave the DEL record to rot.
1921 		 */
1922 		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
1923 		return(-1);
1924 	} else
1925 #endif
1926 	if (pip->flush_group == flg) {
1927 		/*
1928 		 * Because we have not calculated nlinks yet we can just
1929 		 * set records to the flush state if the parent is in
1930 		 * the same flush group as we are.
1931 		 */
1932 		record->flush_state = HAMMER_FST_FLUSH;
1933 		record->flush_group = flg;
1934 		++record->flush_group->refs;
1935 		hammer_ref(&record->lock);
1936 
1937 		/*
1938 		 * A general directory-add contributes to our visibility.
1939 		 *
1940 		 * Otherwise it is probably a directory-delete or
1941 		 * delete-on-disk record and does not contribute to our
1942 		 * visibility (but we can still flush it).
1943 		 */
1944 		if (record->type == HAMMER_MEM_RECORD_ADD)
1945 			return(1);
1946 		return(0);
1947 	} else {
1948 		/*
1949 		 * If the parent is not in our flush group we cannot
1950 		 * flush this record yet, there is no visibility.
1951 		 * We tell the parent to reflush and mark ourselves
1952 		 * so the parent knows it should flush us too.
1953 		 */
1954 		pip->flags |= HAMMER_INODE_REFLUSH;
1955 		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1956 		return(-1);
1957 	}
1958 }
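
/*
 * e.g. (illustrative): flushing a newly created file pulls in the
 * directory-entry ADD record owned by its parent directory.  If the
 * parent can be placed in the same flush group the record is moved to
 * FST_FLUSH and the helper returns 1 (connectivity); if the parent is
 * stuck in a different flush group the child is flagged CONN_DOWN and
 * must wait for the parent to flush and reflush it.
 */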
1959 
1960 /*
1961  * This is the core routine placing an inode into the FST_FLUSH state.
1962  */
1963 static void
1964 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
1965 {
1966 	int go_count;
1967 
1968 	/*
1969 	 * Set flush state and prevent the flusher from cycling into
1970 	 * the next flush group.  Do not place the ip on the list yet.
1971 	 * Inodes not in the idle state get an extra reference.
1972 	 */
1973 	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
1974 	if (ip->flush_state == HAMMER_FST_IDLE)
1975 		hammer_ref(&ip->lock);
1976 	ip->flush_state = HAMMER_FST_FLUSH;
1977 	ip->flush_group = flg;
1978 	++ip->hmp->flusher.group_lock;
1979 	++ip->hmp->count_iqueued;
1980 	++hammer_count_iqueued;
1981 	++flg->total_count;
1982 	hammer_redo_fifo_start_flush(ip);
1983 
1984 	/*
1985 	 * If the flush group reaches the autoflush limit we want to signal
1986 	 * the flusher.  This is particularly important for remove()s.
1987 	 *
1988 	 * If the default hammer_limit_reclaim is changed via sysctl
1989 	 * make sure we don't hit a degenerate case where we don't start
1990 	 * a flush but are blocked on further inode ops.
1991 	 */
1992 	if (flg->total_count == hammer_autoflush ||
1993 	    flg->total_count >= hammer_limit_reclaim / 4)
1994 		flags |= HAMMER_FLUSH_SIGNAL;
1995 
1996 #if 0
1997 	/*
1998 	 * We need to be able to vfsync/truncate from the backend.
1999 	 *
2000 	 * XXX Any truncation from the backend will acquire the vnode
2001 	 *     independently.
2002 	 */
2003 	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
2004 	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
2005 		ip->flags |= HAMMER_INODE_VHELD;
2006 		vref(ip->vp);
2007 	}
2008 #endif
2009 
2010 	/*
2011 	 * Figure out how many in-memory records we can actually flush
2012 	 * (not including inode meta-data, buffers, etc).
2013 	 */
2014 	KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
2015 	if (flags & HAMMER_FLUSH_RECURSION) {
2016 		/*
2017 		 * If this is an upwards recursion we do not want to
2018 		 * recurse down again!
2019 		 */
2020 		go_count = 1;
2021 #if 0
2022 	} else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2023 		/*
2024 		 * No new records are added if we must complete a flush
2025 		 * from a previous cycle, but we do have to move the records
2026 		 * from the previous cycle to the current one.
2027 		 */
2028 #if 0
2029 		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2030 				   hammer_syncgrp_child_callback, NULL);
2031 #endif
2032 		go_count = 1;
2033 #endif
2034 	} else {
2035 		/*
2036 		 * Normal flush, scan records and bring them into the flush.
2037 		 * Directory adds and deletes are usually skipped (they are
2038 		 * grouped with the related inode rather than with the
2039 		 * directory).
2040 		 *
2041 		 * go_count can be negative, which means the scan aborted
2042 		 * due to the flush group being over-full and we should
2043 		 * flush what we have.
2044 		 */
2045 		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2046 				   hammer_setup_child_callback, NULL);
2047 	}
2048 
2049 	/*
2050 	 * This is a more involved test that includes go_count.  If we
2051 	 * can't flush, flag the inode and return.  If go_count is 0 we
2052 	 * were unable to flush any records in our rec_tree and
2053 	 * must ignore the XDIRTY flag.
2054 	 */
2055 	if (go_count == 0) {
2056 		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
2057 			--ip->hmp->count_iqueued;
2058 			--hammer_count_iqueued;
2059 
2060 			--flg->total_count;
2061 			ip->flush_state = HAMMER_FST_SETUP;
2062 			ip->flush_group = NULL;
2063 #if 0
2064 			if (ip->flags & HAMMER_INODE_VHELD) {
2065 				ip->flags &= ~HAMMER_INODE_VHELD;
2066 				vrele(ip->vp);
2067 			}
2068 #endif
2069 
2070 			/*
2071 			 * REFLUSH is needed to trigger dependency wakeups
2072 			 * when an inode is in SETUP.
2073 			 */
2074 			ip->flags |= HAMMER_INODE_REFLUSH;
2075 			if (flags & HAMMER_FLUSH_SIGNAL) {
2076 				ip->flags |= HAMMER_INODE_RESIGNAL;
2077 				hammer_flusher_async(ip->hmp, flg);
2078 			}
2079 			if (--ip->hmp->flusher.group_lock == 0)
2080 				wakeup(&ip->hmp->flusher.group_lock);
2081 			return;
2082 		}
2083 	}
2084 
2085 	/*
2086 	 * Snapshot the state of the inode for the backend flusher.
2087 	 *
2088 	 * We continue to retain save_trunc_off even when all truncations
2089 	 * have been resolved as an optimization to determine if we can
2090 	 * skip the B-Tree lookup for overwrite deletions.
2091 	 *
2092 	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
2093 	 * and stays in ip->flags.  Once set, it stays set until the
2094 	 * inode is destroyed.
2095 	 */
2096 	if (ip->flags & HAMMER_INODE_TRUNCATED) {
2097 		KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
2098 		ip->sync_trunc_off = ip->trunc_off;
2099 		ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
2100 		ip->flags &= ~HAMMER_INODE_TRUNCATED;
2101 		ip->sync_flags |= HAMMER_INODE_TRUNCATED;
2102 
2103 		/*
2104 		 * The save_trunc_off used to cache whether the B-Tree
2105 		 * holds any records past that point is not used until
2106 		 * after the truncation has succeeded, so we can safely
2107 		 * set it now.
2108 		 */
2109 		if (ip->save_trunc_off > ip->sync_trunc_off)
2110 			ip->save_trunc_off = ip->sync_trunc_off;
2111 	}
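
	/*
	 * e.g. (illustrative): a front-end truncate to offset 100 leaves
	 * trunc_off == 100 with TRUNCATED set.  The snapshot above moves
	 * the 100 into sync_trunc_off for the backend and re-arms
	 * trunc_off at the maximum offset, so any truncation the
	 * front-end performs while the flush runs accumulates separately.
	 */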
2112 	ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
2113 			   ~HAMMER_INODE_TRUNCATED);
2114 	ip->sync_ino_leaf = ip->ino_leaf;
2115 	ip->sync_ino_data = ip->ino_data;
2116 	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
2117 #ifdef DEBUG_TRUNCATE
2118 	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
2119 		kprintf("truncateS %016llx\n", ip->sync_trunc_off);
2120 #endif
2121 
2122 	/*
2123 	 * The flusher list inherits our inode and reference.
2124 	 */
2125 	KKASSERT(flg->running == 0);
2126 	RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
2127 	if (--ip->hmp->flusher.group_lock == 0)
2128 		wakeup(&ip->hmp->flusher.group_lock);
2129 
2130 	if (flags & HAMMER_FLUSH_SIGNAL) {
2131 		hammer_flusher_async(ip->hmp, flg);
2132 	}
2133 }
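
/*
 * Note on HAMMER_FLUSH_RECURSION (sketch): when
 * hammer_setup_parent_inodes_helper() pulls a parent directory into the
 * flush group it calls
 *
 *	hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
 *
 * which short-circuits go_count to 1 above rather than scanning pip's
 * rec_tree, so an upward recursion does not immediately turn around and
 * recurse downward again.
 */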
2134 
2135 /*
2136  * Callback for scan of ip->rec_tree.  Try to include each record in our
2137  * flush.  ip->flush_group has been set but the inode has not yet been
2138  * moved into a flushing state.
2139  *
2140  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
2141  * both inodes.
2142  *
2143  * We return 1 for any record placed or found in FST_FLUSH, which prevents
2144  * the caller from shortcutting the flush.
2145  */
2146 static int
2147 hammer_setup_child_callback(hammer_record_t rec, void *data)
2148 {
2149 	hammer_flush_group_t flg;
2150 	hammer_inode_t target_ip;
2151 	hammer_inode_t ip;
2152 	int r;
2153 
2154 	/*
2155 	 * Records deleted or committed by the backend are ignored.
2156 	 * Note that the flush detects deleted frontend records at
2157 	 * multiple points to deal with races.  This is just the first
2158 	 * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
2159 	 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
2160 	 * messes up link-count calculations.
2161 	 *
2162 	 * NOTE: Don't get confused between record deletion and, say,
2163 	 * directory entry deletion.  The deletion of a directory entry
2164 	 * which is on-media has nothing to do with the record deletion
2165 	 * flags.
2166 	 */
2167 	if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
2168 			  HAMMER_RECF_COMMITTED)) {
2169 		if (rec->flush_state == HAMMER_FST_FLUSH) {
2170 			KKASSERT(rec->flush_group == rec->ip->flush_group);
2171 			r = 1;
2172 		} else {
2173 			r = 0;
2174 		}
2175 		return(r);
2176 	}
2177 
2178 	/*
2179 	 * If the record is in an idle state it has no dependencies and
2180 	 * can be flushed.
2181 	 */
2182 	ip = rec->ip;
2183 	flg = ip->flush_group;
2184 	r = 0;
2185 
2186 	switch(rec->flush_state) {
2187 	case HAMMER_FST_IDLE:
2188 		/*
2189 		 * The record has no setup dependency, we can flush it.
2190 		 */
2191 		KKASSERT(rec->target_ip == NULL);
2192 		rec->flush_state = HAMMER_FST_FLUSH;
2193 		rec->flush_group = flg;
2194 		++flg->refs;
2195 		hammer_ref(&rec->lock);
2196 		r = 1;
2197 		break;
2198 	case HAMMER_FST_SETUP:
2199 		/*
2200 		 * The record has a setup dependency.  These are typically
2201 		 * directory entry adds and deletes.  Such entries will be
2202 		 * flushed when their inodes are flushed so we do not
2203 		 * usually have to add them to the flush here.  However,
2204 		 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2205 		 * it is asking us to flush this record (and it).
2206 		 */
2207 		target_ip = rec->target_ip;
2208 		KKASSERT(target_ip != NULL);
2209 		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
2210 
2211 		/*
2212 		 * If the target IP is already flushing in our group
2213 		 * we could associate the record, but target_ip has
2214 		 * already synced ino_data to sync_ino_data and we
2215 		 * would also have to adjust nlinks.   Plus there are
2216 		 * ordering issues for adds and deletes.
2217 		 *
2218 		 * Reflush downward if this is an ADD, and upward if
2219 		 * this is a DEL.
2220 		 */
2221 		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
2222 			if (rec->type == HAMMER_MEM_RECORD_ADD)
2223 				ip->flags |= HAMMER_INODE_REFLUSH;
2224 			else
2225 				target_ip->flags |= HAMMER_INODE_REFLUSH;
2226 			break;
2227 		}
2228 
2229 		/*
2230 		 * Target IP is not yet flushing.  This can get complex
2231 		 * because we have to be careful about the recursion.
2232 		 *
2233 		 * Directories create an issue for us in that if a flush
2234 		 * of a directory is requested the expectation is to flush
2235 		 * any pending directory entries, but this will cause the
2236 		 * related inodes to recursively flush as well.  We can't
2237 		 * really defer the operation so just get as many as we
2238 		 * can and pick up the rest in later flush cycles.
2239 		 */
2240 #if 0
2241 		if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
2242 		    (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
2243 			/*
2244 			 * We aren't reclaiming and the target ip was not
2245 			 * previously prevented from flushing due to this
2246 			 * record dependency.  Do not flush this record.
2247 			 */
2248 			/*r = 0;*/
2249 		} else
2250 #endif
2251 		if (flg->total_count + flg->refs >
2252 			   ip->hmp->undo_rec_limit) {
2253 			/*
2254 			 * Our flush group is over-full and we risk blowing
2255 			 * out the UNDO FIFO.  Stop the scan, flush what we
2256 			 * have, then reflush the directory.
2257 			 *
2258 			 * The directory may be forced through multiple
2259 			 * flush groups before it can be completely
2260 			 * flushed.
2261 			 */
2262 			ip->flags |= HAMMER_INODE_RESIGNAL |
2263 				     HAMMER_INODE_REFLUSH;
2264 			r = -1;
2265 		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
2266 			/*
2267 			 * If the target IP is not flushing we can force
2268 			 * it to flush, even if it is unable to write out
2269 			 * any of its own records we have at least one in
2270 			 * hand that we CAN deal with.
2271 			 */
2272 			rec->flush_state = HAMMER_FST_FLUSH;
2273 			rec->flush_group = flg;
2274 			++flg->refs;
2275 			hammer_ref(&rec->lock);
2276 			hammer_flush_inode_core(target_ip, flg,
2277 						HAMMER_FLUSH_RECURSION);
2278 			r = 1;
2279 		} else {
2280 			/*
2281 			 * General or delete-on-disk record.
2282 			 *
2283 			 * XXX this needs help.  If a delete-on-disk we could
2284 			 * disconnect the target.  If the target has its own
2285 			 * dependencies they really need to be flushed.
2286 			 *
2287 			 * XXX
2288 			 */
2289 			rec->flush_state = HAMMER_FST_FLUSH;
2290 			rec->flush_group = flg;
2291 			++flg->refs;
2292 			hammer_ref(&rec->lock);
2293 			hammer_flush_inode_core(target_ip, flg,
2294 						HAMMER_FLUSH_RECURSION);
2295 			r = 1;
2296 		}
2297 		break;
2298 	case HAMMER_FST_FLUSH:
2299 		/*
2300 		 * The flush_group should already match.
2301 		 */
2302 		KKASSERT(rec->flush_group == flg);
2303 		r = 1;
2304 		break;
2305 	}
2306 	return(r);
2307 }
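
/*
 * Worked example (illustrative, assuming RB_SCAN accumulates the
 * callback's non-negative returns and aborts on a negative one, which
 * is how go_count is interpreted above): an inode with two IDLE data
 * records and one SETUP directory record whose target_ip is already in
 * FST_FLUSH yields go_count == 2; the data records join the flush
 * group while the directory record is deferred via the REFLUSH flags.
 */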
2308 
2309 #if 0
2310 /*
2311  * This version just moves records already in a flush state to the new
2312  * flush group and that is it.
2313  */
2314 static int
2315 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
2316 {
2317 	hammer_inode_t ip = rec->ip;
2318 
2319 	switch(rec->flush_state) {
2320 	case HAMMER_FST_FLUSH:
2321 		KKASSERT(rec->flush_group == ip->flush_group);
2322 		break;
2323 	default:
2324 		break;
2325 	}
2326 	return(0);
2327 }
2328 #endif
2329 
2330 /*
2331  * Wait for a previously queued flush to complete.
2332  *
2333  * If a critical error occurred we don't try to wait.
2334  */
2335 void
2336 hammer_wait_inode(hammer_inode_t ip)
2337 {
2338 	hammer_flush_group_t flg;
2339 
2340 	flg = NULL;
2341 	if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2342 		while (ip->flush_state != HAMMER_FST_IDLE &&
2343 		       (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2344 			if (ip->flush_state == HAMMER_FST_SETUP)
2345 				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2346 			if (ip->flush_state != HAMMER_FST_IDLE) {
2347 				ip->flags |= HAMMER_INODE_FLUSHW;
2348 				tsleep(&ip->flags, 0, "hmrwin", 0);
2349 			}
2350 		}
2351 	}
2352 }
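
/*
 * e.g. a sketch of a typical front-end caller, such as an fsync-style
 * VOP:
 *
 *	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
 *	hammer_wait_inode(ip);
 *
 * The signal flag kicks the flusher immediately; the loop above
 * re-signals if the inode is still stuck in SETUP and sleeps on
 * ip->flags until hammer_flush_inode_done() issues the wakeup.
 */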
2353 
2354 /*
2355  * Called by the backend code when a flush has been completed.
2356  * The inode has already been removed from the flush list.
2357  *
2358  * A pipelined flush can occur, in which case we must re-enter the
2359  * inode on the list and re-copy its fields.
2360  */
2361 void
2362 hammer_flush_inode_done(hammer_inode_t ip, int error)
2363 {
2364 	hammer_mount_t hmp;
2365 	int dorel;
2366 
2367 	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2368 
2369 	hmp = ip->hmp;
2370 
2371 	/*
2372 	 * Auto-reflush if the backend could not completely flush
2373 	 * the inode.  This fixes a case where a deferred buffer flush
2374 	 * could cause fsync to return early.
2375 	 */
2376 	if (ip->sync_flags & HAMMER_INODE_MODMASK)
2377 		ip->flags |= HAMMER_INODE_REFLUSH;
2378 
2379 	/*
2380 	 * Merge left-over flags back into the frontend and fix the state.
2381 	 * Incomplete truncations are retained by the backend.
2382 	 */
2383 	ip->error = error;
2384 	ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2385 	ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2386 
2387 	/*
2388 	 * The backend may have adjusted nlinks, so if the adjusted nlinks
2389 	 * does not match the frontend, set the frontend's DDIRTY flag again.
2390 	 */
2391 	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2392 		ip->flags |= HAMMER_INODE_DDIRTY;
2393 
2394 	/*
2395 	 * Fix up the dirty buffer status.
2396 	 */
2397 	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2398 		ip->flags |= HAMMER_INODE_BUFS;
2399 	}
2400 	hammer_redo_fifo_end_flush(ip);
2401 
2402 	/*
2403 	 * Re-set the XDIRTY flag if some of the inode's in-memory records
2404 	 * could not be flushed.
2405 	 */
2406 	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2407 		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2408 		 (!RB_EMPTY(&ip->rec_tree) &&
2409 		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2410 
2411 	/*
2412 	 * Do not lose track of inodes which no longer have vnode
2413 	 * associations, otherwise they may never get flushed again.
2414 	 *
2415 	 * The reflush flag can be set superfluously, causing extra pain
2416 	 * for no reason.  If the inode is no longer modified it no longer
2417 	 * needs to be flushed.
2418 	 */
2419 	if (ip->flags & HAMMER_INODE_MODMASK) {
2420 		if (ip->vp == NULL)
2421 			ip->flags |= HAMMER_INODE_REFLUSH;
2422 	} else {
2423 		ip->flags &= ~HAMMER_INODE_REFLUSH;
2424 	}
2425 
2426 	/*
2427 	 * Adjust the flush state.
2428 	 */
2429 	if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2430 		/*
2431 		 * We were unable to flush out all our records, leave the
2432 		 * inode in a flush state and in the current flush group.
2433 		 * The flush group will be re-run.
2434 		 *
2435 		 * This occurs if the UNDO block gets too full or there is
2436 		 * too much dirty meta-data; it allows the flusher to
2437 		 * finalize the UNDO block and then re-flush.
2438 		 */
2439 		ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2440 		dorel = 0;
2441 	} else {
2442 		/*
2443 		 * Remove from the flush_group
2444 		 */
2445 		RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
2446 		ip->flush_group = NULL;
2447 
2448 #if 0
2449 		/*
2450 		 * Clean up the vnode ref and tracking counts.
2451 		 */
2452 		if (ip->flags & HAMMER_INODE_VHELD) {
2453 			ip->flags &= ~HAMMER_INODE_VHELD;
2454 			vrele(ip->vp);
2455 		}
2456 #endif
2457 		--hmp->count_iqueued;
2458 		--hammer_count_iqueued;
2459 
2460 		/*
2461 		 * And adjust the state.
2462 		 */
2463 		if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2464 			ip->flush_state = HAMMER_FST_IDLE;
2465 			dorel = 1;
2466 		} else {
2467 			ip->flush_state = HAMMER_FST_SETUP;
2468 			dorel = 0;
2469 		}
2470 
2471 		/*
2472 		 * If the frontend is waiting for a flush to complete,
2473 		 * wake it up.
2474 		 */
2475 		if (ip->flags & HAMMER_INODE_FLUSHW) {
2476 			ip->flags &= ~HAMMER_INODE_FLUSHW;
2477 			wakeup(&ip->flags);
2478 		}
2479 
2480 		/*
2481 		 * If the frontend made more changes and requested another
2482 		 * flush, then try to get it running.
2483 		 *
2484 		 * Reflushes are aborted when the inode is errored out.
2485 		 */
2486 		if (ip->flags & HAMMER_INODE_REFLUSH) {
2487 			ip->flags &= ~HAMMER_INODE_REFLUSH;
2488 			if (ip->flags & HAMMER_INODE_RESIGNAL) {
2489 				ip->flags &= ~HAMMER_INODE_RESIGNAL;
2490 				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2491 			} else {
2492 				hammer_flush_inode(ip, 0);
2493 			}
2494 		}
2495 	}
2496 
2497 	/*
2498 	 * If we have no parent dependencies we can clear CONN_DOWN
2499 	 */
2500 	if (TAILQ_EMPTY(&ip->target_list))
2501 		ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2502 
2503 	/*
2504 	 * If the inode is now clean drop the space reservation.
2505 	 */
2506 	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2507 	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
2508 		ip->flags &= ~HAMMER_INODE_RSV_INODES;
2509 		--hmp->rsv_inodes;
2510 	}
2511 
2512 	if (dorel)
2513 		hammer_rel_inode(ip, 0);
2514 }
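
/*
 * Illustrative consequence of the flag merge above: an incomplete
 * truncation stays in sync_flags (TRUNCATED) and is retried on the
 * next flush, while any other leftover sync_flags bits (e.g. a DDIRTY
 * the backend did not get to) migrate back into ip->flags and trigger
 * the auto-reflush at the top of hammer_flush_inode_done().
 */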
2515 
2516 /*
2517  * Called from hammer_sync_inode() to synchronize in-memory records
2518  * to the media.
2519  */
2520 static int
2521 hammer_sync_record_callback(hammer_record_t record, void *data)
2522 {
2523 	hammer_cursor_t cursor = data;
2524 	hammer_transaction_t trans = cursor->trans;
2525 	hammer_mount_t hmp = trans->hmp;
2526 	int error;
2527 
2528 	/*
2529 	 * Skip records that do not belong to the current flush.
2530 	 */
2531 	++hammer_stats_record_iterations;
2532 	if (record->flush_state != HAMMER_FST_FLUSH)
2533 		return(0);
2534 
2535 #if 1
2536 	if (record->flush_group != record->ip->flush_group) {
2537 		kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group, record->ip->flush_group);
2538 		if (hammer_debug_critical)
2539 			Debugger("blah2");
2540 		return(0);
2541 	}
2542 #endif
2543 	KKASSERT(record->flush_group == record->ip->flush_group);
2544 
2545 	/*
2546 	 * Interlock the record using the BE flag.  Once BE is set the
2547 	 * frontend cannot change the state of FE.
2548 	 *
2549 	 * NOTE: If FE is set prior to us setting BE we still sync the
2550 	 * record out, but the flush completion code converts it to
2551 	 * a delete-on-disk record instead of destroying it.
2552 	 */
2553 	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2554 	record->flags |= HAMMER_RECF_INTERLOCK_BE;
2555 
2556 	/*
2557 	 * The backend has already disposed of the record.
2558 	 */
2559 	if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
2560 		error = 0;
2561 		goto done;
2562 	}
2563 
2564 	/*
2565 	 * If the whole inode is being deleted, all on-disk records will
2566 	 * be deleted very soon; we can't sync any new records to disk
2567 	 * because they will be deleted in the same transaction they were
2568 	 * created in (delete_tid == create_tid), which will assert.
2569 	 *
2570 	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
2571 	 * that we currently panic on.
2572 	 */
2573 	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2574 		switch(record->type) {
2575 		case HAMMER_MEM_RECORD_DATA:
2576 			/*
2577 			 * We don't have to do anything, if the record was
2578 			 * committed the space will have been accounted for
2579 			 * in the blockmap.
2580 			 */
2581 			/* fall through */
2582 		case HAMMER_MEM_RECORD_GENERAL:
2583 			/*
2584 			 * Set deleted-by-backend flag.  Do not set the
2585 			 * backend committed flag, because we are throwing
2586 			 * the record away.
2587 			 */
2588 			record->flags |= HAMMER_RECF_DELETED_BE;
2589 			++record->ip->rec_generation;
2590 			error = 0;
2591 			goto done;
2592 		case HAMMER_MEM_RECORD_ADD:
2593 			panic("hammer_sync_record_callback: illegal add "
2594 			      "during inode deletion record %p", record);
2595 			break; /* NOT REACHED */
2596 		case HAMMER_MEM_RECORD_INODE:
2597 			panic("hammer_sync_record_callback: attempt to "
2598 			      "sync inode record %p?", record);
2599 			break; /* NOT REACHED */
2600 		case HAMMER_MEM_RECORD_DEL:
2601 			/*
2602 			 * Follow through and issue the on-disk deletion
2603 			 */
2604 			break;
2605 		}
2606 	}
2607 
2608 	/*
2609 	 * If DELETED_FE is set special handling is needed for directory
2610 	 * entries.  Dependent pieces related to the directory entry may
2611 	 * have already been synced to disk.  If this occurs we have to
2612 	 * sync the directory entry and then change the in-memory record
2613 	 * from an ADD to a DELETE to cover the fact that it's been
2614 	 * deleted by the frontend.
2615 	 *
2616 	 * A directory delete covering record (MEM_RECORD_DEL) can never
2617 	 * be deleted by the frontend.
2618 	 *
2619 	 * Any other record type (aka DATA) can be deleted by the frontend.
2620 	 * XXX At the moment the flusher must skip it because there may
2621 	 * be another data record in the flush group for the same block,
2622 	 * meaning that some frontend data changes can leak into the backend's
2623 	 * synchronization point.
2624 	 */
2625 	if (record->flags & HAMMER_RECF_DELETED_FE) {
2626 		if (record->type == HAMMER_MEM_RECORD_ADD) {
2627 			/*
2628 			 * Convert a front-end deleted directory-add to
2629 			 * a directory-delete entry later.
2630 			 */
2631 			record->flags |= HAMMER_RECF_CONVERT_DELETE;
2632 		} else {
2633 			/*
2634 			 * Dispose of the record (race case).  Mark as
2635 			 * deleted by backend (and not committed).
2636 			 */
2637 			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2638 			record->flags |= HAMMER_RECF_DELETED_BE;
2639 			++record->ip->rec_generation;
2640 			error = 0;
2641 			goto done;
2642 		}
2643 	}
2644 
2645 	/*
2646 	 * Assign the create_tid for new records.  Deletions already
2647 	 * have the record's entire key properly set up.
2648 	 */
2649 	if (record->type != HAMMER_MEM_RECORD_DEL) {
2650 		record->leaf.base.create_tid = trans->tid;
2651 		record->leaf.create_ts = trans->time32;
2652 	}
2653 
2654 	/*
2655 	 * This actually moves the record to the on-media B-Tree.  We
2656 	 * must also generate REDO_TERM entries in the UNDO/REDO FIFO
2657 	 * indicating that the related REDO_WRITE(s) have been committed.
2658 	 *
2659 	 * During recovery any REDO_TERM's within the nominal recovery span
2660 	 * are ignored since the related meta-data is being undone, causing
2661 	 * any matching REDO_WRITEs to execute.  The REDO_TERMs outside
2662 	 * the nominal recovery span will match against REDO_WRITEs and
2663 	 * prevent them from being executed (because the meta-data has
2664 	 * already been synchronized).
2665 	 */
2666 	if (record->flags & HAMMER_RECF_REDO) {
2667 		KKASSERT(record->type == HAMMER_MEM_RECORD_DATA);
2668 		hammer_generate_redo(trans, record->ip,
2669 				     record->leaf.base.key -
2670 					 record->leaf.data_len,
2671 				     HAMMER_REDO_TERM_WRITE,
2672 				     NULL,
2673 				     record->leaf.data_len);
2674 	}
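	/*
	 * e.g. (illustrative; data record keys are end offsets, which is
	 * why the base offset is computed as key - data_len above): a
	 * 16KB buffer written at file offset 0 carries
	 * leaf.base.key == 16384 and leaf.data_len == 16384, so the
	 * REDO_TERM_WRITE is generated for offset 0, matching the
	 * REDO_WRITE queued by the front-end when the write occurred.
	 */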
2675 	for (;;) {
2676 		error = hammer_ip_sync_record_cursor(cursor, record);
2677 		if (error != EDEADLK)
2678 			break;
2679 		hammer_done_cursor(cursor);
2680 		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2681 					   record->ip);
2682 		if (error)
2683 			break;
2684 	}
2685 	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2686 
2687 	if (error)
2688 		error = -error;
2689 done:
2690 	hammer_flush_record_done(record, error);
2691 
2692 	/*
2693 	 * Do partial finalization if we have built up too many dirty
2694 	 * buffers.  Otherwise a buffer cache deadlock can occur when
2695 	 * doing things like creating tens of thousands of tiny files.
2696 	 *
2697 	 * We must release our cursor lock to avoid a 3-way deadlock
2698 	 * due to the exclusive sync lock the finalizer must get.
2699 	 *
2700 	 * WARNING: See warnings in hammer_unlock_cursor() function.
2701 	 */
2702 	if (hammer_flusher_meta_limit(hmp)) {
2703 		hammer_unlock_cursor(cursor);
2704 		hammer_flusher_finalize(trans, 0);
2705 		hammer_lock_cursor(cursor);
2706 	}
2707 
2708 	return(error);
2709 }
2710 
2711 /*
2712  * Backend function called by the flusher to sync an inode to media.
2713  */
2714 int
2715 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2716 {
2717 	struct hammer_cursor cursor;
2718 	hammer_node_t tmp_node;
2719 	hammer_record_t depend;
2720 	hammer_record_t next;
2721 	int error, tmp_error;
2722 	u_int64_t nlinks;
2723 
2724 	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2725 		return(0);
2726 
2727 	error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2728 	if (error)
2729 		goto done;
2730 
2731 	/*
2732 	 * Any directory records referencing this inode which are not in
2733 	 * our current flush group must adjust our nlink count for the
2734 	 * purposes of synchronizing to disk.
2735 	 *
2736 	 * Records which are in our flush group can be unlinked from our
2737 	 * inode now, potentially allowing the inode to be physically
2738 	 * deleted.
2739 	 *
2740 	 * This cannot block.
2741 	 */
2742 	nlinks = ip->ino_data.nlinks;
2743 	next = TAILQ_FIRST(&ip->target_list);
2744 	while ((depend = next) != NULL) {
2745 		next = TAILQ_NEXT(depend, target_entry);
2746 		if (depend->flush_state == HAMMER_FST_FLUSH &&
2747 		    depend->flush_group == ip->flush_group) {
2748 			/*
2749 			 * If this is an ADD that was deleted by the frontend
2750 			 * the frontend nlinks count will have already been
2751 			 * decremented, but the backend is going to sync its
2752 			 * directory entry and must account for it.  The
2753 			 * record will be converted to a delete-on-disk when
2754 			 * it gets synced.
2755 			 *
2756 			 * If the ADD was not deleted by the frontend we
2757 			 * can remove the dependency from our target_list.
2758 			 */
2759 			if (depend->flags & HAMMER_RECF_DELETED_FE) {
2760 				++nlinks;
2761 			} else {
2762 				TAILQ_REMOVE(&ip->target_list, depend,
2763 					     target_entry);
2764 				depend->target_ip = NULL;
2765 			}
2766 		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2767 			/*
2768 			 * Not part of our flush group and not deleted by
2769 			 * the front-end, adjust the link count synced to
2770 			 * the media (undo what the frontend did when it
2771 			 * queued the record).
2772 			 */
2773 			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2774 			switch(depend->type) {
2775 			case HAMMER_MEM_RECORD_ADD:
2776 				--nlinks;
2777 				break;
2778 			case HAMMER_MEM_RECORD_DEL:
2779 				++nlinks;
2780 				break;
2781 			default:
2782 				break;
2783 			}
2784 		}
2785 	}
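
	/*
	 * e.g. (illustrative): creating a hardlink bumps the front-end
	 * nlinks and queues an ADD record.  If that ADD did not make it
	 * into this flush group the loop above decrements the nlinks
	 * value synced to the media, undoing the front-end adjustment
	 * until the flush group containing the ADD is run.
	 */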
2786 
2787 	/*
2788 	 * Set dirty if we had to modify the link count.
2789 	 */
2790 	if (ip->sync_ino_data.nlinks != nlinks) {
2791 		KKASSERT((int64_t)nlinks >= 0);
2792 		ip->sync_ino_data.nlinks = nlinks;
2793 		ip->sync_flags |= HAMMER_INODE_DDIRTY;
2794 	}
2795 
2796 	/*
2797 	 * If there is a truncation queued, destroy any data past the (aligned)
2798 	 * truncation point.  Userland will have dealt with the buffer
2799 	 * containing the truncation point for us.
2800 	 *
2801 	 * We don't flush pending frontend data buffers until after we've
2802 	 * dealt with the truncation.
2803 	 */
2804 	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2805 		/*
2806 		 * Interlock trunc_off.  The VOP front-end may continue to
2807 		 * make adjustments to it while we are blocked.
2808 		 */
2809 		off_t trunc_off;
2810 		off_t aligned_trunc_off;
2811 		int blkmask;
2812 
2813 		trunc_off = ip->sync_trunc_off;
2814 		blkmask = hammer_blocksize(trunc_off) - 1;
2815 		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
2816 
2817 		/*
2818 		 * Delete any whole blocks on-media.  The front-end has
2819 		 * already cleaned out any partial block and made it
2820 		 * pending.  The front-end may have updated trunc_off
2821 		 * while we were blocked so we only use sync_trunc_off.
2822 		 *
2823 		 * This operation can blow out the buffer cache, EWOULDBLOCK
2824 		 * means we were unable to complete the deletion.  The
2825 		 * deletion will update sync_trunc_off in that case.
2826 		 */
2827 		error = hammer_ip_delete_range(&cursor, ip,
2828 						aligned_trunc_off,
2829 						0x7FFFFFFFFFFFFFFFLL, 2);
2830 		if (error == EWOULDBLOCK) {
2831 			ip->flags |= HAMMER_INODE_WOULDBLOCK;
2832 			error = 0;
2833 			goto defer_buffer_flush;
2834 		}
2835 
2836 		if (error)
2837 			goto done;
2838 
2839 		/*
2840 		 * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO.
2841 		 *
2842 		 * XXX we do this even if we did not previously generate
2843 	 * a REDO_TRUNC record.  This operation may enclose the
2844 		 * range for multiple prior truncation entries in the REDO
2845 		 * log.
2846 		 */
2847 		if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR &&
2848 		    (ip->flags & HAMMER_INODE_RDIRTY)) {
2849 			hammer_generate_redo(trans, ip, aligned_trunc_off,
2850 					     HAMMER_REDO_TERM_TRUNC,
2851 					     NULL, 0);
2852 		}
2853 
2854 		/*
2855 		 * Clear the truncation flag on the backend after we have
2856 		 * completed the deletions.  Backend data is now good again
2857 		 * (including new records we are about to sync, below).
2858 		 *
2859 		 * Leave sync_trunc_off intact.  As we write additional
2860 		 * records the backend will update sync_trunc_off.  This
2861 		 * tells the backend whether it can skip the overwrite
2862 		 * test.  This should work properly even when the backend
2863 		 * writes full blocks where the truncation point straddles
2864 		 * the block because the comparison is against the base
2865 		 * offset of the record.
2866 		 */
2867 		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2868 		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
2869 	} else {
2870 		error = 0;
2871 	}
2872 
2873 	/*
2874 	 * Now sync related records.  These will typically be directory
2875 	 * entries, records tracking direct-writes, or delete-on-disk records.
2876 	 */
2877 	if (error == 0) {
2878 		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2879 				    hammer_sync_record_callback, &cursor);
2880 		if (tmp_error < 0)
2881 			tmp_error = -tmp_error;
2882 		if (tmp_error)
2883 			error = tmp_error;
2884 	}
2885 	hammer_cache_node(&ip->cache[1], cursor.node);
2886 
2887 	/*
2888 	 * Re-seek for inode update, assuming our cache hasn't been ripped
2889 	 * out from under us.
2890 	 */
2891 	if (error == 0) {
2892 		tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
2893 		if (tmp_node) {
2894 			hammer_cursor_downgrade(&cursor);
2895 			hammer_lock_sh(&tmp_node->lock);
2896 			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
2897 				hammer_cursor_seek(&cursor, tmp_node, 0);
2898 			hammer_unlock(&tmp_node->lock);
2899 			hammer_rel_node(tmp_node);
2900 		}
2901 		error = 0;
2902 	}
2903 
2904 	/*
2905 	 * If we are deleting the inode the frontend had better not have
2906 	 * any active references on elements making up the inode.
2907 	 *
2908 	 * The call to hammer_ip_delete_clean() cleans up auxiliary records
2909 	 * but not DB or DATA records.  Those must have already been deleted
2910 	 * by the normal truncation mechanic.
2911 	 */
2912 	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
2913 	    RB_EMPTY(&ip->rec_tree) &&
2914 	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
2915 	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
2916 		int count1 = 0;
2917 
2918 		error = hammer_ip_delete_clean(&cursor, ip, &count1);
2919 		if (error == 0) {
2920 			ip->flags |= HAMMER_INODE_DELETED;
2921 			ip->sync_flags &= ~HAMMER_INODE_DELETING;
2922 			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2923 			KKASSERT(RB_EMPTY(&ip->rec_tree));
2924 
2925 			/*
2926 			 * Set delete_tid in both the frontend and backend
2927 			 * copy of the inode record.  The DELETED flag handles
2928 			 * this, do not set DDIRTY.
2929 			 */
2930 			ip->ino_leaf.base.delete_tid = trans->tid;
2931 			ip->sync_ino_leaf.base.delete_tid = trans->tid;
2932 			ip->ino_leaf.delete_ts = trans->time32;
2933 			ip->sync_ino_leaf.delete_ts = trans->time32;
2934 
2935 
2936 			/*
2937 			 * Adjust the inode count in the volume header
2938 			 */
2939 			hammer_sync_lock_sh(trans);
2940 			if (ip->flags & HAMMER_INODE_ONDISK) {
2941 				hammer_modify_volume_field(trans,
2942 							   trans->rootvol,
2943 							   vol0_stat_inodes);
2944 				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
2945 				hammer_modify_volume_done(trans->rootvol);
2946 			}
2947 			hammer_sync_unlock(trans);
2948 		}
2949 	}
2950 
2951 	if (error)
2952 		goto done;
2953 	ip->sync_flags &= ~HAMMER_INODE_BUFS;
2954 
2955 defer_buffer_flush:
2956 	/*
2957 	 * Now update the inode's on-disk inode-data and/or on-disk record.
2958 	 * DELETED and ONDISK are managed only in ip->flags.
2959 	 *
2960 	 * In the case of a defered buffer flush we still update the on-disk
2961 	 * In the case of a deferred buffer flush we still update the on-disk
2962 	 * inode to satisfy visibility requirements if there happen to be
2963 	 * directory dependencies.
2964 	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
2965 	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
2966 		/*
2967 		 * If deleted and on-disk, don't set any additional flags.
2968 		 * The delete flag takes care of things.
2969 		 *
2970 		 * Clear flags which may have been set by the frontend.
2971 		 */
2972 		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2973 				    HAMMER_INODE_SDIRTY |
2974 				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2975 				    HAMMER_INODE_DELETING);
2976 		break;
2977 	case HAMMER_INODE_DELETED:
2978 		/*
2979 		 * Take care of the case where a deleted inode was never
2980 		 * flushed to the disk in the first place.
2981 		 *
2982 		 * Clear flags which may have been set by the frontend.
2983 		 */
2984 		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2985 				    HAMMER_INODE_SDIRTY |
2986 				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2987 				    HAMMER_INODE_DELETING);
2988 		while (RB_ROOT(&ip->rec_tree)) {
2989 			hammer_record_t record = RB_ROOT(&ip->rec_tree);
2990 			hammer_ref(&record->lock);
2991 			KKASSERT(hammer_oneref(&record->lock));
2992 			record->flags |= HAMMER_RECF_DELETED_BE;
2993 			++record->ip->rec_generation;
2994 			hammer_rel_mem_record(record);
2995 		}
2996 		break;
2997 	case HAMMER_INODE_ONDISK:
2998 		/*
2999 		 * If already on-disk, do not set any additional flags.
3000 		 */
3001 		break;
3002 	default:
3003 		/*
3004 		 * If not on-disk and not deleted, set DDIRTY to force
3005 		 * an initial record to be written.
3006 		 *
3007 		 * Also set the create_tid in both the frontend and backend
3008 		 * copy of the inode record.
3009 		 */
3010 		ip->ino_leaf.base.create_tid = trans->tid;
3011 		ip->ino_leaf.create_ts = trans->time32;
3012 		ip->sync_ino_leaf.base.create_tid = trans->tid;
3013 		ip->sync_ino_leaf.create_ts = trans->time32;
3014 		ip->sync_flags |= HAMMER_INODE_DDIRTY;
3015 		break;
3016 	}
3017 
3018 	/*
3019 	 * If DDIRTY or SDIRTY is set, write out a new record.
3020 	 * If the inode is already on-disk the old record is marked as
3021 	 * deleted.
3022 	 *
3023 	 * If DELETED is set hammer_update_inode() will delete the existing
3024 	 * record without writing out a new one.
3025 	 *
3026 	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
3027 	 */
3028 	if (ip->flags & HAMMER_INODE_DELETED) {
3029 		error = hammer_update_inode(&cursor, ip);
3030 	} else
3031 	if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) &&
3032 	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
3033 		error = hammer_update_itimes(&cursor, ip);
3034 	} else
3035 	if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY |
3036 			      HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
3037 		error = hammer_update_inode(&cursor, ip);
3038 	}
3039 done:
3040 	if (error) {
3041 		hammer_critical_error(ip->hmp, ip, error,
3042 				      "while syncing inode");
3043 	}
3044 	hammer_done_cursor(&cursor);
3045 	return(error);
3046 }
3047 
3048 /*
3049  * This routine is called when the OS is no longer actively referencing
3050  * the inode (but might still be keeping it cached), or when releasing
3051  * the last reference to an inode.
3052  *
3053  * At this point if the inode's nlinks count is zero we want to destroy
3054  * it, which may mean destroying it on-media too.
3055  */
3056 void
3057 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
3058 {
3059 	struct vnode *vp;
3060 
3061 	/*
3062 	 * Set the DELETING flag when the link count drops to 0 and the
3063 	 * OS no longer has any opens on the inode.
3064 	 *
3065 	 * The backend will clear DELETING (a mod flag) and set DELETED
3066 	 * (a state flag) when it is actually able to perform the
3067 	 * operation.
3068 	 *
3069 	 * Don't reflag the deletion if the flusher is currently syncing
3070 	 * one that was already flagged.  A previously set DELETING flag
3071 	 * may bounce around flags and sync_flags until the operation is
3072 	 * completely done.
3073 	 */
3074 	if (ip->ino_data.nlinks == 0 &&
3075 	    ((ip->flags | ip->sync_flags) & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
3076 		ip->flags |= HAMMER_INODE_DELETING;
3077 		ip->flags |= HAMMER_INODE_TRUNCATED;
3078 		ip->trunc_off = 0;
3079 		vp = NULL;
3080 		if (getvp) {
3081 			if (hammer_get_vnode(ip, &vp) != 0)
3082 				return;
3083 		}
3084 
3085 		/*
3086 		 * Final cleanup
3087 		 */
3088 		if (ip->vp)
3089 			nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0);
3090 		if (getvp)
3091 			vput(vp);
3092 	}
3093 }
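
/*
 * e.g. (illustrative): removing the last link to a file and closing it
 * drops nlinks to 0, so the check above flags DELETING and queues a
 * whole-file truncation (trunc_off = 0).  The backend then destroys
 * the file's data records before hammer_update_inode() deletes the
 * inode record itself.
 */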
3094 
3095 /*
3096  * After potentially resolving a dependancy the inode is tested
3097  * After potentially resolving a dependency the inode is tested
3098  */
3099 void
3100 hammer_test_inode(hammer_inode_t ip)
3101 {
3102 	if (ip->flags & HAMMER_INODE_REFLUSH) {
3103 		ip->flags &= ~HAMMER_INODE_REFLUSH;
3104 		hammer_ref(&ip->lock);
3105 		if (ip->flags & HAMMER_INODE_RESIGNAL) {
3106 			ip->flags &= ~HAMMER_INODE_RESIGNAL;
3107 			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
3108 		} else {
3109 			hammer_flush_inode(ip, 0);
3110 		}
3111 		hammer_rel_inode(ip, 0);
3112 	}
3113 }
3114 
3115 /*
3116  * Clear the RECLAIM flag on an inode.  This occurs when the inode is
3117  * reassociated with a vp or just before it gets freed.
3118  *
3119  * Pipeline wakeups to threads blocked due to an excessive number of
3120  * detached inodes.  This typically occurs when atime updates accumulate
3121  * while scanning a directory tree.
3122  */
3123 static void
3124 hammer_inode_wakereclaims(hammer_inode_t ip)
3125 {
3126 	struct hammer_reclaim *reclaim;
3127 	hammer_mount_t hmp = ip->hmp;
3128 
3129 	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
3130 		return;
3131 
3132 	--hammer_count_reclaiming;
3133 	--hmp->inode_reclaims;
3134 	ip->flags &= ~HAMMER_INODE_RECLAIM;
3135 
3136 	while ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
3137 		if (reclaim->count > 0 && --reclaim->count == 0) {
3138 			TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
3139 			wakeup(reclaim);
3140 		}
3141 		if (hmp->inode_reclaims > hammer_limit_reclaim / 2)
3142 			break;
3143 	}
3144 }
3145 
3146 /*
3147  * Setup our reclaim pipeline.  We only let so many detached (and dirty)
3148  * inodes build up before we start blocking.  This routine is called
3149  * if a new inode is created or an inode is loaded from media.
3150  *
3151  * When we block we don't care *which* inode has finished reclaiming,
3152  * as long as one does.
3153  */
3154 void
3155 hammer_inode_waitreclaims(hammer_transaction_t trans)
3156 {
3157 	hammer_mount_t hmp = trans->hmp;
3158 	struct hammer_reclaim reclaim;
3159 
3160 	/*
3161 	 * Track inode load
3162 	 */
3163 	if (curthread->td_proc) {
3164 		struct hammer_inostats *stats;
3165 		int lower_limit;
3166 
3167 		stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid);
3168 		++stats->count;
3169 
3170 		if (stats->count > hammer_limit_reclaim / 2)
3171 			stats->count = hammer_limit_reclaim / 2;
3172 		lower_limit = hammer_limit_reclaim - stats->count;
3173 		if (hammer_debug_general & 0x10000)
3174 			kprintf("pid %5d limit %d\n", (int)curthread->td_proc->p_pid, lower_limit);
3175 
3176 		if (hmp->inode_reclaims < lower_limit)
3177 			return;
3178 	} else {
3179 		/*
3180 		 * Default mode
3181 		 */
3182 		if (hmp->inode_reclaims < hammer_limit_reclaim)
3183 			return;
3184 	}
3185 	reclaim.count = 1;
3186 	TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
3187 	tsleep(&reclaim, 0, "hmrrcm", hz);
3188 	if (reclaim.count > 0)
3189 		TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
3190 }
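
/*
 * e.g. (illustrative): a process creating inodes faster than the
 * flusher can retire them enqueues a struct hammer_reclaim with
 * count == 1 and sleeps.  hammer_inode_wakereclaims() decrements the
 * count and issues the wakeup as reclaims complete; the hz timeout on
 * the tsleep bounds the stall to roughly one second even when no
 * wakeup arrives.
 */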
3191 
3192 /*
3193  * Keep track of reclaim statistics on a per-pid basis using a loose
3194  * 4-way set associative hash table.  Collisions inherit the count of
3195  * the previous entry.
3196  *
3197  * NOTE: We want to be careful here to limit the chain size.  If the chain
3198  *	 size is too large a pid will spread its stats out over too many
3199  *	 entries under certain types of heavy filesystem activity and
3200  *	 wind up not delaying long enough.
3201  */
3202 static
3203 struct hammer_inostats *
3204 hammer_inode_inostats(hammer_mount_t hmp, pid_t pid)
3205 {
3206 	struct hammer_inostats *stats;
3207 	int delta;
3208 	int chain;
3209 	static int iterator;	/* we don't care about MP races */
3210 
3211 	/*
3212 	 * Chain up to 4 times to find our entry.
3213 	 */
3214 	for (chain = 0; chain < 4; ++chain) {
3215 		stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK];
3216 		if (stats->pid == pid)
3217 			break;
3218 	}
3219 
3220 	/*
3221 	 * Replace one of the four chaining entries with our new entry.
3222 	 */
3223 	if (chain == 4) {
3224 		stats = &hmp->inostats[(pid + (iterator++ & 3)) &
3225 				       HAMMER_INOSTATS_HMASK];
3226 		stats->pid = pid;
3227 	}
3228 
3229 	/*
3230 	 * Decay the entry
3231 	 */
3232 	if (stats->count && stats->ltick != ticks) {
3233 		delta = ticks - stats->ltick;
3234 		stats->ltick = ticks;
3235 		if (delta <= 0 || delta > hz * 60)
3236 			stats->count = 0;
3237 		else
3238 			stats->count = stats->count * hz / (hz + delta);
3239 	}
3240 	if (hammer_debug_general & 0x10000)
3241 		kprintf("pid %5d stats %d\n", (int)pid, stats->count);
3242 	return (stats);
3243 }
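
/*
 * Worked example of the decay above (illustrative, assuming the common
 * hz == 100): an entry idle for one second (delta == 100 ticks) is
 * halved, count * 100 / (100 + 100); two seconds of idle time cuts it
 * to a third, and anything idle for over a minute is zeroed outright.
 */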
3244 
3245 #if 0
3246 
3247 /*
3248  * XXX not used, doesn't work very well due to the large batching nature
3249  * of flushes.
3250  *
3251  * A larger than normal backlog of inodes is sitting in the flusher,
3252  * enforce a general slowdown to let it catch up.  This routine is only
3253  * called on completion of a non-flusher-related transaction which
3254  * performed B-Tree node I/O.
3255  *
3256  * It is possible for the flusher to stall in a continuous load.
3257  * blogbench -i1000 -o seems to do a good job generating this sort of load.
3258  * If the flusher is unable to catch up the inode count can bloat until
3259  * we run out of kvm.
3260  *
3261  * This is a bit of a hack.
3262  */
3263 void
3264 hammer_inode_waithard(hammer_mount_t hmp)
3265 {
3266 	/*
3267 	 * Hysteresis.
3268 	 */
3269 	if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
3270 		if (hmp->inode_reclaims < hammer_limit_reclaim / 2 &&
3271 		    hmp->count_iqueued < hmp->count_inodes / 20) {
3272 			hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
3273 			return;
3274 		}
3275 	} else {
3276 		if (hmp->inode_reclaims < hammer_limit_reclaim ||
3277 		    hmp->count_iqueued < hmp->count_inodes / 10) {
3278 			return;
3279 		}
3280 		hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
3281 	}
3282 
3283 	/*
3284 	 * Block for one flush cycle.
3285 	 */
3286 	hammer_flusher_wait_next(hmp);
3287 }
3288 
3289 #endif
3290