xref: /dflybsd-src/sys/vfs/hammer/hammer_vfsops.c (revision b45803e36700b8126fbc3863d4292d3fccaa7203)
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>

#include "hammer.h"

/*
 * NOTE!  Global statistics may not be MPSAFE so HAMMER never uses them
 *	  in conditionals.
 */
int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug = 1;		/* medium-error panics */
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_debug_critical;		/* non-zero enter debugger on error */
int hammer_cluster_enable = 1;		/* enable read clustering by default */
int hammer_live_dedup = 0;
int hammer_tdmux_ticks;
int hammer_count_fsyncs;
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaims;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int64_t hammer_count_extra_space_used;
int64_t hammer_stats_btree_lookups;
int64_t hammer_stats_btree_searches;
int64_t hammer_stats_btree_inserts;
int64_t hammer_stats_btree_deletes;
int64_t hammer_stats_btree_elements;
int64_t hammer_stats_btree_splits;
int64_t hammer_stats_btree_iterations;
int64_t hammer_stats_btree_root_iterations;
int64_t hammer_stats_record_iterations;

int64_t hammer_stats_file_read;
int64_t hammer_stats_file_write;
int64_t hammer_stats_file_iopsr;
int64_t hammer_stats_file_iopsw;
int64_t hammer_stats_disk_read;
int64_t hammer_stats_disk_write;
int64_t hammer_stats_inode_flushes;
int64_t hammer_stats_commits;
int64_t hammer_stats_undo;
int64_t hammer_stats_redo;

long hammer_count_dirtybufspace;	/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
long hammer_count_io_running_read;
long hammer_count_io_running_write;
int hammer_count_io_locked;
long hammer_limit_dirtybufspace;	/* per-mount */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_inode_recs = 2048;	/* per inode */
int hammer_limit_reclaims;
int hammer_live_dedup_cache_size = DEDUP_CACHE_SIZE;
int hammer_limit_redo = 4096 * 1024;	/* per inode */
int hammer_autoflush = 500;		/* auto flush (typ on reclaim) */
int hammer_bio_count;
int hammer_verify_zone;
int hammer_verify_data = 1;
int hammer_write_mode;
int hammer_double_buffer;
int hammer_btree_full_undo = 1;
int hammer_yield_check = 16;
int hammer_fsync_mode = 3;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

/*
 * Live dedup debug counters (sysctls are writable so that counters
 * can be reset from userspace).
 */
int64_t hammer_live_dedup_vnode_bcmps = 0;
int64_t hammer_live_dedup_device_bcmps = 0;
int64_t hammer_live_dedup_findblk_failures = 0;
int64_t hammer_live_dedup_bmap_saves = 0;

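/*
 * The tunables and counters above are exported under the vfs.hammer
 * sysctl tree declared below and can be inspected or set from
 * userspace with sysctl(8), e.g. (illustrative):
 *
 *	sysctl vfs.hammer.stats_commits
 *	sysctl vfs.hammer.debug_general=1
 */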
SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");

SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
	   &hammer_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
	   &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
	   &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW,
	   &hammer_debug_critical, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer_cluster_enable, 0, "");
/*
 * 0 - live dedup is disabled
 * 1 - dedup cache is populated on reads only
 * 2 - dedup cache is populated on both reads and writes
 *
 * LIVE_DEDUP IS DISABLED PERMANENTLY!  This feature appears to cause
 * blockmap corruption over time, so it has been turned off for good.
 */
SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup, CTLFLAG_RD,
	   &hammer_live_dedup, 0, "Enable live dedup (experimental)");
SYSCTL_INT(_vfs_hammer, OID_AUTO, tdmux_ticks, CTLFLAG_RW,
	   &hammer_tdmux_ticks, 0, "Hammer tdmux ticks");

SYSCTL_LONG(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
	   &hammer_limit_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
	   &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
	   &hammer_limit_inode_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaims, CTLFLAG_RW,
	   &hammer_limit_reclaims, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup_cache_size, CTLFLAG_RW,
	   &hammer_live_dedup_cache_size, 0,
	   "Number of cache entries");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_redo, CTLFLAG_RW,
	   &hammer_limit_redo, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
	   &hammer_count_fsyncs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
	   &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaims, CTLFLAG_RD,
	   &hammer_count_reclaims, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
	   &hammer_count_extra_space_used, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
	   &hammer_stats_btree_searches, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
	   &hammer_stats_btree_lookups, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
	   &hammer_stats_btree_inserts, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
	   &hammer_stats_btree_deletes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
	   &hammer_stats_btree_elements, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
	   &hammer_stats_btree_splits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_root_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
	   &hammer_stats_record_iterations, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
	   &hammer_stats_file_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
	   &hammer_stats_file_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
	   &hammer_stats_file_iopsr, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
	   &hammer_stats_file_iopsw, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
	   &hammer_stats_disk_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
	   &hammer_stats_disk_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
	   &hammer_stats_inode_flushes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
	   &hammer_stats_commits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
	   &hammer_stats_undo, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_redo, CTLFLAG_RD,
	   &hammer_stats_redo, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_vnode_bcmps, CTLFLAG_RW,
	    &hammer_live_dedup_vnode_bcmps, 0,
	    "successful vnode buffer comparisons");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_device_bcmps, CTLFLAG_RW,
	    &hammer_live_dedup_device_bcmps, 0,
	    "successful device buffer comparisons");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_findblk_failures, CTLFLAG_RW,
	    &hammer_live_dedup_findblk_failures, 0,
	    "block lookup failures for comparison");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_bmap_saves, CTLFLAG_RW,
	    &hammer_live_dedup_bmap_saves, 0,
	    "useful physical block lookups");

SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
	   &hammer_count_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
	   &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
	   &hammer_count_reservations, 0, "");
SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
	   &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
	   &hammer_count_io_locked, 0, "");
SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
	   &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
	   &hammer_autoflush, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
	   &hammer_verify_zone, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
	   &hammer_verify_data, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
	   &hammer_write_mode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, double_buffer, CTLFLAG_RW,
	   &hammer_double_buffer, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, btree_full_undo, CTLFLAG_RW,
	   &hammer_btree_full_undo, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
	   &hammer_yield_check, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
	   &hammer_fsync_mode, 0, "");

/* KTR_INFO_MASTER(hammer); */

/*
 * VFS ABI
 */
static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
				ino_t ino, struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);


static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_statvfs	= hammer_vfs_statvfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");

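/*
 * Register the HAMMER VFS with the kernel.  VFS_SET() hooks the vfsops
 * vector up under the "hammer" filesystem name and MODULE_VERSION()
 * exports the module version for dependency checks.
 */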
VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

static int
hammer_vfs_init(struct vfsconf *conf)
{
	long n;

	/*
	 * Wait up to this long for an exclusive deadlock to clear
	 * before acquiring a new shared lock on the ip.  The deadlock
	 * may have occurred on a b-tree node related to the ip.
	 */
	if (hammer_tdmux_ticks == 0)
		hammer_tdmux_ticks = hz / 5;

	/*
	 * Autosize, but be careful because a hammer filesystem's
	 * reserve is partially calculated based on dirtybufspace,
	 * so we simply cannot allow it to get too large.
	 */
	if (hammer_limit_recs == 0) {
		n = nbuf * 25;
		if (n > kmalloc_limit(M_HAMMER) / 512)
			n = kmalloc_limit(M_HAMMER) / 512;
		if (n > 2 * 1024 * 1024)
			n = 2 * 1024 * 1024;
		hammer_limit_recs = (int)n;
	}
	if (hammer_limit_dirtybufspace == 0) {
		hammer_limit_dirtybufspace = hidirtybufspace / 2;
		if (hammer_limit_dirtybufspace < 1L * 1024 * 1024)
			hammer_limit_dirtybufspace = 1024L * 1024;
		if (hammer_limit_dirtybufspace > 1024L * 1024 * 1024)
			hammer_limit_dirtybufspace = 1024L * 1024 * 1024;
	}

	/*
	 * The hammer_inode structure detaches from the vnode on reclaim.
	 * This limits the number of inodes in this state to prevent a
	 * memory pool blowout.
	 */
	if (hammer_limit_reclaims == 0)
		hammer_limit_reclaims = desiredvnodes / 10;

	return(0);
}

static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	struct vnode *devvp = NULL;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;
	int master_id;
	int nvolumes;
	char *next_volume_ptr = NULL;

	/*
	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
	 */
	if (mntpt == NULL) {
		bzero(&info, sizeof(info));
		info.asof = 0;
		info.hflags = 0;
		info.nvolumes = 1;

		next_volume_ptr = mp->mnt_stat.f_mntfromname;

		/* Count number of volumes separated by ':' */
		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
			if (*p == ':') {
				++info.nvolumes;
			}
		}

		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	/*
	 * updating or new mount
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		if (info.nvolumes <= 0 || info.nvolumes > HAMMER_MAX_VOLUMES)
			return (EINVAL);
		hmp = NULL;
	}

	/*
	 * master-id validation.  The master id may not be changed by a
	 * mount update.
	 */
	if (info.hflags & HMNT_MASTERID) {
		if (hmp && hmp->master_id != info.master_id) {
			kprintf("hammer: cannot change master id "
				"with mount update\n");
			return(EINVAL);
		}
		master_id = info.master_id;
		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
			return (EINVAL);
	} else {
		if (hmp)
			master_id = hmp->master_id;
		else
			master_id = 0;
	}

	/*
	 * Internal mount data structure
	 */
	if (hmp == NULL) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		/*
		 * Make sure kmalloc type limits are set appropriately.
		 *
		 * Our inode kmalloc group is sized based on maxvnodes
		 * (controlled by the system, not us).
		 */
		kmalloc_create(&hmp->m_misc, "HAMMER-others");
		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");

		kmalloc_raise_limit(hmp->m_inodes, 0);	/* unlimited */

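		/*
		 * Sentinel B-Tree boundary elements.  These span the
		 * entire localization, obj_id and key space so scans
		 * rooted at the root B-Tree are never clipped.
		 */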
		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;
		hmp->root_btree_beg.btype = HAMMER_BTREE_TYPE_NONE;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;   /* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;
		hmp->root_btree_end.btype = HAMMER_BTREE_TYPE_NONE;

		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
		hmp->krate.count = -16;	/* initial burst */
		hmp->kdiag.freq = 1;	/* maximum reporting rate (hz) */
		hmp->kdiag.count = -16;	/* initial burst */

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;
		hmp->snapshot_lock.refs = 1;
		hmp->volume_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);

		RB_INIT(&hmp->rb_dedup_crc_root);
		RB_INIT(&hmp->rb_dedup_off_root);
		TAILQ_INIT(&hmp->dedup_lru_list);
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;

	hmp->master_id = master_id;

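	/*
	 * An as-of mount presents a historical snapshot of the
	 * filesystem as of the given TID and is forced read-only.
	 */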
	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	hmp->volume_to_remove = -1;

	/*
	 * Re-open read-write if originally read-only, or vice versa.
	 *
	 * When going from read-only to read-write execute the stage2
	 * recovery if it has not already been run.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		lwkt_gettoken(&hmp->fs_token);
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				error = hammer_recover_stage2(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_redo_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	RB_INIT(&hmp->volu_root);
	RB_INIT(&hmp->undo_root);
	RB_INIT(&hmp->data_root);
	RB_INIT(&hmp->meta_root);
	RB_INIT(&hmp->lose_root);
	TAILQ_INIT(&hmp->iorun_list);

	lwkt_token_init(&hmp->fs_token, "hammerfs");
	lwkt_token_init(&hmp->io_token, "hammerio");

	lwkt_gettoken(&hmp->fs_token);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = -1;
	for (i = 0; i < info.nvolumes; ++i) {
		if (mntpt == NULL) {
			/*
			 * Root mount.
			 */
			KKASSERT(next_volume_ptr != NULL);
			strcpy(path, "");
			if (*next_volume_ptr != '/') {
				/* relative path */
				strcpy(path, "/dev/");
			}
			int k;
			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
				if (*next_volume_ptr == '\0') {
					break;
				} else if (*next_volume_ptr == ':') {
					++next_volume_ptr;
					break;
				} else {
					path[k] = *next_volume_ptr;
					++next_volume_ptr;
				}
			}
			path[k] = '\0';

			error = 0;
			cdev_t dev = kgetdiskbyname(path);
			error = bdevvp(dev, &devvp);
			if (error) {
				kprintf("hammer_mount: can't find devvp\n");
			}
		} else {
			error = copyin(&info.volumes[i], &upath,
				       sizeof(char *));
			if (error == 0)
				error = copyinstr(upath, path,
						  MAXPATHLEN, NULL);
		}
		if (error == 0)
			error = hammer_install_volume(hmp, path, devvp);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
		goto failed;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
		error = EINVAL;
		goto failed;
	}

	/*
	 * Other errors
	 */
	if (error) {
		kprintf("hammer_mount: Failed to load volumes!\n");
		goto failed;
	}

	nvolumes = hammer_get_installed_volumes(hmp);
	if (hmp->nvolumes != nvolumes) {
		kprintf("hammer_mount: volume header says %d volumes, "
			"but %d installed\n",
			hmp->nvolumes, nvolumes);
		error = EINVAL;
		goto failed;
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
	 * will acquire a per-mount token prior to entry and release it
	 * on return.
	 */
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;
	/*MNTK_RD_MPSAFE | MNTK_GA_MPSAFE | MNTK_IN_MPSAFE;*/

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Check filesystem version
	 */
	hmp->version = rootvol->ondisk->vol_version;
	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
	    hmp->version > HAMMER_VOL_VERSION_MAX) {
		kprintf("HAMMER: mount unsupported fs version %d\n",
			hmp->version);
		error = ERANGE;
		goto done;
	}

	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  The calculated limit is typically
	 * in the tens of thousands and matters primarily when small
	 * HAMMER filesystems are created.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);

	/*
	 * NOTE: Recover stage1 not only handles meta-data recovery, it
	 * 	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
	 */
	error = hammer_recover_stage1(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume.
	 *
	 * The top 16 bits of fsid.val[1] is a pfs id.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	hmp->flush_tid1 = hmp->next_tid;
	hmp->flush_tid2 = hmp->next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/
	if (hmp->ronly == 0)
		error = hammer_recover_stage2(hmp, rootvol);

	/*
	 * If the stage2 recovery fails be sure to clean out all cached
	 * vnodes before throwing away the mount structure or bad things
	 * will happen.
	 */
	if (error)
		vflush(mp, 0, 0);

done:
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		/* New mount */

		/* Populate info for mount point (NULL pad)*/
		bzero(mp->mnt_stat.f_mntonname, MNAMELEN);
		size_t size;
		if (mntpt) {
			copyinstr(mntpt, mp->mnt_stat.f_mntonname,
							MNAMELEN -1, &size);
		} else { /* Root mount */
			mp->mnt_stat.f_mntonname[0] = '/';
		}
	}
	(void)VFS_STATFS(mp, &mp->mnt_stat, cred);
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	lwkt_gettoken(&hmp->fs_token);
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	error = vflush(mp, 0, flags);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	if (error == 0) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return(error);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 *
 * Called with fs_token held.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	hammer_flush_group_t flg;
	int count;
	int dummy;

	/*
	 * Flush anything dirty.  This won't even run if the
	 * filesystem errored-out.
	 */
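	/*
	 * Note: progress is reported starting with the fifth pass and
	 * each pass past the fifth sleeps about a second; we give up
	 * after roughly 30 passes.
	 */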
	count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: umount flushing.");
			else
				kprintf(".");
			tsleep(&dummy, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up\n");
			break;
		}
	}
	if (count >= 5 && count < 30)
		kprintf("\n");

	/*
	 * If the mount had a critical error we have to destroy any
	 * remaining inodes before we can finish cleaning up the flusher.
	 */
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
			hammer_destroy_inode_callback, NULL);
	}

	/*
	 * There shouldn't be any inodes left now and any left over
	 * flush groups should now be empty.
	 */
	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
		KKASSERT(RB_EMPTY(&flg->flush_tree));
		if (flg->refs) {
			kprintf("HAMMER: Warning, flush_group %p was "
				"not empty on umount!\n", flg);
		}
		kfree(flg, hmp->m_misc);
	}

	/*
	 * We can finally destroy the flusher
	 */
	hammer_flusher_destroy(hmp);

	/*
	 * We may have held recovered buffers due to a read-only mount.
	 * These must be discarded.
	 */
	if (hmp->ronly)
		hammer_recover_flush_buffers(hmp, NULL, -1);

	/*
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	hammer_destroy_dedup_cache(hmp);
	if (hmp->dedup_free_cache != NULL) {
		kfree(hmp->dedup_free_cache, hmp->m_misc);
		hmp->dedup_free_cache = NULL;
	}
	kmalloc_destroy(&hmp->m_misc);
	kmalloc_destroy(&hmp->m_inodes);
	lwkt_reltoken(&hmp->fs_token);
	kfree(hmp, M_HAMMER);
}

/*
 * Report critical errors.  ip may be NULL.
 */
void
hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
		      int error, const char *msg)
{
	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;

	krateprintf(&hmp->krate,
		    "HAMMER(%s): Critical error inode=%jd error=%d %s\n",
		    hmp->mp->mnt_stat.f_mntfromname,
		    (intmax_t)(ip ? ip->obj_id : -1),
		    error, msg);

	if (hmp->ronly == 0) {
		hmp->ronly = 2;		/* special errored read-only mode */
		hmp->mp->mnt_flag |= MNT_RDONLY;
		RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
			hammer_adjust_volume_mode, NULL);
		kprintf("HAMMER(%s): Forcing read-only mode\n",
			hmp->mp->mnt_stat.f_mntfromname);
	}
	hmp->error = error;
	if (hammer_debug_critical)
		Debugger("Entering debugger");
}


/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
		ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;
	u_int32_t localization;

	lwkt_gettoken(&hmp->fs_token);
	hammer_simple_transaction(&trans, hmp);

	/*
	 * If a directory vnode is supplied (mainly NFS) then we can acquire
	 * the PFS domain from it.  Otherwise we would only be able to vget
	 * inodes in the root PFS.
	 */
	if (dvp) {
		localization = HAMMER_DEF_LOCALIZATION +
				VTOI(dvp)->obj_localization;
	} else {
		localization = HAMMER_DEF_LOCALIZATION;
	}

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino,
			      hmp->asof, localization,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
	} else {
		error = hammer_get_vnode(ip, vpp);
		hammer_rel_inode(ip, 0);
	}
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * The root directory always uses an obj_id of 1, so this reduces to a
 * hammer_vfs_vget() of inode 1.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
	int error;

	error = hammer_vfs_vget(mp, NULL, 1, vpp);
	return (error);
}

static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	lwkt_gettoken(&hmp->fs_token);
	volume = hammer_get_root_volume(hmp, &error);
	if (error) {
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

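	/*
	 * bfree and breserved are in bytes; convert to f_bsize
	 * (HAMMER_BUFSIZE) blocks, deducting the space reserved for
	 * write operations.
	 */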
	mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	lwkt_reltoken(&hmp->fs_token);
	return(0);
}

static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	lwkt_gettoken(&hmp->fs_token);
	volume = hammer_get_root_volume(hmp, &error);
	if (error) {
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;
	lwkt_reltoken(&hmp->fs_token);
	return(0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice; the second
 * run advances the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we
 * did we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	lwkt_gettoken(&hmp->fs_token);
	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}

/*
 * Convert a vnode to a file handle.
 *
 * Accesses read-only fields on already-referenced structures so
 * no token is needed.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

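	/*
	 * Handle layout: the 64-bit obj_id is stored in fid_data[0-7],
	 * the 64-bit obj_asof in fid_data[8-15], and the top 16 bits
	 * of the localization in fid_ext.
	 */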
	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = ip->obj_localization >> 16;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return(0);
}


/*
 * Convert a file handle back to a vnode.
 *
 * Use rootvp to enforce PFS isolation when a PFS is exported via a
 * null mount.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		  struct fid *fhp, struct vnode **vpp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;
	u_int32_t localization;

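	/*
	 * Decode the handle built by hammer_vfs_vptofh().  When a root
	 * vnode is supplied the PFS localization is taken from it
	 * rather than from the handle, enforcing PFS isolation.
	 */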
	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
	if (rootvp)
		localization = VTOI(rootvp)->obj_localization;
	else
		localization = (u_int32_t)fhp->fid_ext << 16;

	lwkt_gettoken(&hmp->fs_token);
	hammer_simple_transaction(&trans, hmp);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id,
			      info.obj_asof, localization, 0, &error);
	if (ip) {
		error = hammer_get_vnode(ip, vpp);
		hammer_rel_inode(ip, 0);
	} else {
		*vpp = NULL;
	}
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}

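/*
 * Determine whether an NFS client may access the filesystem, returning
 * the export flags and anonymous credential on success.
 */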
static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	lwkt_gettoken(&hmp->fs_token);
	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}

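/*
 * MOUNTCTL backend for export control.  Only MOUNTCTL_SET_EXPORT is
 * supported; it updates the export list via vfs_export().
 */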
int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	lwkt_gettoken(&hmp->fs_token);

	switch(op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	lwkt_reltoken(&hmp->fs_token);

	return(error);
}
1267