1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/mountctl.h>
36 
37 #include "hammer.h"
38 
39 /*
40  * NOTE!  Global statistics may not be MPSAFE, so HAMMER never uses them
41  *	  in conditionals.
42  */
43 int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
44 int hammer_debug_io;
45 int hammer_debug_general;
46 int hammer_debug_debug = 1;		/* medium-error panics */
47 int hammer_debug_inode;
48 int hammer_debug_locks;
49 int hammer_debug_btree;
50 int hammer_debug_tid;
51 int hammer_debug_recover;		/* -1 will disable, +1 will force */
52 int hammer_debug_recover_faults;
53 int hammer_debug_critical;		/* non-zero: enter debugger on error */
54 int hammer_cluster_enable = 1;		/* enable read clustering by default */
55 int hammer_live_dedup = 0;
56 int hammer_tdmux_ticks;
57 int hammer_count_fsyncs;
58 int hammer_count_inodes;
59 int hammer_count_iqueued;
60 int hammer_count_reclaims;
61 int hammer_count_records;
62 int hammer_count_record_datas;
63 int hammer_count_volumes;
64 int hammer_count_buffers;
65 int hammer_count_nodes;
66 int64_t hammer_count_extra_space_used;
67 int64_t hammer_stats_btree_lookups;
68 int64_t hammer_stats_btree_searches;
69 int64_t hammer_stats_btree_inserts;
70 int64_t hammer_stats_btree_deletes;
71 int64_t hammer_stats_btree_elements;
72 int64_t hammer_stats_btree_splits;
73 int64_t hammer_stats_btree_iterations;
74 int64_t hammer_stats_btree_root_iterations;
75 int64_t hammer_stats_record_iterations;
76 
77 int64_t hammer_stats_file_read;
78 int64_t hammer_stats_file_write;
79 int64_t hammer_stats_file_iopsr;
80 int64_t hammer_stats_file_iopsw;
81 int64_t hammer_stats_disk_read;
82 int64_t hammer_stats_disk_write;
83 int64_t hammer_stats_inode_flushes;
84 int64_t hammer_stats_commits;
85 int64_t hammer_stats_undo;
86 int64_t hammer_stats_redo;
87 
88 long hammer_count_dirtybufspace;	/* global */
89 int hammer_count_refedbufs;		/* global */
90 int hammer_count_reservations;
91 long hammer_count_io_running_read;
92 long hammer_count_io_running_write;
93 int hammer_count_io_locked;
94 long hammer_limit_dirtybufspace;	/* per-mount */
95 int hammer_limit_recs;			/* as a whole XXX */
96 int hammer_limit_inode_recs = 2048;	/* per inode */
97 int hammer_limit_reclaims;
98 int hammer_live_dedup_cache_size = DEDUP_CACHE_SIZE;
99 int hammer_limit_redo = 4096 * 1024;	/* per inode */
100 int hammer_autoflush = 500;		/* auto flush (typ on reclaim) */
101 int hammer_bio_count;
102 int hammer_verify_zone;
103 int hammer_verify_data = 1;
104 int hammer_write_mode;
105 int hammer_double_buffer;
106 int hammer_btree_full_undo = 1;
107 int hammer_yield_check = 16;
108 int hammer_fsync_mode = 3;
109 int64_t hammer_contention_count;
110 int64_t hammer_zone_limit;
111 
112 /*
113  * Live dedup debug counters (sysctls are writable so that counters
114  * can be reset from userspace).
115  */
116 int64_t hammer_live_dedup_vnode_bcmps = 0;
117 int64_t hammer_live_dedup_device_bcmps = 0;
118 int64_t hammer_live_dedup_findblk_failures = 0;
119 int64_t hammer_live_dedup_bmap_saves = 0;
120 
121 
122 SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
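
/*
 * Illustrative note (not part of the original source): the node above
 * roots the vfs.hammer sysctl tree, so every knob declared below is
 * reachable from userspace, e.g.:
 *
 *	sysctl vfs.hammer.limit_recs
 *	sysctl vfs.hammer.fsync_mode=3
 */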
123 
124 SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
125 	   &hammer_supported_version, 0, "");
126 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
127 	   &hammer_debug_general, 0, "");
128 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
129 	   &hammer_debug_io, 0, "");
130 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
131 	   &hammer_debug_debug, 0, "");
132 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
133 	   &hammer_debug_inode, 0, "");
134 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
135 	   &hammer_debug_locks, 0, "");
136 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
137 	   &hammer_debug_btree, 0, "");
138 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
139 	   &hammer_debug_tid, 0, "");
140 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
141 	   &hammer_debug_recover, 0, "");
142 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
143 	   &hammer_debug_recover_faults, 0, "");
144 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW,
145 	   &hammer_debug_critical, 0, "");
146 SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
147 	   &hammer_cluster_enable, 0, "");
148 /*
149  * 0 - live dedup is disabled
150  * 1 - dedup cache is populated on reads only
151  * 2 - dedup cache is populated on both reads and writes
152  *
153  * LIVE_DEDUP IS DISABLED PERMANENTLY!  This feature appears to cause
154  * blockmap corruption over time, so it has been turned off.
155  */
156 SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup, CTLFLAG_RD,
157 	   &hammer_live_dedup, 0, "Enable live dedup (experimental)");
158 SYSCTL_INT(_vfs_hammer, OID_AUTO, tdmux_ticks, CTLFLAG_RW,
159 	   &hammer_tdmux_ticks, 0, "Hammer tdmux ticks");
160 
161 SYSCTL_LONG(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
162 	   &hammer_limit_dirtybufspace, 0, "");
163 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
164 	   &hammer_limit_recs, 0, "");
165 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
166 	   &hammer_limit_inode_recs, 0, "");
167 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaims, CTLFLAG_RW,
168 	   &hammer_limit_reclaims, 0, "");
169 SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup_cache_size, CTLFLAG_RW,
170 	   &hammer_live_dedup_cache_size, 0,
171 	   "Number of cache entries");
172 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_redo, CTLFLAG_RW,
173 	   &hammer_limit_redo, 0, "");
174 
175 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
176 	   &hammer_count_fsyncs, 0, "");
177 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
178 	   &hammer_count_inodes, 0, "");
179 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
180 	   &hammer_count_iqueued, 0, "");
181 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaims, CTLFLAG_RD,
182 	   &hammer_count_reclaims, 0, "");
183 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
184 	   &hammer_count_records, 0, "");
185 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
186 	   &hammer_count_record_datas, 0, "");
187 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
188 	   &hammer_count_volumes, 0, "");
189 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
190 	   &hammer_count_buffers, 0, "");
191 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
192 	   &hammer_count_nodes, 0, "");
193 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
194 	   &hammer_count_extra_space_used, 0, "");
195 
196 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
197 	   &hammer_stats_btree_searches, 0, "");
198 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
199 	   &hammer_stats_btree_lookups, 0, "");
200 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
201 	   &hammer_stats_btree_inserts, 0, "");
202 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
203 	   &hammer_stats_btree_deletes, 0, "");
204 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
205 	   &hammer_stats_btree_elements, 0, "");
206 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
207 	   &hammer_stats_btree_splits, 0, "");
208 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
209 	   &hammer_stats_btree_iterations, 0, "");
210 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
211 	   &hammer_stats_btree_root_iterations, 0, "");
212 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
213 	   &hammer_stats_record_iterations, 0, "");
214 
215 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
216 	   &hammer_stats_file_read, 0, "");
217 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
218 	   &hammer_stats_file_write, 0, "");
219 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
220 	   &hammer_stats_file_iopsr, 0, "");
221 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
222 	   &hammer_stats_file_iopsw, 0, "");
223 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
224 	   &hammer_stats_disk_read, 0, "");
225 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
226 	   &hammer_stats_disk_write, 0, "");
227 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
228 	   &hammer_stats_inode_flushes, 0, "");
229 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
230 	   &hammer_stats_commits, 0, "");
231 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
232 	   &hammer_stats_undo, 0, "");
233 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_redo, CTLFLAG_RD,
234 	   &hammer_stats_redo, 0, "");
235 
236 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_vnode_bcmps, CTLFLAG_RW,
237 	    &hammer_live_dedup_vnode_bcmps, 0,
238 	    "successful vnode buffer comparisons");
239 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_device_bcmps, CTLFLAG_RW,
240 	    &hammer_live_dedup_device_bcmps, 0,
241 	    "successful device buffer comparisons");
242 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_findblk_failures, CTLFLAG_RW,
243 	    &hammer_live_dedup_findblk_failures, 0,
244 	    "block lookup failures for comparison");
245 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_bmap_saves, CTLFLAG_RW,
246 	    &hammer_live_dedup_bmap_saves, 0,
247 	    "useful physical block lookups");
248 
249 SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
250 	   &hammer_count_dirtybufspace, 0, "");
251 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
252 	   &hammer_count_refedbufs, 0, "");
253 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
254 	   &hammer_count_reservations, 0, "");
255 SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
256 	   &hammer_count_io_running_read, 0, "");
257 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
258 	   &hammer_count_io_locked, 0, "");
259 SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
260 	   &hammer_count_io_running_write, 0, "");
261 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
262 	   &hammer_zone_limit, 0, "");
263 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
264 	   &hammer_contention_count, 0, "");
265 SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
266 	   &hammer_autoflush, 0, "");
267 SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
268 	   &hammer_verify_zone, 0, "");
269 SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
270 	   &hammer_verify_data, 0, "");
271 SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
272 	   &hammer_write_mode, 0, "");
273 SYSCTL_INT(_vfs_hammer, OID_AUTO, double_buffer, CTLFLAG_RW,
274 	   &hammer_double_buffer, 0, "");
275 SYSCTL_INT(_vfs_hammer, OID_AUTO, btree_full_undo, CTLFLAG_RW,
276 	   &hammer_btree_full_undo, 0, "");
277 SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
278 	   &hammer_yield_check, 0, "");
279 SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
280 	   &hammer_fsync_mode, 0, "");
281 
282 /* KTR_INFO_MASTER(hammer); */
283 
284 /*
285  * VFS ABI
286  */
287 static void	hammer_free_hmp(struct mount *mp);
288 
289 static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
290 				struct ucred *cred);
291 static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
292 static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
293 static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
294 				struct ucred *cred);
295 static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
296 				struct ucred *cred);
297 static int	hammer_vfs_sync(struct mount *mp, int waitfor);
298 static int	hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
299 				ino_t ino, struct vnode **vpp);
300 static int	hammer_vfs_init(struct vfsconf *conf);
301 static int	hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
302 				struct fid *fhp, struct vnode **vpp);
303 static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
304 static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
305 				int *exflagsp, struct ucred **credanonp);
306 
307 
308 static struct vfsops hammer_vfsops = {
309 	.vfs_mount	= hammer_vfs_mount,
310 	.vfs_unmount	= hammer_vfs_unmount,
311 	.vfs_root	= hammer_vfs_root,
312 	.vfs_statfs	= hammer_vfs_statfs,
313 	.vfs_statvfs	= hammer_vfs_statvfs,
314 	.vfs_sync	= hammer_vfs_sync,
315 	.vfs_vget	= hammer_vfs_vget,
316 	.vfs_init	= hammer_vfs_init,
317 	.vfs_vptofh	= hammer_vfs_vptofh,
318 	.vfs_fhtovp	= hammer_vfs_fhtovp,
319 	.vfs_checkexp	= hammer_vfs_checkexp
320 };
321 
322 MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");
323 
324 VFS_SET(hammer_vfsops, hammer, 0);
325 MODULE_VERSION(hammer, 1);
326 
327 static int
328 hammer_vfs_init(struct vfsconf *conf)
329 {
330 	long n;
331 
332 	/*
333 	 * Wait up to this long for an exclusive deadlock to clear
334 	 * before acquiring a new shared lock on the ip.  The deadlock
335 	 * may have occurred on a b-tree node related to the ip.
336 	 */
337 	if (hammer_tdmux_ticks == 0)
338 		hammer_tdmux_ticks = hz / 5;
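
	/*
	 * Worked example (illustrative): with the common hz=100 the
	 * default works out to 100/5 = 20 ticks, i.e. roughly a 200ms
	 * wait for an exclusive deadlock to clear.
	 */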
339 
340 	/*
341 	 * Autosize, but be careful because a hammer filesystem's
342 	 * reserve is partially calculated based on dirtybufspace,
343 	 * so we simply cannot allow it to get too large.
344 	 */
345 	if (hammer_limit_recs == 0) {
346 		n = nbuf * 25;
347 		if (n > kmalloc_limit(M_HAMMER) / 512)
348 			n = kmalloc_limit(M_HAMMER) / 512;
349 		if (n > 2 * 1024 * 1024)
350 			n = 2 * 1024 * 1024;
351 		hammer_limit_recs = (int)n;
352 	}
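	/*
	 * Worked example (illustrative): with nbuf = 100000 the initial
	 * estimate is 100000 * 25 = 2.5M records, which the clamps above
	 * then reduce, first to one record per 512 bytes of allowable
	 * kmalloc space and finally to the hard 2M (2*1024*1024) cap.
	 */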
353 	if (hammer_limit_dirtybufspace == 0) {
354 		hammer_limit_dirtybufspace = hidirtybufspace / 2;
355 		if (hammer_limit_dirtybufspace < 1L * 1024 * 1024)
356 			hammer_limit_dirtybufspace = 1024L * 1024;
357 		if (hammer_limit_dirtybufspace > 1024L * 1024 * 1024)
358 			hammer_limit_dirtybufspace = 1024L * 1024 * 1024;
359 	}
360 
361 	/*
362 	 * The hammer_inode structure detaches from the vnode on reclaim.
363 	 * This limits the number of inodes in this state to prevent a
364 	 * memory pool blowout.
365 	 */
366 	if (hammer_limit_reclaims == 0)
367 		hammer_limit_reclaims = desiredvnodes / 10;
368 
369 	return(0);
370 }
371 
372 static int
373 hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
374 		 struct ucred *cred)
375 {
376 	struct hammer_mount_info info;
377 	hammer_mount_t hmp;
378 	hammer_volume_t rootvol;
379 	struct vnode *rootvp;
380 	struct vnode *devvp = NULL;
381 	const char *upath;	/* volume name in userspace */
382 	char *path;		/* volume name in system space */
383 	int error;
384 	int i;
385 	int master_id;
386 	int nvolumes;
387 	char *next_volume_ptr = NULL;
388 
389 	/*
390 	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
391 	 */
392 	if (mntpt == NULL) {
393 		bzero(&info, sizeof(info));
394 		info.asof = 0;
395 		info.hflags = 0;
396 		info.nvolumes = 1;
397 
398 		next_volume_ptr = mp->mnt_stat.f_mntfromname;
399 
400 		/* Count number of volumes separated by ':' */
401 		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
402 			if (*p == ':') {
403 				++info.nvolumes;
404 			}
405 		}
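
		/*
		 * Example (illustrative): a boot-time root mount with
		 * f_mntfromname = "ad0s1d:ad1s1d" contains one ':', so
		 * the loop above leaves info.nvolumes at 2.
		 */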
406 
407 		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
408 	} else {
409 		if ((error = copyin(data, &info, sizeof(info))) != 0)
410 			return (error);
411 	}
412 
413 	/*
414 	 * updating or new mount
415 	 */
416 	if (mp->mnt_flag & MNT_UPDATE) {
417 		hmp = (void *)mp->mnt_data;
418 		KKASSERT(hmp != NULL);
419 	} else {
420 		if (info.nvolumes <= 0 || info.nvolumes > HAMMER_MAX_VOLUMES)
421 			return (EINVAL);
422 		hmp = NULL;
423 	}
424 
425 	/*
426 	 * master-id validation.  The master id may not be changed by a
427 	 * mount update.
428 	 */
429 	if (info.hflags & HMNT_MASTERID) {
430 		if (hmp && hmp->master_id != info.master_id) {
431 			kprintf("HAMMER: cannot change master id "
432 				"with mount update\n");
433 			return(EINVAL);
434 		}
435 		master_id = info.master_id;
436 		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
437 			return (EINVAL);
438 	} else {
439 		if (hmp)
440 			master_id = hmp->master_id;
441 		else
442 			master_id = 0;
443 	}
444 
445 	/*
446 	 * Internal mount data structure
447 	 */
448 	if (hmp == NULL) {
449 		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
450 		mp->mnt_data = (qaddr_t)hmp;
451 		hmp->mp = mp;
452 
453 		/*
454 		 * Make sure kmalloc type limits are set appropriately.
455 		 *
456 		 * Our inode kmalloc group is sized based on maxvnodes
457 		 * (controlled by the system, not us).
458 		 */
459 		kmalloc_create(&hmp->m_misc, "HAMMER-others");
460 		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");
461 
462 		kmalloc_raise_limit(hmp->m_inodes, 0);	/* unlimited */
463 
464 		hmp->root_btree_beg.localization = 0x00000000U;
465 		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
466 		hmp->root_btree_beg.key = -0x8000000000000000LL;
467 		hmp->root_btree_beg.create_tid = 1;
468 		hmp->root_btree_beg.delete_tid = 1;
469 		hmp->root_btree_beg.rec_type = 0;
470 		hmp->root_btree_beg.obj_type = 0;
471 		hmp->root_btree_beg.btype = HAMMER_BTREE_TYPE_NONE;
472 
473 		hmp->root_btree_end.localization = 0xFFFFFFFFU;
474 		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
475 		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
476 		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
477 		hmp->root_btree_end.delete_tid = 0;   /* special case */
478 		hmp->root_btree_end.rec_type = 0xFFFFU;
479 		hmp->root_btree_end.obj_type = 0;
480 		hmp->root_btree_end.btype = HAMMER_BTREE_TYPE_NONE;
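
		/*
		 * Note (illustrative): the two keys above are the minimum
		 * and maximum possible B-Tree keys, so every element in
		 * the filesystem sorts between them.
		 */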
481 
482 		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
483 		hmp->krate.count = -16;	/* initial burst */
484 		hmp->kdiag.freq = 1;	/* maximum reporting rate (hz) */
485 		hmp->kdiag.count = -16;	/* initial burst */
486 
487 		hmp->sync_lock.refs = 1;
488 		hmp->free_lock.refs = 1;
489 		hmp->undo_lock.refs = 1;
490 		hmp->blkmap_lock.refs = 1;
491 		hmp->snapshot_lock.refs = 1;
492 		hmp->volume_lock.refs = 1;
493 
494 		TAILQ_INIT(&hmp->delay_list);
495 		TAILQ_INIT(&hmp->flush_group_list);
496 		TAILQ_INIT(&hmp->objid_cache_list);
497 		TAILQ_INIT(&hmp->undo_lru_list);
498 		TAILQ_INIT(&hmp->reclaim_list);
499 
500 		RB_INIT(&hmp->rb_dedup_crc_root);
501 		RB_INIT(&hmp->rb_dedup_off_root);
502 		TAILQ_INIT(&hmp->dedup_lru_list);
503 	}
504 	hmp->hflags &= ~HMNT_USERFLAGS;
505 	hmp->hflags |= info.hflags & HMNT_USERFLAGS;
506 
507 	hmp->master_id = master_id;
508 
509 	if (info.asof) {
510 		mp->mnt_flag |= MNT_RDONLY;
511 		hmp->asof = info.asof;
512 	} else {
513 		hmp->asof = HAMMER_MAX_TID;
514 	}
515 
516 	hmp->volume_to_remove = -1;
517 
518 	/*
519 	 * Re-open read-write if originally read-only, or vice-versa.
520 	 *
521 	 * When going from read-only to read-write execute the stage2
522 	 * recovery if it has not already been run.
523 	 */
524 	if (mp->mnt_flag & MNT_UPDATE) {
525 		lwkt_gettoken(&hmp->fs_token);
526 		error = 0;
527 		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
528 			kprintf("HAMMER: read-only -> read-write\n");
529 			hmp->ronly = 0;
530 			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
531 				hammer_adjust_volume_mode, NULL);
532 			rootvol = hammer_get_root_volume(hmp, &error);
533 			if (rootvol) {
534 				hammer_recover_flush_buffers(hmp, rootvol, 1);
535 				error = hammer_recover_stage2(hmp, rootvol);
536 				bcopy(rootvol->ondisk->vol0_blockmap,
537 				      hmp->blockmap,
538 				      sizeof(hmp->blockmap));
539 				hammer_rel_volume(rootvol, 0);
540 			}
541 			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
542 				hammer_reload_inode, NULL);
543 			/* kernel clears MNT_RDONLY */
544 		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
545 			kprintf("HAMMER: read-write -> read-only\n");
546 			hmp->ronly = 1;	/* messy */
547 			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
548 				hammer_reload_inode, NULL);
549 			hmp->ronly = 0;
550 			hammer_flusher_sync(hmp);
551 			hammer_flusher_sync(hmp);
552 			hammer_flusher_sync(hmp);
553 			hmp->ronly = 1;
554 			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
555 				hammer_adjust_volume_mode, NULL);
556 		}
557 		lwkt_reltoken(&hmp->fs_token);
558 		return(error);
559 	}
560 
561 	RB_INIT(&hmp->rb_vols_root);
562 	RB_INIT(&hmp->rb_inos_root);
563 	RB_INIT(&hmp->rb_redo_root);
564 	RB_INIT(&hmp->rb_nods_root);
565 	RB_INIT(&hmp->rb_undo_root);
566 	RB_INIT(&hmp->rb_resv_root);
567 	RB_INIT(&hmp->rb_bufs_root);
568 	RB_INIT(&hmp->rb_pfsm_root);
569 
570 	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
571 
572 	RB_INIT(&hmp->volu_root);
573 	RB_INIT(&hmp->undo_root);
574 	RB_INIT(&hmp->data_root);
575 	RB_INIT(&hmp->meta_root);
576 	RB_INIT(&hmp->lose_root);
577 	TAILQ_INIT(&hmp->iorun_list);
578 
579 	lwkt_token_init(&hmp->fs_token, "hammerfs");
580 	lwkt_token_init(&hmp->io_token, "hammerio");
581 
582 	lwkt_gettoken(&hmp->fs_token);
583 
584 	/*
585 	 * Load volumes
586 	 */
587 	path = objcache_get(namei_oc, M_WAITOK);
588 	hmp->nvolumes = -1;
589 	for (i = 0; i < info.nvolumes; ++i) {
590 		if (mntpt == NULL) {
591 			/*
592 			 * Root mount.
593 			 */
594 			KKASSERT(next_volume_ptr != NULL);
595 			strcpy(path, "");
596 			if (*next_volume_ptr != '/') {
597 				/* relative path */
598 				strcpy(path, "/dev/");
599 			}
600 			int k;
601 			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
602 				if (*next_volume_ptr == '\0') {
603 					break;
604 				} else if (*next_volume_ptr == ':') {
605 					++next_volume_ptr;
606 					break;
607 				} else {
608 					path[k] = *next_volume_ptr;
609 					++next_volume_ptr;
610 				}
611 			}
612 			path[k] = '\0';
613 
614 			error = 0;
615 			cdev_t dev = kgetdiskbyname(path);
616 			error = bdevvp(dev, &devvp);
617 			if (error) {
618 				kprintf("hammer_mount: can't find devvp\n");
619 			}
620 		} else {
621 			error = copyin(&info.volumes[i], &upath,
622 				       sizeof(char *));
623 			if (error == 0)
624 				error = copyinstr(upath, path,
625 						  MAXPATHLEN, NULL);
626 		}
627 		if (error == 0)
628 			error = hammer_install_volume(hmp, path, devvp, NULL);
629 		if (error)
630 			break;
631 	}
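	/*
	 * Example (illustrative): for a root mount of "ad0s1d:ad1s1d"
	 * the loop above extracts "ad0s1d" and then "ad1s1d"; neither
	 * starts with '/', so each is prefixed with "/dev/" before the
	 * kgetdiskbyname() lookup.
	 */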
632 	objcache_put(namei_oc, path);
633 
634 	/*
635 	 * Make sure we found a root volume
636 	 */
637 	if (hmp->rootvol == NULL) {
638 		if (error == EBUSY) {
639 			kprintf("hammer_mount: The volumes are probably already mounted\n");
640 		} else {
641 			kprintf("hammer_mount: No root volume found!\n");
642 			error = EINVAL;
643 		}
644 		goto failed;
645 	}
646 
647 	/*
648 	 * Check that all required volumes are available
649 	 */
650 	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
651 		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
652 		error = EINVAL;
653 		goto failed;
654 	}
655 
656 	/*
657 	 * Other errors
658 	 */
659 	if (error) {
660 		kprintf("hammer_mount: Failed to load volumes!\n");
661 		goto failed;
662 	}
663 
664 	nvolumes = hammer_get_installed_volumes(hmp);
665 	if (hmp->nvolumes != nvolumes) {
666 		kprintf("hammer_mount: volume header says %d volumes, "
667 			"but %d installed\n",
668 			hmp->nvolumes, nvolumes);
669 		error = EINVAL;
670 		goto failed;
671 	}
672 
673 	/*
674 	 * No errors, setup enough of the mount point so we can lookup the
675 	 * root vnode.
676 	 */
677 	mp->mnt_iosize_max = MAXPHYS;
678 	mp->mnt_kern_flag |= MNTK_FSMID;
679 	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */
680 
681 	/*
682 	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
683 	 * will acquire a per-mount token prior to entry and release it
684 	 * on return.
685 	 */
686 	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;
687 
688 	/*
689 	 * note: f_iosize is used by vnode_pager_haspage() when constructing
690 	 * its VOP_BMAP call.
691 	 */
692 	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
693 	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;
694 
695 	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
696 	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;
697 
698 	mp->mnt_maxsymlinklen = 255;
699 	mp->mnt_flag |= MNT_LOCAL;
700 
701 	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
702 	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
703 	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);
704 
705 	/*
706 	 * The root volume's ondisk pointer is only valid if we hold a
707 	 * reference to it.
708 	 */
709 	rootvol = hammer_get_root_volume(hmp, &error);
710 	if (error)
711 		goto failed;
712 
713 	/*
714 	 * Perform any necessary UNDO operations.  The recovery code does
715 	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
716 	 * and then re-copy it again after recovery is complete.
717 	 *
718 	 * If this is a read-only mount the UNDO information is retained
719 	 * in memory in the form of dirty buffer cache buffers, and not
720 	 * written back to the media.
721 	 */
722 	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
723 	      sizeof(hmp->blockmap));
724 
725 	/*
726 	 * Check filesystem version
727 	 */
728 	hmp->version = rootvol->ondisk->vol_version;
729 	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
730 	    hmp->version > HAMMER_VOL_VERSION_MAX) {
731 		kprintf("HAMMER: mount unsupported fs version %d\n",
732 			hmp->version);
733 		error = ERANGE;
734 		goto done;
735 	}
736 
737 	/*
738 	 * The undo_rec_limit limits the size of flush groups to avoid
739 	 * blowing out the UNDO FIFO.  This calculation is typically in
740 	 * the tens of thousands and is designed primarily when small
741 	 * HAMMER filesystems are created.
742 	 */
743 	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
744 	if (hammer_debug_general & 0x0001)
745 		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);
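
	/*
	 * Worked example (illustrative): assuming hammer_undo_max()
	 * reports a 512MB UNDO FIFO, the limit computes to
	 * 512*1024*1024 / 8192 + 100 = 65636.
	 */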
746 
747 	/*
748 	 * NOTE: Recover stage1 not only handles meta-data recovery, it
749 	 * 	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
750 	 */
751 	error = hammer_recover_stage1(hmp, rootvol);
752 	if (error) {
753 		kprintf("Failed to recover HAMMER filesystem on mount\n");
754 		goto done;
755 	}
756 
757 	/*
758 	 * Finish setup now that we have a good root volume.
759 	 *
760 	 * The top 16 bits of fsid.val[1] is a pfs id.
761 	 */
762 	ksnprintf(mp->mnt_stat.f_mntfromname,
763 		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
764 		  rootvol->ondisk->vol_name);
765 	mp->mnt_stat.f_fsid.val[0] =
766 		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
767 	mp->mnt_stat.f_fsid.val[1] =
768 		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
769 	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;
770 
771 	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
772 	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
773 				     sizeof(mp->mnt_vstat.f_fsid_uuid));
774 
775 	/*
776 	 * Certain often-modified fields in the root volume are cached in
777 	 * the hammer_mount structure so we do not have to generate lots
778 	 * of little UNDO structures for them.
779 	 *
780 	 * Recopy after recovery.  This also has the side effect of
781 	 * setting our cached undo FIFO's first_offset, which serves to
782 	 * placemark the FIFO start for the NEXT flush cycle while the
783 	 * on-disk first_offset represents the LAST flush cycle.
784 	 */
785 	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
786 	hmp->flush_tid1 = hmp->next_tid;
787 	hmp->flush_tid2 = hmp->next_tid;
788 	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
789 	      sizeof(hmp->blockmap));
790 	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;
791 
792 	hammer_flusher_create(hmp);
793 
794 	/*
795 	 * Locate the root directory with an obj_id of 1.
796 	 */
797 	error = hammer_vfs_vget(mp, NULL, HAMMER_OBJID_ROOT, &rootvp);
798 	if (error)
799 		goto done;
800 	vput(rootvp);
801 	if (hmp->ronly == 0)
802 		error = hammer_recover_stage2(hmp, rootvol);
803 
804 	/*
805 	 * If the stage2 recovery fails be sure to clean out all cached
806 	 * vnodes before throwing away the mount structure or bad things
807 	 * will happen.
808 	 */
809 	if (error)
810 		vflush(mp, 0, 0);
811 
812 done:
813 	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
814 		/* New mount */
815 
816 		/* Populate info for mount point (NULL pad) */
817 		bzero(mp->mnt_stat.f_mntonname, MNAMELEN);
818 		size_t size;
819 		if (mntpt) {
820 			copyinstr(mntpt, mp->mnt_stat.f_mntonname,
821 							MNAMELEN -1, &size);
822 		} else { /* Root mount */
823 			mp->mnt_stat.f_mntonname[0] = '/';
824 		}
825 	}
826 	(void)VFS_STATFS(mp, &mp->mnt_stat, cred);
827 	hammer_rel_volume(rootvol, 0);
828 failed:
829 	/*
830 	 * Cleanup and return.
831 	 */
832 	if (error) {
833 		/* called with fs_token held */
834 		hammer_free_hmp(mp);
835 	} else {
836 		lwkt_reltoken(&hmp->fs_token);
837 	}
838 	return (error);
839 }
840 
841 static int
842 hammer_vfs_unmount(struct mount *mp, int mntflags)
843 {
844 	hammer_mount_t hmp = (void *)mp->mnt_data;
845 	int flags;
846 	int error;
847 
848 	/*
849 	 * Clean out the vnodes
850 	 */
851 	lwkt_gettoken(&hmp->fs_token);
852 	flags = 0;
853 	if (mntflags & MNT_FORCE)
854 		flags |= FORCECLOSE;
855 	error = vflush(mp, 0, flags);
856 
857 	/*
858 	 * Clean up the internal mount structure and related entities.  This
859 	 * may issue I/O.
860 	 */
861 	if (error == 0) {
862 		/* called with fs_token held */
863 		hammer_free_hmp(mp);
864 	} else {
865 		lwkt_reltoken(&hmp->fs_token);
866 	}
867 	return(error);
868 }
869 
870 /*
871  * Clean up the internal mount structure and disassociate it from the mount.
872  * This may issue I/O.
873  *
874  * Called with fs_token held.
875  */
876 static void
877 hammer_free_hmp(struct mount *mp)
878 {
879 	hammer_mount_t hmp = (void *)mp->mnt_data;
880 	hammer_flush_group_t flg;
881 
882 	/*
883 	 * Flush anything dirty.  This won't even run if the
884 	 * filesystem errored-out.
885 	 */
886 	hammer_flush_dirty(hmp, 30);
887 
888 	/*
889 	 * If the mount had a critical error we have to destroy any
890 	 * remaining inodes before we can finish cleaning up the flusher.
891 	 */
892 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
893 		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
894 			hammer_destroy_inode_callback, NULL);
895 	}
896 
897 	/*
898 	 * There shouldn't be any inodes left now and any left over
899 	 * flush groups should now be empty.
900 	 */
901 	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
902 	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
903 		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
904 		KKASSERT(RB_EMPTY(&flg->flush_tree));
905 		if (flg->refs) {
906 			kprintf("HAMMER: Warning, flush_group %p was "
907 				"not empty on umount!\n", flg);
908 		}
909 		kfree(flg, hmp->m_misc);
910 	}
911 
912 	/*
913 	 * We can finally destroy the flusher
914 	 */
915 	hammer_flusher_destroy(hmp);
916 
917 	/*
918 	 * We may have held recovered buffers due to a read-only mount.
919 	 * These must be discarded.
920 	 */
921 	if (hmp->ronly)
922 		hammer_recover_flush_buffers(hmp, NULL, -1);
923 
924 	/*
925 	 * Unload buffers and then volumes
926 	 */
927 	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
928 		hammer_unload_buffer, NULL);
929 	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
930 		hammer_unload_volume, NULL);
931 
932 	mp->mnt_data = NULL;
933 	mp->mnt_flag &= ~MNT_LOCAL;
934 	hmp->mp = NULL;
935 	hammer_destroy_objid_cache(hmp);
936 	hammer_destroy_dedup_cache(hmp);
937 	if (hmp->dedup_free_cache != NULL) {
938 		kfree(hmp->dedup_free_cache, hmp->m_misc);
939 		hmp->dedup_free_cache = NULL;
940 	}
941 	kmalloc_destroy(&hmp->m_misc);
942 	kmalloc_destroy(&hmp->m_inodes);
943 	lwkt_reltoken(&hmp->fs_token);
944 	kfree(hmp, M_HAMMER);
945 }
946 
947 /*
948  * Report critical errors.  ip may be NULL.
949  */
950 void
951 hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
952 		      int error, const char *msg)
953 {
954 	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;
955 
956 	krateprintf(&hmp->krate,
957 		    "HAMMER(%s): Critical error inode=%jd error=%d %s\n",
958 		    hmp->mp->mnt_stat.f_mntfromname,
959 		    (intmax_t)(ip ? ip->obj_id : -1),
960 		    error, msg);
961 
962 	if (hmp->ronly == 0) {
963 		hmp->ronly = 2;		/* special errored read-only mode */
964 		hmp->mp->mnt_flag |= MNT_RDONLY;
965 		RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
966 			hammer_adjust_volume_mode, NULL);
967 		kprintf("HAMMER(%s): Forcing read-only mode\n",
968 			hmp->mp->mnt_stat.f_mntfromname);
969 	}
970 	hmp->error = error;
971 	if (hammer_debug_critical)
972 		Debugger("Entering debugger");
973 }
974 
975 
976 /*
977  * Obtain a vnode for the specified inode number.  An exclusively locked
978  * vnode is returned.
979  */
980 int
981 hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
982 		ino_t ino, struct vnode **vpp)
983 {
984 	struct hammer_transaction trans;
985 	struct hammer_mount *hmp = (void *)mp->mnt_data;
986 	struct hammer_inode *ip;
987 	int error;
988 	u_int32_t localization;
989 
990 	lwkt_gettoken(&hmp->fs_token);
991 	hammer_simple_transaction(&trans, hmp);
992 
993 	/*
994 	 * If a directory vnode is supplied (mainly NFS) then we can acquire
995 	 * the PFS domain from it.  Otherwise we would only be able to vget
996 	 * inodes in the root PFS.
997 	 */
998 	if (dvp) {
999 		localization = HAMMER_DEF_LOCALIZATION +
1000 				VTOI(dvp)->obj_localization;
1001 	} else {
1002 		localization = HAMMER_DEF_LOCALIZATION;
1003 	}
1004 
1005 	/*
1006 	 * Lookup the requested HAMMER inode.  The structure must be
1007 	 * left unlocked while we manipulate the related vnode to avoid
1008 	 * a deadlock.
1009 	 */
1010 	ip = hammer_get_inode(&trans, NULL, ino,
1011 			      hmp->asof, localization,
1012 			      0, &error);
1013 	if (ip == NULL) {
1014 		*vpp = NULL;
1015 	} else {
1016 		error = hammer_get_vnode(ip, vpp);
1017 		hammer_rel_inode(ip, 0);
1018 	}
1019 	hammer_done_transaction(&trans);
1020 	lwkt_reltoken(&hmp->fs_token);
1021 	return (error);
1022 }
1023 
1024 /*
1025  * Return the root vnode for the filesystem.
1026  *
1027  * HAMMER stores the root vnode in the hammer_mount structure so
1028  * getting it is easy.
1029  */
1030 static int
1031 hammer_vfs_root(struct mount *mp, struct vnode **vpp)
1032 {
1033 	int error;
1034 
1035 	error = hammer_vfs_vget(mp, NULL, HAMMER_OBJID_ROOT, vpp);
1036 	return (error);
1037 }
1038 
1039 static int
1040 hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
1041 {
1042 	struct hammer_mount *hmp = (void *)mp->mnt_data;
1043 	hammer_volume_t volume;
1044 	hammer_volume_ondisk_t ondisk;
1045 	int error;
1046 	int64_t bfree;
1047 	int64_t breserved;
1048 
1049 	lwkt_gettoken(&hmp->fs_token);
1050 	volume = hammer_get_root_volume(hmp, &error);
1051 	if (error) {
1052 		lwkt_reltoken(&hmp->fs_token);
1053 		return(error);
1054 	}
1055 	ondisk = volume->ondisk;
1056 
1057 	/*
1058 	 * Basic stats
1059 	 */
1060 	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
1061 	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
1062 	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
1063 	hammer_rel_volume(volume, 0);
1064 
1065 	mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
1066 	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
1067 	if (mp->mnt_stat.f_files < 0)
1068 		mp->mnt_stat.f_files = 0;
1069 
1070 	*sbp = mp->mnt_stat;
1071 	lwkt_reltoken(&hmp->fs_token);
1072 	return(0);
1073 }
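
/*
 * Worked example (illustrative, assuming 8MB big-blocks and a 16KB
 * HAMMER_BUFSIZE): 1000 free big-blocks give bfree = 8192000KB; after
 * subtracting the write reserve the result is reported in 16KB f_bsize
 * units, i.e. roughly 512000 blocks in f_bfree.
 */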
1074 
1075 static int
1076 hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
1077 {
1078 	struct hammer_mount *hmp = (void *)mp->mnt_data;
1079 	hammer_volume_t volume;
1080 	hammer_volume_ondisk_t ondisk;
1081 	int error;
1082 	int64_t bfree;
1083 	int64_t breserved;
1084 
1085 	lwkt_gettoken(&hmp->fs_token);
1086 	volume = hammer_get_root_volume(hmp, &error);
1087 	if (error) {
1088 		lwkt_reltoken(&hmp->fs_token);
1089 		return(error);
1090 	}
1091 	ondisk = volume->ondisk;
1092 
1093 	/*
1094 	 * Basic stats
1095 	 */
1096 	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
1097 	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
1098 	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
1099 	hammer_rel_volume(volume, 0);
1100 
1101 	mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
1102 	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
1103 	if (mp->mnt_vstat.f_files < 0)
1104 		mp->mnt_vstat.f_files = 0;
1105 	*sbp = mp->mnt_vstat;
1106 	lwkt_reltoken(&hmp->fs_token);
1107 	return(0);
1108 }
1109 
1110 /*
1111  * Sync the filesystem.  Currently we have to run it twice, the second
1112  * one will advance the undo start index to the end index, so if a crash
1113  * occurs no undos will be run on mount.
1114  *
1115  * We do not sync the filesystem if we are called from a panic.  If we did
1116  * we might end up blowing up a sync that was already in progress.
1117  */
1118 static int
1119 hammer_vfs_sync(struct mount *mp, int waitfor)
1120 {
1121 	struct hammer_mount *hmp = (void *)mp->mnt_data;
1122 	int error;
1123 
1124 	lwkt_gettoken(&hmp->fs_token);
1125 	if (panicstr == NULL) {
1126 		error = hammer_sync_hmp(hmp, waitfor);
1127 	} else {
1128 		error = EIO;
1129 	}
1130 	lwkt_reltoken(&hmp->fs_token);
1131 	return (error);
1132 }
1133 
1134 /*
1135  * Convert a vnode to a file handle.
1136  *
1137  * Accesses read-only fields on already-referenced structures so
1138  * no token is needed.
1139  */
1140 static int
1141 hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
1142 {
1143 	hammer_inode_t ip;
1144 
1145 	KKASSERT(MAXFIDSZ >= 16);
1146 	ip = VTOI(vp);
1147 	fhp->fid_len = offsetof(struct fid, fid_data[16]);
1148 	fhp->fid_ext = ip->obj_localization >> 16;
1149 	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
1150 	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
1151 	return(0);
1152 }
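
/*
 * Resulting file handle layout (illustrative sketch of the code above):
 *
 *	fid_data[0..7]  - ip->obj_id   (object/inode id)
 *	fid_data[8..15] - ip->obj_asof (as-of TID for snapshot access)
 *	fid_ext         - upper 16 bits of the PFS localization
 *
 * hammer_vfs_fhtovp() below reverses this encoding.
 */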
1153 
1154 
1155 /*
1156  * Convert a file handle back to a vnode.
1157  *
1158  * Use rootvp to enforce PFS isolation when a PFS is exported via a
1159  * null mount.
1160  */
1161 static int
1162 hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
1163 		  struct fid *fhp, struct vnode **vpp)
1164 {
1165 	hammer_mount_t hmp = (void *)mp->mnt_data;
1166 	struct hammer_transaction trans;
1167 	struct hammer_inode *ip;
1168 	struct hammer_inode_info info;
1169 	int error;
1170 	u_int32_t localization;
1171 
1172 	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
1173 	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
1174 	if (rootvp)
1175 		localization = VTOI(rootvp)->obj_localization;
1176 	else
1177 		localization = (u_int32_t)fhp->fid_ext << 16;
1178 
1179 	lwkt_gettoken(&hmp->fs_token);
1180 	hammer_simple_transaction(&trans, hmp);
1181 
1182 	/*
1183 	 * Get/allocate the hammer_inode structure.  The structure must be
1184 	 * unlocked while we manipulate the related vnode to avoid a
1185 	 * deadlock.
1186 	 */
1187 	ip = hammer_get_inode(&trans, NULL, info.obj_id,
1188 			      info.obj_asof, localization, 0, &error);
1189 	if (ip) {
1190 		error = hammer_get_vnode(ip, vpp);
1191 		hammer_rel_inode(ip, 0);
1192 	} else {
1193 		*vpp = NULL;
1194 	}
1195 	hammer_done_transaction(&trans);
1196 	lwkt_reltoken(&hmp->fs_token);
1197 	return (error);
1198 }
1199 
1200 static int
1201 hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
1202 		    int *exflagsp, struct ucred **credanonp)
1203 {
1204 	hammer_mount_t hmp = (void *)mp->mnt_data;
1205 	struct netcred *np;
1206 	int error;
1207 
1208 	lwkt_gettoken(&hmp->fs_token);
1209 	np = vfs_export_lookup(mp, &hmp->export, nam);
1210 	if (np) {
1211 		*exflagsp = np->netc_exflags;
1212 		*credanonp = &np->netc_anon;
1213 		error = 0;
1214 	} else {
1215 		error = EACCES;
1216 	}
1217 	lwkt_reltoken(&hmp->fs_token);
1218 	return (error);
1219 
1220 }
1221 
1222 int
1223 hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
1224 {
1225 	hammer_mount_t hmp = (void *)mp->mnt_data;
1226 	int error;
1227 
1228 	lwkt_gettoken(&hmp->fs_token);
1229 
1230 	switch(op) {
1231 	case MOUNTCTL_SET_EXPORT:
1232 		error = vfs_export(mp, &hmp->export, export);
1233 		break;
1234 	default:
1235 		error = EOPNOTSUPP;
1236 		break;
1237 	}
1238 	lwkt_reltoken(&hmp->fs_token);
1239 
1240 	return(error);
1241 }
1242 
1243