/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.46 2008/06/12 00:16:10 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"

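/*
 * Global debugging knobs, statistics counters, and resource limits.
 * Most of these are exported below through the vfs.hammer sysctl tree.
 */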
int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug;
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_debug_write_release;		/* if 1 release buffer on strategy */
int hammer_debug_cluster_enable = 1;	/* enable read clustering by default */
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaiming;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int hammer_count_dirtybufs;		/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
int hammer_count_io_running_read;
int hammer_count_io_running_write;
int hammer_count_io_locked;
int hammer_stats_btree_iterations;
int hammer_stats_record_iterations;
int hammer_limit_dirtybufs;		/* per-mount */
int hammer_limit_irecs;			/* per-inode */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_iqueued;		/* per-mount */
int hammer_bio_count;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
	   &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
	   &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_write_release, CTLFLAG_RW,
	   &hammer_debug_write_release, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_cluster_enable, CTLFLAG_RW,
	   &hammer_debug_cluster_enable, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufs, CTLFLAG_RW,
	   &hammer_limit_dirtybufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_irecs, CTLFLAG_RW,
	   &hammer_limit_irecs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
	   &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_iqueued, CTLFLAG_RW,
	   &hammer_limit_iqueued, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
	   &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
	   &hammer_count_reclaiming, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufs, CTLFLAG_RD,
	   &hammer_count_dirtybufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
	   &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
	   &hammer_count_reservations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
	   &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
	   &hammer_count_io_locked, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
	   &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");

/*
 * VFS ABI
 */
static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, ino_t ino,
				struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp,
				struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);


static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root 	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_statvfs	= hammer_vfs_statvfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "hammer-mount", "hammer mount");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

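/*
 * VFS initialization.  Derive default values for the in-memory record,
 * dirty buffer, and inode queue limits from the system's buffer and
 * vnode resources when they have not been overridden via sysctl.
 */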
static int
hammer_vfs_init(struct vfsconf *conf)
{
	if (hammer_limit_irecs == 0)
		hammer_limit_irecs = nbuf * 8;
	if (hammer_limit_recs == 0)		/* XXX TODO */
		hammer_limit_recs = nbuf * 25;
	if (hammer_limit_dirtybufs == 0) {
		hammer_limit_dirtybufs = hidirtybuffers / 2;
		if (hammer_limit_dirtybufs < 100)
			hammer_limit_dirtybufs = 100;
	}
	if (hammer_limit_iqueued == 0)
		hammer_limit_iqueued = desiredvnodes / 5;
	return(0);
}

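/*
 * Mount or update a HAMMER filesystem.  The mount information passed in
 * from userland names the volumes making up the filesystem; a non-zero
 * as-of transaction id forces a read-only, historical mount.
 */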
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;

	if ((error = copyin(data, &info, sizeof(info))) != 0)
		return (error);
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		if (info.nvolumes <= 0 || info.nvolumes >= 32768)
			return (EINVAL);
	}

	/*
	 * Internal mount data structure
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		hmp->zbuf = kmalloc(HAMMER_BUFSIZE, M_HAMMER, M_WAITOK|M_ZERO);
		hmp->namekey_iterator = mycpu->gd_time_seconds;
		/*TAILQ_INIT(&hmp->recycle_list);*/

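		/*
		 * The root B-Tree begin and end keys below bound the
		 * entire key space addressable in this filesystem.
		 */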
		hmp->root_btree_beg.localization = HAMMER_MIN_LOCALIZATION;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = HAMMER_MAX_LOCALIZATION;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;   /* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;

		TAILQ_INIT(&hmp->flush_list);
		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);

		/*
		 * Set default zone limits.  This value can be reduced
		 * further by the zone limit specified in the root volume.
		 *
		 * The sysctl can force a small zone limit for debugging
		 * purposes.
		 */
		for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
			hmp->zone_limits[i] =
				HAMMER_ZONE_ENCODE(i, HAMMER_ZONE_LIMIT);

			if (hammer_zone_limit) {
				hmp->zone_limits[i] =
				    HAMMER_ZONE_ENCODE(i, hammer_zone_limit);
			}
			hammer_init_holes(hmp, &hmp->holes[i]);
		}
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;
	if (info.asof) {
		kprintf("ASOF\n");
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	/*
	 * Re-open read-write if originally read-only, or vice versa.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		return(error);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = info.nvolumes;
	for (i = 0; i < info.nvolumes; ++i) {
		error = copyin(&info.volumes[i], &upath, sizeof(char *));
		if (error == 0)
			error = copyinstr(upath, path, MAXPATHLEN, NULL);
		if (error == 0)
			error = hammer_install_volume(hmp, path);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}
	if (error) {
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	error = hammer_recover(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	/*
	 * Use the zone limit set by newfs_hammer, or the zone limit set by
	 * sysctl (for debugging), whichever is smaller.
	 */
	if (rootvol->ondisk->vol0_zone_limit) {
		hammer_off_t vol0_zone_limit;

		vol0_zone_limit = rootvol->ondisk->vol0_zone_limit;
		for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
			if (hmp->zone_limits[i] > vol0_zone_limit)
				hmp->zone_limits[i] = vol0_zone_limit;
		}
	}

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/

done:
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error)
		hammer_free_hmp(mp);
	return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, 0, flags)) != 0)
		return (error);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	hammer_free_hmp(mp);
	return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int i;

#if 0
	/*
	 * Clean up the root vnode
	 */
	if (hmp->rootvp) {
		vrele(hmp->rootvp);
		hmp->rootvp = NULL;
	}
#endif
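	/*
	 * The kprintf() markers below (X1-X12) trace progress through
	 * the teardown sequence for debugging purposes.
	 */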
	kprintf("X1");
	hammer_flusher_sync(hmp);
	kprintf("X2");
	hammer_flusher_sync(hmp);
	kprintf("X3");
	hammer_flusher_destroy(hmp);
	kprintf("X4");

	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));

#if 0
	/*
	 * Unload & flush inodes
	 *
	 * XXX illegal to call this from here, it can only be done from
	 * the flusher.
	 */
	RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
		hammer_unload_inode, (void *)MNT_WAIT);

	/*
	 * Unload & flush volumes
	 */
#endif
	/*
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	kprintf("X5");
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);
	kprintf("X6");

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	kprintf("X7");
	hammer_destroy_objid_cache(hmp);
	kprintf("X8");
	kfree(hmp->zbuf, M_HAMMER);
	kprintf("X9");
	kprintf("X10");

	for (i = 0; i < HAMMER_MAX_ZONES; ++i)
		hammer_free_holes(hmp, &hmp->holes[i]);
	kprintf("X11");

	kfree(hmp, M_HAMMER);
	kprintf("X12");
}

/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;

	hammer_simple_transaction(&trans, hmp);

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino, hmp->asof, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * The root directory always uses obj_id 1, so we simply obtain its
 * vnode via hammer_vfs_vget().
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int error;

	error = hammer_vfs_vget(mp, 1, vpp);
	return (error);
}

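/*
 * Report filesystem statistics.  Free space is derived from the count
 * of free big-blocks maintained in the root volume header, expressed
 * in units of HAMMER_BUFSIZE blocks.
 */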
static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_stat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	return(0);
}

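/*
 * Report extended filesystem statistics (statvfs), computed the same
 * way as in hammer_vfs_statfs() above.
 */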
static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_vstat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;
	return(0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice; the second
 * pass advances the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
		if (error == 0)
			error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	return (error);
}

/*
 * Convert a vnode to a file handle.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);
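	/*
	 * The opaque file handle data encodes the 64-bit object id at
	 * offset 0 and the as-of transaction id at offset 8, for a
	 * total of 16 bytes.
	 */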
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_reserved = 0;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return(0);
}


/*
 * Convert a file handle back to a vnode.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;

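	/*
	 * Extract the object id and as-of transaction id from the file
	 * handle, matching the layout generated by hammer_vfs_vptofh().
	 */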
	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));

	hammer_simple_transaction(&trans, (void *)mp->mnt_data);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id, info.obj_asof,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

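/*
 * Check an NFS export request.  Look the client up in the export list
 * configured for this mount and return the matching export flags and
 * anonymous credentials, or EACCES if no entry matches.
 */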
static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return (error);

}

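/*
 * Configure NFS export options for this mount.  Only the
 * MOUNTCTL_SET_EXPORT operation is currently supported.
 */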
int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	switch(op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return(error);
}