1 /*- 2 * Copyright (c) 2011-2013 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/nlookup.h> 38 #include <sys/vnode.h> 39 #include <sys/mount.h> 40 #include <sys/fcntl.h> 41 #include <sys/buf.h> 42 #include <sys/uuid.h> 43 #include <sys/vfsops.h> 44 #include <sys/sysctl.h> 45 #include <sys/socket.h> 46 47 #include "hammer2.h" 48 #include "hammer2_disk.h" 49 #include "hammer2_mount.h" 50 51 #define REPORT_REFS_ERRORS 1 /* XXX remove me */ 52 53 struct hammer2_sync_info { 54 hammer2_trans_t trans; 55 int error; 56 int waitfor; 57 }; 58 59 TAILQ_HEAD(hammer2_mntlist, hammer2_mount); 60 static struct hammer2_mntlist hammer2_mntlist; 61 static struct lock hammer2_mntlk; 62 63 int hammer2_debug; 64 int hammer2_cluster_enable = 1; 65 int hammer2_hardlink_enable = 1; 66 long hammer2_iod_file_read; 67 long hammer2_iod_meta_read; 68 long hammer2_iod_indr_read; 69 long hammer2_iod_fmap_read; 70 long hammer2_iod_volu_read; 71 long hammer2_iod_file_write; 72 long hammer2_iod_meta_write; 73 long hammer2_iod_indr_write; 74 long hammer2_iod_fmap_write; 75 long hammer2_iod_volu_write; 76 long hammer2_ioa_file_read; 77 long hammer2_ioa_meta_read; 78 long hammer2_ioa_indr_read; 79 long hammer2_ioa_fmap_read; 80 long hammer2_ioa_volu_read; 81 long hammer2_ioa_fmap_write; 82 long hammer2_ioa_file_write; 83 long hammer2_ioa_meta_write; 84 long hammer2_ioa_indr_write; 85 long hammer2_ioa_volu_write; 86 87 SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem"); 88 89 SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW, 90 &hammer2_debug, 0, ""); 91 SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_enable, CTLFLAG_RW, 92 &hammer2_cluster_enable, 0, ""); 93 SYSCTL_INT(_vfs_hammer2, OID_AUTO, hardlink_enable, CTLFLAG_RW, 94 &hammer2_hardlink_enable, 0, ""); 95 96 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW, 97 &hammer2_iod_file_read, 0, ""); 98 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW, 99 &hammer2_iod_meta_read, 0, ""); 100 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW, 101 &hammer2_iod_indr_read, 0, ""); 102 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW, 103 &hammer2_iod_fmap_read, 0, ""); 104 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW, 105 &hammer2_iod_volu_read, 0, ""); 106 107 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW, 108 &hammer2_iod_file_write, 0, ""); 109 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW, 110 &hammer2_iod_meta_write, 0, ""); 111 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW, 112 &hammer2_iod_indr_write, 0, ""); 113 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW, 114 &hammer2_iod_fmap_write, 0, ""); 115 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW, 116 &hammer2_iod_volu_write, 0, ""); 117 118 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_read, CTLFLAG_RW, 119 &hammer2_ioa_file_read, 0, ""); 120 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_read, CTLFLAG_RW, 121 &hammer2_ioa_meta_read, 0, ""); 122 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_read, CTLFLAG_RW, 123 &hammer2_ioa_indr_read, 0, ""); 124 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_read, CTLFLAG_RW, 125 &hammer2_ioa_fmap_read, 0, ""); 126 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_read, CTLFLAG_RW, 127 &hammer2_ioa_volu_read, 0, ""); 128 129 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_file_write, CTLFLAG_RW, 130 &hammer2_ioa_file_write, 0, ""); 131 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_meta_write, CTLFLAG_RW, 132 &hammer2_ioa_meta_write, 0, ""); 133 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_indr_write, CTLFLAG_RW, 134 &hammer2_ioa_indr_write, 0, ""); 135 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_fmap_write, CTLFLAG_RW, 136 &hammer2_ioa_fmap_write, 0, ""); 137 SYSCTL_LONG(_vfs_hammer2, OID_AUTO, ioa_volu_write, CTLFLAG_RW, 138 &hammer2_ioa_volu_write, 0, ""); 139 140 static int hammer2_vfs_init(struct vfsconf *conf); 141 static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data, 142 struct ucred *cred); 143 static int hammer2_remount(struct mount *, char *, struct vnode *, 144 struct ucred *); 145 static int hammer2_vfs_unmount(struct mount *mp, int mntflags); 146 static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp); 147 static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, 148 struct ucred *cred); 149 static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, 150 struct ucred *cred); 151 static int hammer2_vfs_sync(struct mount *mp, int waitfor); 152 static int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp, 153 ino_t ino, struct vnode **vpp); 154 static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp, 155 struct fid *fhp, struct vnode **vpp); 156 static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp); 157 static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam, 158 int *exflagsp, struct ucred **credanonp); 159 160 static int hammer2_install_volume_header(hammer2_mount_t *hmp); 161 static int hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data); 162 static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data); 163 164 static int hammer2_rcvdmsg(kdmsg_msg_t *msg); 165 static void hammer2_autodmsg(kdmsg_msg_t *msg); 166 167 /* 168 * HAMMER2 vfs operations. 169 */ 170 static struct vfsops hammer2_vfsops = { 171 .vfs_init = hammer2_vfs_init, 172 .vfs_sync = hammer2_vfs_sync, 173 .vfs_mount = hammer2_vfs_mount, 174 .vfs_unmount = hammer2_vfs_unmount, 175 .vfs_root = hammer2_vfs_root, 176 .vfs_statfs = hammer2_vfs_statfs, 177 .vfs_statvfs = hammer2_vfs_statvfs, 178 .vfs_vget = hammer2_vfs_vget, 179 .vfs_vptofh = hammer2_vfs_vptofh, 180 .vfs_fhtovp = hammer2_vfs_fhtovp, 181 .vfs_checkexp = hammer2_vfs_checkexp 182 }; 183 184 MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", ""); 185 186 VFS_SET(hammer2_vfsops, hammer2, 0); 187 MODULE_VERSION(hammer2, 1); 188 189 static 190 int 191 hammer2_vfs_init(struct vfsconf *conf) 192 { 193 int error; 194 195 error = 0; 196 197 if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref)) 198 error = EINVAL; 199 if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data)) 200 error = EINVAL; 201 if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data)) 202 error = EINVAL; 203 204 if (error) 205 kprintf("HAMMER2 structure size mismatch; cannot continue.\n"); 206 207 lockinit(&hammer2_mntlk, "mntlk", 0, 0); 208 TAILQ_INIT(&hammer2_mntlist); 209 210 return (error); 211 } 212 213 /* 214 * Mount or remount HAMMER2 fileystem from physical media 215 * 216 * mountroot 217 * mp mount point structure 218 * path NULL 219 * data <unused> 220 * cred <unused> 221 * 222 * mount 223 * mp mount point structure 224 * path path to mount point 225 * data pointer to argument structure in user space 226 * volume volume path (device@LABEL form) 227 * hflags user mount flags 228 * cred user credentials 229 * 230 * RETURNS: 0 Success 231 * !0 error number 232 */ 233 static 234 int 235 hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data, 236 struct ucred *cred) 237 { 238 struct hammer2_mount_info info; 239 hammer2_pfsmount_t *pmp; 240 hammer2_mount_t *hmp; 241 hammer2_key_t lhc; 242 struct vnode *devvp; 243 struct nlookupdata nd; 244 hammer2_chain_t *parent; 245 hammer2_chain_t *schain; 246 hammer2_chain_t *rchain; 247 struct file *fp; 248 char devstr[MNAMELEN]; 249 size_t size; 250 size_t done; 251 char *dev; 252 char *label; 253 int ronly = 1; 254 int error; 255 256 hmp = NULL; 257 pmp = NULL; 258 dev = NULL; 259 label = NULL; 260 devvp = NULL; 261 262 kprintf("hammer2_mount\n"); 263 264 if (path == NULL) { 265 /* 266 * Root mount 267 */ 268 bzero(&info, sizeof(info)); 269 info.cluster_fd = -1; 270 return (EOPNOTSUPP); 271 } else { 272 /* 273 * Non-root mount or updating a mount 274 */ 275 error = copyin(data, &info, sizeof(info)); 276 if (error) 277 return (error); 278 279 error = copyinstr(info.volume, devstr, MNAMELEN - 1, &done); 280 if (error) 281 return (error); 282 283 /* Extract device and label */ 284 dev = devstr; 285 label = strchr(devstr, '@'); 286 if (label == NULL || 287 ((label + 1) - dev) > done) { 288 return (EINVAL); 289 } 290 *label = '\0'; 291 label++; 292 if (*label == '\0') 293 return (EINVAL); 294 295 if (mp->mnt_flag & MNT_UPDATE) { 296 /* Update mount */ 297 /* HAMMER2 implements NFS export via mountctl */ 298 hmp = MPTOHMP(mp); 299 devvp = hmp->devvp; 300 error = hammer2_remount(mp, path, devvp, cred); 301 return error; 302 } 303 } 304 305 /* 306 * PFS mount 307 * 308 * Lookup name and verify it refers to a block device. 309 */ 310 error = nlookup_init(&nd, dev, UIO_SYSSPACE, NLC_FOLLOW); 311 if (error == 0) 312 error = nlookup(&nd); 313 if (error == 0) 314 error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp); 315 nlookup_done(&nd); 316 317 if (error == 0) { 318 if (vn_isdisk(devvp, &error)) 319 error = vfs_mountedon(devvp); 320 } 321 322 /* 323 * Determine if the device has already been mounted. After this 324 * check hmp will be non-NULL if we are doing the second or more 325 * hammer2 mounts from the same device. 326 */ 327 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE); 328 TAILQ_FOREACH(hmp, &hammer2_mntlist, mntentry) { 329 if (hmp->devvp == devvp) 330 break; 331 } 332 333 /* 334 * Open the device if this isn't a secondary mount and construct 335 * the H2 device mount (hmp). 336 */ 337 if (hmp == NULL) { 338 if (error == 0 && vcount(devvp) > 0) 339 error = EBUSY; 340 341 /* 342 * Now open the device 343 */ 344 if (error == 0) { 345 ronly = ((mp->mnt_flag & MNT_RDONLY) != 0); 346 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 347 error = vinvalbuf(devvp, V_SAVE, 0, 0); 348 if (error == 0) { 349 error = VOP_OPEN(devvp, 350 ronly ? FREAD : FREAD | FWRITE, 351 FSCRED, NULL); 352 } 353 vn_unlock(devvp); 354 } 355 if (error && devvp) { 356 vrele(devvp); 357 devvp = NULL; 358 } 359 if (error) { 360 lockmgr(&hammer2_mntlk, LK_RELEASE); 361 return error; 362 } 363 hmp = kmalloc(sizeof(*hmp), M_HAMMER2, M_WAITOK | M_ZERO); 364 hmp->ronly = ronly; 365 hmp->devvp = devvp; 366 kmalloc_create(&hmp->mchain, "HAMMER2-chains"); 367 TAILQ_INSERT_TAIL(&hammer2_mntlist, hmp, mntentry); 368 369 lockinit(&hmp->alloclk, "h2alloc", 0, 0); 370 lockinit(&hmp->voldatalk, "voldata", 0, LK_CANRECURSE); 371 TAILQ_INIT(&hmp->transq); 372 373 /* 374 * vchain setup. vchain.data is embedded. 375 * vchain.refs is initialized and will never drop to 0. 376 */ 377 hmp->vchain.hmp = hmp; 378 hmp->vchain.refs = 1; 379 hmp->vchain.data = (void *)&hmp->voldata; 380 hmp->vchain.bref.type = HAMMER2_BREF_TYPE_VOLUME; 381 hmp->vchain.bref.data_off = 0 | HAMMER2_PBUFRADIX; 382 hmp->vchain.delete_tid = HAMMER2_MAX_TID; 383 hammer2_chain_core_alloc(&hmp->vchain, NULL); 384 /* hmp->vchain.u.xxx is left NULL */ 385 386 /* 387 * fchain setup. fchain.data is embedded. 388 * fchain.refs is initialized and will never drop to 0. 389 * 390 * The data is not used but needs to be initialized to 391 * pass assertion muster. We use this chain primarily 392 * as a placeholder for the freemap's top-level RBTREE 393 * so it does not interfere with the volume's topology 394 * RBTREE. 395 */ 396 hmp->fchain.hmp = hmp; 397 hmp->fchain.refs = 1; 398 hmp->fchain.data = (void *)&hmp->voldata.freemap_blockset; 399 hmp->fchain.bref.type = HAMMER2_BREF_TYPE_FREEMAP; 400 hmp->fchain.bref.data_off = 0 | HAMMER2_PBUFRADIX; 401 hmp->fchain.bref.methods = 402 HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP) | 403 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE); 404 hmp->fchain.delete_tid = HAMMER2_MAX_TID; 405 406 hammer2_chain_core_alloc(&hmp->fchain, NULL); 407 /* hmp->fchain.u.xxx is left NULL */ 408 409 /* 410 * Install the volume header 411 */ 412 error = hammer2_install_volume_header(hmp); 413 if (error) { 414 hammer2_vfs_unmount(mp, MNT_FORCE); 415 return error; 416 } 417 418 /* 419 * First locate the super-root inode, which is key 0 420 * relative to the volume header's blockset. 421 * 422 * Then locate the root inode by scanning the directory keyspace 423 * represented by the label. 424 */ 425 parent = hammer2_chain_lookup_init(&hmp->vchain, 0); 426 schain = hammer2_chain_lookup(&parent, 427 HAMMER2_SROOT_KEY, HAMMER2_SROOT_KEY, 0); 428 hammer2_chain_lookup_done(parent); 429 if (schain == NULL) { 430 kprintf("hammer2_mount: invalid super-root\n"); 431 hammer2_vfs_unmount(mp, MNT_FORCE); 432 return EINVAL; 433 } 434 hammer2_chain_ref(schain); /* for hmp->schain */ 435 hmp->schain = schain; /* left locked for inode_get */ 436 hmp->sroot = hammer2_inode_get(NULL, NULL, schain); 437 hammer2_inode_ref(hmp->sroot); /* for hmp->sroot */ 438 hammer2_inode_unlock_ex(hmp->sroot, schain); 439 schain = NULL; 440 } 441 442 /* 443 * Block device opened successfully, finish initializing the 444 * mount structure. 445 * 446 * From this point on we have to call hammer2_unmount() on failure. 447 */ 448 pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO); 449 pmp->mount_cluster = kmalloc(sizeof(hammer2_cluster_t), M_HAMMER2, 450 M_WAITOK | M_ZERO); 451 pmp->cluster = pmp->mount_cluster; 452 453 kmalloc_create(&pmp->minode, "HAMMER2-inodes"); 454 kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg"); 455 456 pmp->mount_cluster->hmp = hmp; 457 spin_init(&pmp->inum_spin); 458 RB_INIT(&pmp->inum_tree); 459 460 kdmsg_iocom_init(&pmp->iocom, pmp, 461 KDMSG_IOCOMF_AUTOCONN | 462 KDMSG_IOCOMF_AUTOSPAN | 463 KDMSG_IOCOMF_AUTOCIRC, 464 pmp->mmsg, hammer2_rcvdmsg); 465 466 ccms_domain_init(&pmp->ccms_dom); 467 ++hmp->pmp_count; 468 lockmgr(&hammer2_mntlk, LK_RELEASE); 469 kprintf("hammer2_mount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count); 470 471 mp->mnt_flag = MNT_LOCAL; 472 mp->mnt_kern_flag |= MNTK_ALL_MPSAFE; /* all entry pts are SMP */ 473 474 /* 475 * required mount structure initializations 476 */ 477 mp->mnt_stat.f_iosize = HAMMER2_PBUFSIZE; 478 mp->mnt_stat.f_bsize = HAMMER2_PBUFSIZE; 479 480 mp->mnt_vstat.f_frsize = HAMMER2_PBUFSIZE; 481 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE; 482 483 /* 484 * Optional fields 485 */ 486 mp->mnt_iosize_max = MAXPHYS; 487 mp->mnt_data = (qaddr_t)pmp; 488 pmp->mp = mp; 489 490 /* 491 * schain only has 1 ref now for its hmp->schain assignment. 492 * Setup for lookup (which will lock it). 493 */ 494 parent = hammer2_chain_lookup_init(hmp->schain, 0); 495 lhc = hammer2_dirhash(label, strlen(label)); 496 rchain = hammer2_chain_lookup(&parent, 497 lhc, lhc + HAMMER2_DIRHASH_LOMASK, 498 0); 499 while (rchain) { 500 if (rchain->bref.type == HAMMER2_BREF_TYPE_INODE && 501 strcmp(label, rchain->data->ipdata.filename) == 0) { 502 break; 503 } 504 rchain = hammer2_chain_next(&parent, rchain, 505 lhc, lhc + HAMMER2_DIRHASH_LOMASK, 506 0); 507 } 508 hammer2_chain_lookup_done(parent); 509 if (rchain == NULL) { 510 kprintf("hammer2_mount: PFS label not found\n"); 511 hammer2_vfs_unmount(mp, MNT_FORCE); 512 return EINVAL; 513 } 514 if (rchain->flags & HAMMER2_CHAIN_MOUNTED) { 515 hammer2_chain_unlock(rchain); 516 kprintf("hammer2_mount: PFS label already mounted!\n"); 517 hammer2_vfs_unmount(mp, MNT_FORCE); 518 return EBUSY; 519 } 520 if (rchain->flags & HAMMER2_CHAIN_RECYCLE) { 521 kprintf("hammer2_mount: PFS label currently recycling\n"); 522 hammer2_vfs_unmount(mp, MNT_FORCE); 523 return EBUSY; 524 } 525 526 atomic_set_int(&rchain->flags, HAMMER2_CHAIN_MOUNTED); 527 528 /* 529 * NOTE: *_get() integrates chain's lock into the inode lock. 530 */ 531 hammer2_chain_ref(rchain); /* for pmp->rchain */ 532 pmp->mount_cluster->rchain = rchain; /* left held & unlocked */ 533 pmp->iroot = hammer2_inode_get(pmp, NULL, rchain); 534 hammer2_inode_ref(pmp->iroot); /* ref for pmp->iroot */ 535 hammer2_inode_unlock_ex(pmp->iroot, rchain); 536 537 kprintf("iroot %p\n", pmp->iroot); 538 539 /* 540 * Ref the cluster management messaging descriptor. The mount 541 * program deals with the other end of the communications pipe. 542 */ 543 fp = holdfp(curproc->p_fd, info.cluster_fd, -1); 544 if (fp == NULL) { 545 kprintf("hammer2_mount: bad cluster_fd!\n"); 546 hammer2_vfs_unmount(mp, MNT_FORCE); 547 return EBADF; 548 } 549 hammer2_cluster_reconnect(pmp, fp); 550 551 /* 552 * Finish setup 553 */ 554 vfs_getnewfsid(mp); 555 vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops); 556 vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops); 557 vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops); 558 559 copyinstr(info.volume, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size); 560 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); 561 bzero(mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname)); 562 copyinstr(path, mp->mnt_stat.f_mntonname, 563 sizeof(mp->mnt_stat.f_mntonname) - 1, 564 &size); 565 566 /* 567 * Initial statfs to prime mnt_stat. 568 */ 569 hammer2_vfs_statfs(mp, &mp->mnt_stat, cred); 570 571 return 0; 572 } 573 574 static 575 int 576 hammer2_remount(struct mount *mp, char *path, struct vnode *devvp, 577 struct ucred *cred) 578 { 579 return (0); 580 } 581 582 static 583 int 584 hammer2_vfs_unmount(struct mount *mp, int mntflags) 585 { 586 hammer2_pfsmount_t *pmp; 587 hammer2_mount_t *hmp; 588 hammer2_cluster_t *cluster; 589 int flags; 590 int error = 0; 591 int ronly = ((mp->mnt_flag & MNT_RDONLY) != 0); 592 int dumpcnt; 593 struct vnode *devvp; 594 595 pmp = MPTOPMP(mp); 596 cluster = pmp->mount_cluster; 597 hmp = cluster->hmp; 598 flags = 0; 599 600 if (mntflags & MNT_FORCE) 601 flags |= FORCECLOSE; 602 603 hammer2_mount_exlock(hmp); 604 605 /* 606 * If mount initialization proceeded far enough we must flush 607 * its vnodes. 608 */ 609 if (pmp->iroot) 610 error = vflush(mp, 0, flags); 611 612 if (error) { 613 hammer2_mount_unlock(hmp); 614 return error; 615 } 616 617 lockmgr(&hammer2_mntlk, LK_EXCLUSIVE); 618 --hmp->pmp_count; 619 kprintf("hammer2_unmount hmp=%p pmpcnt=%d\n", hmp, hmp->pmp_count); 620 621 /* 622 * Flush any left over chains. The voldata lock is only used 623 * to synchronize against HAMMER2_CHAIN_MODIFIED_AUX. 624 */ 625 hammer2_voldata_lock(hmp); 626 if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED | 627 HAMMER2_CHAIN_SUBMODIFIED)) { 628 hammer2_voldata_unlock(hmp, 0); 629 hammer2_vfs_sync(mp, MNT_WAIT); 630 } else { 631 hammer2_voldata_unlock(hmp, 0); 632 } 633 if (hmp->pmp_count == 0) { 634 if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED | 635 HAMMER2_CHAIN_SUBMODIFIED)) { 636 kprintf("hammer2_unmount: chains left over after " 637 "final sync\n"); 638 if (hammer2_debug & 0x0010) 639 Debugger("entered debugger"); 640 } 641 } 642 643 /* 644 * Cleanup the root and super-root chain elements (which should be 645 * clean). 646 */ 647 if (pmp->iroot) { 648 #if REPORT_REFS_ERRORS 649 if (pmp->iroot->refs != 1) 650 kprintf("PMP->IROOT %p REFS WRONG %d\n", 651 pmp->iroot, pmp->iroot->refs); 652 #else 653 KKASSERT(pmp->iroot->refs == 1); 654 #endif 655 hammer2_inode_drop(pmp->iroot); /* ref for pmp->iroot */ 656 pmp->iroot = NULL; 657 } 658 if (cluster->rchain) { 659 atomic_clear_int(&cluster->rchain->flags, 660 HAMMER2_CHAIN_MOUNTED); 661 #if REPORT_REFS_ERRORS 662 if (cluster->rchain->refs != 1) 663 kprintf("PMP->RCHAIN %p REFS WRONG %d\n", 664 cluster->rchain, cluster->rchain->refs); 665 #else 666 KKASSERT(cluster->rchain->refs == 1); 667 #endif 668 hammer2_chain_drop(cluster->rchain); 669 cluster->rchain = NULL; 670 } 671 ccms_domain_uninit(&pmp->ccms_dom); 672 673 /* 674 * Kill cluster controller 675 */ 676 kdmsg_iocom_uninit(&pmp->iocom); 677 678 /* 679 * If no PFS's left drop the master hammer2_mount for the device. 680 */ 681 if (hmp->pmp_count == 0) { 682 if (hmp->sroot) { 683 hammer2_inode_drop(hmp->sroot); 684 hmp->sroot = NULL; 685 } 686 if (hmp->schain) { 687 #if REPORT_REFS_ERRORS 688 if (hmp->schain->refs != 1) 689 kprintf("HMP->SCHAIN %p REFS WRONG %d\n", 690 hmp->schain, hmp->schain->refs); 691 #else 692 KKASSERT(hmp->schain->refs == 1); 693 #endif 694 hammer2_chain_drop(hmp->schain); 695 hmp->schain = NULL; 696 } 697 698 /* 699 * Finish up with the device vnode 700 */ 701 if ((devvp = hmp->devvp) != NULL) { 702 vinvalbuf(devvp, (ronly ? 0 : V_SAVE), 0, 0); 703 hmp->devvp = NULL; 704 VOP_CLOSE(devvp, (ronly ? FREAD : FREAD|FWRITE)); 705 vrele(devvp); 706 devvp = NULL; 707 } 708 709 /* 710 * Final drop of embedded freemap root chain to clean up 711 * fchain.core (fchain structure is not flagged ALLOCATED 712 * so it is cleaned out and then left to rot). 713 */ 714 hammer2_chain_drop(&hmp->fchain); 715 716 /* 717 * Final drop of embedded volume root chain to clean up 718 * vchain.core (vchain structure is not flagged ALLOCATED 719 * so it is cleaned out and then left to rot). 720 */ 721 dumpcnt = 50; 722 hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt); 723 hammer2_mount_unlock(hmp); 724 hammer2_chain_drop(&hmp->vchain); 725 } else { 726 hammer2_mount_unlock(hmp); 727 } 728 729 pmp->mp = NULL; 730 mp->mnt_data = NULL; 731 732 pmp->mount_cluster = NULL; 733 pmp->cluster = NULL; /* XXX */ 734 735 kmalloc_destroy(&pmp->mmsg); 736 kmalloc_destroy(&pmp->minode); 737 738 cluster->hmp = NULL; 739 740 kfree(cluster, M_HAMMER2); 741 kfree(pmp, M_HAMMER2); 742 if (hmp->pmp_count == 0) { 743 TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry); 744 kmalloc_destroy(&hmp->mchain); 745 kfree(hmp, M_HAMMER2); 746 } 747 lockmgr(&hammer2_mntlk, LK_RELEASE); 748 749 return (error); 750 } 751 752 static 753 int 754 hammer2_vfs_vget(struct mount *mp, struct vnode *dvp, 755 ino_t ino, struct vnode **vpp) 756 { 757 kprintf("hammer2_vget\n"); 758 return (EOPNOTSUPP); 759 } 760 761 static 762 int 763 hammer2_vfs_root(struct mount *mp, struct vnode **vpp) 764 { 765 hammer2_pfsmount_t *pmp; 766 hammer2_chain_t *parent; 767 int error; 768 struct vnode *vp; 769 770 pmp = MPTOPMP(mp); 771 if (pmp->iroot == NULL) { 772 *vpp = NULL; 773 error = EINVAL; 774 } else { 775 parent = hammer2_inode_lock_sh(pmp->iroot); 776 vp = hammer2_igetv(pmp->iroot, &error); 777 hammer2_inode_unlock_sh(pmp->iroot, parent); 778 *vpp = vp; 779 if (vp == NULL) 780 kprintf("vnodefail\n"); 781 } 782 783 return (error); 784 } 785 786 /* 787 * Filesystem status 788 * 789 * XXX incorporate ipdata->inode_quota and data_quota 790 */ 791 static 792 int 793 hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred) 794 { 795 hammer2_pfsmount_t *pmp; 796 hammer2_mount_t *hmp; 797 798 pmp = MPTOPMP(mp); 799 hmp = MPTOHMP(mp); 800 801 mp->mnt_stat.f_files = pmp->inode_count; 802 mp->mnt_stat.f_ffree = 0; 803 mp->mnt_stat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE; 804 mp->mnt_stat.f_bfree = (hmp->voldata.allocator_size - 805 hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE; 806 mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree; 807 808 *sbp = mp->mnt_stat; 809 return (0); 810 } 811 812 static 813 int 814 hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred) 815 { 816 hammer2_pfsmount_t *pmp; 817 hammer2_mount_t *hmp; 818 819 pmp = MPTOPMP(mp); 820 hmp = MPTOHMP(mp); 821 822 mp->mnt_vstat.f_bsize = HAMMER2_PBUFSIZE; 823 mp->mnt_vstat.f_files = pmp->inode_count; 824 mp->mnt_vstat.f_ffree = 0; 825 mp->mnt_vstat.f_blocks = hmp->voldata.allocator_size / HAMMER2_PBUFSIZE; 826 mp->mnt_vstat.f_bfree = (hmp->voldata.allocator_size - 827 hmp->voldata.allocator_beg) / HAMMER2_PBUFSIZE; 828 mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree; 829 830 *sbp = mp->mnt_vstat; 831 return (0); 832 } 833 834 /* 835 * Sync the entire filesystem; this is called from the filesystem syncer 836 * process periodically and whenever a user calls sync(1) on the hammer 837 * mountpoint. 838 * 839 * Currently is actually called from the syncer! \o/ 840 * 841 * This task will have to snapshot the state of the dirty inode chain. 842 * From that, it will have to make sure all of the inodes on the dirty 843 * chain have IO initiated. We make sure that io is initiated for the root 844 * block. 845 * 846 * If waitfor is set, we wait for media to acknowledge the new rootblock. 847 * 848 * THINKS: side A vs side B, to have sync not stall all I/O? 849 */ 850 static 851 int 852 hammer2_vfs_sync(struct mount *mp, int waitfor) 853 { 854 struct hammer2_sync_info info; 855 hammer2_pfsmount_t *pmp; 856 hammer2_cluster_t *cluster; 857 hammer2_mount_t *hmp; 858 int flags; 859 int error; 860 int i; 861 862 pmp = MPTOPMP(mp); 863 864 flags = VMSC_GETVP; 865 if (waitfor & MNT_LAZY) 866 flags |= VMSC_ONEPASS; 867 868 hammer2_trans_init(&info.trans, pmp, HAMMER2_TRANS_ISFLUSH); 869 870 info.error = 0; 871 info.waitfor = MNT_NOWAIT; 872 vmntvnodescan(mp, flags | VMSC_NOWAIT, 873 hammer2_sync_scan1, 874 hammer2_sync_scan2, &info); 875 if (info.error == 0 && (waitfor & MNT_WAIT)) { 876 info.waitfor = waitfor; 877 vmntvnodescan(mp, flags, 878 hammer2_sync_scan1, 879 hammer2_sync_scan2, &info); 880 881 } 882 #if 0 883 if (waitfor == MNT_WAIT) { 884 /* XXX */ 885 } else { 886 /* XXX */ 887 } 888 #endif 889 890 cluster = pmp->cluster; 891 hmp = cluster->hmp; 892 893 hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS); 894 if (hmp->vchain.flags & (HAMMER2_CHAIN_MODIFIED | 895 HAMMER2_CHAIN_SUBMODIFIED)) { 896 hammer2_chain_flush(&info.trans, &hmp->vchain); 897 } 898 hammer2_chain_unlock(&hmp->vchain); 899 900 #if 0 901 /* 902 * Rollup flush. The fsyncs above basically just flushed 903 * data blocks. The flush below gets all the meta-data. 904 */ 905 hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS); 906 if (hmp->fchain.flags & (HAMMER2_CHAIN_MODIFIED | 907 HAMMER2_CHAIN_SUBMODIFIED)) { 908 /* this will modify vchain as a side effect */ 909 hammer2_chain_flush(&info.trans, &hmp->fchain); 910 } 911 hammer2_chain_unlock(&hmp->fchain); 912 #endif 913 914 915 error = 0; 916 917 /* 918 * We can't safely flush the volume header until we have 919 * flushed any device buffers which have built up. 920 * 921 * XXX this isn't being incremental 922 */ 923 vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY); 924 error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0); 925 vn_unlock(hmp->devvp); 926 927 /* 928 * The flush code sets CHAIN_VOLUMESYNC to indicate that the 929 * volume header needs synchronization via hmp->volsync. 930 * 931 * XXX synchronize the flag & data with only this flush XXX 932 */ 933 if (error == 0 && (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) { 934 struct buf *bp; 935 936 /* 937 * Synchronize the disk before flushing the volume 938 * header. 939 */ 940 bp = getpbuf(NULL); 941 bp->b_bio1.bio_offset = 0; 942 bp->b_bufsize = 0; 943 bp->b_bcount = 0; 944 bp->b_cmd = BUF_CMD_FLUSH; 945 bp->b_bio1.bio_done = biodone_sync; 946 bp->b_bio1.bio_flags |= BIO_SYNC; 947 vn_strategy(hmp->devvp, &bp->b_bio1); 948 biowait(&bp->b_bio1, "h2vol"); 949 relpbuf(bp, NULL); 950 951 /* 952 * Then we can safely flush the version of the volume header 953 * synchronized by the flush code. 954 */ 955 i = hmp->volhdrno + 1; 956 if (i >= HAMMER2_NUM_VOLHDRS) 957 i = 0; 958 if (i * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE > 959 hmp->volsync.volu_size) { 960 i = 0; 961 } 962 kprintf("sync volhdr %d %jd\n", 963 i, (intmax_t)hmp->volsync.volu_size); 964 bp = getblk(hmp->devvp, i * HAMMER2_ZONE_BYTES64, 965 HAMMER2_PBUFSIZE, 0, 0); 966 atomic_clear_int(&hmp->vchain.flags, HAMMER2_CHAIN_VOLUMESYNC); 967 bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE); 968 bawrite(bp); 969 hmp->volhdrno = i; 970 } 971 hammer2_trans_done(&info.trans); 972 return (error); 973 } 974 975 /* 976 * Sync passes. 977 * 978 * NOTE: We don't test SUBMODIFIED or MOVED here because the fsync code 979 * won't flush on those flags. The syncer code above will do a 980 * general meta-data flush globally that will catch these flags. 981 */ 982 static int 983 hammer2_sync_scan1(struct mount *mp, struct vnode *vp, void *data) 984 { 985 hammer2_inode_t *ip; 986 987 ip = VTOI(vp); 988 if (vp->v_type == VNON || ip == NULL || 989 ((ip->flags & HAMMER2_INODE_MODIFIED) == 0 && 990 RB_EMPTY(&vp->v_rbdirty_tree))) { 991 return(-1); 992 } 993 return(0); 994 } 995 996 static int 997 hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data) 998 { 999 struct hammer2_sync_info *info = data; 1000 hammer2_inode_t *ip; 1001 hammer2_chain_t *parent; 1002 int error; 1003 1004 ip = VTOI(vp); 1005 if (vp->v_type == VNON || vp->v_type == VBAD || 1006 ((ip->flags & HAMMER2_INODE_MODIFIED) == 0 && 1007 RB_EMPTY(&vp->v_rbdirty_tree))) { 1008 return(0); 1009 } 1010 1011 /* 1012 * VOP_FSYNC will start a new transaction so replicate some code 1013 * here to do it inline (see hammer2_vop_fsync()). 1014 */ 1015 parent = hammer2_inode_lock_ex(ip); 1016 atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED); 1017 if (ip->vp) 1018 vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL); 1019 hammer2_chain_flush(&info->trans, parent); 1020 hammer2_inode_unlock_ex(ip, parent); 1021 error = 0; 1022 #if 0 1023 error = VOP_FSYNC(vp, MNT_NOWAIT, 0); 1024 #endif 1025 if (error) 1026 info->error = error; 1027 return(0); 1028 } 1029 1030 static 1031 int 1032 hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp) 1033 { 1034 return (0); 1035 } 1036 1037 static 1038 int 1039 hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp, 1040 struct fid *fhp, struct vnode **vpp) 1041 { 1042 return (0); 1043 } 1044 1045 static 1046 int 1047 hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam, 1048 int *exflagsp, struct ucred **credanonp) 1049 { 1050 return (0); 1051 } 1052 1053 /* 1054 * Support code for hammer2_mount(). Read, verify, and install the volume 1055 * header into the HMP 1056 * 1057 * XXX read four volhdrs and use the one with the highest TID whos CRC 1058 * matches. 1059 * 1060 * XXX check iCRCs. 1061 * 1062 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to 1063 * nonexistant locations. 1064 * 1065 * XXX Record selected volhdr and ring updates to each of 4 volhdrs 1066 */ 1067 static 1068 int 1069 hammer2_install_volume_header(hammer2_mount_t *hmp) 1070 { 1071 hammer2_volume_data_t *vd; 1072 struct buf *bp; 1073 hammer2_crc32_t crc0, crc, bcrc0, bcrc; 1074 int error_reported; 1075 int error; 1076 int valid; 1077 int i; 1078 1079 error_reported = 0; 1080 error = 0; 1081 valid = 0; 1082 bp = NULL; 1083 1084 /* 1085 * There are up to 4 copies of the volume header (syncs iterate 1086 * between them so there is no single master). We don't trust the 1087 * volu_size field so we don't know precisely how large the filesystem 1088 * is, so depend on the OS to return an error if we go beyond the 1089 * block device's EOF. 1090 */ 1091 for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) { 1092 error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64, 1093 HAMMER2_VOLUME_BYTES, &bp); 1094 if (error) { 1095 brelse(bp); 1096 bp = NULL; 1097 continue; 1098 } 1099 1100 vd = (struct hammer2_volume_data *) bp->b_data; 1101 if ((vd->magic != HAMMER2_VOLUME_ID_HBO) && 1102 (vd->magic != HAMMER2_VOLUME_ID_ABO)) { 1103 brelse(bp); 1104 bp = NULL; 1105 continue; 1106 } 1107 1108 if (vd->magic == HAMMER2_VOLUME_ID_ABO) { 1109 /* XXX: Reversed-endianness filesystem */ 1110 kprintf("hammer2: reverse-endian filesystem detected"); 1111 brelse(bp); 1112 bp = NULL; 1113 continue; 1114 } 1115 1116 crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0]; 1117 crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF, 1118 HAMMER2_VOLUME_ICRC0_SIZE); 1119 bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1]; 1120 bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF, 1121 HAMMER2_VOLUME_ICRC1_SIZE); 1122 if ((crc0 != crc) || (bcrc0 != bcrc)) { 1123 kprintf("hammer2 volume header crc " 1124 "mismatch copy #%d %08x/%08x\n", 1125 i, crc0, crc); 1126 error_reported = 1; 1127 brelse(bp); 1128 bp = NULL; 1129 continue; 1130 } 1131 if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) { 1132 valid = 1; 1133 hmp->voldata = *vd; 1134 hmp->volhdrno = i; 1135 } 1136 brelse(bp); 1137 bp = NULL; 1138 } 1139 if (valid) { 1140 hmp->volsync = hmp->voldata; 1141 error = 0; 1142 if (error_reported || bootverbose || 1) { /* 1/DEBUG */ 1143 kprintf("hammer2: using volume header #%d\n", 1144 hmp->volhdrno); 1145 } 1146 } else { 1147 error = EINVAL; 1148 kprintf("hammer2: no valid volume headers found!\n"); 1149 } 1150 return (error); 1151 } 1152 1153 /* 1154 * Reconnect using the passed file pointer. The caller must ref the 1155 * fp for us. 1156 */ 1157 void 1158 hammer2_cluster_reconnect(hammer2_pfsmount_t *pmp, struct file *fp) 1159 { 1160 hammer2_inode_data_t *ipdata; 1161 hammer2_chain_t *parent; 1162 hammer2_mount_t *hmp; 1163 size_t name_len; 1164 1165 hmp = pmp->mount_cluster->hmp; 1166 1167 /* 1168 * Closes old comm descriptor, kills threads, cleans up 1169 * states, then installs the new descriptor and creates 1170 * new threads. 1171 */ 1172 kdmsg_iocom_reconnect(&pmp->iocom, fp, "hammer2"); 1173 1174 /* 1175 * Setup LNK_CONN fields for autoinitiated state machine 1176 */ 1177 parent = hammer2_inode_lock_ex(pmp->iroot); 1178 ipdata = &parent->data->ipdata; 1179 pmp->iocom.auto_lnk_conn.pfs_clid = ipdata->pfs_clid; 1180 pmp->iocom.auto_lnk_conn.pfs_fsid = ipdata->pfs_fsid; 1181 pmp->iocom.auto_lnk_conn.pfs_type = ipdata->pfs_type; 1182 pmp->iocom.auto_lnk_conn.proto_version = DMSG_SPAN_PROTO_1; 1183 pmp->iocom.auto_lnk_conn.peer_type = hmp->voldata.peer_type; 1184 1185 /* 1186 * Filter adjustment. Clients do not need visibility into other 1187 * clients (otherwise millions of clients would present a serious 1188 * problem). The fs_label also serves to restrict the namespace. 1189 */ 1190 pmp->iocom.auto_lnk_conn.peer_mask = 1LLU << HAMMER2_PEER_HAMMER2; 1191 pmp->iocom.auto_lnk_conn.pfs_mask = (uint64_t)-1; 1192 switch (ipdata->pfs_type) { 1193 case DMSG_PFSTYPE_CLIENT: 1194 pmp->iocom.auto_lnk_conn.peer_mask &= 1195 ~(1LLU << DMSG_PFSTYPE_CLIENT); 1196 break; 1197 default: 1198 break; 1199 } 1200 1201 name_len = ipdata->name_len; 1202 if (name_len >= sizeof(pmp->iocom.auto_lnk_conn.fs_label)) 1203 name_len = sizeof(pmp->iocom.auto_lnk_conn.fs_label) - 1; 1204 bcopy(ipdata->filename, 1205 pmp->iocom.auto_lnk_conn.fs_label, 1206 name_len); 1207 pmp->iocom.auto_lnk_conn.fs_label[name_len] = 0; 1208 1209 /* 1210 * Setup LNK_SPAN fields for autoinitiated state machine 1211 */ 1212 pmp->iocom.auto_lnk_span.pfs_clid = ipdata->pfs_clid; 1213 pmp->iocom.auto_lnk_span.pfs_fsid = ipdata->pfs_fsid; 1214 pmp->iocom.auto_lnk_span.pfs_type = ipdata->pfs_type; 1215 pmp->iocom.auto_lnk_span.peer_type = hmp->voldata.peer_type; 1216 pmp->iocom.auto_lnk_span.proto_version = DMSG_SPAN_PROTO_1; 1217 name_len = ipdata->name_len; 1218 if (name_len >= sizeof(pmp->iocom.auto_lnk_span.fs_label)) 1219 name_len = sizeof(pmp->iocom.auto_lnk_span.fs_label) - 1; 1220 bcopy(ipdata->filename, 1221 pmp->iocom.auto_lnk_span.fs_label, 1222 name_len); 1223 pmp->iocom.auto_lnk_span.fs_label[name_len] = 0; 1224 hammer2_inode_unlock_ex(pmp->iroot, parent); 1225 1226 kdmsg_iocom_autoinitiate(&pmp->iocom, hammer2_autodmsg); 1227 } 1228 1229 static int 1230 hammer2_rcvdmsg(kdmsg_msg_t *msg) 1231 { 1232 switch(msg->any.head.cmd & DMSGF_TRANSMASK) { 1233 case DMSG_DBG_SHELL: 1234 /* 1235 * (non-transaction) 1236 * Execute shell command (not supported atm) 1237 */ 1238 kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP); 1239 break; 1240 case DMSG_DBG_SHELL | DMSGF_REPLY: 1241 /* 1242 * (non-transaction) 1243 */ 1244 if (msg->aux_data) { 1245 msg->aux_data[msg->aux_size - 1] = 0; 1246 kprintf("HAMMER2 DBG: %s\n", msg->aux_data); 1247 } 1248 break; 1249 default: 1250 /* 1251 * Unsupported message received. We only need to 1252 * reply if it's a transaction in order to close our end. 1253 * Ignore any one-way messages are any further messages 1254 * associated with the transaction. 1255 * 1256 * NOTE: This case also includes DMSG_LNK_ERROR messages 1257 * which might be one-way, replying to those would 1258 * cause an infinite ping-pong. 1259 */ 1260 if (msg->any.head.cmd & DMSGF_CREATE) 1261 kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP); 1262 break; 1263 } 1264 return(0); 1265 } 1266 1267 /* 1268 * This function is called after KDMSG has automatically handled processing 1269 * of a LNK layer message (typically CONN, SPAN, or CIRC). 1270 * 1271 * We tag off the LNK_CONN to trigger our LNK_VOLCONF messages which 1272 * advertises all available hammer2 super-root volumes. 1273 */ 1274 static void 1275 hammer2_autodmsg(kdmsg_msg_t *msg) 1276 { 1277 hammer2_pfsmount_t *pmp = msg->iocom->handle; 1278 hammer2_mount_t *hmp = pmp->mount_cluster->hmp; 1279 int copyid; 1280 1281 /* 1282 * We only care about replies to our LNK_CONN auto-request. kdmsg 1283 * has already processed the reply, we use this calback as a shim 1284 * to know when we can advertise available super-root volumes. 1285 */ 1286 if ((msg->any.head.cmd & DMSGF_TRANSMASK) != 1287 (DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_REPLY) || 1288 msg->state == NULL) { 1289 return; 1290 } 1291 1292 kprintf("LNK_CONN REPLY RECEIVED CMD %08x\n", msg->any.head.cmd); 1293 1294 if (msg->any.head.cmd & DMSGF_CREATE) { 1295 kprintf("HAMMER2: VOLDATA DUMP\n"); 1296 1297 /* 1298 * Dump the configuration stored in the volume header 1299 */ 1300 hammer2_voldata_lock(hmp); 1301 for (copyid = 0; copyid < HAMMER2_COPYID_COUNT; ++copyid) { 1302 if (hmp->voldata.copyinfo[copyid].copyid == 0) 1303 continue; 1304 hammer2_volconf_update(pmp, copyid); 1305 } 1306 hammer2_voldata_unlock(hmp, 0); 1307 } 1308 if ((msg->any.head.cmd & DMSGF_DELETE) && 1309 msg->state && (msg->state->txcmd & DMSGF_DELETE) == 0) { 1310 kprintf("HAMMER2: CONN WAS TERMINATED\n"); 1311 } 1312 } 1313 1314 /* 1315 * Volume configuration updates are passed onto the userland service 1316 * daemon via the open LNK_CONN transaction. 1317 */ 1318 void 1319 hammer2_volconf_update(hammer2_pfsmount_t *pmp, int index) 1320 { 1321 hammer2_mount_t *hmp = pmp->mount_cluster->hmp; 1322 kdmsg_msg_t *msg; 1323 1324 /* XXX interlock against connection state termination */ 1325 kprintf("volconf update %p\n", pmp->iocom.conn_state); 1326 if (pmp->iocom.conn_state) { 1327 kprintf("TRANSMIT VOLCONF VIA OPEN CONN TRANSACTION\n"); 1328 msg = kdmsg_msg_alloc_state(pmp->iocom.conn_state, 1329 DMSG_LNK_VOLCONF, NULL, NULL); 1330 msg->any.lnk_volconf.copy = hmp->voldata.copyinfo[index]; 1331 msg->any.lnk_volconf.mediaid = hmp->voldata.fsid; 1332 msg->any.lnk_volconf.index = index; 1333 kdmsg_msg_write(msg); 1334 } 1335 } 1336 1337 void 1338 hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp) 1339 { 1340 hammer2_chain_t *scan; 1341 1342 --*countp; 1343 if (*countp == 0) { 1344 kprintf("%*.*s...\n", tab, tab, ""); 1345 return; 1346 } 1347 if (*countp < 0) 1348 return; 1349 kprintf("%*.*schain[%d] %p.%d [%08x][core=%p] (%s) dl=%p dt=%s refs=%d", 1350 tab, tab, "", 1351 chain->index, chain, chain->bref.type, chain->flags, 1352 chain->core, 1353 ((chain->bref.type == HAMMER2_BREF_TYPE_INODE && 1354 chain->data) ? (char *)chain->data->ipdata.filename : "?"), 1355 chain->next_parent, 1356 (chain->delete_tid == HAMMER2_MAX_TID ? "max" : "fls"), 1357 chain->refs); 1358 if (chain->core == NULL || RB_EMPTY(&chain->core->rbtree)) 1359 kprintf("\n"); 1360 else 1361 kprintf(" {\n"); 1362 RB_FOREACH(scan, hammer2_chain_tree, &chain->core->rbtree) { 1363 hammer2_dump_chain(scan, tab + 4, countp); 1364 } 1365 if (chain->core && !RB_EMPTY(&chain->core->rbtree)) { 1366 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data) 1367 kprintf("%*.*s}(%s)\n", tab, tab, "", 1368 chain->data->ipdata.filename); 1369 else 1370 kprintf("%*.*s}\n", tab, tab, ""); 1371 } 1372 } 1373