/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Ioctl Functions.
 *
 * WARNING! The ioctl functions which manipulate the connection state need
 *	    to be able to run without deadlock on the volume's chain lock.
 *	    Most of these functions use a separate lock.
 */

#include "hammer2.h"

static int hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_debug_dump(hammer2_inode_t *ip);
//static int hammer2_ioctl_inode_comp_set(hammer2_inode_t *ip, void *data);
//static int hammer2_ioctl_inode_comp_rec_set(hammer2_inode_t *ip, void *data);
//static int hammer2_ioctl_inode_comp_rec_set2(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data);

int
hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data, int fflag,
	      struct ucred *cred)
{
	int error;

	/*
	 * Standard root cred checks, will be selectively ignored below
	 * for ioctls that do not require root creds.
	 */
	error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);

	switch(com) {
	case HAMMER2IOC_VERSION_GET:
		error = hammer2_ioctl_version_get(ip, data);
		break;
	case HAMMER2IOC_RECLUSTER:
		if (error == 0)
			error = hammer2_ioctl_recluster(ip, data);
		break;
	case HAMMER2IOC_REMOTE_SCAN:
		if (error == 0)
			error = hammer2_ioctl_remote_scan(ip, data);
		break;
	case HAMMER2IOC_REMOTE_ADD:
		if (error == 0)
			error = hammer2_ioctl_remote_add(ip, data);
		break;
	case HAMMER2IOC_REMOTE_DEL:
		if (error == 0)
			error = hammer2_ioctl_remote_del(ip, data);
		break;
	case HAMMER2IOC_REMOTE_REP:
		if (error == 0)
			error = hammer2_ioctl_remote_rep(ip, data);
		break;
	case HAMMER2IOC_SOCKET_GET:
		if (error == 0)
			error = hammer2_ioctl_socket_get(ip, data);
		break;
	case HAMMER2IOC_SOCKET_SET:
		if (error == 0)
			error = hammer2_ioctl_socket_set(ip, data);
		break;
	case HAMMER2IOC_PFS_GET:
		if (error == 0)
			error = hammer2_ioctl_pfs_get(ip, data);
		break;
	case HAMMER2IOC_PFS_LOOKUP:
		if (error == 0)
			error = hammer2_ioctl_pfs_lookup(ip, data);
		break;
	case HAMMER2IOC_PFS_CREATE:
		if (error == 0)
			error = hammer2_ioctl_pfs_create(ip, data);
		break;
	case HAMMER2IOC_PFS_DELETE:
		if (error == 0)
			error = hammer2_ioctl_pfs_delete(ip, data);
		break;
	case HAMMER2IOC_PFS_SNAPSHOT:
		if (error == 0)
			error = hammer2_ioctl_pfs_snapshot(ip, data);
		break;
	case HAMMER2IOC_INODE_GET:
		error = hammer2_ioctl_inode_get(ip, data);
		break;
	case HAMMER2IOC_INODE_SET:
		if (error == 0)
			error = hammer2_ioctl_inode_set(ip, data);
		break;
	case HAMMER2IOC_BULKFREE_SCAN:
		error = hammer2_ioctl_bulkfree_scan(ip, data);
		break;
	case HAMMER2IOC_BULKFREE_ASYNC:
		error = hammer2_ioctl_bulkfree_scan(ip, NULL);
		break;
	/*case HAMMER2IOC_INODE_COMP_SET:
		error = hammer2_ioctl_inode_comp_set(ip, data);
		break;
	case HAMMER2IOC_INODE_COMP_REC_SET:
		error = hammer2_ioctl_inode_comp_rec_set(ip, data);
		break;
	case HAMMER2IOC_INODE_COMP_REC_SET2:
		error = hammer2_ioctl_inode_comp_rec_set2(ip, data);
		break;*/
	case HAMMER2IOC_DESTROY:
		if (error == 0)
			error = hammer2_ioctl_destroy(ip, data);
		break;
	case HAMMER2IOC_DEBUG_DUMP:
		error = hammer2_ioctl_debug_dump(ip);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

/*
 * Retrieve version and basic info
 */
static int
hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_version_t *version = data;
	hammer2_dev_t *hmp;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp)
		version->version = hmp->voldata.version;
	else
		version->version = -1;
	return 0;
}
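/*
 * Illustrative userland sketch (guarded by #if 0, not part of the kernel
 * build): how a hammer2(8)-style tool might reach the dispatcher above
 * through ioctl(2).  HAMMER2IOC_VERSION_GET is one of the ioctls that does
 * not require the PRIV_HAMMER_IOCTL check to pass; the fd may be any file
 * or directory on a HAMMER2 mount.  The header path and mount point are
 * assumptions used only for the example.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <vfs/hammer2/hammer2_ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	hammer2_ioc_version_t version;
	int fd;

	fd = open("/mnt", O_RDONLY);		/* any path on the mount */
	if (fd < 0)
		err(1, "open");
	if (ioctl(fd, HAMMER2IOC_VERSION_GET, &version) < 0)
		err(1, "HAMMER2IOC_VERSION_GET");
	printf("hammer2 volume version %d\n", version.version);
	close(fd);
	return 0;
}
#endif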
static int
hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_recluster_t *recl = data;
	struct vnode *vproot;
	struct file *fp;
	hammer2_cluster_t *cluster;
	int error;

	fp = holdfp(curproc->p_fd, recl->fd, -1);
	if (fp) {
		error = VFS_ROOT(ip->pmp->mp, &vproot);
		if (error == 0) {
			cluster = &ip->pmp->iroot->cluster;
			kprintf("reconnect to cluster: nc=%d focus=%p\n",
				cluster->nchains, cluster->focus);
			if (cluster->nchains != 1 || cluster->focus == NULL) {
				kprintf("not a local device mount\n");
				error = EINVAL;
			} else {
				hammer2_cluster_reconnect(cluster->focus->hmp,
							  fp);
				kprintf("ok\n");
				error = 0;
			}
			vput(vproot);
		}
	} else {
		error = EINVAL;
	}
	return error;
}

/*
 * Retrieve information about a remote
 */
static int
hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data)
{
	hammer2_dev_t *hmp;
	hammer2_ioc_remote_t *remote = data;
	int copyid = remote->copyid;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	remote->copy1 = hmp->voldata.copyinfo[copyid];
	hammer2_voldata_unlock(hmp);

	/*
	 * Adjust nextid (GET only)
	 */
	while (++copyid < HAMMER2_COPYID_COUNT &&
	       hmp->voldata.copyinfo[copyid].copyid == 0) {
		;
	}
	if (copyid == HAMMER2_COPYID_COUNT)
		remote->nextid = -1;
	else
		remote->nextid = copyid;

	return(0);
}

/*
 * Add new remote entry
 */
static int
hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;
	int error = 0;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	if (copyid < 0) {
		for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
			if (hmp->voldata.copyinfo[copyid].copyid == 0)
				break;
		}
		if (copyid == HAMMER2_COPYID_COUNT) {
			error = ENOSPC;
			goto failed;
		}
	}
	hammer2_voldata_modify(hmp);
	remote->copy1.copyid = copyid;
	hmp->voldata.copyinfo[copyid] = remote->copy1;
	hammer2_volconf_update(hmp, copyid);
failed:
	hammer2_voldata_unlock(hmp);
	return (error);
}

/*
 * Delete existing remote entry
 */
static int
hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;
	int error = 0;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);
	remote->copy1.path[sizeof(remote->copy1.path) - 1] = 0;
	hammer2_voldata_lock(hmp);
	if (copyid < 0) {
		for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
			if (hmp->voldata.copyinfo[copyid].copyid == 0)
				continue;
			if (strcmp(remote->copy1.path,
			    hmp->voldata.copyinfo[copyid].path) == 0) {
				break;
			}
		}
		if (copyid == HAMMER2_COPYID_COUNT) {
			error = ENOENT;
			goto failed;
		}
	}
	hammer2_voldata_modify(hmp);
	hmp->voldata.copyinfo[copyid].copyid = 0;
	hammer2_volconf_update(hmp, copyid);
failed:
	hammer2_voldata_unlock(hmp);
	return (error);
}

/*
 * Replace existing remote entry
 */
static int
hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	hammer2_voldata_modify(hmp);
	/*hammer2_volconf_update(hmp, copyid);*/
	hammer2_voldata_unlock(hmp);

	return(0);
}

/*
 * Retrieve communications socket
 */
static int
hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data)
{
	return (EOPNOTSUPP);
}

/*
 * Set communications socket for connection
 */
static int
hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	hammer2_voldata_unlock(hmp);

	return(0);
}

/*
 * Used to scan and retrieve PFS information.  PFS's are directories under
 * the super-root.
 *
 * To scan PFSs pass name_key=0.  The function will scan for the next
 * PFS and set all fields, as well as set name_next to the next key.
 * When no PFSs remain, name_next is set to (hammer2_key_t)-1.
 *
 * To retrieve a particular PFS by key, specify the key but note that
 * the ioctl will return the lowest key >= specified_key, so the caller
 * must verify the key.
 *
 * To retrieve the PFS associated with the file descriptor, pass
 * name_key set to (hammer2_key_t)-1.
 */
static int
hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_dev_t *hmp;
	hammer2_ioc_pfs_t *pfs;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_key_t save_key;
	int error;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	save_key = pfs->name_key;
	error = 0;

	/*
	 * Setup
	 */
	if (save_key == (hammer2_key_t)-1) {
		hammer2_inode_lock(ip->pmp->iroot, 0);
		parent = NULL;
		chain = hammer2_inode_chain(ip->pmp->iroot, 0,
					    HAMMER2_RESOLVE_ALWAYS |
					    HAMMER2_RESOLVE_SHARED);
	} else {
		hammer2_inode_lock(hmp->spmp->iroot, 0);
		parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
					     HAMMER2_RESOLVE_ALWAYS |
					     HAMMER2_RESOLVE_SHARED);
		chain = hammer2_chain_lookup(&parent, &key_next,
					     pfs->name_key, HAMMER2_KEY_MAX,
					     &error,
					     HAMMER2_LOOKUP_SHARED);
	}

	/*
	 * Locate next PFS
	 */
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
			break;
		if (parent == NULL) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
			chain = NULL;
			break;
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error,
					   HAMMER2_LOOKUP_SHARED);
	}
	error = hammer2_error_to_errno(error);

	/*
	 * Load the data being returned by the ioctl.
	 */
	if (chain && chain->error == 0) {
		ripdata = &chain->data->ipdata;
		pfs->name_key = ripdata->meta.name_key;
		pfs->pfs_type = ripdata->meta.pfs_type;
		pfs->pfs_subtype = ripdata->meta.pfs_subtype;
		pfs->pfs_clid = ripdata->meta.pfs_clid;
		pfs->pfs_fsid = ripdata->meta.pfs_fsid;
		KKASSERT(ripdata->meta.name_len < sizeof(pfs->name));
		bcopy(ripdata->filename, pfs->name, ripdata->meta.name_len);
		pfs->name[ripdata->meta.name_len] = 0;
		ripdata = NULL;	/* safety */

		/*
		 * Calculate name_next, if any.  We are only accessing
		 * chain->bref so we can ignore chain->error (if the key
		 * is used later it will error then).
		 */
		if (parent == NULL) {
			pfs->name_next = (hammer2_key_t)-1;
		} else {
			chain = hammer2_chain_next(&parent, chain, &key_next,
						   key_next, HAMMER2_KEY_MAX,
						   &error,
						   HAMMER2_LOOKUP_SHARED);
			if (chain)
				pfs->name_next = chain->bref.key;
			else
				pfs->name_next = (hammer2_key_t)-1;
		}
	} else {
		pfs->name_next = (hammer2_key_t)-1;
		error = ENOENT;
	}

	/*
	 * Cleanup
	 */
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (save_key == (hammer2_key_t)-1) {
		hammer2_inode_unlock(ip->pmp->iroot);
	} else {
		hammer2_inode_unlock(hmp->spmp->iroot);
	}

	return (error);
}
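/*
 * Illustrative userland sketch of the scan protocol documented above
 * hammer2_ioctl_pfs_get() (guarded by #if 0, not part of the kernel
 * build): start with name_key = 0 and feed name_next back in until it
 * comes back as (hammer2_key_t)-1.  HAMMER2IOC_PFS_GET is gated by the
 * PRIV_HAMMER_IOCTL check in the dispatcher, so this needs root.  Header
 * path and mount point are assumptions used only for the example.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <vfs/hammer2/hammer2_ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	hammer2_ioc_pfs_t pfs;
	int fd;

	fd = open("/mnt", O_RDONLY);	/* any file on a HAMMER2 mount */
	if (fd < 0)
		err(1, "open");

	memset(&pfs, 0, sizeof(pfs));
	pfs.name_key = 0;		/* start of scan */
	while (pfs.name_key != (hammer2_key_t)-1) {
		if (ioctl(fd, HAMMER2IOC_PFS_GET, &pfs) < 0)
			break;		/* ENOENT when no PFSs remain */
		printf("PFS %-32s type=%d\n", pfs.name, pfs.pfs_type);
		pfs.name_key = pfs.name_next;	/* -1 terminates the loop */
	}
	close(fd);
	return 0;
}
#endif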
/*
 * Find a specific PFS by name
 */
static int
hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_dev_t *hmp;
	hammer2_ioc_pfs_t *pfs;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_key_t lhc;
	int error;
	size_t len;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	error = 0;

	hammer2_inode_lock(hmp->spmp->iroot, HAMMER2_RESOLVE_SHARED);
	parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);

	pfs->name[sizeof(pfs->name) - 1] = 0;
	len = strlen(pfs->name);
	lhc = hammer2_dirhash(pfs->name, len);

	chain = hammer2_chain_lookup(&parent, &key_next,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     &error, HAMMER2_LOOKUP_SHARED);
	while (chain) {
		if (hammer2_chain_dirent_test(chain, pfs->name, len))
			break;
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next,
					   lhc + HAMMER2_DIRHASH_LOMASK,
					   &error, HAMMER2_LOOKUP_SHARED);
	}
	error = hammer2_error_to_errno(error);

	/*
	 * Load the data being returned by the ioctl.
	 */
	if (chain && chain->error == 0) {
		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INODE);
		ripdata = &chain->data->ipdata;
		pfs->name_key = ripdata->meta.name_key;
		pfs->pfs_type = ripdata->meta.pfs_type;
		pfs->pfs_subtype = ripdata->meta.pfs_subtype;
		pfs->pfs_clid = ripdata->meta.pfs_clid;
		pfs->pfs_fsid = ripdata->meta.pfs_fsid;
		ripdata = NULL;

		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else if (error == 0) {
		error = ENOENT;
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(hmp->spmp->iroot);

	return (error);
}

/*
 * Create a new PFS under the super-root
 */
static int
hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data)
{
	hammer2_inode_data_t *nipdata;
	hammer2_chain_t *nchain;
	hammer2_dev_t *hmp;
	hammer2_dev_t *force_local;
	hammer2_ioc_pfs_t *pfs;
	hammer2_inode_t *nip;
	hammer2_tid_t mtid;
	int error;

	hmp = ip->pmp->pfs_hmps[0];	/* XXX */
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	nip = NULL;

	if (pfs->name[0] == 0)
		return(EINVAL);
	pfs->name[sizeof(pfs->name) - 1] = 0;	/* ensure 0-termination */

	if (hammer2_ioctl_pfs_lookup(ip, pfs) == 0)
		return(EEXIST);

	hammer2_trans_init(hmp->spmp, 0);
	mtid = hammer2_trans_sub(hmp->spmp);
	nip = hammer2_inode_create(hmp->spmp->iroot, hmp->spmp->iroot,
				   NULL, NULL,
				   pfs->name, strlen(pfs->name), 0,
				   1, HAMMER2_OBJTYPE_DIRECTORY, 0,
				   HAMMER2_INSERT_PFSROOT, &error);
	if (error == 0) {
		nip->flags |= HAMMER2_INODE_NOSIDEQ;
		hammer2_inode_modify(nip);
		nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_modify(nchain, mtid, 0, 0);
		nipdata = &nchain->data->ipdata;

		nip->meta.pfs_type = pfs->pfs_type;
		nip->meta.pfs_subtype = pfs->pfs_subtype;
		nip->meta.pfs_clid = pfs->pfs_clid;
		nip->meta.pfs_fsid = pfs->pfs_fsid;
		nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;

		/*
		 * Set default compression and check algorithm.  This
		 * can be changed later.
		 *
		 * Do not allow compression on PFS's with the special name
		 * "boot", the boot loader can't decompress (yet).
		 */
		nip->meta.comp_algo =
			HAMMER2_ENC_ALGO(HAMMER2_COMP_NEWFS_DEFAULT);
		nip->meta.check_algo =
			HAMMER2_ENC_ALGO(HAMMER2_CHECK_XXHASH64);

		if (strcasecmp(pfs->name, "boot") == 0) {
			nip->meta.comp_algo =
				HAMMER2_ENC_ALGO(HAMMER2_COMP_AUTOZERO);
		}

		/*
		 * Super-root isn't mounted, fsync it
		 */
		hammer2_chain_unlock(nchain);
		hammer2_inode_ref(nip);
		hammer2_inode_unlock(nip);
		hammer2_inode_chain_sync(nip);
		KKASSERT(nip->refs == 1);
		hammer2_inode_drop(nip);

		/*
		 * We still have a ref on the chain, relock and associate
		 * with an appropriate PFS.
		 */
		force_local = (hmp->hflags & HMNT2_LOCAL) ?
			      hmp : NULL;

		hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
		nipdata = &nchain->data->ipdata;
		kprintf("ADD LOCAL PFS (IOCTL): %s\n", nipdata->filename);
		hammer2_pfsalloc(nchain, nipdata,
				 nchain->bref.modify_tid, force_local);

		hammer2_chain_unlock(nchain);
		hammer2_chain_drop(nchain);

	}
	hammer2_trans_done(hmp->spmp);

	return (error);
}

/*
 * Destroy an existing PFS under the super-root
 */
static int
hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_pfs_t *pfs = data;
	hammer2_dev_t *hmp;
	hammer2_pfs_t *spmp;
	hammer2_pfs_t *pmp;
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *iroot;
	int error;
	int i;

	/*
	 * The PFS should be probed, so we should be able to
	 * locate it.  We only delete the PFS from the
	 * specific H2 block device (hmp), not all of
	 * them.  We must remove the PFS from the cluster
	 * before we can destroy it.
	 */
	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs->name[sizeof(pfs->name) - 1] = 0;	/* ensure termination */

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] != hmp)
				continue;
			if (pmp->pfs_names[i] &&
			    strcmp(pmp->pfs_names[i], pfs->name) == 0) {
				break;
			}
		}
		if (i != HAMMER2_MAXCLUSTER)
			break;
	}

	if (pmp == NULL) {
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		return ENOENT;
	}

	/*
	 * Ok, we found the pmp and we have the index.  Permanently remove
	 * the PFS from the cluster
	 */
	iroot = pmp->iroot;
	kprintf("FOUND PFS %s CLINDEX %d\n", pfs->name, i);
	hammer2_pfsdealloc(pmp, i, 1);

	lockmgr(&hammer2_mntlk, LK_RELEASE);

	/*
	 * Now destroy the PFS under its device using the per-device
	 * super-root.
	 */
	spmp = hmp->spmp;
	dip = spmp->iroot;
	hammer2_trans_init(spmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, pfs->name, strlen(pfs->name));
	xop->isdir = 2;
	xop->dopermanent = H2DOPERM_PERMANENT | H2DOPERM_FORCE;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_inode_unlock(dip);

#if 0
	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, 0);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}
#endif
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	hammer2_trans_done(spmp);

	return (hammer2_error_to_errno(error));
}

static int
hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_pfs_t *pfs = data;
	hammer2_dev_t *hmp;
	hammer2_pfs_t *pmp;
	hammer2_chain_t *chain;
	hammer2_tid_t mtid;
	int error;

	if (pfs->name[0] == 0)
		return(EINVAL);
	if (pfs->name[sizeof(pfs->name)-1] != 0)
		return(EINVAL);

	pmp = ip->pmp;
	ip = pmp->iroot;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	lockmgr(&hmp->bulklk, LK_EXCLUSIVE);

	hammer2_vfs_sync(pmp->mp, MNT_WAIT);

	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
	mtid = hammer2_trans_sub(pmp);
	hammer2_inode_lock(ip, 0);
	hammer2_inode_modify(ip);
	ip->meta.pfs_lsnap_tid = mtid;

	/* XXX cluster it! */
	chain = hammer2_inode_chain(ip, 0, HAMMER2_RESOLVE_ALWAYS);
	error = hammer2_chain_snapshot(chain, pfs, mtid);
	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);

	hammer2_inode_unlock(ip);
	hammer2_trans_done(pmp);

	lockmgr(&hmp->bulklk, LK_RELEASE);

	return (hammer2_error_to_errno(error));
}

/*
 * Retrieve the raw inode structure, non-inclusive of node-specific data.
 */
static int
hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_inode_t *ino;
	hammer2_chain_t *chain;
	int error;
	int i;

	ino = data;
	error = 0;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	ino->data_count = 0;
	ino->inode_count = 0;
	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (ino->data_count <
			    chain->bref.embed.stats.data_count) {
				ino->data_count =
					chain->bref.embed.stats.data_count;
			}
			if (ino->inode_count <
			    chain->bref.embed.stats.inode_count) {
				ino->inode_count =
					chain->bref.embed.stats.inode_count;
			}
		}
	}
	bzero(&ino->ip_data, sizeof(ino->ip_data));
	ino->ip_data.meta = ip->meta;
	ino->kdata = ip;
	hammer2_inode_unlock(ip);

	return hammer2_error_to_errno(error);
}

/*
 * Set various parameters in an inode which cannot be set through
 * normal filesystem VNOPS.
 */
static int
hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_inode_t *ino = data;
	int error = 0;

	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);

	if ((ino->flags & HAMMER2IOC_INODE_FLAG_CHECK) &&
	    ip->meta.check_algo != ino->ip_data.meta.check_algo) {
		hammer2_inode_modify(ip);
		ip->meta.check_algo = ino->ip_data.meta.check_algo;
	}
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_COMP) &&
	    ip->meta.comp_algo != ino->ip_data.meta.comp_algo) {
		hammer2_inode_modify(ip);
		ip->meta.comp_algo = ino->ip_data.meta.comp_algo;
	}
	ino->kdata = ip;

	/* Ignore these flags for now...*/
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_IQUOTA) &&
	    ip->meta.inode_quota != ino->ip_data.meta.inode_quota) {
		hammer2_inode_modify(ip);
		ip->meta.inode_quota = ino->ip_data.meta.inode_quota;
	}
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_DQUOTA) &&
	    ip->meta.data_quota != ino->ip_data.meta.data_quota) {
		hammer2_inode_modify(ip);
		ip->meta.data_quota = ino->ip_data.meta.data_quota;
	}
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_COPIES) &&
	    ip->meta.ncopies != ino->ip_data.meta.ncopies) {
		hammer2_inode_modify(ip);
		ip->meta.ncopies = ino->ip_data.meta.ncopies;
	}
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);

	return (hammer2_error_to_errno(error));
}
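/*
 * Illustrative userland sketch (guarded by #if 0, not part of the kernel
 * build): driving HAMMER2IOC_INODE_GET/SET above to switch a file's check
 * algorithm, roughly what a hammer2(8)-style utility would do.  Only the
 * fields whose HAMMER2IOC_INODE_FLAG_* bit is set are applied, and
 * INODE_SET is gated by the PRIV_HAMMER_IOCTL check in the dispatcher.
 * The header path and target path are assumptions used only here.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <vfs/hammer2/hammer2_ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	hammer2_ioc_inode_t ino;
	int fd;

	fd = open("/mnt/somefile", O_RDONLY);
	if (fd < 0)
		err(1, "open");
	if (ioctl(fd, HAMMER2IOC_INODE_GET, &ino) < 0)
		err(1, "HAMMER2IOC_INODE_GET");

	/* Request only the check-algorithm change; leave other meta alone */
	ino.flags = HAMMER2IOC_INODE_FLAG_CHECK;
	ino.ip_data.meta.check_algo =
		HAMMER2_ENC_ALGO(HAMMER2_CHECK_XXHASH64);

	if (ioctl(fd, HAMMER2IOC_INODE_SET, &ino) < 0)
		err(1, "HAMMER2IOC_INODE_SET");
	close(fd);
	return 0;
}
#endif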
static
int
hammer2_ioctl_debug_dump(hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	int count = 1000;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		chain = ip->cluster.array[i].chain;
		if (chain == NULL)
			continue;
		hammer2_dump_chain(chain, 0, &count, 'i');
	}
	return 0;
}

/*
 * Executes one flush/free pass per call.  If trying to recover
 * data we just freed up a moment ago it can take up to six passes
 * to fully free the blocks.  Note that passes occur automatically based
 * on free space as the storage fills up, but manual passes may be needed
 * if storage becomes almost completely full.
 */
static
int
hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_bulkfree_t *bfi = data;
	hammer2_dev_t *hmp;
	hammer2_pfs_t *pmp;
	hammer2_chain_t *vchain;
	int error;
	int didsnap;

	pmp = ip->pmp;
	ip = pmp->iroot;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (bfi == NULL)
		return (EINVAL);

	/*
	 * Bulkfree has to be serialized to guarantee at least one sync
	 * in between bulkfrees.
	 */
	error = lockmgr(&hmp->bflock, LK_EXCLUSIVE | LK_PCATCH);
	if (error)
		return error;

	/*
	 * sync the filesystem and obtain a snapshot of the synchronized
	 * hmp volume header.  We treat the snapshot as an independent
	 * entity.
	 *
	 * If ENOSPC occurs we should continue, because bulkfree is the only
	 * way to fix that.  The flush will have flushed everything it could
	 * and not left any modified chains.  Otherwise an error is fatal.
	 */
	error = hammer2_vfs_sync(pmp->mp, MNT_WAIT);
	if (error && error != ENOSPC)
		goto failed;

	/*
	 * If we have an ENOSPC error we have to bulkfree on the live
	 * topology.  Otherwise we can bulkfree on a snapshot.
	 */
	if (error) {
		kprintf("hammer2: WARNING! Bulkfree forced to use live "
			"topology\n");
		vchain = &hmp->vchain;
		hammer2_chain_ref(vchain);
		didsnap = 0;
	} else {
		vchain = hammer2_chain_bulksnap(hmp);
		didsnap = 1;
	}

	/*
	 * Bulkfree on a snapshot does not need a transaction, which allows
	 * it to run concurrently with any operation other than another
	 * bulkfree.
	 *
	 * If we are running bulkfree on the live topology we have to be
	 * in a FLUSH transaction.
	 */
	if (didsnap == 0)
		hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);

	if (bfi) {
		hammer2_thr_freeze(&hmp->bfthr);
		error = hammer2_bulkfree_pass(hmp, vchain, bfi);
		hammer2_thr_unfreeze(&hmp->bfthr);
	}
	if (didsnap) {
		hammer2_chain_bulkdrop(vchain);
	} else {
		hammer2_chain_drop(vchain);
		hammer2_trans_done(pmp);
	}
	error = hammer2_error_to_errno(error);

failed:
	lockmgr(&hmp->bflock, LK_RELEASE);
	return error;
}

/*
 * Unconditionally delete meta-data in a hammer2 filesystem
 */
static
int
hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_destroy_t *iocd = data;
	hammer2_pfs_t *pmp = ip->pmp;
	int error;

	if (pmp->ronly) {
		error = EROFS;
		return error;
	}

	switch(iocd->cmd) {
	case HAMMER2_DELETE_FILE:
		/*
		 * Destroy a bad directory entry by name.  Caller must
		 * pass the directory as fd.
		 */
		{
			hammer2_xop_unlink_t *xop;

			if (iocd->path[sizeof(iocd->path)-1]) {
				error = EINVAL;
				break;
			}
			if (ip->meta.type != HAMMER2_OBJTYPE_DIRECTORY) {
				error = EINVAL;
				break;
			}
			hammer2_pfs_memory_wait(pmp);
			hammer2_trans_init(pmp, 0);
			hammer2_inode_lock(ip, 0);

			xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
			hammer2_xop_setname(&xop->head, iocd->path,
					    strlen(iocd->path));
			xop->isdir = -1;
			xop->dopermanent = H2DOPERM_PERMANENT |
					   H2DOPERM_FORCE |
					   H2DOPERM_IGNINO;
			hammer2_xop_start(&xop->head, hammer2_xop_unlink);

			error = hammer2_xop_collect(&xop->head, 0);
			error = hammer2_error_to_errno(error);
			hammer2_inode_unlock(ip);
			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
			hammer2_trans_done(pmp);
		}
		break;
	case HAMMER2_DELETE_INUM:
		/*
		 * Destroy a bad inode by inode number.
		 */
		{
			hammer2_xop_lookup_t *xop;

			if (iocd->inum < 1) {
				error = EINVAL;
				break;
			}
			hammer2_pfs_memory_wait(pmp);
			hammer2_trans_init(pmp, 0);

			xop = hammer2_xop_alloc(pmp->iroot, 0);
			xop->lhc = iocd->inum;
			hammer2_xop_start(&xop->head, hammer2_xop_lookup);
			error = hammer2_xop_collect(&xop->head, 0);
			if (error == 0) {
				ip = hammer2_inode_get(pmp, NULL,
						       &xop->head.cluster, -1);
				hammer2_xop_retire(&xop->head,
						   HAMMER2_XOPMASK_VOP);
				if (ip) {
					ip->meta.nlinks = 1;
					hammer2_inode_unlink_finisher(ip, 0);
					hammer2_inode_unlock(ip);
				}
			} else {
				hammer2_xop_retire(&xop->head,
						   HAMMER2_XOPMASK_VOP);
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}
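/*
 * Illustrative userland sketch (guarded by #if 0, not part of the kernel
 * build): issuing manual bulkfree passes against
 * hammer2_ioctl_bulkfree_scan() above.  As the comment on that function
 * notes, several passes may be needed to fully recover recently freed
 * blocks.  The zero-initialized hammer2_ioc_bulkfree_t is a
 * simplification; a real utility such as hammer2(8) fills in the scan
 * parameters (e.g. how much memory the pass may use) before issuing the
 * call.  Header path and mount point are assumptions used only here.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <vfs/hammer2/hammer2_ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	hammer2_ioc_bulkfree_t bfi;
	int fd;
	int pass;

	fd = open("/mnt", O_RDONLY);
	if (fd < 0)
		err(1, "open");

	/* One flush/free pass per ioctl; repeat if recovering recent frees */
	for (pass = 0; pass < 2; ++pass) {
		memset(&bfi, 0, sizeof(bfi));
		if (ioctl(fd, HAMMER2IOC_BULKFREE_SCAN, &bfi) < 0)
			err(1, "HAMMER2IOC_BULKFREE_SCAN");
	}
	close(fd);
	return 0;
}
#endif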