/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Ioctl Functions.
 *
 * WARNING! The ioctl functions which manipulate the connection state need
 *	    to be able to run without deadlock on the volume's chain lock.
 *	    Most of these functions use a separate lock.
 */

#include "hammer2.h"

static int hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_debug_dump(hammer2_inode_t *ip);
//static int hammer2_ioctl_inode_comp_set(hammer2_inode_t *ip, void *data);
//static int hammer2_ioctl_inode_comp_rec_set(hammer2_inode_t *ip, void *data);
//static int hammer2_ioctl_inode_comp_rec_set2(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data);

int
hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data, int fflag,
	      struct ucred *cred)
{
	int error;

	/*
	 * Standard root cred checks, will be selectively ignored below
	 * for ioctls that do not require root creds.
	 */
	error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);

	switch(com) {
	case HAMMER2IOC_VERSION_GET:
		error = hammer2_ioctl_version_get(ip, data);
		break;
	case HAMMER2IOC_RECLUSTER:
		if (error == 0)
			error = hammer2_ioctl_recluster(ip, data);
		break;
	case HAMMER2IOC_REMOTE_SCAN:
		if (error == 0)
			error = hammer2_ioctl_remote_scan(ip, data);
		break;
	case HAMMER2IOC_REMOTE_ADD:
		if (error == 0)
			error = hammer2_ioctl_remote_add(ip, data);
		break;
	case HAMMER2IOC_REMOTE_DEL:
		if (error == 0)
			error = hammer2_ioctl_remote_del(ip, data);
		break;
	case HAMMER2IOC_REMOTE_REP:
		if (error == 0)
			error = hammer2_ioctl_remote_rep(ip, data);
		break;
	case HAMMER2IOC_SOCKET_GET:
		if (error == 0)
			error = hammer2_ioctl_socket_get(ip, data);
		break;
	case HAMMER2IOC_SOCKET_SET:
		if (error == 0)
			error = hammer2_ioctl_socket_set(ip, data);
		break;
	case HAMMER2IOC_PFS_GET:
		if (error == 0)
			error = hammer2_ioctl_pfs_get(ip, data);
		break;
	case HAMMER2IOC_PFS_LOOKUP:
		if (error == 0)
			error = hammer2_ioctl_pfs_lookup(ip, data);
		break;
	case HAMMER2IOC_PFS_CREATE:
		if (error == 0)
			error = hammer2_ioctl_pfs_create(ip, data);
		break;
	case HAMMER2IOC_PFS_DELETE:
		if (error == 0)
			error = hammer2_ioctl_pfs_delete(ip, data);
		break;
	case HAMMER2IOC_PFS_SNAPSHOT:
		if (error == 0)
			error = hammer2_ioctl_pfs_snapshot(ip, data);
		break;
	case HAMMER2IOC_INODE_GET:
		error = hammer2_ioctl_inode_get(ip, data);
		break;
	case HAMMER2IOC_INODE_SET:
		if (error == 0)
			error = hammer2_ioctl_inode_set(ip, data);
		break;
	case HAMMER2IOC_BULKFREE_SCAN:
		error = hammer2_ioctl_bulkfree_scan(ip, data);
		break;
	case HAMMER2IOC_BULKFREE_ASYNC:
		error = hammer2_ioctl_bulkfree_scan(ip, NULL);
		break;
	/*case HAMMER2IOC_INODE_COMP_SET:
		error = hammer2_ioctl_inode_comp_set(ip, data);
		break;
	case HAMMER2IOC_INODE_COMP_REC_SET:
		error = hammer2_ioctl_inode_comp_rec_set(ip, data);
		break;
	case HAMMER2IOC_INODE_COMP_REC_SET2:
		error = hammer2_ioctl_inode_comp_rec_set2(ip, data);
		break;*/
	case HAMMER2IOC_DESTROY:
		if (error == 0)
			error = hammer2_ioctl_destroy(ip, data);
		break;
	case HAMMER2IOC_DEBUG_DUMP:
		error = hammer2_ioctl_debug_dump(ip);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
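
/*
 * Illustrative userland invocation (a hedged sketch, not part of the
 * kernel build): callers such as the hammer2(8) utility typically open a
 * file or directory inside the mounted HAMMER2 filesystem and issue the
 * ioctl against that descriptor; the descriptor names the target inode
 * (ip) seen by the dispatcher above.  Assuming "/mnt" is a HAMMER2 mount:
 *
 *	hammer2_ioc_version_t v;
 *	int fd = open("/mnt", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, HAMMER2IOC_VERSION_GET, &v) == 0)
 *		printf("HAMMER2 version %d\n", (int)v.version);
 *
 * HAMMER2IOC_VERSION_GET and hammer2_ioc_version_t come from
 * hammer2_ioctl.h; the snippet itself is for orientation only.
 */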

/*
 * Retrieve version and basic info
 */
static int
hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_version_t *version = data;
	hammer2_dev_t *hmp;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp)
		version->version = hmp->voldata.version;
	else
		version->version = -1;
	return 0;
}

static int
hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_recluster_t *recl = data;
	struct vnode *vproot;
	struct file *fp;
	hammer2_cluster_t *cluster;
	int error;

	fp = holdfp(curproc->p_fd, recl->fd, -1);
	if (fp) {
		error = VFS_ROOT(ip->pmp->mp, &vproot);
		if (error == 0) {
			cluster = &ip->pmp->iroot->cluster;
			kprintf("reconnect to cluster: nc=%d focus=%p\n",
				cluster->nchains, cluster->focus);
			if (cluster->nchains != 1 || cluster->focus == NULL) {
				kprintf("not a local device mount\n");
				error = EINVAL;
			} else {
				hammer2_cluster_reconnect(cluster->focus->hmp,
							  fp);
				kprintf("ok\n");
				error = 0;
			}
			vput(vproot);
		}
	} else {
		error = EINVAL;
	}
	return error;
}

/*
 * Retrieve information about a remote
 */
static int
hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data)
{
	hammer2_dev_t *hmp;
	hammer2_ioc_remote_t *remote = data;
	int copyid = remote->copyid;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	remote->copy1 = hmp->voldata.copyinfo[copyid];
	hammer2_voldata_unlock(hmp);

	/*
	 * Adjust nextid (GET only)
	 */
	while (++copyid < HAMMER2_COPYID_COUNT &&
	       hmp->voldata.copyinfo[copyid].copyid == 0) {
		;
	}
	if (copyid == HAMMER2_COPYID_COUNT)
		remote->nextid = -1;
	else
		remote->nextid = copyid;

	return(0);
}

/*
 * Add new remote entry
 */
static int
hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;
	int error = 0;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	if (copyid < 0) {
		for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
			if (hmp->voldata.copyinfo[copyid].copyid == 0)
				break;
		}
		if (copyid == HAMMER2_COPYID_COUNT) {
			error = ENOSPC;
			goto failed;
		}
	}
	hammer2_voldata_modify(hmp);
	remote->copy1.copyid = copyid;
	hmp->voldata.copyinfo[copyid] = remote->copy1;
	hammer2_volconf_update(hmp, copyid);
failed:
	hammer2_voldata_unlock(hmp);
	return (error);
}

/*
 * Delete existing remote entry
 */
static int
hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;
	int error = 0;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);
	remote->copy1.path[sizeof(remote->copy1.path) - 1] = 0;
	hammer2_voldata_lock(hmp);
	if (copyid < 0) {
		for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
			if (hmp->voldata.copyinfo[copyid].copyid == 0)
				continue;
			if (strcmp(remote->copy1.path,
				   hmp->voldata.copyinfo[copyid].path) == 0) {
				break;
			}
		}
		if (copyid == HAMMER2_COPYID_COUNT) {
			error = ENOENT;
			goto failed;
		}
	}
	hammer2_voldata_modify(hmp);
	hmp->voldata.copyinfo[copyid].copyid = 0;
	hammer2_volconf_update(hmp, copyid);
failed:
	hammer2_voldata_unlock(hmp);
	return (error);
}

/*
 * Replace existing remote entry
 */
static int
hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	hammer2_voldata_modify(hmp);
	/*hammer2_volconf_update(hmp, copyid);*/
	hammer2_voldata_unlock(hmp);

	return(0);
}

/*
 * Retrieve communications socket
 */
static int
hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data)
{
	return (EOPNOTSUPP);
}
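
/*
 * Note on the remote/copy ioctls above: they manipulate the copyinfo[]
 * array embedded in the volume header under hammer2_voldata_lock().  A
 * negative copyid selects free-slot allocation (REMOTE_ADD) or a lookup
 * by path (REMOTE_DEL).  SOCKET_GET (above) and SOCKET_SET (below) are
 * effectively placeholders at this point.
 */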

/*
 * Set communications socket for connection
 */
static int
hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	hammer2_voldata_unlock(hmp);

	return(0);
}

/*
 * Used to scan and retrieve PFS information.  PFS's are directories under
 * the super-root.
 *
 * To scan PFSs pass name_key=0.  The function will scan for the next
 * PFS and set all fields, as well as set name_next to the next key.
 * When no PFSs remain, name_next is set to (hammer2_key_t)-1.
 *
 * To retrieve a particular PFS by key, specify the key but note that
 * the ioctl will return the lowest key >= specified_key, so the caller
 * must verify the key.
 *
 * To retrieve the PFS associated with the file descriptor, pass
 * name_key set to (hammer2_key_t)-1.
 */
static int
hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_dev_t *hmp;
	hammer2_ioc_pfs_t *pfs;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_key_t save_key;
	int error;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	save_key = pfs->name_key;
	error = 0;

	/*
	 * Setup
	 */
	if (save_key == (hammer2_key_t)-1) {
		hammer2_inode_lock(ip->pmp->iroot, 0);
		parent = NULL;
		chain = hammer2_inode_chain(ip->pmp->iroot, 0,
					    HAMMER2_RESOLVE_ALWAYS |
					    HAMMER2_RESOLVE_SHARED);
	} else {
		hammer2_inode_lock(hmp->spmp->iroot, 0);
		parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
					     HAMMER2_RESOLVE_ALWAYS |
					     HAMMER2_RESOLVE_SHARED);
		chain = hammer2_chain_lookup(&parent, &key_next,
					     pfs->name_key, HAMMER2_KEY_MAX,
					     &error,
					     HAMMER2_LOOKUP_SHARED);
	}

	/*
	 * Locate next PFS
	 */
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
			break;
		if (parent == NULL) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
			chain = NULL;
			break;
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error,
					   HAMMER2_LOOKUP_SHARED);
	}
	error = hammer2_error_to_errno(error);

	/*
	 * Load the data being returned by the ioctl.
	 */
	if (chain && chain->error == 0) {
		ripdata = &chain->data->ipdata;
		pfs->name_key = ripdata->meta.name_key;
		pfs->pfs_type = ripdata->meta.pfs_type;
		pfs->pfs_subtype = ripdata->meta.pfs_subtype;
		pfs->pfs_clid = ripdata->meta.pfs_clid;
		pfs->pfs_fsid = ripdata->meta.pfs_fsid;
		KKASSERT(ripdata->meta.name_len < sizeof(pfs->name));
		bcopy(ripdata->filename, pfs->name, ripdata->meta.name_len);
		pfs->name[ripdata->meta.name_len] = 0;
		ripdata = NULL;	/* safety */

		/*
		 * Calculate name_next, if any.  We are only accessing
		 * chain->bref so we can ignore chain->error (if the key
		 * is used later it will error then).
		 */
		if (parent == NULL) {
			pfs->name_next = (hammer2_key_t)-1;
		} else {
			chain = hammer2_chain_next(&parent, chain, &key_next,
						   key_next, HAMMER2_KEY_MAX,
						   &error,
						   HAMMER2_LOOKUP_SHARED);
			if (chain)
				pfs->name_next = chain->bref.key;
			else
				pfs->name_next = (hammer2_key_t)-1;
		}
	} else {
		pfs->name_next = (hammer2_key_t)-1;
		error = ENOENT;
	}

	/*
	 * Cleanup
	 */
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (save_key == (hammer2_key_t)-1) {
		hammer2_inode_unlock(ip->pmp->iroot);
	} else {
		hammer2_inode_unlock(hmp->spmp->iroot);
	}

	return (error);
}
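
/*
 * Illustrative scan loop for the ioctl above (a hedged userland sketch,
 * not compiled here).  It follows the protocol documented before
 * hammer2_ioctl_pfs_get(): start at name_key=0 and chase name_next until
 * it reads back as (hammer2_key_t)-1.  The descriptor fd is assumed to
 * reference any file on the HAMMER2 mount.
 *
 *	hammer2_ioc_pfs_t pfs;
 *
 *	bzero(&pfs, sizeof(pfs));
 *	pfs.name_key = 0;
 *	while (ioctl(fd, HAMMER2IOC_PFS_GET, &pfs) == 0) {
 *		printf("PFS %s\n", pfs.name);
 *		if (pfs.name_next == (hammer2_key_t)-1)
 *			break;
 *		pfs.name_key = pfs.name_next;
 *	}
 */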

/*
 * Find a specific PFS by name
 */
static int
hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_dev_t *hmp;
	hammer2_ioc_pfs_t *pfs;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_key_t lhc;
	int error;
	size_t len;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	error = 0;

	hammer2_inode_lock(hmp->spmp->iroot, HAMMER2_RESOLVE_SHARED);
	parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);

	pfs->name[sizeof(pfs->name) - 1] = 0;
	len = strlen(pfs->name);
	lhc = hammer2_dirhash(pfs->name, len);

	chain = hammer2_chain_lookup(&parent, &key_next,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     &error, HAMMER2_LOOKUP_SHARED);
	while (chain) {
		if (hammer2_chain_dirent_test(chain, pfs->name, len))
			break;
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next,
					   lhc + HAMMER2_DIRHASH_LOMASK,
					   &error, HAMMER2_LOOKUP_SHARED);
	}
	error = hammer2_error_to_errno(error);

	/*
	 * Load the data being returned by the ioctl.
	 */
	if (chain && chain->error == 0) {
		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INODE);
		ripdata = &chain->data->ipdata;
		pfs->name_key = ripdata->meta.name_key;
		pfs->pfs_type = ripdata->meta.pfs_type;
		pfs->pfs_subtype = ripdata->meta.pfs_subtype;
		pfs->pfs_clid = ripdata->meta.pfs_clid;
		pfs->pfs_fsid = ripdata->meta.pfs_fsid;
		ripdata = NULL;

		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else if (error == 0) {
		error = ENOENT;
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(hmp->spmp->iroot);

	return (error);
}
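
/*
 * hammer2_ioctl_pfs_lookup() also doubles as the existence check used by
 * hammer2_ioctl_pfs_create() below to return EEXIST for duplicate names.
 */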

/*
 * Create a new PFS under the super-root
 */
static int
hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data)
{
	hammer2_inode_data_t *nipdata;
	hammer2_chain_t *nchain;
	hammer2_dev_t *hmp;
	hammer2_dev_t *force_local;
	hammer2_ioc_pfs_t *pfs;
	hammer2_inode_t *nip;
	hammer2_tid_t mtid;
	int error;

	hmp = ip->pmp->pfs_hmps[0];	/* XXX */
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	nip = NULL;

	if (pfs->name[0] == 0)
		return(EINVAL);
	pfs->name[sizeof(pfs->name) - 1] = 0;	/* ensure 0-termination */

	if (hammer2_ioctl_pfs_lookup(ip, pfs) == 0)
		return(EEXIST);

	hammer2_trans_init(hmp->spmp, 0);
	mtid = hammer2_trans_sub(hmp->spmp);
	nip = hammer2_inode_create(hmp->spmp->iroot, hmp->spmp->iroot,
				   NULL, NULL,
				   pfs->name, strlen(pfs->name), 0,
				   1, HAMMER2_OBJTYPE_DIRECTORY, 0,
				   HAMMER2_INSERT_PFSROOT, &error);
	if (error == 0) {
		hammer2_inode_modify(nip);
		nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_modify(nchain, mtid, 0, 0);
		nipdata = &nchain->data->ipdata;

		nip->meta.pfs_type = pfs->pfs_type;
		nip->meta.pfs_subtype = pfs->pfs_subtype;
		nip->meta.pfs_clid = pfs->pfs_clid;
		nip->meta.pfs_fsid = pfs->pfs_fsid;
		nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;

		/*
		 * Set default compression and check algorithm.  This
		 * can be changed later.
		 *
		 * Do not allow compression on PFS's with the special name
		 * "boot", the boot loader can't decompress (yet).
		 */
		nip->meta.comp_algo =
			HAMMER2_ENC_ALGO(HAMMER2_COMP_NEWFS_DEFAULT);
		nip->meta.check_algo =
			HAMMER2_ENC_ALGO(HAMMER2_CHECK_XXHASH64);

		if (strcasecmp(pfs->name, "boot") == 0) {
			nip->meta.comp_algo =
				HAMMER2_ENC_ALGO(HAMMER2_COMP_AUTOZERO);
		}

		/*
		 * Super-root isn't mounted, fsync it
		 */
		hammer2_chain_unlock(nchain);
		hammer2_inode_ref(nip);
		hammer2_inode_unlock(nip);
		hammer2_inode_chain_sync(nip);
		hammer2_inode_drop(nip);

		/*
		 * We still have a ref on the chain, relock and associate
		 * with an appropriate PFS.
		 */
		force_local = (hmp->hflags & HMNT2_LOCAL) ?
			      hmp : NULL;

		hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
		nipdata = &nchain->data->ipdata;
		kprintf("ADD LOCAL PFS (IOCTL): %s\n", nipdata->filename);
		hammer2_pfsalloc(nchain, nipdata,
				 nchain->bref.modify_tid, force_local);

		hammer2_chain_unlock(nchain);
		hammer2_chain_drop(nchain);
	}
	hammer2_trans_done(hmp->spmp);

	return (error);
}

/*
 * Destroy an existing PFS under the super-root
 */
static int
hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_pfs_t *pfs = data;
	hammer2_dev_t *hmp;
	hammer2_pfs_t *spmp;
	hammer2_pfs_t *pmp;
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *iroot;
	int error;
	int i;

	/*
	 * The PFS should be probed, so we should be able to
	 * locate it.  We only delete the PFS from the
	 * specific H2 block device (hmp), not all of
	 * them.  We must remove the PFS from the cluster
	 * before we can destroy it.
	 */
	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs->name[sizeof(pfs->name) - 1] = 0;	/* ensure termination */

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] != hmp)
				continue;
			if (pmp->pfs_names[i] &&
			    strcmp(pmp->pfs_names[i], pfs->name) == 0) {
				break;
			}
		}
		if (i != HAMMER2_MAXCLUSTER)
			break;
	}

	if (pmp == NULL) {
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		return ENOENT;
	}

	/*
	 * Ok, we found the pmp and we have the index.  Permanently remove
	 * the PFS from the cluster
	 */
	iroot = pmp->iroot;
	kprintf("FOUND PFS %s CLINDEX %d\n", pfs->name, i);
	hammer2_pfsdealloc(pmp, i, 1);

	lockmgr(&hammer2_mntlk, LK_RELEASE);

	/*
	 * Now destroy the PFS under its device using the per-device
	 * super-root.
	 */
	spmp = hmp->spmp;
	dip = spmp->iroot;
	hammer2_trans_init(spmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, pfs->name, strlen(pfs->name));
	xop->isdir = 2;
	xop->dopermanent = H2DOPERM_PERMANENT | H2DOPERM_FORCE;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_inode_unlock(dip);

#if 0
	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, 0);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}
#endif
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	hammer2_trans_done(spmp);

	return (hammer2_error_to_errno(error));
}
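
/*
 * Orientation note (hedged): PFS creation, deletion, and snapshots are
 * normally driven from userland via the hammer2(8) directives
 * (pfs-create, pfs-delete, snapshot), each of which fills in a
 * hammer2_ioc_pfs_t and issues the corresponding HAMMER2IOC_PFS_* ioctl
 * handled in this file.  See hammer2(8) for the authoritative usage.
 */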

/*
 * Create a snapshot of an existing PFS.  The snapshot appears as its own
 * PFS under the super-root, labeled by pfs->name.
 */
static int
hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_pfs_t *pfs = data;
	hammer2_dev_t *hmp;
	hammer2_pfs_t *pmp;
	hammer2_chain_t *chain;
	hammer2_tid_t mtid;
	int error;

	if (pfs->name[0] == 0)
		return(EINVAL);
	if (pfs->name[sizeof(pfs->name)-1] != 0)
		return(EINVAL);

	pmp = ip->pmp;
	ip = pmp->iroot;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	lockmgr(&hmp->bulklk, LK_EXCLUSIVE);

	hammer2_vfs_sync(pmp->mp, MNT_WAIT);

	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
	mtid = hammer2_trans_sub(pmp);
	hammer2_inode_lock(ip, 0);
	hammer2_inode_modify(ip);
	ip->meta.pfs_lsnap_tid = mtid;

	/* XXX cluster it! */
	chain = hammer2_inode_chain(ip, 0, HAMMER2_RESOLVE_ALWAYS);
	error = hammer2_chain_snapshot(chain, pfs, mtid);
	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);

	hammer2_inode_unlock(ip);
	hammer2_trans_done(pmp);

	lockmgr(&hmp->bulklk, LK_RELEASE);

	return (hammer2_error_to_errno(error));
}

/*
 * Retrieve the raw inode structure, non-inclusive of node-specific data.
 */
static int
hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_inode_t *ino;
	hammer2_chain_t *chain;
	int error;
	int i;

	ino = data;
	error = 0;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	ino->data_count = 0;
	ino->inode_count = 0;
	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (ino->data_count <
			    chain->bref.embed.stats.data_count) {
				ino->data_count =
					chain->bref.embed.stats.data_count;
			}
			if (ino->inode_count <
			    chain->bref.embed.stats.inode_count) {
				ino->inode_count =
					chain->bref.embed.stats.inode_count;
			}
		}
	}
	bzero(&ino->ip_data, sizeof(ino->ip_data));
	ino->ip_data.meta = ip->meta;
	ino->kdata = ip;
	hammer2_inode_unlock(ip);

	return hammer2_error_to_errno(error);
}
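
/*
 * Usage note (hedged): HAMMER2IOC_INODE_GET is issued against an open
 * descriptor for the file in question.  The returned ip_data.meta mirrors
 * the in-memory inode meta-data, while data_count and inode_count report
 * the largest value observed across the inode's backing cluster chains,
 * so callers should treat them as aggregates rather than per-node
 * figures.
 */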

/*
 * Set various parameters in an inode which cannot be set through
 * normal filesystem VNOPS.
 */
static int
hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_inode_t *ino = data;
	int error = 0;

	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);

	if ((ino->flags & HAMMER2IOC_INODE_FLAG_CHECK) &&
	    ip->meta.check_algo != ino->ip_data.meta.check_algo) {
		hammer2_inode_modify(ip);
		ip->meta.check_algo = ino->ip_data.meta.check_algo;
	}
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_COMP) &&
	    ip->meta.comp_algo != ino->ip_data.meta.comp_algo) {
		hammer2_inode_modify(ip);
		ip->meta.comp_algo = ino->ip_data.meta.comp_algo;
	}
	ino->kdata = ip;

	/* Ignore these flags for now...*/
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_IQUOTA) &&
	    ip->meta.inode_quota != ino->ip_data.meta.inode_quota) {
		hammer2_inode_modify(ip);
		ip->meta.inode_quota = ino->ip_data.meta.inode_quota;
	}
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_DQUOTA) &&
	    ip->meta.data_quota != ino->ip_data.meta.data_quota) {
		hammer2_inode_modify(ip);
		ip->meta.data_quota = ino->ip_data.meta.data_quota;
	}
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_COPIES) &&
	    ip->meta.ncopies != ino->ip_data.meta.ncopies) {
		hammer2_inode_modify(ip);
		ip->meta.ncopies = ino->ip_data.meta.ncopies;
	}
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);

	return (hammer2_error_to_errno(error));
}

static
int
hammer2_ioctl_debug_dump(hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	int count = 1000;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		chain = ip->cluster.array[i].chain;
		if (chain == NULL)
			continue;
		hammer2_dump_chain(chain, 0, &count, 'i');
	}
	return 0;
}

/*
 * Executes one flush/free pass per call.  If trying to recover
 * data we just freed up a moment ago it can take up to six passes
 * to fully free the blocks.  Note that passes occur automatically based
 * on free space as the storage fills up, but manual passes may be needed
 * if storage becomes almost completely full.
 */
static
int
hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_bulkfree_t *bfi = data;
	hammer2_dev_t *hmp;
	hammer2_pfs_t *pmp;
	hammer2_chain_t *vchain;
	int error;
	int didsnap;

	pmp = ip->pmp;
	ip = pmp->iroot;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (bfi == NULL)
		return (EINVAL);

	/*
	 * Bulkfree has to be serialized to guarantee at least one sync
	 * in between bulkfrees.
	 */
	error = lockmgr(&hmp->bflock, LK_EXCLUSIVE | LK_PCATCH);
	if (error)
		return error;

	/*
	 * Sync the filesystem and obtain a snapshot of the synchronized
	 * hmp volume header.  We treat the snapshot as an independent
	 * entity.
	 *
	 * If ENOSPC occurs we should continue, because bulkfree is the only
	 * way to fix that.  The flush will have flushed everything it could
	 * and not left any modified chains.  Otherwise an error is fatal.
	 */
	error = hammer2_vfs_sync(pmp->mp, MNT_WAIT);
	if (error && error != ENOSPC)
		goto failed;

	/*
	 * If we have an ENOSPC error we have to bulkfree on the live
	 * topology.  Otherwise we can bulkfree on a snapshot.
	 */
	if (error) {
		kprintf("hammer2: WARNING! Bulkfree forced to use live "
			"topology\n");
		vchain = &hmp->vchain;
		hammer2_chain_ref(vchain);
		didsnap = 0;
	} else {
		vchain = hammer2_chain_bulksnap(hmp);
		didsnap = 1;
	}

	/*
	 * Bulkfree on a snapshot does not need a transaction, which allows
	 * it to run concurrently with any operation other than another
	 * bulkfree.
	 *
	 * If we are running bulkfree on the live topology we have to be
	 * in a FLUSH transaction.
	 */
	if (didsnap == 0)
		hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);

	if (bfi) {
		hammer2_thr_freeze(&hmp->bfthr);
		error = hammer2_bulkfree_pass(hmp, vchain, bfi);
		hammer2_thr_unfreeze(&hmp->bfthr);
	}
	if (didsnap) {
		hammer2_chain_bulkdrop(vchain);
	} else {
		hammer2_chain_drop(vchain);
		hammer2_trans_done(pmp);
	}
	error = hammer2_error_to_errno(error);

failed:
	lockmgr(&hmp->bflock, LK_RELEASE);
	return error;
}

/*
 * Unconditionally delete meta-data in a hammer2 filesystem
 */
static
int
hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_destroy_t *iocd = data;
	hammer2_pfs_t *pmp = ip->pmp;
	int error;

	if (pmp->ronly) {
		error = EROFS;
		return error;
	}

	switch(iocd->cmd) {
	case HAMMER2_DELETE_FILE:
		/*
		 * Destroy a bad directory entry by name.  Caller must
		 * pass the directory as fd.
		 */
		{
			hammer2_xop_unlink_t *xop;

			if (iocd->path[sizeof(iocd->path)-1]) {
				error = EINVAL;
				break;
			}
			if (ip->meta.type != HAMMER2_OBJTYPE_DIRECTORY) {
				error = EINVAL;
				break;
			}
			hammer2_pfs_memory_wait(pmp);
			hammer2_trans_init(pmp, 0);
			hammer2_inode_lock(ip, 0);

			xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
			hammer2_xop_setname(&xop->head, iocd->path,
					    strlen(iocd->path));
			xop->isdir = -1;
			xop->dopermanent = H2DOPERM_PERMANENT |
					   H2DOPERM_FORCE |
					   H2DOPERM_IGNINO;
			hammer2_xop_start(&xop->head, hammer2_xop_unlink);

			error = hammer2_xop_collect(&xop->head, 0);
			error = hammer2_error_to_errno(error);
			hammer2_inode_unlock(ip);
			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
			hammer2_trans_done(pmp);
		}
		break;
	case HAMMER2_DELETE_INUM:
		/*
		 * Destroy a bad inode by inode number.
		 */
		{
			hammer2_xop_lookup_t *xop;

			if (iocd->inum < 1) {
				error = EINVAL;
				break;
			}
			hammer2_pfs_memory_wait(pmp);
			hammer2_trans_init(pmp, 0);

			xop = hammer2_xop_alloc(pmp->iroot, 0);
			xop->lhc = iocd->inum;
			hammer2_xop_start(&xop->head, hammer2_xop_lookup);
			error = hammer2_xop_collect(&xop->head, 0);
			if (error == 0) {
				ip = hammer2_inode_get(pmp, NULL,
						       &xop->head.cluster, -1);
				hammer2_xop_retire(&xop->head,
						   HAMMER2_XOPMASK_VOP);
				if (ip) {
					ip->meta.nlinks = 1;
					hammer2_inode_unlink_finisher(ip, 0);
					hammer2_inode_unlock(ip);
				}
			} else {
				hammer2_xop_retire(&xop->head,
						   HAMMER2_XOPMASK_VOP);
			}
			hammer2_trans_done(pmp);	/* close the txn opened above */
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}