/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 * Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>

static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly);

static void
hammer_close_device(struct vnode **devvpp, int ronly);

static int
hammer_format_volume_header(struct hammer_mount *hmp, struct vnode *devvp,
	const char *vol_name, int vol_no, int vol_count,
	int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size);

static int
hammer_clear_volume_header(struct vnode *devvp);

/*
 * Per-volume bigblock accounting, collected while formatting or
 * freeing a volume's freemap.
 */
struct bigblock_stat {
	uint64_t total_bigblocks;	/* all bigblocks, incl. L2 arrays */
	uint64_t total_free_bigblocks;	/* allocatable bigblocks only */
	uint64_t counter;		/* free count for the current L1 entry */
};

static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat);

static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat);

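/*
 * Worked example (illustrative only) of the counters in struct
 * bigblock_stat above, as filled in by hammer_format_freemap(): a
 * volume with a 40 GB buffer area is carved into 40 GB / 8 MB = 5120
 * bigblocks.  The area fits within a single 4 TB freemap chunk, whose
 * first bigblock is reserved for the chunk's layer2 array, so the
 * result is total_bigblocks = 5120 and total_free_bigblocks = 5119.
 */
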
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		kprintf("Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hmp->nvolumes >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		kprintf("Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * Find an unused volume number.
	 */
	int free_vol_no = 0;
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
	       RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		hammer_unlock(&hmp->volume_lock);
		return (EINVAL);
	}

	struct vnode *devvp = NULL;
	error = hammer_setup_device(&devvp, ioc->device_name, 0);
	if (error)
		goto end;
	KKASSERT(devvp);
	error = hammer_format_volume_header(
		hmp,
		devvp,
		hmp->rootvol->ondisk->vol_name,
		free_vol_no,
		hmp->nvolumes + 1,
		ioc->vol_size,
		ioc->boot_area_size,
		ioc->mem_area_size);
	hammer_close_device(&devvp, 0);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, ioc->device_name, NULL);
	if (error)
		goto end;

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	++hmp->nvolumes;

	/*
	 * Set each volume's new value of the vol_count field.
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers.
			 */
			error = 0;
			continue;
		}
		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the header of the root volume
		 * are automatically flushed to disk.  For all
		 * other volumes that we modify we do it here.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	struct bigblock_stat stat;
	error = hammer_format_freemap(trans, volume, &stat);
	KKASSERT(error == 0);

	/*
	 * Increase the total number of bigblocks.
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks += stat.total_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Increase the number of free bigblocks
	 * (including the copy in hmp).
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks +=
		stat.total_free_bigblocks;
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;
	hammer_modify_volume_done(trans->rootvol);

	hammer_rel_volume(volume, 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		kprintf("hammer_ioc_volume_add: error %d\n", error);
	return (error);
}

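/*
 * Userland usage sketch (not compiled in): hammer_ioc_volume_add() is
 * reached through the HAMMER ioctl interface on an open file within
 * the mounted filesystem.  The mount point, device name, and size
 * values below are arbitrary illustrations, and the sketch assumes
 * <fcntl.h>, <sys/ioctl.h>, <err.h> and vfs/hammer/hammer_ioctl.h;
 * see hammer_ioctl.h and the hammer(8) utility for the authoritative
 * invocation.
 */
#if 0
	struct hammer_ioc_volume ioc;
	int fd = open("/mnt/hammer", O_RDONLY);	/* any file in the fs */

	bzero(&ioc, sizeof(ioc));
	snprintf(ioc.device_name, sizeof(ioc.device_name), "/dev/da1");
	ioc.vol_size = 40LL * 1024 * 1024 * 1024;	/* arbitrary: 40 GB */
	ioc.boot_area_size = 64 * 1024 * 1024;		/* arbitrary example */
	ioc.mem_area_size = 256 * 1024 * 1024;		/* arbitrary example */
	if (ioctl(fd, HAMMERIOC_ADD_VOLUME, &ioc) < 0)
		err(1, "HAMMERIOC_ADD_VOLUME");
#endif
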
/*
 * Remove a volume.
 */
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int error = 0;

	if (mp->mnt_flag & MNT_RDONLY) {
		kprintf("Cannot delete volume from read-only HAMMER "
			"filesystem\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		kprintf("Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	volume = NULL;

	/*
	 * Find the volume by name.
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers.
			 */
			error = 0;
			continue;
		}
		KKASSERT(volume != NULL && error == 0);
		if (strcmp(volume->vol_name, ioc->device_name) == 0) {
			break;
		}
		hammer_rel_volume(volume, 0);
		volume = NULL;
	}

	if (volume == NULL) {
		kprintf("Couldn't find volume\n");
		error = EINVAL;
		goto end;
	}

	if (volume == trans->rootvol) {
		kprintf("Cannot remove root volume\n");
		hammer_rel_volume(volume, 0);
		error = EINVAL;
		goto end;
	}

	/*
	 * Mark the volume as scheduled for removal and reblock the
	 * entire filesystem; the reblocker moves all data off the
	 * leaving volume.
	 */
	hmp->volume_to_remove = volume->vol_no;

	struct hammer_ioc_reblock reblock;
	bzero(&reblock, sizeof(reblock));

	reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
	reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
	reblock.head.flags = HAMMER_IOC_DO_FLAGS;
	reblock.free_level = 0;

	error = hammer_ioc_reblock(trans, ip, &reblock);

	if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
		error = EINTR;
	}

	if (error) {
		if (error == EINTR) {
			kprintf("reblock was interrupted\n");
		} else {
			kprintf("reblock failed: %d\n", error);
		}
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		goto end;
	}

	/*
	 * Sync the filesystem.
	 */
	int count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: flushing.");
			else
				kprintf(".");
			tsleep(&count, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up");
			break;
		}
	}
	kprintf("\n");

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	/*
	 * We use stat later to update rootvol's bigblock stats.
	 */
	struct bigblock_stat stat;
	error = hammer_free_freemap(trans, volume, &stat);
	if (error) {
		kprintf("Failed to free volume: volume not empty!\n");
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		hammer_unlock(&hmp->blkmap_lock);
		hammer_sync_unlock(trans);
		goto end;
	}

	hmp->volume_to_remove = -1;

	hammer_rel_volume(volume, 0);

	/*
	 * Unload buffers.
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, volume);

	error = hammer_unload_volume(volume, NULL);
	if (error == -1) {
		kprintf("Failed to unload volume\n");
		hammer_unlock(&hmp->blkmap_lock);
		hammer_sync_unlock(trans);
		goto end;
	}

	volume = NULL;
	--hmp->nvolumes;

	/*
	 * Set each volume's new value of the vol_count field.
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers.
			 */
			error = 0;
			continue;
		}

		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the header of the root volume
		 * are automatically flushed to disk.  For all
		 * other volumes that we modify we do it here.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	/*
	 * Update the total number of bigblocks.
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks -= stat.total_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Update the number of free bigblocks
	 * (including the copy in hmp).
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks -=
		stat.total_free_bigblocks;
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;
	hammer_modify_volume_done(trans->rootvol);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	/*
	 * Erase the volume header of the removed device so that it
	 * cannot accidentally be mounted again.
	 */
	struct vnode *devvp = NULL;
	error = hammer_setup_device(&devvp, ioc->device_name, 0);
	if (error) {
		kprintf("Failed to open device: %s\n", ioc->device_name);
		goto end;
	}
	KKASSERT(devvp);
	error = hammer_clear_volume_header(devvp);
	if (error) {
		kprintf("Failed to clear volume header of device: %s\n",
			ioc->device_name);
		hammer_close_device(&devvp, 0);
		goto end;
	}
	hammer_close_device(&devvp, 0);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	return (error);
}

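/*
 * Userland usage sketch for the deletion path, under the same
 * assumptions as the volume-add sketch above.  Note that the ioctl
 * may run for a long time because the filesystem is reblocked to
 * move all data off the leaving volume.
 */
#if 0
	struct hammer_ioc_volume ioc;
	int fd = open("/mnt/hammer", O_RDONLY);	/* hypothetical mount point */

	bzero(&ioc, sizeof(ioc));
	snprintf(ioc.device_name, sizeof(ioc.device_name), "/dev/da1");
	if (ioctl(fd, HAMMERIOC_DEL_VOLUME, &ioc) < 0)
		err(1, "HAMMERIOC_DEL_VOLUME");
#endif
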
/*
 * Iterate over all usable L1 entries of the volume and
 * the corresponding L2 entries.
 *
 * Geometry of the freemap (derived from the constants used below):
 * one layer1 entry spans HAMMER_BLOCKMAP_LAYER2 (4 TB) of address
 * space, which subdivides into 4 TB / 8 MB = 524288 layer2 entries
 * of HAMMER_LARGEBLOCK_SIZE each.
 */
static int
hammer_iterate_l1l2_entries(hammer_transaction_t trans, hammer_volume_t volume,
	int (*callback)(hammer_transaction_t, hammer_volume_t, hammer_buffer_t*,
		struct hammer_blockmap_layer1*, struct hammer_blockmap_layer2*,
		hammer_off_t, hammer_off_t, void*),
	void *data)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_blockmap_t freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	hammer_buffer_t buffer = NULL;
	int error = 0;

	hammer_off_t phys_off;
	hammer_off_t block_off;
	hammer_off_t layer1_off;
	hammer_off_t layer2_off;
	hammer_off_t aligned_buf_end_off;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;

	/*
	 * Calculate the usable size of the volume, which
	 * must be aligned at a bigblock (8 MB) boundary.
	 */
	aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_LARGEBLOCK_MASK64));

	/*
	 * Iterate the volume's address space in chunks of 4 TB, where each
	 * chunk consists of at least one physically available 8 MB bigblock.
	 *
	 * For each chunk we need one L1 entry and one L2 bigblock.
	 * We use the first bigblock of each chunk as L2 block.
	 */
	for (phys_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
	     phys_off < aligned_buf_end_off;
	     phys_off += HAMMER_BLOCKMAP_LAYER2) {
		for (block_off = 0;
		     block_off < HAMMER_BLOCKMAP_LAYER2;
		     block_off += HAMMER_LARGEBLOCK_SIZE) {
			layer2_off = phys_off +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_off);
			layer2 = hammer_bread(hmp, layer2_off, &error, &buffer);
			if (error)
				goto end;

			error = callback(trans, volume, &buffer, NULL,
				layer2, phys_off, block_off, data);
			if (error)
				goto end;
		}

		layer1_off = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_off);
		layer1 = hammer_bread(hmp, layer1_off, &error, &buffer);
		if (error)
			goto end;

		error = callback(trans, volume, &buffer, layer1, NULL,
			phys_off, 0, data);
		if (error)
			goto end;
	}

end:
	if (buffer) {
		hammer_rel_buffer(buffer, 0);
		buffer = NULL;
	}

	return error;
}

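/*
 * Minimal callback sketch (illustrative only, not compiled in): the
 * iterator above invokes the callback once per layer2 entry with
 * layer1 == NULL, then once per 4 TB chunk with layer2 == NULL.  A
 * hypothetical callback that merely counts bigblocks would look like
 * this:
 */
#if 0
static int
example_count_callback(hammer_transaction_t trans, hammer_volume_t volume,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off, hammer_off_t block_off, void *data)
{
	int *counter = data;

	if (layer2)
		++*counter;	/* one 8 MB bigblock */
	return (0);
}
#endif
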
static int
format_callback(hammer_transaction_t trans, hammer_volume_t volume,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off,
	void *data)
{
	struct bigblock_stat *stat = (struct bigblock_stat*)data;

	/*
	 * Calculate the usable size of the volume, which must be aligned
	 * at a bigblock (8 MB) boundary.
	 */
	hammer_off_t aligned_buf_end_off;
	aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_LARGEBLOCK_MASK64));

	if (layer1) {
		KKASSERT(layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL);

		hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = phys_off;
		layer1->blocks_free = stat->counter;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);

		stat->total_free_bigblocks += stat->counter;
		stat->counter = 0; /* reset */
	} else if (layer2) {
		hammer_modify_buffer(trans, *bufferp, layer2, sizeof(*layer2));
		bzero(layer2, sizeof(*layer2));

		if (block_off == 0) {
			/*
			 * The first entry represents the L2 bigblock itself.
			 */
			layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
			layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
			layer2->bytes_free = 0;
			++stat->total_bigblocks;
		} else if (phys_off + block_off < aligned_buf_end_off) {
			/*
			 * Available bigblock.
			 */
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
			++stat->total_bigblocks;
			++stat->counter;
		} else {
			/*
			 * Bigblock outside of physically available
			 * space.
			 */
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
			layer2->bytes_free = 0;
		}

		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);
	} else {
		KKASSERT(0);
	}

	return 0;
}

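/*
 * Summary of the layer2 states written by format_callback() above:
 *
 *	case			zone		append_off	bytes_free
 *	L2 array bigblock	FREEMAP		8 MB		0
 *	available bigblock	0 (unassigned)	0		8 MB
 *	beyond volume end	UNAVAIL		8 MB		0
 */
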
static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat)
{
	stat->total_bigblocks = 0;
	stat->total_free_bigblocks = 0;
	stat->counter = 0;
	return hammer_iterate_l1l2_entries(trans, volume, format_callback,
		stat);
}

static int
free_callback(hammer_transaction_t trans, hammer_volume_t volume __unused,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off __unused,
	void *data)
{
	struct bigblock_stat *stat = (struct bigblock_stat*)data;

	/*
	 * A NULL stat means test-only: check whether the volume is
	 * empty without modifying any on-disk structures.
	 */
	int testonly = (stat == NULL);

	if (layer1) {
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			/*
			 * This layer1 entry is already free.
			 */
			return 0;
		}

		KKASSERT((int)HAMMER_VOL_DECODE(layer1->phys_offset) ==
			trans->hmp->volume_to_remove);

		if (testonly)
			return 0;

		/*
		 * Free the L1 entry.
		 */
		hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);

		return 0;
	} else if (layer2) {
		if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
			return 0;
		}

		if (layer2->zone == HAMMER_ZONE_FREEMAP_INDEX) {
			if (stat) {
				++stat->total_bigblocks;
			}
			return 0;
		}

		if (layer2->append_off == 0 &&
		    layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
			if (stat) {
				++stat->total_bigblocks;
				++stat->total_free_bigblocks;
			}
			return 0;
		}

		/*
		 * We found a layer2 entry that is not empty!
		 */
		return EBUSY;
	} else {
		KKASSERT(0);
	}

	return EINVAL;
}

static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat)
{
	int error;

	stat->total_bigblocks = 0;
	stat->total_free_bigblocks = 0;
	stat->counter = 0;

	/*
	 * Pass 1: with data == NULL the callback runs in test-only
	 * mode and verifies that the volume is completely empty.
	 */
	error = hammer_iterate_l1l2_entries(trans, volume, free_callback,
		NULL);
	if (error)
		return error;

	/*
	 * Pass 2: actually free the freemap and collect the bigblock
	 * statistics.
	 */
	error = hammer_iterate_l1l2_entries(trans, volume, free_callback,
		stat);
	return error;
}

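/*
 * Note: free_callback() returning EBUSY is what makes volume deletion
 * fail with "volume not empty"; the reblocking pass in
 * hammer_ioc_volume_del() must have moved every last byte off the
 * leaving volume before hammer_free_freemap() can succeed.
 */
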
/************************************************************************
 *				MISC					*
 ************************************************************************/

static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly)
{
	int error;
	struct nlookupdata nd;

	/*
	 * Get the device vnode.
	 */
	if (*devvpp == NULL) {
		error = nlookup_init(&nd, dev_path, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, devvpp);
		nlookup_done(&nd);
	} else {
		error = 0;
	}

	if (error == 0) {
		if (vn_isdisk(*devvpp, &error)) {
			error = vfs_mountedon(*devvpp);
		}
	}
	if (error == 0 && vcount(*devvpp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(*devvpp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(*devvpp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(*devvpp,
				(ronly ? FREAD : FREAD|FWRITE),
				FSCRED, NULL);
		}
		vn_unlock(*devvpp);
	}
	if (error && *devvpp) {
		vrele(*devvpp);
		*devvpp = NULL;
	}
	return (error);
}

static void
hammer_close_device(struct vnode **devvpp, int ronly)
{
	if (*devvpp) {
		VOP_CLOSE(*devvpp, (ronly ? FREAD : FREAD|FWRITE));
		vinvalbuf(*devvpp, ronly ? 0 : V_SAVE, 0, 0);
		vrele(*devvpp);
		*devvpp = NULL;
	}
}

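/*
 * On-disk layout produced by hammer_format_volume_header() below,
 * with offsets relative to the start of the device:
 *
 *	0		volume header (within the first HAMMER_BUFSIZE)
 *	vol_bot_beg	boot area (boot_area_size bytes), placed after
 *			16 * HAMMER_BUFSIZE bytes reserved for header junk
 *	vol_mem_beg	memory log area (mem_area_size bytes)
 *	vol_buf_beg	zone 2 buffer allocation area
 *	vol_buf_end	vol_size rounded down to a HAMMER_BUFSIZE boundary
 */
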
static int
hammer_format_volume_header(struct hammer_mount *hmp, struct vnode *devvp,
	const char *vol_name, int vol_no, int vol_count,
	int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size)
{
	struct buf *bp = NULL;
	struct hammer_volume_ondisk *ondisk;
	int error;

	/*
	 * Read the volume header on the device and perform various
	 * sanity checks before formatting it.
	 */
	KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));
	error = bread(devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error || bp->b_bcount < sizeof(struct hammer_volume_ondisk))
		goto late_failure;

	ondisk = (struct hammer_volume_ondisk*) bp->b_data;

	/*
	 * Note that we do NOT allow the use of a device that already
	 * contains a valid HAMMER signature.  It has to be erased
	 * (e.g. with dd) beforehand.
	 */
	if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_volume_add: Formatting of valid HAMMER volume "
			"%s denied.  Erase with dd!\n", vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	bzero(ondisk, sizeof(struct hammer_volume_ondisk));
	ksnprintf(ondisk->vol_name, sizeof(ondisk->vol_name), "%s", vol_name);
	ondisk->vol_fstype = hmp->rootvol->ondisk->vol_fstype;
	ondisk->vol_signature = HAMMER_FSBUF_VOLUME;
	ondisk->vol_fsid = hmp->fsid;
	ondisk->vol_rootvol = hmp->rootvol->vol_no;
	ondisk->vol_no = vol_no;
	ondisk->vol_count = vol_count;
	ondisk->vol_version = hmp->version;

	/*
	 * Reserve space for (future) header junk and set up our
	 * poor-man's bigblock allocator.
	 */
	int64_t vol_alloc = HAMMER_BUFSIZE * 16;

	ondisk->vol_bot_beg = vol_alloc;
	vol_alloc += boot_area_size;
	ondisk->vol_mem_beg = vol_alloc;
	vol_alloc += mem_area_size;

	/*
	 * The remaining area is the zone 2 buffer allocation area
	 * (raw buffers addressed by zone-2 offsets).
	 */
	ondisk->vol_buf_beg = vol_alloc;
	ondisk->vol_buf_end = vol_size & ~(int64_t)HAMMER_BUFMASK;

	if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
		kprintf("volume %d %s is too small to hold the volume "
			"header\n", ondisk->vol_no, ondisk->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	ondisk->vol_nblocks = (ondisk->vol_buf_end - ondisk->vol_buf_beg) /
		HAMMER_BUFSIZE;
	ondisk->vol_blocksize = HAMMER_BUFSIZE;

	/*
	 * Write the volume header to disk.
	 */
	error = bwrite(bp);
	bp = NULL;

late_failure:
	if (bp)
		brelse(bp);
	return (error);
}

/*
 * Invalidates the volume header.  Used by volume-del.
 */
static int
hammer_clear_volume_header(struct vnode *devvp)
{
	struct buf *bp = NULL;
	struct hammer_volume_ondisk *ondisk;
	int error;

	KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));
	error = bread(devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error || bp->b_bcount < sizeof(struct hammer_volume_ondisk))
		goto late_failure;

	ondisk = (struct hammer_volume_ondisk*) bp->b_data;
	bzero(ondisk, sizeof(struct hammer_volume_ondisk));

	error = bwrite(bp);
	bp = NULL;

late_failure:
	if (bp)
		brelse(bp);
	return (error);
}