/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 *
 * Copyright (c) 2018, Intel Corporation.
 * Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
 */

#include <sys/vdev_impl.h>
#include <sys/dsl_scan.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/zio.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zap.h>

/*
 * This file contains the sequential reconstruction implementation for
 * resilvering. This form of resilvering is internally referred to as device
 * rebuild to avoid conflating it with the traditional healing reconstruction
 * performed by the dsl scan code.
 *
 * When replacing a device, or scrubbing the pool, ZFS has historically used
 * a process called resilvering which is a form of healing reconstruction.
 * This approach has the advantage that as blocks are read from disk their
 * checksums can be immediately verified and the data repaired. Unfortunately,
 * it also results in a random IO pattern to the disk even when extra care
 * is taken to sequentialize the IO as much as possible. This substantially
 * increases the time required to resilver the pool and restore redundancy.
 *
 * For mirrored devices it's possible to implement an alternate sequential
 * reconstruction strategy when resilvering. Sequential reconstruction
 * behaves like a traditional RAID rebuild and reconstructs a device in LBA
 * order without verifying the checksum. After this phase completes a second
 * scrub phase is started to verify all of the checksums. This two phase
 * process will take longer than the healing reconstruction described above.
 * However, it has the advantage that after the first reconstruction phase
 * completes redundancy has been restored. At this point the pool can incur
 * another device failure without risking data loss.
 *
 * There are a few noteworthy limitations and other advantages of resilvering
 * using sequential reconstruction vs healing reconstruction.
 *
 * Limitations:
 *
 *   - Only supported for mirror vdev types. Due to the variable stripe
 *     width used by raidz, sequential reconstruction is not possible.
 *
 *   - Block checksums are not verified during sequential reconstruction.
 *     Similar to traditional RAID the parity/mirror data is reconstructed
 *     but cannot be immediately double checked. For this reason when the
 *     last active resilver completes the pool is automatically scrubbed.
 *
 *   - Deferred resilvers using sequential reconstruction are not currently
 *     supported. When adding another vdev to an active top-level resilver
 *     it must be restarted.
 *
 * Advantages:
 *
 *   - Sequential reconstruction is performed in LBA order which may be faster
 *     than healing reconstruction, particularly when using HDDs (or
 *     especially with SMR devices). Only allocated capacity is resilvered.
 *
 *   - Sequential reconstruction is not constrained by ZFS block boundaries.
 *     This allows it to issue larger IOs to disk which span multiple blocks
 *     allowing all of these logical blocks to be repaired with a single IO.
 *
 *   - Unlike a healing resilver or scrub which are pool wide operations,
 *     sequential reconstruction is handled by the top-level mirror vdevs.
 *     This allows for it to be started or canceled on a top-level vdev
 *     without impacting any other top-level vdevs in the pool.
 *
 *   - Data only referenced by a pool checkpoint will be repaired because
 *     that space is reflected in the space maps. This differs from a
 *     healing resilver or scrub which will not repair that data.
 */
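
/*
 * Typical usage (illustrative note, not part of the original design
 * comment): a sequential rebuild is requested from user space, for example
 * via "zpool attach -s" or "zpool replace -s" in OpenZFS releases that ship
 * this code, which ultimately calls vdev_rebuild() on the affected top-level
 * vdev. Progress is reported through vdev_rebuild_get_stats() below and is
 * surfaced by "zpool status".
 */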

/*
 * Maximum number of queued rebuild I/Os per top-level vdev. The number of
 * concurrent rebuild I/Os issued to the device is controlled by the
 * zfs_vdev_rebuild_min_active and zfs_vdev_rebuild_max_active module
 * options.
 */
unsigned int zfs_rebuild_queue_limit = 20;

/*
 * Size of rebuild reads; defaults to 1MiB and is capped at SPA_MAXBLOCKSIZE.
 */
unsigned long zfs_rebuild_max_segment = 1024 * 1024;

/*
 * For vdev_rebuild_initiate_sync() and vdev_rebuild_reset_sync().
 */
static void vdev_rebuild_thread(void *arg);

/*
 * Clear the per-vdev rebuild bytes value for a vdev tree.
 */
static void
clear_rebuild_bytes(vdev_t *vd)
{
	vdev_stat_t *vs = &vd->vdev_stat;

	for (uint64_t i = 0; i < vd->vdev_children; i++)
		clear_rebuild_bytes(vd->vdev_child[i]);

	mutex_enter(&vd->vdev_stat_lock);
	vs->vs_rebuild_processed = 0;
	mutex_exit(&vd->vdev_stat_lock);
}

/*
 * Determines whether a vdev_rebuild_thread() should be stopped.
 */
static boolean_t
vdev_rebuild_should_stop(vdev_t *vd)
{
	return (!vdev_writeable(vd) || vd->vdev_removing ||
	    vd->vdev_rebuild_exit_wanted ||
	    vd->vdev_rebuild_cancel_wanted ||
	    vd->vdev_rebuild_reset_wanted);
}

/*
 * Determine if the rebuild should be canceled. This may happen when all
 * vdevs with MISSING DTLs are detached.
 */
static boolean_t
vdev_rebuild_should_cancel(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	if (!vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg))
		return (B_TRUE);

	return (B_FALSE);
}
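
/*
 * Progress tracking overview (summary of the functions below):
 * vdev_rebuild_range() keeps the offset up to which ranges have been issued
 * for each open txg in vr_scan_offset[txg & TXG_MASK], and schedules
 * vdev_rebuild_update_sync() the first time a txg is used. By the time that
 * sync task runs all rebuild I/Os assigned to the txg have completed
 * (spa_sync() waits on spa_txg_zio before running sync tasks), so the offset
 * can be safely persisted to vrp_last_offset. If an I/O fails because the
 * top-level vdev became unwritable, vdev_rebuild_cb() rolls the in-core
 * offset back so that a resumed rebuild restarts from a safe location.
 */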

/*
 * The sync task for updating the on-disk state of a rebuild. This is
 * scheduled by vdev_rebuild_range().
 */
static void
vdev_rebuild_update_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	uint64_t txg = dmu_tx_get_txg(tx);

	mutex_enter(&vd->vdev_rebuild_lock);

	if (vr->vr_scan_offset[txg & TXG_MASK] > 0) {
		vrp->vrp_last_offset = vr->vr_scan_offset[txg & TXG_MASK];
		vr->vr_scan_offset[txg & TXG_MASK] = 0;
	}

	vrp->vrp_scan_time_ms = vr->vr_prev_scan_time_ms +
	    NSEC2MSEC(gethrtime() - vr->vr_pass_start_time);

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * Initialize the on-disk state for a new rebuild, start the rebuild thread.
 */
static void
vdev_rebuild_initiate_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	ASSERT(vd->vdev_rebuilding);

	spa_feature_incr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	mutex_enter(&vd->vdev_rebuild_lock);
	bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
	vrp->vrp_rebuild_state = VDEV_REBUILD_ACTIVE;
	vrp->vrp_min_txg = 0;
	vrp->vrp_max_txg = dmu_tx_get_txg(tx);
	vrp->vrp_start_time = gethrestime_sec();
	vrp->vrp_scan_time_ms = 0;
	vr->vr_prev_scan_time_ms = 0;

	/*
	 * Rebuilds are currently only used when replacing a device, in which
	 * case there must be DTL_MISSING entries. In the future, we could
	 * allow rebuilds to be used in a way similar to a scrub. This would
	 * be useful because it would allow us to rebuild the space used by
	 * pool checkpoints.
	 */
	VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu started",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);

	ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
	vd->vdev_rebuild_thread = thread_create(NULL, 0,
	    vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);

	mutex_exit(&vd->vdev_rebuild_lock);
}

static void
vdev_rebuild_log_notify(spa_t *spa, vdev_t *vd, char *name)
{
	nvlist_t *aux = fnvlist_alloc();

	fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE, "sequential");
	spa_event_notify(spa, vd, aux, name);
	nvlist_free(aux);
}

/*
 * Called to request that a new rebuild be started. The feature will remain
 * active for the duration of the rebuild, then revert to the enabled state.
 */
static void
vdev_rebuild_initiate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(vd->vdev_top == vd);
	ASSERT(MUTEX_HELD(&vd->vdev_rebuild_lock));
	ASSERT(!vd->vdev_rebuilding);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	vd->vdev_rebuilding = B_TRUE;

	dsl_sync_task_nowait(spa_get_dsl(spa), vdev_rebuild_initiate_sync,
	    (void *)(uintptr_t)vd->vdev_id, 0, ZFS_SPACE_CHECK_NONE, tx);
	dmu_tx_commit(tx);

	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_START);
}

/*
 * Update the on-disk state to completed when a rebuild finishes.
 */
static void
vdev_rebuild_complete_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);
	vrp->vrp_rebuild_state = VDEV_REBUILD_COMPLETE;
	vrp->vrp_end_time = gethrestime_sec();

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	vdev_dtl_reassess(vd, tx->tx_txg, vrp->vrp_max_txg, B_TRUE, B_TRUE);
	spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu complete",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);

	/* Handles detaching of spares */
	spa_async_request(spa, SPA_ASYNC_REBUILD_DONE);
	vd->vdev_rebuilding = B_FALSE;
	mutex_exit(&vd->vdev_rebuild_lock);

	spa_notify_waiters(spa);
	cv_broadcast(&vd->vdev_rebuild_cv);
}

/*
 * Update the on-disk state to canceled when a rebuild finishes.
 */
static void
vdev_rebuild_cancel_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);
	vrp->vrp_rebuild_state = VDEV_REBUILD_CANCELED;
	vrp->vrp_end_time = gethrestime_sec();

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu canceled",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);

	vd->vdev_rebuild_cancel_wanted = B_FALSE;
	vd->vdev_rebuilding = B_FALSE;
	mutex_exit(&vd->vdev_rebuild_lock);

	spa_notify_waiters(spa);
	cv_broadcast(&vd->vdev_rebuild_cv);
}

/*
 * Resets the progress of a running rebuild. This will occur when a new
 * vdev is added to rebuild.
 */
static void
vdev_rebuild_reset_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);

	ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
	ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);

	vrp->vrp_last_offset = 0;
	vrp->vrp_min_txg = 0;
	vrp->vrp_max_txg = dmu_tx_get_txg(tx);
	vrp->vrp_bytes_scanned = 0;
	vrp->vrp_bytes_issued = 0;
	vrp->vrp_bytes_rebuilt = 0;
	vrp->vrp_bytes_est = 0;
	vrp->vrp_scan_time_ms = 0;
	vr->vr_prev_scan_time_ms = 0;

	/* See vdev_rebuild_initiate_sync comment */
	VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu reset",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);

	vd->vdev_rebuild_reset_wanted = B_FALSE;
	ASSERT(vd->vdev_rebuilding);

	vd->vdev_rebuild_thread = thread_create(NULL, 0,
	    vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * Clear the last rebuild status.
 */
void
vdev_rebuild_clear_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	objset_t *mos = spa_meta_objset(spa);

	mutex_enter(&vd->vdev_rebuild_lock);

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD) ||
	    vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE) {
		mutex_exit(&vd->vdev_rebuild_lock);
		return;
	}

	clear_rebuild_bytes(vd);
	bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);

	if (vd->vdev_top_zap != 0 && zap_contains(mos, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS) == 0) {
		VERIFY0(zap_update(mos, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
		    REBUILD_PHYS_ENTRIES, vrp, tx));
	}

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * The zio_done_func_t callback for each rebuild I/O issued. It's responsible
 * for updating the rebuild stats and limiting the number of in flight I/Os.
 */
static void
vdev_rebuild_cb(zio_t *zio)
{
	vdev_rebuild_t *vr = zio->io_private;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	vdev_t *vd = vr->vr_top_vdev;

	mutex_enter(&vd->vdev_rebuild_io_lock);
	if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
		/*
		 * The I/O failed because the top-level vdev was unavailable.
		 * Attempt to roll back to the last completed offset, in order
		 * to resume from the correct location if the pool is resumed.
		 * (This works because spa_sync waits on spa_txg_zio before
		 * it runs sync tasks.)
		 */
		uint64_t *off = &vr->vr_scan_offset[zio->io_txg & TXG_MASK];
		*off = MIN(*off, zio->io_offset);
	} else if (zio->io_error) {
		vrp->vrp_errors++;
	}

	abd_free(zio->io_abd);

	ASSERT3U(vd->vdev_rebuild_inflight, >, 0);
	vd->vdev_rebuild_inflight--;
	cv_broadcast(&vd->vdev_rebuild_io_cv);
	mutex_exit(&vd->vdev_rebuild_io_lock);

	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/*
 * Rebuild the data in this range by constructing a special dummy block
 * pointer for the given range. It has no relation to any existing blocks
 * in the pool. But by disabling checksum verification and issuing a scrub
 * I/O mirrored vdevs will replicate the block using any available mirror
 * leaf vdevs.
 */
static void
vdev_rebuild_rebuild_block(vdev_rebuild_t *vr, uint64_t start, uint64_t asize,
    uint64_t txg)
{
	vdev_t *vd = vr->vr_top_vdev;
	spa_t *spa = vd->vdev_spa;
	uint64_t psize = asize;

	ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops);

	blkptr_t blk, *bp = &blk;
	BP_ZERO(bp);

	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
	DVA_SET_OFFSET(&bp->blk_dva[0], start);
	DVA_SET_GANG(&bp->blk_dva[0], 0);
	DVA_SET_ASIZE(&bp->blk_dva[0], asize);

	BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
	BP_SET_LSIZE(bp, psize);
	BP_SET_PSIZE(bp, psize);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
	BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
	BP_SET_TYPE(bp, DMU_OT_NONE);
	BP_SET_LEVEL(bp, 0);
	BP_SET_DEDUP(bp, 0);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

	/*
	 * We increment the issued bytes by the asize rather than the psize
	 * so the scanned and issued bytes may be directly compared. This
	 * is consistent with the scrub/resilver issued reporting.
	 */
	vr->vr_pass_bytes_issued += asize;
	vr->vr_rebuild_phys.vrp_bytes_issued += asize;

	zio_nowait(zio_read(spa->spa_txg_zio[txg & TXG_MASK], spa, bp,
	    abd_alloc(psize, B_FALSE), psize, vdev_rebuild_cb, vr,
	    ZIO_PRIORITY_REBUILD, ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_RESILVER, NULL));
}

/*
 * Issues a rebuild I/O and takes care of rate limiting the number of queued
 * rebuild I/Os. The provided start and size must be properly aligned for the
 * top-level vdev type being rebuilt.
 */
static int
vdev_rebuild_range(vdev_rebuild_t *vr, uint64_t start, uint64_t size)
{
	uint64_t ms_id __maybe_unused = vr->vr_scan_msp->ms_id;
	vdev_t *vd = vr->vr_top_vdev;
	spa_t *spa = vd->vdev_spa;

	ASSERT3U(ms_id, ==, start >> vd->vdev_ms_shift);
	ASSERT3U(ms_id, ==, (start + size - 1) >> vd->vdev_ms_shift);

	vr->vr_pass_bytes_scanned += size;
	vr->vr_rebuild_phys.vrp_bytes_scanned += size;

	mutex_enter(&vd->vdev_rebuild_io_lock);

	/* Limit in flight rebuild I/Os */
	while (vd->vdev_rebuild_inflight >= zfs_rebuild_queue_limit)
		cv_wait(&vd->vdev_rebuild_io_cv, &vd->vdev_rebuild_io_lock);

	vd->vdev_rebuild_inflight++;
	mutex_exit(&vd->vdev_rebuild_io_lock);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_rebuild_lock);

	/* This is the first I/O for this txg. */
	if (vr->vr_scan_offset[txg & TXG_MASK] == 0) {
		vr->vr_scan_offset[txg & TXG_MASK] = start;
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_rebuild_update_sync,
		    (void *)(uintptr_t)vd->vdev_id, 2,
		    ZFS_SPACE_CHECK_RESERVED, tx);
	}

	/* When exiting write out our progress. */
	if (vdev_rebuild_should_stop(vd)) {
		mutex_enter(&vd->vdev_rebuild_io_lock);
		vd->vdev_rebuild_inflight--;
		mutex_exit(&vd->vdev_rebuild_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_rebuild_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_rebuild_lock);

	vr->vr_scan_offset[txg & TXG_MASK] = start + size;
	vdev_rebuild_rebuild_block(vr, start, size, txg);

	dmu_tx_commit(tx);

	return (0);
}

/*
 * Split range into legally-sized logical chunks given the constraints of the
 * top-level mirror vdev type.
 */
static uint64_t
vdev_rebuild_chunk_size(vdev_t *vd, uint64_t start, uint64_t size)
{
	uint64_t chunk_size, max_asize, max_segment;

	ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops);

	max_segment = MIN(P2ROUNDUP(zfs_rebuild_max_segment,
	    1 << vd->vdev_ashift), SPA_MAXBLOCKSIZE);
	max_asize = vdev_psize_to_asize(vd, max_segment);
	chunk_size = MIN(size, max_asize);

	return (chunk_size);
}
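
/*
 * Worked example (illustrative, assuming the defaults): with ashift=12 and
 * zfs_rebuild_max_segment at its default of 1 MiB, max_segment rounds up to
 * 1 MiB and is well under SPA_MAXBLOCKSIZE. For a mirror the allocated size
 * equals the physical size, so max_asize is also 1 MiB and each allocated
 * range is issued in chunks of at most 1 MiB.
 */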

/*
 * Issues rebuild I/Os for all ranges in the provided vr->vr_scan_tree range
 * tree.
 */
static int
vdev_rebuild_ranges(vdev_rebuild_t *vr)
{
	vdev_t *vd = vr->vr_top_vdev;
	zfs_btree_t *t = &vr->vr_scan_tree->rt_root;
	zfs_btree_index_t idx;
	int error;

	for (range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
	    rs = zfs_btree_next(t, &idx, &idx)) {
		uint64_t start = rs_get_start(rs, vr->vr_scan_tree);
		uint64_t size = rs_get_end(rs, vr->vr_scan_tree) - start;

		/*
		 * zfs_scan_suspend_progress can be set to disable rebuild
		 * progress for testing. See comment in dsl_scan_sync().
		 */
		while (zfs_scan_suspend_progress &&
		    !vdev_rebuild_should_stop(vd)) {
			delay(hz);
		}

		while (size > 0) {
			uint64_t chunk_size;

			chunk_size = vdev_rebuild_chunk_size(vd, start, size);

			error = vdev_rebuild_range(vr, start, chunk_size);
			if (error != 0)
				return (error);

			size -= chunk_size;
			start += chunk_size;
		}
	}

	return (0);
}

/*
 * Calculates the estimated capacity which remains to be scanned. Since
 * we traverse the pool in metaslab order only allocated capacity beyond
 * the vrp_last_offset need be considered. All lower offsets must have
 * already been rebuilt and are thus already included in vrp_bytes_scanned.
 */
static void
vdev_rebuild_update_bytes_est(vdev_t *vd, uint64_t ms_id)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	uint64_t bytes_est = vrp->vrp_bytes_scanned;

	if (vrp->vrp_last_offset < vd->vdev_ms[ms_id]->ms_start)
		return;

	for (uint64_t i = ms_id; i < vd->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_ms[i];

		mutex_enter(&msp->ms_lock);
		bytes_est += metaslab_allocated_space(msp);
		mutex_exit(&msp->ms_lock);
	}

	vrp->vrp_bytes_est = bytes_est;
}

/*
 * Load from disk the top-level vdev's rebuild information.
 */
int
vdev_rebuild_load(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	spa_t *spa = vd->vdev_spa;
	int err = 0;

	mutex_enter(&vd->vdev_rebuild_lock);
	vd->vdev_rebuilding = B_FALSE;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD)) {
		bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
		mutex_exit(&vd->vdev_rebuild_lock);
		return (SET_ERROR(ENOTSUP));
	}

	ASSERT(vd->vdev_top == vd);

	err = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp);

	/*
	 * A missing or damaged VDEV_TOP_ZAP_VDEV_REBUILD_PHYS should
	 * not prevent a pool from being imported. Clear the rebuild
	 * status allowing a new resilver/rebuild to be started.
	 */
	if (err == ENOENT || err == EOVERFLOW || err == ECKSUM) {
		bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
	} else if (err) {
		mutex_exit(&vd->vdev_rebuild_lock);
		return (err);
	}

	vr->vr_prev_scan_time_ms = vrp->vrp_scan_time_ms;
	vr->vr_top_vdev = vd;

	mutex_exit(&vd->vdev_rebuild_lock);

	return (0);
}

/*
 * Each scan thread is responsible for rebuilding a top-level vdev. The
 * rebuild progress is tracked on-disk in VDEV_TOP_ZAP_VDEV_REBUILD_PHYS.
 */
static void
vdev_rebuild_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;

	/*
	 * If there's a scrub in process request that it be stopped. This
	 * is not required for a correct rebuild, but we do want rebuilds to
	 * emulate the resilver behavior as much as possible.
	 */
	dsl_pool_t *dsl = spa_get_dsl(spa);
	if (dsl_scan_scrubbing(dsl))
		dsl_scan_cancel(dsl);

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
	mutex_enter(&vd->vdev_rebuild_lock);

	ASSERT3P(vd->vdev_top, ==, vd);
	ASSERT3P(vd->vdev_rebuild_thread, !=, NULL);
	ASSERT(vd->vdev_rebuilding);
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REBUILD));
	ASSERT3B(vd->vdev_rebuild_cancel_wanted, ==, B_FALSE);
	ASSERT3B(vd->vdev_rebuild_reset_wanted, ==, B_FALSE);

	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	vr->vr_top_vdev = vd;
	vr->vr_scan_msp = NULL;
	vr->vr_scan_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
	vr->vr_pass_start_time = gethrtime();
	vr->vr_pass_bytes_scanned = 0;
	vr->vr_pass_bytes_issued = 0;

	uint64_t update_est_time = gethrtime();
	vdev_rebuild_update_bytes_est(vd, 0);

	clear_rebuild_bytes(vr->vr_top_vdev);

	mutex_exit(&vd->vdev_rebuild_lock);

	/*
	 * Systematically walk the metaslabs and issue rebuild I/Os for
	 * all ranges in the allocated space map.
	 */
	for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_ms[i];
		vr->vr_scan_msp = msp;

		/*
		 * Removal of vdevs from the vdev tree may eliminate the need
		 * for the rebuild, in which case it should be canceled. The
		 * vdev_rebuild_cancel_wanted flag is set until the sync task
		 * completes. This may be after the rebuild thread exits.
		 */
		if (vdev_rebuild_should_cancel(vd)) {
			vd->vdev_rebuild_cancel_wanted = B_TRUE;
			error = EINTR;
			break;
		}

		ASSERT0(range_tree_space(vr->vr_scan_tree));

		/*
		 * Disable any new allocations to this metaslab and wait
		 * for any in-flight writes to complete. This is needed to
		 * ensure all allocated ranges are rebuilt.
		 */
		metaslab_disable(msp);
		spa_config_exit(spa, SCL_CONFIG, FTAG);
		txg_wait_synced(dsl, 0);

		mutex_enter(&msp->ms_sync_lock);
		mutex_enter(&msp->ms_lock);

		/*
		 * When a metaslab has been allocated from, read its allocated
		 * ranges from the space map object into the vr_scan_tree.
		 * Then add inflight / unflushed ranges and remove inflight /
		 * unflushed frees. This is the minimum range to be rebuilt.
		 */
		if (msp->ms_sm != NULL) {
			VERIFY0(space_map_load(msp->ms_sm,
			    vr->vr_scan_tree, SM_ALLOC));

			for (int i = 0; i < TXG_SIZE; i++) {
				ASSERT0(range_tree_space(
				    msp->ms_allocating[i]));
			}

			range_tree_walk(msp->ms_unflushed_allocs,
			    range_tree_add, vr->vr_scan_tree);
			range_tree_walk(msp->ms_unflushed_frees,
			    range_tree_remove, vr->vr_scan_tree);

			/*
			 * Remove ranges which have already been rebuilt based
			 * on the last offset. This can happen when restarting
			 * a scan after exporting and re-importing the pool.
			 */
			range_tree_clear(vr->vr_scan_tree, 0,
			    vrp->vrp_last_offset);
		}

		mutex_exit(&msp->ms_lock);
		mutex_exit(&msp->ms_sync_lock);

		/*
		 * To provide an accurate estimate re-calculate the estimated
		 * size every 5 minutes to account for recent allocations and
		 * frees made to space maps which have not yet been rebuilt.
		 */
		if (gethrtime() > update_est_time + SEC2NSEC(300)) {
			update_est_time = gethrtime();
			vdev_rebuild_update_bytes_est(vd, i);
		}

		/*
		 * Walk the allocated space map and issue the rebuild I/O.
		 */
		error = vdev_rebuild_ranges(vr);
		range_tree_vacate(vr->vr_scan_tree, NULL, NULL);

		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
		metaslab_enable(msp, B_FALSE, B_FALSE);

		if (error != 0)
			break;
	}

	range_tree_destroy(vr->vr_scan_tree);
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	/* Wait for any remaining rebuild I/O to complete */
	mutex_enter(&vd->vdev_rebuild_io_lock);
	while (vd->vdev_rebuild_inflight > 0)
		cv_wait(&vd->vdev_rebuild_io_cv, &vd->vdev_rebuild_io_lock);

	mutex_exit(&vd->vdev_rebuild_io_lock);

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	dsl_pool_t *dp = spa_get_dsl(spa);
	dmu_tx_t *tx = dmu_tx_create_dd(dp->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	mutex_enter(&vd->vdev_rebuild_lock);
	if (error == 0) {
		/*
		 * After a successful rebuild clear the DTLs of all ranges
		 * which were missing when the rebuild was started. These
		 * ranges must have been rebuilt as a consequence of rebuilding
		 * all allocated space. Note that unlike a scrub or resilver
		 * the rebuild operation will reconstruct data only referenced
		 * by a pool checkpoint. See the dsl_scan_done() comments.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_complete_sync,
		    (void *)(uintptr_t)vd->vdev_id, 0,
		    ZFS_SPACE_CHECK_NONE, tx);
	} else if (vd->vdev_rebuild_cancel_wanted) {
		/*
		 * The rebuild operation was canceled. This will occur when
		 * a device participating in the rebuild is detached.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_cancel_sync,
		    (void *)(uintptr_t)vd->vdev_id, 0,
		    ZFS_SPACE_CHECK_NONE, tx);
	} else if (vd->vdev_rebuild_reset_wanted) {
		/*
		 * Reset the running rebuild without canceling and restarting
		 * it. This will occur when a new device is attached and must
		 * participate in the rebuild.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_reset_sync,
		    (void *)(uintptr_t)vd->vdev_id, 0,
		    ZFS_SPACE_CHECK_NONE, tx);
	} else {
		/*
		 * The rebuild operation should be suspended. This may occur
		 * when detaching a child vdev or when exporting the pool. The
		 * rebuild is left in the active state so it will be resumed.
		 */
		ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
		vd->vdev_rebuilding = B_FALSE;
	}

	dmu_tx_commit(tx);

	vd->vdev_rebuild_thread = NULL;
	mutex_exit(&vd->vdev_rebuild_lock);
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	cv_broadcast(&vd->vdev_rebuild_cv);

	thread_exit();
}

/*
 * Returns B_TRUE if any top-level vdev is rebuilding.
 */
boolean_t
vdev_rebuild_active(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	boolean_t ret = B_FALSE;

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++) {
			ret = vdev_rebuild_active(vd->vdev_child[i]);
			if (ret)
				return (ret);
		}
	} else if (vd->vdev_top_zap != 0) {
		vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&vd->vdev_rebuild_lock);
		ret = (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
		mutex_exit(&vd->vdev_rebuild_lock);
	}

	return (ret);
}

/*
 * Start a rebuild operation. The rebuild may be restarted when the
 * top-level vdev is currently actively rebuilding.
 */
void
vdev_rebuild(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp __maybe_unused = &vr->vr_rebuild_phys;

	ASSERT(vd->vdev_top == vd);
	ASSERT(vdev_is_concrete(vd));
	ASSERT(!vd->vdev_removing);
	ASSERT(spa_feature_is_enabled(vd->vdev_spa,
	    SPA_FEATURE_DEVICE_REBUILD));

	mutex_enter(&vd->vdev_rebuild_lock);
	if (vd->vdev_rebuilding) {
		ASSERT3U(vrp->vrp_rebuild_state, ==, VDEV_REBUILD_ACTIVE);

		/*
		 * Signal a running rebuild operation that it should restart
		 * from the beginning because a new device was attached. The
		 * vdev_rebuild_reset_wanted flag is set until the sync task
		 * completes. This may be after the rebuild thread exits.
		 */
		if (!vd->vdev_rebuild_reset_wanted)
			vd->vdev_rebuild_reset_wanted = B_TRUE;
	} else {
		vdev_rebuild_initiate(vd);
	}
	mutex_exit(&vd->vdev_rebuild_lock);
}

static void
vdev_rebuild_restart_impl(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++)
			vdev_rebuild_restart_impl(vd->vdev_child[i]);

	} else if (vd->vdev_top_zap != 0) {
		vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&vd->vdev_rebuild_lock);
		if (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE &&
		    vdev_writeable(vd) && !vd->vdev_rebuilding) {
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_DEVICE_REBUILD));
			vd->vdev_rebuilding = B_TRUE;
			vd->vdev_rebuild_thread = thread_create(NULL, 0,
			    vdev_rebuild_thread, vd, 0, &p0, TS_RUN,
			    maxclsyspri);
		}
		mutex_exit(&vd->vdev_rebuild_lock);
	}
}

/*
 * Conditionally restart all of the vdev_rebuild_thread's for a pool. The
 * feature flag must be active and the rebuild in the active state. This
 * cannot be used to start a new rebuild.
 */
void
vdev_rebuild_restart(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	vdev_rebuild_restart_impl(spa->spa_root_vdev);
}

/*
 * Stop and wait for all of the vdev_rebuild_thread's associated with the
 * vdev tree provided to be terminated (canceled or stopped).
 */
void
vdev_rebuild_stop_wait(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++)
			vdev_rebuild_stop_wait(vd->vdev_child[i]);

	} else if (vd->vdev_top_zap != 0) {
		ASSERT(vd == vd->vdev_top);

		mutex_enter(&vd->vdev_rebuild_lock);
		if (vd->vdev_rebuild_thread != NULL) {
			vd->vdev_rebuild_exit_wanted = B_TRUE;
			while (vd->vdev_rebuilding) {
				cv_wait(&vd->vdev_rebuild_cv,
				    &vd->vdev_rebuild_lock);
			}
			vd->vdev_rebuild_exit_wanted = B_FALSE;
		}
		mutex_exit(&vd->vdev_rebuild_lock);
	}
}

/*
 * Stop all rebuild operations but leave them in the active state so they
 * will be resumed when importing the pool.
 */
void
vdev_rebuild_stop_all(spa_t *spa)
{
	vdev_rebuild_stop_wait(spa->spa_root_vdev);
}

/*
 * Rebuild statistics reported per top-level vdev.
 */
int
vdev_rebuild_get_stats(vdev_t *tvd, vdev_rebuild_stat_t *vrs)
{
	spa_t *spa = tvd->vdev_spa;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
		return (SET_ERROR(ENOTSUP));

	if (tvd != tvd->vdev_top || tvd->vdev_top_zap == 0)
		return (SET_ERROR(EINVAL));

	int error = zap_contains(spa_meta_objset(spa),
	    tvd->vdev_top_zap, VDEV_TOP_ZAP_VDEV_REBUILD_PHYS);

	if (error == ENOENT) {
		bzero(vrs, sizeof (vdev_rebuild_stat_t));
		vrs->vrs_state = VDEV_REBUILD_NONE;
		error = 0;
	} else if (error == 0) {
		vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&tvd->vdev_rebuild_lock);
		vrs->vrs_state = vrp->vrp_rebuild_state;
		vrs->vrs_start_time = vrp->vrp_start_time;
		vrs->vrs_end_time = vrp->vrp_end_time;
		vrs->vrs_scan_time_ms = vrp->vrp_scan_time_ms;
		vrs->vrs_bytes_scanned = vrp->vrp_bytes_scanned;
		vrs->vrs_bytes_issued = vrp->vrp_bytes_issued;
		vrs->vrs_bytes_rebuilt = vrp->vrp_bytes_rebuilt;
		vrs->vrs_bytes_est = vrp->vrp_bytes_est;
		vrs->vrs_errors = vrp->vrp_errors;
		vrs->vrs_pass_time_ms = NSEC2MSEC(gethrtime() -
		    vr->vr_pass_start_time);
		vrs->vrs_pass_bytes_scanned = vr->vr_pass_bytes_scanned;
		vrs->vrs_pass_bytes_issued = vr->vr_pass_bytes_issued;
		mutex_exit(&tvd->vdev_rebuild_lock);
	}

	return (error);
}

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, rebuild_max_segment, ULONG, ZMOD_RW,
	"Max segment size in bytes of rebuild reads");
/* END CSTYLED */