/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2016, 2019 by Delphix. All rights reserved.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_initialize.h>

/*
 * Value that is written to disk during initialization.
 */
static uint64_t zfs_initialize_value = 0xdeadbeefdeadbeeeULL;

/* maximum number of I/Os outstanding per leaf vdev */
static const int zfs_initialize_limit = 1;

/* size of initializing writes; default 1MiB, see zfs_remove_max_segment */
static uint64_t zfs_initialize_chunk_size = 1024 * 1024;
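
/*
 * Check for conditions that require the initializing thread to stop early:
 * an explicit exit request, the vdev becoming unwriteable or detached, or
 * the top-level vdev being removed or undergoing RAIDZ expansion.
 */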
static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
	return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
	    vd->vdev_detached || vd->vdev_top->vdev_removing ||
	    vd->vdev_top->vdev_rz_expanding);
}

static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
	/*
	 * We pass in the guid instead of the vdev_t since the vdev may
	 * have been freed prior to the sync task being processed. This
	 * happens when a vdev is detached as we call spa_config_vdev_exit(),
	 * stop the initializing thread, schedule the sync task, and free
	 * the vdev. Later when the scheduled sync task is invoked, it would
	 * find that the vdev has been freed.
	 */
	uint64_t guid = *(uint64_t *)arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	kmem_free(arg, sizeof (uint64_t));

	vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
	if (vd == NULL || vd->vdev_top->vdev_removing ||
	    !vdev_is_concrete(vd) || vd->vdev_top->vdev_rz_expanding)
		return;

	uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
	vd->vdev_initialize_offset[txg & TXG_MASK] = 0;

	VERIFY(vd->vdev_leaf_zap != 0);

	objset_t *mos = vd->vdev_spa->spa_meta_objset;

	if (last_offset > 0) {
		vd->vdev_initialize_last_offset = last_offset;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (last_offset), 1, &last_offset, tx));
	}
	if (vd->vdev_initialize_action_time > 0) {
		uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
		    1, &val, tx));
	}

	uint64_t initialize_state = vd->vdev_initialize_state;
	VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
	    &initialize_state, tx));
}

static void
vdev_initialize_zap_remove_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t guid = *(uint64_t *)arg;

	kmem_free(arg, sizeof (uint64_t));

	vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
	if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
		return;

	ASSERT3S(vd->vdev_initialize_state, ==, VDEV_INITIALIZE_NONE);
	ASSERT3U(vd->vdev_leaf_zap, !=, 0);

	vd->vdev_initialize_last_offset = 0;
	vd->vdev_initialize_action_time = 0;

	objset_t *mos = vd->vdev_spa->spa_meta_objset;
	int error;

	error = zap_remove(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET, tx);
	VERIFY(error == 0 || error == ENOENT);

	error = zap_remove(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_STATE, tx);
	VERIFY(error == 0 || error == ENOENT);

	error = zap_remove(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, tx);
	VERIFY(error == 0 || error == ENOENT);
}
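
/*
 * Change the state of an initializing vdev, log the transition in the pool
 * history, and schedule a sync task to persist the new state in the leaf
 * vdev ZAP (or, for VDEV_INITIALIZE_NONE, to remove the ZAP entries).
 */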
static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	spa_t *spa = vd->vdev_spa;

	if (new_state == vd->vdev_initialize_state)
		return;

	/*
	 * Copy the vd's guid, this will be freed by the sync task.
	 */
	uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
	*guid = vd->vdev_guid;

	/*
	 * If we're suspending, then preserve the original start time.
	 */
	if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
		vd->vdev_initialize_action_time = gethrestime_sec();
	}

	vdev_initializing_state_t old_state = vd->vdev_initialize_state;
	vd->vdev_initialize_state = new_state;

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	if (new_state != VDEV_INITIALIZE_NONE) {
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_update_sync, guid, tx);
	} else {
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_remove_sync, guid, tx);
	}

	switch (new_state) {
	case VDEV_INITIALIZE_ACTIVE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s activated", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_SUSPENDED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s suspended", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_CANCELED:
		if (old_state == VDEV_INITIALIZE_ACTIVE ||
		    old_state == VDEV_INITIALIZE_SUSPENDED)
			spa_history_log_internal(spa, "initialize", tx,
			    "vdev=%s canceled", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_COMPLETE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s complete", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_NONE:
		spa_history_log_internal(spa, "uninitialize", tx,
		    "vdev=%s", vd->vdev_path);
		break;
	default:
		panic("invalid state %llu", (unsigned long long)new_state);
	}

	dmu_tx_commit(tx);

	if (new_state != VDEV_INITIALIZE_ACTIVE)
		spa_notify_waiters(spa);
}
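
/*
 * Completion callback for initializing writes. Rolls the recorded offset
 * back if the vdev was unavailable, otherwise accounts for the bytes
 * written, then wakes any thread throttled in vdev_initialize_write() and
 * drops the SCL_STATE_ALL hold taken for this I/O.
 */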
static void
vdev_initialize_cb(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	mutex_enter(&vd->vdev_initialize_io_lock);
	if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
		/*
		 * The I/O failed because the vdev was unavailable; roll the
		 * last offset back. (This works because spa_sync waits on
		 * spa_txg_zio before it runs sync tasks.)
		 */
		uint64_t *off =
		    &vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
		*off = MIN(*off, zio->io_offset);
	} else {
		/*
		 * Since initializing is best-effort, we ignore I/O errors and
		 * rely on vdev_probe to determine if the errors are more
		 * critical.
		 */
		if (zio->io_error != 0)
			vd->vdev_stat.vs_initialize_errors++;

		vd->vdev_initialize_bytes_done += zio->io_orig_size;
	}
	ASSERT3U(vd->vdev_initialize_inflight, >, 0);
	vd->vdev_initialize_inflight--;
	cv_broadcast(&vd->vdev_initialize_io_cv);
	mutex_exit(&vd->vdev_initialize_io_lock);

	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/* Takes care of physical writing and limiting # of concurrent ZIOs. */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
	spa_t *spa = vd->vdev_spa;

	/* Limit inflight initializing I/Os */
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	vd->vdev_initialize_inflight++;
	mutex_exit(&vd->vdev_initialize_io_lock);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_initialize_lock);

	if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
		uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
		*guid = vd->vdev_guid;

		/* This is the first write of this txg. */
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_update_sync, guid, tx);
	}

	/*
	 * We know the vdev struct will still be around since all
	 * consumers of vdev_free must stop the initialization first.
	 */
	if (vdev_initialize_should_stop(vd)) {
		mutex_enter(&vd->vdev_initialize_io_lock);
		ASSERT3U(vd->vdev_initialize_inflight, >, 0);
		vd->vdev_initialize_inflight--;
		mutex_exit(&vd->vdev_initialize_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_initialize_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_initialize_lock);

	vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
	zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
	    size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
	    ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
	/* vdev_initialize_cb releases SCL_STATE_ALL */

	dmu_tx_commit(tx);

	return (0);
}

/*
 * Callback to fill each ABD chunk with zfs_initialize_value. len must be
 * divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
 * allocation will guarantee these for us.
 */
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
	(void) unused;

	ASSERT0(len % sizeof (uint64_t));
	for (uint64_t i = 0; i < len; i += sizeof (uint64_t)) {
		*(uint64_t *)((char *)(buf) + i) = zfs_initialize_value;
	}
	return (0);
}

static abd_t *
vdev_initialize_block_alloc(void)
{
	/* Allocate ABD for filler data */
	abd_t *data = abd_alloc_for_io(zfs_initialize_chunk_size, B_FALSE);

	ASSERT0(zfs_initialize_chunk_size % sizeof (uint64_t));
	(void) abd_iterate_func(data, 0, zfs_initialize_chunk_size,
	    vdev_initialize_block_fill, NULL);

	return (data);
}

static void
vdev_initialize_block_free(abd_t *data)
{
	abd_free(data);
}
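
/*
 * Issue initializing writes for every segment in the vdev's initialize
 * range tree, splitting each segment into chunks no larger than
 * zfs_initialize_chunk_size.
 */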
static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
	range_tree_t *rt = vd->vdev_initialize_tree;
	zfs_btree_t *bt = &rt->rt_root;
	zfs_btree_index_t where;

	for (range_seg_t *rs = zfs_btree_first(bt, &where); rs != NULL;
	    rs = zfs_btree_next(bt, &where, &where)) {
		uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);

		/* Split range into legally-sized physical chunks */
		uint64_t writes_required =
		    ((size - 1) / zfs_initialize_chunk_size) + 1;

		for (uint64_t w = 0; w < writes_required; w++) {
			int error;

			error = vdev_initialize_write(vd,
			    VDEV_LABEL_START_SIZE + rs_get_start(rs, rt) +
			    (w * zfs_initialize_chunk_size),
			    MIN(size - (w * zfs_initialize_chunk_size),
			    zfs_initialize_chunk_size), data);
			if (error != 0)
				return (error);
		}
	}
	return (0);
}

static void
vdev_initialize_xlate_last_rs_end(void *arg, range_seg64_t *physical_rs)
{
	uint64_t *last_rs_end = (uint64_t *)arg;

	if (physical_rs->rs_end > *last_rs_end)
		*last_rs_end = physical_rs->rs_end;
}

static void
vdev_initialize_xlate_progress(void *arg, range_seg64_t *physical_rs)
{
	vdev_t *vd = (vdev_t *)arg;

	uint64_t size = physical_rs->rs_end - physical_rs->rs_start;
	vd->vdev_initialize_bytes_est += size;

	if (vd->vdev_initialize_last_offset > physical_rs->rs_end) {
		vd->vdev_initialize_bytes_done += size;
	} else if (vd->vdev_initialize_last_offset > physical_rs->rs_start &&
	    vd->vdev_initialize_last_offset < physical_rs->rs_end) {
		vd->vdev_initialize_bytes_done +=
		    vd->vdev_initialize_last_offset - physical_rs->rs_start;
	}
}
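
/*
 * Recompute vdev_initialize_bytes_est and vdev_initialize_bytes_done by
 * walking the top-level vdev's metaslabs and comparing each one's physical
 * range with the last initialized offset. The metaslab containing that
 * offset is loaded so its free tree can be walked for a more accurate
 * estimate.
 */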
static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	vd->vdev_initialize_bytes_est = 0;
	vd->vdev_initialize_bytes_done = 0;

	for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];
		mutex_enter(&msp->ms_lock);

		uint64_t ms_free = (msp->ms_size -
		    metaslab_allocated_space(msp)) /
		    vdev_get_ndisks(vd->vdev_top);

		/*
		 * Convert the metaslab range to a physical range
		 * on our vdev. We use this to determine if we are
		 * in the middle of this metaslab range.
		 */
		range_seg64_t logical_rs, physical_rs, remain_rs;
		logical_rs.rs_start = msp->ms_start;
		logical_rs.rs_end = msp->ms_start + msp->ms_size;

		/* Metaslab space after this offset has not been initialized */
		vdev_xlate(vd, &logical_rs, &physical_rs, &remain_rs);
		if (vd->vdev_initialize_last_offset <= physical_rs.rs_start) {
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/* Metaslab space before this offset has been initialized */
		uint64_t last_rs_end = physical_rs.rs_end;
		if (!vdev_xlate_is_empty(&remain_rs)) {
			vdev_xlate_walk(vd, &remain_rs,
			    vdev_initialize_xlate_last_rs_end, &last_rs_end);
		}

		if (vd->vdev_initialize_last_offset > last_rs_end) {
			vd->vdev_initialize_bytes_done += ms_free;
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If we get here, we're in the middle of initializing this
		 * metaslab. Load it and walk the free tree for more accurate
		 * progress estimation.
		 */
		VERIFY0(metaslab_load(msp));

		zfs_btree_index_t where;
		range_tree_t *rt = msp->ms_allocatable;
		for (range_seg_t *rs =
		    zfs_btree_first(&rt->rt_root, &where); rs;
		    rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
			logical_rs.rs_start = rs_get_start(rs, rt);
			logical_rs.rs_end = rs_get_end(rs, rt);

			vdev_xlate_walk(vd, &logical_rs,
			    vdev_initialize_xlate_progress, vd);
		}
		mutex_exit(&msp->ms_lock);
	}
}
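
/*
 * Load the last initialized offset from the leaf vdev ZAP when an
 * initialization is active or suspended, then recompute the progress
 * counters.
 */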
static int
vdev_initialize_load(vdev_t *vd)
{
	int err = 0;
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	if (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE ||
	    vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED) {
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (vd->vdev_initialize_last_offset), 1,
		    &vd->vdev_initialize_last_offset);
		if (err == ENOENT) {
			vd->vdev_initialize_last_offset = 0;
			err = 0;
		}
	}

	vdev_initialize_calculate_progress(vd);
	return (err);
}
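
/*
 * Add a physical segment to the vdev's initialize range tree, skipping any
 * segment already covered by the last initialized offset and trimming a
 * segment that the offset falls within.
 */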
static void
vdev_initialize_xlate_range_add(void *arg, range_seg64_t *physical_rs)
{
	vdev_t *vd = arg;

	/* Only add segments that we have not visited yet */
	if (physical_rs->rs_end <= vd->vdev_initialize_last_offset)
		return;

	/* Pick up where we left off mid-range. */
	if (vd->vdev_initialize_last_offset > physical_rs->rs_start) {
		zfs_dbgmsg("range write: vd %s changed (%llu, %llu) to "
		    "(%llu, %llu)", vd->vdev_path,
		    (u_longlong_t)physical_rs->rs_start,
		    (u_longlong_t)physical_rs->rs_end,
		    (u_longlong_t)vd->vdev_initialize_last_offset,
		    (u_longlong_t)physical_rs->rs_end);
		ASSERT3U(physical_rs->rs_end, >,
		    vd->vdev_initialize_last_offset);
		physical_rs->rs_start = vd->vdev_initialize_last_offset;
	}

	ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start);

	range_tree_add(vd->vdev_initialize_tree, physical_rs->rs_start,
	    physical_rs->rs_end - physical_rs->rs_start);
}

/*
 * Convert the logical range into a physical range and add it to our
 * range tree.
 */
static void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
	vdev_t *vd = arg;
	range_seg64_t logical_rs;
	logical_rs.rs_start = start;
	logical_rs.rs_end = start + size;

	ASSERT(vd->vdev_ops->vdev_op_leaf);
	vdev_xlate_walk(vd, &logical_rs, vdev_initialize_xlate_range_add, arg);
}
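
/*
 * Body of the initializing thread. Walks each of the top-level vdev's
 * metaslabs, collects its allocatable space into the initialize range tree,
 * and overwrites that space with the fill pattern. On a clean finish the
 * state advances to COMPLETE (or CANCELED if the vdev faulted) and is
 * synced out before the thread exits.
 */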
static __attribute__((noreturn)) void
vdev_initialize_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;
	uint64_t ms_count = 0;

	ASSERT(vdev_is_concrete(vd));
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	vd->vdev_initialize_last_offset = 0;
	VERIFY0(vdev_initialize_load(vd));

	abd_t *deadbeef = vdev_initialize_block_alloc();

	vd->vdev_initialize_tree = range_tree_create(NULL, RANGE_SEG64, NULL,
	    0, 0);

	for (uint64_t i = 0; !vd->vdev_detached &&
	    i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];
		boolean_t unload_when_done = B_FALSE;

		/*
		 * If we've expanded the top-level vdev or it's our
		 * first pass, calculate our progress.
		 */
		if (vd->vdev_top->vdev_ms_count != ms_count) {
			vdev_initialize_calculate_progress(vd);
			ms_count = vd->vdev_top->vdev_ms_count;
		}

		spa_config_exit(spa, SCL_CONFIG, FTAG);
		metaslab_disable(msp);
		mutex_enter(&msp->ms_lock);
		if (!msp->ms_loaded && !msp->ms_loading)
			unload_when_done = B_TRUE;
		VERIFY0(metaslab_load(msp));

		range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add,
		    vd);
		mutex_exit(&msp->ms_lock);

		error = vdev_initialize_ranges(vd, deadbeef);
		metaslab_enable(msp, B_TRUE, unload_when_done);
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

		range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
		if (error != 0)
			break;
	}

	spa_config_exit(spa, SCL_CONFIG, FTAG);
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight > 0) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	mutex_exit(&vd->vdev_initialize_io_lock);

	range_tree_destroy(vd->vdev_initialize_tree);
	vdev_initialize_block_free(deadbeef);
	vd->vdev_initialize_tree = NULL;

	mutex_enter(&vd->vdev_initialize_lock);
	if (!vd->vdev_initialize_exit_wanted) {
		if (vdev_writeable(vd)) {
			vdev_initialize_change_state(vd,
			    VDEV_INITIALIZE_COMPLETE);
		} else if (vd->vdev_faulted) {
			vdev_initialize_change_state(vd,
			    VDEV_INITIALIZE_CANCELED);
		}
	}
	ASSERT(vd->vdev_initialize_thread != NULL ||
	    vd->vdev_initialize_inflight == 0);

	/*
	 * Drop the vdev_initialize_lock while we sync out the
	 * txg since it's possible that a device might be trying to
	 * come online and must check to see if it needs to restart an
	 * initialization. That thread will be holding the spa_config_lock
	 * which would prevent the txg_wait_synced from completing.
	 */
	mutex_exit(&vd->vdev_initialize_lock);
	txg_wait_synced(spa_get_dsl(spa), 0);
	mutex_enter(&vd->vdev_initialize_lock);

	vd->vdev_initialize_thread = NULL;
	cv_broadcast(&vd->vdev_initialize_cv);
	mutex_exit(&vd->vdev_initialize_lock);

	thread_exit();
}

/*
 * Initiates initialization of a device. Caller must hold
 * vdev_initialize_lock. Device must be a leaf and not already be
 * initializing.
 */
void
vdev_initialize(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT(!vd->vdev_detached);
	ASSERT(!vd->vdev_initialize_exit_wanted);
	ASSERT(!vd->vdev_top->vdev_removing);
	ASSERT(!vd->vdev_top->vdev_rz_expanding);

	vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
	vd->vdev_initialize_thread = thread_create(NULL, 0,
	    vdev_initialize_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}

/*
 * Uninitializes a device. Caller must hold vdev_initialize_lock.
 * Device must be a leaf and not already be initializing.
 */
void
vdev_uninitialize(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT(!vd->vdev_detached);
	ASSERT(!vd->vdev_initialize_exit_wanted);
	ASSERT(!vd->vdev_top->vdev_removing);

	vdev_initialize_change_state(vd, VDEV_INITIALIZE_NONE);
}

/*
 * Wait for the initialize thread to be terminated (cancelled or stopped).
 */
static void
vdev_initialize_stop_wait_impl(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));

	while (vd->vdev_initialize_thread != NULL)
		cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);

	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	vd->vdev_initialize_exit_wanted = B_FALSE;
}

/*
 * Wait for the vdev initialize threads on the provided list to cleanly exit.
 */
void
vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list)
{
	(void) spa;
	vdev_t *vd;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	while ((vd = list_remove_head(vd_list)) != NULL) {
		mutex_enter(&vd->vdev_initialize_lock);
		vdev_initialize_stop_wait_impl(vd);
		mutex_exit(&vd->vdev_initialize_lock);
	}
}

/*
 * Stop initializing a device, with the resultant initializing state being
 * tgt_state. For blocking behavior pass NULL for vd_list. Otherwise, when
 * a list_t is provided the stopping vdev is inserted into the list. Callers
 * are then required to call vdev_initialize_stop_wait() to block for all the
 * initialization threads to exit. The caller must hold vdev_initialize_lock
 * and must not be writing to the spa config, as the initializing thread may
 * try to enter the config as a reader before exiting.
 */
void
vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state,
    list_t *vd_list)
{
	ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));

	/*
	 * Allow cancel requests to proceed even if the initialize thread
	 * has stopped.
	 */
	if (vd->vdev_initialize_thread == NULL &&
	    tgt_state != VDEV_INITIALIZE_CANCELED) {
		return;
	}

	vdev_initialize_change_state(vd, tgt_state);
	vd->vdev_initialize_exit_wanted = B_TRUE;

	if (vd_list == NULL) {
		vdev_initialize_stop_wait_impl(vd);
	} else {
		ASSERT(MUTEX_HELD(&spa_namespace_lock));
		list_insert_tail(vd_list, vd);
	}
}
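
/*
 * Recursively descend the vdev tree, requesting that each concrete leaf
 * vdev stop initializing with the resulting state set to tgt_state.
 */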
static void
vdev_initialize_stop_all_impl(vdev_t *vd, vdev_initializing_state_t tgt_state,
    list_t *vd_list)
{
	if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
		mutex_enter(&vd->vdev_initialize_lock);
		vdev_initialize_stop(vd, tgt_state, vd_list);
		mutex_exit(&vd->vdev_initialize_lock);
		return;
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_stop_all_impl(vd->vdev_child[i], tgt_state,
		    vd_list);
	}
}

/*
 * Convenience function to stop initializing of a vdev tree and set all
 * initialize thread pointers to NULL.
 */
void
vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	spa_t *spa = vd->vdev_spa;
	list_t vd_list;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	list_create(&vd_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_initialize_node));

	vdev_initialize_stop_all_impl(vd, tgt_state, &vd_list);
	vdev_initialize_stop_wait(spa, &vd_list);

	if (vd->vdev_spa->spa_sync_on) {
		/* Make sure that our state has been synced to disk */
		txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
	}

	list_destroy(&vd_list);
}
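
/*
 * Restore the initialization state recorded in the leaf vdev ZAPs, e.g.
 * after pool import or device online. Suspended and offline vdevs only
 * have their progress loaded for reporting; a vdev that was actively
 * initializing has its initializing thread restarted. Recurses over all
 * children.
 */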
void
vdev_initialize_restart(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_leaf_zap != 0) {
		mutex_enter(&vd->vdev_initialize_lock);
		uint64_t initialize_state = VDEV_INITIALIZE_NONE;
		int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_STATE,
		    sizeof (initialize_state), 1, &initialize_state);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_state = initialize_state;

		uint64_t timestamp = 0;
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME,
		    sizeof (timestamp), 1, &timestamp);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_action_time = timestamp;

		if ((vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
		    vd->vdev_offline) && !vd->vdev_top->vdev_rz_expanding) {
			/* load progress for reporting, but don't resume */
			VERIFY0(vdev_initialize_load(vd));
		} else if (vd->vdev_initialize_state ==
		    VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd) &&
		    !vd->vdev_top->vdev_removing &&
		    !vd->vdev_top->vdev_rz_expanding &&
		    vd->vdev_initialize_thread == NULL) {
			vdev_initialize(vd);
		}

		mutex_exit(&vd->vdev_initialize_lock);
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_restart(vd->vdev_child[i]);
	}
}

EXPORT_SYMBOL(vdev_initialize);
EXPORT_SYMBOL(vdev_uninitialize);
EXPORT_SYMBOL(vdev_initialize_stop);
EXPORT_SYMBOL(vdev_initialize_stop_all);
EXPORT_SYMBOL(vdev_initialize_stop_wait);
EXPORT_SYMBOL(vdev_initialize_restart);

ZFS_MODULE_PARAM(zfs, zfs_, initialize_value, U64, ZMOD_RW,
	"Value written during zpool initialize");

ZFS_MODULE_PARAM(zfs, zfs_, initialize_chunk_size, U64, ZMOD_RW,
	"Size in bytes of writes by zpool initialize");