/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>
#include <sys/callb.h>

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
    spa_error_entry_t *sa = (spa_error_entry_t *)a;
    spa_error_entry_t *sb = (spa_error_entry_t *)b;
    int ret;

    ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
        sizeof (zbookmark_t));

    if (ret < 0)
        return (-1);
    else if (ret > 0)
        return (1);
    else
        return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
    ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

    bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
    bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

    avl_create(&spa->spa_errlist_scrub,
        spa_error_entry_compare, sizeof (spa_error_entry_t),
        offsetof(spa_error_entry_t, se_avl));
    avl_create(&spa->spa_errlist_last,
        spa_error_entry_compare, sizeof (spa_error_entry_t),
        offsetof(spa_error_entry_t, se_avl));
}

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa)
{
    int t;

    ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

    spa->spa_state = POOL_STATE_ACTIVE;

    spa->spa_normal_class = metaslab_class_create();

    for (t = 0; t < ZIO_TYPES; t++) {
        spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue",
            8, maxclsyspri, 50, INT_MAX,
            TASKQ_PREPOPULATE);
        spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr",
            8, maxclsyspri, 50, INT_MAX,
            TASKQ_PREPOPULATE);
    }

    rw_init(&spa->spa_traverse_lock, NULL, RW_DEFAULT, NULL);

    mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_config_cache_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_config_lock.scl_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);

    list_create(&spa->spa_dirty_list, sizeof (vdev_t),
        offsetof(vdev_t, vdev_dirty_node));

    txg_list_create(&spa->spa_vdev_txg_list,
        offsetof(struct vdev, vdev_txg_node));

    avl_create(&spa->spa_errlist_scrub,
        spa_error_entry_compare, sizeof (spa_error_entry_t),
        offsetof(spa_error_entry_t, se_avl));
    avl_create(&spa->spa_errlist_last,
        spa_error_entry_compare, sizeof (spa_error_entry_t),
        offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
    int t;

    ASSERT(spa->spa_sync_on == B_FALSE);
    ASSERT(spa->spa_dsl_pool == NULL);
    ASSERT(spa->spa_root_vdev == NULL);

    ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

    txg_list_destroy(&spa->spa_vdev_txg_list);

    list_destroy(&spa->spa_dirty_list);

    rw_destroy(&spa->spa_traverse_lock);

    for (t = 0; t < ZIO_TYPES; t++) {
        taskq_destroy(spa->spa_zio_issue_taskq[t]);
        taskq_destroy(spa->spa_zio_intr_taskq[t]);
        spa->spa_zio_issue_taskq[t] = NULL;
        spa->spa_zio_intr_taskq[t] = NULL;
    }

    metaslab_class_destroy(spa->spa_normal_class);
    spa->spa_normal_class = NULL;

    /*
     * If this was part of an import or the open otherwise failed, we may
     * still have errors left in the queues.  Empty them just in case.
     */
    spa_errlog_drain(spa);

    avl_destroy(&spa->spa_errlist_scrub);
    avl_destroy(&spa->spa_errlist_last);

    spa->spa_state = POOL_STATE_UNINITIALIZED;
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
    nvlist_t **child;
    uint_t c, children;
    int error;

    if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
        return (error);

    if ((*vdp)->vdev_ops->vdev_op_leaf)
        return (0);

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0) {
        vdev_free(*vdp);
        *vdp = NULL;
        return (EINVAL);
    }

    for (c = 0; c < children; c++) {
        vdev_t *vd;
        if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
            atype)) != 0) {
            vdev_free(*vdp);
            *vdp = NULL;
            return (error);
        }
    }

    ASSERT(*vdp != NULL);

    return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
    int i;

    /*
     * Stop async tasks.
     */
    spa_async_suspend(spa);

    /*
     * Stop syncing.
     */
    if (spa->spa_sync_on) {
        txg_sync_stop(spa->spa_dsl_pool);
        spa->spa_sync_on = B_FALSE;
    }

    /*
     * Wait for any outstanding prefetch I/O to complete.
     */
    spa_config_enter(spa, RW_WRITER, FTAG);
    spa_config_exit(spa, FTAG);

    /*
     * Close the dsl pool.
     */
    if (spa->spa_dsl_pool) {
        dsl_pool_close(spa->spa_dsl_pool);
        spa->spa_dsl_pool = NULL;
    }

    /*
     * Close all vdevs.
     */
    if (spa->spa_root_vdev)
        vdev_free(spa->spa_root_vdev);
    ASSERT(spa->spa_root_vdev == NULL);

    for (i = 0; i < spa->spa_nspares; i++)
        vdev_free(spa->spa_spares[i]);
    if (spa->spa_spares) {
        kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));
        spa->spa_spares = NULL;
    }
    if (spa->spa_sparelist) {
        nvlist_free(spa->spa_sparelist);
        spa->spa_sparelist = NULL;
    }

    spa->spa_async_suspended = 0;
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_sparelist'.  We parse this into vdevs, try to open them, and then
 * re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
    nvlist_t **spares;
    uint_t nspares;
    int i;

    /*
     * First, close and free any existing spare vdevs.
     */
    for (i = 0; i < spa->spa_nspares; i++) {
        vdev_close(spa->spa_spares[i]);
        vdev_free(spa->spa_spares[i]);
    }
    if (spa->spa_spares)
        kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));

    if (spa->spa_sparelist == NULL)
        nspares = 0;
    else
        VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
            ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

    spa->spa_nspares = (int)nspares;
    spa->spa_spares = NULL;

    if (nspares == 0)
        return;

    /*
     * Construct the array of vdevs, opening them to get status in the
     * process.
     */
    spa->spa_spares = kmem_alloc(nspares * sizeof (void *), KM_SLEEP);
    for (i = 0; i < spa->spa_nspares; i++) {
        vdev_t *vd;

        VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
            VDEV_ALLOC_SPARE) == 0);
        ASSERT(vd != NULL);

        spa->spa_spares[i] = vd;

        if (vdev_open(vd) != 0)
            continue;

        vd->vdev_top = vd;
        (void) vdev_validate_spare(vd);
    }

    /*
     * Recompute the stashed list of spares, with status information
     * this time.
     */
    VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
        DATA_TYPE_NVLIST_ARRAY) == 0);

    spares = kmem_alloc(spa->spa_nspares * sizeof (void *), KM_SLEEP);
    for (i = 0; i < spa->spa_nspares; i++)
        spares[i] = vdev_config_generate(spa, spa->spa_spares[i],
            B_TRUE, B_TRUE);
    VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
        spares, spa->spa_nspares) == 0);
    for (i = 0; i < spa->spa_nspares; i++)
        nvlist_free(spares[i]);
    kmem_free(spares, spa->spa_nspares * sizeof (void *));
}

/*
 * Read a packed nvlist out of the MOS: the object's bonus buffer holds the
 * packed size, and the object itself holds the packed data.
 */
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
    dmu_buf_t *db;
    char *packed = NULL;
    size_t nvsize = 0;
    int error;
    *value = NULL;

    VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
    nvsize = *(uint64_t *)db->db_data;
    dmu_buf_rele(db, FTAG);

    packed = kmem_alloc(nvsize, KM_SLEEP);
    error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
    if (error == 0)
        error = nvlist_unpack(packed, nvsize, value, 0);
    kmem_free(packed, nvsize);

    return (error);
}

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
    int error = 0;
    nvlist_t *nvroot = NULL;
    vdev_t *rvd;
    uberblock_t *ub = &spa->spa_uberblock;
    uint64_t config_cache_txg = spa->spa_config_txg;
    uint64_t pool_guid;
    uint64_t version;
    zio_t *zio;

    spa->spa_load_state = state;

    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
        nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
        error = EINVAL;
        goto out;
    }

    /*
     * Versioning wasn't explicitly added to the label until later, so if
     * it's not present treat it as the initial version.
     */
    if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
        version = ZFS_VERSION_INITIAL;

    (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
        &spa->spa_config_txg);

    if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
        spa_guid_exists(pool_guid, 0)) {
        error = EEXIST;
        goto out;
    }

    spa->spa_load_guid = pool_guid;

    /*
     * Parse the configuration into a vdev tree.  We explicitly set the
     * value that will be returned by spa_version() since parsing the
     * configuration requires knowing the version number.
     */
    spa_config_enter(spa, RW_WRITER, FTAG);
    spa->spa_ubsync.ub_version = version;
    error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
    spa_config_exit(spa, FTAG);

    if (error != 0)
        goto out;

    ASSERT(spa->spa_root_vdev == rvd);
    ASSERT(spa_guid(spa) == pool_guid);

    /*
     * Try to open all vdevs, loading each label in the process.
     */
    if (vdev_open(rvd) != 0) {
        error = ENXIO;
        goto out;
    }

    /*
     * Validate the labels for all leaf vdevs.  We need to grab the config
     * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD
     * flag.
     */
    spa_config_enter(spa, RW_READER, FTAG);
    error = vdev_validate(rvd);
    spa_config_exit(spa, FTAG);

    if (error != 0) {
        error = EBADF;
        goto out;
    }

    if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
        error = ENXIO;
        goto out;
    }

    /*
     * Find the best uberblock.
     */
    bzero(ub, sizeof (uberblock_t));

    zio = zio_root(spa, NULL, NULL,
        ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
    vdev_uberblock_load(zio, rvd, ub);
    error = zio_wait(zio);

    /*
     * If we weren't able to find a single valid uberblock, return failure.
     */
    if (ub->ub_txg == 0) {
        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
            VDEV_AUX_CORRUPT_DATA);
        error = ENXIO;
        goto out;
    }

    /*
     * If the pool is newer than the code, we can't open it.
     */
    if (ub->ub_version > ZFS_VERSION) {
        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
            VDEV_AUX_VERSION_NEWER);
        error = ENOTSUP;
        goto out;
    }

    /*
     * If the vdev guid sum doesn't match the uberblock, we have an
     * incomplete configuration.
     */
    if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
            VDEV_AUX_BAD_GUID_SUM);
        error = ENXIO;
        goto out;
    }

    /*
     * Initialize internal SPA structures.
     */
    spa->spa_state = POOL_STATE_ACTIVE;
    spa->spa_ubsync = spa->spa_uberblock;
    spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
    error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
    if (error) {
        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
            VDEV_AUX_CORRUPT_DATA);
        goto out;
    }
    spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

    if (zap_lookup(spa->spa_meta_objset,
        DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
        sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
            VDEV_AUX_CORRUPT_DATA);
        error = EIO;
        goto out;
    }

    if (!mosconfig) {
        nvlist_t *newconfig;

        if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
            vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                VDEV_AUX_CORRUPT_DATA);
            error = EIO;
            goto out;
        }

        spa_config_set(spa, newconfig);
        spa_unload(spa);
        spa_deactivate(spa);
        spa_activate(spa);

        return (spa_load(spa, newconfig, state, B_TRUE));
    }

    if (zap_lookup(spa->spa_meta_objset,
        DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
        sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
            VDEV_AUX_CORRUPT_DATA);
        error = EIO;
        goto out;
    }

    /*
     * Load the bit that tells us to use the new accounting function
     * (raid-z deflation).  If we have an older pool, this will not
     * be present.
     */
    error = zap_lookup(spa->spa_meta_objset,
        DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
        sizeof (uint64_t), 1, &spa->spa_deflate);
    if (error != 0 && error != ENOENT) {
        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
            VDEV_AUX_CORRUPT_DATA);
        error = EIO;
        goto out;
    }

    /*
     * Load the persistent error log.  If we have an older pool, this will
     * not be present.
     */
    error = zap_lookup(spa->spa_meta_objset,
        DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
        sizeof (uint64_t), 1, &spa->spa_errlog_last);
    if (error != 0 && error != ENOENT) {
        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
            VDEV_AUX_CORRUPT_DATA);
        error = EIO;
        goto out;
    }

    error = zap_lookup(spa->spa_meta_objset,
        DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
        sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
    if (error != 0 && error != ENOENT) {
        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
            VDEV_AUX_CORRUPT_DATA);
        error = EIO;
        goto out;
    }

    /*
     * Load the history object.  If we have an older pool, this
     * will not be present.
     */
    error = zap_lookup(spa->spa_meta_objset,
        DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
        sizeof (uint64_t), 1, &spa->spa_history);
    if (error != 0 && error != ENOENT) {
        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
            VDEV_AUX_CORRUPT_DATA);
        error = EIO;
        goto out;
    }

    /*
     * Load any hot spares for this pool.
     */
    error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares_object);
    if (error != 0 && error != ENOENT) {
        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
            VDEV_AUX_CORRUPT_DATA);
        error = EIO;
        goto out;
    }
    if (error == 0) {
        ASSERT(spa_version(spa) >= ZFS_VERSION_SPARES);
        if (load_nvlist(spa, spa->spa_spares_object,
            &spa->spa_sparelist) != 0) {
            vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                VDEV_AUX_CORRUPT_DATA);
            error = EIO;
            goto out;
        }

        spa_config_enter(spa, RW_WRITER, FTAG);
        spa_load_spares(spa);
        spa_config_exit(spa, FTAG);
    }

    /*
     * Load the vdev state for all toplevel vdevs.
     */
    vdev_load(rvd);

    /*
     * Propagate the leaf DTLs we just loaded all the way up the tree.
     */
    spa_config_enter(spa, RW_WRITER, FTAG);
    vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
    spa_config_exit(spa, FTAG);

    /*
     * Check the state of the root vdev.  If it can't be opened, it
     * indicates one or more toplevel vdevs are faulted.
     */
    if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
        error = ENXIO;
        goto out;
    }

    if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
        dmu_tx_t *tx;
        int need_update = B_FALSE;
        int c;

        /*
         * Claim log blocks that haven't been committed yet.
         * This must all happen in a single txg.
         */
        tx = dmu_tx_create_assigned(spa_get_dsl(spa),
            spa_first_txg(spa));
        (void) dmu_objset_find(spa->spa_name,
            zil_claim, tx, DS_FIND_CHILDREN);
        dmu_tx_commit(tx);

        spa->spa_sync_on = B_TRUE;
        txg_sync_start(spa->spa_dsl_pool);

        /*
         * Wait for all claims to sync.
         */
        txg_wait_synced(spa->spa_dsl_pool, 0);

        /*
         * If the config cache is stale, or we have uninitialized
         * metaslabs (see spa_vdev_add()), then update the config.
         */
        if (config_cache_txg != spa->spa_config_txg ||
            state == SPA_LOAD_IMPORT)
            need_update = B_TRUE;

        for (c = 0; c < rvd->vdev_children; c++)
            if (rvd->vdev_child[c]->vdev_ms_array == 0)
                need_update = B_TRUE;

        /*
         * Update the config cache asynchronously in case we're the
         * root pool, in which case the config cache isn't writable yet.
         */
        if (need_update)
            spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
    }

    error = 0;
out:
    if (error && error != EBADF)
        zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
    spa->spa_load_state = SPA_LOAD_NONE;
    spa->spa_ena = 0;

    return (error);
}

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache.  For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
    spa_t *spa;
    int error;
    int loaded = B_FALSE;
    int locked = B_FALSE;

    *spapp = NULL;

    /*
     * As disgusting as this is, we need to support recursive calls to this
     * function because dsl_dir_open() is called during spa_load(), and ends
     * up calling spa_open() again.  The real fix is to figure out how to
     * avoid dsl_dir_open() calling this in the first place.
     */
    if (mutex_owner(&spa_namespace_lock) != curthread) {
        mutex_enter(&spa_namespace_lock);
        locked = B_TRUE;
    }

    if ((spa = spa_lookup(pool)) == NULL) {
        if (locked)
            mutex_exit(&spa_namespace_lock);
        return (ENOENT);
    }
    if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

        spa_activate(spa);

        error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

        if (error == EBADF) {
            /*
             * If vdev_validate() returns failure (indicated by
             * EBADF), it means that one of the vdevs indicates
             * that the pool has been exported or destroyed.  If
             * this is the case, the config cache is out of sync and
             * we should remove the pool from the namespace.
             */
            zfs_post_ok(spa, NULL);
            spa_unload(spa);
            spa_deactivate(spa);
            spa_remove(spa);
            spa_config_sync();
            if (locked)
                mutex_exit(&spa_namespace_lock);
            return (ENOENT);
        }

        if (error) {
            /*
             * We can't open the pool, but we still have useful
             * information: the state of each vdev after the
             * attempted vdev_open().  Return this to the user.
             */
            if (config != NULL && spa->spa_root_vdev != NULL) {
                spa_config_enter(spa, RW_READER, FTAG);
                *config = spa_config_generate(spa, NULL, -1ULL,
                    B_TRUE);
                spa_config_exit(spa, FTAG);
            }
            spa_unload(spa);
            spa_deactivate(spa);
            spa->spa_last_open_failed = B_TRUE;
            if (locked)
                mutex_exit(&spa_namespace_lock);
            *spapp = NULL;
            return (error);
        } else {
            zfs_post_ok(spa, NULL);
            spa->spa_last_open_failed = B_FALSE;
        }

        loaded = B_TRUE;
    }

    spa_open_ref(spa, tag);
    if (locked)
        mutex_exit(&spa_namespace_lock);

    *spapp = spa;

    if (config != NULL) {
        spa_config_enter(spa, RW_READER, FTAG);
        *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
        spa_config_exit(spa, FTAG);
    }

    /*
     * If we just loaded the pool, resilver anything that's out of date.
     */
    if (loaded && (spa_mode & FWRITE))
        VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

    return (0);
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
    return (spa_open_common(name, spapp, tag, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
    spa_t *spa;

    mutex_enter(&spa_namespace_lock);
    if ((spa = spa_lookup(name)) == NULL) {
        mutex_exit(&spa_namespace_lock);
        return (NULL);
    }
    spa->spa_inject_ref++;
    mutex_exit(&spa_namespace_lock);

    return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
    mutex_enter(&spa_namespace_lock);
    spa->spa_inject_ref--;
    mutex_exit(&spa_namespace_lock);
}

/*
 * Add the list of hot spares to the given pool config, updating the state of
 * any spares that are currently in use.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
    nvlist_t **spares;
    uint_t i, nspares;
    nvlist_t *nvroot;
    uint64_t guid;
    vdev_stat_t *vs;
    uint_t vsc;

    if (spa->spa_nspares == 0)
        return;

    VERIFY(nvlist_lookup_nvlist(config,
        ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
    VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
        ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
    if (nspares != 0) {
        VERIFY(nvlist_add_nvlist_array(nvroot,
            ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
        VERIFY(nvlist_lookup_nvlist_array(nvroot,
            ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

        /*
         * Go through and find any spares which have since been
         * repurposed as an active spare.  If this is the case, update
         * their status appropriately.
         */
        for (i = 0; i < nspares; i++) {
            VERIFY(nvlist_lookup_uint64(spares[i],
                ZPOOL_CONFIG_GUID, &guid) == 0);
            if (spa_spare_inuse(guid)) {
                VERIFY(nvlist_lookup_uint64_array(
                    spares[i], ZPOOL_CONFIG_STATS,
                    (uint64_t **)&vs, &vsc) == 0);
                vs->vs_state = VDEV_STATE_CANT_OPEN;
                vs->vs_aux = VDEV_AUX_SPARED;
            }
        }
    }
}

int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
    int error;
    spa_t *spa;

    *config = NULL;
    error = spa_open_common(name, &spa, FTAG, config);

    if (spa && *config != NULL) {
        VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
            spa_get_errlog_size(spa)) == 0);

        spa_add_spares(spa, *config);
    }

    /*
     * We want to get the alternate root even for faulted pools, so we cheat
     * and call spa_lookup() directly.
     */
    if (altroot) {
        if (spa == NULL) {
            mutex_enter(&spa_namespace_lock);
            spa = spa_lookup(name);
            if (spa)
                spa_altroot(spa, altroot, buflen);
            else
                altroot[0] = '\0';
            spa = NULL;
            mutex_exit(&spa_namespace_lock);
        } else {
            spa_altroot(spa, altroot, buflen);
        }
    }

    if (spa != NULL)
        spa_close(spa, FTAG);

    return (error);
}

/*
 * Validate that the 'spares' array is well formed.  We must have an array of
 * nvlists, each of which describes a valid leaf vdev.
 */
static int
spa_validate_spares(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
    nvlist_t **spares;
    uint_t i, nspares;
    vdev_t *vd;
    int error;

    /*
     * It's acceptable to have no spares specified.
     */
    if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
        &spares, &nspares) != 0)
        return (0);

    if (nspares == 0)
        return (EINVAL);

    /*
     * Make sure the pool is formatted with a version that supports hot
     * spares.
     */
    if (spa_version(spa) < ZFS_VERSION_SPARES)
        return (ENOTSUP);

    for (i = 0; i < nspares; i++) {
        if ((error = spa_config_parse(spa, &vd, spares[i], NULL, 0,
            mode)) != 0)
            return (error);

        if (!vd->vdev_ops->vdev_op_leaf) {
            vdev_free(vd);
            return (EINVAL);
        }

        if ((error = vdev_open(vd)) != 0) {
            vdev_free(vd);
            return (error);
        }

        vd->vdev_top = vd;
        if ((error = vdev_label_spare(vd, crtxg)) != 0) {
            vdev_free(vd);
            return (error);
        }

        VERIFY(nvlist_add_uint64(spares[i], ZPOOL_CONFIG_GUID,
            vd->vdev_guid) == 0);

        vdev_free(vd);
    }

    return (0);
}

/*
 * Pool Creation
 */
int
spa_create(const char *pool, nvlist_t *nvroot, const char *altroot)
{
    spa_t *spa;
    vdev_t *rvd;
    dsl_pool_t *dp;
    dmu_tx_t *tx;
    int c, error = 0;
    uint64_t txg = TXG_INITIAL;
    nvlist_t **spares;
    uint_t nspares;

    /*
     * If this pool already exists, return failure.
     */
    mutex_enter(&spa_namespace_lock);
    if (spa_lookup(pool) != NULL) {
        mutex_exit(&spa_namespace_lock);
        return (EEXIST);
    }

    /*
     * Allocate a new spa_t structure.
     */
    spa = spa_add(pool, altroot);
    spa_activate(spa);

    spa->spa_uberblock.ub_txg = txg - 1;
    spa->spa_uberblock.ub_version = ZFS_VERSION;
    spa->spa_ubsync = spa->spa_uberblock;

    /*
     * Create the root vdev.
     */
    spa_config_enter(spa, RW_WRITER, FTAG);

    error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

    ASSERT(error != 0 || rvd != NULL);
    ASSERT(error != 0 || spa->spa_root_vdev == rvd);

    if (error == 0 && rvd->vdev_children == 0)
        error = EINVAL;

    if (error == 0 &&
        (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
        (error = spa_validate_spares(spa, nvroot, txg,
        VDEV_ALLOC_ADD)) == 0) {
        for (c = 0; c < rvd->vdev_children; c++)
            vdev_init(rvd->vdev_child[c], txg);
        vdev_config_dirty(rvd);
    }

    spa_config_exit(spa, FTAG);

    if (error != 0) {
        spa_unload(spa);
        spa_deactivate(spa);
        spa_remove(spa);
        mutex_exit(&spa_namespace_lock);
        return (error);
    }

    /*
     * Get the list of spares, if specified.
     */
    if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
        &spares, &nspares) == 0) {
        VERIFY(nvlist_alloc(&spa->spa_sparelist, NV_UNIQUE_NAME,
            KM_SLEEP) == 0);
        VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
            ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
        spa_config_enter(spa, RW_WRITER, FTAG);
        spa_load_spares(spa);
        spa_config_exit(spa, FTAG);
        spa->spa_sync_spares = B_TRUE;
    }

    spa->spa_dsl_pool = dp = dsl_pool_create(spa, txg);
    spa->spa_meta_objset = dp->dp_meta_objset;

    tx = dmu_tx_create_assigned(dp, txg);

    /*
     * Create the pool config object.
     */
    spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
        DMU_OT_PACKED_NVLIST, 1 << 14,
        DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

    if (zap_add(spa->spa_meta_objset,
        DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
        sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
        cmn_err(CE_PANIC, "failed to add pool config");
    }

    /* Newly created pools are always deflated. */
    spa->spa_deflate = TRUE;
    if (zap_add(spa->spa_meta_objset,
        DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
        sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
        cmn_err(CE_PANIC, "failed to add deflate");
    }

    /*
     * Create the deferred-free bplist object.  Turn off compression
     * because sync-to-convergence takes longer if the blocksize
     * keeps changing.
     */
    spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
        1 << 14, tx);
    dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
        ZIO_COMPRESS_OFF, tx);

    if (zap_add(spa->spa_meta_objset,
        DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
        sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
        cmn_err(CE_PANIC, "failed to add bplist");
    }

    /*
     * Create the pool's history object.
     */
    spa_history_create_obj(spa, tx);

    dmu_tx_commit(tx);

    spa->spa_sync_on = B_TRUE;
    txg_sync_start(spa->spa_dsl_pool);

    /*
     * We explicitly wait for the first transaction to complete so that our
     * bean counters are appropriately updated.
     */
    txg_wait_synced(spa->spa_dsl_pool, txg);

    spa_config_sync();

    mutex_exit(&spa_namespace_lock);

    return (0);
}

/*
 * Import the given pool into the system.  We set up the necessary spa_t and
 * then call spa_load() to do the dirty work.
 */
int
spa_import(const char *pool, nvlist_t *config, const char *altroot)
{
    spa_t *spa;
    int error;
    nvlist_t *nvroot;
    nvlist_t **spares;
    uint_t nspares;

    if (!(spa_mode & FWRITE))
        return (EROFS);

    /*
     * If a pool with this name exists, return failure.
     */
    mutex_enter(&spa_namespace_lock);
    if (spa_lookup(pool) != NULL) {
        mutex_exit(&spa_namespace_lock);
        return (EEXIST);
    }

    /*
     * Create and initialize the spa structure.
     */
    spa = spa_add(pool, altroot);
    spa_activate(spa);

    /*
     * Pass off the heavy lifting to spa_load().
     * Pass TRUE for mosconfig because the user-supplied config
     * is actually the one to trust when doing an import.
     */
    error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);

    spa_config_enter(spa, RW_WRITER, FTAG);
    /*
     * Toss any existing sparelist, as it doesn't have any validity anymore,
     * and conflicts with spa_has_spare().
     */
    if (spa->spa_sparelist) {
        nvlist_free(spa->spa_sparelist);
        spa->spa_sparelist = NULL;
        spa_load_spares(spa);
    }

    VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
        &nvroot) == 0);
    if (error == 0)
        error = spa_validate_spares(spa, nvroot, -1ULL,
            VDEV_ALLOC_SPARE);
    spa_config_exit(spa, FTAG);

    if (error != 0) {
        spa_unload(spa);
        spa_deactivate(spa);
        spa_remove(spa);
        mutex_exit(&spa_namespace_lock);
        return (error);
    }

    /*
     * Override any spares as specified by the user, as these may have
     * correct device names/devids, etc.
     */
    if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
        &spares, &nspares) == 0) {
        if (spa->spa_sparelist)
            VERIFY(nvlist_remove(spa->spa_sparelist,
                ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
        else
            VERIFY(nvlist_alloc(&spa->spa_sparelist,
                NV_UNIQUE_NAME, KM_SLEEP) == 0);
        VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
            ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
        spa_config_enter(spa, RW_WRITER, FTAG);
        spa_load_spares(spa);
        spa_config_exit(spa, FTAG);
        spa->spa_sync_spares = B_TRUE;
    }

    /*
     * Update the config cache to include the newly-imported pool.
     */
    spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);

    mutex_exit(&spa_namespace_lock);

    /*
     * Resilver anything that's out of date.
     */
    if (spa_mode & FWRITE)
        VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

    return (0);
}

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
    nvlist_t *config = NULL;
    char *poolname;
    spa_t *spa;
    uint64_t state;

    if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
        return (NULL);

    if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
        return (NULL);

    /*
     * Create and initialize the spa structure.
     */
    mutex_enter(&spa_namespace_lock);
    spa = spa_add(TRYIMPORT_NAME, NULL);
    spa_activate(spa);

    /*
     * Pass off the heavy lifting to spa_load().
     * Pass TRUE for mosconfig because the user-supplied config
     * is actually the one to trust when doing an import.
     */
    (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);

    /*
     * If 'tryconfig' was at least parsable, return the current config.
     */
    if (spa->spa_root_vdev != NULL) {
        spa_config_enter(spa, RW_READER, FTAG);
        config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
        spa_config_exit(spa, FTAG);
        VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
            poolname) == 0);
        VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            state) == 0);

        /*
         * Add the list of hot spares.
         */
        spa_add_spares(spa, config);
    }

    spa_unload(spa);
    spa_deactivate(spa);
    spa_remove(spa);
    mutex_exit(&spa_namespace_lock);

    return (config);
}

/*
 * Pool export/destroy
 *
 * The act of destroying or exporting a pool is very simple.  We make sure there
 * is no more pending I/O and any references to the pool are gone.  Then, we
 * update the pool state and sync all the labels to disk, removing the
 * configuration from the cache afterwards.
 */
static int
spa_export_common(char *pool, int new_state, nvlist_t **oldconfig)
{
    spa_t *spa;

    if (oldconfig)
        *oldconfig = NULL;

    if (!(spa_mode & FWRITE))
        return (EROFS);

    mutex_enter(&spa_namespace_lock);
    if ((spa = spa_lookup(pool)) == NULL) {
        mutex_exit(&spa_namespace_lock);
        return (ENOENT);
    }

    /*
     * Put a hold on the pool, drop the namespace lock, stop async tasks,
     * reacquire the namespace lock, and see if we can export.
     */
    spa_open_ref(spa, FTAG);
    mutex_exit(&spa_namespace_lock);
    spa_async_suspend(spa);
    mutex_enter(&spa_namespace_lock);
    spa_close(spa, FTAG);

    /*
     * The pool will be in core if it's openable,
     * in which case we can modify its state.
     */
    if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
        /*
         * Objsets may be open only because they're dirty, so we
         * have to force it to sync before checking spa_refcnt.
         */
        spa_scrub_suspend(spa);
        txg_wait_synced(spa->spa_dsl_pool, 0);

        /*
         * A pool cannot be exported or destroyed if there are active
         * references.  If we are resetting a pool, allow references by
         * fault injection handlers.
         */
        if (!spa_refcount_zero(spa) ||
            (spa->spa_inject_ref != 0 &&
            new_state != POOL_STATE_UNINITIALIZED)) {
            spa_scrub_resume(spa);
            spa_async_resume(spa);
            mutex_exit(&spa_namespace_lock);
            return (EBUSY);
        }

        spa_scrub_resume(spa);
        VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);

        /*
         * We want this to be reflected on every label,
         * so mark them all dirty.  spa_unload() will do the
         * final sync that pushes these changes out.
         */
        if (new_state != POOL_STATE_UNINITIALIZED) {
            spa_config_enter(spa, RW_WRITER, FTAG);
            spa->spa_state = new_state;
            spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
            vdev_config_dirty(spa->spa_root_vdev);
            spa_config_exit(spa, FTAG);
        }
    }

    if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
        spa_unload(spa);
        spa_deactivate(spa);
    }

    if (oldconfig && spa->spa_config)
        VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);

    if (new_state != POOL_STATE_UNINITIALIZED) {
        spa_remove(spa);
        spa_config_sync();
    }
    mutex_exit(&spa_namespace_lock);

    return (0);
}

/*
 * Destroy a storage pool.
 */
int
spa_destroy(char *pool)
{
    return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL));
}

/*
 * Export a storage pool.
 */
int
spa_export(char *pool, nvlist_t **oldconfig)
{
    return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig));
}

/*
 * Similar to spa_export(), this unloads the spa_t without actually removing it
 * from the namespace in any way.
 */
int
spa_reset(char *pool)
{
    return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL));
}


/*
 * ==========================================================================
 * Device manipulation
 * ==========================================================================
 */

/*
 * Add capacity to a storage pool.
 */
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
    uint64_t txg;
    int c, error;
    vdev_t *rvd = spa->spa_root_vdev;
    vdev_t *vd, *tvd;
    nvlist_t **spares;
    uint_t i, nspares;

    txg = spa_vdev_enter(spa);

    if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
        VDEV_ALLOC_ADD)) != 0)
        return (spa_vdev_exit(spa, NULL, txg, error));

    if ((error = spa_validate_spares(spa, nvroot, txg,
        VDEV_ALLOC_ADD)) != 0)
        return (spa_vdev_exit(spa, vd, txg, error));

    if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
        &spares, &nspares) != 0)
        nspares = 0;

    if (vd->vdev_children == 0 && nspares == 0)
        return (spa_vdev_exit(spa, vd, txg, EINVAL));

    if (vd->vdev_children != 0) {
        if ((error = vdev_create(vd, txg, B_FALSE)) != 0)
            return (spa_vdev_exit(spa, vd, txg, error));

        /*
         * Transfer each new top-level vdev from vd to rvd.
         */
        for (c = 0; c < vd->vdev_children; c++) {
            tvd = vd->vdev_child[c];
            vdev_remove_child(vd, tvd);
            tvd->vdev_id = rvd->vdev_children;
            vdev_add_child(rvd, tvd);
            vdev_config_dirty(tvd);
        }
    }

    if (nspares != 0) {
        if (spa->spa_sparelist != NULL) {
            nvlist_t **oldspares;
            uint_t oldnspares;
            nvlist_t **newspares;

            VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
                ZPOOL_CONFIG_SPARES, &oldspares, &oldnspares) == 0);

            newspares = kmem_alloc(sizeof (void *) *
                (nspares + oldnspares), KM_SLEEP);
            for (i = 0; i < oldnspares; i++)
                VERIFY(nvlist_dup(oldspares[i],
                    &newspares[i], KM_SLEEP) == 0);
            for (i = 0; i < nspares; i++)
                VERIFY(nvlist_dup(spares[i],
                    &newspares[i + oldnspares],
                    KM_SLEEP) == 0);

            VERIFY(nvlist_remove(spa->spa_sparelist,
                ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);

            VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
                ZPOOL_CONFIG_SPARES, newspares,
                nspares + oldnspares) == 0);
            for (i = 0; i < oldnspares + nspares; i++)
                nvlist_free(newspares[i]);
            kmem_free(newspares, (oldnspares + nspares) *
                sizeof (void *));
        } else {
            VERIFY(nvlist_alloc(&spa->spa_sparelist,
                NV_UNIQUE_NAME, KM_SLEEP) == 0);
            VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
                ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
        }

        spa_load_spares(spa);
        spa->spa_sync_spares = B_TRUE;
    }

1523789Sahrens /* 15241585Sbonwick * We have to be careful when adding new vdevs to an existing pool. 15251585Sbonwick * If other threads start allocating from these vdevs before we 15261585Sbonwick * sync the config cache, and we lose power, then upon reboot we may 15271585Sbonwick * fail to open the pool because there are DVAs that the config cache 15281585Sbonwick * can't translate. Therefore, we first add the vdevs without 15291585Sbonwick * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 15301635Sbonwick * and then let spa_config_update() initialize the new metaslabs. 15311585Sbonwick * 15321585Sbonwick * spa_load() checks for added-but-not-initialized vdevs, so that 15331585Sbonwick * if we lose power at any point in this sequence, the remaining 15341585Sbonwick * steps will be completed the next time we load the pool. 1535789Sahrens */ 15361635Sbonwick (void) spa_vdev_exit(spa, vd, txg, 0); 15371585Sbonwick 15381635Sbonwick mutex_enter(&spa_namespace_lock); 15391635Sbonwick spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 15401635Sbonwick mutex_exit(&spa_namespace_lock); 1541789Sahrens 15421635Sbonwick return (0); 1543789Sahrens } 1544789Sahrens 1545789Sahrens /* 1546789Sahrens * Attach a device to a mirror. The arguments are the path to any device 1547789Sahrens * in the mirror, and the nvroot for the new device. If the path specifies 1548789Sahrens * a device that is not mirrored, we automatically insert the mirror vdev. 1549789Sahrens * 1550789Sahrens * If 'replacing' is specified, the new device is intended to replace the 1551789Sahrens * existing device; in this case the two devices are made into their own 1552789Sahrens * mirror using the 'replacing' vdev, which is functionally identical to 1553789Sahrens * the mirror vdev (it actually reuses all the same ops) but has a few 1554789Sahrens * extra rules: you can't attach to it after it's been created, and upon 1555789Sahrens * completion of resilvering, the first disk (the one being replaced) 1556789Sahrens * is automatically detached. 
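 *
 * For example (hypothetical device names), replacing c1t0d0 with c1t1d0
 * in a two-way mirror of c1t0d0 and c1t2d0 leaves the tree looking like
 * this while the resilver runs:
 *
 *	mirror
 *	    c1t2d0
 *	    replacing
 *	        c1t0d0	(detached automatically when the resilver completes)
 *	        c1t1d0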
1557789Sahrens */ 1558789Sahrens int 15591544Seschrock spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 1560789Sahrens { 1561789Sahrens uint64_t txg, open_txg; 1562789Sahrens int error; 1563789Sahrens vdev_t *rvd = spa->spa_root_vdev; 1564789Sahrens vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 15652082Seschrock vdev_ops_t *pvops; 1566789Sahrens 1567789Sahrens txg = spa_vdev_enter(spa); 1568789Sahrens 15691544Seschrock oldvd = vdev_lookup_by_guid(rvd, guid); 1570789Sahrens 1571789Sahrens if (oldvd == NULL) 1572789Sahrens return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 1573789Sahrens 15741585Sbonwick if (!oldvd->vdev_ops->vdev_op_leaf) 15751585Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 15761585Sbonwick 1577789Sahrens pvd = oldvd->vdev_parent; 1578789Sahrens 15792082Seschrock if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 15802082Seschrock VDEV_ALLOC_ADD)) != 0 || newrootvd->vdev_children != 1) 1581789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 1582789Sahrens 1583789Sahrens newvd = newrootvd->vdev_child[0]; 1584789Sahrens 1585789Sahrens if (!newvd->vdev_ops->vdev_op_leaf) 1586789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 1587789Sahrens 15882082Seschrock if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 1589789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, error)); 1590789Sahrens 15912082Seschrock if (!replacing) { 15922082Seschrock /* 15932082Seschrock * For attach, the only allowable parent is a mirror or the root 15942082Seschrock * vdev. 15952082Seschrock */ 15962082Seschrock if (pvd->vdev_ops != &vdev_mirror_ops && 15972082Seschrock pvd->vdev_ops != &vdev_root_ops) 15982082Seschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 15992082Seschrock 16002082Seschrock pvops = &vdev_mirror_ops; 16012082Seschrock } else { 16022082Seschrock /* 16032082Seschrock * Active hot spares can only be replaced by inactive hot 16042082Seschrock * spares. 16052082Seschrock */ 16062082Seschrock if (pvd->vdev_ops == &vdev_spare_ops && 16072082Seschrock pvd->vdev_child[1] == oldvd && 16082082Seschrock !spa_has_spare(spa, newvd->vdev_guid)) 16092082Seschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 16102082Seschrock 16112082Seschrock /* 16122082Seschrock * If the source is a hot spare, and the parent isn't already a 16132082Seschrock * spare, then we want to create a new hot spare. Otherwise, we 16142082Seschrock * want to create a replacing vdev. 16152082Seschrock */ 16162082Seschrock if (pvd->vdev_ops == &vdev_replacing_ops) 16172082Seschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 16182082Seschrock else if (pvd->vdev_ops != &vdev_spare_ops && 16192082Seschrock newvd->vdev_isspare) 16202082Seschrock pvops = &vdev_spare_ops; 16212082Seschrock else 16222082Seschrock pvops = &vdev_replacing_ops; 16232082Seschrock } 16242082Seschrock 16251175Slling /* 16261175Slling * Compare the new device size with the replaceable/attachable 16271175Slling * device size. 16281175Slling */ 16291175Slling if (newvd->vdev_psize < vdev_get_rsize(oldvd)) 1630789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 1631789Sahrens 16321732Sbonwick /* 16331732Sbonwick * The new device cannot have a higher alignment requirement 16341732Sbonwick * than the top-level vdev. 
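	 *
	 * For example, a device that reports 4K sectors (ashift 12) cannot
	 * be attached to a top-level vdev created with 512-byte sectors
	 * (ashift 9), because blocks already allocated there assume the
	 * smaller alignment.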
16351732Sbonwick */ 16361732Sbonwick if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 1637789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 1638789Sahrens 1639789Sahrens /* 1640789Sahrens * If this is an in-place replacement, update oldvd's path and devid 1641789Sahrens * to make it distinguishable from newvd, and unopenable from now on. 1642789Sahrens */ 1643789Sahrens if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 1644789Sahrens spa_strfree(oldvd->vdev_path); 1645789Sahrens oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 1646789Sahrens KM_SLEEP); 1647789Sahrens (void) sprintf(oldvd->vdev_path, "%s/%s", 1648789Sahrens newvd->vdev_path, "old"); 1649789Sahrens if (oldvd->vdev_devid != NULL) { 1650789Sahrens spa_strfree(oldvd->vdev_devid); 1651789Sahrens oldvd->vdev_devid = NULL; 1652789Sahrens } 1653789Sahrens } 1654789Sahrens 1655789Sahrens /* 16562082Seschrock * If the parent is not a mirror, or if we're replacing, insert the new 16572082Seschrock * mirror/replacing/spare vdev above oldvd. 1658789Sahrens */ 1659789Sahrens if (pvd->vdev_ops != pvops) 1660789Sahrens pvd = vdev_add_parent(oldvd, pvops); 1661789Sahrens 1662789Sahrens ASSERT(pvd->vdev_top->vdev_parent == rvd); 1663789Sahrens ASSERT(pvd->vdev_ops == pvops); 1664789Sahrens ASSERT(oldvd->vdev_parent == pvd); 1665789Sahrens 1666789Sahrens /* 1667789Sahrens * Extract the new device from its root and add it to pvd. 1668789Sahrens */ 1669789Sahrens vdev_remove_child(newrootvd, newvd); 1670789Sahrens newvd->vdev_id = pvd->vdev_children; 1671789Sahrens vdev_add_child(pvd, newvd); 1672789Sahrens 16731544Seschrock /* 16741544Seschrock * If newvd is smaller than oldvd, but larger than its rsize, 16751544Seschrock * the addition of newvd may have decreased our parent's asize. 16761544Seschrock */ 16771544Seschrock pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); 16781544Seschrock 1679789Sahrens tvd = newvd->vdev_top; 1680789Sahrens ASSERT(pvd->vdev_top == tvd); 1681789Sahrens ASSERT(tvd->vdev_parent == rvd); 1682789Sahrens 1683789Sahrens vdev_config_dirty(tvd); 1684789Sahrens 1685789Sahrens /* 1686789Sahrens * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate 1687789Sahrens * upward when spa_vdev_exit() calls vdev_dtl_reassess(). 1688789Sahrens */ 1689789Sahrens open_txg = txg + TXG_CONCURRENT_STATES - 1; 1690789Sahrens 1691789Sahrens mutex_enter(&newvd->vdev_dtl_lock); 1692789Sahrens space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL, 1693789Sahrens open_txg - TXG_INITIAL + 1); 1694789Sahrens mutex_exit(&newvd->vdev_dtl_lock); 1695789Sahrens 16961544Seschrock dprintf("attached %s in txg %llu\n", newvd->vdev_path, txg); 16971544Seschrock 1698789Sahrens /* 1699789Sahrens * Mark newvd's DTL dirty in this txg. 1700789Sahrens */ 17011732Sbonwick vdev_dirty(tvd, VDD_DTL, newvd, txg); 1702789Sahrens 1703789Sahrens (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); 1704789Sahrens 1705789Sahrens /* 1706789Sahrens * Kick off a resilver to update newvd. 1707789Sahrens */ 1708789Sahrens VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 1709789Sahrens 1710789Sahrens return (0); 1711789Sahrens } 1712789Sahrens 1713789Sahrens /* 1714789Sahrens * Detach a device from a mirror or replacing vdev. 1715789Sahrens * If 'replace_done' is specified, only detach if the parent 1716789Sahrens * is a replacing vdev. 
1717789Sahrens */ 1718789Sahrens int 17191544Seschrock spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done) 1720789Sahrens { 1721789Sahrens uint64_t txg; 1722789Sahrens int c, t, error; 1723789Sahrens vdev_t *rvd = spa->spa_root_vdev; 1724789Sahrens vdev_t *vd, *pvd, *cvd, *tvd; 17252082Seschrock boolean_t unspare = B_FALSE; 17262082Seschrock uint64_t unspare_guid; 1727789Sahrens 1728789Sahrens txg = spa_vdev_enter(spa); 1729789Sahrens 17301544Seschrock vd = vdev_lookup_by_guid(rvd, guid); 1731789Sahrens 1732789Sahrens if (vd == NULL) 1733789Sahrens return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 1734789Sahrens 17351585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 17361585Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 17371585Sbonwick 1738789Sahrens pvd = vd->vdev_parent; 1739789Sahrens 1740789Sahrens /* 1741789Sahrens * If replace_done is specified, only remove this device if it's 17422082Seschrock * the first child of a replacing vdev. For the 'spare' vdev, either 17432082Seschrock * disk can be removed. 1744789Sahrens */ 17452082Seschrock if (replace_done) { 17462082Seschrock if (pvd->vdev_ops == &vdev_replacing_ops) { 17472082Seschrock if (vd->vdev_id != 0) 17482082Seschrock return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 17492082Seschrock } else if (pvd->vdev_ops != &vdev_spare_ops) { 17502082Seschrock return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 17512082Seschrock } 17522082Seschrock } 17532082Seschrock 17542082Seschrock ASSERT(pvd->vdev_ops != &vdev_spare_ops || 17552082Seschrock spa_version(spa) >= ZFS_VERSION_SPARES); 1756789Sahrens 1757789Sahrens /* 17582082Seschrock * Only mirror, replacing, and spare vdevs support detach. 1759789Sahrens */ 1760789Sahrens if (pvd->vdev_ops != &vdev_replacing_ops && 17612082Seschrock pvd->vdev_ops != &vdev_mirror_ops && 17622082Seschrock pvd->vdev_ops != &vdev_spare_ops) 1763789Sahrens return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 1764789Sahrens 1765789Sahrens /* 1766789Sahrens * If there's only one replica, you can't detach it. 1767789Sahrens */ 1768789Sahrens if (pvd->vdev_children <= 1) 1769789Sahrens return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 1770789Sahrens 1771789Sahrens /* 1772789Sahrens * If all siblings have non-empty DTLs, this device may have the only 1773789Sahrens * valid copy of the data, which means we cannot safely detach it. 1774789Sahrens * 1775789Sahrens * XXX -- as in the vdev_offline() case, we really want a more 1776789Sahrens * precise DTL check. 1777789Sahrens */ 1778789Sahrens for (c = 0; c < pvd->vdev_children; c++) { 1779789Sahrens uint64_t dirty; 1780789Sahrens 1781789Sahrens cvd = pvd->vdev_child[c]; 1782789Sahrens if (cvd == vd) 1783789Sahrens continue; 1784789Sahrens if (vdev_is_dead(cvd)) 1785789Sahrens continue; 1786789Sahrens mutex_enter(&cvd->vdev_dtl_lock); 1787789Sahrens dirty = cvd->vdev_dtl_map.sm_space | 1788789Sahrens cvd->vdev_dtl_scrub.sm_space; 1789789Sahrens mutex_exit(&cvd->vdev_dtl_lock); 1790789Sahrens if (!dirty) 1791789Sahrens break; 1792789Sahrens } 17932082Seschrock 17942082Seschrock /* 17952082Seschrock * If we are a replacing or spare vdev, then we can always detach the 17962082Seschrock * latter child, as that is how one cancels the operation. 
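	 * (The "latter child" is the one with vdev_id 1: the new disk under
	 * a replacing vdev, or the hot spare itself under a spare vdev.)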
17972082Seschrock */ 17982082Seschrock if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) && 17992082Seschrock c == pvd->vdev_children) 1800789Sahrens return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 1801789Sahrens 1802789Sahrens /* 18032082Seschrock * If we are detaching the original disk from a spare, then it implies 18042082Seschrock * that the spare should become a real disk, and be removed from the 18052082Seschrock * active spare list for the pool. 18062082Seschrock */ 18072082Seschrock if (pvd->vdev_ops == &vdev_spare_ops && 18082082Seschrock vd->vdev_id == 0) 18092082Seschrock unspare = B_TRUE; 18102082Seschrock 18112082Seschrock /* 1812789Sahrens * Erase the disk labels so the disk can be used for other things. 1813789Sahrens * This must be done after all other error cases are handled, 1814789Sahrens * but before we disembowel vd (so we can still do I/O to it). 1815789Sahrens * But if we can't do it, don't treat the error as fatal -- 1816789Sahrens * it may be that the unwritability of the disk is the reason 1817789Sahrens * it's being detached! 1818789Sahrens */ 18192082Seschrock error = vdev_label_init(vd, 0, B_FALSE); 1820789Sahrens if (error) 1821789Sahrens dprintf("unable to erase labels on %s\n", vdev_description(vd)); 1822789Sahrens 1823789Sahrens /* 1824789Sahrens * Remove vd from its parent and compact the parent's children. 1825789Sahrens */ 1826789Sahrens vdev_remove_child(pvd, vd); 1827789Sahrens vdev_compact_children(pvd); 1828789Sahrens 1829789Sahrens /* 1830789Sahrens * Remember one of the remaining children so we can get tvd below. 1831789Sahrens */ 1832789Sahrens cvd = pvd->vdev_child[0]; 1833789Sahrens 1834789Sahrens /* 18352082Seschrock * If we need to remove the remaining child from the list of hot spares, 18362082Seschrock * do it now, marking the vdev as no longer a spare in the process. We 18372082Seschrock * must do this before vdev_remove_parent(), because that can change the 18382082Seschrock * GUID if it creates a new toplevel GUID. 18392082Seschrock */ 18402082Seschrock if (unspare) { 18412082Seschrock ASSERT(cvd->vdev_isspare); 18422082Seschrock spa_spare_remove(cvd->vdev_guid); 18432082Seschrock cvd->vdev_isspare = B_FALSE; 18442082Seschrock unspare_guid = cvd->vdev_guid; 18452082Seschrock } 18462082Seschrock 18472082Seschrock /* 1848789Sahrens * If the parent mirror/replacing vdev only has one child, 1849789Sahrens * the parent is no longer needed. Remove it from the tree. 1850789Sahrens */ 1851789Sahrens if (pvd->vdev_children == 1) 1852789Sahrens vdev_remove_parent(cvd); 1853789Sahrens 1854789Sahrens /* 1855789Sahrens * We don't set tvd until now because the parent we just removed 1856789Sahrens * may have been the previous top-level vdev. 1857789Sahrens */ 1858789Sahrens tvd = cvd->vdev_top; 1859789Sahrens ASSERT(tvd->vdev_parent == rvd); 1860789Sahrens 1861789Sahrens /* 1862789Sahrens * Reopen this top-level vdev to reassess health after detach. 1863789Sahrens */ 18641544Seschrock vdev_reopen(tvd); 1865789Sahrens 1866789Sahrens /* 1867789Sahrens * If the device we just detached was smaller than the others, 18681732Sbonwick * it may be possible to add metaslabs (i.e. grow the pool). 18691732Sbonwick * vdev_metaslab_init() can't fail because the existing metaslabs 18701732Sbonwick * are already in core, so there's nothing to read from disk. 
1871789Sahrens */ 18721732Sbonwick VERIFY(vdev_metaslab_init(tvd, txg) == 0); 1873789Sahrens 1874789Sahrens vdev_config_dirty(tvd); 1875789Sahrens 1876789Sahrens /* 1877789Sahrens * Mark vd's DTL as dirty in this txg. 1878789Sahrens * vdev_dtl_sync() will see that vd->vdev_detached is set 1879789Sahrens * and free vd's DTL object in syncing context. 1880789Sahrens * But first make sure we're not on any *other* txg's DTL list, 1881789Sahrens * to prevent vd from being accessed after it's freed. 1882789Sahrens */ 1883789Sahrens for (t = 0; t < TXG_SIZE; t++) 1884789Sahrens (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 18851732Sbonwick vd->vdev_detached = B_TRUE; 18861732Sbonwick vdev_dirty(tvd, VDD_DTL, vd, txg); 1887789Sahrens 18881544Seschrock dprintf("detached %s in txg %llu\n", vd->vdev_path, txg); 1889789Sahrens 18902082Seschrock error = spa_vdev_exit(spa, vd, txg, 0); 18912082Seschrock 18922082Seschrock /* 18932082Seschrock * If we are supposed to remove the given vdev from the list of spares, 18942082Seschrock * iterate over all pools in the system and replace it if it's present. 18952082Seschrock */ 18962082Seschrock if (unspare) { 18972082Seschrock spa = NULL; 18982082Seschrock mutex_enter(&spa_namespace_lock); 18992082Seschrock while ((spa = spa_next(spa)) != NULL) { 19002082Seschrock if (spa->spa_state != POOL_STATE_ACTIVE) 19012082Seschrock continue; 19022082Seschrock 19032082Seschrock (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 19042082Seschrock } 19052082Seschrock mutex_exit(&spa_namespace_lock); 19062082Seschrock } 19072082Seschrock 19082082Seschrock return (error); 19092082Seschrock } 19102082Seschrock 19112082Seschrock /* 19122082Seschrock * Remove a device from the pool. Currently, this supports removing only hot 19132082Seschrock * spares. 19142082Seschrock */ 19152082Seschrock int 19162082Seschrock spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 19172082Seschrock { 19182082Seschrock vdev_t *vd; 19192082Seschrock nvlist_t **spares, *nv, **newspares; 19202082Seschrock uint_t i, j, nspares; 19212082Seschrock int ret = 0; 19222082Seschrock 19232082Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 19242082Seschrock 19252082Seschrock vd = spa_lookup_by_guid(spa, guid); 19262082Seschrock 19272082Seschrock nv = NULL; 19282082Seschrock if (spa->spa_spares != NULL && 19292082Seschrock nvlist_lookup_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES, 19302082Seschrock &spares, &nspares) == 0) { 19312082Seschrock for (i = 0; i < nspares; i++) { 19322082Seschrock uint64_t theguid; 19332082Seschrock 19342082Seschrock VERIFY(nvlist_lookup_uint64(spares[i], 19352082Seschrock ZPOOL_CONFIG_GUID, &theguid) == 0); 19362082Seschrock if (theguid == guid) { 19372082Seschrock nv = spares[i]; 19382082Seschrock break; 19392082Seschrock } 19402082Seschrock } 19412082Seschrock } 19422082Seschrock 19432082Seschrock /* 19442082Seschrock * We only support removing a hot spare, and only if it's not currently 19452082Seschrock * in use in this pool. 
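	 *
	 * ('nv' tracks membership in the spare list and 'vd' membership in
	 * the active vdev tree: absent from both is ENOENT, present in the
	 * tree but not a spare is ENOTSUP, and a spare that is currently
	 * wired into the tree is EBUSY unless 'unspare' was requested.)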
19462082Seschrock */ 19472082Seschrock if (nv == NULL && vd == NULL) { 19482082Seschrock ret = ENOENT; 19492082Seschrock goto out; 19502082Seschrock } 19512082Seschrock 19522082Seschrock if (nv == NULL && vd != NULL) { 19532082Seschrock ret = ENOTSUP; 19542082Seschrock goto out; 19552082Seschrock } 19562082Seschrock 19572082Seschrock if (!unspare && nv != NULL && vd != NULL) { 19582082Seschrock ret = EBUSY; 19592082Seschrock goto out; 19602082Seschrock } 19612082Seschrock 19622082Seschrock if (nspares == 1) { 19632082Seschrock newspares = NULL; 19642082Seschrock } else { 19652082Seschrock newspares = kmem_alloc((nspares - 1) * sizeof (void *), 19662082Seschrock KM_SLEEP); 19672082Seschrock for (i = 0, j = 0; i < nspares; i++) { 19682082Seschrock if (spares[i] != nv) 19692082Seschrock VERIFY(nvlist_dup(spares[i], 19702082Seschrock &newspares[j++], KM_SLEEP) == 0); 19712082Seschrock } 19722082Seschrock } 19732082Seschrock 19742082Seschrock VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES, 19752082Seschrock DATA_TYPE_NVLIST_ARRAY) == 0); 19762082Seschrock VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES, 19772082Seschrock newspares, nspares - 1) == 0); 19782082Seschrock for (i = 0; i < nspares - 1; i++) 19792082Seschrock nvlist_free(newspares[i]); 19802082Seschrock kmem_free(newspares, (nspares - 1) * sizeof (void *)); 19812082Seschrock spa_load_spares(spa); 19822082Seschrock spa->spa_sync_spares = B_TRUE; 19832082Seschrock 19842082Seschrock out: 19852082Seschrock spa_config_exit(spa, FTAG); 19862082Seschrock 19872082Seschrock return (ret); 1988789Sahrens } 1989789Sahrens 1990789Sahrens /* 19911544Seschrock * Find any device that's done replacing, so we can detach it. 1992789Sahrens */ 19931544Seschrock static vdev_t * 19941544Seschrock spa_vdev_replace_done_hunt(vdev_t *vd) 1995789Sahrens { 19961544Seschrock vdev_t *newvd, *oldvd; 1997789Sahrens int c; 1998789Sahrens 19991544Seschrock for (c = 0; c < vd->vdev_children; c++) { 20001544Seschrock oldvd = spa_vdev_replace_done_hunt(vd->vdev_child[c]); 20011544Seschrock if (oldvd != NULL) 20021544Seschrock return (oldvd); 20031544Seschrock } 2004789Sahrens 2005789Sahrens if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) { 20061544Seschrock oldvd = vd->vdev_child[0]; 20071544Seschrock newvd = vd->vdev_child[1]; 2008789Sahrens 20091544Seschrock mutex_enter(&newvd->vdev_dtl_lock); 20101544Seschrock if (newvd->vdev_dtl_map.sm_space == 0 && 20111544Seschrock newvd->vdev_dtl_scrub.sm_space == 0) { 20121544Seschrock mutex_exit(&newvd->vdev_dtl_lock); 20131544Seschrock return (oldvd); 20141544Seschrock } 20151544Seschrock mutex_exit(&newvd->vdev_dtl_lock); 20161544Seschrock } 2017789Sahrens 20181544Seschrock return (NULL); 2019789Sahrens } 2020789Sahrens 20211544Seschrock static void 2022789Sahrens spa_vdev_replace_done(spa_t *spa) 2023789Sahrens { 20241544Seschrock vdev_t *vd; 20252082Seschrock vdev_t *pvd; 20261544Seschrock uint64_t guid; 20272082Seschrock uint64_t pguid = 0; 2028789Sahrens 20291544Seschrock spa_config_enter(spa, RW_READER, FTAG); 2030789Sahrens 20311544Seschrock while ((vd = spa_vdev_replace_done_hunt(spa->spa_root_vdev)) != NULL) { 20321544Seschrock guid = vd->vdev_guid; 20332082Seschrock /* 20342082Seschrock * If we have just finished replacing a hot spared device, then 20352082Seschrock * we need to detach the parent's first child (the original hot 20362082Seschrock * spare) as well. 
20372082Seschrock */ 20382082Seschrock pvd = vd->vdev_parent; 20392082Seschrock if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops && 20402082Seschrock pvd->vdev_id == 0) { 20412082Seschrock ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 20422082Seschrock ASSERT(pvd->vdev_parent->vdev_children == 2); 20432082Seschrock pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid; 20442082Seschrock } 20451544Seschrock spa_config_exit(spa, FTAG); 20461544Seschrock if (spa_vdev_detach(spa, guid, B_TRUE) != 0) 20471544Seschrock return; 20482082Seschrock if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0) 20492082Seschrock return; 20501544Seschrock spa_config_enter(spa, RW_READER, FTAG); 2051789Sahrens } 2052789Sahrens 20531544Seschrock spa_config_exit(spa, FTAG); 2054789Sahrens } 2055789Sahrens 2056789Sahrens /* 20571354Seschrock * Update the stored path for this vdev. Dirty the vdev configuration, relying 20581354Seschrock * on spa_vdev_enter/exit() to synchronize the labels and cache. 20591354Seschrock */ 20601354Seschrock int 20611354Seschrock spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 20621354Seschrock { 20631354Seschrock vdev_t *rvd, *vd; 20641354Seschrock uint64_t txg; 20651354Seschrock 20661354Seschrock rvd = spa->spa_root_vdev; 20671354Seschrock 20681354Seschrock txg = spa_vdev_enter(spa); 20691354Seschrock 20702082Seschrock if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 20712082Seschrock /* 20722082Seschrock * Determine if this is a reference to a hot spare. In that 20732082Seschrock * case, update the path as stored in the spare list. 20742082Seschrock */ 20752082Seschrock nvlist_t **spares; 20762082Seschrock uint_t i, nspares; 20772082Seschrock if (spa->spa_sparelist != NULL) { 20782082Seschrock VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist, 20792082Seschrock ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 20802082Seschrock for (i = 0; i < nspares; i++) { 20812082Seschrock uint64_t theguid; 20822082Seschrock VERIFY(nvlist_lookup_uint64(spares[i], 20832082Seschrock ZPOOL_CONFIG_GUID, &theguid) == 0); 20842082Seschrock if (theguid == guid) 20852082Seschrock break; 20862082Seschrock } 20872082Seschrock 20882082Seschrock if (i == nspares) 20892082Seschrock return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 20902082Seschrock 20912082Seschrock VERIFY(nvlist_add_string(spares[i], 20922082Seschrock ZPOOL_CONFIG_PATH, newpath) == 0); 20932082Seschrock spa_load_spares(spa); 20942082Seschrock spa->spa_sync_spares = B_TRUE; 20952082Seschrock return (spa_vdev_exit(spa, NULL, txg, 0)); 20962082Seschrock } else { 20972082Seschrock return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 20982082Seschrock } 20992082Seschrock } 21001354Seschrock 21011585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 21021585Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 21031585Sbonwick 21041354Seschrock spa_strfree(vd->vdev_path); 21051354Seschrock vd->vdev_path = spa_strdup(newpath); 21061354Seschrock 21071354Seschrock vdev_config_dirty(vd->vdev_top); 21081354Seschrock 21091354Seschrock return (spa_vdev_exit(spa, NULL, txg, 0)); 21101354Seschrock } 21111354Seschrock 21121354Seschrock /* 2113789Sahrens * ========================================================================== 2114789Sahrens * SPA Scrubbing 2115789Sahrens * ========================================================================== 2116789Sahrens */ 2117789Sahrens 21181544Seschrock void 21191544Seschrock spa_scrub_throttle(spa_t *spa, int direction) 21201544Seschrock { 21211544Seschrock mutex_enter(&spa->spa_scrub_lock); 
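	/*
	 * Callers pass a positive 'direction' to raise the throttle count
	 * and a negative one to drop it; while the count is positive the
	 * scrub thread stops issuing new I/Os (see the cv_wait() loop in
	 * spa_scrub_thread()).
	 */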
21221544Seschrock spa->spa_scrub_throttled += direction; 21231544Seschrock ASSERT(spa->spa_scrub_throttled >= 0); 21241544Seschrock if (spa->spa_scrub_throttled == 0) 21251544Seschrock cv_broadcast(&spa->spa_scrub_io_cv); 21261544Seschrock mutex_exit(&spa->spa_scrub_lock); 21271544Seschrock } 2128789Sahrens 2129789Sahrens static void 2130789Sahrens spa_scrub_io_done(zio_t *zio) 2131789Sahrens { 2132789Sahrens spa_t *spa = zio->io_spa; 2133789Sahrens 2134789Sahrens zio_buf_free(zio->io_data, zio->io_size); 2135789Sahrens 2136789Sahrens mutex_enter(&spa->spa_scrub_lock); 21371544Seschrock if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 21381775Sbillm vdev_t *vd = zio->io_vd ? zio->io_vd : spa->spa_root_vdev; 2139789Sahrens spa->spa_scrub_errors++; 2140789Sahrens mutex_enter(&vd->vdev_stat_lock); 2141789Sahrens vd->vdev_stat.vs_scrub_errors++; 2142789Sahrens mutex_exit(&vd->vdev_stat_lock); 2143789Sahrens } 21441544Seschrock if (--spa->spa_scrub_inflight == 0) { 21451544Seschrock cv_broadcast(&spa->spa_scrub_io_cv); 21461544Seschrock ASSERT(spa->spa_scrub_throttled == 0); 21471544Seschrock } 21481544Seschrock mutex_exit(&spa->spa_scrub_lock); 2149789Sahrens } 2150789Sahrens 2151789Sahrens static void 21521544Seschrock spa_scrub_io_start(spa_t *spa, blkptr_t *bp, int priority, int flags, 21531544Seschrock zbookmark_t *zb) 2154789Sahrens { 2155789Sahrens size_t size = BP_GET_LSIZE(bp); 2156789Sahrens void *data = zio_buf_alloc(size); 2157789Sahrens 2158789Sahrens mutex_enter(&spa->spa_scrub_lock); 2159789Sahrens spa->spa_scrub_inflight++; 2160789Sahrens mutex_exit(&spa->spa_scrub_lock); 2161789Sahrens 21621544Seschrock if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET) 21631544Seschrock flags |= ZIO_FLAG_SPECULATIVE; /* intent log block */ 21641544Seschrock 21651807Sbonwick flags |= ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL; 21661544Seschrock 2167789Sahrens zio_nowait(zio_read(NULL, spa, bp, data, size, 21681544Seschrock spa_scrub_io_done, NULL, priority, flags, zb)); 2169789Sahrens } 2170789Sahrens 2171789Sahrens /* ARGSUSED */ 2172789Sahrens static int 2173789Sahrens spa_scrub_cb(traverse_blk_cache_t *bc, spa_t *spa, void *a) 2174789Sahrens { 2175789Sahrens blkptr_t *bp = &bc->bc_blkptr; 21761775Sbillm vdev_t *vd = spa->spa_root_vdev; 21771775Sbillm dva_t *dva = bp->blk_dva; 21781775Sbillm int needs_resilver = B_FALSE; 21791775Sbillm int d; 2180789Sahrens 21811775Sbillm if (bc->bc_errno) { 2182789Sahrens /* 2183789Sahrens * We can't scrub this block, but we can continue to scrub 2184789Sahrens * the rest of the pool. Note the error and move along. 2185789Sahrens */ 2186789Sahrens mutex_enter(&spa->spa_scrub_lock); 2187789Sahrens spa->spa_scrub_errors++; 2188789Sahrens mutex_exit(&spa->spa_scrub_lock); 2189789Sahrens 21901775Sbillm mutex_enter(&vd->vdev_stat_lock); 21911775Sbillm vd->vdev_stat.vs_scrub_errors++; 21921775Sbillm mutex_exit(&vd->vdev_stat_lock); 2193789Sahrens 2194789Sahrens return (ERESTART); 2195789Sahrens } 2196789Sahrens 2197789Sahrens ASSERT(bp->blk_birth < spa->spa_scrub_maxtxg); 2198789Sahrens 21991775Sbillm for (d = 0; d < BP_GET_NDVAS(bp); d++) { 22001775Sbillm vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d])); 22011775Sbillm 22021775Sbillm ASSERT(vd != NULL); 22031775Sbillm 22041775Sbillm /* 22051775Sbillm * Keep track of how much data we've examined so that 22061775Sbillm * zpool(1M) status can make useful progress reports. 
22071775Sbillm */ 22081775Sbillm mutex_enter(&vd->vdev_stat_lock); 22091775Sbillm vd->vdev_stat.vs_scrub_examined += DVA_GET_ASIZE(&dva[d]); 22101775Sbillm mutex_exit(&vd->vdev_stat_lock); 2211789Sahrens 22121775Sbillm if (spa->spa_scrub_type == POOL_SCRUB_RESILVER) { 22131775Sbillm if (DVA_GET_GANG(&dva[d])) { 22141775Sbillm /* 22151775Sbillm * Gang members may be spread across multiple 22161775Sbillm * vdevs, so the best we can do is look at the 22171775Sbillm * pool-wide DTL. 22181775Sbillm * XXX -- it would be better to change our 22191775Sbillm * allocation policy to ensure that this can't 22201775Sbillm * happen. 22211775Sbillm */ 22221775Sbillm vd = spa->spa_root_vdev; 22231775Sbillm } 22241775Sbillm if (vdev_dtl_contains(&vd->vdev_dtl_map, 22251775Sbillm bp->blk_birth, 1)) 22261775Sbillm needs_resilver = B_TRUE; 2227789Sahrens } 22281775Sbillm } 22291775Sbillm 22301775Sbillm if (spa->spa_scrub_type == POOL_SCRUB_EVERYTHING) 2231789Sahrens spa_scrub_io_start(spa, bp, ZIO_PRIORITY_SCRUB, 22321544Seschrock ZIO_FLAG_SCRUB, &bc->bc_bookmark); 22331775Sbillm else if (needs_resilver) 22341775Sbillm spa_scrub_io_start(spa, bp, ZIO_PRIORITY_RESILVER, 22351775Sbillm ZIO_FLAG_RESILVER, &bc->bc_bookmark); 2236789Sahrens 2237789Sahrens return (0); 2238789Sahrens } 2239789Sahrens 2240789Sahrens static void 2241789Sahrens spa_scrub_thread(spa_t *spa) 2242789Sahrens { 2243789Sahrens callb_cpr_t cprinfo; 2244789Sahrens traverse_handle_t *th = spa->spa_scrub_th; 2245789Sahrens vdev_t *rvd = spa->spa_root_vdev; 2246789Sahrens pool_scrub_type_t scrub_type = spa->spa_scrub_type; 2247789Sahrens int error = 0; 2248789Sahrens boolean_t complete; 2249789Sahrens 2250789Sahrens CALLB_CPR_INIT(&cprinfo, &spa->spa_scrub_lock, callb_generic_cpr, FTAG); 2251789Sahrens 2252797Sbonwick /* 2253797Sbonwick * If we're restarting due to a snapshot create/delete, 2254797Sbonwick * wait for that to complete. 2255797Sbonwick */ 2256797Sbonwick txg_wait_synced(spa_get_dsl(spa), 0); 2257797Sbonwick 22581544Seschrock dprintf("start %s mintxg=%llu maxtxg=%llu\n", 22591544Seschrock scrub_type == POOL_SCRUB_RESILVER ? 
"resilver" : "scrub", 22601544Seschrock spa->spa_scrub_mintxg, spa->spa_scrub_maxtxg); 22611544Seschrock 22621544Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 22631544Seschrock vdev_reopen(rvd); /* purge all vdev caches */ 2264789Sahrens vdev_config_dirty(rvd); /* rewrite all disk labels */ 2265789Sahrens vdev_scrub_stat_update(rvd, scrub_type, B_FALSE); 22661544Seschrock spa_config_exit(spa, FTAG); 2267789Sahrens 2268789Sahrens mutex_enter(&spa->spa_scrub_lock); 2269789Sahrens spa->spa_scrub_errors = 0; 2270789Sahrens spa->spa_scrub_active = 1; 22711544Seschrock ASSERT(spa->spa_scrub_inflight == 0); 22721544Seschrock ASSERT(spa->spa_scrub_throttled == 0); 2273789Sahrens 2274789Sahrens while (!spa->spa_scrub_stop) { 2275789Sahrens CALLB_CPR_SAFE_BEGIN(&cprinfo); 22761544Seschrock while (spa->spa_scrub_suspended) { 2277789Sahrens spa->spa_scrub_active = 0; 2278789Sahrens cv_broadcast(&spa->spa_scrub_cv); 2279789Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2280789Sahrens spa->spa_scrub_active = 1; 2281789Sahrens } 2282789Sahrens CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_scrub_lock); 2283789Sahrens 2284789Sahrens if (spa->spa_scrub_restart_txg != 0) 2285789Sahrens break; 2286789Sahrens 2287789Sahrens mutex_exit(&spa->spa_scrub_lock); 2288789Sahrens error = traverse_more(th); 2289789Sahrens mutex_enter(&spa->spa_scrub_lock); 2290789Sahrens if (error != EAGAIN) 2291789Sahrens break; 22921544Seschrock 22931544Seschrock while (spa->spa_scrub_throttled > 0) 22941544Seschrock cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2295789Sahrens } 2296789Sahrens 2297789Sahrens while (spa->spa_scrub_inflight) 2298789Sahrens cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2299789Sahrens 23001601Sbonwick spa->spa_scrub_active = 0; 23011601Sbonwick cv_broadcast(&spa->spa_scrub_cv); 23021601Sbonwick 23031601Sbonwick mutex_exit(&spa->spa_scrub_lock); 23041601Sbonwick 23051601Sbonwick spa_config_enter(spa, RW_WRITER, FTAG); 23061601Sbonwick 23071601Sbonwick mutex_enter(&spa->spa_scrub_lock); 23081601Sbonwick 23091601Sbonwick /* 23101601Sbonwick * Note: we check spa_scrub_restart_txg under both spa_scrub_lock 23111601Sbonwick * AND the spa config lock to synchronize with any config changes 23121601Sbonwick * that revise the DTLs under spa_vdev_enter() / spa_vdev_exit(). 23131601Sbonwick */ 2314789Sahrens if (spa->spa_scrub_restart_txg != 0) 2315789Sahrens error = ERESTART; 2316789Sahrens 23171544Seschrock if (spa->spa_scrub_stop) 23181544Seschrock error = EINTR; 23191544Seschrock 2320789Sahrens /* 23211544Seschrock * Even if there were uncorrectable errors, we consider the scrub 23221544Seschrock * completed. The downside is that if there is a transient error during 23231544Seschrock * a resilver, we won't resilver the data properly to the target. But 23241544Seschrock * if the damage is permanent (more likely) we will resilver forever, 23251544Seschrock * which isn't really acceptable. Since there is enough information for 23261544Seschrock * the user to know what has failed and why, this seems like a more 23271544Seschrock * tractable approach. 2328789Sahrens */ 23291544Seschrock complete = (error == 0); 2330789Sahrens 23311544Seschrock dprintf("end %s to maxtxg=%llu %s, traverse=%d, %llu errors, stop=%u\n", 23321544Seschrock scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub", 2333789Sahrens spa->spa_scrub_maxtxg, complete ? 
"done" : "FAILED", 2334789Sahrens error, spa->spa_scrub_errors, spa->spa_scrub_stop); 2335789Sahrens 2336789Sahrens mutex_exit(&spa->spa_scrub_lock); 2337789Sahrens 2338789Sahrens /* 2339789Sahrens * If the scrub/resilver completed, update all DTLs to reflect this. 2340789Sahrens * Whether it succeeded or not, vacate all temporary scrub DTLs. 2341789Sahrens */ 2342789Sahrens vdev_dtl_reassess(rvd, spa_last_synced_txg(spa) + 1, 2343789Sahrens complete ? spa->spa_scrub_maxtxg : 0, B_TRUE); 2344789Sahrens vdev_scrub_stat_update(rvd, POOL_SCRUB_NONE, complete); 23451544Seschrock spa_errlog_rotate(spa); 23461601Sbonwick 23471544Seschrock spa_config_exit(spa, FTAG); 2348789Sahrens 2349789Sahrens mutex_enter(&spa->spa_scrub_lock); 2350789Sahrens 23511544Seschrock /* 23521544Seschrock * We may have finished replacing a device. 23531544Seschrock * Let the async thread assess this and handle the detach. 23541544Seschrock */ 23551544Seschrock spa_async_request(spa, SPA_ASYNC_REPLACE_DONE); 2356789Sahrens 2357789Sahrens /* 2358789Sahrens * If we were told to restart, our final act is to start a new scrub. 2359789Sahrens */ 2360789Sahrens if (error == ERESTART) 23611544Seschrock spa_async_request(spa, scrub_type == POOL_SCRUB_RESILVER ? 23621544Seschrock SPA_ASYNC_RESILVER : SPA_ASYNC_SCRUB); 2363789Sahrens 23641544Seschrock spa->spa_scrub_type = POOL_SCRUB_NONE; 23651544Seschrock spa->spa_scrub_active = 0; 23661544Seschrock spa->spa_scrub_thread = NULL; 23671544Seschrock cv_broadcast(&spa->spa_scrub_cv); 2368789Sahrens CALLB_CPR_EXIT(&cprinfo); /* drops &spa->spa_scrub_lock */ 2369789Sahrens thread_exit(); 2370789Sahrens } 2371789Sahrens 2372789Sahrens void 2373789Sahrens spa_scrub_suspend(spa_t *spa) 2374789Sahrens { 2375789Sahrens mutex_enter(&spa->spa_scrub_lock); 23761544Seschrock spa->spa_scrub_suspended++; 2377789Sahrens while (spa->spa_scrub_active) { 2378789Sahrens cv_broadcast(&spa->spa_scrub_cv); 2379789Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2380789Sahrens } 2381789Sahrens while (spa->spa_scrub_inflight) 2382789Sahrens cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2383789Sahrens mutex_exit(&spa->spa_scrub_lock); 2384789Sahrens } 2385789Sahrens 2386789Sahrens void 2387789Sahrens spa_scrub_resume(spa_t *spa) 2388789Sahrens { 2389789Sahrens mutex_enter(&spa->spa_scrub_lock); 23901544Seschrock ASSERT(spa->spa_scrub_suspended != 0); 23911544Seschrock if (--spa->spa_scrub_suspended == 0) 2392789Sahrens cv_broadcast(&spa->spa_scrub_cv); 2393789Sahrens mutex_exit(&spa->spa_scrub_lock); 2394789Sahrens } 2395789Sahrens 2396789Sahrens void 2397789Sahrens spa_scrub_restart(spa_t *spa, uint64_t txg) 2398789Sahrens { 2399789Sahrens /* 2400789Sahrens * Something happened (e.g. snapshot create/delete) that means 2401789Sahrens * we must restart any in-progress scrubs. The itinerary will 2402789Sahrens * fix this properly. 
2403789Sahrens */ 2404789Sahrens mutex_enter(&spa->spa_scrub_lock); 2405789Sahrens spa->spa_scrub_restart_txg = txg; 2406789Sahrens mutex_exit(&spa->spa_scrub_lock); 2407789Sahrens } 2408789Sahrens 24091544Seschrock int 24101544Seschrock spa_scrub(spa_t *spa, pool_scrub_type_t type, boolean_t force) 2411789Sahrens { 2412789Sahrens space_seg_t *ss; 2413789Sahrens uint64_t mintxg, maxtxg; 2414789Sahrens vdev_t *rvd = spa->spa_root_vdev; 2415789Sahrens 2416789Sahrens if ((uint_t)type >= POOL_SCRUB_TYPES) 2417789Sahrens return (ENOTSUP); 2418789Sahrens 24191544Seschrock mutex_enter(&spa->spa_scrub_lock); 24201544Seschrock 2421789Sahrens /* 2422789Sahrens * If there's a scrub or resilver already in progress, stop it. 2423789Sahrens */ 2424789Sahrens while (spa->spa_scrub_thread != NULL) { 2425789Sahrens /* 2426789Sahrens * Don't stop a resilver unless forced. 2427789Sahrens */ 24281544Seschrock if (spa->spa_scrub_type == POOL_SCRUB_RESILVER && !force) { 24291544Seschrock mutex_exit(&spa->spa_scrub_lock); 2430789Sahrens return (EBUSY); 24311544Seschrock } 2432789Sahrens spa->spa_scrub_stop = 1; 2433789Sahrens cv_broadcast(&spa->spa_scrub_cv); 2434789Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2435789Sahrens } 2436789Sahrens 2437789Sahrens /* 2438789Sahrens * Terminate the previous traverse. 2439789Sahrens */ 2440789Sahrens if (spa->spa_scrub_th != NULL) { 2441789Sahrens traverse_fini(spa->spa_scrub_th); 2442789Sahrens spa->spa_scrub_th = NULL; 2443789Sahrens } 2444789Sahrens 24451544Seschrock if (rvd == NULL) { 24461544Seschrock ASSERT(spa->spa_scrub_stop == 0); 24471544Seschrock ASSERT(spa->spa_scrub_type == type); 24481544Seschrock ASSERT(spa->spa_scrub_restart_txg == 0); 24491544Seschrock mutex_exit(&spa->spa_scrub_lock); 24501544Seschrock return (0); 24511544Seschrock } 2452789Sahrens 2453789Sahrens mintxg = TXG_INITIAL - 1; 2454789Sahrens maxtxg = spa_last_synced_txg(spa) + 1; 2455789Sahrens 24561544Seschrock mutex_enter(&rvd->vdev_dtl_lock); 2457789Sahrens 24581544Seschrock if (rvd->vdev_dtl_map.sm_space == 0) { 24591544Seschrock /* 24601544Seschrock * The pool-wide DTL is empty. 24611732Sbonwick * If this is a resilver, there's nothing to do except 24621732Sbonwick * check whether any in-progress replacements have completed. 24631544Seschrock */ 24641732Sbonwick if (type == POOL_SCRUB_RESILVER) { 24651544Seschrock type = POOL_SCRUB_NONE; 24661732Sbonwick spa_async_request(spa, SPA_ASYNC_REPLACE_DONE); 24671732Sbonwick } 24681544Seschrock } else { 24691544Seschrock /* 24701544Seschrock * The pool-wide DTL is non-empty. 24711544Seschrock * If this is a normal scrub, upgrade to a resilver instead. 24721544Seschrock */ 24731544Seschrock if (type == POOL_SCRUB_EVERYTHING) 24741544Seschrock type = POOL_SCRUB_RESILVER; 24751544Seschrock } 2476789Sahrens 24771544Seschrock if (type == POOL_SCRUB_RESILVER) { 2478789Sahrens /* 2479789Sahrens * Determine the resilvering boundaries. 2480789Sahrens * 2481789Sahrens * Note: (mintxg, maxtxg) is an open interval, 2482789Sahrens * i.e. mintxg and maxtxg themselves are not included. 2483789Sahrens * 2484789Sahrens * Note: for maxtxg, we MIN with spa_last_synced_txg(spa) + 1 2485789Sahrens * so we don't claim to resilver a txg that's still changing. 
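	 *
	 * For example (illustrative numbers): if the oldest missing birth
	 * txg in the pool-wide DTL is 100, the newest is 150, and the last
	 * synced txg is 140, we traverse the open interval (99, 141) and
	 * therefore resilver txgs 100 through 140.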
2486789Sahrens */ 2487789Sahrens ss = avl_first(&rvd->vdev_dtl_map.sm_root); 24881544Seschrock mintxg = ss->ss_start - 1; 2489789Sahrens ss = avl_last(&rvd->vdev_dtl_map.sm_root); 24901544Seschrock maxtxg = MIN(ss->ss_end, maxtxg); 2491789Sahrens } 2492789Sahrens 24931544Seschrock mutex_exit(&rvd->vdev_dtl_lock); 24941544Seschrock 24951544Seschrock spa->spa_scrub_stop = 0; 24961544Seschrock spa->spa_scrub_type = type; 24971544Seschrock spa->spa_scrub_restart_txg = 0; 24981544Seschrock 24991544Seschrock if (type != POOL_SCRUB_NONE) { 25001544Seschrock spa->spa_scrub_mintxg = mintxg; 2501789Sahrens spa->spa_scrub_maxtxg = maxtxg; 2502789Sahrens spa->spa_scrub_th = traverse_init(spa, spa_scrub_cb, NULL, 25031635Sbonwick ADVANCE_PRE | ADVANCE_PRUNE | ADVANCE_ZIL, 25041635Sbonwick ZIO_FLAG_CANFAIL); 2505789Sahrens traverse_add_pool(spa->spa_scrub_th, mintxg, maxtxg); 2506789Sahrens spa->spa_scrub_thread = thread_create(NULL, 0, 2507789Sahrens spa_scrub_thread, spa, 0, &p0, TS_RUN, minclsyspri); 2508789Sahrens } 2509789Sahrens 25101544Seschrock mutex_exit(&spa->spa_scrub_lock); 25111544Seschrock 2512789Sahrens return (0); 2513789Sahrens } 2514789Sahrens 25151544Seschrock /* 25161544Seschrock * ========================================================================== 25171544Seschrock * SPA async task processing 25181544Seschrock * ========================================================================== 25191544Seschrock */ 25201544Seschrock 25211544Seschrock static void 25221544Seschrock spa_async_reopen(spa_t *spa) 2523789Sahrens { 25241544Seschrock vdev_t *rvd = spa->spa_root_vdev; 25251544Seschrock vdev_t *tvd; 25261544Seschrock int c; 25271544Seschrock 25281544Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 25291544Seschrock 25301544Seschrock for (c = 0; c < rvd->vdev_children; c++) { 25311544Seschrock tvd = rvd->vdev_child[c]; 25321544Seschrock if (tvd->vdev_reopen_wanted) { 25331544Seschrock tvd->vdev_reopen_wanted = 0; 25341544Seschrock vdev_reopen(tvd); 25351544Seschrock } 25361544Seschrock } 2537789Sahrens 25381544Seschrock spa_config_exit(spa, FTAG); 25391544Seschrock } 25401544Seschrock 25411544Seschrock static void 25421544Seschrock spa_async_thread(spa_t *spa) 25431544Seschrock { 25441544Seschrock int tasks; 25451544Seschrock 25461544Seschrock ASSERT(spa->spa_sync_on); 2547789Sahrens 25481544Seschrock mutex_enter(&spa->spa_async_lock); 25491544Seschrock tasks = spa->spa_async_tasks; 25501544Seschrock spa->spa_async_tasks = 0; 25511544Seschrock mutex_exit(&spa->spa_async_lock); 25521544Seschrock 25531544Seschrock /* 25541635Sbonwick * See if the config needs to be updated. 25551635Sbonwick */ 25561635Sbonwick if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 25571635Sbonwick mutex_enter(&spa_namespace_lock); 25581635Sbonwick spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 25591635Sbonwick mutex_exit(&spa_namespace_lock); 25601635Sbonwick } 25611635Sbonwick 25621635Sbonwick /* 25631544Seschrock * See if any devices need to be reopened. 25641544Seschrock */ 25651544Seschrock if (tasks & SPA_ASYNC_REOPEN) 25661544Seschrock spa_async_reopen(spa); 25671544Seschrock 25681544Seschrock /* 25691544Seschrock * If any devices are done replacing, detach them. 25701544Seschrock */ 25711544Seschrock if (tasks & SPA_ASYNC_REPLACE_DONE) 2572789Sahrens spa_vdev_replace_done(spa); 2573789Sahrens 25741544Seschrock /* 25751544Seschrock * Kick off a scrub. 
25761544Seschrock */ 25771544Seschrock if (tasks & SPA_ASYNC_SCRUB) 25781544Seschrock VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_TRUE) == 0); 25791544Seschrock 25801544Seschrock /* 25811544Seschrock * Kick off a resilver. 25821544Seschrock */ 25831544Seschrock if (tasks & SPA_ASYNC_RESILVER) 25841544Seschrock VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 25851544Seschrock 25861544Seschrock /* 25871544Seschrock * Let the world know that we're done. 25881544Seschrock */ 25891544Seschrock mutex_enter(&spa->spa_async_lock); 25901544Seschrock spa->spa_async_thread = NULL; 25911544Seschrock cv_broadcast(&spa->spa_async_cv); 25921544Seschrock mutex_exit(&spa->spa_async_lock); 25931544Seschrock thread_exit(); 25941544Seschrock } 25951544Seschrock 25961544Seschrock void 25971544Seschrock spa_async_suspend(spa_t *spa) 25981544Seschrock { 25991544Seschrock mutex_enter(&spa->spa_async_lock); 26001544Seschrock spa->spa_async_suspended++; 26011544Seschrock while (spa->spa_async_thread != NULL) 26021544Seschrock cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 26031544Seschrock mutex_exit(&spa->spa_async_lock); 26041544Seschrock } 26051544Seschrock 26061544Seschrock void 26071544Seschrock spa_async_resume(spa_t *spa) 26081544Seschrock { 26091544Seschrock mutex_enter(&spa->spa_async_lock); 26101544Seschrock ASSERT(spa->spa_async_suspended != 0); 26111544Seschrock spa->spa_async_suspended--; 26121544Seschrock mutex_exit(&spa->spa_async_lock); 26131544Seschrock } 26141544Seschrock 26151544Seschrock static void 26161544Seschrock spa_async_dispatch(spa_t *spa) 26171544Seschrock { 26181544Seschrock mutex_enter(&spa->spa_async_lock); 26191544Seschrock if (spa->spa_async_tasks && !spa->spa_async_suspended && 26201635Sbonwick spa->spa_async_thread == NULL && 26211635Sbonwick rootdir != NULL && !vn_is_readonly(rootdir)) 26221544Seschrock spa->spa_async_thread = thread_create(NULL, 0, 26231544Seschrock spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 26241544Seschrock mutex_exit(&spa->spa_async_lock); 26251544Seschrock } 26261544Seschrock 26271544Seschrock void 26281544Seschrock spa_async_request(spa_t *spa, int task) 26291544Seschrock { 26301544Seschrock mutex_enter(&spa->spa_async_lock); 26311544Seschrock spa->spa_async_tasks |= task; 26321544Seschrock mutex_exit(&spa->spa_async_lock); 2633789Sahrens } 2634789Sahrens 2635789Sahrens /* 2636789Sahrens * ========================================================================== 2637789Sahrens * SPA syncing routines 2638789Sahrens * ========================================================================== 2639789Sahrens */ 2640789Sahrens 2641789Sahrens static void 2642789Sahrens spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 2643789Sahrens { 2644789Sahrens bplist_t *bpl = &spa->spa_sync_bplist; 2645789Sahrens dmu_tx_t *tx; 2646789Sahrens blkptr_t blk; 2647789Sahrens uint64_t itor = 0; 2648789Sahrens zio_t *zio; 2649789Sahrens int error; 2650789Sahrens uint8_t c = 1; 2651789Sahrens 2652789Sahrens zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD); 2653789Sahrens 2654789Sahrens while (bplist_iterate(bpl, &itor, &blk) == 0) 2655789Sahrens zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL)); 2656789Sahrens 2657789Sahrens error = zio_wait(zio); 2658789Sahrens ASSERT3U(error, ==, 0); 2659789Sahrens 2660789Sahrens tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2661789Sahrens bplist_vacate(bpl, tx); 2662789Sahrens 2663789Sahrens /* 2664789Sahrens * Pre-dirty the first block so we sync to convergence faster. 
2665789Sahrens * (Usually only the first block is needed.) 2666789Sahrens */ 2667789Sahrens dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 2668789Sahrens dmu_tx_commit(tx); 2669789Sahrens } 2670789Sahrens 2671789Sahrens static void 26722082Seschrock spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 26732082Seschrock { 26742082Seschrock char *packed = NULL; 26752082Seschrock size_t nvsize = 0; 26762082Seschrock dmu_buf_t *db; 26772082Seschrock 26782082Seschrock VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 26792082Seschrock 26802082Seschrock packed = kmem_alloc(nvsize, KM_SLEEP); 26812082Seschrock 26822082Seschrock VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 26832082Seschrock KM_SLEEP) == 0); 26842082Seschrock 26852082Seschrock dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx); 26862082Seschrock 26872082Seschrock kmem_free(packed, nvsize); 26882082Seschrock 26892082Seschrock VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 26902082Seschrock dmu_buf_will_dirty(db, tx); 26912082Seschrock *(uint64_t *)db->db_data = nvsize; 26922082Seschrock dmu_buf_rele(db, FTAG); 26932082Seschrock } 26942082Seschrock 26952082Seschrock static void 26962082Seschrock spa_sync_spares(spa_t *spa, dmu_tx_t *tx) 26972082Seschrock { 26982082Seschrock nvlist_t *nvroot; 26992082Seschrock nvlist_t **spares; 27002082Seschrock int i; 27012082Seschrock 27022082Seschrock if (!spa->spa_sync_spares) 27032082Seschrock return; 27042082Seschrock 27052082Seschrock /* 27062082Seschrock * Update the MOS nvlist describing the list of available spares. 27072082Seschrock * spa_validate_spares() will have already made sure this nvlist is 27082082Seschrock * valid and the vdevs are labelled appropriately. 
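	 *
	 * The list is stored as a packed XDR nvlist in a DMU_OT_PACKED_NVLIST
	 * object whose bonus buffer records the packed size (see
	 * spa_sync_nvlist() above).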
27092082Seschrock */ 27102082Seschrock if (spa->spa_spares_object == 0) { 27112082Seschrock spa->spa_spares_object = dmu_object_alloc(spa->spa_meta_objset, 27122082Seschrock DMU_OT_PACKED_NVLIST, 1 << 14, 27132082Seschrock DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 27142082Seschrock VERIFY(zap_update(spa->spa_meta_objset, 27152082Seschrock DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SPARES, 27162082Seschrock sizeof (uint64_t), 1, &spa->spa_spares_object, tx) == 0); 27172082Seschrock } 27182082Seschrock 27192082Seschrock VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 27202082Seschrock if (spa->spa_nspares == 0) { 27212082Seschrock VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 27222082Seschrock NULL, 0) == 0); 27232082Seschrock } else { 27242082Seschrock spares = kmem_alloc(spa->spa_nspares * sizeof (void *), 27252082Seschrock KM_SLEEP); 27262082Seschrock for (i = 0; i < spa->spa_nspares; i++) 27272082Seschrock spares[i] = vdev_config_generate(spa, 27282082Seschrock spa->spa_spares[i], B_FALSE, B_TRUE); 27292082Seschrock VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 27302082Seschrock spares, spa->spa_nspares) == 0); 27312082Seschrock for (i = 0; i < spa->spa_nspares; i++) 27322082Seschrock nvlist_free(spares[i]); 27332082Seschrock kmem_free(spares, spa->spa_nspares * sizeof (void *)); 27342082Seschrock } 27352082Seschrock 27362082Seschrock spa_sync_nvlist(spa, spa->spa_spares_object, nvroot, tx); 2737*2926Sek110237 nvlist_free(nvroot); 27382082Seschrock 27392082Seschrock spa->spa_sync_spares = B_FALSE; 27402082Seschrock } 27412082Seschrock 27422082Seschrock static void 2743789Sahrens spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 2744789Sahrens { 2745789Sahrens nvlist_t *config; 2746789Sahrens 2747789Sahrens if (list_is_empty(&spa->spa_dirty_list)) 2748789Sahrens return; 2749789Sahrens 2750789Sahrens config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE); 2751789Sahrens 27521635Sbonwick if (spa->spa_config_syncing) 27531635Sbonwick nvlist_free(spa->spa_config_syncing); 27541635Sbonwick spa->spa_config_syncing = config; 2755789Sahrens 27562082Seschrock spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 2757789Sahrens } 2758789Sahrens 2759789Sahrens /* 2760789Sahrens * Sync the specified transaction group. New blocks may be dirtied as 2761789Sahrens * part of the process, so we iterate until it converges. 2762789Sahrens */ 2763789Sahrens void 2764789Sahrens spa_sync(spa_t *spa, uint64_t txg) 2765789Sahrens { 2766789Sahrens dsl_pool_t *dp = spa->spa_dsl_pool; 2767789Sahrens objset_t *mos = spa->spa_meta_objset; 2768789Sahrens bplist_t *bpl = &spa->spa_sync_bplist; 27691635Sbonwick vdev_t *rvd = spa->spa_root_vdev; 2770789Sahrens vdev_t *vd; 2771789Sahrens dmu_tx_t *tx; 2772789Sahrens int dirty_vdevs; 2773789Sahrens 2774789Sahrens /* 2775789Sahrens * Lock out configuration changes. 2776789Sahrens */ 27771544Seschrock spa_config_enter(spa, RW_READER, FTAG); 2778789Sahrens 2779789Sahrens spa->spa_syncing_txg = txg; 2780789Sahrens spa->spa_sync_pass = 0; 2781789Sahrens 27821544Seschrock VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj)); 2783789Sahrens 27842082Seschrock tx = dmu_tx_create_assigned(dp, txg); 27852082Seschrock 27862082Seschrock /* 27872082Seschrock * If we are upgrading to ZFS_VERSION_RAIDZ_DEFLATE this txg, 27882082Seschrock * set spa_deflate if we have no raid-z vdevs. 
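	 * (The loop below treats any vdev whose vdev_deflate_ratio differs
	 * from SPA_MINBLOCKSIZE as raid-z; only if none is found do we
	 * record DMU_POOL_DEFLATE in the MOS directory.)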
27892082Seschrock */ 27902082Seschrock if (spa->spa_ubsync.ub_version < ZFS_VERSION_RAIDZ_DEFLATE && 27912082Seschrock spa->spa_uberblock.ub_version >= ZFS_VERSION_RAIDZ_DEFLATE) { 27922082Seschrock int i; 27932082Seschrock 27942082Seschrock for (i = 0; i < rvd->vdev_children; i++) { 27952082Seschrock vd = rvd->vdev_child[i]; 27962082Seschrock if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 27972082Seschrock break; 27982082Seschrock } 27992082Seschrock if (i == rvd->vdev_children) { 28002082Seschrock spa->spa_deflate = TRUE; 28012082Seschrock VERIFY(0 == zap_add(spa->spa_meta_objset, 28022082Seschrock DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 28032082Seschrock sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 28042082Seschrock } 28052082Seschrock } 28062082Seschrock 2807789Sahrens /* 2808789Sahrens * If anything has changed in this txg, push the deferred frees 2809789Sahrens * from the previous txg. If not, leave them alone so that we 2810789Sahrens * don't generate work on an otherwise idle system. 2811789Sahrens */ 2812789Sahrens if (!txg_list_empty(&dp->dp_dirty_datasets, txg) || 28132329Sek110237 !txg_list_empty(&dp->dp_dirty_dirs, txg) || 28142329Sek110237 !txg_list_empty(&dp->dp_sync_tasks, txg)) 2815789Sahrens spa_sync_deferred_frees(spa, txg); 2816789Sahrens 2817789Sahrens /* 2818789Sahrens * Iterate to convergence. 2819789Sahrens */ 2820789Sahrens do { 2821789Sahrens spa->spa_sync_pass++; 2822789Sahrens 2823789Sahrens spa_sync_config_object(spa, tx); 28242082Seschrock spa_sync_spares(spa, tx); 28251544Seschrock spa_errlog_sync(spa, txg); 2826789Sahrens dsl_pool_sync(dp, txg); 2827789Sahrens 2828789Sahrens dirty_vdevs = 0; 2829789Sahrens while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) { 2830789Sahrens vdev_sync(vd, txg); 2831789Sahrens dirty_vdevs++; 2832789Sahrens } 2833789Sahrens 2834789Sahrens bplist_sync(bpl, tx); 2835789Sahrens } while (dirty_vdevs); 2836789Sahrens 2837789Sahrens bplist_close(bpl); 2838789Sahrens 2839789Sahrens dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass); 2840789Sahrens 2841789Sahrens /* 2842789Sahrens * Rewrite the vdev configuration (which includes the uberblock) 2843789Sahrens * to commit the transaction group. 28441635Sbonwick * 28451635Sbonwick * If there are any dirty vdevs, sync the uberblock to all vdevs. 28461635Sbonwick * Otherwise, pick a random top-level vdev that's known to be 28471635Sbonwick * visible in the config cache (see spa_vdev_add() for details). 28481635Sbonwick * If the write fails, try the next vdev until we're tried them all. 2849789Sahrens */ 28501635Sbonwick if (!list_is_empty(&spa->spa_dirty_list)) { 28511635Sbonwick VERIFY(vdev_config_sync(rvd, txg) == 0); 28521635Sbonwick } else { 28531635Sbonwick int children = rvd->vdev_children; 28541635Sbonwick int c0 = spa_get_random(children); 28551635Sbonwick int c; 28561635Sbonwick 28571635Sbonwick for (c = 0; c < children; c++) { 28581635Sbonwick vd = rvd->vdev_child[(c0 + c) % children]; 28591635Sbonwick if (vd->vdev_ms_array == 0) 28601635Sbonwick continue; 28611635Sbonwick if (vdev_config_sync(vd, txg) == 0) 28621635Sbonwick break; 28631635Sbonwick } 28641635Sbonwick if (c == children) 28651635Sbonwick VERIFY(vdev_config_sync(rvd, txg) == 0); 28661635Sbonwick } 28671635Sbonwick 28682082Seschrock dmu_tx_commit(tx); 28692082Seschrock 28701635Sbonwick /* 28711635Sbonwick * Clear the dirty config list. 
28721635Sbonwick */ 28731635Sbonwick while ((vd = list_head(&spa->spa_dirty_list)) != NULL) 28741635Sbonwick vdev_config_clean(vd); 28751635Sbonwick 28761635Sbonwick /* 28771635Sbonwick * Now that the new config has synced transactionally, 28781635Sbonwick * let it become visible to the config cache. 28791635Sbonwick */ 28801635Sbonwick if (spa->spa_config_syncing != NULL) { 28811635Sbonwick spa_config_set(spa, spa->spa_config_syncing); 28821635Sbonwick spa->spa_config_txg = txg; 28831635Sbonwick spa->spa_config_syncing = NULL; 28841635Sbonwick } 2885789Sahrens 2886789Sahrens /* 2887789Sahrens * Make a stable copy of the fully synced uberblock. 2888789Sahrens * We use this as the root for pool traversals. 2889789Sahrens */ 2890789Sahrens spa->spa_traverse_wanted = 1; /* tells traverse_more() to stop */ 2891789Sahrens 2892789Sahrens spa_scrub_suspend(spa); /* stop scrubbing and finish I/Os */ 2893789Sahrens 2894789Sahrens rw_enter(&spa->spa_traverse_lock, RW_WRITER); 2895789Sahrens spa->spa_traverse_wanted = 0; 2896789Sahrens spa->spa_ubsync = spa->spa_uberblock; 2897789Sahrens rw_exit(&spa->spa_traverse_lock); 2898789Sahrens 2899789Sahrens spa_scrub_resume(spa); /* resume scrub with new ubsync */ 2900789Sahrens 2901789Sahrens /* 2902789Sahrens * Clean up the ZIL records for the synced txg. 2903789Sahrens */ 2904789Sahrens dsl_pool_zil_clean(dp); 2905789Sahrens 2906789Sahrens /* 2907789Sahrens * Update usable space statistics. 2908789Sahrens */ 2909789Sahrens while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 2910789Sahrens vdev_sync_done(vd, txg); 2911789Sahrens 2912789Sahrens /* 2913789Sahrens * It had better be the case that we didn't dirty anything 29142082Seschrock * since vdev_config_sync(). 2915789Sahrens */ 2916789Sahrens ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 2917789Sahrens ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 2918789Sahrens ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 2919789Sahrens ASSERT(bpl->bpl_queue == NULL); 2920789Sahrens 29211544Seschrock spa_config_exit(spa, FTAG); 29221544Seschrock 29231544Seschrock /* 29241544Seschrock * If any async tasks have been requested, kick them off. 29251544Seschrock */ 29261544Seschrock spa_async_dispatch(spa); 2927789Sahrens } 2928789Sahrens 2929789Sahrens /* 2930789Sahrens * Sync all pools. We don't want to hold the namespace lock across these 2931789Sahrens * operations, so we take a reference on the spa_t and drop the lock during the 2932789Sahrens * sync. 2933789Sahrens */ 2934789Sahrens void 2935789Sahrens spa_sync_allpools(void) 2936789Sahrens { 2937789Sahrens spa_t *spa = NULL; 2938789Sahrens mutex_enter(&spa_namespace_lock); 2939789Sahrens while ((spa = spa_next(spa)) != NULL) { 2940789Sahrens if (spa_state(spa) != POOL_STATE_ACTIVE) 2941789Sahrens continue; 2942789Sahrens spa_open_ref(spa, FTAG); 2943789Sahrens mutex_exit(&spa_namespace_lock); 2944789Sahrens txg_wait_synced(spa_get_dsl(spa), 0); 2945789Sahrens mutex_enter(&spa_namespace_lock); 2946789Sahrens spa_close(spa, FTAG); 2947789Sahrens } 2948789Sahrens mutex_exit(&spa_namespace_lock); 2949789Sahrens } 2950789Sahrens 2951789Sahrens /* 2952789Sahrens * ========================================================================== 2953789Sahrens * Miscellaneous routines 2954789Sahrens * ========================================================================== 2955789Sahrens */ 2956789Sahrens 2957789Sahrens /* 2958789Sahrens * Remove all pools in the system. 
2959789Sahrens */ 2960789Sahrens void 2961789Sahrens spa_evict_all(void) 2962789Sahrens { 2963789Sahrens spa_t *spa; 2964789Sahrens 2965789Sahrens /* 2966789Sahrens * Remove all cached state. All pools should be closed now, 2967789Sahrens * so every spa in the AVL tree should be unreferenced. 2968789Sahrens */ 2969789Sahrens mutex_enter(&spa_namespace_lock); 2970789Sahrens while ((spa = spa_next(NULL)) != NULL) { 2971789Sahrens /* 29721544Seschrock * Stop async tasks. The async thread may need to detach 29731544Seschrock * a device that's been replaced, which requires grabbing 29741544Seschrock * spa_namespace_lock, so we must drop it here. 2975789Sahrens */ 2976789Sahrens spa_open_ref(spa, FTAG); 2977789Sahrens mutex_exit(&spa_namespace_lock); 29781544Seschrock spa_async_suspend(spa); 2979789Sahrens VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0); 2980789Sahrens mutex_enter(&spa_namespace_lock); 2981789Sahrens spa_close(spa, FTAG); 2982789Sahrens 2983789Sahrens if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 2984789Sahrens spa_unload(spa); 2985789Sahrens spa_deactivate(spa); 2986789Sahrens } 2987789Sahrens spa_remove(spa); 2988789Sahrens } 2989789Sahrens mutex_exit(&spa_namespace_lock); 2990789Sahrens } 29911544Seschrock 29921544Seschrock vdev_t * 29931544Seschrock spa_lookup_by_guid(spa_t *spa, uint64_t guid) 29941544Seschrock { 29951544Seschrock return (vdev_lookup_by_guid(spa->spa_root_vdev, guid)); 29961544Seschrock } 29971760Seschrock 29981760Seschrock void 29991760Seschrock spa_upgrade(spa_t *spa) 30001760Seschrock { 30011760Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 30021760Seschrock 30031760Seschrock /* 30041760Seschrock * This should only be called for a non-faulted pool, and since a 30051760Seschrock * future version would result in an unopenable pool, this shouldn't be 30061760Seschrock * possible. 30071760Seschrock */ 30081760Seschrock ASSERT(spa->spa_uberblock.ub_version <= ZFS_VERSION); 30091760Seschrock 30101760Seschrock spa->spa_uberblock.ub_version = ZFS_VERSION; 30111760Seschrock vdev_config_dirty(spa->spa_root_vdev); 30121760Seschrock 30131760Seschrock spa_config_exit(spa, FTAG); 30142082Seschrock 30152082Seschrock txg_wait_synced(spa_get_dsl(spa), 0); 30161760Seschrock } 30172082Seschrock 30182082Seschrock boolean_t 30192082Seschrock spa_has_spare(spa_t *spa, uint64_t guid) 30202082Seschrock { 30212082Seschrock int i; 30222082Seschrock 30232082Seschrock for (i = 0; i < spa->spa_nspares; i++) 30242082Seschrock if (spa->spa_spares[i]->vdev_guid == guid) 30252082Seschrock return (B_TRUE); 30262082Seschrock 30272082Seschrock return (B_FALSE); 30282082Seschrock } 3029
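/*
 * Illustrative sketch (not part of the original file): the spare-list
 * handling in spa_vdev_add() above merges two nvlist arrays by deep-copying
 * every element into a freshly allocated array and then replacing the old
 * array wholesale.  A minimal userland equivalent using libnvpair might look
 * like the compiled-out code below; merge_nvlist_arrays() is a hypothetical
 * name, not a function that exists anywhere in ZFS.
 */
#if 0
#include <libnvpair.h>
#include <stdlib.h>

static int
merge_nvlist_arrays(nvlist_t *dst, const char *name,
    nvlist_t **a, uint_t na, nvlist_t **b, uint_t nb)
{
	nvlist_t **merged;
	uint_t i;

	merged = calloc(na + nb, sizeof (nvlist_t *));
	if (merged == NULL)
		return (-1);

	/* Deep-copy both inputs so the caller retains ownership of a and b. */
	for (i = 0; i < na; i++)
		if (nvlist_dup(a[i], &merged[i], 0) != 0)
			goto fail;
	for (i = 0; i < nb; i++)
		if (nvlist_dup(b[i], &merged[na + i], 0) != 0)
			goto fail;

	/* nvlist_add_nvlist_array() copies again, so free our temporaries. */
	if (nvlist_add_nvlist_array(dst, name, merged, na + nb) != 0)
		goto fail;

	for (i = 0; i < na + nb; i++)
		nvlist_free(merged[i]);
	free(merged);
	return (0);

fail:
	for (i = 0; i < na + nb; i++)
		if (merged[i] != NULL)
			nvlist_free(merged[i]);
	free(merged);
	return (-1);
}
#endif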