/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>
#include <sys/callb.h>

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}
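/*
 * Error-log entries are keyed by zbookmark_t, which identifies an error by
 * its (objset, object, level, blkid) tuple.  The byte-wise comparison above
 * only has to give the AVL tree a consistent total order; no semantic
 * ordering of bookmarks is implied.
 */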
/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
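/*
 * A minimal sketch of how a consumer disposes of the trees it is handed by
 * spa_get_errlists() (the real consumers live in spa_errlog.c):
 *
 *	avl_tree_t last, scrub;
 *	spa_error_entry_t *se;
 *	void *cookie = NULL;
 *
 *	mutex_enter(&spa->spa_errlist_lock);
 *	spa_get_errlists(spa, &last, &scrub);
 *	mutex_exit(&spa->spa_errlist_lock);
 *
 *	while ((se = avl_destroy_nodes(&last, &cookie)) != NULL)
 *		kmem_free(se, sizeof (spa_error_entry_t));
 *	avl_destroy(&last);
 */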
/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;

	spa->spa_normal_class = metaslab_class_create();

	for (t = 0; t < ZIO_TYPES; t++) {
		spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue",
		    8, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
		spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr",
		    8, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
	}

	rw_init(&spa->spa_traverse_lock, NULL, RW_DEFAULT, NULL);

	list_create(&spa->spa_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_dirty_list);

	rw_destroy(&spa->spa_traverse_lock);

	for (t = 0; t < ZIO_TYPES; t++) {
		taskq_destroy(spa->spa_zio_issue_taskq[t]);
		taskq_destroy(spa->spa_zio_intr_taskq[t]);
		spa->spa_zio_issue_taskq[t] = NULL;
		spa->spa_zio_intr_taskq[t] = NULL;
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}
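/*
 * A note on the contract above: on any failure, spa_config_parse() frees the
 * entire partially-constructed subtree rooted at *vdp and sets the pointer to
 * NULL, so callers never have to unwind a half-built tree themselves.  The
 * recursion bottoms out at leaf vdevs, which carry no ZPOOL_CONFIG_CHILDREN
 * array.
 */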
/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding prefetch I/O to complete.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa_config_exit(spa, FTAG);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_nspares; i++)
		vdev_free(spa->spa_spares[i]);
	if (spa->spa_spares) {
		kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));
		spa->spa_spares = NULL;
	}
	if (spa->spa_sparelist) {
		nvlist_free(spa->spa_sparelist);
		spa->spa_sparelist = NULL;
	}

	spa->spa_async_suspended = 0;
}
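/*
 * A note on the "Wait for any outstanding prefetch I/O" idiom in spa_unload()
 * above: the back-to-back spa_config_enter(RW_WRITER)/spa_config_exit() pair
 * looks like a no-op, but the writer lock cannot be granted until every
 * in-flight reader (such as traverse prefetch I/O) has drained, so the empty
 * critical section doubles as a barrier.
 */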
/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_sparelist'.  We parse this into vdevs, try to open them, and then
 * re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_nspares; i++) {
		vdev_close(spa->spa_spares[i]);
		vdev_free(spa->spa_spares[i]);
	}
	if (spa->spa_spares)
		kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));

	if (spa->spa_sparelist == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_nspares = (int)nspares;
	spa->spa_spares = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.
	 */
	spa->spa_spares = kmem_alloc(nspares * sizeof (void *), KM_SLEEP);
	for (i = 0; i < spa->spa_nspares; i++) {
		vdev_t *vd;

		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares[i] = vd;

		if (vdev_open(vd) != 0)
			continue;

		vd->vdev_top = vd;
		(void) vdev_validate_spare(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_nspares * sizeof (void *), KM_SLEEP);
	for (i = 0; i < spa->spa_nspares; i++)
		spares[i] = vdev_config_generate(spa, spa->spa_spares[i],
		    B_TRUE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    spares, spa->spa_nspares) == 0);
	for (i = 0; i < spa->spa_nspares; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_nspares * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}
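/*
 * On-disk layout assumed by load_nvlist() above: the object's bonus buffer
 * (DMU_OT_PACKED_NVLIST_SIZE) holds a single uint64_t giving the length of
 * the packed nvlist, and the object data is the packed stream itself
 * (typically NV_ENCODE_XDR at this vintage).  spa_create() below allocates
 * the pool config object in exactly this shape.
 */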
/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	zio_t *zio;

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = ZFS_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	if (vdev_open(rvd) != 0) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Validate the labels for all leaf vdevs.  We need to grab the config
	 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD
	 * flag.
	 */
	spa_config_enter(spa, RW_READER, FTAG);
	error = vdev_validate(rvd);
	spa_config_exit(spa, FTAG);

	if (error != 0) {
		error = EBADF;
		goto out;
	}

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	bzero(ub, sizeof (uberblock_t));

	zio = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
	vdev_uberblock_load(zio, rvd, ub);
	error = zio_wait(zio);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > ZFS_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}
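	/*
	 * (For reference: vdev_guid_sum is the 64-bit sum of the guids of
	 * every vdev in the tree, so a mismatch with ub_guid_sum means a
	 * device is missing from, or foreign to, this configuration.)
	 */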
	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;

		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}
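	/*
	 * A note on the !mosconfig pass above: the label config we were
	 * originally handed is trusted only far enough to find the MOS;
	 * once the copy stored in the MOS can be read, spa_load() restarts
	 * with that copy, which is the authoritative one.
	 */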
	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= ZFS_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares_object,
		    &spa->spa_sparelist) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
	}

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, FTAG);

	/*
	 * Check the state of the root vdev.  If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		dmu_objset_find(spa->spa_name, zil_claim, tx, 0);
		dmu_tx_commit(tx);
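		/*
		 * (zil_claim() runs over every objset in the pool, via
		 * dmu_objset_find() above, and claims any intent-log blocks
		 * written before the last sync, so they cannot be freed out
		 * from under a future log replay.)
		 */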
		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	if (error && error != EBADF)
		zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}
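/*
 * A note on the error path above: EBADF is deliberately excluded from the
 * ereport in the 'out:' path because vdev_validate() uses it to mean "this
 * pool appears to have been exported or destroyed"; spa_open_common() below
 * reacts to that case by dropping the stale entry from the config cache
 * rather than treating it as corruption.
 */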
/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of being grabbed from the configuration cache.
 * For the case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int loaded = B_FALSE;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again.  The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it means that one of the vdevs indicates
			 * that the pool has been exported or destroyed.  If
			 * this is the case, the config cache is out of sync
			 * and we should remove the pool from the namespace.
			 */
			zfs_post_ok(spa, NULL);
			spa_unload(spa);
			spa_deactivate(spa);
			spa_remove(spa);
			spa_config_sync();
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open().  Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL) {
				spa_config_enter(spa, RW_READER, FTAG);
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
				spa_config_exit(spa, FTAG);
			}
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			zfs_post_ok(spa, NULL);
			spa->spa_last_open_failed = B_FALSE;
		}

		loaded = B_TRUE;
	}

	spa_open_ref(spa, tag);
	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL) {
		spa_config_enter(spa, RW_READER, FTAG);
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		spa_config_exit(spa, FTAG);
	}

	/*
	 * If we just loaded the pool, resilver anything that's out of date.
	 */
	if (loaded && (spa_mode & FWRITE))
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	return (0);
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}
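/*
 * A minimal usage sketch for the open interfaces above (hypothetical caller;
 * the tag is conventionally FTAG so leaked references can be traced back):
 *
 *	spa_t *spa;
 *	int error;
 *
 *	if ((error = spa_open("tank", &spa, FTAG)) != 0)
 *		return (error);
 *	... use spa, e.g. spa_get_dsl(spa) ...
 *	spa_close(spa, FTAG);
 */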
/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}

static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;

	if (spa->spa_nspares == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare.  If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_inuse(guid)) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}
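/*
 * Note that spa_add_spares() edits only the config nvlist being handed back
 * to the caller; flagging an in-use spare as VDEV_STATE_CANT_OPEN /
 * VDEV_AUX_SPARED is purely a reporting convention (presumably what userland
 * renders as the spare being "in use"), and the spare vdevs themselves are
 * never touched here.
 */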
int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	if (spa && *config != NULL) {
		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
		    spa_get_errlog_size(spa)) == 0);

		spa_add_spares(spa, *config);
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we
	 * cheat and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL)
		spa_close(spa, FTAG);

	return (error);
}

/*
 * Validate that the 'spares' array is well formed.  We must have an array of
 * nvlists, each of which describes a valid leaf vdev.
 */
static int
spa_validate_spares(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	nvlist_t **spares;
	uint_t i, nspares;
	vdev_t *vd;
	int error;

	/*
	 * It's acceptable to have no spares specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		return (0);

	if (nspares == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports hot
	 * spares.
	 */
	if (spa_version(spa) < ZFS_VERSION_SPARES)
		return (ENOTSUP);

	for (i = 0; i < nspares; i++) {
		if ((error = spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    mode)) != 0)
			return (error);

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			return (EINVAL);
		}

		if ((error = vdev_open(vd)) != 0) {
			vdev_free(vd);
			return (error);
		}

		vd->vdev_top = vd;
		if ((error = vdev_label_spare(vd, crtxg)) != 0) {
			vdev_free(vd);
			return (error);
		}

		VERIFY(nvlist_add_uint64(spares[i], ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);

		vdev_free(vd);
	}

	return (0);
}
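/*
 * One subtlety above: spa_validate_spares() writes the guid assigned during
 * labeling back into the caller's nvlist (ZPOOL_CONFIG_GUID) before freeing
 * its temporary vdev.  Callers such as spa_create() and spa_vdev_add() go on
 * to store that same nvlist, so the stashed sparelist and the on-disk spare
 * labels agree on the guid.
 */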
/*
 * Pool Creation
 */
int
spa_create(const char *pool, nvlist_t *nvroot, const char *altroot)
{
	spa_t *spa;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares;
	uint_t nspares;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	spa->spa_uberblock.ub_txg = txg - 1;
	spa->spa_uberblock.ub_version = ZFS_VERSION;
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Create the root vdev.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	if (error == 0 && rvd->vdev_children == 0)
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_spares(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_sparelist, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_sync_spares = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, 1 << 14,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}
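	/*
	 * (A failed zap_add() here would mean the just-created MOS is
	 * already damaged or we have a programming error; since pool
	 * creation happens inside a single assigned transaction there is
	 * nothing sensible to unwind to, hence CE_PANIC rather than an
	 * error return.)
	 */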
	/* Newly created pools are always deflated. */
	spa->spa_deflate = TRUE;
	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add deflate");
	}

	/*
	 * Create the deferred-free bplist object.  Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	dmu_tx_commit(tx);

	spa->spa_sync_on = B_TRUE;
	txg_sync_start(spa->spa_dsl_pool);

	/*
	 * We explicitly wait for the first transaction to complete so that our
	 * bean counters are appropriately updated.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	spa_config_sync();

	mutex_exit(&spa_namespace_lock);

	return (0);
}
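/*
 * For reference, a sketch of the nvroot shape spa_create() above expects
 * (the keys are the ZPOOL_CONFIG_* names; userland normally assembles this
 * via libzfs, and the device paths below are purely illustrative):
 *
 *	type = "root"
 *	children = [
 *		{ type = "disk", path = "/dev/dsk/c0t0d0s0" }
 *	]
 *	spares = [					(optional)
 *		{ type = "disk", path = "/dev/dsk/c0t1d0s0" }
 *	]
 */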
/*
 * Import the given pool into the system.  We set up the necessary spa_t and
 * then call spa_load() to do the dirty work.
 */
int
spa_import(const char *pool, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	int error;
	nvlist_t *nvroot;
	nvlist_t **spares;
	uint_t nspares;

	if (!(spa_mode & FWRITE))
		return (EROFS);

	/*
	 * If a pool with this name exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Create and initialize the spa structure.
	 */
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig because the user-supplied config
	 * is actually the one to trust when doing an import.
	 */
	error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);

	spa_config_enter(spa, RW_WRITER, FTAG);
	/*
	 * Toss any existing sparelist, as it doesn't have any validity anymore,
	 * and conflicts with spa_has_spare().
	 */
	if (spa->spa_sparelist) {
		nvlist_free(spa->spa_sparelist);
		spa->spa_sparelist = NULL;
		spa_load_spares(spa);
	}

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (error == 0)
		error = spa_validate_spares(spa, nvroot, -1ULL,
		    VDEV_ALLOC_SPARE);
	spa_config_exit(spa, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Override any spares as specified by the user, as these may have
	 * correct device names/devids, etc.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		if (spa->spa_sparelist)
			VERIFY(nvlist_remove(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_sparelist,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_sync_spares = B_TRUE;
	}

	/*
	 * Update the config cache to include the newly-imported pool.
	 */
	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);

	mutex_exit(&spa_namespace_lock);

	/*
	 * Resilver anything that's out of date.
	 */
	if (spa_mode & FWRITE)
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	return (0);
}
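/*
 * (spa_config_update(spa, SPA_CONFIG_UPDATE_POOL) above is also what
 * persists the pool into the config cache file -- /etc/zfs/zpool.cache at
 * this vintage -- so the import survives a reboot.)
 */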
/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
	nvlist_t *config = NULL;
	char *poolname;
	spa_t *spa;
	uint64_t state;

	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
		return (NULL);

	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
		return (NULL);

	/*
	 * Create and initialize the spa structure.
	 */
	mutex_enter(&spa_namespace_lock);
	spa = spa_add(TRYIMPORT_NAME, NULL);
	spa_activate(spa);

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig because the user-supplied config
	 * is actually the one to trust when doing an import.
	 */
	(void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);

	/*
	 * If 'tryconfig' was at least parsable, return the current config.
	 */
	if (spa->spa_root_vdev != NULL) {
		spa_config_enter(spa, RW_READER, FTAG);
		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		spa_config_exit(spa, FTAG);
		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
		    poolname) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    state) == 0);

		/*
		 * Add the list of hot spares.
		 */
		spa_add_spares(spa, config);
	}

	spa_unload(spa);
	spa_deactivate(spa);
	spa_remove(spa);
	mutex_exit(&spa_namespace_lock);

	return (config);
}
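/*
 * Note that spa_tryimport() never leaves state behind: the "$import" spa_t
 * is unloaded and removed before returning, and only the generated config
 * nvlist escapes to the caller (who is responsible for freeing it).
 */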
/*
 * Pool export/destroy
 *
 * The act of destroying or exporting a pool is very simple.  We make sure there
 * is no more pending I/O and any references to the pool are gone.  Then, we
 * update the pool state and sync all the labels to disk, removing the
 * configuration from the cache afterwards.
 */
static int
spa_export_common(char *pool, int new_state, nvlist_t **oldconfig)
{
	spa_t *spa;

	if (oldconfig)
		*oldconfig = NULL;

	if (!(spa_mode & FWRITE))
		return (EROFS);

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(pool)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}

	/*
	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
	 * reacquire the namespace lock, and see if we can export.
	 */
	spa_open_ref(spa, FTAG);
	mutex_exit(&spa_namespace_lock);
	spa_async_suspend(spa);
	mutex_enter(&spa_namespace_lock);
	spa_close(spa, FTAG);

	/*
	 * The pool will be in core if it's openable,
	 * in which case we can modify its state.
	 */
	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
		/*
		 * Objsets may be open only because they're dirty, so we
		 * have to force it to sync before checking spa_refcnt.
		 */
		spa_scrub_suspend(spa);
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * A pool cannot be exported or destroyed if there are active
		 * references.  If we are resetting a pool, allow references by
		 * fault injection handlers.
		 */
		if (!spa_refcount_zero(spa) ||
		    (spa->spa_inject_ref != 0 &&
		    new_state != POOL_STATE_UNINITIALIZED)) {
			spa_scrub_resume(spa);
			spa_async_resume(spa);
			mutex_exit(&spa_namespace_lock);
			return (EBUSY);
		}

		spa_scrub_resume(spa);
		VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);

		/*
		 * We want this to be reflected on every label,
		 * so mark them all dirty.  spa_unload() will do the
		 * final sync that pushes these changes out.
		 */
		if (new_state != POOL_STATE_UNINITIALIZED) {
			spa_config_enter(spa, RW_WRITER, FTAG);
			spa->spa_state = new_state;
			spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
			vdev_config_dirty(spa->spa_root_vdev);
			spa_config_exit(spa, FTAG);
		}
	}

	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
		spa_unload(spa);
		spa_deactivate(spa);
	}

	if (oldconfig && spa->spa_config)
		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);

	if (new_state != POOL_STATE_UNINITIALIZED) {
		spa_remove(spa);
		spa_config_sync();
	}
	mutex_exit(&spa_namespace_lock);

	return (0);
}
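/*
 * The three wrappers below differ only in the new_state they pass to
 * spa_export_common():
 *
 *	spa_destroy()	POOL_STATE_DESTROYED	pool is gone for good
 *	spa_export()	POOL_STATE_EXPORTED	pool may be imported again
 *	spa_reset()	POOL_STATE_UNINITIALIZED	unload only; the pool
 *							stays in the namespace
 */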
/*
 * Destroy a storage pool.
 */
int
spa_destroy(char *pool)
{
	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL));
}

/*
 * Export a storage pool.
 */
int
spa_export(char *pool, nvlist_t **oldconfig)
{
	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig));
}

/*
 * Similar to spa_export(), this unloads the spa_t without actually removing it
 * from the namespace in any way.
 */
int
spa_reset(char *pool)
{
	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL));
}


/*
 * ==========================================================================
 * Device manipulation
 * ==========================================================================
 */

/*
 * Add capacity to a storage pool.
 */
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
	uint64_t txg;
	int c, error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd, *tvd;
	nvlist_t **spares;
	uint_t i, nspares;

	txg = spa_vdev_enter(spa);

	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
	    VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, NULL, txg, error));

	if ((error = spa_validate_spares(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, vd, txg, error));

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		nspares = 0;

	if (vd->vdev_children == 0 && nspares == 0)
		return (spa_vdev_exit(spa, vd, txg, EINVAL));

	if (vd->vdev_children != 0) {
		if ((error = vdev_create(vd, txg, B_FALSE)) != 0)
			return (spa_vdev_exit(spa, vd, txg, error));

		/*
		 * Transfer each new top-level vdev from vd to rvd.
		 */
		for (c = 0; c < vd->vdev_children; c++) {
			tvd = vd->vdev_child[c];
			vdev_remove_child(vd, tvd);
			tvd->vdev_id = rvd->vdev_children;
			vdev_add_child(rvd, tvd);
			vdev_config_dirty(tvd);
		}
	}

	if (nspares != 0) {
		if (spa->spa_sparelist != NULL) {
			nvlist_t **oldspares;
			uint_t oldnspares;
			nvlist_t **newspares;

			VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, &oldspares, &oldnspares) == 0);

			newspares = kmem_alloc(sizeof (void *) *
			    (nspares + oldnspares), KM_SLEEP);
			for (i = 0; i < oldnspares; i++)
				VERIFY(nvlist_dup(oldspares[i],
				    &newspares[i], KM_SLEEP) == 0);
			for (i = 0; i < nspares; i++)
				VERIFY(nvlist_dup(spares[i],
				    &newspares[i + oldnspares],
				    KM_SLEEP) == 0);

			VERIFY(nvlist_remove(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);

			VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, newspares,
			    nspares + oldnspares) == 0);
			for (i = 0; i < oldnspares + nspares; i++)
				nvlist_free(newspares[i]);
			kmem_free(newspares, (oldnspares + nspares) *
			    sizeof (void *));
		} else {
			VERIFY(nvlist_alloc(&spa->spa_sparelist,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
			VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		}

		spa_load_spares(spa);
		spa->spa_sync_spares = B_TRUE;
	}
1494789Sahrens /*
14951585Sbonwick * We have to be careful when adding new vdevs to an existing pool.
14961585Sbonwick * If other threads start allocating from these vdevs before we
14971585Sbonwick * sync the config cache, and we lose power, then upon reboot we may
14981585Sbonwick * fail to open the pool because there are DVAs that the config cache
14991585Sbonwick * can't translate. Therefore, we first add the vdevs without
15001585Sbonwick * initializing metaslabs; sync the config cache (via spa_vdev_exit());
15011635Sbonwick * and then let spa_config_update() initialize the new metaslabs.
15021585Sbonwick *
15031585Sbonwick * spa_load() checks for added-but-not-initialized vdevs, so that
15041585Sbonwick * if we lose power at any point in this sequence, the remaining
15051585Sbonwick * steps will be completed the next time we load the pool.
1506789Sahrens */
15071635Sbonwick (void) spa_vdev_exit(spa, vd, txg, 0);
15081585Sbonwick 
15091635Sbonwick mutex_enter(&spa_namespace_lock);
15101635Sbonwick spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
15111635Sbonwick mutex_exit(&spa_namespace_lock);
1512789Sahrens 
15131635Sbonwick return (0);
1514789Sahrens }
1515789Sahrens 
1516789Sahrens /*
1517789Sahrens * Attach a device to a mirror. The arguments are the path to any device
1518789Sahrens * in the mirror, and the nvroot for the new device. If the path specifies
1519789Sahrens * a device that is not mirrored, we automatically insert the mirror vdev.
1520789Sahrens *
1521789Sahrens * If 'replacing' is specified, the new device is intended to replace the
1522789Sahrens * existing device; in this case the two devices are made into their own
1523789Sahrens * mirror using the 'replacing' vdev, which is functionally identical to
1524789Sahrens * the mirror vdev (it actually reuses all the same ops) but has a few
1525789Sahrens * extra rules: you can't attach to it after it's been created, and upon
1526789Sahrens * completion of resilvering, the first disk (the one being replaced)
1527789Sahrens * is automatically detached. 
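 *
 * As a hypothetical illustration (device names invented), replacing
 * c1t0d0 with c1t1d0 yields the subtree
 *
 *	replacing
 *	    c1t0d0	(original; detached when the resilver completes)
 *	    c1t1d0	(new device)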
1528789Sahrens */ 1529789Sahrens int 15301544Seschrock spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 1531789Sahrens { 1532789Sahrens uint64_t txg, open_txg; 1533789Sahrens int error; 1534789Sahrens vdev_t *rvd = spa->spa_root_vdev; 1535789Sahrens vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 15362082Seschrock vdev_ops_t *pvops; 1537789Sahrens 1538789Sahrens txg = spa_vdev_enter(spa); 1539789Sahrens 15401544Seschrock oldvd = vdev_lookup_by_guid(rvd, guid); 1541789Sahrens 1542789Sahrens if (oldvd == NULL) 1543789Sahrens return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 1544789Sahrens 15451585Sbonwick if (!oldvd->vdev_ops->vdev_op_leaf) 15461585Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 15471585Sbonwick 1548789Sahrens pvd = oldvd->vdev_parent; 1549789Sahrens 15502082Seschrock if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 15512082Seschrock VDEV_ALLOC_ADD)) != 0 || newrootvd->vdev_children != 1) 1552789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 1553789Sahrens 1554789Sahrens newvd = newrootvd->vdev_child[0]; 1555789Sahrens 1556789Sahrens if (!newvd->vdev_ops->vdev_op_leaf) 1557789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 1558789Sahrens 15592082Seschrock if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 1560789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, error)); 1561789Sahrens 15622082Seschrock if (!replacing) { 15632082Seschrock /* 15642082Seschrock * For attach, the only allowable parent is a mirror or the root 15652082Seschrock * vdev. 15662082Seschrock */ 15672082Seschrock if (pvd->vdev_ops != &vdev_mirror_ops && 15682082Seschrock pvd->vdev_ops != &vdev_root_ops) 15692082Seschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 15702082Seschrock 15712082Seschrock pvops = &vdev_mirror_ops; 15722082Seschrock } else { 15732082Seschrock /* 15742082Seschrock * Active hot spares can only be replaced by inactive hot 15752082Seschrock * spares. 15762082Seschrock */ 15772082Seschrock if (pvd->vdev_ops == &vdev_spare_ops && 15782082Seschrock pvd->vdev_child[1] == oldvd && 15792082Seschrock !spa_has_spare(spa, newvd->vdev_guid)) 15802082Seschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 15812082Seschrock 15822082Seschrock /* 15832082Seschrock * If the source is a hot spare, and the parent isn't already a 15842082Seschrock * spare, then we want to create a new hot spare. Otherwise, we 15852082Seschrock * want to create a replacing vdev. 15862082Seschrock */ 15872082Seschrock if (pvd->vdev_ops == &vdev_replacing_ops) 15882082Seschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 15892082Seschrock else if (pvd->vdev_ops != &vdev_spare_ops && 15902082Seschrock newvd->vdev_isspare) 15912082Seschrock pvops = &vdev_spare_ops; 15922082Seschrock else 15932082Seschrock pvops = &vdev_replacing_ops; 15942082Seschrock } 15952082Seschrock 15961175Slling /* 15971175Slling * Compare the new device size with the replaceable/attachable 15981175Slling * device size. 15991175Slling */ 16001175Slling if (newvd->vdev_psize < vdev_get_rsize(oldvd)) 1601789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 1602789Sahrens 16031732Sbonwick /* 16041732Sbonwick * The new device cannot have a higher alignment requirement 16051732Sbonwick * than the top-level vdev. 
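 * For example (illustrative numbers), a device with a 4K minimum
 * allocation size (ashift 12) cannot be attached beneath a top-level
 * vdev built from 512-byte-sector devices (ashift 9).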
16061732Sbonwick */ 16071732Sbonwick if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 1608789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 1609789Sahrens 1610789Sahrens /* 1611789Sahrens * If this is an in-place replacement, update oldvd's path and devid 1612789Sahrens * to make it distinguishable from newvd, and unopenable from now on. 1613789Sahrens */ 1614789Sahrens if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 1615789Sahrens spa_strfree(oldvd->vdev_path); 1616789Sahrens oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 1617789Sahrens KM_SLEEP); 1618789Sahrens (void) sprintf(oldvd->vdev_path, "%s/%s", 1619789Sahrens newvd->vdev_path, "old"); 1620789Sahrens if (oldvd->vdev_devid != NULL) { 1621789Sahrens spa_strfree(oldvd->vdev_devid); 1622789Sahrens oldvd->vdev_devid = NULL; 1623789Sahrens } 1624789Sahrens } 1625789Sahrens 1626789Sahrens /* 16272082Seschrock * If the parent is not a mirror, or if we're replacing, insert the new 16282082Seschrock * mirror/replacing/spare vdev above oldvd. 1629789Sahrens */ 1630789Sahrens if (pvd->vdev_ops != pvops) 1631789Sahrens pvd = vdev_add_parent(oldvd, pvops); 1632789Sahrens 1633789Sahrens ASSERT(pvd->vdev_top->vdev_parent == rvd); 1634789Sahrens ASSERT(pvd->vdev_ops == pvops); 1635789Sahrens ASSERT(oldvd->vdev_parent == pvd); 1636789Sahrens 1637789Sahrens /* 1638789Sahrens * Extract the new device from its root and add it to pvd. 1639789Sahrens */ 1640789Sahrens vdev_remove_child(newrootvd, newvd); 1641789Sahrens newvd->vdev_id = pvd->vdev_children; 1642789Sahrens vdev_add_child(pvd, newvd); 1643789Sahrens 16441544Seschrock /* 16451544Seschrock * If newvd is smaller than oldvd, but larger than its rsize, 16461544Seschrock * the addition of newvd may have decreased our parent's asize. 16471544Seschrock */ 16481544Seschrock pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); 16491544Seschrock 1650789Sahrens tvd = newvd->vdev_top; 1651789Sahrens ASSERT(pvd->vdev_top == tvd); 1652789Sahrens ASSERT(tvd->vdev_parent == rvd); 1653789Sahrens 1654789Sahrens vdev_config_dirty(tvd); 1655789Sahrens 1656789Sahrens /* 1657789Sahrens * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate 1658789Sahrens * upward when spa_vdev_exit() calls vdev_dtl_reassess(). 1659789Sahrens */ 1660789Sahrens open_txg = txg + TXG_CONCURRENT_STATES - 1; 1661789Sahrens 1662789Sahrens mutex_enter(&newvd->vdev_dtl_lock); 1663789Sahrens space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL, 1664789Sahrens open_txg - TXG_INITIAL + 1); 1665789Sahrens mutex_exit(&newvd->vdev_dtl_lock); 1666789Sahrens 16671544Seschrock dprintf("attached %s in txg %llu\n", newvd->vdev_path, txg); 16681544Seschrock 1669789Sahrens /* 1670789Sahrens * Mark newvd's DTL dirty in this txg. 1671789Sahrens */ 16721732Sbonwick vdev_dirty(tvd, VDD_DTL, newvd, txg); 1673789Sahrens 1674789Sahrens (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); 1675789Sahrens 1676789Sahrens /* 1677789Sahrens * Kick off a resilver to update newvd. 1678789Sahrens */ 1679789Sahrens VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 1680789Sahrens 1681789Sahrens return (0); 1682789Sahrens } 1683789Sahrens 1684789Sahrens /* 1685789Sahrens * Detach a device from a mirror or replacing vdev. 1686789Sahrens * If 'replace_done' is specified, only detach if the parent 1687789Sahrens * is a replacing vdev. 
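 * (For the special 'spare' vdev, either child may be detached; see the
 * replace_done handling below.)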
1688789Sahrens */ 1689789Sahrens int 16901544Seschrock spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done) 1691789Sahrens { 1692789Sahrens uint64_t txg; 1693789Sahrens int c, t, error; 1694789Sahrens vdev_t *rvd = spa->spa_root_vdev; 1695789Sahrens vdev_t *vd, *pvd, *cvd, *tvd; 16962082Seschrock boolean_t unspare = B_FALSE; 16972082Seschrock uint64_t unspare_guid; 1698789Sahrens 1699789Sahrens txg = spa_vdev_enter(spa); 1700789Sahrens 17011544Seschrock vd = vdev_lookup_by_guid(rvd, guid); 1702789Sahrens 1703789Sahrens if (vd == NULL) 1704789Sahrens return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 1705789Sahrens 17061585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 17071585Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 17081585Sbonwick 1709789Sahrens pvd = vd->vdev_parent; 1710789Sahrens 1711789Sahrens /* 1712789Sahrens * If replace_done is specified, only remove this device if it's 17132082Seschrock * the first child of a replacing vdev. For the 'spare' vdev, either 17142082Seschrock * disk can be removed. 1715789Sahrens */ 17162082Seschrock if (replace_done) { 17172082Seschrock if (pvd->vdev_ops == &vdev_replacing_ops) { 17182082Seschrock if (vd->vdev_id != 0) 17192082Seschrock return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 17202082Seschrock } else if (pvd->vdev_ops != &vdev_spare_ops) { 17212082Seschrock return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 17222082Seschrock } 17232082Seschrock } 17242082Seschrock 17252082Seschrock ASSERT(pvd->vdev_ops != &vdev_spare_ops || 17262082Seschrock spa_version(spa) >= ZFS_VERSION_SPARES); 1727789Sahrens 1728789Sahrens /* 17292082Seschrock * Only mirror, replacing, and spare vdevs support detach. 1730789Sahrens */ 1731789Sahrens if (pvd->vdev_ops != &vdev_replacing_ops && 17322082Seschrock pvd->vdev_ops != &vdev_mirror_ops && 17332082Seschrock pvd->vdev_ops != &vdev_spare_ops) 1734789Sahrens return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 1735789Sahrens 1736789Sahrens /* 1737789Sahrens * If there's only one replica, you can't detach it. 1738789Sahrens */ 1739789Sahrens if (pvd->vdev_children <= 1) 1740789Sahrens return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 1741789Sahrens 1742789Sahrens /* 1743789Sahrens * If all siblings have non-empty DTLs, this device may have the only 1744789Sahrens * valid copy of the data, which means we cannot safely detach it. 1745789Sahrens * 1746789Sahrens * XXX -- as in the vdev_offline() case, we really want a more 1747789Sahrens * precise DTL check. 1748789Sahrens */ 1749789Sahrens for (c = 0; c < pvd->vdev_children; c++) { 1750789Sahrens uint64_t dirty; 1751789Sahrens 1752789Sahrens cvd = pvd->vdev_child[c]; 1753789Sahrens if (cvd == vd) 1754789Sahrens continue; 1755789Sahrens if (vdev_is_dead(cvd)) 1756789Sahrens continue; 1757789Sahrens mutex_enter(&cvd->vdev_dtl_lock); 1758789Sahrens dirty = cvd->vdev_dtl_map.sm_space | 1759789Sahrens cvd->vdev_dtl_scrub.sm_space; 1760789Sahrens mutex_exit(&cvd->vdev_dtl_lock); 1761789Sahrens if (!dirty) 1762789Sahrens break; 1763789Sahrens } 17642082Seschrock 17652082Seschrock /* 17662082Seschrock * If we are a replacing or spare vdev, then we can always detach the 17672082Seschrock * latter child, as that is how one cancels the operation. 
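 * (Concretely: even if every sibling has a non-empty DTL, detaching
 * child 1 of a replacing or spare vdev is still allowed, because doing
 * so merely cancels the in-progress replacement.)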
17682082Seschrock */ 17692082Seschrock if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) && 17702082Seschrock c == pvd->vdev_children) 1771789Sahrens return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 1772789Sahrens 1773789Sahrens /* 17742082Seschrock * If we are detaching the original disk from a spare, then it implies 17752082Seschrock * that the spare should become a real disk, and be removed from the 17762082Seschrock * active spare list for the pool. 17772082Seschrock */ 17782082Seschrock if (pvd->vdev_ops == &vdev_spare_ops && 17792082Seschrock vd->vdev_id == 0) 17802082Seschrock unspare = B_TRUE; 17812082Seschrock 17822082Seschrock /* 1783789Sahrens * Erase the disk labels so the disk can be used for other things. 1784789Sahrens * This must be done after all other error cases are handled, 1785789Sahrens * but before we disembowel vd (so we can still do I/O to it). 1786789Sahrens * But if we can't do it, don't treat the error as fatal -- 1787789Sahrens * it may be that the unwritability of the disk is the reason 1788789Sahrens * it's being detached! 1789789Sahrens */ 17902082Seschrock error = vdev_label_init(vd, 0, B_FALSE); 1791789Sahrens if (error) 1792789Sahrens dprintf("unable to erase labels on %s\n", vdev_description(vd)); 1793789Sahrens 1794789Sahrens /* 1795789Sahrens * Remove vd from its parent and compact the parent's children. 1796789Sahrens */ 1797789Sahrens vdev_remove_child(pvd, vd); 1798789Sahrens vdev_compact_children(pvd); 1799789Sahrens 1800789Sahrens /* 1801789Sahrens * Remember one of the remaining children so we can get tvd below. 1802789Sahrens */ 1803789Sahrens cvd = pvd->vdev_child[0]; 1804789Sahrens 1805789Sahrens /* 18062082Seschrock * If we need to remove the remaining child from the list of hot spares, 18072082Seschrock * do it now, marking the vdev as no longer a spare in the process. We 18082082Seschrock * must do this before vdev_remove_parent(), because that can change the 18092082Seschrock * GUID if it creates a new toplevel GUID. 18102082Seschrock */ 18112082Seschrock if (unspare) { 18122082Seschrock ASSERT(cvd->vdev_isspare); 18132082Seschrock spa_spare_remove(cvd->vdev_guid); 18142082Seschrock cvd->vdev_isspare = B_FALSE; 18152082Seschrock unspare_guid = cvd->vdev_guid; 18162082Seschrock } 18172082Seschrock 18182082Seschrock /* 1819789Sahrens * If the parent mirror/replacing vdev only has one child, 1820789Sahrens * the parent is no longer needed. Remove it from the tree. 1821789Sahrens */ 1822789Sahrens if (pvd->vdev_children == 1) 1823789Sahrens vdev_remove_parent(cvd); 1824789Sahrens 1825789Sahrens /* 1826789Sahrens * We don't set tvd until now because the parent we just removed 1827789Sahrens * may have been the previous top-level vdev. 1828789Sahrens */ 1829789Sahrens tvd = cvd->vdev_top; 1830789Sahrens ASSERT(tvd->vdev_parent == rvd); 1831789Sahrens 1832789Sahrens /* 1833789Sahrens * Reopen this top-level vdev to reassess health after detach. 1834789Sahrens */ 18351544Seschrock vdev_reopen(tvd); 1836789Sahrens 1837789Sahrens /* 1838789Sahrens * If the device we just detached was smaller than the others, 18391732Sbonwick * it may be possible to add metaslabs (i.e. grow the pool). 18401732Sbonwick * vdev_metaslab_init() can't fail because the existing metaslabs 18411732Sbonwick * are already in core, so there's nothing to read from disk. 
1842789Sahrens */ 18431732Sbonwick VERIFY(vdev_metaslab_init(tvd, txg) == 0); 1844789Sahrens 1845789Sahrens vdev_config_dirty(tvd); 1846789Sahrens 1847789Sahrens /* 1848789Sahrens * Mark vd's DTL as dirty in this txg. 1849789Sahrens * vdev_dtl_sync() will see that vd->vdev_detached is set 1850789Sahrens * and free vd's DTL object in syncing context. 1851789Sahrens * But first make sure we're not on any *other* txg's DTL list, 1852789Sahrens * to prevent vd from being accessed after it's freed. 1853789Sahrens */ 1854789Sahrens for (t = 0; t < TXG_SIZE; t++) 1855789Sahrens (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 18561732Sbonwick vd->vdev_detached = B_TRUE; 18571732Sbonwick vdev_dirty(tvd, VDD_DTL, vd, txg); 1858789Sahrens 18591544Seschrock dprintf("detached %s in txg %llu\n", vd->vdev_path, txg); 1860789Sahrens 18612082Seschrock error = spa_vdev_exit(spa, vd, txg, 0); 18622082Seschrock 18632082Seschrock /* 18642082Seschrock * If we are supposed to remove the given vdev from the list of spares, 18652082Seschrock * iterate over all pools in the system and replace it if it's present. 18662082Seschrock */ 18672082Seschrock if (unspare) { 18682082Seschrock spa = NULL; 18692082Seschrock mutex_enter(&spa_namespace_lock); 18702082Seschrock while ((spa = spa_next(spa)) != NULL) { 18712082Seschrock if (spa->spa_state != POOL_STATE_ACTIVE) 18722082Seschrock continue; 18732082Seschrock 18742082Seschrock (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 18752082Seschrock } 18762082Seschrock mutex_exit(&spa_namespace_lock); 18772082Seschrock } 18782082Seschrock 18792082Seschrock return (error); 18802082Seschrock } 18812082Seschrock 18822082Seschrock /* 18832082Seschrock * Remove a device from the pool. Currently, this supports removing only hot 18842082Seschrock * spares. 18852082Seschrock */ 18862082Seschrock int 18872082Seschrock spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 18882082Seschrock { 18892082Seschrock vdev_t *vd; 18902082Seschrock nvlist_t **spares, *nv, **newspares; 18912082Seschrock uint_t i, j, nspares; 18922082Seschrock int ret = 0; 18932082Seschrock 18942082Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 18952082Seschrock 18962082Seschrock vd = spa_lookup_by_guid(spa, guid); 18972082Seschrock 18982082Seschrock nv = NULL; 18992082Seschrock if (spa->spa_spares != NULL && 19002082Seschrock nvlist_lookup_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES, 19012082Seschrock &spares, &nspares) == 0) { 19022082Seschrock for (i = 0; i < nspares; i++) { 19032082Seschrock uint64_t theguid; 19042082Seschrock 19052082Seschrock VERIFY(nvlist_lookup_uint64(spares[i], 19062082Seschrock ZPOOL_CONFIG_GUID, &theguid) == 0); 19072082Seschrock if (theguid == guid) { 19082082Seschrock nv = spares[i]; 19092082Seschrock break; 19102082Seschrock } 19112082Seschrock } 19122082Seschrock } 19132082Seschrock 19142082Seschrock /* 19152082Seschrock * We only support removing a hot spare, and only if it's not currently 19162082Seschrock * in use in this pool. 
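 * The checks below map the (spare entry, vdev) combinations to errors:
 * neither found -> ENOENT; a vdev that is not a spare -> ENOTSUP; a
 * spare that is still wired into the pool, without 'unspare' -> EBUSY.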
19172082Seschrock */ 19182082Seschrock if (nv == NULL && vd == NULL) { 19192082Seschrock ret = ENOENT; 19202082Seschrock goto out; 19212082Seschrock } 19222082Seschrock 19232082Seschrock if (nv == NULL && vd != NULL) { 19242082Seschrock ret = ENOTSUP; 19252082Seschrock goto out; 19262082Seschrock } 19272082Seschrock 19282082Seschrock if (!unspare && nv != NULL && vd != NULL) { 19292082Seschrock ret = EBUSY; 19302082Seschrock goto out; 19312082Seschrock } 19322082Seschrock 19332082Seschrock if (nspares == 1) { 19342082Seschrock newspares = NULL; 19352082Seschrock } else { 19362082Seschrock newspares = kmem_alloc((nspares - 1) * sizeof (void *), 19372082Seschrock KM_SLEEP); 19382082Seschrock for (i = 0, j = 0; i < nspares; i++) { 19392082Seschrock if (spares[i] != nv) 19402082Seschrock VERIFY(nvlist_dup(spares[i], 19412082Seschrock &newspares[j++], KM_SLEEP) == 0); 19422082Seschrock } 19432082Seschrock } 19442082Seschrock 19452082Seschrock VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES, 19462082Seschrock DATA_TYPE_NVLIST_ARRAY) == 0); 19472082Seschrock VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES, 19482082Seschrock newspares, nspares - 1) == 0); 19492082Seschrock for (i = 0; i < nspares - 1; i++) 19502082Seschrock nvlist_free(newspares[i]); 19512082Seschrock kmem_free(newspares, (nspares - 1) * sizeof (void *)); 19522082Seschrock spa_load_spares(spa); 19532082Seschrock spa->spa_sync_spares = B_TRUE; 19542082Seschrock 19552082Seschrock out: 19562082Seschrock spa_config_exit(spa, FTAG); 19572082Seschrock 19582082Seschrock return (ret); 1959789Sahrens } 1960789Sahrens 1961789Sahrens /* 19621544Seschrock * Find any device that's done replacing, so we can detach it. 1963789Sahrens */ 19641544Seschrock static vdev_t * 19651544Seschrock spa_vdev_replace_done_hunt(vdev_t *vd) 1966789Sahrens { 19671544Seschrock vdev_t *newvd, *oldvd; 1968789Sahrens int c; 1969789Sahrens 19701544Seschrock for (c = 0; c < vd->vdev_children; c++) { 19711544Seschrock oldvd = spa_vdev_replace_done_hunt(vd->vdev_child[c]); 19721544Seschrock if (oldvd != NULL) 19731544Seschrock return (oldvd); 19741544Seschrock } 1975789Sahrens 1976789Sahrens if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) { 19771544Seschrock oldvd = vd->vdev_child[0]; 19781544Seschrock newvd = vd->vdev_child[1]; 1979789Sahrens 19801544Seschrock mutex_enter(&newvd->vdev_dtl_lock); 19811544Seschrock if (newvd->vdev_dtl_map.sm_space == 0 && 19821544Seschrock newvd->vdev_dtl_scrub.sm_space == 0) { 19831544Seschrock mutex_exit(&newvd->vdev_dtl_lock); 19841544Seschrock return (oldvd); 19851544Seschrock } 19861544Seschrock mutex_exit(&newvd->vdev_dtl_lock); 19871544Seschrock } 1988789Sahrens 19891544Seschrock return (NULL); 1990789Sahrens } 1991789Sahrens 19921544Seschrock static void 1993789Sahrens spa_vdev_replace_done(spa_t *spa) 1994789Sahrens { 19951544Seschrock vdev_t *vd; 19962082Seschrock vdev_t *pvd; 19971544Seschrock uint64_t guid; 19982082Seschrock uint64_t pguid = 0; 1999789Sahrens 20001544Seschrock spa_config_enter(spa, RW_READER, FTAG); 2001789Sahrens 20021544Seschrock while ((vd = spa_vdev_replace_done_hunt(spa->spa_root_vdev)) != NULL) { 20031544Seschrock guid = vd->vdev_guid; 20042082Seschrock /* 20052082Seschrock * If we have just finished replacing a hot spared device, then 20062082Seschrock * we need to detach the parent's first child (the original hot 20072082Seschrock * spare) as well. 
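 *
 * Illustrative subtree at that point (device names invented):
 *
 *	spare
 *	    replacing		<- pvd (vdev_id 0)
 *		old-disk	<- vd, detached by guid
 *		new-disk
 *	    hot-spare		<- detached by pguid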
20082082Seschrock */ 20092082Seschrock pvd = vd->vdev_parent; 20102082Seschrock if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops && 20112082Seschrock pvd->vdev_id == 0) { 20122082Seschrock ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 20132082Seschrock ASSERT(pvd->vdev_parent->vdev_children == 2); 20142082Seschrock pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid; 20152082Seschrock } 20161544Seschrock spa_config_exit(spa, FTAG); 20171544Seschrock if (spa_vdev_detach(spa, guid, B_TRUE) != 0) 20181544Seschrock return; 20192082Seschrock if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0) 20202082Seschrock return; 20211544Seschrock spa_config_enter(spa, RW_READER, FTAG); 2022789Sahrens } 2023789Sahrens 20241544Seschrock spa_config_exit(spa, FTAG); 2025789Sahrens } 2026789Sahrens 2027789Sahrens /* 20281354Seschrock * Update the stored path for this vdev. Dirty the vdev configuration, relying 20291354Seschrock * on spa_vdev_enter/exit() to synchronize the labels and cache. 20301354Seschrock */ 20311354Seschrock int 20321354Seschrock spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 20331354Seschrock { 20341354Seschrock vdev_t *rvd, *vd; 20351354Seschrock uint64_t txg; 20361354Seschrock 20371354Seschrock rvd = spa->spa_root_vdev; 20381354Seschrock 20391354Seschrock txg = spa_vdev_enter(spa); 20401354Seschrock 20412082Seschrock if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 20422082Seschrock /* 20432082Seschrock * Determine if this is a reference to a hot spare. In that 20442082Seschrock * case, update the path as stored in the spare list. 20452082Seschrock */ 20462082Seschrock nvlist_t **spares; 20472082Seschrock uint_t i, nspares; 20482082Seschrock if (spa->spa_sparelist != NULL) { 20492082Seschrock VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist, 20502082Seschrock ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 20512082Seschrock for (i = 0; i < nspares; i++) { 20522082Seschrock uint64_t theguid; 20532082Seschrock VERIFY(nvlist_lookup_uint64(spares[i], 20542082Seschrock ZPOOL_CONFIG_GUID, &theguid) == 0); 20552082Seschrock if (theguid == guid) 20562082Seschrock break; 20572082Seschrock } 20582082Seschrock 20592082Seschrock if (i == nspares) 20602082Seschrock return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 20612082Seschrock 20622082Seschrock VERIFY(nvlist_add_string(spares[i], 20632082Seschrock ZPOOL_CONFIG_PATH, newpath) == 0); 20642082Seschrock spa_load_spares(spa); 20652082Seschrock spa->spa_sync_spares = B_TRUE; 20662082Seschrock return (spa_vdev_exit(spa, NULL, txg, 0)); 20672082Seschrock } else { 20682082Seschrock return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 20692082Seschrock } 20702082Seschrock } 20711354Seschrock 20721585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 20731585Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 20741585Sbonwick 20751354Seschrock spa_strfree(vd->vdev_path); 20761354Seschrock vd->vdev_path = spa_strdup(newpath); 20771354Seschrock 20781354Seschrock vdev_config_dirty(vd->vdev_top); 20791354Seschrock 20801354Seschrock return (spa_vdev_exit(spa, NULL, txg, 0)); 20811354Seschrock } 20821354Seschrock 20831354Seschrock /* 2084789Sahrens * ========================================================================== 2085789Sahrens * SPA Scrubbing 2086789Sahrens * ========================================================================== 2087789Sahrens */ 2088789Sahrens 20891544Seschrock void 20901544Seschrock spa_scrub_throttle(spa_t *spa, int direction) 20911544Seschrock { 20921544Seschrock mutex_enter(&spa->spa_scrub_lock); 
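	/*
	 * 'direction' raises or lowers the throttle count; while the count
	 * is nonzero, spa_scrub_thread() waits on spa_scrub_io_cv before
	 * issuing more scrub I/O.
	 */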
20931544Seschrock spa->spa_scrub_throttled += direction; 20941544Seschrock ASSERT(spa->spa_scrub_throttled >= 0); 20951544Seschrock if (spa->spa_scrub_throttled == 0) 20961544Seschrock cv_broadcast(&spa->spa_scrub_io_cv); 20971544Seschrock mutex_exit(&spa->spa_scrub_lock); 20981544Seschrock } 2099789Sahrens 2100789Sahrens static void 2101789Sahrens spa_scrub_io_done(zio_t *zio) 2102789Sahrens { 2103789Sahrens spa_t *spa = zio->io_spa; 2104789Sahrens 2105789Sahrens zio_buf_free(zio->io_data, zio->io_size); 2106789Sahrens 2107789Sahrens mutex_enter(&spa->spa_scrub_lock); 21081544Seschrock if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 21091775Sbillm vdev_t *vd = zio->io_vd ? zio->io_vd : spa->spa_root_vdev; 2110789Sahrens spa->spa_scrub_errors++; 2111789Sahrens mutex_enter(&vd->vdev_stat_lock); 2112789Sahrens vd->vdev_stat.vs_scrub_errors++; 2113789Sahrens mutex_exit(&vd->vdev_stat_lock); 2114789Sahrens } 21151544Seschrock if (--spa->spa_scrub_inflight == 0) { 21161544Seschrock cv_broadcast(&spa->spa_scrub_io_cv); 21171544Seschrock ASSERT(spa->spa_scrub_throttled == 0); 21181544Seschrock } 21191544Seschrock mutex_exit(&spa->spa_scrub_lock); 2120789Sahrens } 2121789Sahrens 2122789Sahrens static void 21231544Seschrock spa_scrub_io_start(spa_t *spa, blkptr_t *bp, int priority, int flags, 21241544Seschrock zbookmark_t *zb) 2125789Sahrens { 2126789Sahrens size_t size = BP_GET_LSIZE(bp); 2127789Sahrens void *data = zio_buf_alloc(size); 2128789Sahrens 2129789Sahrens mutex_enter(&spa->spa_scrub_lock); 2130789Sahrens spa->spa_scrub_inflight++; 2131789Sahrens mutex_exit(&spa->spa_scrub_lock); 2132789Sahrens 21331544Seschrock if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET) 21341544Seschrock flags |= ZIO_FLAG_SPECULATIVE; /* intent log block */ 21351544Seschrock 21361807Sbonwick flags |= ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL; 21371544Seschrock 2138789Sahrens zio_nowait(zio_read(NULL, spa, bp, data, size, 21391544Seschrock spa_scrub_io_done, NULL, priority, flags, zb)); 2140789Sahrens } 2141789Sahrens 2142789Sahrens /* ARGSUSED */ 2143789Sahrens static int 2144789Sahrens spa_scrub_cb(traverse_blk_cache_t *bc, spa_t *spa, void *a) 2145789Sahrens { 2146789Sahrens blkptr_t *bp = &bc->bc_blkptr; 21471775Sbillm vdev_t *vd = spa->spa_root_vdev; 21481775Sbillm dva_t *dva = bp->blk_dva; 21491775Sbillm int needs_resilver = B_FALSE; 21501775Sbillm int d; 2151789Sahrens 21521775Sbillm if (bc->bc_errno) { 2153789Sahrens /* 2154789Sahrens * We can't scrub this block, but we can continue to scrub 2155789Sahrens * the rest of the pool. Note the error and move along. 2156789Sahrens */ 2157789Sahrens mutex_enter(&spa->spa_scrub_lock); 2158789Sahrens spa->spa_scrub_errors++; 2159789Sahrens mutex_exit(&spa->spa_scrub_lock); 2160789Sahrens 21611775Sbillm mutex_enter(&vd->vdev_stat_lock); 21621775Sbillm vd->vdev_stat.vs_scrub_errors++; 21631775Sbillm mutex_exit(&vd->vdev_stat_lock); 2164789Sahrens 2165789Sahrens return (ERESTART); 2166789Sahrens } 2167789Sahrens 2168789Sahrens ASSERT(bp->blk_birth < spa->spa_scrub_maxtxg); 2169789Sahrens 21701775Sbillm for (d = 0; d < BP_GET_NDVAS(bp); d++) { 21711775Sbillm vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d])); 21721775Sbillm 21731775Sbillm ASSERT(vd != NULL); 21741775Sbillm 21751775Sbillm /* 21761775Sbillm * Keep track of how much data we've examined so that 21771775Sbillm * zpool(1M) status can make useful progress reports. 
21781775Sbillm */ 21791775Sbillm mutex_enter(&vd->vdev_stat_lock); 21801775Sbillm vd->vdev_stat.vs_scrub_examined += DVA_GET_ASIZE(&dva[d]); 21811775Sbillm mutex_exit(&vd->vdev_stat_lock); 2182789Sahrens 21831775Sbillm if (spa->spa_scrub_type == POOL_SCRUB_RESILVER) { 21841775Sbillm if (DVA_GET_GANG(&dva[d])) { 21851775Sbillm /* 21861775Sbillm * Gang members may be spread across multiple 21871775Sbillm * vdevs, so the best we can do is look at the 21881775Sbillm * pool-wide DTL. 21891775Sbillm * XXX -- it would be better to change our 21901775Sbillm * allocation policy to ensure that this can't 21911775Sbillm * happen. 21921775Sbillm */ 21931775Sbillm vd = spa->spa_root_vdev; 21941775Sbillm } 21951775Sbillm if (vdev_dtl_contains(&vd->vdev_dtl_map, 21961775Sbillm bp->blk_birth, 1)) 21971775Sbillm needs_resilver = B_TRUE; 2198789Sahrens } 21991775Sbillm } 22001775Sbillm 22011775Sbillm if (spa->spa_scrub_type == POOL_SCRUB_EVERYTHING) 2202789Sahrens spa_scrub_io_start(spa, bp, ZIO_PRIORITY_SCRUB, 22031544Seschrock ZIO_FLAG_SCRUB, &bc->bc_bookmark); 22041775Sbillm else if (needs_resilver) 22051775Sbillm spa_scrub_io_start(spa, bp, ZIO_PRIORITY_RESILVER, 22061775Sbillm ZIO_FLAG_RESILVER, &bc->bc_bookmark); 2207789Sahrens 2208789Sahrens return (0); 2209789Sahrens } 2210789Sahrens 2211789Sahrens static void 2212789Sahrens spa_scrub_thread(spa_t *spa) 2213789Sahrens { 2214789Sahrens callb_cpr_t cprinfo; 2215789Sahrens traverse_handle_t *th = spa->spa_scrub_th; 2216789Sahrens vdev_t *rvd = spa->spa_root_vdev; 2217789Sahrens pool_scrub_type_t scrub_type = spa->spa_scrub_type; 2218789Sahrens int error = 0; 2219789Sahrens boolean_t complete; 2220789Sahrens 2221789Sahrens CALLB_CPR_INIT(&cprinfo, &spa->spa_scrub_lock, callb_generic_cpr, FTAG); 2222789Sahrens 2223797Sbonwick /* 2224797Sbonwick * If we're restarting due to a snapshot create/delete, 2225797Sbonwick * wait for that to complete. 2226797Sbonwick */ 2227797Sbonwick txg_wait_synced(spa_get_dsl(spa), 0); 2228797Sbonwick 22291544Seschrock dprintf("start %s mintxg=%llu maxtxg=%llu\n", 22301544Seschrock scrub_type == POOL_SCRUB_RESILVER ? 
"resilver" : "scrub", 22311544Seschrock spa->spa_scrub_mintxg, spa->spa_scrub_maxtxg); 22321544Seschrock 22331544Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 22341544Seschrock vdev_reopen(rvd); /* purge all vdev caches */ 2235789Sahrens vdev_config_dirty(rvd); /* rewrite all disk labels */ 2236789Sahrens vdev_scrub_stat_update(rvd, scrub_type, B_FALSE); 22371544Seschrock spa_config_exit(spa, FTAG); 2238789Sahrens 2239789Sahrens mutex_enter(&spa->spa_scrub_lock); 2240789Sahrens spa->spa_scrub_errors = 0; 2241789Sahrens spa->spa_scrub_active = 1; 22421544Seschrock ASSERT(spa->spa_scrub_inflight == 0); 22431544Seschrock ASSERT(spa->spa_scrub_throttled == 0); 2244789Sahrens 2245789Sahrens while (!spa->spa_scrub_stop) { 2246789Sahrens CALLB_CPR_SAFE_BEGIN(&cprinfo); 22471544Seschrock while (spa->spa_scrub_suspended) { 2248789Sahrens spa->spa_scrub_active = 0; 2249789Sahrens cv_broadcast(&spa->spa_scrub_cv); 2250789Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2251789Sahrens spa->spa_scrub_active = 1; 2252789Sahrens } 2253789Sahrens CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_scrub_lock); 2254789Sahrens 2255789Sahrens if (spa->spa_scrub_restart_txg != 0) 2256789Sahrens break; 2257789Sahrens 2258789Sahrens mutex_exit(&spa->spa_scrub_lock); 2259789Sahrens error = traverse_more(th); 2260789Sahrens mutex_enter(&spa->spa_scrub_lock); 2261789Sahrens if (error != EAGAIN) 2262789Sahrens break; 22631544Seschrock 22641544Seschrock while (spa->spa_scrub_throttled > 0) 22651544Seschrock cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2266789Sahrens } 2267789Sahrens 2268789Sahrens while (spa->spa_scrub_inflight) 2269789Sahrens cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2270789Sahrens 22711601Sbonwick spa->spa_scrub_active = 0; 22721601Sbonwick cv_broadcast(&spa->spa_scrub_cv); 22731601Sbonwick 22741601Sbonwick mutex_exit(&spa->spa_scrub_lock); 22751601Sbonwick 22761601Sbonwick spa_config_enter(spa, RW_WRITER, FTAG); 22771601Sbonwick 22781601Sbonwick mutex_enter(&spa->spa_scrub_lock); 22791601Sbonwick 22801601Sbonwick /* 22811601Sbonwick * Note: we check spa_scrub_restart_txg under both spa_scrub_lock 22821601Sbonwick * AND the spa config lock to synchronize with any config changes 22831601Sbonwick * that revise the DTLs under spa_vdev_enter() / spa_vdev_exit(). 22841601Sbonwick */ 2285789Sahrens if (spa->spa_scrub_restart_txg != 0) 2286789Sahrens error = ERESTART; 2287789Sahrens 22881544Seschrock if (spa->spa_scrub_stop) 22891544Seschrock error = EINTR; 22901544Seschrock 2291789Sahrens /* 22921544Seschrock * Even if there were uncorrectable errors, we consider the scrub 22931544Seschrock * completed. The downside is that if there is a transient error during 22941544Seschrock * a resilver, we won't resilver the data properly to the target. But 22951544Seschrock * if the damage is permanent (more likely) we will resilver forever, 22961544Seschrock * which isn't really acceptable. Since there is enough information for 22971544Seschrock * the user to know what has failed and why, this seems like a more 22981544Seschrock * tractable approach. 2299789Sahrens */ 23001544Seschrock complete = (error == 0); 2301789Sahrens 23021544Seschrock dprintf("end %s to maxtxg=%llu %s, traverse=%d, %llu errors, stop=%u\n", 23031544Seschrock scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub", 2304789Sahrens spa->spa_scrub_maxtxg, complete ? 
"done" : "FAILED", 2305789Sahrens error, spa->spa_scrub_errors, spa->spa_scrub_stop); 2306789Sahrens 2307789Sahrens mutex_exit(&spa->spa_scrub_lock); 2308789Sahrens 2309789Sahrens /* 2310789Sahrens * If the scrub/resilver completed, update all DTLs to reflect this. 2311789Sahrens * Whether it succeeded or not, vacate all temporary scrub DTLs. 2312789Sahrens */ 2313789Sahrens vdev_dtl_reassess(rvd, spa_last_synced_txg(spa) + 1, 2314789Sahrens complete ? spa->spa_scrub_maxtxg : 0, B_TRUE); 2315789Sahrens vdev_scrub_stat_update(rvd, POOL_SCRUB_NONE, complete); 23161544Seschrock spa_errlog_rotate(spa); 23171601Sbonwick 23181544Seschrock spa_config_exit(spa, FTAG); 2319789Sahrens 2320789Sahrens mutex_enter(&spa->spa_scrub_lock); 2321789Sahrens 23221544Seschrock /* 23231544Seschrock * We may have finished replacing a device. 23241544Seschrock * Let the async thread assess this and handle the detach. 23251544Seschrock */ 23261544Seschrock spa_async_request(spa, SPA_ASYNC_REPLACE_DONE); 2327789Sahrens 2328789Sahrens /* 2329789Sahrens * If we were told to restart, our final act is to start a new scrub. 2330789Sahrens */ 2331789Sahrens if (error == ERESTART) 23321544Seschrock spa_async_request(spa, scrub_type == POOL_SCRUB_RESILVER ? 23331544Seschrock SPA_ASYNC_RESILVER : SPA_ASYNC_SCRUB); 2334789Sahrens 23351544Seschrock spa->spa_scrub_type = POOL_SCRUB_NONE; 23361544Seschrock spa->spa_scrub_active = 0; 23371544Seschrock spa->spa_scrub_thread = NULL; 23381544Seschrock cv_broadcast(&spa->spa_scrub_cv); 2339789Sahrens CALLB_CPR_EXIT(&cprinfo); /* drops &spa->spa_scrub_lock */ 2340789Sahrens thread_exit(); 2341789Sahrens } 2342789Sahrens 2343789Sahrens void 2344789Sahrens spa_scrub_suspend(spa_t *spa) 2345789Sahrens { 2346789Sahrens mutex_enter(&spa->spa_scrub_lock); 23471544Seschrock spa->spa_scrub_suspended++; 2348789Sahrens while (spa->spa_scrub_active) { 2349789Sahrens cv_broadcast(&spa->spa_scrub_cv); 2350789Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2351789Sahrens } 2352789Sahrens while (spa->spa_scrub_inflight) 2353789Sahrens cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2354789Sahrens mutex_exit(&spa->spa_scrub_lock); 2355789Sahrens } 2356789Sahrens 2357789Sahrens void 2358789Sahrens spa_scrub_resume(spa_t *spa) 2359789Sahrens { 2360789Sahrens mutex_enter(&spa->spa_scrub_lock); 23611544Seschrock ASSERT(spa->spa_scrub_suspended != 0); 23621544Seschrock if (--spa->spa_scrub_suspended == 0) 2363789Sahrens cv_broadcast(&spa->spa_scrub_cv); 2364789Sahrens mutex_exit(&spa->spa_scrub_lock); 2365789Sahrens } 2366789Sahrens 2367789Sahrens void 2368789Sahrens spa_scrub_restart(spa_t *spa, uint64_t txg) 2369789Sahrens { 2370789Sahrens /* 2371789Sahrens * Something happened (e.g. snapshot create/delete) that means 2372789Sahrens * we must restart any in-progress scrubs. The itinerary will 2373789Sahrens * fix this properly. 
2374789Sahrens */ 2375789Sahrens mutex_enter(&spa->spa_scrub_lock); 2376789Sahrens spa->spa_scrub_restart_txg = txg; 2377789Sahrens mutex_exit(&spa->spa_scrub_lock); 2378789Sahrens } 2379789Sahrens 23801544Seschrock int 23811544Seschrock spa_scrub(spa_t *spa, pool_scrub_type_t type, boolean_t force) 2382789Sahrens { 2383789Sahrens space_seg_t *ss; 2384789Sahrens uint64_t mintxg, maxtxg; 2385789Sahrens vdev_t *rvd = spa->spa_root_vdev; 2386789Sahrens 2387789Sahrens if ((uint_t)type >= POOL_SCRUB_TYPES) 2388789Sahrens return (ENOTSUP); 2389789Sahrens 23901544Seschrock mutex_enter(&spa->spa_scrub_lock); 23911544Seschrock 2392789Sahrens /* 2393789Sahrens * If there's a scrub or resilver already in progress, stop it. 2394789Sahrens */ 2395789Sahrens while (spa->spa_scrub_thread != NULL) { 2396789Sahrens /* 2397789Sahrens * Don't stop a resilver unless forced. 2398789Sahrens */ 23991544Seschrock if (spa->spa_scrub_type == POOL_SCRUB_RESILVER && !force) { 24001544Seschrock mutex_exit(&spa->spa_scrub_lock); 2401789Sahrens return (EBUSY); 24021544Seschrock } 2403789Sahrens spa->spa_scrub_stop = 1; 2404789Sahrens cv_broadcast(&spa->spa_scrub_cv); 2405789Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2406789Sahrens } 2407789Sahrens 2408789Sahrens /* 2409789Sahrens * Terminate the previous traverse. 2410789Sahrens */ 2411789Sahrens if (spa->spa_scrub_th != NULL) { 2412789Sahrens traverse_fini(spa->spa_scrub_th); 2413789Sahrens spa->spa_scrub_th = NULL; 2414789Sahrens } 2415789Sahrens 24161544Seschrock if (rvd == NULL) { 24171544Seschrock ASSERT(spa->spa_scrub_stop == 0); 24181544Seschrock ASSERT(spa->spa_scrub_type == type); 24191544Seschrock ASSERT(spa->spa_scrub_restart_txg == 0); 24201544Seschrock mutex_exit(&spa->spa_scrub_lock); 24211544Seschrock return (0); 24221544Seschrock } 2423789Sahrens 2424789Sahrens mintxg = TXG_INITIAL - 1; 2425789Sahrens maxtxg = spa_last_synced_txg(spa) + 1; 2426789Sahrens 24271544Seschrock mutex_enter(&rvd->vdev_dtl_lock); 2428789Sahrens 24291544Seschrock if (rvd->vdev_dtl_map.sm_space == 0) { 24301544Seschrock /* 24311544Seschrock * The pool-wide DTL is empty. 24321732Sbonwick * If this is a resilver, there's nothing to do except 24331732Sbonwick * check whether any in-progress replacements have completed. 24341544Seschrock */ 24351732Sbonwick if (type == POOL_SCRUB_RESILVER) { 24361544Seschrock type = POOL_SCRUB_NONE; 24371732Sbonwick spa_async_request(spa, SPA_ASYNC_REPLACE_DONE); 24381732Sbonwick } 24391544Seschrock } else { 24401544Seschrock /* 24411544Seschrock * The pool-wide DTL is non-empty. 24421544Seschrock * If this is a normal scrub, upgrade to a resilver instead. 24431544Seschrock */ 24441544Seschrock if (type == POOL_SCRUB_EVERYTHING) 24451544Seschrock type = POOL_SCRUB_RESILVER; 24461544Seschrock } 2447789Sahrens 24481544Seschrock if (type == POOL_SCRUB_RESILVER) { 2449789Sahrens /* 2450789Sahrens * Determine the resilvering boundaries. 2451789Sahrens * 2452789Sahrens * Note: (mintxg, maxtxg) is an open interval, 2453789Sahrens * i.e. mintxg and maxtxg themselves are not included. 2454789Sahrens * 2455789Sahrens * Note: for maxtxg, we MIN with spa_last_synced_txg(spa) + 1 2456789Sahrens * so we don't claim to resilver a txg that's still changing. 
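 *
 * Worked example (numbers invented): if the pool-wide DTL covers
 * txgs [100, 200) and spa_last_synced_txg() returns 150, then
 *	mintxg = 100 - 1 = 99
 *	maxtxg = MIN(200, 150 + 1) = 151
 * and the traverse resilvers births in the open interval (99, 151),
 * i.e. txgs 100 through 150.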
2457789Sahrens */ 2458789Sahrens ss = avl_first(&rvd->vdev_dtl_map.sm_root); 24591544Seschrock mintxg = ss->ss_start - 1; 2460789Sahrens ss = avl_last(&rvd->vdev_dtl_map.sm_root); 24611544Seschrock maxtxg = MIN(ss->ss_end, maxtxg); 2462789Sahrens } 2463789Sahrens 24641544Seschrock mutex_exit(&rvd->vdev_dtl_lock); 24651544Seschrock 24661544Seschrock spa->spa_scrub_stop = 0; 24671544Seschrock spa->spa_scrub_type = type; 24681544Seschrock spa->spa_scrub_restart_txg = 0; 24691544Seschrock 24701544Seschrock if (type != POOL_SCRUB_NONE) { 24711544Seschrock spa->spa_scrub_mintxg = mintxg; 2472789Sahrens spa->spa_scrub_maxtxg = maxtxg; 2473789Sahrens spa->spa_scrub_th = traverse_init(spa, spa_scrub_cb, NULL, 24741635Sbonwick ADVANCE_PRE | ADVANCE_PRUNE | ADVANCE_ZIL, 24751635Sbonwick ZIO_FLAG_CANFAIL); 2476789Sahrens traverse_add_pool(spa->spa_scrub_th, mintxg, maxtxg); 2477789Sahrens spa->spa_scrub_thread = thread_create(NULL, 0, 2478789Sahrens spa_scrub_thread, spa, 0, &p0, TS_RUN, minclsyspri); 2479789Sahrens } 2480789Sahrens 24811544Seschrock mutex_exit(&spa->spa_scrub_lock); 24821544Seschrock 2483789Sahrens return (0); 2484789Sahrens } 2485789Sahrens 24861544Seschrock /* 24871544Seschrock * ========================================================================== 24881544Seschrock * SPA async task processing 24891544Seschrock * ========================================================================== 24901544Seschrock */ 24911544Seschrock 24921544Seschrock static void 24931544Seschrock spa_async_reopen(spa_t *spa) 2494789Sahrens { 24951544Seschrock vdev_t *rvd = spa->spa_root_vdev; 24961544Seschrock vdev_t *tvd; 24971544Seschrock int c; 24981544Seschrock 24991544Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 25001544Seschrock 25011544Seschrock for (c = 0; c < rvd->vdev_children; c++) { 25021544Seschrock tvd = rvd->vdev_child[c]; 25031544Seschrock if (tvd->vdev_reopen_wanted) { 25041544Seschrock tvd->vdev_reopen_wanted = 0; 25051544Seschrock vdev_reopen(tvd); 25061544Seschrock } 25071544Seschrock } 2508789Sahrens 25091544Seschrock spa_config_exit(spa, FTAG); 25101544Seschrock } 25111544Seschrock 25121544Seschrock static void 25131544Seschrock spa_async_thread(spa_t *spa) 25141544Seschrock { 25151544Seschrock int tasks; 25161544Seschrock 25171544Seschrock ASSERT(spa->spa_sync_on); 2518789Sahrens 25191544Seschrock mutex_enter(&spa->spa_async_lock); 25201544Seschrock tasks = spa->spa_async_tasks; 25211544Seschrock spa->spa_async_tasks = 0; 25221544Seschrock mutex_exit(&spa->spa_async_lock); 25231544Seschrock 25241544Seschrock /* 25251635Sbonwick * See if the config needs to be updated. 25261635Sbonwick */ 25271635Sbonwick if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 25281635Sbonwick mutex_enter(&spa_namespace_lock); 25291635Sbonwick spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 25301635Sbonwick mutex_exit(&spa_namespace_lock); 25311635Sbonwick } 25321635Sbonwick 25331635Sbonwick /* 25341544Seschrock * See if any devices need to be reopened. 25351544Seschrock */ 25361544Seschrock if (tasks & SPA_ASYNC_REOPEN) 25371544Seschrock spa_async_reopen(spa); 25381544Seschrock 25391544Seschrock /* 25401544Seschrock * If any devices are done replacing, detach them. 25411544Seschrock */ 25421544Seschrock if (tasks & SPA_ASYNC_REPLACE_DONE) 2543789Sahrens spa_vdev_replace_done(spa); 2544789Sahrens 25451544Seschrock /* 25461544Seschrock * Kick off a scrub. 
25471544Seschrock */ 25481544Seschrock if (tasks & SPA_ASYNC_SCRUB) 25491544Seschrock VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_TRUE) == 0); 25501544Seschrock 25511544Seschrock /* 25521544Seschrock * Kick off a resilver. 25531544Seschrock */ 25541544Seschrock if (tasks & SPA_ASYNC_RESILVER) 25551544Seschrock VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 25561544Seschrock 25571544Seschrock /* 25581544Seschrock * Let the world know that we're done. 25591544Seschrock */ 25601544Seschrock mutex_enter(&spa->spa_async_lock); 25611544Seschrock spa->spa_async_thread = NULL; 25621544Seschrock cv_broadcast(&spa->spa_async_cv); 25631544Seschrock mutex_exit(&spa->spa_async_lock); 25641544Seschrock thread_exit(); 25651544Seschrock } 25661544Seschrock 25671544Seschrock void 25681544Seschrock spa_async_suspend(spa_t *spa) 25691544Seschrock { 25701544Seschrock mutex_enter(&spa->spa_async_lock); 25711544Seschrock spa->spa_async_suspended++; 25721544Seschrock while (spa->spa_async_thread != NULL) 25731544Seschrock cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 25741544Seschrock mutex_exit(&spa->spa_async_lock); 25751544Seschrock } 25761544Seschrock 25771544Seschrock void 25781544Seschrock spa_async_resume(spa_t *spa) 25791544Seschrock { 25801544Seschrock mutex_enter(&spa->spa_async_lock); 25811544Seschrock ASSERT(spa->spa_async_suspended != 0); 25821544Seschrock spa->spa_async_suspended--; 25831544Seschrock mutex_exit(&spa->spa_async_lock); 25841544Seschrock } 25851544Seschrock 25861544Seschrock static void 25871544Seschrock spa_async_dispatch(spa_t *spa) 25881544Seschrock { 25891544Seschrock mutex_enter(&spa->spa_async_lock); 25901544Seschrock if (spa->spa_async_tasks && !spa->spa_async_suspended && 25911635Sbonwick spa->spa_async_thread == NULL && 25921635Sbonwick rootdir != NULL && !vn_is_readonly(rootdir)) 25931544Seschrock spa->spa_async_thread = thread_create(NULL, 0, 25941544Seschrock spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 25951544Seschrock mutex_exit(&spa->spa_async_lock); 25961544Seschrock } 25971544Seschrock 25981544Seschrock void 25991544Seschrock spa_async_request(spa_t *spa, int task) 26001544Seschrock { 26011544Seschrock mutex_enter(&spa->spa_async_lock); 26021544Seschrock spa->spa_async_tasks |= task; 26031544Seschrock mutex_exit(&spa->spa_async_lock); 2604789Sahrens } 2605789Sahrens 2606789Sahrens /* 2607789Sahrens * ========================================================================== 2608789Sahrens * SPA syncing routines 2609789Sahrens * ========================================================================== 2610789Sahrens */ 2611789Sahrens 2612789Sahrens static void 2613789Sahrens spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 2614789Sahrens { 2615789Sahrens bplist_t *bpl = &spa->spa_sync_bplist; 2616789Sahrens dmu_tx_t *tx; 2617789Sahrens blkptr_t blk; 2618789Sahrens uint64_t itor = 0; 2619789Sahrens zio_t *zio; 2620789Sahrens int error; 2621789Sahrens uint8_t c = 1; 2622789Sahrens 2623789Sahrens zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD); 2624789Sahrens 2625789Sahrens while (bplist_iterate(bpl, &itor, &blk) == 0) 2626789Sahrens zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL)); 2627789Sahrens 2628789Sahrens error = zio_wait(zio); 2629789Sahrens ASSERT3U(error, ==, 0); 2630789Sahrens 2631789Sahrens tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2632789Sahrens bplist_vacate(bpl, tx); 2633789Sahrens 2634789Sahrens /* 2635789Sahrens * Pre-dirty the first block so we sync to convergence faster. 
2636789Sahrens * (Usually only the first block is needed.) 2637789Sahrens */ 2638789Sahrens dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 2639789Sahrens dmu_tx_commit(tx); 2640789Sahrens } 2641789Sahrens 2642789Sahrens static void 26432082Seschrock spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 26442082Seschrock { 26452082Seschrock char *packed = NULL; 26462082Seschrock size_t nvsize = 0; 26472082Seschrock dmu_buf_t *db; 26482082Seschrock 26492082Seschrock VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 26502082Seschrock 26512082Seschrock packed = kmem_alloc(nvsize, KM_SLEEP); 26522082Seschrock 26532082Seschrock VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 26542082Seschrock KM_SLEEP) == 0); 26552082Seschrock 26562082Seschrock dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx); 26572082Seschrock 26582082Seschrock kmem_free(packed, nvsize); 26592082Seschrock 26602082Seschrock VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 26612082Seschrock dmu_buf_will_dirty(db, tx); 26622082Seschrock *(uint64_t *)db->db_data = nvsize; 26632082Seschrock dmu_buf_rele(db, FTAG); 26642082Seschrock } 26652082Seschrock 26662082Seschrock static void 26672082Seschrock spa_sync_spares(spa_t *spa, dmu_tx_t *tx) 26682082Seschrock { 26692082Seschrock nvlist_t *nvroot; 26702082Seschrock nvlist_t **spares; 26712082Seschrock int i; 26722082Seschrock 26732082Seschrock if (!spa->spa_sync_spares) 26742082Seschrock return; 26752082Seschrock 26762082Seschrock /* 26772082Seschrock * Update the MOS nvlist describing the list of available spares. 26782082Seschrock * spa_validate_spares() will have already made sure this nvlist is 26792082Seschrock * valid and the vdevs are labelled appropriately. 
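 * (spa_sync_nvlist() above packs the nvlist as XDR into the object's
 * data blocks and records the packed size in the bonus buffer.)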
26802082Seschrock */ 26812082Seschrock if (spa->spa_spares_object == 0) { 26822082Seschrock spa->spa_spares_object = dmu_object_alloc(spa->spa_meta_objset, 26832082Seschrock DMU_OT_PACKED_NVLIST, 1 << 14, 26842082Seschrock DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 26852082Seschrock VERIFY(zap_update(spa->spa_meta_objset, 26862082Seschrock DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SPARES, 26872082Seschrock sizeof (uint64_t), 1, &spa->spa_spares_object, tx) == 0); 26882082Seschrock } 26892082Seschrock 26902082Seschrock VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 26912082Seschrock if (spa->spa_nspares == 0) { 26922082Seschrock VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 26932082Seschrock NULL, 0) == 0); 26942082Seschrock } else { 26952082Seschrock spares = kmem_alloc(spa->spa_nspares * sizeof (void *), 26962082Seschrock KM_SLEEP); 26972082Seschrock for (i = 0; i < spa->spa_nspares; i++) 26982082Seschrock spares[i] = vdev_config_generate(spa, 26992082Seschrock spa->spa_spares[i], B_FALSE, B_TRUE); 27002082Seschrock VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 27012082Seschrock spares, spa->spa_nspares) == 0); 27022082Seschrock for (i = 0; i < spa->spa_nspares; i++) 27032082Seschrock nvlist_free(spares[i]); 27042082Seschrock kmem_free(spares, spa->spa_nspares * sizeof (void *)); 27052082Seschrock } 27062082Seschrock 27072082Seschrock spa_sync_nvlist(spa, spa->spa_spares_object, nvroot, tx); 27082082Seschrock 27092082Seschrock spa->spa_sync_spares = B_FALSE; 27102082Seschrock } 27112082Seschrock 27122082Seschrock static void 2713789Sahrens spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 2714789Sahrens { 2715789Sahrens nvlist_t *config; 2716789Sahrens 2717789Sahrens if (list_is_empty(&spa->spa_dirty_list)) 2718789Sahrens return; 2719789Sahrens 2720789Sahrens config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE); 2721789Sahrens 27221635Sbonwick if (spa->spa_config_syncing) 27231635Sbonwick nvlist_free(spa->spa_config_syncing); 27241635Sbonwick spa->spa_config_syncing = config; 2725789Sahrens 27262082Seschrock spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 2727789Sahrens } 2728789Sahrens 2729789Sahrens /* 2730789Sahrens * Sync the specified transaction group. New blocks may be dirtied as 2731789Sahrens * part of the process, so we iterate until it converges. 2732789Sahrens */ 2733789Sahrens void 2734789Sahrens spa_sync(spa_t *spa, uint64_t txg) 2735789Sahrens { 2736789Sahrens dsl_pool_t *dp = spa->spa_dsl_pool; 2737789Sahrens objset_t *mos = spa->spa_meta_objset; 2738789Sahrens bplist_t *bpl = &spa->spa_sync_bplist; 27391635Sbonwick vdev_t *rvd = spa->spa_root_vdev; 2740789Sahrens vdev_t *vd; 2741789Sahrens dmu_tx_t *tx; 2742789Sahrens int dirty_vdevs; 2743789Sahrens 2744789Sahrens /* 2745789Sahrens * Lock out configuration changes. 2746789Sahrens */ 27471544Seschrock spa_config_enter(spa, RW_READER, FTAG); 2748789Sahrens 2749789Sahrens spa->spa_syncing_txg = txg; 2750789Sahrens spa->spa_sync_pass = 0; 2751789Sahrens 27521544Seschrock VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj)); 2753789Sahrens 27542082Seschrock tx = dmu_tx_create_assigned(dp, txg); 27552082Seschrock 27562082Seschrock /* 27572082Seschrock * If we are upgrading to ZFS_VERSION_RAIDZ_DEFLATE this txg, 27582082Seschrock * set spa_deflate if we have no raid-z vdevs. 
27592082Seschrock */
27602082Seschrock if (spa->spa_ubsync.ub_version < ZFS_VERSION_RAIDZ_DEFLATE &&
27612082Seschrock spa->spa_uberblock.ub_version >= ZFS_VERSION_RAIDZ_DEFLATE) {
27622082Seschrock int i;
27632082Seschrock 
27642082Seschrock for (i = 0; i < rvd->vdev_children; i++) {
27652082Seschrock vd = rvd->vdev_child[i];
27662082Seschrock if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
27672082Seschrock break;
27682082Seschrock }
27692082Seschrock if (i == rvd->vdev_children) {
27702082Seschrock spa->spa_deflate = TRUE;
27712082Seschrock VERIFY(0 == zap_add(spa->spa_meta_objset,
27722082Seschrock DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
27732082Seschrock sizeof (uint64_t), 1, &spa->spa_deflate, tx));
27742082Seschrock }
27752082Seschrock }
27762082Seschrock 
2777789Sahrens /*
2778789Sahrens * If anything has changed in this txg, push the deferred frees
2779789Sahrens * from the previous txg. If not, leave them alone so that we
2780789Sahrens * don't generate work on an otherwise idle system.
2781789Sahrens */
2782789Sahrens if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
2783789Sahrens !txg_list_empty(&dp->dp_dirty_dirs, txg))
2784789Sahrens spa_sync_deferred_frees(spa, txg);
2785789Sahrens 
2786789Sahrens /*
2787789Sahrens * Iterate to convergence.
2788789Sahrens */
2789789Sahrens do {
2790789Sahrens spa->spa_sync_pass++;
2791789Sahrens 
2792789Sahrens spa_sync_config_object(spa, tx);
27932082Seschrock spa_sync_spares(spa, tx);
27941544Seschrock spa_errlog_sync(spa, txg);
2795789Sahrens dsl_pool_sync(dp, txg);
2796789Sahrens 
2797789Sahrens dirty_vdevs = 0;
2798789Sahrens while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
2799789Sahrens vdev_sync(vd, txg);
2800789Sahrens dirty_vdevs++;
2801789Sahrens }
2802789Sahrens 
2803789Sahrens bplist_sync(bpl, tx);
2804789Sahrens } while (dirty_vdevs);
2805789Sahrens 
2806789Sahrens bplist_close(bpl);
2807789Sahrens 
2808789Sahrens dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
2809789Sahrens 
2810789Sahrens /*
2811789Sahrens * Rewrite the vdev configuration (which includes the uberblock)
2812789Sahrens * to commit the transaction group.
28131635Sbonwick *
28141635Sbonwick * If there are any dirty vdevs, sync the uberblock to all vdevs.
28151635Sbonwick * Otherwise, pick a random top-level vdev that's known to be
28161635Sbonwick * visible in the config cache (see spa_vdev_add() for details).
28171635Sbonwick * If the write fails, try the next vdev until we've tried them all.
2818789Sahrens */
28191635Sbonwick if (!list_is_empty(&spa->spa_dirty_list)) {
28201635Sbonwick VERIFY(vdev_config_sync(rvd, txg) == 0);
28211635Sbonwick } else {
28221635Sbonwick int children = rvd->vdev_children;
28231635Sbonwick int c0 = spa_get_random(children);
28241635Sbonwick int c;
28251635Sbonwick 
28261635Sbonwick for (c = 0; c < children; c++) {
28271635Sbonwick vd = rvd->vdev_child[(c0 + c) % children];
28281635Sbonwick if (vd->vdev_ms_array == 0)
28291635Sbonwick continue;
28301635Sbonwick if (vdev_config_sync(vd, txg) == 0)
28311635Sbonwick break;
28321635Sbonwick }
28331635Sbonwick if (c == children)
28341635Sbonwick VERIFY(vdev_config_sync(rvd, txg) == 0);
28351635Sbonwick }
28361635Sbonwick 
28372082Seschrock dmu_tx_commit(tx);
28382082Seschrock 
28391635Sbonwick /*
28401635Sbonwick * Clear the dirty config list. 
28411635Sbonwick */ 28421635Sbonwick while ((vd = list_head(&spa->spa_dirty_list)) != NULL) 28431635Sbonwick vdev_config_clean(vd); 28441635Sbonwick 28451635Sbonwick /* 28461635Sbonwick * Now that the new config has synced transactionally, 28471635Sbonwick * let it become visible to the config cache. 28481635Sbonwick */ 28491635Sbonwick if (spa->spa_config_syncing != NULL) { 28501635Sbonwick spa_config_set(spa, spa->spa_config_syncing); 28511635Sbonwick spa->spa_config_txg = txg; 28521635Sbonwick spa->spa_config_syncing = NULL; 28531635Sbonwick } 2854789Sahrens 2855789Sahrens /* 2856789Sahrens * Make a stable copy of the fully synced uberblock. 2857789Sahrens * We use this as the root for pool traversals. 2858789Sahrens */ 2859789Sahrens spa->spa_traverse_wanted = 1; /* tells traverse_more() to stop */ 2860789Sahrens 2861789Sahrens spa_scrub_suspend(spa); /* stop scrubbing and finish I/Os */ 2862789Sahrens 2863789Sahrens rw_enter(&spa->spa_traverse_lock, RW_WRITER); 2864789Sahrens spa->spa_traverse_wanted = 0; 2865789Sahrens spa->spa_ubsync = spa->spa_uberblock; 2866789Sahrens rw_exit(&spa->spa_traverse_lock); 2867789Sahrens 2868789Sahrens spa_scrub_resume(spa); /* resume scrub with new ubsync */ 2869789Sahrens 2870789Sahrens /* 2871789Sahrens * Clean up the ZIL records for the synced txg. 2872789Sahrens */ 2873789Sahrens dsl_pool_zil_clean(dp); 2874789Sahrens 2875789Sahrens /* 2876789Sahrens * Update usable space statistics. 2877789Sahrens */ 2878789Sahrens while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 2879789Sahrens vdev_sync_done(vd, txg); 2880789Sahrens 2881789Sahrens /* 2882789Sahrens * It had better be the case that we didn't dirty anything 28832082Seschrock * since vdev_config_sync(). 2884789Sahrens */ 2885789Sahrens ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 2886789Sahrens ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 2887789Sahrens ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 2888789Sahrens ASSERT(bpl->bpl_queue == NULL); 2889789Sahrens 28901544Seschrock spa_config_exit(spa, FTAG); 28911544Seschrock 28921544Seschrock /* 28931544Seschrock * If any async tasks have been requested, kick them off. 28941544Seschrock */ 28951544Seschrock spa_async_dispatch(spa); 2896789Sahrens } 2897789Sahrens 2898789Sahrens /* 2899789Sahrens * Sync all pools. We don't want to hold the namespace lock across these 2900789Sahrens * operations, so we take a reference on the spa_t and drop the lock during the 2901789Sahrens * sync. 2902789Sahrens */ 2903789Sahrens void 2904789Sahrens spa_sync_allpools(void) 2905789Sahrens { 2906789Sahrens spa_t *spa = NULL; 2907789Sahrens mutex_enter(&spa_namespace_lock); 2908789Sahrens while ((spa = spa_next(spa)) != NULL) { 2909789Sahrens if (spa_state(spa) != POOL_STATE_ACTIVE) 2910789Sahrens continue; 2911789Sahrens spa_open_ref(spa, FTAG); 2912789Sahrens mutex_exit(&spa_namespace_lock); 2913789Sahrens txg_wait_synced(spa_get_dsl(spa), 0); 2914789Sahrens mutex_enter(&spa_namespace_lock); 2915789Sahrens spa_close(spa, FTAG); 2916789Sahrens } 2917789Sahrens mutex_exit(&spa_namespace_lock); 2918789Sahrens } 2919789Sahrens 2920789Sahrens /* 2921789Sahrens * ========================================================================== 2922789Sahrens * Miscellaneous routines 2923789Sahrens * ========================================================================== 2924789Sahrens */ 2925789Sahrens 2926789Sahrens /* 2927789Sahrens * Remove all pools in the system. 
2928789Sahrens */ 2929789Sahrens void 2930789Sahrens spa_evict_all(void) 2931789Sahrens { 2932789Sahrens spa_t *spa; 2933789Sahrens 2934789Sahrens /* 2935789Sahrens * Remove all cached state. All pools should be closed now, 2936789Sahrens * so every spa in the AVL tree should be unreferenced. 2937789Sahrens */ 2938789Sahrens mutex_enter(&spa_namespace_lock); 2939789Sahrens while ((spa = spa_next(NULL)) != NULL) { 2940789Sahrens /* 29411544Seschrock * Stop async tasks. The async thread may need to detach 29421544Seschrock * a device that's been replaced, which requires grabbing 29431544Seschrock * spa_namespace_lock, so we must drop it here. 2944789Sahrens */ 2945789Sahrens spa_open_ref(spa, FTAG); 2946789Sahrens mutex_exit(&spa_namespace_lock); 29471544Seschrock spa_async_suspend(spa); 2948789Sahrens VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0); 2949789Sahrens mutex_enter(&spa_namespace_lock); 2950789Sahrens spa_close(spa, FTAG); 2951789Sahrens 2952789Sahrens if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 2953789Sahrens spa_unload(spa); 2954789Sahrens spa_deactivate(spa); 2955789Sahrens } 2956789Sahrens spa_remove(spa); 2957789Sahrens } 2958789Sahrens mutex_exit(&spa_namespace_lock); 2959789Sahrens } 29601544Seschrock 29611544Seschrock vdev_t * 29621544Seschrock spa_lookup_by_guid(spa_t *spa, uint64_t guid) 29631544Seschrock { 29641544Seschrock return (vdev_lookup_by_guid(spa->spa_root_vdev, guid)); 29651544Seschrock } 29661760Seschrock 29671760Seschrock void 29681760Seschrock spa_upgrade(spa_t *spa) 29691760Seschrock { 29701760Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 29711760Seschrock 29721760Seschrock /* 29731760Seschrock * This should only be called for a non-faulted pool, and since a 29741760Seschrock * future version would result in an unopenable pool, this shouldn't be 29751760Seschrock * possible. 29761760Seschrock */ 29771760Seschrock ASSERT(spa->spa_uberblock.ub_version <= ZFS_VERSION); 29781760Seschrock 29791760Seschrock spa->spa_uberblock.ub_version = ZFS_VERSION; 29801760Seschrock vdev_config_dirty(spa->spa_root_vdev); 29811760Seschrock 29821760Seschrock spa_config_exit(spa, FTAG); 29832082Seschrock 29842082Seschrock txg_wait_synced(spa_get_dsl(spa), 0); 29851760Seschrock } 29862082Seschrock 29872082Seschrock boolean_t 29882082Seschrock spa_has_spare(spa_t *spa, uint64_t guid) 29892082Seschrock { 29902082Seschrock int i; 29912082Seschrock 29922082Seschrock for (i = 0; i < spa->spa_nspares; i++) 29932082Seschrock if (spa->spa_spares[i]->vdev_guid == guid) 29942082Seschrock return (B_TRUE); 29952082Seschrock 29962082Seschrock return (B_FALSE); 29972082Seschrock } 2998