/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing
 * a pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>
#include <sys/callb.h>

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

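/*
 * Comparison function for the error-list AVL trees; entries are ordered by
 * their zbookmark_t.
 */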
static int
spa_error_entry_compare(const void *a, const void *b)
{
        spa_error_entry_t *sa = (spa_error_entry_t *)a;
        spa_error_entry_t *sb = (spa_error_entry_t *)b;
        int ret;

        ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
            sizeof (zbookmark_t));

        if (ret < 0)
                return (-1);
        else if (ret > 0)
                return (1);
        else
                return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
        ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

        bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
        bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

        avl_create(&spa->spa_errlist_scrub,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
        avl_create(&spa->spa_errlist_last,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
}

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa)
{
        int t;

        ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

        spa->spa_state = POOL_STATE_ACTIVE;

        spa->spa_normal_class = metaslab_class_create();

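        /*
         * Create the per-I/O-type taskqs used to issue zios and to process
         * their interrupt-time completions.
         */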
        for (t = 0; t < ZIO_TYPES; t++) {
                spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue",
                    8, maxclsyspri, 50, INT_MAX,
                    TASKQ_PREPOPULATE);
                spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr",
                    8, maxclsyspri, 50, INT_MAX,
                    TASKQ_PREPOPULATE);
        }

        rw_init(&spa->spa_traverse_lock, NULL, RW_DEFAULT, NULL);

        list_create(&spa->spa_dirty_list, sizeof (vdev_t),
            offsetof(vdev_t, vdev_dirty_node));

        txg_list_create(&spa->spa_vdev_txg_list,
            offsetof(struct vdev, vdev_txg_node));

        avl_create(&spa->spa_errlist_scrub,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
        avl_create(&spa->spa_errlist_last,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
        int t;

        ASSERT(spa->spa_sync_on == B_FALSE);
        ASSERT(spa->spa_dsl_pool == NULL);
        ASSERT(spa->spa_root_vdev == NULL);

        ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

        txg_list_destroy(&spa->spa_vdev_txg_list);

        list_destroy(&spa->spa_dirty_list);

        rw_destroy(&spa->spa_traverse_lock);

        for (t = 0; t < ZIO_TYPES; t++) {
                taskq_destroy(spa->spa_zio_issue_taskq[t]);
                taskq_destroy(spa->spa_zio_intr_taskq[t]);
                spa->spa_zio_issue_taskq[t] = NULL;
                spa->spa_zio_intr_taskq[t] = NULL;
        }

        metaslab_class_destroy(spa->spa_normal_class);
        spa->spa_normal_class = NULL;

        /*
         * If this was part of an import or the open otherwise failed, we may
         * still have errors left in the queues.  Empty them just in case.
         */
        spa_errlog_drain(spa);

        avl_destroy(&spa->spa_errlist_scrub);
        avl_destroy(&spa->spa_errlist_last);

        spa->spa_state = POOL_STATE_UNINITIALIZED;
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
        nvlist_t **child;
        uint_t c, children;
        int error;

        if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
                return (error);

        if ((*vdp)->vdev_ops->vdev_op_leaf)
                return (0);

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0) {
                vdev_free(*vdp);
                *vdp = NULL;
                return (EINVAL);
        }

        for (c = 0; c < children; c++) {
                vdev_t *vd;
                if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
                    atype)) != 0) {
                        vdev_free(*vdp);
                        *vdp = NULL;
                        return (error);
                }
        }

        ASSERT(*vdp != NULL);

        return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
        int i;

        /*
         * Stop async tasks.
         */
        spa_async_suspend(spa);

        /*
         * Stop syncing.
         */
        if (spa->spa_sync_on) {
                txg_sync_stop(spa->spa_dsl_pool);
                spa->spa_sync_on = B_FALSE;
        }

        /*
         * Wait for any outstanding prefetch I/O to complete.
         */
        spa_config_enter(spa, RW_WRITER, FTAG);
        spa_config_exit(spa, FTAG);

        /*
         * Close the dsl pool.
         */
        if (spa->spa_dsl_pool) {
                dsl_pool_close(spa->spa_dsl_pool);
                spa->spa_dsl_pool = NULL;
        }

        /*
         * Close all vdevs.
         */
        if (spa->spa_root_vdev)
                vdev_free(spa->spa_root_vdev);
        ASSERT(spa->spa_root_vdev == NULL);

        for (i = 0; i < spa->spa_nspares; i++)
                vdev_free(spa->spa_spares[i]);
        if (spa->spa_spares) {
                kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));
                spa->spa_spares = NULL;
        }
        if (spa->spa_sparelist) {
                nvlist_free(spa->spa_sparelist);
                spa->spa_sparelist = NULL;
        }

        spa->spa_async_suspended = 0;
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_sparelist'.  We parse this into vdevs, try to open them, and then
 * re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
        nvlist_t **spares;
        uint_t nspares;
        int i;

        /*
         * First, close and free any existing spare vdevs.
         */
        for (i = 0; i < spa->spa_nspares; i++) {
                vdev_close(spa->spa_spares[i]);
                vdev_free(spa->spa_spares[i]);
        }
        if (spa->spa_spares)
                kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));

        if (spa->spa_sparelist == NULL)
                nspares = 0;
        else
                VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
                    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

        spa->spa_nspares = (int)nspares;
        spa->spa_spares = NULL;

        if (nspares == 0)
                return;

        /*
         * Construct the array of vdevs, opening them to get status in the
         * process.
         */
        spa->spa_spares = kmem_alloc(nspares * sizeof (void *), KM_SLEEP);
        for (i = 0; i < spa->spa_nspares; i++) {
                vdev_t *vd;

                VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
                    VDEV_ALLOC_SPARE) == 0);
                ASSERT(vd != NULL);

                spa->spa_spares[i] = vd;

                if (vdev_open(vd) != 0)
                        continue;

                vd->vdev_top = vd;
                (void) vdev_validate_spare(vd);
        }

        /*
         * Recompute the stashed list of spares, with status information
         * this time.
         */
        VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
            DATA_TYPE_NVLIST_ARRAY) == 0);

        spares = kmem_alloc(spa->spa_nspares * sizeof (void *), KM_SLEEP);
        for (i = 0; i < spa->spa_nspares; i++)
                spares[i] = vdev_config_generate(spa, spa->spa_spares[i],
                    B_TRUE, B_TRUE);
        VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
            spares, spa->spa_nspares) == 0);
        for (i = 0; i < spa->spa_nspares; i++)
                nvlist_free(spares[i]);
        kmem_free(spares, spa->spa_nspares * sizeof (void *));
}

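/*
 * Read a packed nvlist out of the MOS: the object's bonus buffer holds the
 * packed size, and the object contents hold the packed nvlist itself.
 */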
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
        dmu_buf_t *db;
        char *packed = NULL;
        size_t nvsize = 0;
        int error;
        *value = NULL;

        VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
        nvsize = *(uint64_t *)db->db_data;
        dmu_buf_rele(db, FTAG);

        packed = kmem_alloc(nvsize, KM_SLEEP);
        error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
        if (error == 0)
                error = nvlist_unpack(packed, nvsize, value, 0);
        kmem_free(packed, nvsize);

        return (error);
}

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
        int error = 0;
        nvlist_t *nvroot = NULL;
        vdev_t *rvd;
        uberblock_t *ub = &spa->spa_uberblock;
        uint64_t config_cache_txg = spa->spa_config_txg;
        uint64_t pool_guid;
        uint64_t version;
        zio_t *zio;

        spa->spa_load_state = state;

        if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
                error = EINVAL;
                goto out;
        }

        /*
         * Versioning wasn't explicitly added to the label until later, so if
         * it's not present treat it as the initial version.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
                version = ZFS_VERSION_INITIAL;

        (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
            &spa->spa_config_txg);

        if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
            spa_guid_exists(pool_guid, 0)) {
                error = EEXIST;
                goto out;
        }

        spa->spa_load_guid = pool_guid;

        /*
         * Parse the configuration into a vdev tree.  We explicitly set the
         * value that will be returned by spa_version() since parsing the
         * configuration requires knowing the version number.
         */
        spa_config_enter(spa, RW_WRITER, FTAG);
        spa->spa_ubsync.ub_version = version;
        error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
        spa_config_exit(spa, FTAG);

        if (error != 0)
                goto out;

        ASSERT(spa->spa_root_vdev == rvd);
        ASSERT(spa_guid(spa) == pool_guid);

        /*
         * Try to open all vdevs, loading each label in the process.
         */
        if (vdev_open(rvd) != 0) {
                error = ENXIO;
                goto out;
        }

        /*
         * Validate the labels for all leaf vdevs.  We need to grab the config
         * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD
         * flag.
         */
        spa_config_enter(spa, RW_READER, FTAG);
        error = vdev_validate(rvd);
        spa_config_exit(spa, FTAG);

        if (error != 0) {
                error = EBADF;
                goto out;
        }

        if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
                error = ENXIO;
                goto out;
        }

        /*
         * Find the best uberblock.
         */
        bzero(ub, sizeof (uberblock_t));

        zio = zio_root(spa, NULL, NULL,
            ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
        vdev_uberblock_load(zio, rvd, ub);
        error = zio_wait(zio);

        /*
         * If we weren't able to find a single valid uberblock, return failure.
         */
        if (ub->ub_txg == 0) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = ENXIO;
                goto out;
        }

        /*
         * If the pool is newer than the code, we can't open it.
         */
        if (ub->ub_version > ZFS_VERSION) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_VERSION_NEWER);
                error = ENOTSUP;
                goto out;
        }

        /*
         * If the vdev guid sum doesn't match the uberblock, we have an
         * incomplete configuration.
         */
        if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_BAD_GUID_SUM);
                error = ENXIO;
                goto out;
        }

        /*
         * Initialize internal SPA structures.
         */
        spa->spa_state = POOL_STATE_ACTIVE;
        spa->spa_ubsync = spa->spa_uberblock;
        spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
        error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
        if (error) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                goto out;
        }
        spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

        if (zap_lookup(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
            sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }

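        /*
         * If the caller-supplied config wasn't read from the MOS, fetch the
         * copy stored in the pool's config object and restart the load using
         * it as the trusted config.
         */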
        if (!mosconfig) {
                nvlist_t *newconfig;

                if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
                        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        error = EIO;
                        goto out;
                }

                spa_config_set(spa, newconfig);
                spa_unload(spa);
                spa_deactivate(spa);
                spa_activate(spa);

                return (spa_load(spa, newconfig, state, B_TRUE));
        }

        if (zap_lookup(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
            sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }

        /*
         * Load the bit that tells us to use the new accounting function
         * (raid-z deflation).  If we have an older pool, this will not
         * be present.
         */
        error = zap_lookup(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
            sizeof (uint64_t), 1, &spa->spa_deflate);
        if (error != 0 && error != ENOENT) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }

        /*
         * Load the persistent error log.  If we have an older pool, this will
         * not be present.
         */
        error = zap_lookup(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
            sizeof (uint64_t), 1, &spa->spa_errlog_last);
        if (error != 0 && error != ENOENT) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }

        error = zap_lookup(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
            sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
        if (error != 0 && error != ENOENT) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }

        /*
         * Load any hot spares for this pool.
         */
        error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares_object);
        if (error != 0 && error != ENOENT) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }
        if (error == 0) {
                ASSERT(spa_version(spa) >= ZFS_VERSION_SPARES);
                if (load_nvlist(spa, spa->spa_spares_object,
                    &spa->spa_sparelist) != 0) {
                        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        error = EIO;
                        goto out;
                }

                spa_config_enter(spa, RW_WRITER, FTAG);
                spa_load_spares(spa);
                spa_config_exit(spa, FTAG);
        }

        /*
         * Load the vdev state for all toplevel vdevs.
         */
        vdev_load(rvd);

        /*
         * Propagate the leaf DTLs we just loaded all the way up the tree.
         */
        spa_config_enter(spa, RW_WRITER, FTAG);
        vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
        spa_config_exit(spa, FTAG);

        /*
         * Check the state of the root vdev.  If it can't be opened, it
         * indicates one or more toplevel vdevs are faulted.
         */
        if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
                error = ENXIO;
                goto out;
        }

        if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
                dmu_tx_t *tx;
                int need_update = B_FALSE;
                int c;

                /*
                 * Claim log blocks that haven't been committed yet.
                 * This must all happen in a single txg.
                 */
                tx = dmu_tx_create_assigned(spa_get_dsl(spa),
                    spa_first_txg(spa));
                (void) dmu_objset_find(spa->spa_name,
                    zil_claim, tx, DS_FIND_CHILDREN);
                dmu_tx_commit(tx);

                spa->spa_sync_on = B_TRUE;
                txg_sync_start(spa->spa_dsl_pool);

                /*
                 * Wait for all claims to sync.
                 */
                txg_wait_synced(spa->spa_dsl_pool, 0);

                /*
                 * If the config cache is stale, or we have uninitialized
                 * metaslabs (see spa_vdev_add()), then update the config.
                 */
                if (config_cache_txg != spa->spa_config_txg ||
                    state == SPA_LOAD_IMPORT)
                        need_update = B_TRUE;

                for (c = 0; c < rvd->vdev_children; c++)
                        if (rvd->vdev_child[c]->vdev_ms_array == 0)
                                need_update = B_TRUE;

                /*
                 * Update the config cache asynchronously in case we're the
                 * root pool, in which case the config cache isn't writable yet.
                 */
                if (need_update)
                        spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
        }

        error = 0;
out:
        if (error && error != EBADF)
                zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
        spa->spa_load_state = SPA_LOAD_NONE;
        spa->spa_ena = 0;

        return (error);
}

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is
 * sent down from userland, instead of grabbed from the configuration cache.
 * For the case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics
 * at the same time we open the pool, without having to keep around the spa_t
 * in some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
        spa_t *spa;
        int error;
        int loaded = B_FALSE;
        int locked = B_FALSE;

        *spapp = NULL;

        /*
         * As disgusting as this is, we need to support recursive calls to this
         * function because dsl_dir_open() is called during spa_load(), and ends
         * up calling spa_open() again.  The real fix is to figure out how to
         * avoid dsl_dir_open() calling this in the first place.
         */
        if (mutex_owner(&spa_namespace_lock) != curthread) {
                mutex_enter(&spa_namespace_lock);
                locked = B_TRUE;
        }

        if ((spa = spa_lookup(pool)) == NULL) {
                if (locked)
                        mutex_exit(&spa_namespace_lock);
                return (ENOENT);
        }
        if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

                spa_activate(spa);

                error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

                if (error == EBADF) {
                        /*
                         * If vdev_validate() returns failure (indicated by
                         * EBADF), it means that one of the vdevs indicates
                         * that the pool has been exported or destroyed.  If
                         * this is the case, the config cache is out of sync and
                         * we should remove the pool from the namespace.
                         */
                        zfs_post_ok(spa, NULL);
                        spa_unload(spa);
                        spa_deactivate(spa);
                        spa_remove(spa);
                        spa_config_sync();
                        if (locked)
                                mutex_exit(&spa_namespace_lock);
                        return (ENOENT);
                }

                if (error) {
                        /*
                         * We can't open the pool, but we still have useful
                         * information: the state of each vdev after the
                         * attempted vdev_open().  Return this to the user.
                         */
                        if (config != NULL && spa->spa_root_vdev != NULL) {
                                spa_config_enter(spa, RW_READER, FTAG);
                                *config = spa_config_generate(spa, NULL, -1ULL,
                                    B_TRUE);
                                spa_config_exit(spa, FTAG);
                        }
                        spa_unload(spa);
                        spa_deactivate(spa);
                        spa->spa_last_open_failed = B_TRUE;
                        if (locked)
                                mutex_exit(&spa_namespace_lock);
                        *spapp = NULL;
                        return (error);
                } else {
                        zfs_post_ok(spa, NULL);
                        spa->spa_last_open_failed = B_FALSE;
                }

                loaded = B_TRUE;
        }

        spa_open_ref(spa, tag);
        if (locked)
                mutex_exit(&spa_namespace_lock);

        *spapp = spa;

        if (config != NULL) {
                spa_config_enter(spa, RW_READER, FTAG);
                *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
                spa_config_exit(spa, FTAG);
        }

        /*
         * If we just loaded the pool, resilver anything that's out of date.
         */
        if (loaded && (spa_mode & FWRITE))
                VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

        return (0);
}

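/*
 * Standard pool open; a thin wrapper around spa_open_common() for callers
 * that don't need the config nvlist.
 */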
int
spa_open(const char *name, spa_t **spapp, void *tag)
{
        return (spa_open_common(name, spapp, tag, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
        spa_t *spa;

        mutex_enter(&spa_namespace_lock);
        if ((spa = spa_lookup(name)) == NULL) {
                mutex_exit(&spa_namespace_lock);
                return (NULL);
        }
        spa->spa_inject_ref++;
        mutex_exit(&spa_namespace_lock);

        return (spa);
}

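/*
 * Drop a reference taken by spa_inject_addref().
 */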
void
spa_inject_delref(spa_t *spa)
{
        mutex_enter(&spa_namespace_lock);
        spa->spa_inject_ref--;
        mutex_exit(&spa_namespace_lock);
}

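/*
 * Add the list of hot spares to the given pool config nvlist, marking any
 * spare that is currently in use as VDEV_AUX_SPARED.
 */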
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
        nvlist_t **spares;
        uint_t i, nspares;
        nvlist_t *nvroot;
        uint64_t guid;
        vdev_stat_t *vs;
        uint_t vsc;

        if (spa->spa_nspares == 0)
                return;

        VERIFY(nvlist_lookup_nvlist(config,
            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
        VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
            ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
        if (nspares != 0) {
                VERIFY(nvlist_add_nvlist_array(nvroot,
                    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
                VERIFY(nvlist_lookup_nvlist_array(nvroot,
                    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

                /*
                 * Go through and find any spares which have since been
                 * repurposed as an active spare.  If this is the case, update
                 * their status appropriately.
                 */
                for (i = 0; i < nspares; i++) {
                        VERIFY(nvlist_lookup_uint64(spares[i],
                            ZPOOL_CONFIG_GUID, &guid) == 0);
                        if (spa_spare_inuse(guid)) {
                                VERIFY(nvlist_lookup_uint64_array(
                                    spares[i], ZPOOL_CONFIG_STATS,
                                    (uint64_t **)&vs, &vsc) == 0);
                                vs->vs_state = VDEV_STATE_CANT_OPEN;
                                vs->vs_aux = VDEV_AUX_SPARED;
                        }
                }
        }
}

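/*
 * Fetch the config nvlist for the named pool, annotated with the persistent
 * error count, the hot spare list, and (optionally) the alternate root.
 */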
int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
        int error;
        spa_t *spa;

        *config = NULL;
        error = spa_open_common(name, &spa, FTAG, config);

        if (spa && *config != NULL) {
                VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
                    spa_get_errlog_size(spa)) == 0);

                spa_add_spares(spa, *config);
        }

        /*
         * We want to get the alternate root even for faulted pools, so we cheat
         * and call spa_lookup() directly.
         */
        if (altroot) {
                if (spa == NULL) {
                        mutex_enter(&spa_namespace_lock);
                        spa = spa_lookup(name);
                        if (spa)
                                spa_altroot(spa, altroot, buflen);
                        else
                                altroot[0] = '\0';
                        spa = NULL;
                        mutex_exit(&spa_namespace_lock);
                } else {
                        spa_altroot(spa, altroot, buflen);
                }
        }

        if (spa != NULL)
                spa_close(spa, FTAG);

        return (error);
}

/*
 * Validate that the 'spares' array is well formed.  We must have an array of
 * nvlists, each of which describes a valid leaf vdev.
 */
static int
spa_validate_spares(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
        nvlist_t **spares;
        uint_t i, nspares;
        vdev_t *vd;
        int error;

        /*
         * It's acceptable to have no spares specified.
         */
        if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
            &spares, &nspares) != 0)
                return (0);

        if (nspares == 0)
                return (EINVAL);

        /*
         * Make sure the pool is formatted with a version that supports hot
         * spares.
         */
        if (spa_version(spa) < ZFS_VERSION_SPARES)
                return (ENOTSUP);

        for (i = 0; i < nspares; i++) {
                if ((error = spa_config_parse(spa, &vd, spares[i], NULL, 0,
                    mode)) != 0)
                        return (error);

                if (!vd->vdev_ops->vdev_op_leaf) {
                        vdev_free(vd);
                        return (EINVAL);
                }

                if ((error = vdev_open(vd)) != 0) {
                        vdev_free(vd);
                        return (error);
                }

                vd->vdev_top = vd;
                if ((error = vdev_label_spare(vd, crtxg)) != 0) {
                        vdev_free(vd);
                        return (error);
                }

                VERIFY(nvlist_add_uint64(spares[i], ZPOOL_CONFIG_GUID,
                    vd->vdev_guid) == 0);

                vdev_free(vd);
        }

        return (0);
}

/*
 * Pool Creation
 */
int
spa_create(const char *pool, nvlist_t *nvroot, const char *altroot)
{
        spa_t *spa;
        vdev_t *rvd;
        dsl_pool_t *dp;
        dmu_tx_t *tx;
        int c, error = 0;
        uint64_t txg = TXG_INITIAL;
        nvlist_t **spares;
        uint_t nspares;

        /*
         * If this pool already exists, return failure.
         */
        mutex_enter(&spa_namespace_lock);
        if (spa_lookup(pool) != NULL) {
                mutex_exit(&spa_namespace_lock);
                return (EEXIST);
        }

        /*
         * Allocate a new spa_t structure.
         */
        spa = spa_add(pool, altroot);
        spa_activate(spa);

        spa->spa_uberblock.ub_txg = txg - 1;
        spa->spa_uberblock.ub_version = ZFS_VERSION;
        spa->spa_ubsync = spa->spa_uberblock;

        /*
         * Create the root vdev.
         */
        spa_config_enter(spa, RW_WRITER, FTAG);

        error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

        ASSERT(error != 0 || rvd != NULL);
        ASSERT(error != 0 || spa->spa_root_vdev == rvd);

        if (error == 0 && rvd->vdev_children == 0)
                error = EINVAL;

        if (error == 0 &&
            (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
            (error = spa_validate_spares(spa, nvroot, txg,
            VDEV_ALLOC_ADD)) == 0) {
                for (c = 0; c < rvd->vdev_children; c++)
                        vdev_init(rvd->vdev_child[c], txg);
                vdev_config_dirty(rvd);
        }

        spa_config_exit(spa, FTAG);

        if (error != 0) {
                spa_unload(spa);
                spa_deactivate(spa);
                spa_remove(spa);
                mutex_exit(&spa_namespace_lock);
                return (error);
        }

        /*
         * Get the list of spares, if specified.
         */
        if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
            &spares, &nspares) == 0) {
                VERIFY(nvlist_alloc(&spa->spa_sparelist, NV_UNIQUE_NAME,
                    KM_SLEEP) == 0);
                VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
                    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
                spa_config_enter(spa, RW_WRITER, FTAG);
                spa_load_spares(spa);
                spa_config_exit(spa, FTAG);
                spa->spa_sync_spares = B_TRUE;
        }

        spa->spa_dsl_pool = dp = dsl_pool_create(spa, txg);
        spa->spa_meta_objset = dp->dp_meta_objset;

        tx = dmu_tx_create_assigned(dp, txg);

        /*
         * Create the pool config object.
         */
        spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
            DMU_OT_PACKED_NVLIST, 1 << 14,
            DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

        if (zap_add(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
            sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
                cmn_err(CE_PANIC, "failed to add pool config");
        }

        /* Newly created pools are always deflated. */
        spa->spa_deflate = TRUE;
        if (zap_add(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
            sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
                cmn_err(CE_PANIC, "failed to add deflate");
        }

        /*
         * Create the deferred-free bplist object.  Turn off compression
         * because sync-to-convergence takes longer if the blocksize
         * keeps changing.
         */
        spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
            1 << 14, tx);
        dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
            ZIO_COMPRESS_OFF, tx);

        if (zap_add(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
            sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
                cmn_err(CE_PANIC, "failed to add bplist");
        }

        dmu_tx_commit(tx);

        spa->spa_sync_on = B_TRUE;
        txg_sync_start(spa->spa_dsl_pool);

        /*
         * We explicitly wait for the first transaction to complete so that our
         * bean counters are appropriately updated.
         */
        txg_wait_synced(spa->spa_dsl_pool, txg);

        spa_config_sync();

        mutex_exit(&spa_namespace_lock);

        return (0);
}

/*
 * Import the given pool into the system.  We set up the necessary spa_t and
 * then call spa_load() to do the dirty work.
 */
int
spa_import(const char *pool, nvlist_t *config, const char *altroot)
{
        spa_t *spa;
        int error;
        nvlist_t *nvroot;
        nvlist_t **spares;
        uint_t nspares;

        if (!(spa_mode & FWRITE))
                return (EROFS);

        /*
         * If a pool with this name exists, return failure.
         */
        mutex_enter(&spa_namespace_lock);
        if (spa_lookup(pool) != NULL) {
                mutex_exit(&spa_namespace_lock);
                return (EEXIST);
        }

        /*
         * Create and initialize the spa structure.
         */
        spa = spa_add(pool, altroot);
        spa_activate(spa);

        /*
         * Pass off the heavy lifting to spa_load().
         * Pass TRUE for mosconfig because the user-supplied config
         * is actually the one to trust when doing an import.
         */
        error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);

        spa_config_enter(spa, RW_WRITER, FTAG);
        /*
         * Toss any existing sparelist, as it doesn't have any validity anymore,
         * and conflicts with spa_has_spare().
         */
        if (spa->spa_sparelist) {
                nvlist_free(spa->spa_sparelist);
                spa->spa_sparelist = NULL;
                spa_load_spares(spa);
        }

        VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);
        if (error == 0)
                error = spa_validate_spares(spa, nvroot, -1ULL,
                    VDEV_ALLOC_SPARE);
        spa_config_exit(spa, FTAG);

        if (error != 0) {
                spa_unload(spa);
                spa_deactivate(spa);
                spa_remove(spa);
                mutex_exit(&spa_namespace_lock);
                return (error);
        }

        /*
         * Override any spares as specified by the user, as these may have
         * correct device names/devids, etc.
         */
        if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
            &spares, &nspares) == 0) {
                if (spa->spa_sparelist)
                        VERIFY(nvlist_remove(spa->spa_sparelist,
                            ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
                else
                        VERIFY(nvlist_alloc(&spa->spa_sparelist,
                            NV_UNIQUE_NAME, KM_SLEEP) == 0);
                VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
                    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
                spa_config_enter(spa, RW_WRITER, FTAG);
                spa_load_spares(spa);
                spa_config_exit(spa, FTAG);
                spa->spa_sync_spares = B_TRUE;
        }

        /*
         * Update the config cache to include the newly-imported pool.
         */
        spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);

        mutex_exit(&spa_namespace_lock);

        /*
         * Resilver anything that's out of date.
         */
        if (spa_mode & FWRITE)
                VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

        return (0);
}

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
        nvlist_t *config = NULL;
        char *poolname;
        spa_t *spa;
        uint64_t state;

        if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
                return (NULL);

        if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
                return (NULL);

        /*
         * Create and initialize the spa structure.
         */
        mutex_enter(&spa_namespace_lock);
        spa = spa_add(TRYIMPORT_NAME, NULL);
        spa_activate(spa);

        /*
         * Pass off the heavy lifting to spa_load().
         * Pass TRUE for mosconfig because the user-supplied config
         * is actually the one to trust when doing an import.
         */
        (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);

        /*
         * If 'tryconfig' was at least parsable, return the current config.
         */
        if (spa->spa_root_vdev != NULL) {
                spa_config_enter(spa, RW_READER, FTAG);
                config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
                spa_config_exit(spa, FTAG);
                VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
                    poolname) == 0);
                VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
                    state) == 0);

                /*
                 * Add the list of hot spares.
                 */
                spa_add_spares(spa, config);
        }

        spa_unload(spa);
        spa_deactivate(spa);
        spa_remove(spa);
        mutex_exit(&spa_namespace_lock);

        return (config);
}

/*
 * Pool export/destroy
 *
 * The act of destroying or exporting a pool is very simple.  We make sure
 * there is no more pending I/O and any references to the pool are gone.
 * Then, we update the pool state and sync all the labels to disk, removing
 * the configuration from the cache afterwards.
 */
static int
spa_export_common(char *pool, int new_state, nvlist_t **oldconfig)
{
        spa_t *spa;

        if (oldconfig)
                *oldconfig = NULL;

        if (!(spa_mode & FWRITE))
                return (EROFS);

        mutex_enter(&spa_namespace_lock);
        if ((spa = spa_lookup(pool)) == NULL) {
                mutex_exit(&spa_namespace_lock);
                return (ENOENT);
        }

        /*
         * Put a hold on the pool, drop the namespace lock, stop async tasks,
         * reacquire the namespace lock, and see if we can export.
         */
        spa_open_ref(spa, FTAG);
        mutex_exit(&spa_namespace_lock);
        spa_async_suspend(spa);
        mutex_enter(&spa_namespace_lock);
        spa_close(spa, FTAG);

        /*
         * The pool will be in core if it's openable,
         * in which case we can modify its state.
         */
        if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
                /*
                 * Objsets may be open only because they're dirty, so we
                 * have to force it to sync before checking spa_refcnt.
                 */
                spa_scrub_suspend(spa);
                txg_wait_synced(spa->spa_dsl_pool, 0);

                /*
                 * A pool cannot be exported or destroyed if there are active
                 * references.  If we are resetting a pool, allow references by
                 * fault injection handlers.
                 */
                if (!spa_refcount_zero(spa) ||
                    (spa->spa_inject_ref != 0 &&
                    new_state != POOL_STATE_UNINITIALIZED)) {
                        spa_scrub_resume(spa);
                        spa_async_resume(spa);
                        mutex_exit(&spa_namespace_lock);
                        return (EBUSY);
                }

                spa_scrub_resume(spa);
                VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);

                /*
                 * We want this to be reflected on every label,
                 * so mark them all dirty.  spa_unload() will do the
                 * final sync that pushes these changes out.
                 */
                if (new_state != POOL_STATE_UNINITIALIZED) {
                        spa_config_enter(spa, RW_WRITER, FTAG);
                        spa->spa_state = new_state;
                        spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
                        vdev_config_dirty(spa->spa_root_vdev);
                        spa_config_exit(spa, FTAG);
                }
        }

        if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
                spa_unload(spa);
                spa_deactivate(spa);
        }

        if (oldconfig && spa->spa_config)
                VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);

        if (new_state != POOL_STATE_UNINITIALIZED) {
                spa_remove(spa);
                spa_config_sync();
        }
        mutex_exit(&spa_namespace_lock);

        return (0);
}

/*
 * Destroy a storage pool.
 */
int
spa_destroy(char *pool)
{
        return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL));
}

/*
 * Export a storage pool.
 */
int
spa_export(char *pool, nvlist_t **oldconfig)
{
        return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig));
}

/*
 * Similar to spa_export(), this unloads the spa_t without actually removing it
 * from the namespace in any way.
 */
int
spa_reset(char *pool)
{
        return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL));
}


/*
 * ==========================================================================
 * Device manipulation
 * ==========================================================================
 */

/*
 * Add capacity to a storage pool.
 */
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
        uint64_t txg;
        int c, error;
        vdev_t *rvd = spa->spa_root_vdev;
        vdev_t *vd, *tvd;
        nvlist_t **spares;
        uint_t i, nspares;

        txg = spa_vdev_enter(spa);

        if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
            VDEV_ALLOC_ADD)) != 0)
                return (spa_vdev_exit(spa, NULL, txg, error));

        if ((error = spa_validate_spares(spa, nvroot, txg,
            VDEV_ALLOC_ADD)) != 0)
                return (spa_vdev_exit(spa, vd, txg, error));

        if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
            &spares, &nspares) != 0)
                nspares = 0;

        if (vd->vdev_children == 0 && nspares == 0)
                return (spa_vdev_exit(spa, vd, txg, EINVAL));

        if (vd->vdev_children != 0) {
                if ((error = vdev_create(vd, txg, B_FALSE)) != 0)
                        return (spa_vdev_exit(spa, vd, txg, error));

                /*
                 * Transfer each new top-level vdev from vd to rvd.
                 */
                for (c = 0; c < vd->vdev_children; c++) {
                        tvd = vd->vdev_child[c];
                        vdev_remove_child(vd, tvd);
                        tvd->vdev_id = rvd->vdev_children;
                        vdev_add_child(rvd, tvd);
                        vdev_config_dirty(tvd);
                }
        }

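        /*
         * If new hot spares were specified, merge them into the existing
         * spare list (creating one if necessary) and schedule it for sync.
         */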
        if (nspares != 0) {
                if (spa->spa_sparelist != NULL) {
                        nvlist_t **oldspares;
                        uint_t oldnspares;
                        nvlist_t **newspares;

                        VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
                            ZPOOL_CONFIG_SPARES, &oldspares, &oldnspares) == 0);

                        newspares = kmem_alloc(sizeof (void *) *
                            (nspares + oldnspares), KM_SLEEP);
                        for (i = 0; i < oldnspares; i++)
                                VERIFY(nvlist_dup(oldspares[i],
                                    &newspares[i], KM_SLEEP) == 0);
                        for (i = 0; i < nspares; i++)
                                VERIFY(nvlist_dup(spares[i],
                                    &newspares[i + oldnspares],
                                    KM_SLEEP) == 0);

                        VERIFY(nvlist_remove(spa->spa_sparelist,
                            ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);

                        VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
                            ZPOOL_CONFIG_SPARES, newspares,
                            nspares + oldnspares) == 0);
                        for (i = 0; i < oldnspares + nspares; i++)
                                nvlist_free(newspares[i]);
                        kmem_free(newspares, (oldnspares + nspares) *
                            sizeof (void *));
                } else {
                        VERIFY(nvlist_alloc(&spa->spa_sparelist,
                            NV_UNIQUE_NAME, KM_SLEEP) == 0);
                        VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
                            ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
                }

                spa_load_spares(spa);
                spa->spa_sync_spares = B_TRUE;
        }

1495789Sahrens /*
14961585Sbonwick * We have to be careful when adding new vdevs to an existing pool.
14971585Sbonwick * If other threads start allocating from these vdevs before we
14981585Sbonwick * sync the config cache, and we lose power, then upon reboot we may
14991585Sbonwick * fail to open the pool because there are DVAs that the config cache
15001585Sbonwick * can't translate. Therefore, we first add the vdevs without
15011585Sbonwick * initializing metaslabs; sync the config cache (via spa_vdev_exit());
15021635Sbonwick * and then let spa_config_update() initialize the new metaslabs.
15031585Sbonwick *
15041585Sbonwick * spa_load() checks for added-but-not-initialized vdevs, so that
15051585Sbonwick * if we lose power at any point in this sequence, the remaining
15061585Sbonwick * steps will be completed the next time we load the pool.
1507789Sahrens */
15081635Sbonwick (void) spa_vdev_exit(spa, vd, txg, 0);
15091585Sbonwick 
15101635Sbonwick mutex_enter(&spa_namespace_lock);
15111635Sbonwick spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
15121635Sbonwick mutex_exit(&spa_namespace_lock);
1513789Sahrens 
15141635Sbonwick return (0);
1515789Sahrens }
1516789Sahrens 
1517789Sahrens /*
1518789Sahrens * Attach a device to a mirror. The arguments are the path to any device
1519789Sahrens * in the mirror, and the nvroot for the new device. If the path specifies
1520789Sahrens * a device that is not mirrored, we automatically insert the mirror vdev.
1521789Sahrens *
1522789Sahrens * If 'replacing' is specified, the new device is intended to replace the
1523789Sahrens * existing device; in this case the two devices are made into their own
1524789Sahrens * mirror using the 'replacing' vdev, which is functionally identical to
1525789Sahrens * the mirror vdev (it actually reuses all the same ops) but has a few
1526789Sahrens * extra rules: you can't attach to it after it's been created, and upon
1527789Sahrens * completion of resilvering, the first disk (the one being replaced)
1528789Sahrens * is automatically detached.
1529789Sahrens */ 1530789Sahrens int 15311544Seschrock spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 1532789Sahrens { 1533789Sahrens uint64_t txg, open_txg; 1534789Sahrens int error; 1535789Sahrens vdev_t *rvd = spa->spa_root_vdev; 1536789Sahrens vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 15372082Seschrock vdev_ops_t *pvops; 1538789Sahrens 1539789Sahrens txg = spa_vdev_enter(spa); 1540789Sahrens 15411544Seschrock oldvd = vdev_lookup_by_guid(rvd, guid); 1542789Sahrens 1543789Sahrens if (oldvd == NULL) 1544789Sahrens return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 1545789Sahrens 15461585Sbonwick if (!oldvd->vdev_ops->vdev_op_leaf) 15471585Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 15481585Sbonwick 1549789Sahrens pvd = oldvd->vdev_parent; 1550789Sahrens 15512082Seschrock if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 15522082Seschrock VDEV_ALLOC_ADD)) != 0 || newrootvd->vdev_children != 1) 1553789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 1554789Sahrens 1555789Sahrens newvd = newrootvd->vdev_child[0]; 1556789Sahrens 1557789Sahrens if (!newvd->vdev_ops->vdev_op_leaf) 1558789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 1559789Sahrens 15602082Seschrock if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 1561789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, error)); 1562789Sahrens 15632082Seschrock if (!replacing) { 15642082Seschrock /* 15652082Seschrock * For attach, the only allowable parent is a mirror or the root 15662082Seschrock * vdev. 15672082Seschrock */ 15682082Seschrock if (pvd->vdev_ops != &vdev_mirror_ops && 15692082Seschrock pvd->vdev_ops != &vdev_root_ops) 15702082Seschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 15712082Seschrock 15722082Seschrock pvops = &vdev_mirror_ops; 15732082Seschrock } else { 15742082Seschrock /* 15752082Seschrock * Active hot spares can only be replaced by inactive hot 15762082Seschrock * spares. 15772082Seschrock */ 15782082Seschrock if (pvd->vdev_ops == &vdev_spare_ops && 15792082Seschrock pvd->vdev_child[1] == oldvd && 15802082Seschrock !spa_has_spare(spa, newvd->vdev_guid)) 15812082Seschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 15822082Seschrock 15832082Seschrock /* 15842082Seschrock * If the source is a hot spare, and the parent isn't already a 15852082Seschrock * spare, then we want to create a new hot spare. Otherwise, we 15862082Seschrock * want to create a replacing vdev. 15872082Seschrock */ 15882082Seschrock if (pvd->vdev_ops == &vdev_replacing_ops) 15892082Seschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 15902082Seschrock else if (pvd->vdev_ops != &vdev_spare_ops && 15912082Seschrock newvd->vdev_isspare) 15922082Seschrock pvops = &vdev_spare_ops; 15932082Seschrock else 15942082Seschrock pvops = &vdev_replacing_ops; 15952082Seschrock } 15962082Seschrock 15971175Slling /* 15981175Slling * Compare the new device size with the replaceable/attachable 15991175Slling * device size. 16001175Slling */ 16011175Slling if (newvd->vdev_psize < vdev_get_rsize(oldvd)) 1602789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 1603789Sahrens 16041732Sbonwick /* 16051732Sbonwick * The new device cannot have a higher alignment requirement 16061732Sbonwick * than the top-level vdev. 
16071732Sbonwick */ 16081732Sbonwick if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 1609789Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 1610789Sahrens 1611789Sahrens /* 1612789Sahrens * If this is an in-place replacement, update oldvd's path and devid 1613789Sahrens * to make it distinguishable from newvd, and unopenable from now on. 1614789Sahrens */ 1615789Sahrens if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 1616789Sahrens spa_strfree(oldvd->vdev_path); 1617789Sahrens oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 1618789Sahrens KM_SLEEP); 1619789Sahrens (void) sprintf(oldvd->vdev_path, "%s/%s", 1620789Sahrens newvd->vdev_path, "old"); 1621789Sahrens if (oldvd->vdev_devid != NULL) { 1622789Sahrens spa_strfree(oldvd->vdev_devid); 1623789Sahrens oldvd->vdev_devid = NULL; 1624789Sahrens } 1625789Sahrens } 1626789Sahrens 1627789Sahrens /* 16282082Seschrock * If the parent is not a mirror, or if we're replacing, insert the new 16292082Seschrock * mirror/replacing/spare vdev above oldvd. 1630789Sahrens */ 1631789Sahrens if (pvd->vdev_ops != pvops) 1632789Sahrens pvd = vdev_add_parent(oldvd, pvops); 1633789Sahrens 1634789Sahrens ASSERT(pvd->vdev_top->vdev_parent == rvd); 1635789Sahrens ASSERT(pvd->vdev_ops == pvops); 1636789Sahrens ASSERT(oldvd->vdev_parent == pvd); 1637789Sahrens 1638789Sahrens /* 1639789Sahrens * Extract the new device from its root and add it to pvd. 1640789Sahrens */ 1641789Sahrens vdev_remove_child(newrootvd, newvd); 1642789Sahrens newvd->vdev_id = pvd->vdev_children; 1643789Sahrens vdev_add_child(pvd, newvd); 1644789Sahrens 16451544Seschrock /* 16461544Seschrock * If newvd is smaller than oldvd, but larger than its rsize, 16471544Seschrock * the addition of newvd may have decreased our parent's asize. 16481544Seschrock */ 16491544Seschrock pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); 16501544Seschrock 1651789Sahrens tvd = newvd->vdev_top; 1652789Sahrens ASSERT(pvd->vdev_top == tvd); 1653789Sahrens ASSERT(tvd->vdev_parent == rvd); 1654789Sahrens 1655789Sahrens vdev_config_dirty(tvd); 1656789Sahrens 1657789Sahrens /* 1658789Sahrens * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate 1659789Sahrens * upward when spa_vdev_exit() calls vdev_dtl_reassess(). 1660789Sahrens */ 1661789Sahrens open_txg = txg + TXG_CONCURRENT_STATES - 1; 1662789Sahrens 1663789Sahrens mutex_enter(&newvd->vdev_dtl_lock); 1664789Sahrens space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL, 1665789Sahrens open_txg - TXG_INITIAL + 1); 1666789Sahrens mutex_exit(&newvd->vdev_dtl_lock); 1667789Sahrens 16681544Seschrock dprintf("attached %s in txg %llu\n", newvd->vdev_path, txg); 16691544Seschrock 1670789Sahrens /* 1671789Sahrens * Mark newvd's DTL dirty in this txg. 1672789Sahrens */ 16731732Sbonwick vdev_dirty(tvd, VDD_DTL, newvd, txg); 1674789Sahrens 1675789Sahrens (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); 1676789Sahrens 1677789Sahrens /* 1678789Sahrens * Kick off a resilver to update newvd. 1679789Sahrens */ 1680789Sahrens VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 1681789Sahrens 1682789Sahrens return (0); 1683789Sahrens } 1684789Sahrens 1685789Sahrens /* 1686789Sahrens * Detach a device from a mirror or replacing vdev. 1687789Sahrens * If 'replace_done' is specified, only detach if the parent 1688789Sahrens * is a replacing vdev. 
1689789Sahrens */ 1690789Sahrens int 16911544Seschrock spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done) 1692789Sahrens { 1693789Sahrens uint64_t txg; 1694789Sahrens int c, t, error; 1695789Sahrens vdev_t *rvd = spa->spa_root_vdev; 1696789Sahrens vdev_t *vd, *pvd, *cvd, *tvd; 16972082Seschrock boolean_t unspare = B_FALSE; 16982082Seschrock uint64_t unspare_guid; 1699789Sahrens 1700789Sahrens txg = spa_vdev_enter(spa); 1701789Sahrens 17021544Seschrock vd = vdev_lookup_by_guid(rvd, guid); 1703789Sahrens 1704789Sahrens if (vd == NULL) 1705789Sahrens return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 1706789Sahrens 17071585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 17081585Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 17091585Sbonwick 1710789Sahrens pvd = vd->vdev_parent; 1711789Sahrens 1712789Sahrens /* 1713789Sahrens * If replace_done is specified, only remove this device if it's 17142082Seschrock * the first child of a replacing vdev. For the 'spare' vdev, either 17152082Seschrock * disk can be removed. 1716789Sahrens */ 17172082Seschrock if (replace_done) { 17182082Seschrock if (pvd->vdev_ops == &vdev_replacing_ops) { 17192082Seschrock if (vd->vdev_id != 0) 17202082Seschrock return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 17212082Seschrock } else if (pvd->vdev_ops != &vdev_spare_ops) { 17222082Seschrock return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 17232082Seschrock } 17242082Seschrock } 17252082Seschrock 17262082Seschrock ASSERT(pvd->vdev_ops != &vdev_spare_ops || 17272082Seschrock spa_version(spa) >= ZFS_VERSION_SPARES); 1728789Sahrens 1729789Sahrens /* 17302082Seschrock * Only mirror, replacing, and spare vdevs support detach. 1731789Sahrens */ 1732789Sahrens if (pvd->vdev_ops != &vdev_replacing_ops && 17332082Seschrock pvd->vdev_ops != &vdev_mirror_ops && 17342082Seschrock pvd->vdev_ops != &vdev_spare_ops) 1735789Sahrens return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 1736789Sahrens 1737789Sahrens /* 1738789Sahrens * If there's only one replica, you can't detach it. 1739789Sahrens */ 1740789Sahrens if (pvd->vdev_children <= 1) 1741789Sahrens return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 1742789Sahrens 1743789Sahrens /* 1744789Sahrens * If all siblings have non-empty DTLs, this device may have the only 1745789Sahrens * valid copy of the data, which means we cannot safely detach it. 1746789Sahrens * 1747789Sahrens * XXX -- as in the vdev_offline() case, we really want a more 1748789Sahrens * precise DTL check. 1749789Sahrens */ 1750789Sahrens for (c = 0; c < pvd->vdev_children; c++) { 1751789Sahrens uint64_t dirty; 1752789Sahrens 1753789Sahrens cvd = pvd->vdev_child[c]; 1754789Sahrens if (cvd == vd) 1755789Sahrens continue; 1756789Sahrens if (vdev_is_dead(cvd)) 1757789Sahrens continue; 1758789Sahrens mutex_enter(&cvd->vdev_dtl_lock); 1759789Sahrens dirty = cvd->vdev_dtl_map.sm_space | 1760789Sahrens cvd->vdev_dtl_scrub.sm_space; 1761789Sahrens mutex_exit(&cvd->vdev_dtl_lock); 1762789Sahrens if (!dirty) 1763789Sahrens break; 1764789Sahrens } 17652082Seschrock 17662082Seschrock /* 17672082Seschrock * If we are a replacing or spare vdev, then we can always detach the 17682082Seschrock * latter child, as that is how one cancels the operation. 
17692082Seschrock */ 17702082Seschrock if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) && 17712082Seschrock c == pvd->vdev_children) 1772789Sahrens return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 1773789Sahrens 1774789Sahrens /* 17752082Seschrock * If we are detaching the original disk from a spare, then it implies 17762082Seschrock * that the spare should become a real disk, and be removed from the 17772082Seschrock * active spare list for the pool. 17782082Seschrock */ 17792082Seschrock if (pvd->vdev_ops == &vdev_spare_ops && 17802082Seschrock vd->vdev_id == 0) 17812082Seschrock unspare = B_TRUE; 17822082Seschrock 17832082Seschrock /* 1784789Sahrens * Erase the disk labels so the disk can be used for other things. 1785789Sahrens * This must be done after all other error cases are handled, 1786789Sahrens * but before we disembowel vd (so we can still do I/O to it). 1787789Sahrens * But if we can't do it, don't treat the error as fatal -- 1788789Sahrens * it may be that the unwritability of the disk is the reason 1789789Sahrens * it's being detached! 1790789Sahrens */ 17912082Seschrock error = vdev_label_init(vd, 0, B_FALSE); 1792789Sahrens if (error) 1793789Sahrens dprintf("unable to erase labels on %s\n", vdev_description(vd)); 1794789Sahrens 1795789Sahrens /* 1796789Sahrens * Remove vd from its parent and compact the parent's children. 1797789Sahrens */ 1798789Sahrens vdev_remove_child(pvd, vd); 1799789Sahrens vdev_compact_children(pvd); 1800789Sahrens 1801789Sahrens /* 1802789Sahrens * Remember one of the remaining children so we can get tvd below. 1803789Sahrens */ 1804789Sahrens cvd = pvd->vdev_child[0]; 1805789Sahrens 1806789Sahrens /* 18072082Seschrock * If we need to remove the remaining child from the list of hot spares, 18082082Seschrock * do it now, marking the vdev as no longer a spare in the process. We 18092082Seschrock * must do this before vdev_remove_parent(), because that can change the 18102082Seschrock * GUID if it creates a new toplevel GUID. 18112082Seschrock */ 18122082Seschrock if (unspare) { 18132082Seschrock ASSERT(cvd->vdev_isspare); 18142082Seschrock spa_spare_remove(cvd->vdev_guid); 18152082Seschrock cvd->vdev_isspare = B_FALSE; 18162082Seschrock unspare_guid = cvd->vdev_guid; 18172082Seschrock } 18182082Seschrock 18192082Seschrock /* 1820789Sahrens * If the parent mirror/replacing vdev only has one child, 1821789Sahrens * the parent is no longer needed. Remove it from the tree. 1822789Sahrens */ 1823789Sahrens if (pvd->vdev_children == 1) 1824789Sahrens vdev_remove_parent(cvd); 1825789Sahrens 1826789Sahrens /* 1827789Sahrens * We don't set tvd until now because the parent we just removed 1828789Sahrens * may have been the previous top-level vdev. 1829789Sahrens */ 1830789Sahrens tvd = cvd->vdev_top; 1831789Sahrens ASSERT(tvd->vdev_parent == rvd); 1832789Sahrens 1833789Sahrens /* 1834789Sahrens * Reopen this top-level vdev to reassess health after detach. 1835789Sahrens */ 18361544Seschrock vdev_reopen(tvd); 1837789Sahrens 1838789Sahrens /* 1839789Sahrens * If the device we just detached was smaller than the others, 18401732Sbonwick * it may be possible to add metaslabs (i.e. grow the pool). 18411732Sbonwick * vdev_metaslab_init() can't fail because the existing metaslabs 18421732Sbonwick * are already in core, so there's nothing to read from disk. 
1843789Sahrens */ 18441732Sbonwick VERIFY(vdev_metaslab_init(tvd, txg) == 0); 1845789Sahrens 1846789Sahrens vdev_config_dirty(tvd); 1847789Sahrens 1848789Sahrens /* 1849789Sahrens * Mark vd's DTL as dirty in this txg. 1850789Sahrens * vdev_dtl_sync() will see that vd->vdev_detached is set 1851789Sahrens * and free vd's DTL object in syncing context. 1852789Sahrens * But first make sure we're not on any *other* txg's DTL list, 1853789Sahrens * to prevent vd from being accessed after it's freed. 1854789Sahrens */ 1855789Sahrens for (t = 0; t < TXG_SIZE; t++) 1856789Sahrens (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 18571732Sbonwick vd->vdev_detached = B_TRUE; 18581732Sbonwick vdev_dirty(tvd, VDD_DTL, vd, txg); 1859789Sahrens 18601544Seschrock dprintf("detached %s in txg %llu\n", vd->vdev_path, txg); 1861789Sahrens 18622082Seschrock error = spa_vdev_exit(spa, vd, txg, 0); 18632082Seschrock 18642082Seschrock /* 18652082Seschrock * If we are supposed to remove the given vdev from the list of spares, 18662082Seschrock * iterate over all pools in the system and replace it if it's present. 18672082Seschrock */ 18682082Seschrock if (unspare) { 18692082Seschrock spa = NULL; 18702082Seschrock mutex_enter(&spa_namespace_lock); 18712082Seschrock while ((spa = spa_next(spa)) != NULL) { 18722082Seschrock if (spa->spa_state != POOL_STATE_ACTIVE) 18732082Seschrock continue; 18742082Seschrock 18752082Seschrock (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 18762082Seschrock } 18772082Seschrock mutex_exit(&spa_namespace_lock); 18782082Seschrock } 18792082Seschrock 18802082Seschrock return (error); 18812082Seschrock } 18822082Seschrock 18832082Seschrock /* 18842082Seschrock * Remove a device from the pool. Currently, this supports removing only hot 18852082Seschrock * spares. 18862082Seschrock */ 18872082Seschrock int 18882082Seschrock spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 18892082Seschrock { 18902082Seschrock vdev_t *vd; 18912082Seschrock nvlist_t **spares, *nv, **newspares; 18922082Seschrock uint_t i, j, nspares; 18932082Seschrock int ret = 0; 18942082Seschrock 18952082Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 18962082Seschrock 18972082Seschrock vd = spa_lookup_by_guid(spa, guid); 18982082Seschrock 18992082Seschrock nv = NULL; 19002082Seschrock if (spa->spa_spares != NULL && 19012082Seschrock nvlist_lookup_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES, 19022082Seschrock &spares, &nspares) == 0) { 19032082Seschrock for (i = 0; i < nspares; i++) { 19042082Seschrock uint64_t theguid; 19052082Seschrock 19062082Seschrock VERIFY(nvlist_lookup_uint64(spares[i], 19072082Seschrock ZPOOL_CONFIG_GUID, &theguid) == 0); 19082082Seschrock if (theguid == guid) { 19092082Seschrock nv = spares[i]; 19102082Seschrock break; 19112082Seschrock } 19122082Seschrock } 19132082Seschrock } 19142082Seschrock 19152082Seschrock /* 19162082Seschrock * We only support removing a hot spare, and only if it's not currently 19172082Seschrock * in use in this pool. 
19182082Seschrock */ 19192082Seschrock if (nv == NULL && vd == NULL) { 19202082Seschrock ret = ENOENT; 19212082Seschrock goto out; 19222082Seschrock } 19232082Seschrock 19242082Seschrock if (nv == NULL && vd != NULL) { 19252082Seschrock ret = ENOTSUP; 19262082Seschrock goto out; 19272082Seschrock } 19282082Seschrock 19292082Seschrock if (!unspare && nv != NULL && vd != NULL) { 19302082Seschrock ret = EBUSY; 19312082Seschrock goto out; 19322082Seschrock } 19332082Seschrock 19342082Seschrock if (nspares == 1) { 19352082Seschrock newspares = NULL; 19362082Seschrock } else { 19372082Seschrock newspares = kmem_alloc((nspares - 1) * sizeof (void *), 19382082Seschrock KM_SLEEP); 19392082Seschrock for (i = 0, j = 0; i < nspares; i++) { 19402082Seschrock if (spares[i] != nv) 19412082Seschrock VERIFY(nvlist_dup(spares[i], 19422082Seschrock &newspares[j++], KM_SLEEP) == 0); 19432082Seschrock } 19442082Seschrock } 19452082Seschrock 19462082Seschrock VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES, 19472082Seschrock DATA_TYPE_NVLIST_ARRAY) == 0); 19482082Seschrock VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES, 19492082Seschrock newspares, nspares - 1) == 0); 19502082Seschrock for (i = 0; i < nspares - 1; i++) 19512082Seschrock nvlist_free(newspares[i]); 19522082Seschrock kmem_free(newspares, (nspares - 1) * sizeof (void *)); 19532082Seschrock spa_load_spares(spa); 19542082Seschrock spa->spa_sync_spares = B_TRUE; 19552082Seschrock 19562082Seschrock out: 19572082Seschrock spa_config_exit(spa, FTAG); 19582082Seschrock 19592082Seschrock return (ret); 1960789Sahrens } 1961789Sahrens 1962789Sahrens /* 19631544Seschrock * Find any device that's done replacing, so we can detach it. 1964789Sahrens */ 19651544Seschrock static vdev_t * 19661544Seschrock spa_vdev_replace_done_hunt(vdev_t *vd) 1967789Sahrens { 19681544Seschrock vdev_t *newvd, *oldvd; 1969789Sahrens int c; 1970789Sahrens 19711544Seschrock for (c = 0; c < vd->vdev_children; c++) { 19721544Seschrock oldvd = spa_vdev_replace_done_hunt(vd->vdev_child[c]); 19731544Seschrock if (oldvd != NULL) 19741544Seschrock return (oldvd); 19751544Seschrock } 1976789Sahrens 1977789Sahrens if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) { 19781544Seschrock oldvd = vd->vdev_child[0]; 19791544Seschrock newvd = vd->vdev_child[1]; 1980789Sahrens 19811544Seschrock mutex_enter(&newvd->vdev_dtl_lock); 19821544Seschrock if (newvd->vdev_dtl_map.sm_space == 0 && 19831544Seschrock newvd->vdev_dtl_scrub.sm_space == 0) { 19841544Seschrock mutex_exit(&newvd->vdev_dtl_lock); 19851544Seschrock return (oldvd); 19861544Seschrock } 19871544Seschrock mutex_exit(&newvd->vdev_dtl_lock); 19881544Seschrock } 1989789Sahrens 19901544Seschrock return (NULL); 1991789Sahrens } 1992789Sahrens 19931544Seschrock static void 1994789Sahrens spa_vdev_replace_done(spa_t *spa) 1995789Sahrens { 19961544Seschrock vdev_t *vd; 19972082Seschrock vdev_t *pvd; 19981544Seschrock uint64_t guid; 19992082Seschrock uint64_t pguid = 0; 2000789Sahrens 20011544Seschrock spa_config_enter(spa, RW_READER, FTAG); 2002789Sahrens 20031544Seschrock while ((vd = spa_vdev_replace_done_hunt(spa->spa_root_vdev)) != NULL) { 20041544Seschrock guid = vd->vdev_guid; 20052082Seschrock /* 20062082Seschrock * If we have just finished replacing a hot spared device, then 20072082Seschrock * we need to detach the parent's first child (the original hot 20082082Seschrock * spare) as well. 
20092082Seschrock */ 20102082Seschrock pvd = vd->vdev_parent; 20112082Seschrock if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops && 20122082Seschrock pvd->vdev_id == 0) { 20132082Seschrock ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 20142082Seschrock ASSERT(pvd->vdev_parent->vdev_children == 2); 20152082Seschrock pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid; 20162082Seschrock } 20171544Seschrock spa_config_exit(spa, FTAG); 20181544Seschrock if (spa_vdev_detach(spa, guid, B_TRUE) != 0) 20191544Seschrock return; 20202082Seschrock if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0) 20212082Seschrock return; 20221544Seschrock spa_config_enter(spa, RW_READER, FTAG); 2023789Sahrens } 2024789Sahrens 20251544Seschrock spa_config_exit(spa, FTAG); 2026789Sahrens } 2027789Sahrens 2028789Sahrens /* 20291354Seschrock * Update the stored path for this vdev. Dirty the vdev configuration, relying 20301354Seschrock * on spa_vdev_enter/exit() to synchronize the labels and cache. 20311354Seschrock */ 20321354Seschrock int 20331354Seschrock spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 20341354Seschrock { 20351354Seschrock vdev_t *rvd, *vd; 20361354Seschrock uint64_t txg; 20371354Seschrock 20381354Seschrock rvd = spa->spa_root_vdev; 20391354Seschrock 20401354Seschrock txg = spa_vdev_enter(spa); 20411354Seschrock 20422082Seschrock if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 20432082Seschrock /* 20442082Seschrock * Determine if this is a reference to a hot spare. In that 20452082Seschrock * case, update the path as stored in the spare list. 20462082Seschrock */ 20472082Seschrock nvlist_t **spares; 20482082Seschrock uint_t i, nspares; 20492082Seschrock if (spa->spa_sparelist != NULL) { 20502082Seschrock VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist, 20512082Seschrock ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 20522082Seschrock for (i = 0; i < nspares; i++) { 20532082Seschrock uint64_t theguid; 20542082Seschrock VERIFY(nvlist_lookup_uint64(spares[i], 20552082Seschrock ZPOOL_CONFIG_GUID, &theguid) == 0); 20562082Seschrock if (theguid == guid) 20572082Seschrock break; 20582082Seschrock } 20592082Seschrock 20602082Seschrock if (i == nspares) 20612082Seschrock return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 20622082Seschrock 20632082Seschrock VERIFY(nvlist_add_string(spares[i], 20642082Seschrock ZPOOL_CONFIG_PATH, newpath) == 0); 20652082Seschrock spa_load_spares(spa); 20662082Seschrock spa->spa_sync_spares = B_TRUE; 20672082Seschrock return (spa_vdev_exit(spa, NULL, txg, 0)); 20682082Seschrock } else { 20692082Seschrock return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 20702082Seschrock } 20712082Seschrock } 20721354Seschrock 20731585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 20741585Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 20751585Sbonwick 20761354Seschrock spa_strfree(vd->vdev_path); 20771354Seschrock vd->vdev_path = spa_strdup(newpath); 20781354Seschrock 20791354Seschrock vdev_config_dirty(vd->vdev_top); 20801354Seschrock 20811354Seschrock return (spa_vdev_exit(spa, NULL, txg, 0)); 20821354Seschrock } 20831354Seschrock 20841354Seschrock /* 2085789Sahrens * ========================================================================== 2086789Sahrens * SPA Scrubbing 2087789Sahrens * ========================================================================== 2088789Sahrens */ 2089789Sahrens 20901544Seschrock void 20911544Seschrock spa_scrub_throttle(spa_t *spa, int direction) 20921544Seschrock { 20931544Seschrock mutex_enter(&spa->spa_scrub_lock); 
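/*
 * Add 'direction' to the throttle count under the scrub lock; when the
 * count drops back to zero, wake anyone waiting on spa_scrub_io_cv
 * (spa_scrub_thread() waits there while the scrub is throttled).
 */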
20941544Seschrock spa->spa_scrub_throttled += direction; 20951544Seschrock ASSERT(spa->spa_scrub_throttled >= 0); 20961544Seschrock if (spa->spa_scrub_throttled == 0) 20971544Seschrock cv_broadcast(&spa->spa_scrub_io_cv); 20981544Seschrock mutex_exit(&spa->spa_scrub_lock); 20991544Seschrock } 2100789Sahrens 2101789Sahrens static void 2102789Sahrens spa_scrub_io_done(zio_t *zio) 2103789Sahrens { 2104789Sahrens spa_t *spa = zio->io_spa; 2105789Sahrens 2106789Sahrens zio_buf_free(zio->io_data, zio->io_size); 2107789Sahrens 2108789Sahrens mutex_enter(&spa->spa_scrub_lock); 21091544Seschrock if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 21101775Sbillm vdev_t *vd = zio->io_vd ? zio->io_vd : spa->spa_root_vdev; 2111789Sahrens spa->spa_scrub_errors++; 2112789Sahrens mutex_enter(&vd->vdev_stat_lock); 2113789Sahrens vd->vdev_stat.vs_scrub_errors++; 2114789Sahrens mutex_exit(&vd->vdev_stat_lock); 2115789Sahrens } 21161544Seschrock if (--spa->spa_scrub_inflight == 0) { 21171544Seschrock cv_broadcast(&spa->spa_scrub_io_cv); 21181544Seschrock ASSERT(spa->spa_scrub_throttled == 0); 21191544Seschrock } 21201544Seschrock mutex_exit(&spa->spa_scrub_lock); 2121789Sahrens } 2122789Sahrens 2123789Sahrens static void 21241544Seschrock spa_scrub_io_start(spa_t *spa, blkptr_t *bp, int priority, int flags, 21251544Seschrock zbookmark_t *zb) 2126789Sahrens { 2127789Sahrens size_t size = BP_GET_LSIZE(bp); 2128789Sahrens void *data = zio_buf_alloc(size); 2129789Sahrens 2130789Sahrens mutex_enter(&spa->spa_scrub_lock); 2131789Sahrens spa->spa_scrub_inflight++; 2132789Sahrens mutex_exit(&spa->spa_scrub_lock); 2133789Sahrens 21341544Seschrock if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET) 21351544Seschrock flags |= ZIO_FLAG_SPECULATIVE; /* intent log block */ 21361544Seschrock 21371807Sbonwick flags |= ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL; 21381544Seschrock 2139789Sahrens zio_nowait(zio_read(NULL, spa, bp, data, size, 21401544Seschrock spa_scrub_io_done, NULL, priority, flags, zb)); 2141789Sahrens } 2142789Sahrens 2143789Sahrens /* ARGSUSED */ 2144789Sahrens static int 2145789Sahrens spa_scrub_cb(traverse_blk_cache_t *bc, spa_t *spa, void *a) 2146789Sahrens { 2147789Sahrens blkptr_t *bp = &bc->bc_blkptr; 21481775Sbillm vdev_t *vd = spa->spa_root_vdev; 21491775Sbillm dva_t *dva = bp->blk_dva; 21501775Sbillm int needs_resilver = B_FALSE; 21511775Sbillm int d; 2152789Sahrens 21531775Sbillm if (bc->bc_errno) { 2154789Sahrens /* 2155789Sahrens * We can't scrub this block, but we can continue to scrub 2156789Sahrens * the rest of the pool. Note the error and move along. 2157789Sahrens */ 2158789Sahrens mutex_enter(&spa->spa_scrub_lock); 2159789Sahrens spa->spa_scrub_errors++; 2160789Sahrens mutex_exit(&spa->spa_scrub_lock); 2161789Sahrens 21621775Sbillm mutex_enter(&vd->vdev_stat_lock); 21631775Sbillm vd->vdev_stat.vs_scrub_errors++; 21641775Sbillm mutex_exit(&vd->vdev_stat_lock); 2165789Sahrens 2166789Sahrens return (ERESTART); 2167789Sahrens } 2168789Sahrens 2169789Sahrens ASSERT(bp->blk_birth < spa->spa_scrub_maxtxg); 2170789Sahrens 21711775Sbillm for (d = 0; d < BP_GET_NDVAS(bp); d++) { 21721775Sbillm vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d])); 21731775Sbillm 21741775Sbillm ASSERT(vd != NULL); 21751775Sbillm 21761775Sbillm /* 21771775Sbillm * Keep track of how much data we've examined so that 21781775Sbillm * zpool(1M) status can make useful progress reports. 
21791775Sbillm */ 21801775Sbillm mutex_enter(&vd->vdev_stat_lock); 21811775Sbillm vd->vdev_stat.vs_scrub_examined += DVA_GET_ASIZE(&dva[d]); 21821775Sbillm mutex_exit(&vd->vdev_stat_lock); 2183789Sahrens 21841775Sbillm if (spa->spa_scrub_type == POOL_SCRUB_RESILVER) { 21851775Sbillm if (DVA_GET_GANG(&dva[d])) { 21861775Sbillm /* 21871775Sbillm * Gang members may be spread across multiple 21881775Sbillm * vdevs, so the best we can do is look at the 21891775Sbillm * pool-wide DTL. 21901775Sbillm * XXX -- it would be better to change our 21911775Sbillm * allocation policy to ensure that this can't 21921775Sbillm * happen. 21931775Sbillm */ 21941775Sbillm vd = spa->spa_root_vdev; 21951775Sbillm } 21961775Sbillm if (vdev_dtl_contains(&vd->vdev_dtl_map, 21971775Sbillm bp->blk_birth, 1)) 21981775Sbillm needs_resilver = B_TRUE; 2199789Sahrens } 22001775Sbillm } 22011775Sbillm 22021775Sbillm if (spa->spa_scrub_type == POOL_SCRUB_EVERYTHING) 2203789Sahrens spa_scrub_io_start(spa, bp, ZIO_PRIORITY_SCRUB, 22041544Seschrock ZIO_FLAG_SCRUB, &bc->bc_bookmark); 22051775Sbillm else if (needs_resilver) 22061775Sbillm spa_scrub_io_start(spa, bp, ZIO_PRIORITY_RESILVER, 22071775Sbillm ZIO_FLAG_RESILVER, &bc->bc_bookmark); 2208789Sahrens 2209789Sahrens return (0); 2210789Sahrens } 2211789Sahrens 2212789Sahrens static void 2213789Sahrens spa_scrub_thread(spa_t *spa) 2214789Sahrens { 2215789Sahrens callb_cpr_t cprinfo; 2216789Sahrens traverse_handle_t *th = spa->spa_scrub_th; 2217789Sahrens vdev_t *rvd = spa->spa_root_vdev; 2218789Sahrens pool_scrub_type_t scrub_type = spa->spa_scrub_type; 2219789Sahrens int error = 0; 2220789Sahrens boolean_t complete; 2221789Sahrens 2222789Sahrens CALLB_CPR_INIT(&cprinfo, &spa->spa_scrub_lock, callb_generic_cpr, FTAG); 2223789Sahrens 2224797Sbonwick /* 2225797Sbonwick * If we're restarting due to a snapshot create/delete, 2226797Sbonwick * wait for that to complete. 2227797Sbonwick */ 2228797Sbonwick txg_wait_synced(spa_get_dsl(spa), 0); 2229797Sbonwick 22301544Seschrock dprintf("start %s mintxg=%llu maxtxg=%llu\n", 22311544Seschrock scrub_type == POOL_SCRUB_RESILVER ? 
"resilver" : "scrub", 22321544Seschrock spa->spa_scrub_mintxg, spa->spa_scrub_maxtxg); 22331544Seschrock 22341544Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 22351544Seschrock vdev_reopen(rvd); /* purge all vdev caches */ 2236789Sahrens vdev_config_dirty(rvd); /* rewrite all disk labels */ 2237789Sahrens vdev_scrub_stat_update(rvd, scrub_type, B_FALSE); 22381544Seschrock spa_config_exit(spa, FTAG); 2239789Sahrens 2240789Sahrens mutex_enter(&spa->spa_scrub_lock); 2241789Sahrens spa->spa_scrub_errors = 0; 2242789Sahrens spa->spa_scrub_active = 1; 22431544Seschrock ASSERT(spa->spa_scrub_inflight == 0); 22441544Seschrock ASSERT(spa->spa_scrub_throttled == 0); 2245789Sahrens 2246789Sahrens while (!spa->spa_scrub_stop) { 2247789Sahrens CALLB_CPR_SAFE_BEGIN(&cprinfo); 22481544Seschrock while (spa->spa_scrub_suspended) { 2249789Sahrens spa->spa_scrub_active = 0; 2250789Sahrens cv_broadcast(&spa->spa_scrub_cv); 2251789Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2252789Sahrens spa->spa_scrub_active = 1; 2253789Sahrens } 2254789Sahrens CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_scrub_lock); 2255789Sahrens 2256789Sahrens if (spa->spa_scrub_restart_txg != 0) 2257789Sahrens break; 2258789Sahrens 2259789Sahrens mutex_exit(&spa->spa_scrub_lock); 2260789Sahrens error = traverse_more(th); 2261789Sahrens mutex_enter(&spa->spa_scrub_lock); 2262789Sahrens if (error != EAGAIN) 2263789Sahrens break; 22641544Seschrock 22651544Seschrock while (spa->spa_scrub_throttled > 0) 22661544Seschrock cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2267789Sahrens } 2268789Sahrens 2269789Sahrens while (spa->spa_scrub_inflight) 2270789Sahrens cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2271789Sahrens 22721601Sbonwick spa->spa_scrub_active = 0; 22731601Sbonwick cv_broadcast(&spa->spa_scrub_cv); 22741601Sbonwick 22751601Sbonwick mutex_exit(&spa->spa_scrub_lock); 22761601Sbonwick 22771601Sbonwick spa_config_enter(spa, RW_WRITER, FTAG); 22781601Sbonwick 22791601Sbonwick mutex_enter(&spa->spa_scrub_lock); 22801601Sbonwick 22811601Sbonwick /* 22821601Sbonwick * Note: we check spa_scrub_restart_txg under both spa_scrub_lock 22831601Sbonwick * AND the spa config lock to synchronize with any config changes 22841601Sbonwick * that revise the DTLs under spa_vdev_enter() / spa_vdev_exit(). 22851601Sbonwick */ 2286789Sahrens if (spa->spa_scrub_restart_txg != 0) 2287789Sahrens error = ERESTART; 2288789Sahrens 22891544Seschrock if (spa->spa_scrub_stop) 22901544Seschrock error = EINTR; 22911544Seschrock 2292789Sahrens /* 22931544Seschrock * Even if there were uncorrectable errors, we consider the scrub 22941544Seschrock * completed. The downside is that if there is a transient error during 22951544Seschrock * a resilver, we won't resilver the data properly to the target. But 22961544Seschrock * if the damage is permanent (more likely) we will resilver forever, 22971544Seschrock * which isn't really acceptable. Since there is enough information for 22981544Seschrock * the user to know what has failed and why, this seems like a more 22991544Seschrock * tractable approach. 2300789Sahrens */ 23011544Seschrock complete = (error == 0); 2302789Sahrens 23031544Seschrock dprintf("end %s to maxtxg=%llu %s, traverse=%d, %llu errors, stop=%u\n", 23041544Seschrock scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub", 2305789Sahrens spa->spa_scrub_maxtxg, complete ? 
"done" : "FAILED", 2306789Sahrens error, spa->spa_scrub_errors, spa->spa_scrub_stop); 2307789Sahrens 2308789Sahrens mutex_exit(&spa->spa_scrub_lock); 2309789Sahrens 2310789Sahrens /* 2311789Sahrens * If the scrub/resilver completed, update all DTLs to reflect this. 2312789Sahrens * Whether it succeeded or not, vacate all temporary scrub DTLs. 2313789Sahrens */ 2314789Sahrens vdev_dtl_reassess(rvd, spa_last_synced_txg(spa) + 1, 2315789Sahrens complete ? spa->spa_scrub_maxtxg : 0, B_TRUE); 2316789Sahrens vdev_scrub_stat_update(rvd, POOL_SCRUB_NONE, complete); 23171544Seschrock spa_errlog_rotate(spa); 23181601Sbonwick 23191544Seschrock spa_config_exit(spa, FTAG); 2320789Sahrens 2321789Sahrens mutex_enter(&spa->spa_scrub_lock); 2322789Sahrens 23231544Seschrock /* 23241544Seschrock * We may have finished replacing a device. 23251544Seschrock * Let the async thread assess this and handle the detach. 23261544Seschrock */ 23271544Seschrock spa_async_request(spa, SPA_ASYNC_REPLACE_DONE); 2328789Sahrens 2329789Sahrens /* 2330789Sahrens * If we were told to restart, our final act is to start a new scrub. 2331789Sahrens */ 2332789Sahrens if (error == ERESTART) 23331544Seschrock spa_async_request(spa, scrub_type == POOL_SCRUB_RESILVER ? 23341544Seschrock SPA_ASYNC_RESILVER : SPA_ASYNC_SCRUB); 2335789Sahrens 23361544Seschrock spa->spa_scrub_type = POOL_SCRUB_NONE; 23371544Seschrock spa->spa_scrub_active = 0; 23381544Seschrock spa->spa_scrub_thread = NULL; 23391544Seschrock cv_broadcast(&spa->spa_scrub_cv); 2340789Sahrens CALLB_CPR_EXIT(&cprinfo); /* drops &spa->spa_scrub_lock */ 2341789Sahrens thread_exit(); 2342789Sahrens } 2343789Sahrens 2344789Sahrens void 2345789Sahrens spa_scrub_suspend(spa_t *spa) 2346789Sahrens { 2347789Sahrens mutex_enter(&spa->spa_scrub_lock); 23481544Seschrock spa->spa_scrub_suspended++; 2349789Sahrens while (spa->spa_scrub_active) { 2350789Sahrens cv_broadcast(&spa->spa_scrub_cv); 2351789Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2352789Sahrens } 2353789Sahrens while (spa->spa_scrub_inflight) 2354789Sahrens cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2355789Sahrens mutex_exit(&spa->spa_scrub_lock); 2356789Sahrens } 2357789Sahrens 2358789Sahrens void 2359789Sahrens spa_scrub_resume(spa_t *spa) 2360789Sahrens { 2361789Sahrens mutex_enter(&spa->spa_scrub_lock); 23621544Seschrock ASSERT(spa->spa_scrub_suspended != 0); 23631544Seschrock if (--spa->spa_scrub_suspended == 0) 2364789Sahrens cv_broadcast(&spa->spa_scrub_cv); 2365789Sahrens mutex_exit(&spa->spa_scrub_lock); 2366789Sahrens } 2367789Sahrens 2368789Sahrens void 2369789Sahrens spa_scrub_restart(spa_t *spa, uint64_t txg) 2370789Sahrens { 2371789Sahrens /* 2372789Sahrens * Something happened (e.g. snapshot create/delete) that means 2373789Sahrens * we must restart any in-progress scrubs. The itinerary will 2374789Sahrens * fix this properly. 
2375789Sahrens */ 2376789Sahrens mutex_enter(&spa->spa_scrub_lock); 2377789Sahrens spa->spa_scrub_restart_txg = txg; 2378789Sahrens mutex_exit(&spa->spa_scrub_lock); 2379789Sahrens } 2380789Sahrens 23811544Seschrock int 23821544Seschrock spa_scrub(spa_t *spa, pool_scrub_type_t type, boolean_t force) 2383789Sahrens { 2384789Sahrens space_seg_t *ss; 2385789Sahrens uint64_t mintxg, maxtxg; 2386789Sahrens vdev_t *rvd = spa->spa_root_vdev; 2387789Sahrens 2388789Sahrens if ((uint_t)type >= POOL_SCRUB_TYPES) 2389789Sahrens return (ENOTSUP); 2390789Sahrens 23911544Seschrock mutex_enter(&spa->spa_scrub_lock); 23921544Seschrock 2393789Sahrens /* 2394789Sahrens * If there's a scrub or resilver already in progress, stop it. 2395789Sahrens */ 2396789Sahrens while (spa->spa_scrub_thread != NULL) { 2397789Sahrens /* 2398789Sahrens * Don't stop a resilver unless forced. 2399789Sahrens */ 24001544Seschrock if (spa->spa_scrub_type == POOL_SCRUB_RESILVER && !force) { 24011544Seschrock mutex_exit(&spa->spa_scrub_lock); 2402789Sahrens return (EBUSY); 24031544Seschrock } 2404789Sahrens spa->spa_scrub_stop = 1; 2405789Sahrens cv_broadcast(&spa->spa_scrub_cv); 2406789Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2407789Sahrens } 2408789Sahrens 2409789Sahrens /* 2410789Sahrens * Terminate the previous traverse. 2411789Sahrens */ 2412789Sahrens if (spa->spa_scrub_th != NULL) { 2413789Sahrens traverse_fini(spa->spa_scrub_th); 2414789Sahrens spa->spa_scrub_th = NULL; 2415789Sahrens } 2416789Sahrens 24171544Seschrock if (rvd == NULL) { 24181544Seschrock ASSERT(spa->spa_scrub_stop == 0); 24191544Seschrock ASSERT(spa->spa_scrub_type == type); 24201544Seschrock ASSERT(spa->spa_scrub_restart_txg == 0); 24211544Seschrock mutex_exit(&spa->spa_scrub_lock); 24221544Seschrock return (0); 24231544Seschrock } 2424789Sahrens 2425789Sahrens mintxg = TXG_INITIAL - 1; 2426789Sahrens maxtxg = spa_last_synced_txg(spa) + 1; 2427789Sahrens 24281544Seschrock mutex_enter(&rvd->vdev_dtl_lock); 2429789Sahrens 24301544Seschrock if (rvd->vdev_dtl_map.sm_space == 0) { 24311544Seschrock /* 24321544Seschrock * The pool-wide DTL is empty. 24331732Sbonwick * If this is a resilver, there's nothing to do except 24341732Sbonwick * check whether any in-progress replacements have completed. 24351544Seschrock */ 24361732Sbonwick if (type == POOL_SCRUB_RESILVER) { 24371544Seschrock type = POOL_SCRUB_NONE; 24381732Sbonwick spa_async_request(spa, SPA_ASYNC_REPLACE_DONE); 24391732Sbonwick } 24401544Seschrock } else { 24411544Seschrock /* 24421544Seschrock * The pool-wide DTL is non-empty. 24431544Seschrock * If this is a normal scrub, upgrade to a resilver instead. 24441544Seschrock */ 24451544Seschrock if (type == POOL_SCRUB_EVERYTHING) 24461544Seschrock type = POOL_SCRUB_RESILVER; 24471544Seschrock } 2448789Sahrens 24491544Seschrock if (type == POOL_SCRUB_RESILVER) { 2450789Sahrens /* 2451789Sahrens * Determine the resilvering boundaries. 2452789Sahrens * 2453789Sahrens * Note: (mintxg, maxtxg) is an open interval, 2454789Sahrens * i.e. mintxg and maxtxg themselves are not included. 2455789Sahrens * 2456789Sahrens * Note: for maxtxg, we MIN with spa_last_synced_txg(spa) + 1 2457789Sahrens * so we don't claim to resilver a txg that's still changing. 
2458789Sahrens */ 2459789Sahrens ss = avl_first(&rvd->vdev_dtl_map.sm_root); 24601544Seschrock mintxg = ss->ss_start - 1; 2461789Sahrens ss = avl_last(&rvd->vdev_dtl_map.sm_root); 24621544Seschrock maxtxg = MIN(ss->ss_end, maxtxg); 2463789Sahrens } 2464789Sahrens 24651544Seschrock mutex_exit(&rvd->vdev_dtl_lock); 24661544Seschrock 24671544Seschrock spa->spa_scrub_stop = 0; 24681544Seschrock spa->spa_scrub_type = type; 24691544Seschrock spa->spa_scrub_restart_txg = 0; 24701544Seschrock 24711544Seschrock if (type != POOL_SCRUB_NONE) { 24721544Seschrock spa->spa_scrub_mintxg = mintxg; 2473789Sahrens spa->spa_scrub_maxtxg = maxtxg; 2474789Sahrens spa->spa_scrub_th = traverse_init(spa, spa_scrub_cb, NULL, 24751635Sbonwick ADVANCE_PRE | ADVANCE_PRUNE | ADVANCE_ZIL, 24761635Sbonwick ZIO_FLAG_CANFAIL); 2477789Sahrens traverse_add_pool(spa->spa_scrub_th, mintxg, maxtxg); 2478789Sahrens spa->spa_scrub_thread = thread_create(NULL, 0, 2479789Sahrens spa_scrub_thread, spa, 0, &p0, TS_RUN, minclsyspri); 2480789Sahrens } 2481789Sahrens 24821544Seschrock mutex_exit(&spa->spa_scrub_lock); 24831544Seschrock 2484789Sahrens return (0); 2485789Sahrens } 2486789Sahrens 24871544Seschrock /* 24881544Seschrock * ========================================================================== 24891544Seschrock * SPA async task processing 24901544Seschrock * ========================================================================== 24911544Seschrock */ 24921544Seschrock 24931544Seschrock static void 24941544Seschrock spa_async_reopen(spa_t *spa) 2495789Sahrens { 24961544Seschrock vdev_t *rvd = spa->spa_root_vdev; 24971544Seschrock vdev_t *tvd; 24981544Seschrock int c; 24991544Seschrock 25001544Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 25011544Seschrock 25021544Seschrock for (c = 0; c < rvd->vdev_children; c++) { 25031544Seschrock tvd = rvd->vdev_child[c]; 25041544Seschrock if (tvd->vdev_reopen_wanted) { 25051544Seschrock tvd->vdev_reopen_wanted = 0; 25061544Seschrock vdev_reopen(tvd); 25071544Seschrock } 25081544Seschrock } 2509789Sahrens 25101544Seschrock spa_config_exit(spa, FTAG); 25111544Seschrock } 25121544Seschrock 25131544Seschrock static void 25141544Seschrock spa_async_thread(spa_t *spa) 25151544Seschrock { 25161544Seschrock int tasks; 25171544Seschrock 25181544Seschrock ASSERT(spa->spa_sync_on); 2519789Sahrens 25201544Seschrock mutex_enter(&spa->spa_async_lock); 25211544Seschrock tasks = spa->spa_async_tasks; 25221544Seschrock spa->spa_async_tasks = 0; 25231544Seschrock mutex_exit(&spa->spa_async_lock); 25241544Seschrock 25251544Seschrock /* 25261635Sbonwick * See if the config needs to be updated. 25271635Sbonwick */ 25281635Sbonwick if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 25291635Sbonwick mutex_enter(&spa_namespace_lock); 25301635Sbonwick spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 25311635Sbonwick mutex_exit(&spa_namespace_lock); 25321635Sbonwick } 25331635Sbonwick 25341635Sbonwick /* 25351544Seschrock * See if any devices need to be reopened. 25361544Seschrock */ 25371544Seschrock if (tasks & SPA_ASYNC_REOPEN) 25381544Seschrock spa_async_reopen(spa); 25391544Seschrock 25401544Seschrock /* 25411544Seschrock * If any devices are done replacing, detach them. 25421544Seschrock */ 25431544Seschrock if (tasks & SPA_ASYNC_REPLACE_DONE) 2544789Sahrens spa_vdev_replace_done(spa); 2545789Sahrens 25461544Seschrock /* 25471544Seschrock * Kick off a scrub. 
25481544Seschrock */ 25491544Seschrock if (tasks & SPA_ASYNC_SCRUB) 25501544Seschrock VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_TRUE) == 0); 25511544Seschrock 25521544Seschrock /* 25531544Seschrock * Kick off a resilver. 25541544Seschrock */ 25551544Seschrock if (tasks & SPA_ASYNC_RESILVER) 25561544Seschrock VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 25571544Seschrock 25581544Seschrock /* 25591544Seschrock * Let the world know that we're done. 25601544Seschrock */ 25611544Seschrock mutex_enter(&spa->spa_async_lock); 25621544Seschrock spa->spa_async_thread = NULL; 25631544Seschrock cv_broadcast(&spa->spa_async_cv); 25641544Seschrock mutex_exit(&spa->spa_async_lock); 25651544Seschrock thread_exit(); 25661544Seschrock } 25671544Seschrock 25681544Seschrock void 25691544Seschrock spa_async_suspend(spa_t *spa) 25701544Seschrock { 25711544Seschrock mutex_enter(&spa->spa_async_lock); 25721544Seschrock spa->spa_async_suspended++; 25731544Seschrock while (spa->spa_async_thread != NULL) 25741544Seschrock cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 25751544Seschrock mutex_exit(&spa->spa_async_lock); 25761544Seschrock } 25771544Seschrock 25781544Seschrock void 25791544Seschrock spa_async_resume(spa_t *spa) 25801544Seschrock { 25811544Seschrock mutex_enter(&spa->spa_async_lock); 25821544Seschrock ASSERT(spa->spa_async_suspended != 0); 25831544Seschrock spa->spa_async_suspended--; 25841544Seschrock mutex_exit(&spa->spa_async_lock); 25851544Seschrock } 25861544Seschrock 25871544Seschrock static void 25881544Seschrock spa_async_dispatch(spa_t *spa) 25891544Seschrock { 25901544Seschrock mutex_enter(&spa->spa_async_lock); 25911544Seschrock if (spa->spa_async_tasks && !spa->spa_async_suspended && 25921635Sbonwick spa->spa_async_thread == NULL && 25931635Sbonwick rootdir != NULL && !vn_is_readonly(rootdir)) 25941544Seschrock spa->spa_async_thread = thread_create(NULL, 0, 25951544Seschrock spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 25961544Seschrock mutex_exit(&spa->spa_async_lock); 25971544Seschrock } 25981544Seschrock 25991544Seschrock void 26001544Seschrock spa_async_request(spa_t *spa, int task) 26011544Seschrock { 26021544Seschrock mutex_enter(&spa->spa_async_lock); 26031544Seschrock spa->spa_async_tasks |= task; 26041544Seschrock mutex_exit(&spa->spa_async_lock); 2605789Sahrens } 2606789Sahrens 2607789Sahrens /* 2608789Sahrens * ========================================================================== 2609789Sahrens * SPA syncing routines 2610789Sahrens * ========================================================================== 2611789Sahrens */ 2612789Sahrens 2613789Sahrens static void 2614789Sahrens spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 2615789Sahrens { 2616789Sahrens bplist_t *bpl = &spa->spa_sync_bplist; 2617789Sahrens dmu_tx_t *tx; 2618789Sahrens blkptr_t blk; 2619789Sahrens uint64_t itor = 0; 2620789Sahrens zio_t *zio; 2621789Sahrens int error; 2622789Sahrens uint8_t c = 1; 2623789Sahrens 2624789Sahrens zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD); 2625789Sahrens 2626789Sahrens while (bplist_iterate(bpl, &itor, &blk) == 0) 2627789Sahrens zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL)); 2628789Sahrens 2629789Sahrens error = zio_wait(zio); 2630789Sahrens ASSERT3U(error, ==, 0); 2631789Sahrens 2632789Sahrens tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2633789Sahrens bplist_vacate(bpl, tx); 2634789Sahrens 2635789Sahrens /* 2636789Sahrens * Pre-dirty the first block so we sync to convergence faster. 
2637789Sahrens * (Usually only the first block is needed.) 2638789Sahrens */ 2639789Sahrens dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 2640789Sahrens dmu_tx_commit(tx); 2641789Sahrens } 2642789Sahrens 2643789Sahrens static void 26442082Seschrock spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 26452082Seschrock { 26462082Seschrock char *packed = NULL; 26472082Seschrock size_t nvsize = 0; 26482082Seschrock dmu_buf_t *db; 26492082Seschrock 26502082Seschrock VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 26512082Seschrock 26522082Seschrock packed = kmem_alloc(nvsize, KM_SLEEP); 26532082Seschrock 26542082Seschrock VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 26552082Seschrock KM_SLEEP) == 0); 26562082Seschrock 26572082Seschrock dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx); 26582082Seschrock 26592082Seschrock kmem_free(packed, nvsize); 26602082Seschrock 26612082Seschrock VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 26622082Seschrock dmu_buf_will_dirty(db, tx); 26632082Seschrock *(uint64_t *)db->db_data = nvsize; 26642082Seschrock dmu_buf_rele(db, FTAG); 26652082Seschrock } 26662082Seschrock 26672082Seschrock static void 26682082Seschrock spa_sync_spares(spa_t *spa, dmu_tx_t *tx) 26692082Seschrock { 26702082Seschrock nvlist_t *nvroot; 26712082Seschrock nvlist_t **spares; 26722082Seschrock int i; 26732082Seschrock 26742082Seschrock if (!spa->spa_sync_spares) 26752082Seschrock return; 26762082Seschrock 26772082Seschrock /* 26782082Seschrock * Update the MOS nvlist describing the list of available spares. 26792082Seschrock * spa_validate_spares() will have already made sure this nvlist is 26802082Seschrock * valid and the vdevs are labelled appropriately. 
26812082Seschrock */ 26822082Seschrock if (spa->spa_spares_object == 0) { 26832082Seschrock spa->spa_spares_object = dmu_object_alloc(spa->spa_meta_objset, 26842082Seschrock DMU_OT_PACKED_NVLIST, 1 << 14, 26852082Seschrock DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 26862082Seschrock VERIFY(zap_update(spa->spa_meta_objset, 26872082Seschrock DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SPARES, 26882082Seschrock sizeof (uint64_t), 1, &spa->spa_spares_object, tx) == 0); 26892082Seschrock } 26902082Seschrock 26912082Seschrock VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 26922082Seschrock if (spa->spa_nspares == 0) { 26932082Seschrock VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 26942082Seschrock NULL, 0) == 0); 26952082Seschrock } else { 26962082Seschrock spares = kmem_alloc(spa->spa_nspares * sizeof (void *), 26972082Seschrock KM_SLEEP); 26982082Seschrock for (i = 0; i < spa->spa_nspares; i++) 26992082Seschrock spares[i] = vdev_config_generate(spa, 27002082Seschrock spa->spa_spares[i], B_FALSE, B_TRUE); 27012082Seschrock VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 27022082Seschrock spares, spa->spa_nspares) == 0); 27032082Seschrock for (i = 0; i < spa->spa_nspares; i++) 27042082Seschrock nvlist_free(spares[i]); 27052082Seschrock kmem_free(spares, spa->spa_nspares * sizeof (void *)); 27062082Seschrock } 27072082Seschrock 27082082Seschrock spa_sync_nvlist(spa, spa->spa_spares_object, nvroot, tx); 27092082Seschrock 27102082Seschrock spa->spa_sync_spares = B_FALSE; 27112082Seschrock } 27122082Seschrock 27132082Seschrock static void 2714789Sahrens spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 2715789Sahrens { 2716789Sahrens nvlist_t *config; 2717789Sahrens 2718789Sahrens if (list_is_empty(&spa->spa_dirty_list)) 2719789Sahrens return; 2720789Sahrens 2721789Sahrens config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE); 2722789Sahrens 27231635Sbonwick if (spa->spa_config_syncing) 27241635Sbonwick nvlist_free(spa->spa_config_syncing); 27251635Sbonwick spa->spa_config_syncing = config; 2726789Sahrens 27272082Seschrock spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 2728789Sahrens } 2729789Sahrens 2730789Sahrens /* 2731789Sahrens * Sync the specified transaction group. New blocks may be dirtied as 2732789Sahrens * part of the process, so we iterate until it converges. 2733789Sahrens */ 2734789Sahrens void 2735789Sahrens spa_sync(spa_t *spa, uint64_t txg) 2736789Sahrens { 2737789Sahrens dsl_pool_t *dp = spa->spa_dsl_pool; 2738789Sahrens objset_t *mos = spa->spa_meta_objset; 2739789Sahrens bplist_t *bpl = &spa->spa_sync_bplist; 27401635Sbonwick vdev_t *rvd = spa->spa_root_vdev; 2741789Sahrens vdev_t *vd; 2742789Sahrens dmu_tx_t *tx; 2743789Sahrens int dirty_vdevs; 2744789Sahrens 2745789Sahrens /* 2746789Sahrens * Lock out configuration changes. 2747789Sahrens */ 27481544Seschrock spa_config_enter(spa, RW_READER, FTAG); 2749789Sahrens 2750789Sahrens spa->spa_syncing_txg = txg; 2751789Sahrens spa->spa_sync_pass = 0; 2752789Sahrens 27531544Seschrock VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj)); 2754789Sahrens 27552082Seschrock tx = dmu_tx_create_assigned(dp, txg); 27562082Seschrock 27572082Seschrock /* 27582082Seschrock * If we are upgrading to ZFS_VERSION_RAIDZ_DEFLATE this txg, 27592082Seschrock * set spa_deflate if we have no raid-z vdevs. 
27602082Seschrock */
27612082Seschrock if (spa->spa_ubsync.ub_version < ZFS_VERSION_RAIDZ_DEFLATE &&
27622082Seschrock spa->spa_uberblock.ub_version >= ZFS_VERSION_RAIDZ_DEFLATE) {
27632082Seschrock int i;
27642082Seschrock 
27652082Seschrock for (i = 0; i < rvd->vdev_children; i++) {
27662082Seschrock vd = rvd->vdev_child[i];
27672082Seschrock if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
27682082Seschrock break;
27692082Seschrock }
27702082Seschrock if (i == rvd->vdev_children) {
27712082Seschrock spa->spa_deflate = TRUE;
27722082Seschrock VERIFY(0 == zap_add(spa->spa_meta_objset,
27732082Seschrock DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
27742082Seschrock sizeof (uint64_t), 1, &spa->spa_deflate, tx));
27752082Seschrock }
27762082Seschrock }
27772082Seschrock 
2778789Sahrens /*
2779789Sahrens * If anything has changed in this txg, push the deferred frees
2780789Sahrens * from the previous txg. If not, leave them alone so that we
2781789Sahrens * don't generate work on an otherwise idle system.
2782789Sahrens */
2783789Sahrens if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
27842329Sek110237 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
27852329Sek110237 !txg_list_empty(&dp->dp_sync_tasks, txg))
2786789Sahrens spa_sync_deferred_frees(spa, txg);
2787789Sahrens 
2788789Sahrens /*
2789789Sahrens * Iterate to convergence.
2790789Sahrens */
2791789Sahrens do {
2792789Sahrens spa->spa_sync_pass++;
2793789Sahrens 
2794789Sahrens spa_sync_config_object(spa, tx);
27952082Seschrock spa_sync_spares(spa, tx);
27961544Seschrock spa_errlog_sync(spa, txg);
2797789Sahrens dsl_pool_sync(dp, txg);
2798789Sahrens 
2799789Sahrens dirty_vdevs = 0;
2800789Sahrens while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
2801789Sahrens vdev_sync(vd, txg);
2802789Sahrens dirty_vdevs++;
2803789Sahrens }
2804789Sahrens 
2805789Sahrens bplist_sync(bpl, tx);
2806789Sahrens } while (dirty_vdevs);
2807789Sahrens 
2808789Sahrens bplist_close(bpl);
2809789Sahrens 
2810789Sahrens dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
2811789Sahrens 
2812789Sahrens /*
2813789Sahrens * Rewrite the vdev configuration (which includes the uberblock)
2814789Sahrens * to commit the transaction group.
28151635Sbonwick *
28161635Sbonwick * If there are any dirty vdevs, sync the uberblock to all vdevs.
28171635Sbonwick * Otherwise, pick a random top-level vdev that's known to be
28181635Sbonwick * visible in the config cache (see spa_vdev_add() for details).
28191635Sbonwick * If the write fails, try the next vdev until we've tried them all.
2820789Sahrens */
28211635Sbonwick if (!list_is_empty(&spa->spa_dirty_list)) {
28221635Sbonwick VERIFY(vdev_config_sync(rvd, txg) == 0);
28231635Sbonwick } else {
28241635Sbonwick int children = rvd->vdev_children;
28251635Sbonwick int c0 = spa_get_random(children);
28261635Sbonwick int c;
28271635Sbonwick 
28281635Sbonwick for (c = 0; c < children; c++) {
28291635Sbonwick vd = rvd->vdev_child[(c0 + c) % children];
28301635Sbonwick if (vd->vdev_ms_array == 0)
28311635Sbonwick continue;
28321635Sbonwick if (vdev_config_sync(vd, txg) == 0)
28331635Sbonwick break;
28341635Sbonwick }
28351635Sbonwick if (c == children)
28361635Sbonwick VERIFY(vdev_config_sync(rvd, txg) == 0);
28371635Sbonwick }
28381635Sbonwick 
28392082Seschrock dmu_tx_commit(tx);
28402082Seschrock 
28411635Sbonwick /*
28421635Sbonwick * Clear the dirty config list.
28431635Sbonwick */ 28441635Sbonwick while ((vd = list_head(&spa->spa_dirty_list)) != NULL) 28451635Sbonwick vdev_config_clean(vd); 28461635Sbonwick 28471635Sbonwick /* 28481635Sbonwick * Now that the new config has synced transactionally, 28491635Sbonwick * let it become visible to the config cache. 28501635Sbonwick */ 28511635Sbonwick if (spa->spa_config_syncing != NULL) { 28521635Sbonwick spa_config_set(spa, spa->spa_config_syncing); 28531635Sbonwick spa->spa_config_txg = txg; 28541635Sbonwick spa->spa_config_syncing = NULL; 28551635Sbonwick } 2856789Sahrens 2857789Sahrens /* 2858789Sahrens * Make a stable copy of the fully synced uberblock. 2859789Sahrens * We use this as the root for pool traversals. 2860789Sahrens */ 2861789Sahrens spa->spa_traverse_wanted = 1; /* tells traverse_more() to stop */ 2862789Sahrens 2863789Sahrens spa_scrub_suspend(spa); /* stop scrubbing and finish I/Os */ 2864789Sahrens 2865789Sahrens rw_enter(&spa->spa_traverse_lock, RW_WRITER); 2866789Sahrens spa->spa_traverse_wanted = 0; 2867789Sahrens spa->spa_ubsync = spa->spa_uberblock; 2868789Sahrens rw_exit(&spa->spa_traverse_lock); 2869789Sahrens 2870789Sahrens spa_scrub_resume(spa); /* resume scrub with new ubsync */ 2871789Sahrens 2872789Sahrens /* 2873789Sahrens * Clean up the ZIL records for the synced txg. 2874789Sahrens */ 2875789Sahrens dsl_pool_zil_clean(dp); 2876789Sahrens 2877789Sahrens /* 2878789Sahrens * Update usable space statistics. 2879789Sahrens */ 2880789Sahrens while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 2881789Sahrens vdev_sync_done(vd, txg); 2882789Sahrens 2883789Sahrens /* 2884789Sahrens * It had better be the case that we didn't dirty anything 28852082Seschrock * since vdev_config_sync(). 2886789Sahrens */ 2887789Sahrens ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 2888789Sahrens ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 2889789Sahrens ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 2890789Sahrens ASSERT(bpl->bpl_queue == NULL); 2891789Sahrens 28921544Seschrock spa_config_exit(spa, FTAG); 28931544Seschrock 28941544Seschrock /* 28951544Seschrock * If any async tasks have been requested, kick them off. 28961544Seschrock */ 28971544Seschrock spa_async_dispatch(spa); 2898789Sahrens } 2899789Sahrens 2900789Sahrens /* 2901789Sahrens * Sync all pools. We don't want to hold the namespace lock across these 2902789Sahrens * operations, so we take a reference on the spa_t and drop the lock during the 2903789Sahrens * sync. 2904789Sahrens */ 2905789Sahrens void 2906789Sahrens spa_sync_allpools(void) 2907789Sahrens { 2908789Sahrens spa_t *spa = NULL; 2909789Sahrens mutex_enter(&spa_namespace_lock); 2910789Sahrens while ((spa = spa_next(spa)) != NULL) { 2911789Sahrens if (spa_state(spa) != POOL_STATE_ACTIVE) 2912789Sahrens continue; 2913789Sahrens spa_open_ref(spa, FTAG); 2914789Sahrens mutex_exit(&spa_namespace_lock); 2915789Sahrens txg_wait_synced(spa_get_dsl(spa), 0); 2916789Sahrens mutex_enter(&spa_namespace_lock); 2917789Sahrens spa_close(spa, FTAG); 2918789Sahrens } 2919789Sahrens mutex_exit(&spa_namespace_lock); 2920789Sahrens } 2921789Sahrens 2922789Sahrens /* 2923789Sahrens * ========================================================================== 2924789Sahrens * Miscellaneous routines 2925789Sahrens * ========================================================================== 2926789Sahrens */ 2927789Sahrens 2928789Sahrens /* 2929789Sahrens * Remove all pools in the system. 
2930789Sahrens */ 2931789Sahrens void 2932789Sahrens spa_evict_all(void) 2933789Sahrens { 2934789Sahrens spa_t *spa; 2935789Sahrens 2936789Sahrens /* 2937789Sahrens * Remove all cached state. All pools should be closed now, 2938789Sahrens * so every spa in the AVL tree should be unreferenced. 2939789Sahrens */ 2940789Sahrens mutex_enter(&spa_namespace_lock); 2941789Sahrens while ((spa = spa_next(NULL)) != NULL) { 2942789Sahrens /* 29431544Seschrock * Stop async tasks. The async thread may need to detach 29441544Seschrock * a device that's been replaced, which requires grabbing 29451544Seschrock * spa_namespace_lock, so we must drop it here. 2946789Sahrens */ 2947789Sahrens spa_open_ref(spa, FTAG); 2948789Sahrens mutex_exit(&spa_namespace_lock); 29491544Seschrock spa_async_suspend(spa); 2950789Sahrens VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0); 2951789Sahrens mutex_enter(&spa_namespace_lock); 2952789Sahrens spa_close(spa, FTAG); 2953789Sahrens 2954789Sahrens if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 2955789Sahrens spa_unload(spa); 2956789Sahrens spa_deactivate(spa); 2957789Sahrens } 2958789Sahrens spa_remove(spa); 2959789Sahrens } 2960789Sahrens mutex_exit(&spa_namespace_lock); 2961789Sahrens } 29621544Seschrock 29631544Seschrock vdev_t * 29641544Seschrock spa_lookup_by_guid(spa_t *spa, uint64_t guid) 29651544Seschrock { 29661544Seschrock return (vdev_lookup_by_guid(spa->spa_root_vdev, guid)); 29671544Seschrock } 29681760Seschrock 29691760Seschrock void 29701760Seschrock spa_upgrade(spa_t *spa) 29711760Seschrock { 29721760Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 29731760Seschrock 29741760Seschrock /* 29751760Seschrock * This should only be called for a non-faulted pool, and since a 29761760Seschrock * future version would result in an unopenable pool, this shouldn't be 29771760Seschrock * possible. 29781760Seschrock */ 29791760Seschrock ASSERT(spa->spa_uberblock.ub_version <= ZFS_VERSION); 29801760Seschrock 29811760Seschrock spa->spa_uberblock.ub_version = ZFS_VERSION; 29821760Seschrock vdev_config_dirty(spa->spa_root_vdev); 29831760Seschrock 29841760Seschrock spa_config_exit(spa, FTAG); 29852082Seschrock 29862082Seschrock txg_wait_synced(spa_get_dsl(spa), 0); 29871760Seschrock } 29882082Seschrock 29892082Seschrock boolean_t 29902082Seschrock spa_has_spare(spa_t *spa, uint64_t guid) 29912082Seschrock { 29922082Seschrock int i; 29932082Seschrock 29942082Seschrock for (i = 0; i < spa->spa_nspares; i++) 29952082Seschrock if (spa->spa_spares[i]->vdev_guid == guid) 29962082Seschrock return (B_TRUE); 29972082Seschrock 29982082Seschrock return (B_FALSE); 29992082Seschrock } 3000