/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	NULL
};

/* maximum scrub/resilver I/O queue per leaf vdev */
int zfs_scrub_limit = 10;

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
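 * (Illustration: with the top-level vdev_ashift at 12, i.e. 4K sectors,
 * a 6K psize first rounds up to 8K before being compared against the
 * children's asizes.)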
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

/*
 * Get the minimum allocatable size.  We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab.  This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	/*
	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
	 * so each child must provide at least 1/Nth of its asize.
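	 * (For example, a 5-wide raidz top-level vdev with a min_asize of
	 * 500G requires each child to supply at least 100G.)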
	 */
	if (pvd->vdev_ops == &vdev_raidz_ops)
		return (pvd->vdev_min_asize / pvd->vdev_children);

	return (pvd->vdev_min_asize);
}

void
vdev_set_min_asize(vdev_t *vd)
{
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (int c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (int c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
static vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
	}

	if (guid == 0) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			while (guid == 0 || spa_guid_exists(guid, 0))
				guid = spa_get_random(-1ULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			while (guid == 0 ||
			    spa_guid_exists(spa_guid(spa), guid))
				guid = spa_get_random(-1ULL);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
		    &vd->vdev_dtl_lock);
	}
	txg_list_create(&vd->vdev_ms_list,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EINVAL);

	if ((ops = vdev_getops(type)) == NULL)
		return (EINVAL);

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
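	 * (The spare, l2cache, and root-pool alloctypes below likewise carry
	 * an explicit guid in their nvlists.)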
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (EINVAL);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (EINVAL);

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (ENOTSUP);

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			/*
			 * Currently, we can only support up to 3 parity
			 * devices.
			 */
			if (nparity == 0 || nparity > 3)
				return (EINVAL);
			/*
			 * Previous versions could only support 1 or 2 parity
			 * devices.
			 */
			if (nparity > 1 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ2)
				return (ENOTSUP);
			if (nparity > 2 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ3)
				return (ENOTSUP);
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
				return (EINVAL);
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
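			 * (Pools created before SPA_VERSION_RAIDZ2 never
			 * stored an nparity value, so their raidz vdevs are
			 * implicitly single-parity.)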
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
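	 * (The DTL object and unspare flag when loading, the spare flag for
	 * root pools, the offline flag, and, except when importing, any
	 * persistent fault state.)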
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_smo.smo_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		/*
		 * When importing a pool, we want to ignore the persistent fault
		 * state, as the diagnosis made on another system may not be
		 * valid in the current context.
		 */
		if (spa->spa_load_state == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

void
vdev_free(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));

	/*
	 * Free all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd == vd->vdev_top)
		vdev_metaslab_fini(vd);

	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	/*
	 * Clean up vdev structure.
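	 * (I/O queue and cache, name strings, spare/l2cache registration,
	 * txg lists, DTL space maps, and locks.)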
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);
	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_unload(&vd->vdev_dtl[t]);
		space_map_destroy(&vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_min_asize = cvd->vdev_min_asize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
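	 * (The guid delta below is added to both cvd's guid and its guid_sum,
	 * keeping cvd's guid_sum consistent with its newly adopted guid.)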
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	metaslab_class_t *mc;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	if (vd->vdev_ms_shift == 0)	/* not being allocated from yet */
		return (0);

	/*
	 * Compute the raidz-deflation ratio.  Note, we hard-code
	 * in 128k (1 << 17) because it is the current "typical" blocksize.
	 * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change,
	 * or we will inconsistently account for existing bp's.
	 */
	vd->vdev_deflate_ratio = (1 << 17) /
	    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);

	ASSERT(oldc <= newc);

	if (vd->vdev_islog)
		mc = spa->spa_log_class;
	else
		mc = spa->spa_normal_class;

	if (vd->vdev_mg == NULL)
		vd->vdev_mg = metaslab_group_create(mc, vd);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (m = oldc; m < newc; m++) {
		space_map_obj_t smo = { 0, 0, 0 };
		if (txg == 0) {
			uint64_t object = 0;
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error)
				return (error);
			if (object != 0) {
				dmu_buf_t *db;
				error = dmu_bonus_hold(mos, object, FTAG, &db);
				if (error)
					return (error);
				ASSERT3U(db->db_size, >=, sizeof (smo));
				bcopy(db->db_data, &smo, sizeof (smo));
				ASSERT3U(smo.smo_object, ==, object);
				dmu_buf_rele(db, FTAG);
			}
		}
		vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
		    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
	}

	return (0);
}

void
vdev_metaslab_fini(vdev_t *vd)
{
	uint64_t m;
	uint64_t count = vd->vdev_ms_count;

	if (vd->vdev_ms != NULL) {
		for (m = 0; m < count; m++)
			if (vd->vdev_ms[m] != NULL)
				metaslab_fini(vd->vdev_ms[m]);
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
	}
}

typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset, zio->io_size, zio->io_data,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			zio_buf_free(zio->io_data, zio->io_size);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		zio_buf_free(zio->io_data, zio->io_size);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, 0, 0);
			zio->io_error = ENXIO;
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		while ((pio = zio_walk_parents(zio)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = ENXIO;

		kmem_free(vps, sizeof (*vps));
	}
}

/*
 * Determine whether this device is accessible by reading and writing
 * to several known locations: the pad regions of each vdev label
 * but the first (which we leave alone in case it contains a VTOC).
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time.  All zios that want to probe
	 * this vdev will become parents of the probe io.
	 */
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_TRYHARD;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE.  This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O.  That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad2)),
		    VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}

static void
vdev_open_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_open_thread = curthread;
	vd->vdev_open_error = vdev_open(vd);
	vd->vdev_open_thread = NULL;
}

void
vdev_open_children(vdev_t *vd)
{
	taskq_t *tq;
	int children = vd->vdev_children;

	tq = taskq_create("vdev_open", children, minclsyspri,
	    children, children, TASKQ_PREPOPULATE);

	for (int c = 0; c < children; c++)
		VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
		    TQ_SLEEP) != NULL);

	taskq_destroy(tq);
}

/*
 * Prepare a virtual device for access.
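 * Open the underlying device, validate its size and alignment, and
 * probe it with a few I/Os before declaring it open for business.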
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	uint64_t osize = 0;
	uint64_t asize, psize;
	uint64_t ashift = 0;

	ASSERT(vd->vdev_open_thread == curthread ||
	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	vd->vdev_cant_read = B_FALSE;
	vd->vdev_cant_write = B_FALSE;
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    VDEV_AUX_ERR_EXCEEDED);
		return (ENXIO);
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (ENXIO);
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);

	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, NULL, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    vd->vdev_stat.vs_aux);
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vd->vdev_state = VDEV_STATE_HEALTHY;
	}

	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}
	}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = 0;
		asize = osize;
	}

	vd->vdev_psize = psize;

	/*
	 * Make sure the allocatable size hasn't shrunk.
	 */
	if (asize < vd->vdev_min_asize) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LABEL);
		return (EINVAL);
	}

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For testing purposes, a higher ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
	} else {
		/*
		 * Make sure the alignment requirement hasn't increased.
		 */
		if (ashift > vd->vdev_top->vdev_ashift) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}
	}

	/*
	 * If all children are healthy and the asize has increased,
	 * then we've experienced dynamic LUN growth.  If automatic
	 * expansion is enabled then use the additional space.
	 */
	if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize &&
	    (vd->vdev_expanding || spa->spa_autoexpand))
		vd->vdev_asize = asize;

	vdev_set_min_asize(vd);

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_IO_FAILURE);
		return (error);
	}

	/*
	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
	 * resilver.  But don't do this if we are doing a reopen for a scrub,
	 * since this would just restart the scrub we are already doing.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
	    vdev_resilver_needed(vd, NULL, NULL))
		spa_async_request(spa, SPA_ASYNC_RESILVER);

	return (0);
}

/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.
 * This is only possible if /etc/zfs/zpool.cache was readonly at the time.
 * Otherwise, the vdev state will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *label;
	uint64_t guid, top_guid;
	uint64_t state;

	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c]) != 0)
			return (EBADF);

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {

		if ((label = vdev_label_read_config(vd)) == NULL) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
		    &guid) != 0 || guid != spa_guid(spa)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		/*
		 * If this vdev just became a top-level vdev because its
		 * sibling was detached, it will have adopted the parent's
		 * vdev guid -- but the label may or may not be on disk yet.
		 * Fortunately, either version of the label will have the
		 * same top guid, so if we're a top-level vdev, we can
		 * safely compare to that instead.
		 */
		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
		    &guid) != 0 ||
		    nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
		    &top_guid) != 0 ||
		    (vd->vdev_guid != guid &&
		    (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		nvlist_free(label);

		/*
		 * If spa->spa_load_verbatim is true, no need to check the
		 * state of the pool.
		 */
		if (!spa->spa_load_verbatim &&
		    spa->spa_load_state == SPA_LOAD_OPEN &&
		    state != POOL_STATE_ACTIVE)
			return (EBADF);

		/*
		 * If we were able to open and validate a vdev that was
		 * previously marked permanently unavailable, clear that state
		 * now.
		 */
		if (vd->vdev_not_present)
			vd->vdev_not_present = 0;
	}

	return (0);
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vd->vdev_ops->vdev_op_close(vd);

	vdev_cache_purge(vd);

	/*
	 * We record the previous state before we close it, so that if we are
	 * doing a reopen(), we don't generate FMA ereports if we notice that
	 * it's still faulted.
	 */
	vd->vdev_prevstate = vd->vdev_state;

	if (vd->vdev_offline)
		vd->vdev_state = VDEV_STATE_OFFLINE;
	else
		vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}

void
vdev_reopen(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vdev_close(vd);
	(void) vdev_open(vd);

	/*
	 * Call vdev_validate() here to make sure we have the same device.
	 * Otherwise, a device with an invalid label could be successfully
	 * opened in response to vdev_reopen().
	 */
	if (vd->vdev_aux) {
		(void) vdev_validate_aux(vd);
		if (vdev_readable(vd) && vdev_writeable(vd) &&
		    vd->vdev_aux == &spa->spa_l2cache &&
		    !l2arc_vdev_present(vd))
			l2arc_add_vdev(spa, vd);
	} else {
		(void) vdev_validate(vd);
	}

	/*
	 * Reassess parent vdev's health.
	 */
	vdev_propagate_state(vd);
}

int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
	int error;

	/*
	 * Normally, partial opens (e.g. of a mirror) are allowed.
	 * For a create, however, we want to fail the request if
	 * there are any components we can't open.
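	 * (vdev_label_init() below likewise fails the create if any label
	 * cannot be initialized.)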
	 */
	error = vdev_open(vd);

	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
		vdev_close(vd);
		return (error ? error : ENXIO);
	}

	/*
	 * Recursively initialize all labels.
	 */
	if ((error = vdev_label_init(vd, txg, isreplacing ?
	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
		vdev_close(vd);
		return (error);
	}

	return (0);
}

void
vdev_metaslab_set_size(vdev_t *vd)
{
	/*
	 * Aim for roughly 200 metaslabs per vdev.
	 */
	vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
}

void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
	ASSERT(vd == vd->vdev_top);
	ASSERT(ISP2(flags));

	if (flags & VDD_METASLAB)
		(void) txg_list_add(&vd->vdev_ms_list, arg, txg);

	if (flags & VDD_DTL)
		(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);

	(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}

/*
 * DTLs.
 *
 * A vdev's DTL (dirty time log) is the set of transaction groups for which
 * the vdev has less than perfect replication.  There are four kinds of DTL:
 *
 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
 *
 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
 *
 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
 *	scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
 *	txgs that was scrubbed.
 *
 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
 *	persistent errors or just some device being offline.
 *	Unlike the other three, the DTL_OUTAGE map is not generally
 *	maintained; it's only computed when needed, typically to
 *	determine whether a device can be detached.
 *
 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
 * either has the data or it doesn't.
 *
 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
 * if any child is less than fully replicated, then so is its parent.
14108241SJeff.Bonwick@Sun.COM * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs, 14118241SJeff.Bonwick@Sun.COM * comprising only those txgs which appear in 'maxfaults' or more children; 14128241SJeff.Bonwick@Sun.COM * those are the txgs we don't have enough replication to read. For example, 14138241SJeff.Bonwick@Sun.COM * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2); 14148241SJeff.Bonwick@Sun.COM * thus, its DTL_MISSING consists of the set of txgs that appear in more than 14158241SJeff.Bonwick@Sun.COM * two child DTL_MISSING maps. 14168241SJeff.Bonwick@Sun.COM * 14178241SJeff.Bonwick@Sun.COM * It should be clear from the above that to compute the DTLs and outage maps 14188241SJeff.Bonwick@Sun.COM * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps. 14198241SJeff.Bonwick@Sun.COM * Therefore, that is all we keep on disk. When loading the pool, or after 14208241SJeff.Bonwick@Sun.COM * a configuration change, we generate all other DTLs from first principles. 14218241SJeff.Bonwick@Sun.COM */ 1422789Sahrens void 14238241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 1424789Sahrens { 14258241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[t]; 14268241SJeff.Bonwick@Sun.COM 14278241SJeff.Bonwick@Sun.COM ASSERT(t < DTL_TYPES); 14288241SJeff.Bonwick@Sun.COM ASSERT(vd != vd->vdev_spa->spa_root_vdev); 14298241SJeff.Bonwick@Sun.COM 1430789Sahrens mutex_enter(sm->sm_lock); 1431789Sahrens if (!space_map_contains(sm, txg, size)) 1432789Sahrens space_map_add(sm, txg, size); 1433789Sahrens mutex_exit(sm->sm_lock); 1434789Sahrens } 1435789Sahrens 14368241SJeff.Bonwick@Sun.COM boolean_t 14378241SJeff.Bonwick@Sun.COM vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 1438789Sahrens { 14398241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[t]; 14408241SJeff.Bonwick@Sun.COM boolean_t dirty = B_FALSE; 14418241SJeff.Bonwick@Sun.COM 14428241SJeff.Bonwick@Sun.COM ASSERT(t < DTL_TYPES); 14438241SJeff.Bonwick@Sun.COM ASSERT(vd != vd->vdev_spa->spa_root_vdev); 1444789Sahrens 1445789Sahrens mutex_enter(sm->sm_lock); 14468241SJeff.Bonwick@Sun.COM if (sm->sm_space != 0) 14478241SJeff.Bonwick@Sun.COM dirty = space_map_contains(sm, txg, size); 1448789Sahrens mutex_exit(sm->sm_lock); 1449789Sahrens 1450789Sahrens return (dirty); 1451789Sahrens } 1452789Sahrens 14538241SJeff.Bonwick@Sun.COM boolean_t 14548241SJeff.Bonwick@Sun.COM vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t) 14558241SJeff.Bonwick@Sun.COM { 14568241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[t]; 14578241SJeff.Bonwick@Sun.COM boolean_t empty; 14588241SJeff.Bonwick@Sun.COM 14598241SJeff.Bonwick@Sun.COM mutex_enter(sm->sm_lock); 14608241SJeff.Bonwick@Sun.COM empty = (sm->sm_space == 0); 14618241SJeff.Bonwick@Sun.COM mutex_exit(sm->sm_lock); 14628241SJeff.Bonwick@Sun.COM 14638241SJeff.Bonwick@Sun.COM return (empty); 14648241SJeff.Bonwick@Sun.COM } 14658241SJeff.Bonwick@Sun.COM 1466789Sahrens /* 1467789Sahrens * Reassess DTLs after a config change or scrub completion. 
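 *
 * In outline: recurse over all children first; for a leaf vdev, fold the
 * most recent scrub results into DTL_MISSING/DTL_PARTIAL and recompute
 * DTL_OUTAGE; for an interior vdev, rebuild each DTL type from the
 * children's maps using a per-type refcount threshold ('minref' below).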
1468789Sahrens */ 1469789Sahrens void 1470789Sahrens vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done) 1471789Sahrens { 14721544Seschrock spa_t *spa = vd->vdev_spa; 14738241SJeff.Bonwick@Sun.COM avl_tree_t reftree; 14748241SJeff.Bonwick@Sun.COM int minref; 14758241SJeff.Bonwick@Sun.COM 14768241SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 14778241SJeff.Bonwick@Sun.COM 14788241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) 14798241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(vd->vdev_child[c], txg, 14808241SJeff.Bonwick@Sun.COM scrub_txg, scrub_done); 14818241SJeff.Bonwick@Sun.COM 14828241SJeff.Bonwick@Sun.COM if (vd == spa->spa_root_vdev) 14838241SJeff.Bonwick@Sun.COM return; 14848241SJeff.Bonwick@Sun.COM 14858241SJeff.Bonwick@Sun.COM if (vd->vdev_ops->vdev_op_leaf) { 1486789Sahrens mutex_enter(&vd->vdev_dtl_lock); 14877046Sahrens if (scrub_txg != 0 && 14887046Sahrens (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) { 14897046Sahrens /* XXX should check scrub_done? */ 14907046Sahrens /* 14917046Sahrens * We completed a scrub up to scrub_txg. If we 14927046Sahrens * did it without rebooting, then the scrub dtl 14937046Sahrens * will be valid, so excise the old region and 14947046Sahrens * fold in the scrub dtl. Otherwise, leave the 14957046Sahrens * dtl as-is if there was an error. 14968241SJeff.Bonwick@Sun.COM * 14978241SJeff.Bonwick@Sun.COM * There's little trick here: to excise the beginning 14988241SJeff.Bonwick@Sun.COM * of the DTL_MISSING map, we put it into a reference 14998241SJeff.Bonwick@Sun.COM * tree and then add a segment with refcnt -1 that 15008241SJeff.Bonwick@Sun.COM * covers the range [0, scrub_txg). This means 15018241SJeff.Bonwick@Sun.COM * that each txg in that range has refcnt -1 or 0. 15028241SJeff.Bonwick@Sun.COM * We then add DTL_SCRUB with a refcnt of 2, so that 15038241SJeff.Bonwick@Sun.COM * entries in the range [0, scrub_txg) will have a 15048241SJeff.Bonwick@Sun.COM * positive refcnt -- either 1 or 2. We then convert 15058241SJeff.Bonwick@Sun.COM * the reference tree into the new DTL_MISSING map. 
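 *
 * A worked illustration with invented numbers: suppose scrub_txg = 100,
 * DTL_MISSING = [50, 200) and DTL_SCRUB = [60, 70).  After the +1 and -1
 * steps, [0, 50) has refcnt -1, [50, 100) has 0 and [100, 200) has 1;
 * adding DTL_SCRUB at +2 lifts [60, 70) to 2.  Generating with minref 1
 * therefore yields [60, 70) plus [100, 200): the txgs the scrub could
 * not repair plus everything at or beyond scrub_txg that was already
 * missing.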
15067046Sahrens */ 15078241SJeff.Bonwick@Sun.COM space_map_ref_create(&reftree); 15088241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree, 15098241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_MISSING], 1); 15108241SJeff.Bonwick@Sun.COM space_map_ref_add_seg(&reftree, 0, scrub_txg, -1); 15118241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree, 15128241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_SCRUB], 2); 15138241SJeff.Bonwick@Sun.COM space_map_ref_generate_map(&reftree, 15148241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_MISSING], 1); 15158241SJeff.Bonwick@Sun.COM space_map_ref_destroy(&reftree); 1516789Sahrens } 15178241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); 15188241SJeff.Bonwick@Sun.COM space_map_walk(&vd->vdev_dtl[DTL_MISSING], 15198241SJeff.Bonwick@Sun.COM space_map_add, &vd->vdev_dtl[DTL_PARTIAL]); 1520789Sahrens if (scrub_done) 15218241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL); 15228241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); 15238241SJeff.Bonwick@Sun.COM if (!vdev_readable(vd)) 15248241SJeff.Bonwick@Sun.COM space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); 15258241SJeff.Bonwick@Sun.COM else 15268241SJeff.Bonwick@Sun.COM space_map_walk(&vd->vdev_dtl[DTL_MISSING], 15278241SJeff.Bonwick@Sun.COM space_map_add, &vd->vdev_dtl[DTL_OUTAGE]); 1528789Sahrens mutex_exit(&vd->vdev_dtl_lock); 15297046Sahrens 15301732Sbonwick if (txg != 0) 15311732Sbonwick vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); 1532789Sahrens return; 1533789Sahrens } 1534789Sahrens 1535789Sahrens mutex_enter(&vd->vdev_dtl_lock); 15368241SJeff.Bonwick@Sun.COM for (int t = 0; t < DTL_TYPES; t++) { 15378241SJeff.Bonwick@Sun.COM if (t == DTL_SCRUB) 15388241SJeff.Bonwick@Sun.COM continue; /* leaf vdevs only */ 15398241SJeff.Bonwick@Sun.COM if (t == DTL_PARTIAL) 15408241SJeff.Bonwick@Sun.COM minref = 1; /* i.e. 
non-zero */ 15418241SJeff.Bonwick@Sun.COM else if (vd->vdev_nparity != 0) 15428241SJeff.Bonwick@Sun.COM minref = vd->vdev_nparity + 1; /* RAID-Z */ 15438241SJeff.Bonwick@Sun.COM else 15448241SJeff.Bonwick@Sun.COM minref = vd->vdev_children; /* any kind of mirror */ 15458241SJeff.Bonwick@Sun.COM space_map_ref_create(&reftree); 15468241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) { 15478241SJeff.Bonwick@Sun.COM vdev_t *cvd = vd->vdev_child[c]; 15488241SJeff.Bonwick@Sun.COM mutex_enter(&cvd->vdev_dtl_lock); 15498241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree, &cvd->vdev_dtl[t], 1); 15508241SJeff.Bonwick@Sun.COM mutex_exit(&cvd->vdev_dtl_lock); 15518241SJeff.Bonwick@Sun.COM } 15528241SJeff.Bonwick@Sun.COM space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref); 15538241SJeff.Bonwick@Sun.COM space_map_ref_destroy(&reftree); 15548241SJeff.Bonwick@Sun.COM } 1555789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1556789Sahrens } 1557789Sahrens 1558789Sahrens static int 1559789Sahrens vdev_dtl_load(vdev_t *vd) 1560789Sahrens { 1561789Sahrens spa_t *spa = vd->vdev_spa; 15628241SJeff.Bonwick@Sun.COM space_map_obj_t *smo = &vd->vdev_dtl_smo; 15631732Sbonwick objset_t *mos = spa->spa_meta_objset; 1564789Sahrens dmu_buf_t *db; 1565789Sahrens int error; 1566789Sahrens 1567789Sahrens ASSERT(vd->vdev_children == 0); 1568789Sahrens 1569789Sahrens if (smo->smo_object == 0) 1570789Sahrens return (0); 1571789Sahrens 15721732Sbonwick if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0) 15731544Seschrock return (error); 15741732Sbonwick 15754944Smaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 15764944Smaybee bcopy(db->db_data, smo, sizeof (*smo)); 15771544Seschrock dmu_buf_rele(db, FTAG); 1578789Sahrens 1579789Sahrens mutex_enter(&vd->vdev_dtl_lock); 15808241SJeff.Bonwick@Sun.COM error = space_map_load(&vd->vdev_dtl[DTL_MISSING], 15818241SJeff.Bonwick@Sun.COM NULL, SM_ALLOC, smo, mos); 1582789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1583789Sahrens 1584789Sahrens return (error); 1585789Sahrens } 1586789Sahrens 1587789Sahrens void 1588789Sahrens vdev_dtl_sync(vdev_t *vd, uint64_t txg) 1589789Sahrens { 1590789Sahrens spa_t *spa = vd->vdev_spa; 15918241SJeff.Bonwick@Sun.COM space_map_obj_t *smo = &vd->vdev_dtl_smo; 15928241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[DTL_MISSING]; 15931732Sbonwick objset_t *mos = spa->spa_meta_objset; 1594789Sahrens space_map_t smsync; 1595789Sahrens kmutex_t smlock; 1596789Sahrens dmu_buf_t *db; 1597789Sahrens dmu_tx_t *tx; 1598789Sahrens 1599789Sahrens tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 1600789Sahrens 1601789Sahrens if (vd->vdev_detached) { 1602789Sahrens if (smo->smo_object != 0) { 16031732Sbonwick int err = dmu_object_free(mos, smo->smo_object, tx); 1604789Sahrens ASSERT3U(err, ==, 0); 1605789Sahrens smo->smo_object = 0; 1606789Sahrens } 1607789Sahrens dmu_tx_commit(tx); 1608789Sahrens return; 1609789Sahrens } 1610789Sahrens 1611789Sahrens if (smo->smo_object == 0) { 1612789Sahrens ASSERT(smo->smo_objsize == 0); 1613789Sahrens ASSERT(smo->smo_alloc == 0); 16141732Sbonwick smo->smo_object = dmu_object_alloc(mos, 1615789Sahrens DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT, 1616789Sahrens DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx); 1617789Sahrens ASSERT(smo->smo_object != 0); 1618789Sahrens vdev_config_dirty(vd->vdev_top); 1619789Sahrens } 1620789Sahrens 1621789Sahrens mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL); 1622789Sahrens 1623789Sahrens space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift, 
1624789Sahrens &smlock); 1625789Sahrens 1626789Sahrens mutex_enter(&smlock); 1627789Sahrens 1628789Sahrens mutex_enter(&vd->vdev_dtl_lock); 16291732Sbonwick space_map_walk(sm, space_map_add, &smsync); 1630789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1631789Sahrens 16321732Sbonwick space_map_truncate(smo, mos, tx); 16331732Sbonwick space_map_sync(&smsync, SM_ALLOC, smo, mos, tx); 1634789Sahrens 1635789Sahrens space_map_destroy(&smsync); 1636789Sahrens 1637789Sahrens mutex_exit(&smlock); 1638789Sahrens mutex_destroy(&smlock); 1639789Sahrens 16401732Sbonwick VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)); 1641789Sahrens dmu_buf_will_dirty(db, tx); 16424944Smaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 16434944Smaybee bcopy(smo, db->db_data, sizeof (*smo)); 16441544Seschrock dmu_buf_rele(db, FTAG); 1645789Sahrens 1646789Sahrens dmu_tx_commit(tx); 1647789Sahrens } 1648789Sahrens 16497046Sahrens /* 16508241SJeff.Bonwick@Sun.COM * Determine whether the specified vdev can be offlined/detached/removed 16518241SJeff.Bonwick@Sun.COM * without losing data. 16528241SJeff.Bonwick@Sun.COM */ 16538241SJeff.Bonwick@Sun.COM boolean_t 16548241SJeff.Bonwick@Sun.COM vdev_dtl_required(vdev_t *vd) 16558241SJeff.Bonwick@Sun.COM { 16568241SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 16578241SJeff.Bonwick@Sun.COM vdev_t *tvd = vd->vdev_top; 16588241SJeff.Bonwick@Sun.COM uint8_t cant_read = vd->vdev_cant_read; 16598241SJeff.Bonwick@Sun.COM boolean_t required; 16608241SJeff.Bonwick@Sun.COM 16618241SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 16628241SJeff.Bonwick@Sun.COM 16638241SJeff.Bonwick@Sun.COM if (vd == spa->spa_root_vdev || vd == tvd) 16648241SJeff.Bonwick@Sun.COM return (B_TRUE); 16658241SJeff.Bonwick@Sun.COM 16668241SJeff.Bonwick@Sun.COM /* 16678241SJeff.Bonwick@Sun.COM * Temporarily mark the device as unreadable, and then determine 16688241SJeff.Bonwick@Sun.COM * whether this results in any DTL outages in the top-level vdev. 16698241SJeff.Bonwick@Sun.COM * If not, we can safely offline/detach/remove the device. 16708241SJeff.Bonwick@Sun.COM */ 16718241SJeff.Bonwick@Sun.COM vd->vdev_cant_read = B_TRUE; 16728241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 16738241SJeff.Bonwick@Sun.COM required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 16748241SJeff.Bonwick@Sun.COM vd->vdev_cant_read = cant_read; 16758241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 16768241SJeff.Bonwick@Sun.COM 16778241SJeff.Bonwick@Sun.COM return (required); 16788241SJeff.Bonwick@Sun.COM } 16798241SJeff.Bonwick@Sun.COM 16808241SJeff.Bonwick@Sun.COM /* 16817046Sahrens * Determine if resilver is needed, and if so the txg range. 
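 *
 * A caller would use it roughly as follows (illustrative sketch only,
 * not code from this file):
 *
 *	uint64_t rmin, rmax;
 *	if (vdev_resilver_needed(rvd, &rmin, &rmax))
 *		... schedule a resilver covering txgs [rmin, rmax] ...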
16827046Sahrens */ 16837046Sahrens boolean_t 16847046Sahrens vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp) 16857046Sahrens { 16867046Sahrens boolean_t needed = B_FALSE; 16877046Sahrens uint64_t thismin = UINT64_MAX; 16887046Sahrens uint64_t thismax = 0; 16897046Sahrens 16907046Sahrens if (vd->vdev_children == 0) { 16917046Sahrens mutex_enter(&vd->vdev_dtl_lock); 16928241SJeff.Bonwick@Sun.COM if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 && 16938241SJeff.Bonwick@Sun.COM vdev_writeable(vd)) { 16947046Sahrens space_seg_t *ss; 16957046Sahrens 16968241SJeff.Bonwick@Sun.COM ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root); 16977046Sahrens thismin = ss->ss_start - 1; 16988241SJeff.Bonwick@Sun.COM ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root); 16997046Sahrens thismax = ss->ss_end; 17007046Sahrens needed = B_TRUE; 17017046Sahrens } 17027046Sahrens mutex_exit(&vd->vdev_dtl_lock); 17037046Sahrens } else { 17048241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) { 17057046Sahrens vdev_t *cvd = vd->vdev_child[c]; 17067046Sahrens uint64_t cmin, cmax; 17077046Sahrens 17087046Sahrens if (vdev_resilver_needed(cvd, &cmin, &cmax)) { 17097046Sahrens thismin = MIN(thismin, cmin); 17107046Sahrens thismax = MAX(thismax, cmax); 17117046Sahrens needed = B_TRUE; 17127046Sahrens } 17137046Sahrens } 17147046Sahrens } 17157046Sahrens 17167046Sahrens if (needed && minp) { 17177046Sahrens *minp = thismin; 17187046Sahrens *maxp = thismax; 17197046Sahrens } 17207046Sahrens return (needed); 17217046Sahrens } 17227046Sahrens 17231986Seschrock void 17241544Seschrock vdev_load(vdev_t *vd) 1725789Sahrens { 1726789Sahrens /* 1727789Sahrens * Recursively load all children. 1728789Sahrens */ 17298241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) 17301986Seschrock vdev_load(vd->vdev_child[c]); 1731789Sahrens 1732789Sahrens /* 17331585Sbonwick * If this is a top-level vdev, initialize its metaslabs. 1734789Sahrens */ 17351986Seschrock if (vd == vd->vdev_top && 17361986Seschrock (vd->vdev_ashift == 0 || vd->vdev_asize == 0 || 17371986Seschrock vdev_metaslab_init(vd, 0) != 0)) 17381986Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 17391986Seschrock VDEV_AUX_CORRUPT_DATA); 1740789Sahrens 1741789Sahrens /* 1742789Sahrens * If this is a leaf vdev, load its DTL. 1743789Sahrens */ 17441986Seschrock if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0) 17451986Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 17461986Seschrock VDEV_AUX_CORRUPT_DATA); 1747789Sahrens } 1748789Sahrens 17492082Seschrock /* 17505450Sbrendan * The special vdev case is used for hot spares and l2cache devices. Its 17515450Sbrendan * sole purpose is to set the vdev state for the associated vdev. To do this, 17525450Sbrendan * we make sure that we can open the underlying device, then try to read the 17535450Sbrendan * label, and make sure that the label is sane and that it hasn't been 17545450Sbrendan * repurposed to another pool.
17552082Seschrock */ 17562082Seschrock int 17575450Sbrendan vdev_validate_aux(vdev_t *vd) 17582082Seschrock { 17592082Seschrock nvlist_t *label; 17602082Seschrock uint64_t guid, version; 17612082Seschrock uint64_t state; 17622082Seschrock 17637754SJeff.Bonwick@Sun.COM if (!vdev_readable(vd)) 17646643Seschrock return (0); 17656643Seschrock 17662082Seschrock if ((label = vdev_label_read_config(vd)) == NULL) { 17672082Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 17682082Seschrock VDEV_AUX_CORRUPT_DATA); 17692082Seschrock return (-1); 17702082Seschrock } 17712082Seschrock 17722082Seschrock if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 || 17734577Sahrens version > SPA_VERSION || 17742082Seschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 || 17752082Seschrock guid != vd->vdev_guid || 17762082Seschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) { 17772082Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 17782082Seschrock VDEV_AUX_CORRUPT_DATA); 17792082Seschrock nvlist_free(label); 17802082Seschrock return (-1); 17812082Seschrock } 17822082Seschrock 17832082Seschrock /* 17842082Seschrock * We don't actually check the pool state here. If it's in fact in 17852082Seschrock * use by another pool, we update this fact on the fly when requested. 17862082Seschrock */ 17872082Seschrock nvlist_free(label); 17882082Seschrock return (0); 17892082Seschrock } 17902082Seschrock 1791789Sahrens void 1792789Sahrens vdev_sync_done(vdev_t *vd, uint64_t txg) 1793789Sahrens { 1794789Sahrens metaslab_t *msp; 1795789Sahrens 1796789Sahrens while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) 1797789Sahrens metaslab_sync_done(msp, txg); 1798789Sahrens } 1799789Sahrens 1800789Sahrens void 1801789Sahrens vdev_sync(vdev_t *vd, uint64_t txg) 1802789Sahrens { 1803789Sahrens spa_t *spa = vd->vdev_spa; 1804789Sahrens vdev_t *lvd; 1805789Sahrens metaslab_t *msp; 18061732Sbonwick dmu_tx_t *tx; 1807789Sahrens 18081732Sbonwick if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) { 18091732Sbonwick ASSERT(vd == vd->vdev_top); 18101732Sbonwick tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 18111732Sbonwick vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, 18121732Sbonwick DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); 18131732Sbonwick ASSERT(vd->vdev_ms_array != 0); 18141732Sbonwick vdev_config_dirty(vd); 18151732Sbonwick dmu_tx_commit(tx); 18161732Sbonwick } 1817789Sahrens 18181732Sbonwick while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { 1819789Sahrens metaslab_sync(msp, txg); 18201732Sbonwick (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); 18211732Sbonwick } 1822789Sahrens 1823789Sahrens while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) 1824789Sahrens vdev_dtl_sync(lvd, txg); 1825789Sahrens 1826789Sahrens (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); 1827789Sahrens } 1828789Sahrens 1829789Sahrens uint64_t 1830789Sahrens vdev_psize_to_asize(vdev_t *vd, uint64_t psize) 1831789Sahrens { 1832789Sahrens return (vd->vdev_ops->vdev_op_asize(vd, psize)); 1833789Sahrens } 1834789Sahrens 18354451Seschrock /* 18364451Seschrock * Mark the given vdev faulted. A faulted vdev behaves as if the device could 18374451Seschrock * not be opened, and no I/O is attempted. 
18384451Seschrock */ 1839789Sahrens int 18404451Seschrock vdev_fault(spa_t *spa, uint64_t guid) 18414451Seschrock { 18426643Seschrock vdev_t *vd; 18434451Seschrock 18447754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa); 18454451Seschrock 18466643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 18477754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV)); 18487754SJeff.Bonwick@Sun.COM 18494451Seschrock if (!vd->vdev_ops->vdev_op_leaf) 18507754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 18514451Seschrock 18524451Seschrock /* 18534451Seschrock * Faulted state takes precedence over degraded. 18544451Seschrock */ 18554451Seschrock vd->vdev_faulted = 1ULL; 18564451Seschrock vd->vdev_degraded = 0ULL; 18577754SJeff.Bonwick@Sun.COM vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED); 18584451Seschrock 18594451Seschrock /* 18608123SDavid.Marker@sun.com * If marking the vdev as faulted causes the top-level vdev to become 18614451Seschrock * unavailable, then back off and simply mark the vdev as degraded 18624451Seschrock * instead. 18634451Seschrock */ 18646643Seschrock if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) { 18654451Seschrock vd->vdev_degraded = 1ULL; 18664451Seschrock vd->vdev_faulted = 0ULL; 18674451Seschrock 18684451Seschrock /* 18694451Seschrock * If we reopen the device and it's not dead, only then do we 18704451Seschrock * mark it degraded. 18714451Seschrock */ 18724451Seschrock vdev_reopen(vd); 18734451Seschrock 18745329Sgw25295 if (vdev_readable(vd)) { 18754451Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 18764451Seschrock VDEV_AUX_ERR_EXCEEDED); 18774451Seschrock } 18784451Seschrock } 18794451Seschrock 18807754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 18814451Seschrock } 18824451Seschrock 18834451Seschrock /* 18844451Seschrock * Mark the given vdev degraded. A degraded vdev is purely an indication to the 18854451Seschrock * user that something is wrong. The vdev continues to operate as normal as far 18864451Seschrock * as I/O is concerned. 18874451Seschrock */ 18884451Seschrock int 18894451Seschrock vdev_degrade(spa_t *spa, uint64_t guid) 18904451Seschrock { 18916643Seschrock vdev_t *vd; 18924451Seschrock 18937754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa); 18944451Seschrock 18956643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 18967754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV)); 18977754SJeff.Bonwick@Sun.COM 18984451Seschrock if (!vd->vdev_ops->vdev_op_leaf) 18997754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 19004451Seschrock 19014451Seschrock /* 19024451Seschrock * If the vdev is already faulted, then don't do anything. 19034451Seschrock */ 19047754SJeff.Bonwick@Sun.COM if (vd->vdev_faulted || vd->vdev_degraded) 19057754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, 0)); 19064451Seschrock 19074451Seschrock vd->vdev_degraded = 1ULL; 19084451Seschrock if (!vdev_is_dead(vd)) 19094451Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 19104451Seschrock VDEV_AUX_ERR_EXCEEDED); 19114451Seschrock 19127754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 19134451Seschrock } 19144451Seschrock 19154451Seschrock /* 19164451Seschrock * Online the given vdev. If 'unspare' is set, it implies two things. First, 19174451Seschrock * any attached spare device should be detached when the device finishes 19184451Seschrock * resilvering.
Second, the online should be treated like a 'test' online case, 19194451Seschrock * so no FMA events are generated if the device fails to open. 19204451Seschrock */ 19214451Seschrock int 19227754SJeff.Bonwick@Sun.COM vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 1923789Sahrens { 19249816SGeorge.Wilson@Sun.COM vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; 1925789Sahrens 19267754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa); 19271485Slling 19286643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 19297754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV)); 1930789Sahrens 19311585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 19327754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 19331585Sbonwick 19349816SGeorge.Wilson@Sun.COM tvd = vd->vdev_top; 1935789Sahrens vd->vdev_offline = B_FALSE; 19361485Slling vd->vdev_tmpoffline = B_FALSE; 19377754SJeff.Bonwick@Sun.COM vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 19387754SJeff.Bonwick@Sun.COM vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 19399816SGeorge.Wilson@Sun.COM 19409816SGeorge.Wilson@Sun.COM /* XXX - L2ARC 1.0 does not support expansion */ 19419816SGeorge.Wilson@Sun.COM if (!vd->vdev_aux) { 19429816SGeorge.Wilson@Sun.COM for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 19439816SGeorge.Wilson@Sun.COM pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND); 19449816SGeorge.Wilson@Sun.COM } 19459816SGeorge.Wilson@Sun.COM 19469816SGeorge.Wilson@Sun.COM vdev_reopen(tvd); 19474451Seschrock vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 19484451Seschrock 19499816SGeorge.Wilson@Sun.COM if (!vd->vdev_aux) { 19509816SGeorge.Wilson@Sun.COM for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 19519816SGeorge.Wilson@Sun.COM pvd->vdev_expanding = B_FALSE; 19529816SGeorge.Wilson@Sun.COM } 19539816SGeorge.Wilson@Sun.COM 19544451Seschrock if (newstate) 19554451Seschrock *newstate = vd->vdev_state; 19564451Seschrock if ((flags & ZFS_ONLINE_UNSPARE) && 19574451Seschrock !vdev_is_dead(vd) && vd->vdev_parent && 19584451Seschrock vd->vdev_parent->vdev_ops == &vdev_spare_ops && 19594451Seschrock vd->vdev_parent->vdev_child[0] == vd) 19604451Seschrock vd->vdev_unspare = B_TRUE; 1961789Sahrens 19629816SGeorge.Wilson@Sun.COM if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { 19639816SGeorge.Wilson@Sun.COM 19649816SGeorge.Wilson@Sun.COM /* XXX - L2ARC 1.0 does not support expansion */ 19659816SGeorge.Wilson@Sun.COM if (vd->vdev_aux) 19669816SGeorge.Wilson@Sun.COM return (spa_vdev_state_exit(spa, vd, ENOTSUP)); 19679816SGeorge.Wilson@Sun.COM spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 19689816SGeorge.Wilson@Sun.COM } 19698241SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 1970789Sahrens } 1971789Sahrens 1972789Sahrens int 19734451Seschrock vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 1974789Sahrens { 19759701SGeorge.Wilson@Sun.COM vdev_t *vd, *tvd; 19769701SGeorge.Wilson@Sun.COM int error; 1977789Sahrens 19787754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa); 1979789Sahrens 19806643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 19817754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV)); 1982789Sahrens 19831585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 19847754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 19851585Sbonwick 19869701SGeorge.Wilson@Sun.COM tvd = vd->vdev_top; 19879701SGeorge.Wilson@Sun.COM 1988789Sahrens /* 19891732Sbonwick * If the 
device isn't already offline, try to offline it. 1990789Sahrens */ 19911732Sbonwick if (!vd->vdev_offline) { 19921732Sbonwick /* 19938241SJeff.Bonwick@Sun.COM * If this device has the only valid copy of some data, 19949701SGeorge.Wilson@Sun.COM * don't allow it to be offlined. Log devices are always 19959701SGeorge.Wilson@Sun.COM * expendable. 19961732Sbonwick */ 19979701SGeorge.Wilson@Sun.COM if (!tvd->vdev_islog && vd->vdev_aux == NULL && 19989701SGeorge.Wilson@Sun.COM vdev_dtl_required(vd)) 19997754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, EBUSY)); 2000789Sahrens 20011732Sbonwick /* 20021732Sbonwick * Offline this device and reopen its top-level vdev. 20039701SGeorge.Wilson@Sun.COM * If the top-level vdev is a log device then just offline 20049701SGeorge.Wilson@Sun.COM * it. Otherwise, if this action results in the top-level 20059701SGeorge.Wilson@Sun.COM * vdev becoming unusable, undo it and fail the request. 20061732Sbonwick */ 20071732Sbonwick vd->vdev_offline = B_TRUE; 20089701SGeorge.Wilson@Sun.COM vdev_reopen(tvd); 20099701SGeorge.Wilson@Sun.COM 20109701SGeorge.Wilson@Sun.COM if (!tvd->vdev_islog && vd->vdev_aux == NULL && 20119701SGeorge.Wilson@Sun.COM vdev_is_dead(tvd)) { 20121732Sbonwick vd->vdev_offline = B_FALSE; 20139701SGeorge.Wilson@Sun.COM vdev_reopen(tvd); 20147754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, EBUSY)); 20151732Sbonwick } 2016789Sahrens } 2017789Sahrens 20187754SJeff.Bonwick@Sun.COM vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 20191732Sbonwick 20209701SGeorge.Wilson@Sun.COM if (!tvd->vdev_islog || !vdev_is_dead(tvd)) 20219701SGeorge.Wilson@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 20229701SGeorge.Wilson@Sun.COM 20239701SGeorge.Wilson@Sun.COM (void) spa_vdev_state_exit(spa, vd, 0); 20249701SGeorge.Wilson@Sun.COM 20259701SGeorge.Wilson@Sun.COM error = dmu_objset_find(spa_name(spa), zil_vdev_offline, 20269701SGeorge.Wilson@Sun.COM NULL, DS_FIND_CHILDREN); 20279701SGeorge.Wilson@Sun.COM if (error) { 20289701SGeorge.Wilson@Sun.COM (void) vdev_online(spa, guid, 0, NULL); 20299701SGeorge.Wilson@Sun.COM return (error); 20309701SGeorge.Wilson@Sun.COM } 20319701SGeorge.Wilson@Sun.COM /* 20329701SGeorge.Wilson@Sun.COM * If we successfully offlined the log device then we need to 20339701SGeorge.Wilson@Sun.COM * sync out the current txg so that the "stubby" block can be 20349701SGeorge.Wilson@Sun.COM * removed by zil_sync(). 20359701SGeorge.Wilson@Sun.COM */ 20369701SGeorge.Wilson@Sun.COM txg_wait_synced(spa->spa_dsl_pool, 0); 20379701SGeorge.Wilson@Sun.COM return (0); 2038789Sahrens } 2039789Sahrens 20401544Seschrock /* 20411544Seschrock * Clear the error counts associated with this vdev. Unlike vdev_online() and 20421544Seschrock * vdev_offline(), we assume the spa config is locked. We also clear all 20431544Seschrock * children. If 'vd' is NULL, then the user wants to clear all vdevs. 
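 *
 * (This is the path behind 'zpool clear': the per-vdev error counters are
 * zeroed and, if the device was faulted or unreachable, a reopen is
 * attempted below.)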
20441544Seschrock */ 20451544Seschrock void 20467754SJeff.Bonwick@Sun.COM vdev_clear(spa_t *spa, vdev_t *vd) 2047789Sahrens { 20487754SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 20497754SJeff.Bonwick@Sun.COM 20507754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2051789Sahrens 20521544Seschrock if (vd == NULL) 20537754SJeff.Bonwick@Sun.COM vd = rvd; 2054789Sahrens 20551544Seschrock vd->vdev_stat.vs_read_errors = 0; 20561544Seschrock vd->vdev_stat.vs_write_errors = 0; 20571544Seschrock vd->vdev_stat.vs_checksum_errors = 0; 2058789Sahrens 20597754SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) 20607754SJeff.Bonwick@Sun.COM vdev_clear(spa, vd->vdev_child[c]); 20614451Seschrock 20624451Seschrock /* 20636959Sek110237 * If we're in the FAULTED state or have experienced failed I/O, then 20646959Sek110237 * clear the persistent state and attempt to reopen the device. We 20656959Sek110237 * also mark the vdev config dirty, so that the new faulted state is 20666959Sek110237 * written out to disk. 20674451Seschrock */ 20687754SJeff.Bonwick@Sun.COM if (vd->vdev_faulted || vd->vdev_degraded || 20697754SJeff.Bonwick@Sun.COM !vdev_readable(vd) || !vdev_writeable(vd)) { 20706959Sek110237 20714451Seschrock vd->vdev_faulted = vd->vdev_degraded = 0; 20727754SJeff.Bonwick@Sun.COM vd->vdev_cant_read = B_FALSE; 20737754SJeff.Bonwick@Sun.COM vd->vdev_cant_write = B_FALSE; 20747754SJeff.Bonwick@Sun.COM 20754451Seschrock vdev_reopen(vd); 20764451Seschrock 20777754SJeff.Bonwick@Sun.COM if (vd != rvd) 20787754SJeff.Bonwick@Sun.COM vdev_state_dirty(vd->vdev_top); 20797754SJeff.Bonwick@Sun.COM 20807754SJeff.Bonwick@Sun.COM if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) 20814808Sek110237 spa_async_request(spa, SPA_ASYNC_RESILVER); 20824451Seschrock 20834451Seschrock spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR); 20844451Seschrock } 2085789Sahrens } 2086789Sahrens 20877754SJeff.Bonwick@Sun.COM boolean_t 20887754SJeff.Bonwick@Sun.COM vdev_is_dead(vdev_t *vd) 20895329Sgw25295 { 20907754SJeff.Bonwick@Sun.COM return (vd->vdev_state < VDEV_STATE_DEGRADED); 20915329Sgw25295 } 20925329Sgw25295 20937754SJeff.Bonwick@Sun.COM boolean_t 20947754SJeff.Bonwick@Sun.COM vdev_readable(vdev_t *vd) 2095789Sahrens { 20967754SJeff.Bonwick@Sun.COM return (!vdev_is_dead(vd) && !vd->vdev_cant_read); 2097789Sahrens } 2098789Sahrens 20997754SJeff.Bonwick@Sun.COM boolean_t 21007754SJeff.Bonwick@Sun.COM vdev_writeable(vdev_t *vd) 2101789Sahrens { 21027754SJeff.Bonwick@Sun.COM return (!vdev_is_dead(vd) && !vd->vdev_cant_write); 21037754SJeff.Bonwick@Sun.COM } 2104789Sahrens 21057754SJeff.Bonwick@Sun.COM boolean_t 21067980SGeorge.Wilson@Sun.COM vdev_allocatable(vdev_t *vd) 21077980SGeorge.Wilson@Sun.COM { 21088241SJeff.Bonwick@Sun.COM uint64_t state = vd->vdev_state; 21098241SJeff.Bonwick@Sun.COM 21107980SGeorge.Wilson@Sun.COM /* 21118241SJeff.Bonwick@Sun.COM * We currently allow allocations from vdevs which may be in the 21127980SGeorge.Wilson@Sun.COM * process of reopening (i.e. VDEV_STATE_CLOSED). If the device 21137980SGeorge.Wilson@Sun.COM * fails to reopen then we'll catch it later when we're holding 21148241SJeff.Bonwick@Sun.COM * the proper locks. Note that we have to get the vdev state 21158241SJeff.Bonwick@Sun.COM * in a local variable because although it changes atomically, 21168241SJeff.Bonwick@Sun.COM * we're asking two separate questions about it. 
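 * As a hypothetical interleaving: with two separate reads of
 * vd->vdev_state, the first could observe VDEV_STATE_CLOSED (satisfying
 * the "< VDEV_STATE_DEGRADED" test) and the second VDEV_STATE_HEALTHY
 * (satisfying the "!= VDEV_STATE_CLOSED" test), wrongly rejecting a vdev
 * that was allocatable at either instant.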
21177980SGeorge.Wilson@Sun.COM */ 21188241SJeff.Bonwick@Sun.COM return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) && 21197980SGeorge.Wilson@Sun.COM !vd->vdev_cant_write); 21207980SGeorge.Wilson@Sun.COM } 21217980SGeorge.Wilson@Sun.COM 21227980SGeorge.Wilson@Sun.COM boolean_t 21237754SJeff.Bonwick@Sun.COM vdev_accessible(vdev_t *vd, zio_t *zio) 21247754SJeff.Bonwick@Sun.COM { 21257754SJeff.Bonwick@Sun.COM ASSERT(zio->io_vd == vd); 2126789Sahrens 21277754SJeff.Bonwick@Sun.COM if (vdev_is_dead(vd) || vd->vdev_remove_wanted) 21287754SJeff.Bonwick@Sun.COM return (B_FALSE); 2129789Sahrens 21307754SJeff.Bonwick@Sun.COM if (zio->io_type == ZIO_TYPE_READ) 21317754SJeff.Bonwick@Sun.COM return (!vd->vdev_cant_read); 2132789Sahrens 21337754SJeff.Bonwick@Sun.COM if (zio->io_type == ZIO_TYPE_WRITE) 21347754SJeff.Bonwick@Sun.COM return (!vd->vdev_cant_write); 21357754SJeff.Bonwick@Sun.COM 21367754SJeff.Bonwick@Sun.COM return (B_TRUE); 2137789Sahrens } 2138789Sahrens 2139789Sahrens /* 2140789Sahrens * Get statistics for the given vdev. 2141789Sahrens */ 2142789Sahrens void 2143789Sahrens vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) 2144789Sahrens { 2145789Sahrens vdev_t *rvd = vd->vdev_spa->spa_root_vdev; 2146789Sahrens 2147789Sahrens mutex_enter(&vd->vdev_stat_lock); 2148789Sahrens bcopy(&vd->vdev_stat, vs, sizeof (*vs)); 21497046Sahrens vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors; 2150789Sahrens vs->vs_timestamp = gethrtime() - vs->vs_timestamp; 2151789Sahrens vs->vs_state = vd->vdev_state; 21529816SGeorge.Wilson@Sun.COM vs->vs_rsize = vdev_get_min_asize(vd); 21539816SGeorge.Wilson@Sun.COM if (vd->vdev_ops->vdev_op_leaf) 21549816SGeorge.Wilson@Sun.COM vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; 2155789Sahrens mutex_exit(&vd->vdev_stat_lock); 2156789Sahrens 2157789Sahrens /* 2158789Sahrens * If we're getting stats on the root vdev, aggregate the I/O counts 2159789Sahrens * over all top-level vdevs (i.e. the direct children of the root). 2160789Sahrens */ 2161789Sahrens if (vd == rvd) { 21627754SJeff.Bonwick@Sun.COM for (int c = 0; c < rvd->vdev_children; c++) { 2163789Sahrens vdev_t *cvd = rvd->vdev_child[c]; 2164789Sahrens vdev_stat_t *cvs = &cvd->vdev_stat; 2165789Sahrens 2166789Sahrens mutex_enter(&vd->vdev_stat_lock); 21677754SJeff.Bonwick@Sun.COM for (int t = 0; t < ZIO_TYPES; t++) { 2168789Sahrens vs->vs_ops[t] += cvs->vs_ops[t]; 2169789Sahrens vs->vs_bytes[t] += cvs->vs_bytes[t]; 2170789Sahrens } 2171789Sahrens vs->vs_scrub_examined += cvs->vs_scrub_examined; 2172789Sahrens mutex_exit(&vd->vdev_stat_lock); 2173789Sahrens } 2174789Sahrens } 2175789Sahrens } 2176789Sahrens 2177789Sahrens void 21785450Sbrendan vdev_clear_stats(vdev_t *vd) 21795450Sbrendan { 21805450Sbrendan mutex_enter(&vd->vdev_stat_lock); 21815450Sbrendan vd->vdev_stat.vs_space = 0; 21825450Sbrendan vd->vdev_stat.vs_dspace = 0; 21835450Sbrendan vd->vdev_stat.vs_alloc = 0; 21845450Sbrendan mutex_exit(&vd->vdev_stat_lock); 21855450Sbrendan } 21865450Sbrendan 21875450Sbrendan void 21887754SJeff.Bonwick@Sun.COM vdev_stat_update(zio_t *zio, uint64_t psize) 2189789Sahrens { 21908241SJeff.Bonwick@Sun.COM spa_t *spa = zio->io_spa; 21918241SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 21927754SJeff.Bonwick@Sun.COM vdev_t *vd = zio->io_vd ? 
zio->io_vd : rvd; 2193789Sahrens vdev_t *pvd; 2194789Sahrens uint64_t txg = zio->io_txg; 2195789Sahrens vdev_stat_t *vs = &vd->vdev_stat; 2196789Sahrens zio_type_t type = zio->io_type; 2197789Sahrens int flags = zio->io_flags; 2198789Sahrens 21997754SJeff.Bonwick@Sun.COM /* 22007754SJeff.Bonwick@Sun.COM * If this i/o is a gang leader, it didn't do any actual work. 22017754SJeff.Bonwick@Sun.COM */ 22027754SJeff.Bonwick@Sun.COM if (zio->io_gang_tree) 22037754SJeff.Bonwick@Sun.COM return; 22047754SJeff.Bonwick@Sun.COM 2205789Sahrens if (zio->io_error == 0) { 22067754SJeff.Bonwick@Sun.COM /* 22077754SJeff.Bonwick@Sun.COM * If this is a root i/o, don't count it -- we've already 22087754SJeff.Bonwick@Sun.COM * counted the top-level vdevs, and vdev_get_stats() will 22097754SJeff.Bonwick@Sun.COM * aggregate them when asked. This reduces contention on 22107754SJeff.Bonwick@Sun.COM * the root vdev_stat_lock and implicitly handles blocks 22117754SJeff.Bonwick@Sun.COM * that compress away to holes, for which there is no i/o. 22127754SJeff.Bonwick@Sun.COM * (Holes never create vdev children, so all the counters 22137754SJeff.Bonwick@Sun.COM * remain zero, which is what we want.) 22147754SJeff.Bonwick@Sun.COM * 22157754SJeff.Bonwick@Sun.COM * Note: this only applies to successful i/o (io_error == 0) 22167754SJeff.Bonwick@Sun.COM * because unlike i/o counts, errors are not additive. 22177754SJeff.Bonwick@Sun.COM * When reading a ditto block, for example, failure of 22187754SJeff.Bonwick@Sun.COM * one top-level vdev does not imply a root-level error. 22197754SJeff.Bonwick@Sun.COM */ 22207754SJeff.Bonwick@Sun.COM if (vd == rvd) 22217754SJeff.Bonwick@Sun.COM return; 22227754SJeff.Bonwick@Sun.COM 22237754SJeff.Bonwick@Sun.COM ASSERT(vd == zio->io_vd); 22248241SJeff.Bonwick@Sun.COM 22258241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_IO_BYPASS) 22268241SJeff.Bonwick@Sun.COM return; 22278241SJeff.Bonwick@Sun.COM 22288241SJeff.Bonwick@Sun.COM mutex_enter(&vd->vdev_stat_lock); 22298241SJeff.Bonwick@Sun.COM 22307754SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_IO_REPAIR) { 22311807Sbonwick if (flags & ZIO_FLAG_SCRUB_THREAD) 22327754SJeff.Bonwick@Sun.COM vs->vs_scrub_repaired += psize; 22338241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_SELF_HEAL) 22347754SJeff.Bonwick@Sun.COM vs->vs_self_healed += psize; 2235789Sahrens } 22368241SJeff.Bonwick@Sun.COM 22378241SJeff.Bonwick@Sun.COM vs->vs_ops[type]++; 22388241SJeff.Bonwick@Sun.COM vs->vs_bytes[type] += psize; 22398241SJeff.Bonwick@Sun.COM 22408241SJeff.Bonwick@Sun.COM mutex_exit(&vd->vdev_stat_lock); 2241789Sahrens return; 2242789Sahrens } 2243789Sahrens 2244789Sahrens if (flags & ZIO_FLAG_SPECULATIVE) 2245789Sahrens return; 2246789Sahrens 22479725SEric.Schrock@Sun.COM /* 22489725SEric.Schrock@Sun.COM * If this is an I/O error that is going to be retried, then ignore the 22499725SEric.Schrock@Sun.COM * error. Otherwise, the user may interpret B_FAILFAST I/O errors as 22509725SEric.Schrock@Sun.COM * hard errors, when in reality they can happen for any number of 22519725SEric.Schrock@Sun.COM * innocuous reasons (bus resets, MPxIO link failure, etc). 
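 * In other words, an EIO on an i/o that has not yet been retried
 * (ZIO_FLAG_IO_RETRY not set) is ignored here; only a failure of the
 * retried i/o is charged to the vdev's error counters below.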
22529725SEric.Schrock@Sun.COM */ 22539725SEric.Schrock@Sun.COM if (zio->io_error == EIO && 22549725SEric.Schrock@Sun.COM !(zio->io_flags & ZIO_FLAG_IO_RETRY)) 22559725SEric.Schrock@Sun.COM return; 22569725SEric.Schrock@Sun.COM 22577754SJeff.Bonwick@Sun.COM mutex_enter(&vd->vdev_stat_lock); 22589230SGeorge.Wilson@Sun.COM if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) { 22597754SJeff.Bonwick@Sun.COM if (zio->io_error == ECKSUM) 22607754SJeff.Bonwick@Sun.COM vs->vs_checksum_errors++; 22617754SJeff.Bonwick@Sun.COM else 22627754SJeff.Bonwick@Sun.COM vs->vs_read_errors++; 2263789Sahrens } 22649230SGeorge.Wilson@Sun.COM if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd)) 22657754SJeff.Bonwick@Sun.COM vs->vs_write_errors++; 22667754SJeff.Bonwick@Sun.COM mutex_exit(&vd->vdev_stat_lock); 2267789Sahrens 22688241SJeff.Bonwick@Sun.COM if (type == ZIO_TYPE_WRITE && txg != 0 && 22698241SJeff.Bonwick@Sun.COM (!(flags & ZIO_FLAG_IO_REPAIR) || 22708241SJeff.Bonwick@Sun.COM (flags & ZIO_FLAG_SCRUB_THREAD))) { 22718241SJeff.Bonwick@Sun.COM /* 22728241SJeff.Bonwick@Sun.COM * This is either a normal write (not a repair), or it's a 22738241SJeff.Bonwick@Sun.COM * repair induced by the scrub thread. In the normal case, 22748241SJeff.Bonwick@Sun.COM * we commit the DTL change in the same txg as the block 22758241SJeff.Bonwick@Sun.COM * was born. In the scrub-induced repair case, we know that 22768241SJeff.Bonwick@Sun.COM * scrubs run in first-pass syncing context, so we commit 22778241SJeff.Bonwick@Sun.COM * the DTL change in spa->spa_syncing_txg. 22788241SJeff.Bonwick@Sun.COM * 22798241SJeff.Bonwick@Sun.COM * We currently do not make DTL entries for failed spontaneous 22808241SJeff.Bonwick@Sun.COM * self-healing writes triggered by normal (non-scrubbing) 22818241SJeff.Bonwick@Sun.COM * reads, because we have no transactional context in which to 22828241SJeff.Bonwick@Sun.COM * do so -- and it's not clear that it'd be desirable anyway. 
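 * As a concrete illustration (invented numbers): for a scrub-induced
 * repair of a block born in txg 1000 while spa_syncing_txg is 5000,
 * DTL_SCRUB is dirtied for txg 1000, but the resulting DTL update is
 * scheduled in commit_txg 5000, the txg in which the scrub is syncing.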
22838241SJeff.Bonwick@Sun.COM */ 22848241SJeff.Bonwick@Sun.COM if (vd->vdev_ops->vdev_op_leaf) { 22858241SJeff.Bonwick@Sun.COM uint64_t commit_txg = txg; 22868241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_SCRUB_THREAD) { 22878241SJeff.Bonwick@Sun.COM ASSERT(flags & ZIO_FLAG_IO_REPAIR); 22888241SJeff.Bonwick@Sun.COM ASSERT(spa_sync_pass(spa) == 1); 22898241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1); 22908241SJeff.Bonwick@Sun.COM commit_txg = spa->spa_syncing_txg; 22918241SJeff.Bonwick@Sun.COM } 22928241SJeff.Bonwick@Sun.COM ASSERT(commit_txg >= spa->spa_syncing_txg); 22938241SJeff.Bonwick@Sun.COM if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1)) 22948241SJeff.Bonwick@Sun.COM return; 22958241SJeff.Bonwick@Sun.COM for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 22968241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1); 22978241SJeff.Bonwick@Sun.COM vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); 2298789Sahrens } 22998241SJeff.Bonwick@Sun.COM if (vd != rvd) 23008241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vd, DTL_MISSING, txg, 1); 2301789Sahrens } 2302789Sahrens } 2303789Sahrens 2304789Sahrens void 2305789Sahrens vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete) 2306789Sahrens { 2307789Sahrens vdev_stat_t *vs = &vd->vdev_stat; 2308789Sahrens 23099816SGeorge.Wilson@Sun.COM for (int c = 0; c < vd->vdev_children; c++) 2310789Sahrens vdev_scrub_stat_update(vd->vdev_child[c], type, complete); 2311789Sahrens 2312789Sahrens mutex_enter(&vd->vdev_stat_lock); 2313789Sahrens 2314789Sahrens if (type == POOL_SCRUB_NONE) { 2315789Sahrens /* 2316789Sahrens * Update completion and end time. Leave everything else alone 2317789Sahrens * so we can report what happened during the previous scrub. 2318789Sahrens */ 2319789Sahrens vs->vs_scrub_complete = complete; 2320789Sahrens vs->vs_scrub_end = gethrestime_sec(); 2321789Sahrens } else { 2322789Sahrens vs->vs_scrub_type = type; 2323789Sahrens vs->vs_scrub_complete = 0; 2324789Sahrens vs->vs_scrub_examined = 0; 2325789Sahrens vs->vs_scrub_repaired = 0; 2326789Sahrens vs->vs_scrub_start = gethrestime_sec(); 2327789Sahrens vs->vs_scrub_end = 0; 2328789Sahrens } 2329789Sahrens 2330789Sahrens mutex_exit(&vd->vdev_stat_lock); 2331789Sahrens } 2332789Sahrens 2333789Sahrens /* 2334789Sahrens * Update the in-core space usage stats for this vdev and the root vdev. 2335789Sahrens */ 2336789Sahrens void 23375450Sbrendan vdev_space_update(vdev_t *vd, int64_t space_delta, int64_t alloc_delta, 23385450Sbrendan boolean_t update_root) 2339789Sahrens { 23404527Sperrin int64_t dspace_delta = space_delta; 23414527Sperrin spa_t *spa = vd->vdev_spa; 23424527Sperrin vdev_t *rvd = spa->spa_root_vdev; 23434527Sperrin 2344789Sahrens ASSERT(vd == vd->vdev_top); 23454527Sperrin 23464527Sperrin /* 23474527Sperrin * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion 23484527Sperrin * factor. We must calculate this here and not at the root vdev 23494527Sperrin * because the root vdev's psize-to-asize is simply the max of its 23504527Sperrin * childrens', thus not accurate enough for us. 
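 * As a hypothetical example, assuming the deflate ratio was derived at
 * open time from a 128K reference block: with no expansion the ratio is
 * 512, so the deflated delta below equals the raw delta; if a RAID-Z
 * layout expanded 128K to exactly 256K, the ratio would be 256 and the
 * deflated delta would be half the raw one.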
23514527Sperrin */ 23524527Sperrin ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0); 23539701SGeorge.Wilson@Sun.COM ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache); 23544527Sperrin dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) * 23554527Sperrin vd->vdev_deflate_ratio; 2356789Sahrens 23574527Sperrin mutex_enter(&vd->vdev_stat_lock); 23584527Sperrin vd->vdev_stat.vs_space += space_delta; 23594527Sperrin vd->vdev_stat.vs_alloc += alloc_delta; 23604527Sperrin vd->vdev_stat.vs_dspace += dspace_delta; 23614527Sperrin mutex_exit(&vd->vdev_stat_lock); 23622082Seschrock 23635450Sbrendan if (update_root) { 23645450Sbrendan ASSERT(rvd == vd->vdev_parent); 23655450Sbrendan ASSERT(vd->vdev_ms_count != 0); 23664527Sperrin 23675450Sbrendan /* 23685450Sbrendan * Don't count non-normal (e.g. intent log) space as part of 23695450Sbrendan * the pool's capacity. 23705450Sbrendan */ 23715450Sbrendan if (vd->vdev_mg->mg_class != spa->spa_normal_class) 23725450Sbrendan return; 23735450Sbrendan 23745450Sbrendan mutex_enter(&rvd->vdev_stat_lock); 23755450Sbrendan rvd->vdev_stat.vs_space += space_delta; 23765450Sbrendan rvd->vdev_stat.vs_alloc += alloc_delta; 23775450Sbrendan rvd->vdev_stat.vs_dspace += dspace_delta; 23785450Sbrendan mutex_exit(&rvd->vdev_stat_lock); 23795450Sbrendan } 2380789Sahrens } 2381789Sahrens 2382789Sahrens /* 2383789Sahrens * Mark a top-level vdev's config as dirty, placing it on the dirty list 2384789Sahrens * so that it will be written out next time the vdev configuration is synced. 2385789Sahrens * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs. 2386789Sahrens */ 2387789Sahrens void 2388789Sahrens vdev_config_dirty(vdev_t *vd) 2389789Sahrens { 2390789Sahrens spa_t *spa = vd->vdev_spa; 2391789Sahrens vdev_t *rvd = spa->spa_root_vdev; 2392789Sahrens int c; 2393789Sahrens 23941601Sbonwick /* 23959425SEric.Schrock@Sun.COM * If this is an aux vdev (as with l2cache and spare devices), then we 23969425SEric.Schrock@Sun.COM * update the vdev config manually and set the sync flag. 23976643Seschrock */ 23986643Seschrock if (vd->vdev_aux != NULL) { 23996643Seschrock spa_aux_vdev_t *sav = vd->vdev_aux; 24006643Seschrock nvlist_t **aux; 24016643Seschrock uint_t naux; 24026643Seschrock 24036643Seschrock for (c = 0; c < sav->sav_count; c++) { 24046643Seschrock if (sav->sav_vdevs[c] == vd) 24056643Seschrock break; 24066643Seschrock } 24076643Seschrock 24087754SJeff.Bonwick@Sun.COM if (c == sav->sav_count) { 24097754SJeff.Bonwick@Sun.COM /* 24107754SJeff.Bonwick@Sun.COM * We're being removed. There's nothing more to do. 24117754SJeff.Bonwick@Sun.COM */ 24127754SJeff.Bonwick@Sun.COM ASSERT(sav->sav_sync == B_TRUE); 24137754SJeff.Bonwick@Sun.COM return; 24147754SJeff.Bonwick@Sun.COM } 24157754SJeff.Bonwick@Sun.COM 24166643Seschrock sav->sav_sync = B_TRUE; 24176643Seschrock 24189425SEric.Schrock@Sun.COM if (nvlist_lookup_nvlist_array(sav->sav_config, 24199425SEric.Schrock@Sun.COM ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) { 24209425SEric.Schrock@Sun.COM VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, 24219425SEric.Schrock@Sun.COM ZPOOL_CONFIG_SPARES, &aux, &naux) == 0); 24229425SEric.Schrock@Sun.COM } 24236643Seschrock 24246643Seschrock ASSERT(c < naux); 24256643Seschrock 24266643Seschrock /* 24276643Seschrock * Setting the nvlist in the middle of the array is a little 24286643Seschrock * sketchy, but it will work.
24296643Seschrock */ 24306643Seschrock nvlist_free(aux[c]); 24316643Seschrock aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE); 24326643Seschrock 24336643Seschrock return; 24346643Seschrock } 24356643Seschrock 24366643Seschrock /* 24377754SJeff.Bonwick@Sun.COM * The dirty list is protected by the SCL_CONFIG lock. The caller 24387754SJeff.Bonwick@Sun.COM * must either hold SCL_CONFIG as writer, or must be the sync thread 24397754SJeff.Bonwick@Sun.COM * (which holds SCL_CONFIG as reader). There's only one sync thread, 24401601Sbonwick * so this is sufficient to ensure mutual exclusion. 24411601Sbonwick */ 24427754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 24437754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 24447754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_CONFIG, RW_READER))); 24451601Sbonwick 2446789Sahrens if (vd == rvd) { 2447789Sahrens for (c = 0; c < rvd->vdev_children; c++) 2448789Sahrens vdev_config_dirty(rvd->vdev_child[c]); 2449789Sahrens } else { 2450789Sahrens ASSERT(vd == vd->vdev_top); 2451789Sahrens 24527754SJeff.Bonwick@Sun.COM if (!list_link_active(&vd->vdev_config_dirty_node)) 24537754SJeff.Bonwick@Sun.COM list_insert_head(&spa->spa_config_dirty_list, vd); 2454789Sahrens } 2455789Sahrens } 2456789Sahrens 2457789Sahrens void 2458789Sahrens vdev_config_clean(vdev_t *vd) 2459789Sahrens { 24601601Sbonwick spa_t *spa = vd->vdev_spa; 24611601Sbonwick 24627754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 24637754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 24647754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_CONFIG, RW_READER))); 24657754SJeff.Bonwick@Sun.COM 24667754SJeff.Bonwick@Sun.COM ASSERT(list_link_active(&vd->vdev_config_dirty_node)); 24677754SJeff.Bonwick@Sun.COM list_remove(&spa->spa_config_dirty_list, vd); 24687754SJeff.Bonwick@Sun.COM } 24697754SJeff.Bonwick@Sun.COM 24707754SJeff.Bonwick@Sun.COM /* 24717754SJeff.Bonwick@Sun.COM * Mark a top-level vdev's state as dirty, so that the next pass of 24727754SJeff.Bonwick@Sun.COM * spa_sync() can convert this into vdev_config_dirty(). We distinguish 24737754SJeff.Bonwick@Sun.COM * the state changes from larger config changes because they require 24747754SJeff.Bonwick@Sun.COM * much less locking, and are often needed for administrative actions. 24757754SJeff.Bonwick@Sun.COM */ 24767754SJeff.Bonwick@Sun.COM void 24777754SJeff.Bonwick@Sun.COM vdev_state_dirty(vdev_t *vd) 24787754SJeff.Bonwick@Sun.COM { 24797754SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 24807754SJeff.Bonwick@Sun.COM 24817754SJeff.Bonwick@Sun.COM ASSERT(vd == vd->vdev_top); 24821601Sbonwick 24837754SJeff.Bonwick@Sun.COM /* 24847754SJeff.Bonwick@Sun.COM * The state list is protected by the SCL_STATE lock. The caller 24857754SJeff.Bonwick@Sun.COM * must either hold SCL_STATE as writer, or must be the sync thread 24867754SJeff.Bonwick@Sun.COM * (which holds SCL_STATE as reader). There's only one sync thread, 24877754SJeff.Bonwick@Sun.COM * so this is sufficient to ensure mutual exclusion. 
24887754SJeff.Bonwick@Sun.COM */ 24897754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 24907754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 24917754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_STATE, RW_READER))); 24927754SJeff.Bonwick@Sun.COM 24937754SJeff.Bonwick@Sun.COM if (!list_link_active(&vd->vdev_state_dirty_node)) 24947754SJeff.Bonwick@Sun.COM list_insert_head(&spa->spa_state_dirty_list, vd); 24957754SJeff.Bonwick@Sun.COM } 24967754SJeff.Bonwick@Sun.COM 24977754SJeff.Bonwick@Sun.COM void 24987754SJeff.Bonwick@Sun.COM vdev_state_clean(vdev_t *vd) 24997754SJeff.Bonwick@Sun.COM { 25007754SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 25017754SJeff.Bonwick@Sun.COM 25027754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 25037754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 25047754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_STATE, RW_READER))); 25057754SJeff.Bonwick@Sun.COM 25067754SJeff.Bonwick@Sun.COM ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 25077754SJeff.Bonwick@Sun.COM list_remove(&spa->spa_state_dirty_list, vd); 2508789Sahrens } 2509789Sahrens 25106523Sek110237 /* 25116523Sek110237 * Propagate vdev state up from children to parent. 25126523Sek110237 */ 25131775Sbillm void 25141775Sbillm vdev_propagate_state(vdev_t *vd) 25151775Sbillm { 25168241SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 25178241SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 25181775Sbillm int degraded = 0, faulted = 0; 25191775Sbillm int corrupted = 0; 25201775Sbillm vdev_t *child; 25211775Sbillm 25224451Seschrock if (vd->vdev_children > 0) { 25239816SGeorge.Wilson@Sun.COM for (int c = 0; c < vd->vdev_children; c++) { 25244451Seschrock child = vd->vdev_child[c]; 25256976Seschrock 25267754SJeff.Bonwick@Sun.COM if (!vdev_readable(child) || 25278241SJeff.Bonwick@Sun.COM (!vdev_writeable(child) && spa_writeable(spa))) { 25286976Seschrock /* 25296976Seschrock * Root special: if there is a top-level log 25306976Seschrock * device, treat the root vdev as if it were 25316976Seschrock * degraded. 25326976Seschrock */ 25336976Seschrock if (child->vdev_islog && vd == rvd) 25346976Seschrock degraded++; 25356976Seschrock else 25366976Seschrock faulted++; 25376976Seschrock } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { 25384451Seschrock degraded++; 25396976Seschrock } 25404451Seschrock 25414451Seschrock if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) 25424451Seschrock corrupted++; 25434451Seschrock } 25441775Sbillm 25454451Seschrock vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); 25464451Seschrock 25474451Seschrock /* 25487754SJeff.Bonwick@Sun.COM * Root special: if there is a top-level vdev that cannot be 25494451Seschrock * opened due to corrupted metadata, then propagate the root 25504451Seschrock * vdev's aux state as 'corrupt' rather than 'insufficient 25514451Seschrock * replicas'. 25524451Seschrock */ 25534451Seschrock if (corrupted && vd == rvd && 25544451Seschrock rvd->vdev_state == VDEV_STATE_CANT_OPEN) 25554451Seschrock vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, 25564451Seschrock VDEV_AUX_CORRUPT_DATA); 25571775Sbillm } 25581775Sbillm 25596976Seschrock if (vd->vdev_parent) 25604451Seschrock vdev_propagate_state(vd->vdev_parent); 25611775Sbillm } 25621775Sbillm 2563789Sahrens /* 25641544Seschrock * Set a vdev's state. 
If this is during an open, we don't update the parent 25651544Seschrock * state, because we're in the process of opening children depth-first. 25661544Seschrock * Otherwise, we propagate the change to the parent. 25671544Seschrock * 25681544Seschrock * If this routine places a device in a faulted state, an appropriate ereport is 25691544Seschrock * generated. 2570789Sahrens */ 2571789Sahrens void 25721544Seschrock vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) 2573789Sahrens { 25741986Seschrock uint64_t save_state; 25756643Seschrock spa_t *spa = vd->vdev_spa; 25761544Seschrock 25771544Seschrock if (state == vd->vdev_state) { 25781544Seschrock vd->vdev_stat.vs_aux = aux; 2579789Sahrens return; 25801544Seschrock } 25811544Seschrock 25821986Seschrock save_state = vd->vdev_state; 2583789Sahrens 2584789Sahrens vd->vdev_state = state; 2585789Sahrens vd->vdev_stat.vs_aux = aux; 2586789Sahrens 25874451Seschrock /* 25884451Seschrock * If we are setting the vdev state to anything but an open state, then 25894451Seschrock * always close the underlying device. Otherwise, we keep accessible 25904451Seschrock * but invalid devices open forever. We don't call vdev_close() itself, 25914451Seschrock * because that implies some extra checks (offline, etc) that we don't 25924451Seschrock * want here. This is limited to leaf devices, because otherwise 25934451Seschrock * closing the device will affect other children. 25944451Seschrock */ 25957780SJeff.Bonwick@Sun.COM if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf) 25964451Seschrock vd->vdev_ops->vdev_op_close(vd); 25974451Seschrock 25984451Seschrock if (vd->vdev_removed && 25994451Seschrock state == VDEV_STATE_CANT_OPEN && 26004451Seschrock (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { 26014451Seschrock /* 26024451Seschrock * If the previous state is set to VDEV_STATE_REMOVED, then this 26034451Seschrock * device was previously marked removed and someone attempted to 26044451Seschrock * reopen it. If this failed due to a nonexistent device, then 26054451Seschrock * keep the device in the REMOVED state. We also let this be if 26064451Seschrock * it is one of our special test online cases, which is only 26074451Seschrock * attempting to online the device and shouldn't generate an FMA 26084451Seschrock * fault. 26094451Seschrock */ 26104451Seschrock vd->vdev_state = VDEV_STATE_REMOVED; 26114451Seschrock vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 26124451Seschrock } else if (state == VDEV_STATE_REMOVED) { 26134451Seschrock /* 26144451Seschrock * Indicate to the ZFS DE that this device has been removed, and 26154451Seschrock * any recent errors should be ignored. 26164451Seschrock */ 26176643Seschrock zfs_post_remove(spa, vd); 26184451Seschrock vd->vdev_removed = B_TRUE; 26194451Seschrock } else if (state == VDEV_STATE_CANT_OPEN) { 26201544Seschrock /* 26211544Seschrock * If we fail to open a vdev during an import, we mark it as 26221544Seschrock * "not available", which signifies that it was never there to 26231544Seschrock * begin with. Failure to open such a device is not considered 26241544Seschrock * an error. 26251544Seschrock */ 26266643Seschrock if (spa->spa_load_state == SPA_LOAD_IMPORT && 26271986Seschrock vd->vdev_ops->vdev_op_leaf) 26281986Seschrock vd->vdev_not_present = 1; 26291986Seschrock 26301986Seschrock /* 26311986Seschrock * Post the appropriate ereport. 
		 * If the 'prevstate' field is set to something other than
		 * VDEV_STATE_UNKNOWN, it indicates that this is part of a
		 * vdev_reopen().  In this case, we don't want to post the
		 * ereport if the device was already in the CANT_OPEN state
		 * beforehand.
		 *
		 * If the 'checkremove' flag is set, then this is an attempt to
		 * online the device in response to an insertion event.  If we
		 * hit this case, then we have detected an insertion event for a
		 * faulted or offline device that wasn't in the removed state.
		 * In this scenario, we don't post an ereport because we are
		 * about to replace the device, or attempt an online with
		 * vdev_forcefault, which will generate the fault for us.
		 */
		if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
		    !vd->vdev_not_present && !vd->vdev_checkremove &&
		    vd != spa->spa_root_vdev) {
			const char *class;

			switch (aux) {
			case VDEV_AUX_OPEN_FAILED:
				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
				break;
			case VDEV_AUX_CORRUPT_DATA:
				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
				break;
			case VDEV_AUX_NO_REPLICAS:
				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
				break;
			case VDEV_AUX_BAD_GUID_SUM:
				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
				break;
			case VDEV_AUX_TOO_SMALL:
				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
				break;
			case VDEV_AUX_BAD_LABEL:
				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
				break;
			case VDEV_AUX_IO_FAILURE:
				class = FM_EREPORT_ZFS_IO_FAILURE;
				break;
			default:
				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
			}

			zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
		}

		/* Erase any notion of persistent removed state */
		vd->vdev_removed = B_FALSE;
	} else {
		vd->vdev_removed = B_FALSE;
	}

	if (!isopen && vd->vdev_parent)
		vdev_propagate_state(vd->vdev_parent);
}
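/*
 * Illustrative example (not part of the original source): marking a leaf
 * whose label could not be read might look like
 *
 *	vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, VDEV_AUX_BAD_LABEL);
 *
 * which closes the now-unusable leaf, posts an
 * FM_EREPORT_ZFS_DEVICE_BAD_LABEL ereport (unless the device was already in
 * the CANT_OPEN state before a reopen, or is marked not-present or
 * checkremove), and then propagates the new state up to the parent because
 * isopen is B_FALSE.
 */
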
/*
 * Check the vdev configuration to ensure that it's capable of supporting
 * a root pool.  Currently, we do not support RAID-Z or partial configuration.
 * In addition, only a single top-level vdev is allowed and none of the leaves
 * can be wholedisks.
 */
boolean_t
vdev_is_bootable(vdev_t *vd)
{
	if (!vd->vdev_ops->vdev_op_leaf) {
		char *vdev_type = vd->vdev_ops->vdev_op_type;

		if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
		    vd->vdev_children > 1) {
			return (B_FALSE);
		} else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
		    strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
			return (B_FALSE);
		}
	} else if (vd->vdev_wholedisk == 1) {
		return (B_FALSE);
	}

	for (int c = 0; c < vd->vdev_children; c++) {
		if (!vdev_is_bootable(vd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
vdev_load_log_state(vdev_t *vd, nvlist_t *nv)
{
	uint_t children;
	nvlist_t **child;
	uint64_t val;
	spa_t *spa = vd->vdev_spa;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (int c = 0; c < children; c++)
			vdev_load_log_state(vd->vdev_child[c], child[c]);
	}

	if (vd->vdev_ops->vdev_op_leaf && nvlist_lookup_uint64(nv,
	    ZPOOL_CONFIG_OFFLINE, &val) == 0 && val) {
		/*
		 * It would be nice to call vdev_offline()
		 * directly but the pool isn't fully loaded and
		 * the txg threads have not been started yet.
		 */
		spa_config_enter(spa, SCL_STATE_ALL, FTAG, RW_WRITER);
		vd->vdev_offline = val;
		vdev_reopen(vd->vdev_top);
		spa_config_exit(spa, SCL_STATE_ALL, FTAG);
	}
}

/*
 * Expand a vdev if possible.
 */
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
	ASSERT(vd->vdev_top == vd);
	ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) {
		VERIFY(vdev_metaslab_init(vd, txg) == 0);
		vdev_config_dirty(vd);
	}
}
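/*
 * Illustrative example (not part of the original source): with
 * vdev_ms_shift == 30 (1GB metaslabs) and vdev_ms_count == 64, growing the
 * underlying device so that vdev_asize covers 100GB makes
 * (vdev_asize >> vdev_ms_shift) == 100 > 64, so vdev_expand() would have
 * vdev_metaslab_init() create the 36 new metaslabs for this txg and then
 * dirty the vdev config so the larger size is written out.  A caller that
 * has just grown a top-level vdev under SCL_ALL might use it roughly as
 *
 *	vdev_reopen(tvd);		(pick up the new asize)
 *	vdev_expand(tvd, txg);
 *
 * the exact locking and txg plumbing are handled by the spa code and are
 * simplified here.
 */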