/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	NULL
};

/* maximum scrub/resilver I/O queue per leaf vdev */
int zfs_scrub_limit = 10;

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
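/*
 * Illustrative example (not part of the original comment): with a
 * top-level vdev_ashift of 9 (512-byte sectors), a psize of 3000 bytes
 * starts as P2ROUNDUP(3000, 512) == 3072, and the result is then the
 * maximum of that value and each child's vdev_psize_to_asize() answer.
 */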
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;
	uint64_t c;

	for (c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

/*
 * Get the replaceable or attachable device size.
 * If the parent is a mirror or raidz, the replaceable size is the minimum
 * psize of all its children. For the rest, just return our own psize.
 *
 * e.g.
 *			psize	rsize
 * root			-	-
 * mirror/raidz		-	-
 *	    disk1	20g	20g
 *	    disk2	40g	20g
 *	    disk3	80g	80g
 */
uint64_t
vdev_get_rsize(vdev_t *vd)
{
	vdev_t *pvd, *cvd;
	uint64_t c, rsize;

	pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL or the root, just return our own psize.
	 */
	if (pvd == NULL || pvd->vdev_parent == NULL)
		return (vd->vdev_psize);

	rsize = 0;

	for (c = 0; c < pvd->vdev_children; c++) {
		cvd = pvd->vdev_child[c];
		rsize = MIN(rsize - 1, cvd->vdev_psize - 1) + 1;
	}

	return (rsize);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	int c;
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc, c;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
static vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
	}

	if (guid == 0) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			while (guid == 0 || spa_guid_exists(guid, 0))
				guid = spa_get_random(-1ULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			while (guid == 0 ||
			    spa_guid_exists(spa_guid(spa), guid))
				guid = spa_get_random(-1ULL);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
		    &vd->vdev_dtl_lock);
	}
	txg_list_create(&vd->vdev_ms_list,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EINVAL);

	if ((ops = vdev_getops(type)) == NULL)
		return (EINVAL);

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (EINVAL);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (EINVAL);

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (ENOTSUP);

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			/*
			 * Currently, we can only support 2 parity devices.
			 */
			if (nparity == 0 || nparity > 2)
				return (EINVAL);
			/*
			 * Older versions can only support 1 parity device.
			 */
			if (nparity == 2 &&
			    spa_version(spa) < SPA_VERSION_RAID6)
				return (ENOTSUP);
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAID6)
				return (EINVAL);
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag.  This will only be set if the
	 * device was not present at the time of import.
	 */
	if (!spa->spa_import_faulted)
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
		    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_smo.smo_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		/*
		 * When importing a pool, we want to ignore the persistent
		 * fault state, as the diagnosis made on another system may
		 * not be valid in the current context.
		 */
		if (spa->spa_load_state == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

void
vdev_free(vdev_t *vd)
{
	int c;
	spa_t *spa = vd->vdev_spa;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));

	/*
	 * Free all children.
	 */
	for (c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd == vd->vdev_top)
		vdev_metaslab_fini(vd);

	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_unload(&vd->vdev_dtl[t]);
		space_map_destroy(&vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	int c;

	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
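/*
 * Illustrative example (not part of the original comment): when a disk B
 * is attached to an existing disk A, this routine interposes the new
 * interior vdev:
 *
 *	before:  root -> A
 *	after:   root -> mirror -> { A, B }
 *
 * The returned mvd (the mirror above) takes over A's former vdev_id, so
 * the topology above it is unchanged.
 */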
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
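	/*
	 * Illustrative note (not part of the original comment): adding
	 * guid_delta = mvd->vdev_guid - cvd->vdev_guid below makes
	 * cvd->vdev_guid equal to mvd->vdev_guid, and bumping cvd's guid
	 * sum by the same delta keeps vdev_guid_sum consistent with the
	 * new guid.
	 */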
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	metaslab_class_t *mc;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	if (vd->vdev_ms_shift == 0)	/* not being allocated from yet */
		return (0);

	ASSERT(oldc <= newc);

	if (vd->vdev_islog)
		mc = spa->spa_log_class;
	else
		mc = spa->spa_normal_class;

	if (vd->vdev_mg == NULL)
		vd->vdev_mg = metaslab_group_create(mc, vd);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (m = oldc; m < newc; m++) {
		space_map_obj_t smo = { 0, 0, 0 };
		if (txg == 0) {
			uint64_t object = 0;
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object);
			if (error)
				return (error);
			if (object != 0) {
				dmu_buf_t *db;
				error = dmu_bonus_hold(mos, object, FTAG, &db);
				if (error)
					return (error);
				ASSERT3U(db->db_size, >=, sizeof (smo));
				bcopy(db->db_data, &smo, sizeof (smo));
				ASSERT3U(smo.smo_object, ==, object);
				dmu_buf_rele(db, FTAG);
			}
		}
		vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
		    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
	}

	return (0);
}

void
vdev_metaslab_fini(vdev_t *vd)
{
	uint64_t m;
	uint64_t count = vd->vdev_ms_count;

	if (vd->vdev_ms != NULL) {
		for (m = 0; m < count; m++)
			if (vd->vdev_ms[m] != NULL)
				metaslab_fini(vd->vdev_ms[m]);
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
	}
}

typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
	zio_t		*vps_root;
	vdev_t		*vps_vd;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_probe_stats_t *vps = zio->io_private;
	vdev_t *vd = vps->vps_vd;

	if (zio->io_type == ZIO_TYPE_READ) {
		ASSERT(zio->io_vd == vd);
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vps->vps_root, vd,
			    zio->io_offset, zio->io_size, zio->io_data,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			zio_buf_free(zio->io_data, zio->io_size);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		ASSERT(zio->io_vd == vd);
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		zio_buf_free(zio->io_data, zio->io_size);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		ASSERT(zio->io_vd == NULL);
		ASSERT(zio == vps->vps_root);

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, 0, 0);
			zio->io_error = ENXIO;
		}
		kmem_free(vps, sizeof (*vps));
	}
}

/*
 * Determine whether this device is accessible by reading and writing
 * to several known locations: the pad regions of each vdev label
 * but the first (which we leave alone in case it contains a VTOC).
 */
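/*
 * Illustrative usage (not part of the original comment): the probe can be
 * attached to a parent I/O via the 'pio' argument, or waited on
 * synchronously the way vdev_open() does below:
 *
 *	error = zio_wait(vdev_probe(vd, NULL));
 */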
zio_t *
vdev_probe(vdev_t *vd, zio_t *pio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps;
	zio_t *zio;

	vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

	vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
	    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE | ZIO_FLAG_DONT_RETRY;

	if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
		/*
		 * vdev_cant_read and vdev_cant_write can only transition
		 * from TRUE to FALSE when we have the SCL_ZIO lock as writer;
		 * otherwise they can only transition from FALSE to TRUE.
		 * This ensures that any zio looking at these values can
		 * assume that failures persist for the life of the I/O.
		 * That's important because when a device has intermittent
		 * connectivity problems, we want to ensure that they're
		 * ascribed to the device (ENXIO) and not the zio (EIO).
		 *
		 * Since we hold SCL_ZIO as writer here, clear both values
		 * so the probe can reevaluate from first principles.
		 */
		vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
		vd->vdev_cant_read = B_FALSE;
		vd->vdev_cant_write = B_FALSE;
	}

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_null(pio, spa, vdev_probe_done, vps, vps->vps_flags);

	vps->vps_root = zio;
	vps->vps_vd = vd;

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(zio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad)),
		    VDEV_SKIP_SIZE, zio_buf_alloc(VDEV_SKIP_SIZE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	return (zio);
}

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	int c;
	uint64_t osize = 0;
	uint64_t asize, psize;
	uint64_t ashift = 0;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;

	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    VDEV_AUX_ERR_EXCEEDED);
		return (ENXIO);
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (ENXIO);
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);

	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    vd->vdev_stat.vs_aux);
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vd->vdev_state = VDEV_STATE_HEALTHY;
	}

	for (c = 0; c < vd->vdev_children; c++)
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = 0;
		asize = osize;
	}

	vd->vdev_psize = psize;

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For testing purposes, a higher ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
	} else {
		/*
		 * Make sure the alignment requirement hasn't increased.
		 */
		if (ashift > vd->vdev_top->vdev_ashift) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}

		/*
		 * Make sure the device hasn't shrunk.
		 */
		if (asize < vd->vdev_asize) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}

		/*
		 * If all children are healthy and the asize has increased,
		 * then we've experienced dynamic LUN growth.
		 */
		if (vd->vdev_state == VDEV_STATE_HEALTHY &&
		    asize > vd->vdev_asize) {
			vd->vdev_asize = asize;
		}
	}

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_IO_FAILURE);
		return (error);
	}

	/*
	 * If this is a top-level vdev, compute the raidz-deflation
	 * ratio.  Note, we hard-code in 128k (1<<17) because it is the
	 * current "typical" blocksize.  Even if SPA_MAXBLOCKSIZE
	 * changes, this algorithm must never change, or we will
	 * inconsistently account for existing bp's.
	 */
	if (vd->vdev_top == vd) {
		vd->vdev_deflate_ratio = (1<<17) /
		    (vdev_psize_to_asize(vd, 1<<17) >> SPA_MINBLOCKSHIFT);
	}

	/*
	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
	 * resilver.  But don't do this if we are doing a reopen for a scrub,
	 * since this would just restart the scrub we are already doing.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
	    vdev_resilver_needed(vd, NULL, NULL))
		spa_async_request(spa, SPA_ASYNC_RESILVER);

	return (0);
}

/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int c;
	nvlist_t *label;
	uint64_t guid, top_guid;
	uint64_t state;

	for (c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c]) != 0)
			return (EBADF);

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {

		if ((label = vdev_label_read_config(vd)) == NULL) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
		    &guid) != 0 || guid != spa_guid(spa)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		/*
		 * If this vdev just became a top-level vdev because its
		 * sibling was detached, it will have adopted the parent's
		 * vdev guid -- but the label may or may not be on disk yet.
		 * Fortunately, either version of the label will have the
		 * same top guid, so if we're a top-level vdev, we can
		 * safely compare to that instead.
		 */
		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
		    &guid) != 0 ||
		    nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
		    &top_guid) != 0 ||
		    (vd->vdev_guid != guid &&
		    (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		nvlist_free(label);

		if (spa->spa_load_state == SPA_LOAD_OPEN &&
		    state != POOL_STATE_ACTIVE)
			return (EBADF);

		/*
		 * If we were able to open and validate a vdev that was
		 * previously marked permanently unavailable, clear that state
		 * now.
		 */
		if (vd->vdev_not_present)
			vd->vdev_not_present = 0;
	}

	return (0);
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vd->vdev_ops->vdev_op_close(vd);

	vdev_cache_purge(vd);

	/*
	 * We record the previous state before we close it, so that if we are
	 * doing a reopen(), we don't generate FMA ereports if we notice that
	 * it's still faulted.
	 */
	vd->vdev_prevstate = vd->vdev_state;

	if (vd->vdev_offline)
		vd->vdev_state = VDEV_STATE_OFFLINE;
	else
		vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}

void
vdev_reopen(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vdev_close(vd);
	(void) vdev_open(vd);

	/*
	 * Call vdev_validate() here to make sure we have the same device.
	 * Otherwise, a device with an invalid label could be successfully
	 * opened in response to vdev_reopen().
	 */
	if (vd->vdev_aux) {
		(void) vdev_validate_aux(vd);
		if (vdev_readable(vd) && vdev_writeable(vd) &&
		    !l2arc_vdev_present(vd)) {
			uint64_t size = vdev_get_rsize(vd);
			l2arc_add_vdev(spa, vd,
			    VDEV_LABEL_START_SIZE,
			    size - VDEV_LABEL_START_SIZE);
		}
	} else {
		(void) vdev_validate(vd);
	}

	/*
	 * Reassess parent vdev's health.
	 */
	vdev_propagate_state(vd);
}

int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
	int error;

	/*
	 * Normally, partial opens (e.g. of a mirror) are allowed.
	 * For a create, however, we want to fail the request if
	 * there are any components we can't open.
	 */
	error = vdev_open(vd);

	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
		vdev_close(vd);
		return (error ? error : ENXIO);
	}

	/*
	 * Recursively initialize all labels.
	 */
	if ((error = vdev_label_init(vd, txg, isreplacing ?
	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
		vdev_close(vd);
		return (error);
	}

	return (0);
}

/*
 * This is the latter half of vdev_create().  It is distinct because it
 * involves initiating transactions in order to do metaslab creation.
 * For creation, we want to try to create all vdevs at once and then undo it
 * if anything fails; this is much harder if we have pending transactions.
 */
void
vdev_init(vdev_t *vd, uint64_t txg)
{
	/*
	 * Aim for roughly 200 metaslabs per vdev.
	 */
	vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);

	/*
	 * Initialize the vdev's metaslabs.  This can't fail because
	 * there's nothing to read when creating all new metaslabs.
	 */
	VERIFY(vdev_metaslab_init(vd, txg) == 0);
}

void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
	ASSERT(vd == vd->vdev_top);
	ASSERT(ISP2(flags));

	if (flags & VDD_METASLAB)
		(void) txg_list_add(&vd->vdev_ms_list, arg, txg);

	if (flags & VDD_DTL)
		(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);

	(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}

/*
 * DTLs.
 *
 * A vdev's DTL (dirty time log) is the set of transaction groups for which
 * the vdev has less than perfect replication.  There are four kinds of DTL:
 *
 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
 *
 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
 *
 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
 *	scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
 *	txgs that was scrubbed.
 *
 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
 *	persistent errors or just some device being offline.
 *	Unlike the other three, the DTL_OUTAGE map is not generally
 *	maintained; it's only computed when needed, typically to
 *	determine whether a device can be detached.
 *
 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
 * either has the data or it doesn't.
 *
 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
 * if any child is less than fully replicated, then so is its parent.
 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
 * comprising only those txgs which appear in more than 'maxfaults' children;
 * those are the txgs we don't have enough replication to read.  For example,
 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
 * two child DTL_MISSING maps.
 *
 * It should be clear from the above that to compute the DTLs and outage maps
 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
 * Therefore, that is all we keep on disk.  When loading the pool, or after
 * a configuration change, we generate all other DTLs from first principles.
 */
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
	space_map_t *sm = &vd->vdev_dtl[t];

	ASSERT(t < DTL_TYPES);
	ASSERT(vd != vd->vdev_spa->spa_root_vdev);

	mutex_enter(sm->sm_lock);
	if (!space_map_contains(sm, txg, size))
		space_map_add(sm, txg, size);
	mutex_exit(sm->sm_lock);
}

boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
	space_map_t *sm = &vd->vdev_dtl[t];
	boolean_t dirty = B_FALSE;

	ASSERT(t < DTL_TYPES);
	ASSERT(vd != vd->vdev_spa->spa_root_vdev);

	mutex_enter(sm->sm_lock);
	if (sm->sm_space != 0)
		dirty = space_map_contains(sm, txg, size);
	mutex_exit(sm->sm_lock);

	return (dirty);
}

boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
	space_map_t *sm = &vd->vdev_dtl[t];
	boolean_t empty;

	mutex_enter(sm->sm_lock);
	empty = (sm->sm_space == 0);
	mutex_exit(sm->sm_lock);

	return (empty);
}
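/*
 * Illustrative example of the 'maxfaults' rule applied below (not part of
 * the original comment): for a 3-way mirror (maxfaults == 2), a txg enters
 * the parent's DTL_MISSING only if all three children are missing it; for
 * double-parity RAID-Z (maxfaults == 2), only if three or more children are
 * missing it.  A txg missing from just one or two children is still
 * readable, so it lands only in DTL_PARTIAL.
 */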
1380789Sahrens /*
1381789Sahrens  * Reassess DTLs after a config change or scrub completion.
1382789Sahrens  */
1383789Sahrens void
1384789Sahrens vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
1385789Sahrens {
13861544Seschrock     spa_t *spa = vd->vdev_spa;
1387*8241SJeff.Bonwick@Sun.COM     avl_tree_t reftree;
1388*8241SJeff.Bonwick@Sun.COM     int minref;
1389*8241SJeff.Bonwick@Sun.COM 
1390*8241SJeff.Bonwick@Sun.COM     ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1391*8241SJeff.Bonwick@Sun.COM 
1392*8241SJeff.Bonwick@Sun.COM     for (int c = 0; c < vd->vdev_children; c++)
1393*8241SJeff.Bonwick@Sun.COM         vdev_dtl_reassess(vd->vdev_child[c], txg,
1394*8241SJeff.Bonwick@Sun.COM             scrub_txg, scrub_done);
1395*8241SJeff.Bonwick@Sun.COM 
1396*8241SJeff.Bonwick@Sun.COM     if (vd == spa->spa_root_vdev)
1397*8241SJeff.Bonwick@Sun.COM         return;
1398*8241SJeff.Bonwick@Sun.COM 
1399*8241SJeff.Bonwick@Sun.COM     if (vd->vdev_ops->vdev_op_leaf) {
1400789Sahrens         mutex_enter(&vd->vdev_dtl_lock);
14017046Sahrens         if (scrub_txg != 0 &&
14027046Sahrens             (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) {
14037046Sahrens             /* XXX should check scrub_done? */
14047046Sahrens             /*
14057046Sahrens              * We completed a scrub up to scrub_txg.  If we
14067046Sahrens              * did it without rebooting, then the scrub dtl
14077046Sahrens              * will be valid, so excise the old region and
14087046Sahrens              * fold in the scrub dtl.  Otherwise, leave the
14097046Sahrens              * dtl as-is if there was an error.
1410*8241SJeff.Bonwick@Sun.COM              *
1411*8241SJeff.Bonwick@Sun.COM              * There's a little trick here: to excise the beginning
1412*8241SJeff.Bonwick@Sun.COM              * of the DTL_MISSING map, we put it into a reference
1413*8241SJeff.Bonwick@Sun.COM              * tree and then add a segment with refcnt -1 that
1414*8241SJeff.Bonwick@Sun.COM              * covers the range [0, scrub_txg).  This means
1415*8241SJeff.Bonwick@Sun.COM              * that each txg in that range has refcnt -1 or 0.
1416*8241SJeff.Bonwick@Sun.COM              * We then add DTL_SCRUB with a refcnt of 2, so that
1417*8241SJeff.Bonwick@Sun.COM              * entries in the range [0, scrub_txg) will have a
1418*8241SJeff.Bonwick@Sun.COM              * positive refcnt -- either 1 or 2.  We then convert
1419*8241SJeff.Bonwick@Sun.COM              * the reference tree into the new DTL_MISSING map.
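             *
             * Editor's worked example (added for illustration, not in the
             * original comment): suppose DTL_MISSING covers txgs [5, 20),
             * the scrub reached scrub_txg = 15, and DTL_SCRUB is [8, 10).
             * The reference tree then holds refcnt +1 over [5, 20),
             * refcnt -1 over [0, 15), and refcnt +2 over [8, 10), so
             * [8, 10) ends up at 2, [15, 20) at 1, and everything else
             * at 0 or below.  Generating the map with minref 1 yields a
             * new DTL_MISSING of [8, 10) plus [15, 20): the successfully
             * scrubbed region is excised and the unrepaired region kept.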
14207046Sahrens */ 1421*8241SJeff.Bonwick@Sun.COM space_map_ref_create(&reftree); 1422*8241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree, 1423*8241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_MISSING], 1); 1424*8241SJeff.Bonwick@Sun.COM space_map_ref_add_seg(&reftree, 0, scrub_txg, -1); 1425*8241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree, 1426*8241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_SCRUB], 2); 1427*8241SJeff.Bonwick@Sun.COM space_map_ref_generate_map(&reftree, 1428*8241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_MISSING], 1); 1429*8241SJeff.Bonwick@Sun.COM space_map_ref_destroy(&reftree); 1430789Sahrens } 1431*8241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); 1432*8241SJeff.Bonwick@Sun.COM space_map_walk(&vd->vdev_dtl[DTL_MISSING], 1433*8241SJeff.Bonwick@Sun.COM space_map_add, &vd->vdev_dtl[DTL_PARTIAL]); 1434789Sahrens if (scrub_done) 1435*8241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL); 1436*8241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); 1437*8241SJeff.Bonwick@Sun.COM if (!vdev_readable(vd)) 1438*8241SJeff.Bonwick@Sun.COM space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); 1439*8241SJeff.Bonwick@Sun.COM else 1440*8241SJeff.Bonwick@Sun.COM space_map_walk(&vd->vdev_dtl[DTL_MISSING], 1441*8241SJeff.Bonwick@Sun.COM space_map_add, &vd->vdev_dtl[DTL_OUTAGE]); 1442789Sahrens mutex_exit(&vd->vdev_dtl_lock); 14437046Sahrens 14441732Sbonwick if (txg != 0) 14451732Sbonwick vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); 1446789Sahrens return; 1447789Sahrens } 1448789Sahrens 1449789Sahrens mutex_enter(&vd->vdev_dtl_lock); 1450*8241SJeff.Bonwick@Sun.COM for (int t = 0; t < DTL_TYPES; t++) { 1451*8241SJeff.Bonwick@Sun.COM if (t == DTL_SCRUB) 1452*8241SJeff.Bonwick@Sun.COM continue; /* leaf vdevs only */ 1453*8241SJeff.Bonwick@Sun.COM if (t == DTL_PARTIAL) 1454*8241SJeff.Bonwick@Sun.COM minref = 1; /* i.e. 
non-zero */ 1455*8241SJeff.Bonwick@Sun.COM else if (vd->vdev_nparity != 0) 1456*8241SJeff.Bonwick@Sun.COM minref = vd->vdev_nparity + 1; /* RAID-Z */ 1457*8241SJeff.Bonwick@Sun.COM else 1458*8241SJeff.Bonwick@Sun.COM minref = vd->vdev_children; /* any kind of mirror */ 1459*8241SJeff.Bonwick@Sun.COM space_map_ref_create(&reftree); 1460*8241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) { 1461*8241SJeff.Bonwick@Sun.COM vdev_t *cvd = vd->vdev_child[c]; 1462*8241SJeff.Bonwick@Sun.COM mutex_enter(&cvd->vdev_dtl_lock); 1463*8241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree, &cvd->vdev_dtl[t], 1); 1464*8241SJeff.Bonwick@Sun.COM mutex_exit(&cvd->vdev_dtl_lock); 1465*8241SJeff.Bonwick@Sun.COM } 1466*8241SJeff.Bonwick@Sun.COM space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref); 1467*8241SJeff.Bonwick@Sun.COM space_map_ref_destroy(&reftree); 1468*8241SJeff.Bonwick@Sun.COM } 1469789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1470789Sahrens } 1471789Sahrens 1472789Sahrens static int 1473789Sahrens vdev_dtl_load(vdev_t *vd) 1474789Sahrens { 1475789Sahrens spa_t *spa = vd->vdev_spa; 1476*8241SJeff.Bonwick@Sun.COM space_map_obj_t *smo = &vd->vdev_dtl_smo; 14771732Sbonwick objset_t *mos = spa->spa_meta_objset; 1478789Sahrens dmu_buf_t *db; 1479789Sahrens int error; 1480789Sahrens 1481789Sahrens ASSERT(vd->vdev_children == 0); 1482789Sahrens 1483789Sahrens if (smo->smo_object == 0) 1484789Sahrens return (0); 1485789Sahrens 14861732Sbonwick if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0) 14871544Seschrock return (error); 14881732Sbonwick 14894944Smaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 14904944Smaybee bcopy(db->db_data, smo, sizeof (*smo)); 14911544Seschrock dmu_buf_rele(db, FTAG); 1492789Sahrens 1493789Sahrens mutex_enter(&vd->vdev_dtl_lock); 1494*8241SJeff.Bonwick@Sun.COM error = space_map_load(&vd->vdev_dtl[DTL_MISSING], 1495*8241SJeff.Bonwick@Sun.COM NULL, SM_ALLOC, smo, mos); 1496789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1497789Sahrens 1498789Sahrens return (error); 1499789Sahrens } 1500789Sahrens 1501789Sahrens void 1502789Sahrens vdev_dtl_sync(vdev_t *vd, uint64_t txg) 1503789Sahrens { 1504789Sahrens spa_t *spa = vd->vdev_spa; 1505*8241SJeff.Bonwick@Sun.COM space_map_obj_t *smo = &vd->vdev_dtl_smo; 1506*8241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[DTL_MISSING]; 15071732Sbonwick objset_t *mos = spa->spa_meta_objset; 1508789Sahrens space_map_t smsync; 1509789Sahrens kmutex_t smlock; 1510789Sahrens dmu_buf_t *db; 1511789Sahrens dmu_tx_t *tx; 1512789Sahrens 1513789Sahrens tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 1514789Sahrens 1515789Sahrens if (vd->vdev_detached) { 1516789Sahrens if (smo->smo_object != 0) { 15171732Sbonwick int err = dmu_object_free(mos, smo->smo_object, tx); 1518789Sahrens ASSERT3U(err, ==, 0); 1519789Sahrens smo->smo_object = 0; 1520789Sahrens } 1521789Sahrens dmu_tx_commit(tx); 1522789Sahrens return; 1523789Sahrens } 1524789Sahrens 1525789Sahrens if (smo->smo_object == 0) { 1526789Sahrens ASSERT(smo->smo_objsize == 0); 1527789Sahrens ASSERT(smo->smo_alloc == 0); 15281732Sbonwick smo->smo_object = dmu_object_alloc(mos, 1529789Sahrens DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT, 1530789Sahrens DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx); 1531789Sahrens ASSERT(smo->smo_object != 0); 1532789Sahrens vdev_config_dirty(vd->vdev_top); 1533789Sahrens } 1534789Sahrens 1535789Sahrens mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL); 1536789Sahrens 1537789Sahrens space_map_create(&smsync, sm->sm_start, 
sm->sm_size, sm->sm_shift, 1538789Sahrens &smlock); 1539789Sahrens 1540789Sahrens mutex_enter(&smlock); 1541789Sahrens 1542789Sahrens mutex_enter(&vd->vdev_dtl_lock); 15431732Sbonwick space_map_walk(sm, space_map_add, &smsync); 1544789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1545789Sahrens 15461732Sbonwick space_map_truncate(smo, mos, tx); 15471732Sbonwick space_map_sync(&smsync, SM_ALLOC, smo, mos, tx); 1548789Sahrens 1549789Sahrens space_map_destroy(&smsync); 1550789Sahrens 1551789Sahrens mutex_exit(&smlock); 1552789Sahrens mutex_destroy(&smlock); 1553789Sahrens 15541732Sbonwick VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)); 1555789Sahrens dmu_buf_will_dirty(db, tx); 15564944Smaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 15574944Smaybee bcopy(smo, db->db_data, sizeof (*smo)); 15581544Seschrock dmu_buf_rele(db, FTAG); 1559789Sahrens 1560789Sahrens dmu_tx_commit(tx); 1561789Sahrens } 1562789Sahrens 15637046Sahrens /* 1564*8241SJeff.Bonwick@Sun.COM * Determine whether the specified vdev can be offlined/detached/removed 1565*8241SJeff.Bonwick@Sun.COM * without losing data. 1566*8241SJeff.Bonwick@Sun.COM */ 1567*8241SJeff.Bonwick@Sun.COM boolean_t 1568*8241SJeff.Bonwick@Sun.COM vdev_dtl_required(vdev_t *vd) 1569*8241SJeff.Bonwick@Sun.COM { 1570*8241SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 1571*8241SJeff.Bonwick@Sun.COM vdev_t *tvd = vd->vdev_top; 1572*8241SJeff.Bonwick@Sun.COM uint8_t cant_read = vd->vdev_cant_read; 1573*8241SJeff.Bonwick@Sun.COM boolean_t required; 1574*8241SJeff.Bonwick@Sun.COM 1575*8241SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1576*8241SJeff.Bonwick@Sun.COM 1577*8241SJeff.Bonwick@Sun.COM if (vd == spa->spa_root_vdev || vd == tvd) 1578*8241SJeff.Bonwick@Sun.COM return (B_TRUE); 1579*8241SJeff.Bonwick@Sun.COM 1580*8241SJeff.Bonwick@Sun.COM /* 1581*8241SJeff.Bonwick@Sun.COM * Temporarily mark the device as unreadable, and then determine 1582*8241SJeff.Bonwick@Sun.COM * whether this results in any DTL outages in the top-level vdev. 1583*8241SJeff.Bonwick@Sun.COM * If not, we can safely offline/detach/remove the device. 1584*8241SJeff.Bonwick@Sun.COM */ 1585*8241SJeff.Bonwick@Sun.COM vd->vdev_cant_read = B_TRUE; 1586*8241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 1587*8241SJeff.Bonwick@Sun.COM required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 1588*8241SJeff.Bonwick@Sun.COM vd->vdev_cant_read = cant_read; 1589*8241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 1590*8241SJeff.Bonwick@Sun.COM 1591*8241SJeff.Bonwick@Sun.COM return (required); 1592*8241SJeff.Bonwick@Sun.COM } 1593*8241SJeff.Bonwick@Sun.COM 1594*8241SJeff.Bonwick@Sun.COM /* 15957046Sahrens * Determine if resilver is needed, and if so the txg range. 
15967046Sahrens  */
15977046Sahrens boolean_t
15987046Sahrens vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
15997046Sahrens {
16007046Sahrens     boolean_t needed = B_FALSE;
16017046Sahrens     uint64_t thismin = UINT64_MAX;
16027046Sahrens     uint64_t thismax = 0;
16037046Sahrens 
16047046Sahrens     if (vd->vdev_children == 0) {
16057046Sahrens         mutex_enter(&vd->vdev_dtl_lock);
1606*8241SJeff.Bonwick@Sun.COM         if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 &&
1607*8241SJeff.Bonwick@Sun.COM             vdev_writeable(vd)) {
16087046Sahrens             space_seg_t *ss;
16097046Sahrens 
1610*8241SJeff.Bonwick@Sun.COM             ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root);
16117046Sahrens             thismin = ss->ss_start - 1;
1612*8241SJeff.Bonwick@Sun.COM             ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root);
16137046Sahrens             thismax = ss->ss_end;
16147046Sahrens             needed = B_TRUE;
16157046Sahrens         }
16167046Sahrens         mutex_exit(&vd->vdev_dtl_lock);
16177046Sahrens     } else {
1618*8241SJeff.Bonwick@Sun.COM         for (int c = 0; c < vd->vdev_children; c++) {
16197046Sahrens             vdev_t *cvd = vd->vdev_child[c];
16207046Sahrens             uint64_t cmin, cmax;
16217046Sahrens 
16227046Sahrens             if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
16237046Sahrens                 thismin = MIN(thismin, cmin);
16247046Sahrens                 thismax = MAX(thismax, cmax);
16257046Sahrens                 needed = B_TRUE;
16267046Sahrens             }
16277046Sahrens         }
16287046Sahrens     }
16297046Sahrens 
16307046Sahrens     if (needed && minp) {
16317046Sahrens         *minp = thismin;
16327046Sahrens         *maxp = thismax;
16337046Sahrens     }
16347046Sahrens     return (needed);
16357046Sahrens }
16367046Sahrens 
16371986Seschrock void
16381544Seschrock vdev_load(vdev_t *vd)
1639789Sahrens {
1640789Sahrens     /*
1641789Sahrens      * Recursively load all children.
1642789Sahrens      */
1643*8241SJeff.Bonwick@Sun.COM     for (int c = 0; c < vd->vdev_children; c++)
16441986Seschrock         vdev_load(vd->vdev_child[c]);
1645789Sahrens 
1646789Sahrens     /*
16471585Sbonwick      * If this is a top-level vdev, initialize its metaslabs.
1648789Sahrens      */
16491986Seschrock     if (vd == vd->vdev_top &&
16501986Seschrock         (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
16511986Seschrock         vdev_metaslab_init(vd, 0) != 0))
16521986Seschrock         vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
16531986Seschrock             VDEV_AUX_CORRUPT_DATA);
1654789Sahrens 
1655789Sahrens     /*
1656789Sahrens      * If this is a leaf vdev, load its DTL.
1657789Sahrens      */
16581986Seschrock     if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
16591986Seschrock         vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
16601986Seschrock             VDEV_AUX_CORRUPT_DATA);
1661789Sahrens }
1662789Sahrens 
16632082Seschrock /*
16645450Sbrendan  * The special vdev case is used for hot spares and l2cache devices.  Its
16655450Sbrendan  * sole purpose is to set the vdev state for the associated vdev.  To do this,
16665450Sbrendan  * we make sure that we can open the underlying device, then try to read the
16675450Sbrendan  * label, and make sure that the label is sane and that it hasn't been
16685450Sbrendan  * repurposed to another pool.
16692082Seschrock */ 16702082Seschrock int 16715450Sbrendan vdev_validate_aux(vdev_t *vd) 16722082Seschrock { 16732082Seschrock nvlist_t *label; 16742082Seschrock uint64_t guid, version; 16752082Seschrock uint64_t state; 16762082Seschrock 16777754SJeff.Bonwick@Sun.COM if (!vdev_readable(vd)) 16786643Seschrock return (0); 16796643Seschrock 16802082Seschrock if ((label = vdev_label_read_config(vd)) == NULL) { 16812082Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 16822082Seschrock VDEV_AUX_CORRUPT_DATA); 16832082Seschrock return (-1); 16842082Seschrock } 16852082Seschrock 16862082Seschrock if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 || 16874577Sahrens version > SPA_VERSION || 16882082Seschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 || 16892082Seschrock guid != vd->vdev_guid || 16902082Seschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) { 16912082Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 16922082Seschrock VDEV_AUX_CORRUPT_DATA); 16932082Seschrock nvlist_free(label); 16942082Seschrock return (-1); 16952082Seschrock } 16962082Seschrock 16972082Seschrock /* 16982082Seschrock * We don't actually check the pool state here. If it's in fact in 16992082Seschrock * use by another pool, we update this fact on the fly when requested. 17002082Seschrock */ 17012082Seschrock nvlist_free(label); 17022082Seschrock return (0); 17032082Seschrock } 17042082Seschrock 1705789Sahrens void 1706789Sahrens vdev_sync_done(vdev_t *vd, uint64_t txg) 1707789Sahrens { 1708789Sahrens metaslab_t *msp; 1709789Sahrens 1710789Sahrens while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) 1711789Sahrens metaslab_sync_done(msp, txg); 1712789Sahrens } 1713789Sahrens 1714789Sahrens void 1715789Sahrens vdev_sync(vdev_t *vd, uint64_t txg) 1716789Sahrens { 1717789Sahrens spa_t *spa = vd->vdev_spa; 1718789Sahrens vdev_t *lvd; 1719789Sahrens metaslab_t *msp; 17201732Sbonwick dmu_tx_t *tx; 1721789Sahrens 17221732Sbonwick if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) { 17231732Sbonwick ASSERT(vd == vd->vdev_top); 17241732Sbonwick tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 17251732Sbonwick vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, 17261732Sbonwick DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); 17271732Sbonwick ASSERT(vd->vdev_ms_array != 0); 17281732Sbonwick vdev_config_dirty(vd); 17291732Sbonwick dmu_tx_commit(tx); 17301732Sbonwick } 1731789Sahrens 17321732Sbonwick while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { 1733789Sahrens metaslab_sync(msp, txg); 17341732Sbonwick (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); 17351732Sbonwick } 1736789Sahrens 1737789Sahrens while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) 1738789Sahrens vdev_dtl_sync(lvd, txg); 1739789Sahrens 1740789Sahrens (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); 1741789Sahrens } 1742789Sahrens 1743789Sahrens uint64_t 1744789Sahrens vdev_psize_to_asize(vdev_t *vd, uint64_t psize) 1745789Sahrens { 1746789Sahrens return (vd->vdev_ops->vdev_op_asize(vd, psize)); 1747789Sahrens } 1748789Sahrens 17494451Seschrock /* 17504451Seschrock * Mark the given vdev faulted. A faulted vdev behaves as if the device could 17514451Seschrock * not be opened, and no I/O is attempted. 
17524451Seschrock  */
1753789Sahrens int
17544451Seschrock vdev_fault(spa_t *spa, uint64_t guid)
17554451Seschrock {
17566643Seschrock     vdev_t *vd;
17574451Seschrock 
17587754SJeff.Bonwick@Sun.COM     spa_vdev_state_enter(spa);
17594451Seschrock 
17606643Seschrock     if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
17617754SJeff.Bonwick@Sun.COM         return (spa_vdev_state_exit(spa, NULL, ENODEV));
17627754SJeff.Bonwick@Sun.COM 
17634451Seschrock     if (!vd->vdev_ops->vdev_op_leaf)
17647754SJeff.Bonwick@Sun.COM         return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
17654451Seschrock 
17664451Seschrock     /*
17674451Seschrock      * Faulted state takes precedence over degraded.
17684451Seschrock      */
17694451Seschrock     vd->vdev_faulted = 1ULL;
17704451Seschrock     vd->vdev_degraded = 0ULL;
17717754SJeff.Bonwick@Sun.COM     vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED);
17724451Seschrock 
17734451Seschrock     /*
17748123SDavid.Marker@sun.com      * If marking the vdev as faulted causes the top-level vdev to become
17754451Seschrock      * unavailable, then back off and simply mark the vdev as degraded
17764451Seschrock      * instead.
17774451Seschrock      */
17786643Seschrock     if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) {
17794451Seschrock         vd->vdev_degraded = 1ULL;
17804451Seschrock         vd->vdev_faulted = 0ULL;
17814451Seschrock 
17824451Seschrock         /*
17834451Seschrock          * If we reopen the device and it's not dead, only then do we
17844451Seschrock          * mark it degraded.
17854451Seschrock          */
17864451Seschrock         vdev_reopen(vd);
17874451Seschrock 
17885329Sgw25295         if (vdev_readable(vd)) {
17894451Seschrock             vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
17904451Seschrock                 VDEV_AUX_ERR_EXCEEDED);
17914451Seschrock         }
17924451Seschrock     }
17934451Seschrock 
17947754SJeff.Bonwick@Sun.COM     return (spa_vdev_state_exit(spa, vd, 0));
17954451Seschrock }
17964451Seschrock 
17974451Seschrock /*
17984451Seschrock  * Mark the given vdev degraded.  A degraded vdev is purely an indication to the
17994451Seschrock  * user that something is wrong.  The vdev continues to operate as normal as far
18004451Seschrock  * as I/O is concerned.
18014451Seschrock  */
18024451Seschrock int
18034451Seschrock vdev_degrade(spa_t *spa, uint64_t guid)
18044451Seschrock {
18056643Seschrock     vdev_t *vd;
18064451Seschrock 
18077754SJeff.Bonwick@Sun.COM     spa_vdev_state_enter(spa);
18084451Seschrock 
18096643Seschrock     if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
18107754SJeff.Bonwick@Sun.COM         return (spa_vdev_state_exit(spa, NULL, ENODEV));
18117754SJeff.Bonwick@Sun.COM 
18124451Seschrock     if (!vd->vdev_ops->vdev_op_leaf)
18137754SJeff.Bonwick@Sun.COM         return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
18144451Seschrock 
18154451Seschrock     /*
18164451Seschrock      * If the vdev is already faulted, then don't do anything.
18174451Seschrock      */
18187754SJeff.Bonwick@Sun.COM     if (vd->vdev_faulted || vd->vdev_degraded)
18197754SJeff.Bonwick@Sun.COM         return (spa_vdev_state_exit(spa, NULL, 0));
18204451Seschrock 
18214451Seschrock     vd->vdev_degraded = 1ULL;
18224451Seschrock     if (!vdev_is_dead(vd))
18234451Seschrock         vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
18244451Seschrock             VDEV_AUX_ERR_EXCEEDED);
18254451Seschrock 
18267754SJeff.Bonwick@Sun.COM     return (spa_vdev_state_exit(spa, vd, 0));
18274451Seschrock }
18284451Seschrock 
18294451Seschrock /*
18304451Seschrock  * Online the given vdev.  If 'unspare' is set, it implies two things.  First,
18314451Seschrock  * any attached spare device should be detached when the device finishes
18324451Seschrock  * resilvering.
Second, the online should be treated like a 'test' online case, 18334451Seschrock * so no FMA events are generated if the device fails to open. 18344451Seschrock */ 18354451Seschrock int 18367754SJeff.Bonwick@Sun.COM vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 1837789Sahrens { 18386643Seschrock vdev_t *vd; 1839789Sahrens 18407754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa); 18411485Slling 18426643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 18437754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV)); 1844789Sahrens 18451585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 18467754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 18471585Sbonwick 1848789Sahrens vd->vdev_offline = B_FALSE; 18491485Slling vd->vdev_tmpoffline = B_FALSE; 18507754SJeff.Bonwick@Sun.COM vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 18517754SJeff.Bonwick@Sun.COM vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 18521544Seschrock vdev_reopen(vd->vdev_top); 18534451Seschrock vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 18544451Seschrock 18554451Seschrock if (newstate) 18564451Seschrock *newstate = vd->vdev_state; 18574451Seschrock if ((flags & ZFS_ONLINE_UNSPARE) && 18584451Seschrock !vdev_is_dead(vd) && vd->vdev_parent && 18594451Seschrock vd->vdev_parent->vdev_ops == &vdev_spare_ops && 18604451Seschrock vd->vdev_parent->vdev_child[0] == vd) 18614451Seschrock vd->vdev_unspare = B_TRUE; 1862789Sahrens 1863*8241SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 1864789Sahrens } 1865789Sahrens 1866789Sahrens int 18674451Seschrock vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 1868789Sahrens { 18696643Seschrock vdev_t *vd; 1870789Sahrens 18717754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa); 1872789Sahrens 18736643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 18747754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV)); 1875789Sahrens 18761585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 18777754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 18781585Sbonwick 1879789Sahrens /* 18801732Sbonwick * If the device isn't already offline, try to offline it. 1881789Sahrens */ 18821732Sbonwick if (!vd->vdev_offline) { 18831732Sbonwick /* 1884*8241SJeff.Bonwick@Sun.COM * If this device has the only valid copy of some data, 1885*8241SJeff.Bonwick@Sun.COM * don't allow it to be offlined. 18861732Sbonwick */ 1887*8241SJeff.Bonwick@Sun.COM if (vd->vdev_aux == NULL && vdev_dtl_required(vd)) 18887754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, EBUSY)); 1889789Sahrens 18901732Sbonwick /* 18911732Sbonwick * Offline this device and reopen its top-level vdev. 18921732Sbonwick * If this action results in the top-level vdev becoming 18931732Sbonwick * unusable, undo it and fail the request. 
18941732Sbonwick */ 18951732Sbonwick vd->vdev_offline = B_TRUE; 18961544Seschrock vdev_reopen(vd->vdev_top); 1897*8241SJeff.Bonwick@Sun.COM if (vd->vdev_aux == NULL && vdev_is_dead(vd->vdev_top)) { 18981732Sbonwick vd->vdev_offline = B_FALSE; 18991732Sbonwick vdev_reopen(vd->vdev_top); 19007754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, EBUSY)); 19011732Sbonwick } 1902789Sahrens } 1903789Sahrens 19047754SJeff.Bonwick@Sun.COM vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 19051732Sbonwick 19067754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 1907789Sahrens } 1908789Sahrens 19091544Seschrock /* 19101544Seschrock * Clear the error counts associated with this vdev. Unlike vdev_online() and 19111544Seschrock * vdev_offline(), we assume the spa config is locked. We also clear all 19121544Seschrock * children. If 'vd' is NULL, then the user wants to clear all vdevs. 19131544Seschrock */ 19141544Seschrock void 19157754SJeff.Bonwick@Sun.COM vdev_clear(spa_t *spa, vdev_t *vd) 1916789Sahrens { 19177754SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 19187754SJeff.Bonwick@Sun.COM 19197754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1920789Sahrens 19211544Seschrock if (vd == NULL) 19227754SJeff.Bonwick@Sun.COM vd = rvd; 1923789Sahrens 19241544Seschrock vd->vdev_stat.vs_read_errors = 0; 19251544Seschrock vd->vdev_stat.vs_write_errors = 0; 19261544Seschrock vd->vdev_stat.vs_checksum_errors = 0; 1927789Sahrens 19287754SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) 19297754SJeff.Bonwick@Sun.COM vdev_clear(spa, vd->vdev_child[c]); 19304451Seschrock 19314451Seschrock /* 19326959Sek110237 * If we're in the FAULTED state or have experienced failed I/O, then 19336959Sek110237 * clear the persistent state and attempt to reopen the device. We 19346959Sek110237 * also mark the vdev config dirty, so that the new faulted state is 19356959Sek110237 * written out to disk. 
19364451Seschrock */ 19377754SJeff.Bonwick@Sun.COM if (vd->vdev_faulted || vd->vdev_degraded || 19387754SJeff.Bonwick@Sun.COM !vdev_readable(vd) || !vdev_writeable(vd)) { 19396959Sek110237 19404451Seschrock vd->vdev_faulted = vd->vdev_degraded = 0; 19417754SJeff.Bonwick@Sun.COM vd->vdev_cant_read = B_FALSE; 19427754SJeff.Bonwick@Sun.COM vd->vdev_cant_write = B_FALSE; 19437754SJeff.Bonwick@Sun.COM 19444451Seschrock vdev_reopen(vd); 19454451Seschrock 19467754SJeff.Bonwick@Sun.COM if (vd != rvd) 19477754SJeff.Bonwick@Sun.COM vdev_state_dirty(vd->vdev_top); 19487754SJeff.Bonwick@Sun.COM 19497754SJeff.Bonwick@Sun.COM if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) 19504808Sek110237 spa_async_request(spa, SPA_ASYNC_RESILVER); 19514451Seschrock 19524451Seschrock spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR); 19534451Seschrock } 1954789Sahrens } 1955789Sahrens 19567754SJeff.Bonwick@Sun.COM boolean_t 19577754SJeff.Bonwick@Sun.COM vdev_is_dead(vdev_t *vd) 19585329Sgw25295 { 19597754SJeff.Bonwick@Sun.COM return (vd->vdev_state < VDEV_STATE_DEGRADED); 19605329Sgw25295 } 19615329Sgw25295 19627754SJeff.Bonwick@Sun.COM boolean_t 19637754SJeff.Bonwick@Sun.COM vdev_readable(vdev_t *vd) 1964789Sahrens { 19657754SJeff.Bonwick@Sun.COM return (!vdev_is_dead(vd) && !vd->vdev_cant_read); 1966789Sahrens } 1967789Sahrens 19687754SJeff.Bonwick@Sun.COM boolean_t 19697754SJeff.Bonwick@Sun.COM vdev_writeable(vdev_t *vd) 1970789Sahrens { 19717754SJeff.Bonwick@Sun.COM return (!vdev_is_dead(vd) && !vd->vdev_cant_write); 19727754SJeff.Bonwick@Sun.COM } 1973789Sahrens 19747754SJeff.Bonwick@Sun.COM boolean_t 19757980SGeorge.Wilson@Sun.COM vdev_allocatable(vdev_t *vd) 19767980SGeorge.Wilson@Sun.COM { 1977*8241SJeff.Bonwick@Sun.COM uint64_t state = vd->vdev_state; 1978*8241SJeff.Bonwick@Sun.COM 19797980SGeorge.Wilson@Sun.COM /* 1980*8241SJeff.Bonwick@Sun.COM * We currently allow allocations from vdevs which may be in the 19817980SGeorge.Wilson@Sun.COM * process of reopening (i.e. VDEV_STATE_CLOSED). If the device 19827980SGeorge.Wilson@Sun.COM * fails to reopen then we'll catch it later when we're holding 1983*8241SJeff.Bonwick@Sun.COM * the proper locks. Note that we have to get the vdev state 1984*8241SJeff.Bonwick@Sun.COM * in a local variable because although it changes atomically, 1985*8241SJeff.Bonwick@Sun.COM * we're asking two separate questions about it. 19867980SGeorge.Wilson@Sun.COM */ 1987*8241SJeff.Bonwick@Sun.COM return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) && 19887980SGeorge.Wilson@Sun.COM !vd->vdev_cant_write); 19897980SGeorge.Wilson@Sun.COM } 19907980SGeorge.Wilson@Sun.COM 19917980SGeorge.Wilson@Sun.COM boolean_t 19927754SJeff.Bonwick@Sun.COM vdev_accessible(vdev_t *vd, zio_t *zio) 19937754SJeff.Bonwick@Sun.COM { 19947754SJeff.Bonwick@Sun.COM ASSERT(zio->io_vd == vd); 1995789Sahrens 19967754SJeff.Bonwick@Sun.COM if (vdev_is_dead(vd) || vd->vdev_remove_wanted) 19977754SJeff.Bonwick@Sun.COM return (B_FALSE); 1998789Sahrens 19997754SJeff.Bonwick@Sun.COM if (zio->io_type == ZIO_TYPE_READ) 20007754SJeff.Bonwick@Sun.COM return (!vd->vdev_cant_read); 2001789Sahrens 20027754SJeff.Bonwick@Sun.COM if (zio->io_type == ZIO_TYPE_WRITE) 20037754SJeff.Bonwick@Sun.COM return (!vd->vdev_cant_write); 20047754SJeff.Bonwick@Sun.COM 20057754SJeff.Bonwick@Sun.COM return (B_TRUE); 2006789Sahrens } 2007789Sahrens 2008789Sahrens /* 2009789Sahrens * Get statistics for the given vdev. 
2010789Sahrens */ 2011789Sahrens void 2012789Sahrens vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) 2013789Sahrens { 2014789Sahrens vdev_t *rvd = vd->vdev_spa->spa_root_vdev; 2015789Sahrens 2016789Sahrens mutex_enter(&vd->vdev_stat_lock); 2017789Sahrens bcopy(&vd->vdev_stat, vs, sizeof (*vs)); 20187046Sahrens vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors; 2019789Sahrens vs->vs_timestamp = gethrtime() - vs->vs_timestamp; 2020789Sahrens vs->vs_state = vd->vdev_state; 20211175Slling vs->vs_rsize = vdev_get_rsize(vd); 2022789Sahrens mutex_exit(&vd->vdev_stat_lock); 2023789Sahrens 2024789Sahrens /* 2025789Sahrens * If we're getting stats on the root vdev, aggregate the I/O counts 2026789Sahrens * over all top-level vdevs (i.e. the direct children of the root). 2027789Sahrens */ 2028789Sahrens if (vd == rvd) { 20297754SJeff.Bonwick@Sun.COM for (int c = 0; c < rvd->vdev_children; c++) { 2030789Sahrens vdev_t *cvd = rvd->vdev_child[c]; 2031789Sahrens vdev_stat_t *cvs = &cvd->vdev_stat; 2032789Sahrens 2033789Sahrens mutex_enter(&vd->vdev_stat_lock); 20347754SJeff.Bonwick@Sun.COM for (int t = 0; t < ZIO_TYPES; t++) { 2035789Sahrens vs->vs_ops[t] += cvs->vs_ops[t]; 2036789Sahrens vs->vs_bytes[t] += cvs->vs_bytes[t]; 2037789Sahrens } 2038789Sahrens vs->vs_scrub_examined += cvs->vs_scrub_examined; 2039789Sahrens mutex_exit(&vd->vdev_stat_lock); 2040789Sahrens } 2041789Sahrens } 2042789Sahrens } 2043789Sahrens 2044789Sahrens void 20455450Sbrendan vdev_clear_stats(vdev_t *vd) 20465450Sbrendan { 20475450Sbrendan mutex_enter(&vd->vdev_stat_lock); 20485450Sbrendan vd->vdev_stat.vs_space = 0; 20495450Sbrendan vd->vdev_stat.vs_dspace = 0; 20505450Sbrendan vd->vdev_stat.vs_alloc = 0; 20515450Sbrendan mutex_exit(&vd->vdev_stat_lock); 20525450Sbrendan } 20535450Sbrendan 20545450Sbrendan void 20557754SJeff.Bonwick@Sun.COM vdev_stat_update(zio_t *zio, uint64_t psize) 2056789Sahrens { 2057*8241SJeff.Bonwick@Sun.COM spa_t *spa = zio->io_spa; 2058*8241SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 20597754SJeff.Bonwick@Sun.COM vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; 2060789Sahrens vdev_t *pvd; 2061789Sahrens uint64_t txg = zio->io_txg; 2062789Sahrens vdev_stat_t *vs = &vd->vdev_stat; 2063789Sahrens zio_type_t type = zio->io_type; 2064789Sahrens int flags = zio->io_flags; 2065789Sahrens 20667754SJeff.Bonwick@Sun.COM /* 20677754SJeff.Bonwick@Sun.COM * If this i/o is a gang leader, it didn't do any actual work. 20687754SJeff.Bonwick@Sun.COM */ 20697754SJeff.Bonwick@Sun.COM if (zio->io_gang_tree) 20707754SJeff.Bonwick@Sun.COM return; 20717754SJeff.Bonwick@Sun.COM 2072789Sahrens if (zio->io_error == 0) { 20737754SJeff.Bonwick@Sun.COM /* 20747754SJeff.Bonwick@Sun.COM * If this is a root i/o, don't count it -- we've already 20757754SJeff.Bonwick@Sun.COM * counted the top-level vdevs, and vdev_get_stats() will 20767754SJeff.Bonwick@Sun.COM * aggregate them when asked. This reduces contention on 20777754SJeff.Bonwick@Sun.COM * the root vdev_stat_lock and implicitly handles blocks 20787754SJeff.Bonwick@Sun.COM * that compress away to holes, for which there is no i/o. 20797754SJeff.Bonwick@Sun.COM * (Holes never create vdev children, so all the counters 20807754SJeff.Bonwick@Sun.COM * remain zero, which is what we want.) 20817754SJeff.Bonwick@Sun.COM * 20827754SJeff.Bonwick@Sun.COM * Note: this only applies to successful i/o (io_error == 0) 20837754SJeff.Bonwick@Sun.COM * because unlike i/o counts, errors are not additive. 
20847754SJeff.Bonwick@Sun.COM * When reading a ditto block, for example, failure of 20857754SJeff.Bonwick@Sun.COM * one top-level vdev does not imply a root-level error. 20867754SJeff.Bonwick@Sun.COM */ 20877754SJeff.Bonwick@Sun.COM if (vd == rvd) 20887754SJeff.Bonwick@Sun.COM return; 20897754SJeff.Bonwick@Sun.COM 20907754SJeff.Bonwick@Sun.COM ASSERT(vd == zio->io_vd); 2091*8241SJeff.Bonwick@Sun.COM 2092*8241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_IO_BYPASS) 2093*8241SJeff.Bonwick@Sun.COM return; 2094*8241SJeff.Bonwick@Sun.COM 2095*8241SJeff.Bonwick@Sun.COM mutex_enter(&vd->vdev_stat_lock); 2096*8241SJeff.Bonwick@Sun.COM 20977754SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_IO_REPAIR) { 20981807Sbonwick if (flags & ZIO_FLAG_SCRUB_THREAD) 20997754SJeff.Bonwick@Sun.COM vs->vs_scrub_repaired += psize; 2100*8241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_SELF_HEAL) 21017754SJeff.Bonwick@Sun.COM vs->vs_self_healed += psize; 2102789Sahrens } 2103*8241SJeff.Bonwick@Sun.COM 2104*8241SJeff.Bonwick@Sun.COM vs->vs_ops[type]++; 2105*8241SJeff.Bonwick@Sun.COM vs->vs_bytes[type] += psize; 2106*8241SJeff.Bonwick@Sun.COM 2107*8241SJeff.Bonwick@Sun.COM mutex_exit(&vd->vdev_stat_lock); 2108789Sahrens return; 2109789Sahrens } 2110789Sahrens 2111789Sahrens if (flags & ZIO_FLAG_SPECULATIVE) 2112789Sahrens return; 2113789Sahrens 21147754SJeff.Bonwick@Sun.COM mutex_enter(&vd->vdev_stat_lock); 21157754SJeff.Bonwick@Sun.COM if (type == ZIO_TYPE_READ) { 21167754SJeff.Bonwick@Sun.COM if (zio->io_error == ECKSUM) 21177754SJeff.Bonwick@Sun.COM vs->vs_checksum_errors++; 21187754SJeff.Bonwick@Sun.COM else 21197754SJeff.Bonwick@Sun.COM vs->vs_read_errors++; 2120789Sahrens } 21217754SJeff.Bonwick@Sun.COM if (type == ZIO_TYPE_WRITE) 21227754SJeff.Bonwick@Sun.COM vs->vs_write_errors++; 21237754SJeff.Bonwick@Sun.COM mutex_exit(&vd->vdev_stat_lock); 2124789Sahrens 2125*8241SJeff.Bonwick@Sun.COM if (type == ZIO_TYPE_WRITE && txg != 0 && 2126*8241SJeff.Bonwick@Sun.COM (!(flags & ZIO_FLAG_IO_REPAIR) || 2127*8241SJeff.Bonwick@Sun.COM (flags & ZIO_FLAG_SCRUB_THREAD))) { 2128*8241SJeff.Bonwick@Sun.COM /* 2129*8241SJeff.Bonwick@Sun.COM * This is either a normal write (not a repair), or it's a 2130*8241SJeff.Bonwick@Sun.COM * repair induced by the scrub thread. In the normal case, 2131*8241SJeff.Bonwick@Sun.COM * we commit the DTL change in the same txg as the block 2132*8241SJeff.Bonwick@Sun.COM * was born. In the scrub-induced repair case, we know that 2133*8241SJeff.Bonwick@Sun.COM * scrubs run in first-pass syncing context, so we commit 2134*8241SJeff.Bonwick@Sun.COM * the DTL change in spa->spa_syncing_txg. 2135*8241SJeff.Bonwick@Sun.COM * 2136*8241SJeff.Bonwick@Sun.COM * We currently do not make DTL entries for failed spontaneous 2137*8241SJeff.Bonwick@Sun.COM * self-healing writes triggered by normal (non-scrubbing) 2138*8241SJeff.Bonwick@Sun.COM * reads, because we have no transactional context in which to 2139*8241SJeff.Bonwick@Sun.COM * do so -- and it's not clear that it'd be desirable anyway. 
2140*8241SJeff.Bonwick@Sun.COM */ 2141*8241SJeff.Bonwick@Sun.COM if (vd->vdev_ops->vdev_op_leaf) { 2142*8241SJeff.Bonwick@Sun.COM uint64_t commit_txg = txg; 2143*8241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_SCRUB_THREAD) { 2144*8241SJeff.Bonwick@Sun.COM ASSERT(flags & ZIO_FLAG_IO_REPAIR); 2145*8241SJeff.Bonwick@Sun.COM ASSERT(spa_sync_pass(spa) == 1); 2146*8241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1); 2147*8241SJeff.Bonwick@Sun.COM commit_txg = spa->spa_syncing_txg; 2148*8241SJeff.Bonwick@Sun.COM } 2149*8241SJeff.Bonwick@Sun.COM ASSERT(commit_txg >= spa->spa_syncing_txg); 2150*8241SJeff.Bonwick@Sun.COM if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1)) 2151*8241SJeff.Bonwick@Sun.COM return; 2152*8241SJeff.Bonwick@Sun.COM for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2153*8241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1); 2154*8241SJeff.Bonwick@Sun.COM vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); 2155789Sahrens } 2156*8241SJeff.Bonwick@Sun.COM if (vd != rvd) 2157*8241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vd, DTL_MISSING, txg, 1); 2158789Sahrens } 2159789Sahrens } 2160789Sahrens 2161789Sahrens void 2162789Sahrens vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete) 2163789Sahrens { 2164789Sahrens int c; 2165789Sahrens vdev_stat_t *vs = &vd->vdev_stat; 2166789Sahrens 2167789Sahrens for (c = 0; c < vd->vdev_children; c++) 2168789Sahrens vdev_scrub_stat_update(vd->vdev_child[c], type, complete); 2169789Sahrens 2170789Sahrens mutex_enter(&vd->vdev_stat_lock); 2171789Sahrens 2172789Sahrens if (type == POOL_SCRUB_NONE) { 2173789Sahrens /* 2174789Sahrens * Update completion and end time. Leave everything else alone 2175789Sahrens * so we can report what happened during the previous scrub. 2176789Sahrens */ 2177789Sahrens vs->vs_scrub_complete = complete; 2178789Sahrens vs->vs_scrub_end = gethrestime_sec(); 2179789Sahrens } else { 2180789Sahrens vs->vs_scrub_type = type; 2181789Sahrens vs->vs_scrub_complete = 0; 2182789Sahrens vs->vs_scrub_examined = 0; 2183789Sahrens vs->vs_scrub_repaired = 0; 2184789Sahrens vs->vs_scrub_start = gethrestime_sec(); 2185789Sahrens vs->vs_scrub_end = 0; 2186789Sahrens } 2187789Sahrens 2188789Sahrens mutex_exit(&vd->vdev_stat_lock); 2189789Sahrens } 2190789Sahrens 2191789Sahrens /* 2192789Sahrens * Update the in-core space usage stats for this vdev and the root vdev. 2193789Sahrens */ 2194789Sahrens void 21955450Sbrendan vdev_space_update(vdev_t *vd, int64_t space_delta, int64_t alloc_delta, 21965450Sbrendan boolean_t update_root) 2197789Sahrens { 21984527Sperrin int64_t dspace_delta = space_delta; 21994527Sperrin spa_t *spa = vd->vdev_spa; 22004527Sperrin vdev_t *rvd = spa->spa_root_vdev; 22014527Sperrin 2202789Sahrens ASSERT(vd == vd->vdev_top); 22034527Sperrin 22044527Sperrin /* 22054527Sperrin * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion 22064527Sperrin * factor. We must calculate this here and not at the root vdev 22074527Sperrin * because the root vdev's psize-to-asize is simply the max of its 22084527Sperrin * childrens', thus not accurate enough for us. 
22094527Sperrin  */
22104527Sperrin     ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
22114527Sperrin     dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
22124527Sperrin         vd->vdev_deflate_ratio;
2213789Sahrens 
22144527Sperrin     mutex_enter(&vd->vdev_stat_lock);
22154527Sperrin     vd->vdev_stat.vs_space += space_delta;
22164527Sperrin     vd->vdev_stat.vs_alloc += alloc_delta;
22174527Sperrin     vd->vdev_stat.vs_dspace += dspace_delta;
22184527Sperrin     mutex_exit(&vd->vdev_stat_lock);
22192082Seschrock 
22205450Sbrendan     if (update_root) {
22215450Sbrendan         ASSERT(rvd == vd->vdev_parent);
22225450Sbrendan         ASSERT(vd->vdev_ms_count != 0);
22234527Sperrin 
22245450Sbrendan         /*
22255450Sbrendan          * Don't count non-normal (e.g. intent log) space as part of
22265450Sbrendan          * the pool's capacity.
22275450Sbrendan          */
22285450Sbrendan         if (vd->vdev_mg->mg_class != spa->spa_normal_class)
22295450Sbrendan             return;
22305450Sbrendan 
22315450Sbrendan         mutex_enter(&rvd->vdev_stat_lock);
22325450Sbrendan         rvd->vdev_stat.vs_space += space_delta;
22335450Sbrendan         rvd->vdev_stat.vs_alloc += alloc_delta;
22345450Sbrendan         rvd->vdev_stat.vs_dspace += dspace_delta;
22355450Sbrendan         mutex_exit(&rvd->vdev_stat_lock);
22365450Sbrendan     }
2237789Sahrens }
2238789Sahrens 
2239789Sahrens /*
2240789Sahrens  * Mark a top-level vdev's config as dirty, placing it on the dirty list
2241789Sahrens  * so that it will be written out next time the vdev configuration is synced.
2242789Sahrens  * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
2243789Sahrens  */
2244789Sahrens void
2245789Sahrens vdev_config_dirty(vdev_t *vd)
2246789Sahrens {
2247789Sahrens     spa_t *spa = vd->vdev_spa;
2248789Sahrens     vdev_t *rvd = spa->spa_root_vdev;
2249789Sahrens     int c;
2250789Sahrens 
22511601Sbonwick     /*
22526643Seschrock      * If this is an aux vdev (as with l2cache devices), then we update the
22536643Seschrock      * vdev config manually and set the sync flag.
22546643Seschrock      */
22556643Seschrock     if (vd->vdev_aux != NULL) {
22566643Seschrock         spa_aux_vdev_t *sav = vd->vdev_aux;
22576643Seschrock         nvlist_t **aux;
22586643Seschrock         uint_t naux;
22596643Seschrock 
22606643Seschrock         for (c = 0; c < sav->sav_count; c++) {
22616643Seschrock             if (sav->sav_vdevs[c] == vd)
22626643Seschrock                 break;
22636643Seschrock         }
22646643Seschrock 
22657754SJeff.Bonwick@Sun.COM         if (c == sav->sav_count) {
22667754SJeff.Bonwick@Sun.COM             /*
22677754SJeff.Bonwick@Sun.COM              * We're being removed.  There's nothing more to do.
22687754SJeff.Bonwick@Sun.COM              */
22697754SJeff.Bonwick@Sun.COM             ASSERT(sav->sav_sync == B_TRUE);
22707754SJeff.Bonwick@Sun.COM             return;
22717754SJeff.Bonwick@Sun.COM         }
22727754SJeff.Bonwick@Sun.COM 
22736643Seschrock         sav->sav_sync = B_TRUE;
22746643Seschrock 
22756643Seschrock         VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
22766643Seschrock             ZPOOL_CONFIG_L2CACHE, &aux, &naux) == 0);
22776643Seschrock 
22786643Seschrock         ASSERT(c < naux);
22796643Seschrock 
22806643Seschrock         /*
22816643Seschrock          * Setting the nvlist in the middle of the array is a little
22826643Seschrock          * sketchy, but it will work.
22836643Seschrock          */
22846643Seschrock         nvlist_free(aux[c]);
22856643Seschrock         aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE);
22866643Seschrock 
22876643Seschrock         return;
22886643Seschrock     }
22896643Seschrock 
22906643Seschrock     /*
22917754SJeff.Bonwick@Sun.COM      * The dirty list is protected by the SCL_CONFIG lock.
The caller 22927754SJeff.Bonwick@Sun.COM * must either hold SCL_CONFIG as writer, or must be the sync thread 22937754SJeff.Bonwick@Sun.COM * (which holds SCL_CONFIG as reader). There's only one sync thread, 22941601Sbonwick * so this is sufficient to ensure mutual exclusion. 22951601Sbonwick */ 22967754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 22977754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 22987754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_CONFIG, RW_READER))); 22991601Sbonwick 2300789Sahrens if (vd == rvd) { 2301789Sahrens for (c = 0; c < rvd->vdev_children; c++) 2302789Sahrens vdev_config_dirty(rvd->vdev_child[c]); 2303789Sahrens } else { 2304789Sahrens ASSERT(vd == vd->vdev_top); 2305789Sahrens 23067754SJeff.Bonwick@Sun.COM if (!list_link_active(&vd->vdev_config_dirty_node)) 23077754SJeff.Bonwick@Sun.COM list_insert_head(&spa->spa_config_dirty_list, vd); 2308789Sahrens } 2309789Sahrens } 2310789Sahrens 2311789Sahrens void 2312789Sahrens vdev_config_clean(vdev_t *vd) 2313789Sahrens { 23141601Sbonwick spa_t *spa = vd->vdev_spa; 23151601Sbonwick 23167754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 23177754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 23187754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_CONFIG, RW_READER))); 23197754SJeff.Bonwick@Sun.COM 23207754SJeff.Bonwick@Sun.COM ASSERT(list_link_active(&vd->vdev_config_dirty_node)); 23217754SJeff.Bonwick@Sun.COM list_remove(&spa->spa_config_dirty_list, vd); 23227754SJeff.Bonwick@Sun.COM } 23237754SJeff.Bonwick@Sun.COM 23247754SJeff.Bonwick@Sun.COM /* 23257754SJeff.Bonwick@Sun.COM * Mark a top-level vdev's state as dirty, so that the next pass of 23267754SJeff.Bonwick@Sun.COM * spa_sync() can convert this into vdev_config_dirty(). We distinguish 23277754SJeff.Bonwick@Sun.COM * the state changes from larger config changes because they require 23287754SJeff.Bonwick@Sun.COM * much less locking, and are often needed for administrative actions. 23297754SJeff.Bonwick@Sun.COM */ 23307754SJeff.Bonwick@Sun.COM void 23317754SJeff.Bonwick@Sun.COM vdev_state_dirty(vdev_t *vd) 23327754SJeff.Bonwick@Sun.COM { 23337754SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 23347754SJeff.Bonwick@Sun.COM 23357754SJeff.Bonwick@Sun.COM ASSERT(vd == vd->vdev_top); 23361601Sbonwick 23377754SJeff.Bonwick@Sun.COM /* 23387754SJeff.Bonwick@Sun.COM * The state list is protected by the SCL_STATE lock. The caller 23397754SJeff.Bonwick@Sun.COM * must either hold SCL_STATE as writer, or must be the sync thread 23407754SJeff.Bonwick@Sun.COM * (which holds SCL_STATE as reader). There's only one sync thread, 23417754SJeff.Bonwick@Sun.COM * so this is sufficient to ensure mutual exclusion. 
23427754SJeff.Bonwick@Sun.COM */ 23437754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 23447754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 23457754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_STATE, RW_READER))); 23467754SJeff.Bonwick@Sun.COM 23477754SJeff.Bonwick@Sun.COM if (!list_link_active(&vd->vdev_state_dirty_node)) 23487754SJeff.Bonwick@Sun.COM list_insert_head(&spa->spa_state_dirty_list, vd); 23497754SJeff.Bonwick@Sun.COM } 23507754SJeff.Bonwick@Sun.COM 23517754SJeff.Bonwick@Sun.COM void 23527754SJeff.Bonwick@Sun.COM vdev_state_clean(vdev_t *vd) 23537754SJeff.Bonwick@Sun.COM { 23547754SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 23557754SJeff.Bonwick@Sun.COM 23567754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 23577754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 23587754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_STATE, RW_READER))); 23597754SJeff.Bonwick@Sun.COM 23607754SJeff.Bonwick@Sun.COM ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 23617754SJeff.Bonwick@Sun.COM list_remove(&spa->spa_state_dirty_list, vd); 2362789Sahrens } 2363789Sahrens 23646523Sek110237 /* 23656523Sek110237 * Propagate vdev state up from children to parent. 23666523Sek110237 */ 23671775Sbillm void 23681775Sbillm vdev_propagate_state(vdev_t *vd) 23691775Sbillm { 2370*8241SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 2371*8241SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 23721775Sbillm int degraded = 0, faulted = 0; 23731775Sbillm int corrupted = 0; 23741775Sbillm int c; 23751775Sbillm vdev_t *child; 23761775Sbillm 23774451Seschrock if (vd->vdev_children > 0) { 23784451Seschrock for (c = 0; c < vd->vdev_children; c++) { 23794451Seschrock child = vd->vdev_child[c]; 23806976Seschrock 23817754SJeff.Bonwick@Sun.COM if (!vdev_readable(child) || 2382*8241SJeff.Bonwick@Sun.COM (!vdev_writeable(child) && spa_writeable(spa))) { 23836976Seschrock /* 23846976Seschrock * Root special: if there is a top-level log 23856976Seschrock * device, treat the root vdev as if it were 23866976Seschrock * degraded. 23876976Seschrock */ 23886976Seschrock if (child->vdev_islog && vd == rvd) 23896976Seschrock degraded++; 23906976Seschrock else 23916976Seschrock faulted++; 23926976Seschrock } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { 23934451Seschrock degraded++; 23946976Seschrock } 23954451Seschrock 23964451Seschrock if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) 23974451Seschrock corrupted++; 23984451Seschrock } 23991775Sbillm 24004451Seschrock vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); 24014451Seschrock 24024451Seschrock /* 24037754SJeff.Bonwick@Sun.COM * Root special: if there is a top-level vdev that cannot be 24044451Seschrock * opened due to corrupted metadata, then propagate the root 24054451Seschrock * vdev's aux state as 'corrupt' rather than 'insufficient 24064451Seschrock * replicas'. 24074451Seschrock */ 24084451Seschrock if (corrupted && vd == rvd && 24094451Seschrock rvd->vdev_state == VDEV_STATE_CANT_OPEN) 24104451Seschrock vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, 24114451Seschrock VDEV_AUX_CORRUPT_DATA); 24121775Sbillm } 24131775Sbillm 24146976Seschrock if (vd->vdev_parent) 24154451Seschrock vdev_propagate_state(vd->vdev_parent); 24161775Sbillm } 24171775Sbillm 2418789Sahrens /* 24191544Seschrock * Set a vdev's state. 
If this is during an open, we don't update the parent 24201544Seschrock * state, because we're in the process of opening children depth-first. 24211544Seschrock * Otherwise, we propagate the change to the parent. 24221544Seschrock * 24231544Seschrock * If this routine places a device in a faulted state, an appropriate ereport is 24241544Seschrock * generated. 2425789Sahrens */ 2426789Sahrens void 24271544Seschrock vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) 2428789Sahrens { 24291986Seschrock uint64_t save_state; 24306643Seschrock spa_t *spa = vd->vdev_spa; 24311544Seschrock 24321544Seschrock if (state == vd->vdev_state) { 24331544Seschrock vd->vdev_stat.vs_aux = aux; 2434789Sahrens return; 24351544Seschrock } 24361544Seschrock 24371986Seschrock save_state = vd->vdev_state; 2438789Sahrens 2439789Sahrens vd->vdev_state = state; 2440789Sahrens vd->vdev_stat.vs_aux = aux; 2441789Sahrens 24424451Seschrock /* 24434451Seschrock * If we are setting the vdev state to anything but an open state, then 24444451Seschrock * always close the underlying device. Otherwise, we keep accessible 24454451Seschrock * but invalid devices open forever. We don't call vdev_close() itself, 24464451Seschrock * because that implies some extra checks (offline, etc) that we don't 24474451Seschrock * want here. This is limited to leaf devices, because otherwise 24484451Seschrock * closing the device will affect other children. 24494451Seschrock */ 24507780SJeff.Bonwick@Sun.COM if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf) 24514451Seschrock vd->vdev_ops->vdev_op_close(vd); 24524451Seschrock 24534451Seschrock if (vd->vdev_removed && 24544451Seschrock state == VDEV_STATE_CANT_OPEN && 24554451Seschrock (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { 24564451Seschrock /* 24574451Seschrock * If the previous state is set to VDEV_STATE_REMOVED, then this 24584451Seschrock * device was previously marked removed and someone attempted to 24594451Seschrock * reopen it. If this failed due to a nonexistent device, then 24604451Seschrock * keep the device in the REMOVED state. We also let this be if 24614451Seschrock * it is one of our special test online cases, which is only 24624451Seschrock * attempting to online the device and shouldn't generate an FMA 24634451Seschrock * fault. 24644451Seschrock */ 24654451Seschrock vd->vdev_state = VDEV_STATE_REMOVED; 24664451Seschrock vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 24674451Seschrock } else if (state == VDEV_STATE_REMOVED) { 24684451Seschrock /* 24694451Seschrock * Indicate to the ZFS DE that this device has been removed, and 24704451Seschrock * any recent errors should be ignored. 24714451Seschrock */ 24726643Seschrock zfs_post_remove(spa, vd); 24734451Seschrock vd->vdev_removed = B_TRUE; 24744451Seschrock } else if (state == VDEV_STATE_CANT_OPEN) { 24751544Seschrock /* 24761544Seschrock * If we fail to open a vdev during an import, we mark it as 24771544Seschrock * "not available", which signifies that it was never there to 24781544Seschrock * begin with. Failure to open such a device is not considered 24791544Seschrock * an error. 24801544Seschrock */ 24816643Seschrock if (spa->spa_load_state == SPA_LOAD_IMPORT && 24826643Seschrock !spa->spa_import_faulted && 24831986Seschrock vd->vdev_ops->vdev_op_leaf) 24841986Seschrock vd->vdev_not_present = 1; 24851986Seschrock 24861986Seschrock /* 24871986Seschrock * Post the appropriate ereport. 
If the 'prevstate' field is 24881986Seschrock * set to something other than VDEV_STATE_UNKNOWN, it indicates 24891986Seschrock * that this is part of a vdev_reopen(). In this case, we don't 24901986Seschrock * want to post the ereport if the device was already in the 24911986Seschrock * CANT_OPEN state beforehand. 24924451Seschrock * 24934451Seschrock * If the 'checkremove' flag is set, then this is an attempt to 24944451Seschrock * online the device in response to an insertion event. If we 24954451Seschrock * hit this case, then we have detected an insertion event for a 24964451Seschrock * faulted or offline device that wasn't in the removed state. 24974451Seschrock * In this scenario, we don't post an ereport because we are 24984451Seschrock * about to replace the device, or attempt an online with 24994451Seschrock * vdev_forcefault, which will generate the fault for us. 25001986Seschrock */ 25014451Seschrock if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && 25024451Seschrock !vd->vdev_not_present && !vd->vdev_checkremove && 25036643Seschrock vd != spa->spa_root_vdev) { 25041544Seschrock const char *class; 25051544Seschrock 25061544Seschrock switch (aux) { 25071544Seschrock case VDEV_AUX_OPEN_FAILED: 25081544Seschrock class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; 25091544Seschrock break; 25101544Seschrock case VDEV_AUX_CORRUPT_DATA: 25111544Seschrock class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; 25121544Seschrock break; 25131544Seschrock case VDEV_AUX_NO_REPLICAS: 25141544Seschrock class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; 25151544Seschrock break; 25161544Seschrock case VDEV_AUX_BAD_GUID_SUM: 25171544Seschrock class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; 25181544Seschrock break; 25191544Seschrock case VDEV_AUX_TOO_SMALL: 25201544Seschrock class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; 25211544Seschrock break; 25221544Seschrock case VDEV_AUX_BAD_LABEL: 25231544Seschrock class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; 25241544Seschrock break; 25257754SJeff.Bonwick@Sun.COM case VDEV_AUX_IO_FAILURE: 25267754SJeff.Bonwick@Sun.COM class = FM_EREPORT_ZFS_IO_FAILURE; 25277754SJeff.Bonwick@Sun.COM break; 25281544Seschrock default: 25291544Seschrock class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; 25301544Seschrock } 25311544Seschrock 25326643Seschrock zfs_ereport_post(class, spa, vd, NULL, save_state, 0); 25331544Seschrock } 25344451Seschrock 25354451Seschrock /* Erase any notion of persistent removed state */ 25364451Seschrock vd->vdev_removed = B_FALSE; 25374451Seschrock } else { 25384451Seschrock vd->vdev_removed = B_FALSE; 25391544Seschrock } 25401544Seschrock 25414451Seschrock if (!isopen) 25424451Seschrock vdev_propagate_state(vd); 2543789Sahrens } 25447042Sgw25295 25457042Sgw25295 /* 25467042Sgw25295 * Check the vdev configuration to ensure that it's capable of supporting 25477042Sgw25295 * a root pool. Currently, we do not support RAID-Z or partial configuration. 25487042Sgw25295 * In addition, only a single top-level vdev is allowed and none of the leaves 25497042Sgw25295 * can be wholedisks. 
25507042Sgw25295 */ 25517042Sgw25295 boolean_t 25527042Sgw25295 vdev_is_bootable(vdev_t *vd) 25537042Sgw25295 { 25547042Sgw25295 int c; 25557042Sgw25295 25567042Sgw25295 if (!vd->vdev_ops->vdev_op_leaf) { 25577042Sgw25295 char *vdev_type = vd->vdev_ops->vdev_op_type; 25587042Sgw25295 25597042Sgw25295 if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 && 25607042Sgw25295 vd->vdev_children > 1) { 25617042Sgw25295 return (B_FALSE); 25627042Sgw25295 } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 || 25637042Sgw25295 strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) { 25647042Sgw25295 return (B_FALSE); 25657042Sgw25295 } 25667042Sgw25295 } else if (vd->vdev_wholedisk == 1) { 25677042Sgw25295 return (B_FALSE); 25687042Sgw25295 } 25697042Sgw25295 25707042Sgw25295 for (c = 0; c < vd->vdev_children; c++) { 25717042Sgw25295 if (!vdev_is_bootable(vd->vdev_child[c])) 25727042Sgw25295 return (B_FALSE); 25737042Sgw25295 } 25747042Sgw25295 return (B_TRUE); 25757042Sgw25295 } 2576