/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	NULL
};

/* maximum scrub/resilver I/O queue per leaf vdev */
int zfs_scrub_limit = 10;

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;
	uint64_t c;

	for (c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}
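
/*
 * e.g. (illustrative numbers, not from the original source): with a
 * top-level ashift of 12 (4K sectors), a block of psize 10000 rounds
 * up to P2ROUNDUP(10000, 4096) == 12288 bytes of allocated space on a
 * plain disk or file vdev.
 */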

/*
 * Get the replaceable or attachable device size.
 * If the parent is a mirror or raidz, the replaceable size is the minimum
 * psize of all its children.  For the rest, just return our own psize.
 *
 * e.g.
 *			psize	rsize
 * root			-	-
 *	mirror/raidz	-	-
 *	    disk1	20g	20g
 *	    disk2	40g	20g
 *	    disk3	80g	80g
 */
uint64_t
vdev_get_rsize(vdev_t *vd)
{
	vdev_t *pvd, *cvd;
	uint64_t c, rsize;

	pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL or the root, just return our own psize.
	 */
	if (pvd == NULL || pvd->vdev_parent == NULL)
		return (vd->vdev_psize);

	rsize = 0;

	for (c = 0; c < pvd->vdev_children; c++) {
		cvd = pvd->vdev_child[c];
		rsize = MIN(rsize - 1, cvd->vdev_psize - 1) + 1;
	}

	return (rsize);
}
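
/*
 * Note on the loop above: rsize starts at 0, so on the first pass
 * (rsize - 1) wraps to UINT64_MAX and MIN() simply selects the first
 * child's psize; thereafter it tracks the running minimum, yielding
 * e.g. 20g for the disk1/disk2/disk3 table above.
 */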

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	int c;
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc, c;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
static vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
	}

	if (guid == 0) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			while (guid == 0 || spa_guid_exists(guid, 0))
				guid = spa_get_random(-1ULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			while (guid == 0 ||
			    spa_guid_exists(spa_guid(spa), guid))
				guid = spa_get_random(-1ULL);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
		    &vd->vdev_dtl_lock);
	}
	txg_list_create(&vd->vdev_ms_list,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EINVAL);

	if ((ops = vdev_getops(type)) == NULL)
		return (EINVAL);

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (EINVAL);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (EINVAL);

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (ENOTSUP);

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			/*
			 * Currently, we can only support 2 parity devices.
			 */
			if (nparity == 0 || nparity > 2)
				return (EINVAL);
			/*
			 * Older versions can only support 1 parity device.
			 */
			if (nparity == 2 &&
			    spa_version(spa) < SPA_VERSION_RAID6)
				return (ENOTSUP);
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAID6)
				return (EINVAL);
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_smo.smo_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		/*
		 * When importing a pool, we want to ignore the persistent fault
		 * state, as the diagnosis made on another system may not be
		 * valid in the current context.
		 */
		if (spa->spa_load_state == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}
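
/*
 * For reference, the nvlist consumed above for a leaf vdev roughly
 * contains the following (values are made-up examples, not from any
 * real pool):
 *
 *	ZPOOL_CONFIG_TYPE	"disk"
 *	ZPOOL_CONFIG_ID		0
 *	ZPOOL_CONFIG_GUID	0x1234abcd
 *	ZPOOL_CONFIG_PATH	"/dev/dsk/c1t0d0s0"
 *	ZPOOL_CONFIG_WHOLE_DISK	1
 *	ZPOOL_CONFIG_ASHIFT	9
 *
 * plus, for VDEV_ALLOC_LOAD, the DTL object and persistent fault
 * state handled above.
 */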

void
vdev_free(vdev_t *vd)
{
	int c;
	spa_t *spa = vd->vdev_spa;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));

	/*
	 * Free all children.
	 */
	for (c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd == vd->vdev_top)
		vdev_metaslab_fini(vd);

	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);
	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_unload(&vd->vdev_dtl[t]);
		space_map_destroy(&vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	int c;

	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}
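
/*
 * Pictorially (illustration only):
 *
 *	before:	pvd ----------------------------> cvd
 *	after:	pvd --> mvd (mirror/replacing) --> cvd
 *
 * mvd adopts cvd's asize, ashift and state so the tree's geometry is
 * unchanged, and takes over top-level duties if cvd was a top-level vdev.
 */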

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	metaslab_class_t *mc;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	if (vd->vdev_ms_shift == 0)	/* not being allocated from yet */
		return (0);

	/*
	 * Compute the raidz-deflation ratio.  Note, we hard-code
	 * in 128k (1 << 17) because it is the current "typical" blocksize.
	 * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change,
	 * or we will inconsistently account for existing bp's.
	 */
	vd->vdev_deflate_ratio = (1 << 17) /
	    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);

	ASSERT(oldc <= newc);

	if (vd->vdev_islog)
		mc = spa->spa_log_class;
	else
		mc = spa->spa_normal_class;

	if (vd->vdev_mg == NULL)
		vd->vdev_mg = metaslab_group_create(mc, vd);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (m = oldc; m < newc; m++) {
		space_map_obj_t smo = { 0, 0, 0 };
		if (txg == 0) {
			uint64_t object = 0;
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error)
				return (error);
			if (object != 0) {
				dmu_buf_t *db;
				error = dmu_bonus_hold(mos, object, FTAG, &db);
				if (error)
					return (error);
				ASSERT3U(db->db_size, >=, sizeof (smo));
				bcopy(db->db_data, &smo, sizeof (smo));
				ASSERT3U(smo.smo_object, ==, object);
				dmu_buf_rele(db, FTAG);
			}
		}
		vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
		    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
	}

	return (0);
}
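
/*
 * Rough illustration of the deflate ratio computed above (numbers are
 * examples; exact values depend on ashift and child count): for a plain
 * disk or mirror top-level, asize(128K) == 128K, so the ratio is
 * (1 << 17) / (128K >> 9) == 512.  For a 4+1 single-parity RAID-Z, a
 * 128K block allocates about 160K, giving roughly 131072 / 320 == 409,
 * which deflates accounted space by about 4/5.
 */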

void
vdev_metaslab_fini(vdev_t *vd)
{
	uint64_t m;
	uint64_t count = vd->vdev_ms_count;

	if (vd->vdev_ms != NULL) {
		for (m = 0; m < count; m++)
			if (vd->vdev_ms[m] != NULL)
				metaslab_fini(vd->vdev_ms[m]);
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
	}
}

typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset, zio->io_size, zio->io_data,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			zio_buf_free(zio->io_data, zio->io_size);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		zio_buf_free(zio->io_data, zio->io_size);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, 0, 0);
			zio->io_error = ENXIO;
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		while ((pio = zio_walk_parents(zio)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = ENXIO;

		kmem_free(vps, sizeof (*vps));
	}
}
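
/*
 * For orientation (an illustration, not part of the original comments),
 * vdev_probe() below builds a small zio tree, roughly:
 *
 *	vd->vdev_probe_zio (ZIO_TYPE_NULL)
 *	    read of label l's vl_pad2, for l = 1 .. VDEV_LABELS - 1;
 *	    each successful read spawns a write of the same data
 *
 * vdev_probe_done() above fires once per child and finally for the
 * NULL parent, where the readable/writeable verdict is rendered.
 */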

/*
 * Determine whether this device is accessible by reading and writing
 * to several known locations: the pad regions of each vdev label
 * but the first (which we leave alone in case it contains a VTOC).
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time.  All zios that want to probe
	 * this vdev will become parents of the probe io.
	 */
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_DONT_RETRY;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE.  This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O.  That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad2)),
		    VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}
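
/*
 * Note on usage (illustrative): vdev_open() below performs a synchronous
 * probe via zio_wait(vdev_probe(vd, NULL)), while a failing I/O can pass
 * its own zio instead, making it a parent of the shared probe zio and
 * letting the SPA async thread drive the probe to completion.
 */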

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	int c;
	uint64_t osize = 0;
	uint64_t asize, psize;
	uint64_t ashift = 0;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	vd->vdev_cant_read = B_FALSE;
	vd->vdev_cant_write = B_FALSE;

	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    VDEV_AUX_ERR_EXCEEDED);
		return (ENXIO);
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (ENXIO);
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);

	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    vd->vdev_stat.vs_aux);
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vd->vdev_state = VDEV_STATE_HEALTHY;
	}

	for (c = 0; c < vd->vdev_children; c++)
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = 0;
		asize = osize;
	}

	vd->vdev_psize = psize;

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For testing purposes, a higher ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
	} else {
		/*
		 * Make sure the alignment requirement hasn't increased.
		 */
		if (ashift > vd->vdev_top->vdev_ashift) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}

		/*
		 * Make sure the device hasn't shrunk.
		 */
		if (asize < vd->vdev_asize) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}

		/*
		 * If all children are healthy and the asize has increased,
		 * then we've experienced dynamic LUN growth.
		 */
		if (vd->vdev_state == VDEV_STATE_HEALTHY &&
		    asize > vd->vdev_asize) {
			vd->vdev_asize = asize;
		}
	}

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_IO_FAILURE);
		return (error);
	}

	/*
	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
	 * resilver.  But don't do this if we are doing a reopen for a scrub,
	 * since this would just restart the scrub we are already doing.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
	    vdev_resilver_needed(vd, NULL, NULL))
		spa_async_request(spa, SPA_ASYNC_RESILVER);

	return (0);
}

/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int c;
	nvlist_t *label;
	uint64_t guid, top_guid;
	uint64_t state;

	for (c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c]) != 0)
			return (EBADF);

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {

		if ((label = vdev_label_read_config(vd)) == NULL) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
		    &guid) != 0 || guid != spa_guid(spa)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		/*
		 * If this vdev just became a top-level vdev because its
		 * sibling was detached, it will have adopted the parent's
		 * vdev guid -- but the label may or may not be on disk yet.
		 * Fortunately, either version of the label will have the
		 * same top guid, so if we're a top-level vdev, we can
		 * safely compare to that instead.
		 */
		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
		    &guid) != 0 ||
		    nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
		    &top_guid) != 0 ||
		    (vd->vdev_guid != guid &&
		    (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		nvlist_free(label);

		if (spa->spa_load_state == SPA_LOAD_OPEN &&
		    state != POOL_STATE_ACTIVE)
			return (EBADF);

		/*
		 * If we were able to open and validate a vdev that was
		 * previously marked permanently unavailable, clear that state
		 * now.
		 */
		if (vd->vdev_not_present)
			vd->vdev_not_present = 0;
	}

	return (0);
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vd->vdev_ops->vdev_op_close(vd);

	vdev_cache_purge(vd);

	/*
	 * We record the previous state before we close it, so that if we are
	 * doing a reopen(), we don't generate FMA ereports if we notice that
	 * it's still faulted.
	 */
	vd->vdev_prevstate = vd->vdev_state;

	if (vd->vdev_offline)
		vd->vdev_state = VDEV_STATE_OFFLINE;
	else
		vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}

void
vdev_reopen(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vdev_close(vd);
	(void) vdev_open(vd);

	/*
	 * Call vdev_validate() here to make sure we have the same device.
	 * Otherwise, a device with an invalid label could be successfully
	 * opened in response to vdev_reopen().
	 */
	if (vd->vdev_aux) {
		(void) vdev_validate_aux(vd);
		if (vdev_readable(vd) && vdev_writeable(vd) &&
		    vd->vdev_aux == &spa->spa_l2cache &&
		    !l2arc_vdev_present(vd)) {
			uint64_t size = vdev_get_rsize(vd);
			l2arc_add_vdev(spa, vd,
			    VDEV_LABEL_START_SIZE,
			    size - VDEV_LABEL_START_SIZE);
		}
	} else {
		(void) vdev_validate(vd);
	}

	/*
	 * Reassess parent vdev's health.
	 */
	vdev_propagate_state(vd);
}

int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
	int error;

	/*
	 * Normally, partial opens (e.g. of a mirror) are allowed.
	 * For a create, however, we want to fail the request if
	 * there are any components we can't open.
	 */
	error = vdev_open(vd);

	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
		vdev_close(vd);
		return (error ? error : ENXIO);
	}

	/*
	 * Recursively initialize all labels.
	 */
	if ((error = vdev_label_init(vd, txg, isreplacing ?
	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
		vdev_close(vd);
		return (error);
	}

	return (0);
}

/*
 * This is the latter half of vdev_create().  It is distinct because it
 * involves initiating transactions in order to do metaslab creation.
 * For creation, we want to try to create all vdevs at once and then undo it
 * if anything fails; this is much harder if we have pending transactions.
 */
void
vdev_init(vdev_t *vd, uint64_t txg)
{
	/*
	 * Aim for roughly 200 metaslabs per vdev.
	 */
	vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);

	/*
	 * Initialize the vdev's metaslabs.  This can't fail because
	 * there's nothing to read when creating all new metaslabs.
	 */
	VERIFY(vdev_metaslab_init(vd, txg) == 0);
}
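
/*
 * Worked example (illustrative): for a 1 TB (2^40 byte) vdev,
 * highbit(2^40 / 200) == 33, so each metaslab covers 2^33 bytes (8 GB)
 * and the vdev gets 128 of them; in general "roughly 200" lands in
 * (100, 200], subject to the SPA_MAXBLOCKSHIFT floor above.
 */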

void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
	ASSERT(vd == vd->vdev_top);
	ASSERT(ISP2(flags));

	if (flags & VDD_METASLAB)
		(void) txg_list_add(&vd->vdev_ms_list, arg, txg);

	if (flags & VDD_DTL)
		(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);

	(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}

/*
 * DTLs.
 *
 * A vdev's DTL (dirty time log) is the set of transaction groups for which
 * the vdev has less than perfect replication.  There are four kinds of DTL:
 *
 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
 *
 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
 *
 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
 *	scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
 *	txgs that was scrubbed.
 *
 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
 *	persistent errors or just some device being offline.
 *	Unlike the other three, the DTL_OUTAGE map is not generally
 *	maintained; it's only computed when needed, typically to
 *	determine whether a device can be detached.
 *
 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
 * either has the data or it doesn't.
 *
 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
 * if any child is less than fully replicated, then so is its parent.
 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
 * comprising only those txgs which appear in 'minref' or more children;
 * those are the txgs we don't have enough replication to read.  For example,
 * double-parity RAID-Z can tolerate up to two missing devices (minref == 3);
 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
 * two child DTL_MISSING maps.
 *
 * It should be clear from the above that to compute the DTLs and outage maps
 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
 * Therefore, that is all we keep on disk.  When loading the pool, or after
 * a configuration change, we generate all other DTLs from first principles.
 */
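
/*
 * Worked example (illustrative): in a 3-way mirror whose children are
 * missing txgs [10,20], [15,25] and nothing, respectively, the mirror's
 * DTL_PARTIAL is the union [10,25], while its DTL_MISSING -- txgs present
 * in minref == 3 children's maps -- is empty, since every txg is still
 * readable from at least one child.
 */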
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
	space_map_t *sm = &vd->vdev_dtl[t];

	ASSERT(t < DTL_TYPES);
	ASSERT(vd != vd->vdev_spa->spa_root_vdev);

	mutex_enter(sm->sm_lock);
	if (!space_map_contains(sm, txg, size))
		space_map_add(sm, txg, size);
	mutex_exit(sm->sm_lock);
}

boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
	space_map_t *sm = &vd->vdev_dtl[t];
	boolean_t dirty = B_FALSE;

	ASSERT(t < DTL_TYPES);
	ASSERT(vd != vd->vdev_spa->spa_root_vdev);

	mutex_enter(sm->sm_lock);
	if (sm->sm_space != 0)
		dirty = space_map_contains(sm, txg, size);
	mutex_exit(sm->sm_lock);

	return (dirty);
}

boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
	space_map_t *sm = &vd->vdev_dtl[t];
	boolean_t empty;

	mutex_enter(sm->sm_lock);
	empty = (sm->sm_space == 0);
	mutex_exit(sm->sm_lock);

	return (empty);
}

/*
 * Reassess DTLs after a config change or scrub completion.
1431789Sahrens  */
1432789Sahrens void
1433789Sahrens vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
1434789Sahrens {
14351544Seschrock 	spa_t *spa = vd->vdev_spa;
14368241SJeff.Bonwick@Sun.COM 	avl_tree_t reftree;
14378241SJeff.Bonwick@Sun.COM 	int minref;
14388241SJeff.Bonwick@Sun.COM 
14398241SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
14408241SJeff.Bonwick@Sun.COM 
14418241SJeff.Bonwick@Sun.COM 	for (int c = 0; c < vd->vdev_children; c++)
14428241SJeff.Bonwick@Sun.COM 		vdev_dtl_reassess(vd->vdev_child[c], txg,
14438241SJeff.Bonwick@Sun.COM 		    scrub_txg, scrub_done);
14448241SJeff.Bonwick@Sun.COM 
14458241SJeff.Bonwick@Sun.COM 	if (vd == spa->spa_root_vdev)
14468241SJeff.Bonwick@Sun.COM 		return;
14478241SJeff.Bonwick@Sun.COM 
14488241SJeff.Bonwick@Sun.COM 	if (vd->vdev_ops->vdev_op_leaf) {
1449789Sahrens 		mutex_enter(&vd->vdev_dtl_lock);
14507046Sahrens 		if (scrub_txg != 0 &&
14517046Sahrens 		    (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) {
14527046Sahrens 			/* XXX should check scrub_done? */
14537046Sahrens 			/*
14547046Sahrens 			 * We completed a scrub up to scrub_txg.  If we
14557046Sahrens 			 * did it without rebooting, then the scrub dtl
14567046Sahrens 			 * will be valid, so excise the old region and
14577046Sahrens 			 * fold in the scrub dtl.  Otherwise, leave the
14587046Sahrens 			 * dtl as-is if there was an error.
14598241SJeff.Bonwick@Sun.COM 			 *
14608241SJeff.Bonwick@Sun.COM 			 * There's a little trick here: to excise the beginning
14618241SJeff.Bonwick@Sun.COM 			 * of the DTL_MISSING map, we put it into a reference
14628241SJeff.Bonwick@Sun.COM 			 * tree and then add a segment with refcnt -1 that
14638241SJeff.Bonwick@Sun.COM 			 * covers the range [0, scrub_txg).  This means
14648241SJeff.Bonwick@Sun.COM 			 * that each txg in that range has refcnt -1 or 0.
14658241SJeff.Bonwick@Sun.COM 			 * We then add DTL_SCRUB with a refcnt of 2, so that
14668241SJeff.Bonwick@Sun.COM 			 * entries in the range [0, scrub_txg) will have a
14678241SJeff.Bonwick@Sun.COM 			 * positive refcnt -- either 1 or 2.  We then convert
14688241SJeff.Bonwick@Sun.COM 			 * the reference tree into the new DTL_MISSING map.
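			 *
			 * A made-up example: suppose DTL_MISSING = {5, 6, 7},
			 * scrub_txg = 7, and DTL_SCRUB = {6} (txg 6 could not
			 * be repaired).  The resulting refcnts are:
			 *
			 *	txg 5: 1 - 1     = 0	(repaired; excised)
			 *	txg 6: 1 - 1 + 2 = 2	(unrepaired; kept)
			 *	txg 7: 1         = 1	(beyond scrub; kept)
			 *
			 * so generating the map with minref 1 yields the new
			 * DTL_MISSING = {6, 7}.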
14697046Sahrens 			 */
14708241SJeff.Bonwick@Sun.COM 			space_map_ref_create(&reftree);
14718241SJeff.Bonwick@Sun.COM 			space_map_ref_add_map(&reftree,
14728241SJeff.Bonwick@Sun.COM 			    &vd->vdev_dtl[DTL_MISSING], 1);
14738241SJeff.Bonwick@Sun.COM 			space_map_ref_add_seg(&reftree, 0, scrub_txg, -1);
14748241SJeff.Bonwick@Sun.COM 			space_map_ref_add_map(&reftree,
14758241SJeff.Bonwick@Sun.COM 			    &vd->vdev_dtl[DTL_SCRUB], 2);
14768241SJeff.Bonwick@Sun.COM 			space_map_ref_generate_map(&reftree,
14778241SJeff.Bonwick@Sun.COM 			    &vd->vdev_dtl[DTL_MISSING], 1);
14788241SJeff.Bonwick@Sun.COM 			space_map_ref_destroy(&reftree);
1479789Sahrens 		}
14808241SJeff.Bonwick@Sun.COM 		space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
14818241SJeff.Bonwick@Sun.COM 		space_map_walk(&vd->vdev_dtl[DTL_MISSING],
14828241SJeff.Bonwick@Sun.COM 		    space_map_add, &vd->vdev_dtl[DTL_PARTIAL]);
1483789Sahrens 		if (scrub_done)
14848241SJeff.Bonwick@Sun.COM 			space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
14858241SJeff.Bonwick@Sun.COM 		space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
14868241SJeff.Bonwick@Sun.COM 		if (!vdev_readable(vd))
14878241SJeff.Bonwick@Sun.COM 			space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
14888241SJeff.Bonwick@Sun.COM 		else
14898241SJeff.Bonwick@Sun.COM 			space_map_walk(&vd->vdev_dtl[DTL_MISSING],
14908241SJeff.Bonwick@Sun.COM 			    space_map_add, &vd->vdev_dtl[DTL_OUTAGE]);
1491789Sahrens 		mutex_exit(&vd->vdev_dtl_lock);
14927046Sahrens 
14931732Sbonwick 		if (txg != 0)
14941732Sbonwick 			vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
1495789Sahrens 		return;
1496789Sahrens 	}
1497789Sahrens 
1498789Sahrens 	mutex_enter(&vd->vdev_dtl_lock);
14998241SJeff.Bonwick@Sun.COM 	for (int t = 0; t < DTL_TYPES; t++) {
15008241SJeff.Bonwick@Sun.COM 		if (t == DTL_SCRUB)
15018241SJeff.Bonwick@Sun.COM 			continue;			/* leaf vdevs only */
15028241SJeff.Bonwick@Sun.COM 		if (t == DTL_PARTIAL)
15038241SJeff.Bonwick@Sun.COM 			minref = 1;			/* i.e. non-zero */
15048241SJeff.Bonwick@Sun.COM 		else if (vd->vdev_nparity != 0)
15058241SJeff.Bonwick@Sun.COM 			minref = vd->vdev_nparity + 1;	/* RAID-Z */
15068241SJeff.Bonwick@Sun.COM 		else
15078241SJeff.Bonwick@Sun.COM 			minref = vd->vdev_children;	/* any kind of mirror */
15088241SJeff.Bonwick@Sun.COM 		space_map_ref_create(&reftree);
15098241SJeff.Bonwick@Sun.COM 		for (int c = 0; c < vd->vdev_children; c++) {
15108241SJeff.Bonwick@Sun.COM 			vdev_t *cvd = vd->vdev_child[c];
15118241SJeff.Bonwick@Sun.COM 			mutex_enter(&cvd->vdev_dtl_lock);
15128241SJeff.Bonwick@Sun.COM 			space_map_ref_add_map(&reftree, &cvd->vdev_dtl[t], 1);
15138241SJeff.Bonwick@Sun.COM 			mutex_exit(&cvd->vdev_dtl_lock);
15148241SJeff.Bonwick@Sun.COM 		}
15158241SJeff.Bonwick@Sun.COM 		space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref);
15168241SJeff.Bonwick@Sun.COM 		space_map_ref_destroy(&reftree);
15178241SJeff.Bonwick@Sun.COM 	}
1518789Sahrens 	mutex_exit(&vd->vdev_dtl_lock);
1519789Sahrens }
1520789Sahrens 
1521789Sahrens static int
1522789Sahrens vdev_dtl_load(vdev_t *vd)
1523789Sahrens {
1524789Sahrens 	spa_t *spa = vd->vdev_spa;
15258241SJeff.Bonwick@Sun.COM 	space_map_obj_t *smo = &vd->vdev_dtl_smo;
15261732Sbonwick 	objset_t *mos = spa->spa_meta_objset;
1527789Sahrens 	dmu_buf_t *db;
1528789Sahrens 	int error;
1529789Sahrens 
1530789Sahrens 	ASSERT(vd->vdev_children == 0);
1531789Sahrens 
1532789Sahrens 	if (smo->smo_object == 0)
1533789Sahrens 		return (0);
1534789Sahrens 
15351732Sbonwick 	if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0)
15361544Seschrock 		return (error);
15371732Sbonwick 
15384944Smaybee 	ASSERT3U(db->db_size, >=, sizeof (*smo));
15394944Smaybee 	bcopy(db->db_data, smo, sizeof (*smo));
15401544Seschrock 	dmu_buf_rele(db, FTAG);
1541789Sahrens 
1542789Sahrens 	mutex_enter(&vd->vdev_dtl_lock);
15438241SJeff.Bonwick@Sun.COM 	error = space_map_load(&vd->vdev_dtl[DTL_MISSING],
15448241SJeff.Bonwick@Sun.COM 	    NULL, SM_ALLOC, smo, mos);
1545789Sahrens 	mutex_exit(&vd->vdev_dtl_lock);
1546789Sahrens 
1547789Sahrens 	return (error);
1548789Sahrens }
1549789Sahrens 
1550789Sahrens void
1551789Sahrens vdev_dtl_sync(vdev_t *vd, uint64_t txg)
1552789Sahrens {
1553789Sahrens 	spa_t *spa = vd->vdev_spa;
15548241SJeff.Bonwick@Sun.COM 	space_map_obj_t *smo = &vd->vdev_dtl_smo;
15558241SJeff.Bonwick@Sun.COM 	space_map_t *sm = &vd->vdev_dtl[DTL_MISSING];
15561732Sbonwick 	objset_t *mos = spa->spa_meta_objset;
1557789Sahrens 	space_map_t smsync;
1558789Sahrens 	kmutex_t smlock;
1559789Sahrens 	dmu_buf_t *db;
1560789Sahrens 	dmu_tx_t *tx;
1561789Sahrens 
1562789Sahrens 	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
1563789Sahrens 
1564789Sahrens 	if (vd->vdev_detached) {
1565789Sahrens 		if (smo->smo_object != 0) {
15661732Sbonwick 			int err = dmu_object_free(mos, smo->smo_object, tx);
1567789Sahrens 			ASSERT3U(err, ==, 0);
1568789Sahrens 			smo->smo_object = 0;
1569789Sahrens 		}
1570789Sahrens 		dmu_tx_commit(tx);
1571789Sahrens 		return;
1572789Sahrens 	}
1573789Sahrens 
1574789Sahrens 	if (smo->smo_object == 0) {
1575789Sahrens 		ASSERT(smo->smo_objsize == 0);
1576789Sahrens 		ASSERT(smo->smo_alloc == 0);
15771732Sbonwick 		smo->smo_object = dmu_object_alloc(mos,
1578789Sahrens 		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
1579789Sahrens 		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
1580789Sahrens 		ASSERT(smo->smo_object != 0);
1581789Sahrens 		vdev_config_dirty(vd->vdev_top);
1582789Sahrens 	}
1583789Sahrens 
1584789Sahrens 	mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL);
1585789Sahrens 
1586789Sahrens 	space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift,
1587789Sahrens 	    &smlock);
1588789Sahrens 
1589789Sahrens 	mutex_enter(&smlock);
1590789Sahrens 
1591789Sahrens 	mutex_enter(&vd->vdev_dtl_lock);
15921732Sbonwick 	space_map_walk(sm, space_map_add, &smsync);
1593789Sahrens 	mutex_exit(&vd->vdev_dtl_lock);
1594789Sahrens 
15951732Sbonwick 	space_map_truncate(smo, mos, tx);
15961732Sbonwick 	space_map_sync(&smsync, SM_ALLOC, smo, mos, tx);
1597789Sahrens 
1598789Sahrens 	space_map_destroy(&smsync);
1599789Sahrens 
1600789Sahrens 	mutex_exit(&smlock);
1601789Sahrens 	mutex_destroy(&smlock);
1602789Sahrens 
16031732Sbonwick 	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
1604789Sahrens 	dmu_buf_will_dirty(db, tx);
16054944Smaybee 	ASSERT3U(db->db_size, >=, sizeof (*smo));
16064944Smaybee 	bcopy(smo, db->db_data, sizeof (*smo));
16071544Seschrock 	dmu_buf_rele(db, FTAG);
1608789Sahrens 
1609789Sahrens 	dmu_tx_commit(tx);
1610789Sahrens }
1611789Sahrens 
16127046Sahrens /*
16138241SJeff.Bonwick@Sun.COM  * Determine whether the specified vdev can be offlined/detached/removed
16148241SJeff.Bonwick@Sun.COM  * without losing data.
16158241SJeff.Bonwick@Sun.COM  */
16168241SJeff.Bonwick@Sun.COM boolean_t
16178241SJeff.Bonwick@Sun.COM vdev_dtl_required(vdev_t *vd)
16188241SJeff.Bonwick@Sun.COM {
16198241SJeff.Bonwick@Sun.COM 	spa_t *spa = vd->vdev_spa;
16208241SJeff.Bonwick@Sun.COM 	vdev_t *tvd = vd->vdev_top;
16218241SJeff.Bonwick@Sun.COM 	uint8_t cant_read = vd->vdev_cant_read;
16228241SJeff.Bonwick@Sun.COM 	boolean_t required;
16238241SJeff.Bonwick@Sun.COM 
16248241SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
16258241SJeff.Bonwick@Sun.COM 
16268241SJeff.Bonwick@Sun.COM 	if (vd == spa->spa_root_vdev || vd == tvd)
16278241SJeff.Bonwick@Sun.COM 		return (B_TRUE);
16288241SJeff.Bonwick@Sun.COM 
16298241SJeff.Bonwick@Sun.COM 	/*
16308241SJeff.Bonwick@Sun.COM 	 * Temporarily mark the device as unreadable, and then determine
16318241SJeff.Bonwick@Sun.COM 	 * whether this results in any DTL outages in the top-level vdev.
16328241SJeff.Bonwick@Sun.COM 	 * If not, we can safely offline/detach/remove the device.
16338241SJeff.Bonwick@Sun.COM 	 */
16348241SJeff.Bonwick@Sun.COM 	vd->vdev_cant_read = B_TRUE;
16358241SJeff.Bonwick@Sun.COM 	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
16368241SJeff.Bonwick@Sun.COM 	required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
16378241SJeff.Bonwick@Sun.COM 	vd->vdev_cant_read = cant_read;
16388241SJeff.Bonwick@Sun.COM 	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
16398241SJeff.Bonwick@Sun.COM 
16408241SJeff.Bonwick@Sun.COM 	return (required);
16418241SJeff.Bonwick@Sun.COM }
16428241SJeff.Bonwick@Sun.COM 
16438241SJeff.Bonwick@Sun.COM /*
16447046Sahrens  * Determine if resilver is needed, and if so the txg range.
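 * The range reported through *minp/*maxp is, informally, the union of the
 * DTL_MISSING spans of all writeable leaves below 'vd' (with the minimum
 * biased down by one by the ss_start - 1 below).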
16457046Sahrens  */
16467046Sahrens boolean_t
16477046Sahrens vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
16487046Sahrens {
16497046Sahrens 	boolean_t needed = B_FALSE;
16507046Sahrens 	uint64_t thismin = UINT64_MAX;
16517046Sahrens 	uint64_t thismax = 0;
16527046Sahrens 
16537046Sahrens 	if (vd->vdev_children == 0) {
16547046Sahrens 		mutex_enter(&vd->vdev_dtl_lock);
16558241SJeff.Bonwick@Sun.COM 		if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 &&
16568241SJeff.Bonwick@Sun.COM 		    vdev_writeable(vd)) {
16577046Sahrens 			space_seg_t *ss;
16587046Sahrens 
16598241SJeff.Bonwick@Sun.COM 			ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root);
16607046Sahrens 			thismin = ss->ss_start - 1;
16618241SJeff.Bonwick@Sun.COM 			ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root);
16627046Sahrens 			thismax = ss->ss_end;
16637046Sahrens 			needed = B_TRUE;
16647046Sahrens 		}
16657046Sahrens 		mutex_exit(&vd->vdev_dtl_lock);
16667046Sahrens 	} else {
16678241SJeff.Bonwick@Sun.COM 		for (int c = 0; c < vd->vdev_children; c++) {
16687046Sahrens 			vdev_t *cvd = vd->vdev_child[c];
16697046Sahrens 			uint64_t cmin, cmax;
16707046Sahrens 
16717046Sahrens 			if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
16727046Sahrens 				thismin = MIN(thismin, cmin);
16737046Sahrens 				thismax = MAX(thismax, cmax);
16747046Sahrens 				needed = B_TRUE;
16757046Sahrens 			}
16767046Sahrens 		}
16777046Sahrens 	}
16787046Sahrens 
16797046Sahrens 	if (needed && minp) {
16807046Sahrens 		*minp = thismin;
16817046Sahrens 		*maxp = thismax;
16827046Sahrens 	}
16837046Sahrens 	return (needed);
16847046Sahrens }
16857046Sahrens 
16861986Seschrock void
16871544Seschrock vdev_load(vdev_t *vd)
1688789Sahrens {
1689789Sahrens 	/*
1690789Sahrens 	 * Recursively load all children.
1691789Sahrens 	 */
16928241SJeff.Bonwick@Sun.COM 	for (int c = 0; c < vd->vdev_children; c++)
16931986Seschrock 		vdev_load(vd->vdev_child[c]);
1694789Sahrens 
1695789Sahrens 	/*
16961585Sbonwick 	 * If this is a top-level vdev, initialize its metaslabs.
1697789Sahrens 	 */
16981986Seschrock 	if (vd == vd->vdev_top &&
16991986Seschrock 	    (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
17001986Seschrock 	    vdev_metaslab_init(vd, 0) != 0))
17011986Seschrock 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
17021986Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1703789Sahrens 
1704789Sahrens 	/*
1705789Sahrens 	 * If this is a leaf vdev, load its DTL.
1706789Sahrens 	 */
17071986Seschrock 	if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
17081986Seschrock 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
17091986Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1710789Sahrens }
1711789Sahrens 
17122082Seschrock /*
17135450Sbrendan  * The special vdev case is used for hot spares and l2cache devices.  Its
17145450Sbrendan  * sole purpose is to set the vdev state for the associated vdev.  To do this,
17155450Sbrendan  * we make sure that we can open the underlying device, then try to read the
17165450Sbrendan  * label, and make sure that the label is sane and that it hasn't been
17175450Sbrendan  * repurposed to another pool.
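 * (A sketch of the resulting contract, as implemented below: an unreadable
 * device returns 0 and is left alone; a missing, foreign, or corrupt label
 * sets VDEV_STATE_CANT_OPEN / VDEV_AUX_CORRUPT_DATA and returns -1.)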
17182082Seschrock  */
17192082Seschrock int
17205450Sbrendan vdev_validate_aux(vdev_t *vd)
17212082Seschrock {
17222082Seschrock 	nvlist_t *label;
17232082Seschrock 	uint64_t guid, version;
17242082Seschrock 	uint64_t state;
17252082Seschrock 
17267754SJeff.Bonwick@Sun.COM 	if (!vdev_readable(vd))
17276643Seschrock 		return (0);
17286643Seschrock 
17292082Seschrock 	if ((label = vdev_label_read_config(vd)) == NULL) {
17302082Seschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
17312082Seschrock 		    VDEV_AUX_CORRUPT_DATA);
17322082Seschrock 		return (-1);
17332082Seschrock 	}
17342082Seschrock 
17352082Seschrock 	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
17364577Sahrens 	    version > SPA_VERSION ||
17372082Seschrock 	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
17382082Seschrock 	    guid != vd->vdev_guid ||
17392082Seschrock 	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
17402082Seschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
17412082Seschrock 		    VDEV_AUX_CORRUPT_DATA);
17422082Seschrock 		nvlist_free(label);
17432082Seschrock 		return (-1);
17442082Seschrock 	}
17452082Seschrock 
17462082Seschrock 	/*
17472082Seschrock 	 * We don't actually check the pool state here.  If it's in fact in
17482082Seschrock 	 * use by another pool, we update this fact on the fly when requested.
17492082Seschrock 	 */
17502082Seschrock 	nvlist_free(label);
17512082Seschrock 	return (0);
17522082Seschrock }
17532082Seschrock 
1754789Sahrens void
1755789Sahrens vdev_sync_done(vdev_t *vd, uint64_t txg)
1756789Sahrens {
1757789Sahrens 	metaslab_t *msp;
1758789Sahrens 
1759789Sahrens 	while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
1760789Sahrens 		metaslab_sync_done(msp, txg);
1761789Sahrens }
1762789Sahrens 
1763789Sahrens void
1764789Sahrens vdev_sync(vdev_t *vd, uint64_t txg)
1765789Sahrens {
1766789Sahrens 	spa_t *spa = vd->vdev_spa;
1767789Sahrens 	vdev_t *lvd;
1768789Sahrens 	metaslab_t *msp;
17691732Sbonwick 	dmu_tx_t *tx;
1770789Sahrens 
17711732Sbonwick 	if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
17721732Sbonwick 		ASSERT(vd == vd->vdev_top);
17731732Sbonwick 		tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
17741732Sbonwick 		vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
17751732Sbonwick 		    DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
17761732Sbonwick 		ASSERT(vd->vdev_ms_array != 0);
17771732Sbonwick 		vdev_config_dirty(vd);
17781732Sbonwick 		dmu_tx_commit(tx);
17791732Sbonwick 	}
1780789Sahrens 
17811732Sbonwick 	while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
1782789Sahrens 		metaslab_sync(msp, txg);
17831732Sbonwick 		(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
17841732Sbonwick 	}
1785789Sahrens 
1786789Sahrens 	while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
1787789Sahrens 		vdev_dtl_sync(lvd, txg);
1788789Sahrens 
1789789Sahrens 	(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
1790789Sahrens }
1791789Sahrens 
1792789Sahrens uint64_t
1793789Sahrens vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
1794789Sahrens {
1795789Sahrens 	return (vd->vdev_ops->vdev_op_asize(vd, psize));
1796789Sahrens }
1797789Sahrens 
17984451Seschrock /*
17994451Seschrock  * Mark the given vdev faulted.  A faulted vdev behaves as if the device could
18004451Seschrock  * not be opened, and no I/O is attempted.
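 * If faulting the device would render its top-level vdev unopenable, the
 * fault is backed off to a degraded state instead (see below).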
18014451Seschrock  */
1802789Sahrens int
18034451Seschrock vdev_fault(spa_t *spa, uint64_t guid)
18044451Seschrock {
18056643Seschrock 	vdev_t *vd;
18064451Seschrock 
18077754SJeff.Bonwick@Sun.COM 	spa_vdev_state_enter(spa);
18084451Seschrock 
18096643Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
18107754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENODEV));
18117754SJeff.Bonwick@Sun.COM 
18124451Seschrock 	if (!vd->vdev_ops->vdev_op_leaf)
18137754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
18144451Seschrock 
18154451Seschrock 	/*
18164451Seschrock 	 * Faulted state takes precedence over degraded.
18174451Seschrock 	 */
18184451Seschrock 	vd->vdev_faulted = 1ULL;
18194451Seschrock 	vd->vdev_degraded = 0ULL;
18207754SJeff.Bonwick@Sun.COM 	vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED);
18214451Seschrock 
18224451Seschrock 	/*
18238123SDavid.Marker@sun.com 	 * If marking the vdev as faulted causes the top-level vdev to become
18244451Seschrock 	 * unavailable, then back off and simply mark the vdev as degraded
18254451Seschrock 	 * instead.
18264451Seschrock 	 */
18276643Seschrock 	if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) {
18284451Seschrock 		vd->vdev_degraded = 1ULL;
18294451Seschrock 		vd->vdev_faulted = 0ULL;
18304451Seschrock 
18314451Seschrock 		/*
18324451Seschrock 		 * If we reopen the device and it's not dead, only then do we
18334451Seschrock 		 * mark it degraded.
18344451Seschrock 		 */
18354451Seschrock 		vdev_reopen(vd);
18364451Seschrock 
18375329Sgw25295 		if (vdev_readable(vd)) {
18384451Seschrock 			vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
18394451Seschrock 			    VDEV_AUX_ERR_EXCEEDED);
18404451Seschrock 		}
18414451Seschrock 	}
18424451Seschrock 
18437754SJeff.Bonwick@Sun.COM 	return (spa_vdev_state_exit(spa, vd, 0));
18444451Seschrock }
18454451Seschrock 
18464451Seschrock /*
18474451Seschrock  * Mark the given vdev degraded.  A degraded vdev is purely an indication to the
18484451Seschrock  * user that something is wrong.  The vdev continues to operate as normal as far
18494451Seschrock  * as I/O is concerned.
18504451Seschrock  */
18514451Seschrock int
18524451Seschrock vdev_degrade(spa_t *spa, uint64_t guid)
18534451Seschrock {
18546643Seschrock 	vdev_t *vd;
18554451Seschrock 
18567754SJeff.Bonwick@Sun.COM 	spa_vdev_state_enter(spa);
18574451Seschrock 
18586643Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
18597754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENODEV));
18607754SJeff.Bonwick@Sun.COM 
18614451Seschrock 	if (!vd->vdev_ops->vdev_op_leaf)
18627754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
18634451Seschrock 
18644451Seschrock 	/*
18654451Seschrock 	 * If the vdev is already faulted, then don't do anything.
18664451Seschrock 	 */
18677754SJeff.Bonwick@Sun.COM 	if (vd->vdev_faulted || vd->vdev_degraded)
18687754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, 0));
18694451Seschrock 
18704451Seschrock 	vd->vdev_degraded = 1ULL;
18714451Seschrock 	if (!vdev_is_dead(vd))
18724451Seschrock 		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
18734451Seschrock 		    VDEV_AUX_ERR_EXCEEDED);
18744451Seschrock 
18757754SJeff.Bonwick@Sun.COM 	return (spa_vdev_state_exit(spa, vd, 0));
18764451Seschrock }
18774451Seschrock 
18784451Seschrock /*
18794451Seschrock  * Online the given vdev.  If 'unspare' is set, it implies two things.  First,
18804451Seschrock  * any attached spare device should be detached when the device finishes
18814451Seschrock  * resilvering.  Second, the online should be treated like a 'test' online case,
18824451Seschrock  * so no FMA events are generated if the device fails to open.
18834451Seschrock  */
18844451Seschrock int
18857754SJeff.Bonwick@Sun.COM vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
1886789Sahrens {
18876643Seschrock 	vdev_t *vd;
1888789Sahrens 
18897754SJeff.Bonwick@Sun.COM 	spa_vdev_state_enter(spa);
18901485Slling 
18916643Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
18927754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENODEV));
1893789Sahrens 
18941585Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
18957754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
18961585Sbonwick 
1897789Sahrens 	vd->vdev_offline = B_FALSE;
18981485Slling 	vd->vdev_tmpoffline = B_FALSE;
18997754SJeff.Bonwick@Sun.COM 	vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
19007754SJeff.Bonwick@Sun.COM 	vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
19011544Seschrock 	vdev_reopen(vd->vdev_top);
19024451Seschrock 	vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
19034451Seschrock 
19044451Seschrock 	if (newstate)
19054451Seschrock 		*newstate = vd->vdev_state;
19064451Seschrock 	if ((flags & ZFS_ONLINE_UNSPARE) &&
19074451Seschrock 	    !vdev_is_dead(vd) && vd->vdev_parent &&
19084451Seschrock 	    vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
19094451Seschrock 	    vd->vdev_parent->vdev_child[0] == vd)
19104451Seschrock 		vd->vdev_unspare = B_TRUE;
1911789Sahrens 
19128241SJeff.Bonwick@Sun.COM 	return (spa_vdev_state_exit(spa, vd, 0));
1913789Sahrens }
1914789Sahrens 
1915789Sahrens int
19164451Seschrock vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
1917789Sahrens {
1918*9701SGeorge.Wilson@Sun.COM 	vdev_t *vd, *tvd;
1919*9701SGeorge.Wilson@Sun.COM 	int error;
1920789Sahrens 
19217754SJeff.Bonwick@Sun.COM 	spa_vdev_state_enter(spa);
1922789Sahrens 
19236643Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
19247754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENODEV));
1925789Sahrens 
19261585Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
19277754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
19281585Sbonwick 
1929*9701SGeorge.Wilson@Sun.COM 	tvd = vd->vdev_top;
1930*9701SGeorge.Wilson@Sun.COM 
1931789Sahrens 	/*
19321732Sbonwick 	 * If the device isn't already offline, try to offline it.
1933789Sahrens 	 */
19341732Sbonwick 	if (!vd->vdev_offline) {
19351732Sbonwick 		/*
19368241SJeff.Bonwick@Sun.COM 		 * If this device has the only valid copy of some data,
1937*9701SGeorge.Wilson@Sun.COM 		 * don't allow it to be offlined.  Log devices are always
1938*9701SGeorge.Wilson@Sun.COM 		 * expendable.
19391732Sbonwick 		 */
1940*9701SGeorge.Wilson@Sun.COM 		if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
1941*9701SGeorge.Wilson@Sun.COM 		    vdev_dtl_required(vd))
19427754SJeff.Bonwick@Sun.COM 			return (spa_vdev_state_exit(spa, NULL, EBUSY));
1943789Sahrens 
19441732Sbonwick 		/*
19451732Sbonwick 		 * Offline this device and reopen its top-level vdev.
1946*9701SGeorge.Wilson@Sun.COM 		 * If the top-level vdev is a log device then just offline
1947*9701SGeorge.Wilson@Sun.COM 		 * it.  Otherwise, if this action results in the top-level
1948*9701SGeorge.Wilson@Sun.COM 		 * vdev becoming unusable, undo it and fail the request.
19491732Sbonwick 		 */
19501732Sbonwick 		vd->vdev_offline = B_TRUE;
1951*9701SGeorge.Wilson@Sun.COM 		vdev_reopen(tvd);
1952*9701SGeorge.Wilson@Sun.COM 
1953*9701SGeorge.Wilson@Sun.COM 		if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
1954*9701SGeorge.Wilson@Sun.COM 		    vdev_is_dead(tvd)) {
19551732Sbonwick 			vd->vdev_offline = B_FALSE;
1956*9701SGeorge.Wilson@Sun.COM 			vdev_reopen(tvd);
19577754SJeff.Bonwick@Sun.COM 			return (spa_vdev_state_exit(spa, NULL, EBUSY));
19581732Sbonwick 		}
1959789Sahrens 	}
1960789Sahrens 
19617754SJeff.Bonwick@Sun.COM 	vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
19621732Sbonwick 
1963*9701SGeorge.Wilson@Sun.COM 	if (!tvd->vdev_islog || !vdev_is_dead(tvd))
1964*9701SGeorge.Wilson@Sun.COM 		return (spa_vdev_state_exit(spa, vd, 0));
1965*9701SGeorge.Wilson@Sun.COM 
1966*9701SGeorge.Wilson@Sun.COM 	(void) spa_vdev_state_exit(spa, vd, 0);
1967*9701SGeorge.Wilson@Sun.COM 
1968*9701SGeorge.Wilson@Sun.COM 	error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
1969*9701SGeorge.Wilson@Sun.COM 	    NULL, DS_FIND_CHILDREN);
1970*9701SGeorge.Wilson@Sun.COM 	if (error) {
1971*9701SGeorge.Wilson@Sun.COM 		(void) vdev_online(spa, guid, 0, NULL);
1972*9701SGeorge.Wilson@Sun.COM 		return (error);
1973*9701SGeorge.Wilson@Sun.COM 	}
1974*9701SGeorge.Wilson@Sun.COM 	/*
1975*9701SGeorge.Wilson@Sun.COM 	 * If we successfully offlined the log device then we need to
1976*9701SGeorge.Wilson@Sun.COM 	 * sync out the current txg so that the "stubby" block can be
1977*9701SGeorge.Wilson@Sun.COM 	 * removed by zil_sync().
1978*9701SGeorge.Wilson@Sun.COM 	 */
1979*9701SGeorge.Wilson@Sun.COM 	txg_wait_synced(spa->spa_dsl_pool, 0);
1980*9701SGeorge.Wilson@Sun.COM 	return (0);
1981789Sahrens }
1982789Sahrens 
19831544Seschrock /*
19841544Seschrock  * Clear the error counts associated with this vdev.  Unlike vdev_online() and
19851544Seschrock  * vdev_offline(), we assume the spa config is locked.  We also clear all
19861544Seschrock  * children.  If 'vd' is NULL, then the user wants to clear all vdevs.
19871544Seschrock  */
19881544Seschrock void
19897754SJeff.Bonwick@Sun.COM vdev_clear(spa_t *spa, vdev_t *vd)
1990789Sahrens {
19917754SJeff.Bonwick@Sun.COM 	vdev_t *rvd = spa->spa_root_vdev;
19927754SJeff.Bonwick@Sun.COM 
19937754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1994789Sahrens 
19951544Seschrock 	if (vd == NULL)
19967754SJeff.Bonwick@Sun.COM 		vd = rvd;
1997789Sahrens 
19981544Seschrock 	vd->vdev_stat.vs_read_errors = 0;
19991544Seschrock 	vd->vdev_stat.vs_write_errors = 0;
20001544Seschrock 	vd->vdev_stat.vs_checksum_errors = 0;
2001789Sahrens 
20027754SJeff.Bonwick@Sun.COM 	for (int c = 0; c < vd->vdev_children; c++)
20037754SJeff.Bonwick@Sun.COM 		vdev_clear(spa, vd->vdev_child[c]);
20044451Seschrock 
20054451Seschrock 	/*
20066959Sek110237 	 * If we're in the FAULTED state or have experienced failed I/O, then
20076959Sek110237 	 * clear the persistent state and attempt to reopen the device.  We
20086959Sek110237 	 * also mark the vdev config dirty, so that the new faulted state is
20096959Sek110237 	 * written out to disk.
20104451Seschrock 	 */
20117754SJeff.Bonwick@Sun.COM 	if (vd->vdev_faulted || vd->vdev_degraded ||
20127754SJeff.Bonwick@Sun.COM 	    !vdev_readable(vd) || !vdev_writeable(vd)) {
20136959Sek110237 
20144451Seschrock 		vd->vdev_faulted = vd->vdev_degraded = 0;
20157754SJeff.Bonwick@Sun.COM 		vd->vdev_cant_read = B_FALSE;
20167754SJeff.Bonwick@Sun.COM 		vd->vdev_cant_write = B_FALSE;
20177754SJeff.Bonwick@Sun.COM 
20184451Seschrock 		vdev_reopen(vd);
20194451Seschrock 
20207754SJeff.Bonwick@Sun.COM 		if (vd != rvd)
20217754SJeff.Bonwick@Sun.COM 			vdev_state_dirty(vd->vdev_top);
20227754SJeff.Bonwick@Sun.COM 
20237754SJeff.Bonwick@Sun.COM 		if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
20244808Sek110237 			spa_async_request(spa, SPA_ASYNC_RESILVER);
20254451Seschrock 
20264451Seschrock 		spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
20274451Seschrock 	}
2028789Sahrens }
2029789Sahrens 
20307754SJeff.Bonwick@Sun.COM boolean_t
20317754SJeff.Bonwick@Sun.COM vdev_is_dead(vdev_t *vd)
20325329Sgw25295 {
20337754SJeff.Bonwick@Sun.COM 	return (vd->vdev_state < VDEV_STATE_DEGRADED);
20345329Sgw25295 }
20355329Sgw25295 
20367754SJeff.Bonwick@Sun.COM boolean_t
20377754SJeff.Bonwick@Sun.COM vdev_readable(vdev_t *vd)
2038789Sahrens {
20397754SJeff.Bonwick@Sun.COM 	return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
2040789Sahrens }
2041789Sahrens 
20427754SJeff.Bonwick@Sun.COM boolean_t
20437754SJeff.Bonwick@Sun.COM vdev_writeable(vdev_t *vd)
2044789Sahrens {
20457754SJeff.Bonwick@Sun.COM 	return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
20467754SJeff.Bonwick@Sun.COM }
2047789Sahrens 
20487754SJeff.Bonwick@Sun.COM boolean_t
20497980SGeorge.Wilson@Sun.COM vdev_allocatable(vdev_t *vd)
20507980SGeorge.Wilson@Sun.COM {
20518241SJeff.Bonwick@Sun.COM 	uint64_t state = vd->vdev_state;
20528241SJeff.Bonwick@Sun.COM 
20537980SGeorge.Wilson@Sun.COM 	/*
20548241SJeff.Bonwick@Sun.COM 	 * We currently allow allocations from vdevs which may be in the
20557980SGeorge.Wilson@Sun.COM 	 * process of reopening (i.e. VDEV_STATE_CLOSED).  If the device
20567980SGeorge.Wilson@Sun.COM 	 * fails to reopen then we'll catch it later when we're holding
20578241SJeff.Bonwick@Sun.COM 	 * the proper locks.  Note that we have to get the vdev state
20588241SJeff.Bonwick@Sun.COM 	 * in a local variable because although it changes atomically,
20598241SJeff.Bonwick@Sun.COM 	 * we're asking two separate questions about it.
20607980SGeorge.Wilson@Sun.COM 	 */
20618241SJeff.Bonwick@Sun.COM 	return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
20627980SGeorge.Wilson@Sun.COM 	    !vd->vdev_cant_write);
20637980SGeorge.Wilson@Sun.COM }
20647980SGeorge.Wilson@Sun.COM 
20657980SGeorge.Wilson@Sun.COM boolean_t
20667754SJeff.Bonwick@Sun.COM vdev_accessible(vdev_t *vd, zio_t *zio)
20677754SJeff.Bonwick@Sun.COM {
20687754SJeff.Bonwick@Sun.COM 	ASSERT(zio->io_vd == vd);
2069789Sahrens 
20707754SJeff.Bonwick@Sun.COM 	if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
20717754SJeff.Bonwick@Sun.COM 		return (B_FALSE);
2072789Sahrens 
20737754SJeff.Bonwick@Sun.COM 	if (zio->io_type == ZIO_TYPE_READ)
20747754SJeff.Bonwick@Sun.COM 		return (!vd->vdev_cant_read);
2075789Sahrens 
20767754SJeff.Bonwick@Sun.COM 	if (zio->io_type == ZIO_TYPE_WRITE)
20777754SJeff.Bonwick@Sun.COM 		return (!vd->vdev_cant_write);
20787754SJeff.Bonwick@Sun.COM 
20797754SJeff.Bonwick@Sun.COM 	return (B_TRUE);
2080789Sahrens }
2081789Sahrens 
2082789Sahrens /*
2083789Sahrens  * Get statistics for the given vdev.
2084789Sahrens  */
2085789Sahrens void
2086789Sahrens vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
2087789Sahrens {
2088789Sahrens 	vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
2089789Sahrens 
2090789Sahrens 	mutex_enter(&vd->vdev_stat_lock);
2091789Sahrens 	bcopy(&vd->vdev_stat, vs, sizeof (*vs));
20927046Sahrens 	vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors;
2093789Sahrens 	vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
2094789Sahrens 	vs->vs_state = vd->vdev_state;
20951175Slling 	vs->vs_rsize = vdev_get_rsize(vd);
2096789Sahrens 	mutex_exit(&vd->vdev_stat_lock);
2097789Sahrens 
2098789Sahrens 	/*
2099789Sahrens 	 * If we're getting stats on the root vdev, aggregate the I/O counts
2100789Sahrens 	 * over all top-level vdevs (i.e. the direct children of the root).
2101789Sahrens 	 */
2102789Sahrens 	if (vd == rvd) {
21037754SJeff.Bonwick@Sun.COM 		for (int c = 0; c < rvd->vdev_children; c++) {
2104789Sahrens 			vdev_t *cvd = rvd->vdev_child[c];
2105789Sahrens 			vdev_stat_t *cvs = &cvd->vdev_stat;
2106789Sahrens 
2107789Sahrens 			mutex_enter(&vd->vdev_stat_lock);
21087754SJeff.Bonwick@Sun.COM 			for (int t = 0; t < ZIO_TYPES; t++) {
2109789Sahrens 				vs->vs_ops[t] += cvs->vs_ops[t];
2110789Sahrens 				vs->vs_bytes[t] += cvs->vs_bytes[t];
2111789Sahrens 			}
2112789Sahrens 			vs->vs_scrub_examined += cvs->vs_scrub_examined;
2113789Sahrens 			mutex_exit(&vd->vdev_stat_lock);
2114789Sahrens 		}
2115789Sahrens 	}
2116789Sahrens }
2117789Sahrens 
2118789Sahrens void
21195450Sbrendan vdev_clear_stats(vdev_t *vd)
21205450Sbrendan {
21215450Sbrendan 	mutex_enter(&vd->vdev_stat_lock);
21225450Sbrendan 	vd->vdev_stat.vs_space = 0;
21235450Sbrendan 	vd->vdev_stat.vs_dspace = 0;
21245450Sbrendan 	vd->vdev_stat.vs_alloc = 0;
21255450Sbrendan 	mutex_exit(&vd->vdev_stat_lock);
21265450Sbrendan }
21275450Sbrendan 
21285450Sbrendan void
21297754SJeff.Bonwick@Sun.COM vdev_stat_update(zio_t *zio, uint64_t psize)
2130789Sahrens {
21318241SJeff.Bonwick@Sun.COM 	spa_t *spa = zio->io_spa;
21328241SJeff.Bonwick@Sun.COM 	vdev_t *rvd = spa->spa_root_vdev;
21337754SJeff.Bonwick@Sun.COM 	vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
2134789Sahrens 	vdev_t *pvd;
2135789Sahrens 	uint64_t txg = zio->io_txg;
2136789Sahrens 	vdev_stat_t *vs = &vd->vdev_stat;
2137789Sahrens 	zio_type_t type = zio->io_type;
2138789Sahrens 	int flags = zio->io_flags;
2139789Sahrens 
21407754SJeff.Bonwick@Sun.COM 	/*
21417754SJeff.Bonwick@Sun.COM 	 * If this i/o is a gang leader, it didn't do any actual work.
21427754SJeff.Bonwick@Sun.COM 	 */
21437754SJeff.Bonwick@Sun.COM 	if (zio->io_gang_tree)
21447754SJeff.Bonwick@Sun.COM 		return;
21457754SJeff.Bonwick@Sun.COM 
2146789Sahrens 	if (zio->io_error == 0) {
21477754SJeff.Bonwick@Sun.COM 		/*
21487754SJeff.Bonwick@Sun.COM 		 * If this is a root i/o, don't count it -- we've already
21497754SJeff.Bonwick@Sun.COM 		 * counted the top-level vdevs, and vdev_get_stats() will
21507754SJeff.Bonwick@Sun.COM 		 * aggregate them when asked.  This reduces contention on
21517754SJeff.Bonwick@Sun.COM 		 * the root vdev_stat_lock and implicitly handles blocks
21527754SJeff.Bonwick@Sun.COM 		 * that compress away to holes, for which there is no i/o.
21537754SJeff.Bonwick@Sun.COM 		 * (Holes never create vdev children, so all the counters
21547754SJeff.Bonwick@Sun.COM 		 * remain zero, which is what we want.)
21557754SJeff.Bonwick@Sun.COM 		 *
21567754SJeff.Bonwick@Sun.COM 		 * Note: this only applies to successful i/o (io_error == 0)
21577754SJeff.Bonwick@Sun.COM 		 * because unlike i/o counts, errors are not additive.
21587754SJeff.Bonwick@Sun.COM 		 * When reading a ditto block, for example, failure of
21597754SJeff.Bonwick@Sun.COM 		 * one top-level vdev does not imply a root-level error.
21607754SJeff.Bonwick@Sun.COM 		 */
21617754SJeff.Bonwick@Sun.COM 		if (vd == rvd)
21627754SJeff.Bonwick@Sun.COM 			return;
21637754SJeff.Bonwick@Sun.COM 
21647754SJeff.Bonwick@Sun.COM 		ASSERT(vd == zio->io_vd);
21658241SJeff.Bonwick@Sun.COM 
21668241SJeff.Bonwick@Sun.COM 		if (flags & ZIO_FLAG_IO_BYPASS)
21678241SJeff.Bonwick@Sun.COM 			return;
21688241SJeff.Bonwick@Sun.COM 
21698241SJeff.Bonwick@Sun.COM 		mutex_enter(&vd->vdev_stat_lock);
21708241SJeff.Bonwick@Sun.COM 
21717754SJeff.Bonwick@Sun.COM 		if (flags & ZIO_FLAG_IO_REPAIR) {
21721807Sbonwick 			if (flags & ZIO_FLAG_SCRUB_THREAD)
21737754SJeff.Bonwick@Sun.COM 				vs->vs_scrub_repaired += psize;
21748241SJeff.Bonwick@Sun.COM 			if (flags & ZIO_FLAG_SELF_HEAL)
21757754SJeff.Bonwick@Sun.COM 				vs->vs_self_healed += psize;
2176789Sahrens 		}
21778241SJeff.Bonwick@Sun.COM 
21788241SJeff.Bonwick@Sun.COM 		vs->vs_ops[type]++;
21798241SJeff.Bonwick@Sun.COM 		vs->vs_bytes[type] += psize;
21808241SJeff.Bonwick@Sun.COM 
21818241SJeff.Bonwick@Sun.COM 		mutex_exit(&vd->vdev_stat_lock);
2182789Sahrens 		return;
2183789Sahrens 	}
2184789Sahrens 
2185789Sahrens 	if (flags & ZIO_FLAG_SPECULATIVE)
2186789Sahrens 		return;
2187789Sahrens 
21887754SJeff.Bonwick@Sun.COM 	mutex_enter(&vd->vdev_stat_lock);
21899230SGeorge.Wilson@Sun.COM 	if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
21907754SJeff.Bonwick@Sun.COM 		if (zio->io_error == ECKSUM)
21917754SJeff.Bonwick@Sun.COM 			vs->vs_checksum_errors++;
21927754SJeff.Bonwick@Sun.COM 		else
21937754SJeff.Bonwick@Sun.COM 			vs->vs_read_errors++;
2194789Sahrens 	}
21959230SGeorge.Wilson@Sun.COM 	if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
21967754SJeff.Bonwick@Sun.COM 		vs->vs_write_errors++;
21977754SJeff.Bonwick@Sun.COM 	mutex_exit(&vd->vdev_stat_lock);
2198789Sahrens 
21998241SJeff.Bonwick@Sun.COM 	if (type == ZIO_TYPE_WRITE && txg != 0 &&
22008241SJeff.Bonwick@Sun.COM 	    (!(flags & ZIO_FLAG_IO_REPAIR) ||
22018241SJeff.Bonwick@Sun.COM 	    (flags & ZIO_FLAG_SCRUB_THREAD))) {
22028241SJeff.Bonwick@Sun.COM 		/*
22038241SJeff.Bonwick@Sun.COM 		 * This is either a normal write (not a repair), or it's a
22048241SJeff.Bonwick@Sun.COM 		 * repair induced by the scrub thread.  In the normal case,
22058241SJeff.Bonwick@Sun.COM 		 * we commit the DTL change in the same txg as the block
22068241SJeff.Bonwick@Sun.COM 		 * was born.  In the scrub-induced repair case, we know that
22078241SJeff.Bonwick@Sun.COM 		 * scrubs run in first-pass syncing context, so we commit
22088241SJeff.Bonwick@Sun.COM 		 * the DTL change in spa->spa_syncing_txg.
22098241SJeff.Bonwick@Sun.COM 		 *
22108241SJeff.Bonwick@Sun.COM 		 * We currently do not make DTL entries for failed spontaneous
22118241SJeff.Bonwick@Sun.COM 		 * self-healing writes triggered by normal (non-scrubbing)
22128241SJeff.Bonwick@Sun.COM 		 * reads, because we have no transactional context in which to
22138241SJeff.Bonwick@Sun.COM 		 * do so -- and it's not clear that it'd be desirable anyway.
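		 *
		 * (For instance, a scrub-induced repair of a block born in
		 * txg 100, performed while spa_syncing_txg == 200, dirties
		 * DTL_SCRUB at txg 100 but commits the change in txg 200.)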
22148241SJeff.Bonwick@Sun.COM 		 */
22158241SJeff.Bonwick@Sun.COM 		if (vd->vdev_ops->vdev_op_leaf) {
22168241SJeff.Bonwick@Sun.COM 			uint64_t commit_txg = txg;
22178241SJeff.Bonwick@Sun.COM 			if (flags & ZIO_FLAG_SCRUB_THREAD) {
22188241SJeff.Bonwick@Sun.COM 				ASSERT(flags & ZIO_FLAG_IO_REPAIR);
22198241SJeff.Bonwick@Sun.COM 				ASSERT(spa_sync_pass(spa) == 1);
22208241SJeff.Bonwick@Sun.COM 				vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
22218241SJeff.Bonwick@Sun.COM 				commit_txg = spa->spa_syncing_txg;
22228241SJeff.Bonwick@Sun.COM 			}
22238241SJeff.Bonwick@Sun.COM 			ASSERT(commit_txg >= spa->spa_syncing_txg);
22248241SJeff.Bonwick@Sun.COM 			if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
22258241SJeff.Bonwick@Sun.COM 				return;
22268241SJeff.Bonwick@Sun.COM 			for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
22278241SJeff.Bonwick@Sun.COM 				vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
22288241SJeff.Bonwick@Sun.COM 			vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
2229789Sahrens 		}
22308241SJeff.Bonwick@Sun.COM 		if (vd != rvd)
22318241SJeff.Bonwick@Sun.COM 			vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
2232789Sahrens 	}
2233789Sahrens }
2234789Sahrens 
2235789Sahrens void
2236789Sahrens vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete)
2237789Sahrens {
2238789Sahrens 	int c;
2239789Sahrens 	vdev_stat_t *vs = &vd->vdev_stat;
2240789Sahrens 
2241789Sahrens 	for (c = 0; c < vd->vdev_children; c++)
2242789Sahrens 		vdev_scrub_stat_update(vd->vdev_child[c], type, complete);
2243789Sahrens 
2244789Sahrens 	mutex_enter(&vd->vdev_stat_lock);
2245789Sahrens 
2246789Sahrens 	if (type == POOL_SCRUB_NONE) {
2247789Sahrens 		/*
2248789Sahrens 		 * Update completion and end time.  Leave everything else alone
2249789Sahrens 		 * so we can report what happened during the previous scrub.
2250789Sahrens 		 */
2251789Sahrens 		vs->vs_scrub_complete = complete;
2252789Sahrens 		vs->vs_scrub_end = gethrestime_sec();
2253789Sahrens 	} else {
2254789Sahrens 		vs->vs_scrub_type = type;
2255789Sahrens 		vs->vs_scrub_complete = 0;
2256789Sahrens 		vs->vs_scrub_examined = 0;
2257789Sahrens 		vs->vs_scrub_repaired = 0;
2258789Sahrens 		vs->vs_scrub_start = gethrestime_sec();
2259789Sahrens 		vs->vs_scrub_end = 0;
2260789Sahrens 	}
2261789Sahrens 
2262789Sahrens 	mutex_exit(&vd->vdev_stat_lock);
2263789Sahrens }
2264789Sahrens 
2265789Sahrens /*
2266789Sahrens  * Update the in-core space usage stats for this vdev and the root vdev.
2267789Sahrens  */
2268789Sahrens void
22695450Sbrendan vdev_space_update(vdev_t *vd, int64_t space_delta, int64_t alloc_delta,
22705450Sbrendan     boolean_t update_root)
2271789Sahrens {
22724527Sperrin 	int64_t dspace_delta = space_delta;
22734527Sperrin 	spa_t *spa = vd->vdev_spa;
22744527Sperrin 	vdev_t *rvd = spa->spa_root_vdev;
22754527Sperrin 
2276789Sahrens 	ASSERT(vd == vd->vdev_top);
22774527Sperrin 
22784527Sperrin 	/*
22794527Sperrin 	 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion
22804527Sperrin 	 * factor.  We must calculate this here and not at the root vdev
22814527Sperrin 	 * because the root vdev's psize-to-asize is simply the max of its
22824527Sperrin 	 * childrens', thus not accurate enough for us.
22834527Sperrin 	 */
22844527Sperrin 	ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
2285*9701SGeorge.Wilson@Sun.COM 	ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
22864527Sperrin 	dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
22874527Sperrin 	    vd->vdev_deflate_ratio;
2288789Sahrens 
22894527Sperrin 	mutex_enter(&vd->vdev_stat_lock);
22904527Sperrin 	vd->vdev_stat.vs_space += space_delta;
22914527Sperrin 	vd->vdev_stat.vs_alloc += alloc_delta;
22924527Sperrin 	vd->vdev_stat.vs_dspace += dspace_delta;
22934527Sperrin 	mutex_exit(&vd->vdev_stat_lock);
22942082Seschrock 
22955450Sbrendan 	if (update_root) {
22965450Sbrendan 		ASSERT(rvd == vd->vdev_parent);
22975450Sbrendan 		ASSERT(vd->vdev_ms_count != 0);
22984527Sperrin 
22995450Sbrendan 		/*
23005450Sbrendan 		 * Don't count non-normal (e.g. intent log) space as part of
23015450Sbrendan 		 * the pool's capacity.
23025450Sbrendan 		 */
23035450Sbrendan 		if (vd->vdev_mg->mg_class != spa->spa_normal_class)
23045450Sbrendan 			return;
23055450Sbrendan 
23065450Sbrendan 		mutex_enter(&rvd->vdev_stat_lock);
23075450Sbrendan 		rvd->vdev_stat.vs_space += space_delta;
23085450Sbrendan 		rvd->vdev_stat.vs_alloc += alloc_delta;
23095450Sbrendan 		rvd->vdev_stat.vs_dspace += dspace_delta;
23105450Sbrendan 		mutex_exit(&rvd->vdev_stat_lock);
23115450Sbrendan 	}
2312789Sahrens }
2313789Sahrens 
2314789Sahrens /*
2315789Sahrens  * Mark a top-level vdev's config as dirty, placing it on the dirty list
2316789Sahrens  * so that it will be written out next time the vdev configuration is synced.
2317789Sahrens  * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
2318789Sahrens  */
2319789Sahrens void
2320789Sahrens vdev_config_dirty(vdev_t *vd)
2321789Sahrens {
2322789Sahrens 	spa_t *spa = vd->vdev_spa;
2323789Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
2324789Sahrens 	int c;
2325789Sahrens 
23261601Sbonwick 	/*
23279425SEric.Schrock@Sun.COM 	 * If this is an aux vdev (as with l2cache and spare devices), then we
23289425SEric.Schrock@Sun.COM 	 * update the vdev config manually and set the sync flag.
23296643Seschrock 	 */
23306643Seschrock 	if (vd->vdev_aux != NULL) {
23316643Seschrock 		spa_aux_vdev_t *sav = vd->vdev_aux;
23326643Seschrock 		nvlist_t **aux;
23336643Seschrock 		uint_t naux;
23346643Seschrock 
23356643Seschrock 		for (c = 0; c < sav->sav_count; c++) {
23366643Seschrock 			if (sav->sav_vdevs[c] == vd)
23376643Seschrock 				break;
23386643Seschrock 		}
23396643Seschrock 
23407754SJeff.Bonwick@Sun.COM 		if (c == sav->sav_count) {
23417754SJeff.Bonwick@Sun.COM 			/*
23427754SJeff.Bonwick@Sun.COM 			 * We're being removed.  There's nothing more to do.
23437754SJeff.Bonwick@Sun.COM 			 */
23447754SJeff.Bonwick@Sun.COM 			ASSERT(sav->sav_sync == B_TRUE);
23457754SJeff.Bonwick@Sun.COM 			return;
23467754SJeff.Bonwick@Sun.COM 		}
23477754SJeff.Bonwick@Sun.COM 
23486643Seschrock 		sav->sav_sync = B_TRUE;
23496643Seschrock 
23509425SEric.Schrock@Sun.COM 		if (nvlist_lookup_nvlist_array(sav->sav_config,
23519425SEric.Schrock@Sun.COM 		    ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
23529425SEric.Schrock@Sun.COM 			VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
23539425SEric.Schrock@Sun.COM 			    ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
23549425SEric.Schrock@Sun.COM 		}
23556643Seschrock 
23566643Seschrock 		ASSERT(c < naux);
23576643Seschrock 
23586643Seschrock 		/*
23596643Seschrock 		 * Setting the nvlist in the middle of the array is a little
23606643Seschrock 		 * sketchy, but it will work.
23616643Seschrock 		 */
23626643Seschrock 		nvlist_free(aux[c]);
23636643Seschrock 		aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE);
23646643Seschrock 
23656643Seschrock 		return;
23666643Seschrock 	}
23676643Seschrock 
23686643Seschrock 	/*
23697754SJeff.Bonwick@Sun.COM 	 * The dirty list is protected by the SCL_CONFIG lock.  The caller
23707754SJeff.Bonwick@Sun.COM 	 * must either hold SCL_CONFIG as writer, or must be the sync thread
23717754SJeff.Bonwick@Sun.COM 	 * (which holds SCL_CONFIG as reader).  There's only one sync thread,
23721601Sbonwick 	 * so this is sufficient to ensure mutual exclusion.
23731601Sbonwick 	 */
23747754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
23757754SJeff.Bonwick@Sun.COM 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
23767754SJeff.Bonwick@Sun.COM 	    spa_config_held(spa, SCL_CONFIG, RW_READER)));
23771601Sbonwick 
2378789Sahrens 	if (vd == rvd) {
2379789Sahrens 		for (c = 0; c < rvd->vdev_children; c++)
2380789Sahrens 			vdev_config_dirty(rvd->vdev_child[c]);
2381789Sahrens 	} else {
2382789Sahrens 		ASSERT(vd == vd->vdev_top);
2383789Sahrens 
23847754SJeff.Bonwick@Sun.COM 		if (!list_link_active(&vd->vdev_config_dirty_node))
23857754SJeff.Bonwick@Sun.COM 			list_insert_head(&spa->spa_config_dirty_list, vd);
2386789Sahrens 	}
2387789Sahrens }
2388789Sahrens 
2389789Sahrens void
2390789Sahrens vdev_config_clean(vdev_t *vd)
2391789Sahrens {
23921601Sbonwick 	spa_t *spa = vd->vdev_spa;
23931601Sbonwick 
23947754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
23957754SJeff.Bonwick@Sun.COM 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
23967754SJeff.Bonwick@Sun.COM 	    spa_config_held(spa, SCL_CONFIG, RW_READER)));
23977754SJeff.Bonwick@Sun.COM 
23987754SJeff.Bonwick@Sun.COM 	ASSERT(list_link_active(&vd->vdev_config_dirty_node));
23997754SJeff.Bonwick@Sun.COM 	list_remove(&spa->spa_config_dirty_list, vd);
24007754SJeff.Bonwick@Sun.COM }
24017754SJeff.Bonwick@Sun.COM 
24027754SJeff.Bonwick@Sun.COM /*
24037754SJeff.Bonwick@Sun.COM  * Mark a top-level vdev's state as dirty, so that the next pass of
24047754SJeff.Bonwick@Sun.COM  * spa_sync() can convert this into vdev_config_dirty().  We distinguish
24057754SJeff.Bonwick@Sun.COM  * the state changes from larger config changes because they require
24067754SJeff.Bonwick@Sun.COM  * much less locking, and are often needed for administrative actions.
24077754SJeff.Bonwick@Sun.COM  */
24087754SJeff.Bonwick@Sun.COM void
24097754SJeff.Bonwick@Sun.COM vdev_state_dirty(vdev_t *vd)
24107754SJeff.Bonwick@Sun.COM {
24117754SJeff.Bonwick@Sun.COM 	spa_t *spa = vd->vdev_spa;
24127754SJeff.Bonwick@Sun.COM 
24137754SJeff.Bonwick@Sun.COM 	ASSERT(vd == vd->vdev_top);
24141601Sbonwick 
24157754SJeff.Bonwick@Sun.COM 	/*
24167754SJeff.Bonwick@Sun.COM 	 * The state list is protected by the SCL_STATE lock.  The caller
24177754SJeff.Bonwick@Sun.COM 	 * must either hold SCL_STATE as writer, or must be the sync thread
24187754SJeff.Bonwick@Sun.COM 	 * (which holds SCL_STATE as reader).  There's only one sync thread,
24197754SJeff.Bonwick@Sun.COM 	 * so this is sufficient to ensure mutual exclusion.
24207754SJeff.Bonwick@Sun.COM 	 */
24217754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
24227754SJeff.Bonwick@Sun.COM 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
24237754SJeff.Bonwick@Sun.COM 	    spa_config_held(spa, SCL_STATE, RW_READER)));
24247754SJeff.Bonwick@Sun.COM 
24257754SJeff.Bonwick@Sun.COM 	if (!list_link_active(&vd->vdev_state_dirty_node))
24267754SJeff.Bonwick@Sun.COM 		list_insert_head(&spa->spa_state_dirty_list, vd);
24277754SJeff.Bonwick@Sun.COM }
24287754SJeff.Bonwick@Sun.COM 
24297754SJeff.Bonwick@Sun.COM void
24307754SJeff.Bonwick@Sun.COM vdev_state_clean(vdev_t *vd)
24317754SJeff.Bonwick@Sun.COM {
24327754SJeff.Bonwick@Sun.COM 	spa_t *spa = vd->vdev_spa;
24337754SJeff.Bonwick@Sun.COM 
24347754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
24357754SJeff.Bonwick@Sun.COM 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
24367754SJeff.Bonwick@Sun.COM 	    spa_config_held(spa, SCL_STATE, RW_READER)));
24377754SJeff.Bonwick@Sun.COM 
24387754SJeff.Bonwick@Sun.COM 	ASSERT(list_link_active(&vd->vdev_state_dirty_node));
24397754SJeff.Bonwick@Sun.COM 	list_remove(&spa->spa_state_dirty_list, vd);
2440789Sahrens }
2441789Sahrens 
24426523Sek110237 /*
24436523Sek110237  * Propagate vdev state up from children to parent.
24446523Sek110237  */
24451775Sbillm void
24461775Sbillm vdev_propagate_state(vdev_t *vd)
24471775Sbillm {
24488241SJeff.Bonwick@Sun.COM 	spa_t *spa = vd->vdev_spa;
24498241SJeff.Bonwick@Sun.COM 	vdev_t *rvd = spa->spa_root_vdev;
24501775Sbillm 	int degraded = 0, faulted = 0;
24511775Sbillm 	int corrupted = 0;
24521775Sbillm 	int c;
24531775Sbillm 	vdev_t *child;
24541775Sbillm 
24554451Seschrock 	if (vd->vdev_children > 0) {
24564451Seschrock 		for (c = 0; c < vd->vdev_children; c++) {
24574451Seschrock 			child = vd->vdev_child[c];
24586976Seschrock 
24597754SJeff.Bonwick@Sun.COM 			if (!vdev_readable(child) ||
24608241SJeff.Bonwick@Sun.COM 			    (!vdev_writeable(child) && spa_writeable(spa))) {
24616976Seschrock 				/*
24626976Seschrock 				 * Root special: if there is a top-level log
24636976Seschrock 				 * device, treat the root vdev as if it were
24646976Seschrock 				 * degraded.
24656976Seschrock 				 */
24666976Seschrock 				if (child->vdev_islog && vd == rvd)
24676976Seschrock 					degraded++;
24686976Seschrock 				else
24696976Seschrock 					faulted++;
24706976Seschrock 			} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
24714451Seschrock 				degraded++;
24726976Seschrock 			}
24734451Seschrock 
24744451Seschrock 			if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
24754451Seschrock 				corrupted++;
24764451Seschrock 		}
24771775Sbillm 
24784451Seschrock 		vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
24794451Seschrock 
24804451Seschrock 		/*
24817754SJeff.Bonwick@Sun.COM 		 * Root special: if there is a top-level vdev that cannot be
24824451Seschrock 		 * opened due to corrupted metadata, then propagate the root
24834451Seschrock 		 * vdev's aux state as 'corrupt' rather than 'insufficient
24844451Seschrock 		 * replicas'.
24854451Seschrock 		 */
24864451Seschrock 		if (corrupted && vd == rvd &&
24874451Seschrock 		    rvd->vdev_state == VDEV_STATE_CANT_OPEN)
24884451Seschrock 			vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
24894451Seschrock 			    VDEV_AUX_CORRUPT_DATA);
24901775Sbillm 	}
24911775Sbillm 
24926976Seschrock 	if (vd->vdev_parent)
24934451Seschrock 		vdev_propagate_state(vd->vdev_parent);
24941775Sbillm }
24951775Sbillm 
2496789Sahrens /*
24971544Seschrock  * Set a vdev's state.  If this is during an open, we don't update the parent
If this is during an open, we don't update the parent 24981544Seschrock * state, because we're in the process of opening children depth-first. 24991544Seschrock * Otherwise, we propagate the change to the parent. 25001544Seschrock * 25011544Seschrock * If this routine places a device in a faulted state, an appropriate ereport is 25021544Seschrock * generated. 2503789Sahrens */ 2504789Sahrens void 25051544Seschrock vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) 2506789Sahrens { 25071986Seschrock uint64_t save_state; 25086643Seschrock spa_t *spa = vd->vdev_spa; 25091544Seschrock 25101544Seschrock if (state == vd->vdev_state) { 25111544Seschrock vd->vdev_stat.vs_aux = aux; 2512789Sahrens return; 25131544Seschrock } 25141544Seschrock 25151986Seschrock save_state = vd->vdev_state; 2516789Sahrens 2517789Sahrens vd->vdev_state = state; 2518789Sahrens vd->vdev_stat.vs_aux = aux; 2519789Sahrens 25204451Seschrock /* 25214451Seschrock * If we are setting the vdev state to anything but an open state, then 25224451Seschrock * always close the underlying device. Otherwise, we keep accessible 25234451Seschrock * but invalid devices open forever. We don't call vdev_close() itself, 25244451Seschrock * because that implies some extra checks (offline, etc) that we don't 25254451Seschrock * want here. This is limited to leaf devices, because otherwise 25264451Seschrock * closing the device will affect other children. 25274451Seschrock */ 25287780SJeff.Bonwick@Sun.COM if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf) 25294451Seschrock vd->vdev_ops->vdev_op_close(vd); 25304451Seschrock 25314451Seschrock if (vd->vdev_removed && 25324451Seschrock state == VDEV_STATE_CANT_OPEN && 25334451Seschrock (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { 25344451Seschrock /* 25354451Seschrock * If the previous state is set to VDEV_STATE_REMOVED, then this 25364451Seschrock * device was previously marked removed and someone attempted to 25374451Seschrock * reopen it. If this failed due to a nonexistent device, then 25384451Seschrock * keep the device in the REMOVED state. We also let this be if 25394451Seschrock * it is one of our special test online cases, which is only 25404451Seschrock * attempting to online the device and shouldn't generate an FMA 25414451Seschrock * fault. 25424451Seschrock */ 25434451Seschrock vd->vdev_state = VDEV_STATE_REMOVED; 25444451Seschrock vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 25454451Seschrock } else if (state == VDEV_STATE_REMOVED) { 25464451Seschrock /* 25474451Seschrock * Indicate to the ZFS DE that this device has been removed, and 25484451Seschrock * any recent errors should be ignored. 25494451Seschrock */ 25506643Seschrock zfs_post_remove(spa, vd); 25514451Seschrock vd->vdev_removed = B_TRUE; 25524451Seschrock } else if (state == VDEV_STATE_CANT_OPEN) { 25531544Seschrock /* 25541544Seschrock * If we fail to open a vdev during an import, we mark it as 25551544Seschrock * "not available", which signifies that it was never there to 25561544Seschrock * begin with. Failure to open such a device is not considered 25571544Seschrock * an error. 25581544Seschrock */ 25596643Seschrock if (spa->spa_load_state == SPA_LOAD_IMPORT && 25601986Seschrock vd->vdev_ops->vdev_op_leaf) 25611986Seschrock vd->vdev_not_present = 1; 25621986Seschrock 25631986Seschrock /* 25641986Seschrock * Post the appropriate ereport. 
If the 'prevstate' field is 25651986Seschrock * set to something other than VDEV_STATE_UNKNOWN, it indicates 25661986Seschrock * that this is part of a vdev_reopen(). In this case, we don't 25671986Seschrock * want to post the ereport if the device was already in the 25681986Seschrock * CANT_OPEN state beforehand. 25694451Seschrock * 25704451Seschrock * If the 'checkremove' flag is set, then this is an attempt to 25714451Seschrock * online the device in response to an insertion event. If we 25724451Seschrock * hit this case, then we have detected an insertion event for a 25734451Seschrock * faulted or offline device that wasn't in the removed state. 25744451Seschrock * In this scenario, we don't post an ereport because we are 25754451Seschrock * about to replace the device, or attempt an online with 25764451Seschrock * vdev_forcefault, which will generate the fault for us. 25771986Seschrock */ 25784451Seschrock if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && 25794451Seschrock !vd->vdev_not_present && !vd->vdev_checkremove && 25806643Seschrock vd != spa->spa_root_vdev) { 25811544Seschrock const char *class; 25821544Seschrock 25831544Seschrock switch (aux) { 25841544Seschrock case VDEV_AUX_OPEN_FAILED: 25851544Seschrock class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; 25861544Seschrock break; 25871544Seschrock case VDEV_AUX_CORRUPT_DATA: 25881544Seschrock class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; 25891544Seschrock break; 25901544Seschrock case VDEV_AUX_NO_REPLICAS: 25911544Seschrock class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; 25921544Seschrock break; 25931544Seschrock case VDEV_AUX_BAD_GUID_SUM: 25941544Seschrock class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; 25951544Seschrock break; 25961544Seschrock case VDEV_AUX_TOO_SMALL: 25971544Seschrock class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; 25981544Seschrock break; 25991544Seschrock case VDEV_AUX_BAD_LABEL: 26001544Seschrock class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; 26011544Seschrock break; 26027754SJeff.Bonwick@Sun.COM case VDEV_AUX_IO_FAILURE: 26037754SJeff.Bonwick@Sun.COM class = FM_EREPORT_ZFS_IO_FAILURE; 26047754SJeff.Bonwick@Sun.COM break; 26051544Seschrock default: 26061544Seschrock class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; 26071544Seschrock } 26081544Seschrock 26096643Seschrock zfs_ereport_post(class, spa, vd, NULL, save_state, 0); 26101544Seschrock } 26114451Seschrock 26124451Seschrock /* Erase any notion of persistent removed state */ 26134451Seschrock vd->vdev_removed = B_FALSE; 26144451Seschrock } else { 26154451Seschrock vd->vdev_removed = B_FALSE; 26161544Seschrock } 26171544Seschrock 26189583STim.Haley@Sun.COM if (!isopen && vd->vdev_parent) 26199583STim.Haley@Sun.COM vdev_propagate_state(vd->vdev_parent); 2620789Sahrens } 26217042Sgw25295 26227042Sgw25295 /* 26237042Sgw25295 * Check the vdev configuration to ensure that it's capable of supporting 26247042Sgw25295 * a root pool. Currently, we do not support RAID-Z or partial configuration. 26257042Sgw25295 * In addition, only a single top-level vdev is allowed and none of the leaves 26267042Sgw25295 * can be wholedisks. 
26277042Sgw25295  */
26287042Sgw25295 boolean_t
26297042Sgw25295 vdev_is_bootable(vdev_t *vd)
26307042Sgw25295 {
26317042Sgw25295 	int c;
26327042Sgw25295 
26337042Sgw25295 	if (!vd->vdev_ops->vdev_op_leaf) {
26347042Sgw25295 		char *vdev_type = vd->vdev_ops->vdev_op_type;
26357042Sgw25295 
26367042Sgw25295 		if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
26377042Sgw25295 		    vd->vdev_children > 1) {
26387042Sgw25295 			return (B_FALSE);
26397042Sgw25295 		} else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
26407042Sgw25295 		    strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
26417042Sgw25295 			return (B_FALSE);
26427042Sgw25295 		}
26437042Sgw25295 	} else if (vd->vdev_wholedisk == 1) {
26447042Sgw25295 		return (B_FALSE);
26457042Sgw25295 	}
26467042Sgw25295 
26477042Sgw25295 	for (c = 0; c < vd->vdev_children; c++) {
26487042Sgw25295 		if (!vdev_is_bootable(vd->vdev_child[c]))
26497042Sgw25295 			return (B_FALSE);
26507042Sgw25295 	}
26517042Sgw25295 	return (B_TRUE);
26527042Sgw25295 }
2653*9701SGeorge.Wilson@Sun.COM 
2654*9701SGeorge.Wilson@Sun.COM void
2655*9701SGeorge.Wilson@Sun.COM vdev_load_log_state(vdev_t *vd, nvlist_t *nv)
2656*9701SGeorge.Wilson@Sun.COM {
2657*9701SGeorge.Wilson@Sun.COM 	uint_t c, children;
2658*9701SGeorge.Wilson@Sun.COM 	nvlist_t **child;
2659*9701SGeorge.Wilson@Sun.COM 	uint64_t val;
2660*9701SGeorge.Wilson@Sun.COM 	spa_t *spa = vd->vdev_spa;
2661*9701SGeorge.Wilson@Sun.COM 
2662*9701SGeorge.Wilson@Sun.COM 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2663*9701SGeorge.Wilson@Sun.COM 	    &child, &children) == 0) {
2664*9701SGeorge.Wilson@Sun.COM 		for (c = 0; c < children; c++)
2665*9701SGeorge.Wilson@Sun.COM 			vdev_load_log_state(vd->vdev_child[c], child[c]);
2666*9701SGeorge.Wilson@Sun.COM 	}
2667*9701SGeorge.Wilson@Sun.COM 
2668*9701SGeorge.Wilson@Sun.COM 	if (vd->vdev_ops->vdev_op_leaf && nvlist_lookup_uint64(nv,
2669*9701SGeorge.Wilson@Sun.COM 	    ZPOOL_CONFIG_OFFLINE, &val) == 0 && val) {
2670*9701SGeorge.Wilson@Sun.COM 
2671*9701SGeorge.Wilson@Sun.COM 		/*
2672*9701SGeorge.Wilson@Sun.COM 		 * It would be nice to call vdev_offline()
2673*9701SGeorge.Wilson@Sun.COM 		 * directly but the pool isn't fully loaded and
2674*9701SGeorge.Wilson@Sun.COM 		 * the txg threads have not been started yet.
2675*9701SGeorge.Wilson@Sun.COM 		 */
2676*9701SGeorge.Wilson@Sun.COM 		spa_config_enter(spa, SCL_STATE_ALL, FTAG, RW_WRITER);
2677*9701SGeorge.Wilson@Sun.COM 		vd->vdev_offline = val;
2678*9701SGeorge.Wilson@Sun.COM 		vdev_reopen(vd->vdev_top);
2679*9701SGeorge.Wilson@Sun.COM 		spa_config_exit(spa, SCL_STATE_ALL, FTAG);
2680*9701SGeorge.Wilson@Sun.COM 	}
2681*9701SGeorge.Wilson@Sun.COM }
2682
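/*
 * Illustrative sketch only, not part of the driver: a tiny standalone model
 * of the minref union that vdev_dtl_reassess() performs through the
 * space_map_ref_*() interfaces above.  The function name (dtl_union_model)
 * and the dense refcount array are hypothetical stand-ins; real DTLs are
 * segment trees, not bitmaps.
 */
#include <stdint.h>

#define	DTL_MODEL_MAXTXG	64

/*
 * Given per-child DTL_MISSING bitmaps over txgs [0, ntxg), compute the
 * parent's DTL_MISSING: the txgs missing in 'minref' or more children.
 * As in the code above, minref is nparity + 1 for RAID-Z and the child
 * count for mirrors, so e.g. a 3-way mirror is missing a txg only when
 * all three children are.
 */
static void
dtl_union_model(const uint8_t child_dtl[][DTL_MODEL_MAXTXG], int nchildren,
    int ntxg, int minref, uint8_t parent_dtl[DTL_MODEL_MAXTXG])
{
	int refcnt;

	for (int t = 0; t < ntxg; t++) {
		refcnt = 0;
		for (int c = 0; c < nchildren; c++)
			refcnt += child_dtl[c][t];
		parent_dtl[t] = (refcnt >= minref);
	}
}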