/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	NULL
};

/* maximum scrub/resilver I/O queue per leaf vdev */
int zfs_scrub_limit = 10;

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;
	uint64_t c;

	for (c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

/*
 * Get the replaceable or attachable device size.
 * If the parent is a mirror or raidz, the replaceable size is the minimum
 * psize of all its children.  For the rest, just return our own psize.
 *
 * e.g.
 *			psize	rsize
 * root			-	-
 *	mirror/raidz	-	-
 *	    disk1	20g	20g
 *	    disk2	40g	20g
 *	disk3		80g	80g
 */
uint64_t
vdev_get_rsize(vdev_t *vd)
{
	vdev_t *pvd, *cvd;
	uint64_t c, rsize;

	pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL or the root, just return our own psize.
	 */
	if (pvd == NULL || pvd->vdev_parent == NULL)
		return (vd->vdev_psize);

	rsize = 0;

	for (c = 0; c < pvd->vdev_children; c++) {
		cvd = pvd->vdev_child[c];
		rsize = MIN(rsize - 1, cvd->vdev_psize - 1) + 1;
	}

	return (rsize);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	int c;
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
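	/*
	 * Copy the existing children into the new, zeroed array before
	 * freeing the old one; any new slots start out NULL until they
	 * are filled in below.
	 */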
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc, c;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
static vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
	}

	if (guid == 0) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			while (guid == 0 || spa_guid_exists(guid, 0))
				guid = spa_get_random(-1ULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			while (guid == 0 ||
			    spa_guid_exists(spa_guid(spa), guid))
				guid = spa_get_random(-1ULL);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
		    &vd->vdev_dtl_lock);
	}
	txg_list_create(&vd->vdev_ms_list,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EINVAL);

	if ((ops = vdev_getops(type)) == NULL)
		return (EINVAL);

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (EINVAL);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (EINVAL);

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (ENOTSUP);

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			/*
			 * Currently, we can only support 2 parity devices.
			 */
			if (nparity == 0 || nparity > 2)
				return (EINVAL);
			/*
			 * Older versions can only support 1 parity device.
			 */
			if (nparity == 2 &&
			    spa_version(spa) < SPA_VERSION_RAID6)
				return (ENOTSUP);
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAID6)
				return (EINVAL);
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_smo.smo_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		/*
		 * When importing a pool, we want to ignore the persistent fault
		 * state, as the diagnosis made on another system may not be
		 * valid in the current context.
		 */
		if (spa->spa_load_state == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

void
vdev_free(vdev_t *vd)
{
	int c;
	spa_t *spa = vd->vdev_spa;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));

	/*
	 * Free all children.
	 */
	for (c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd == vd->vdev_top)
		vdev_metaslab_fini(vd);

	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);
	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_unload(&vd->vdev_dtl[t]);
		space_map_destroy(&vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	int c;

	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	metaslab_class_t *mc;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	if (vd->vdev_ms_shift == 0)	/* not being allocated from yet */
		return (0);

	/*
	 * Compute the raidz-deflation ratio.  Note, we hard-code
	 * in 128k (1 << 17) because it is the current "typical" blocksize.
	 * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change,
	 * or we will inconsistently account for existing bp's.
	 */
	vd->vdev_deflate_ratio = (1 << 17) /
	    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
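
	/*
	 * Illustrative arithmetic (a sketch, assuming 512-byte sectors,
	 * i.e. SPA_MINBLOCKSHIFT == 9; not part of the original source):
	 * a plain disk or mirror allocates a 128K block as 128K, giving
	 * a ratio of 131072 / 256 = 512.  A 5-wide raidz1 stores the same
	 * 128K as roughly 160K once parity is added, giving a ratio of
	 * about 131072 / 320 = 409 -- usable space "deflates" by the
	 * parity overhead.
	 */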
	ASSERT(oldc <= newc);

	if (vd->vdev_islog)
		mc = spa->spa_log_class;
	else
		mc = spa->spa_normal_class;

	if (vd->vdev_mg == NULL)
		vd->vdev_mg = metaslab_group_create(mc, vd);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (m = oldc; m < newc; m++) {
		space_map_obj_t smo = { 0, 0, 0 };
		if (txg == 0) {
			uint64_t object = 0;
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error)
				return (error);
			if (object != 0) {
				dmu_buf_t *db;
				error = dmu_bonus_hold(mos, object, FTAG, &db);
				if (error)
					return (error);
				ASSERT3U(db->db_size, >=, sizeof (smo));
				bcopy(db->db_data, &smo, sizeof (smo));
				ASSERT3U(smo.smo_object, ==, object);
				dmu_buf_rele(db, FTAG);
			}
		}
		vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
		    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
	}

	return (0);
}

void
vdev_metaslab_fini(vdev_t *vd)
{
	uint64_t m;
	uint64_t count = vd->vdev_ms_count;

	if (vd->vdev_ms != NULL) {
		for (m = 0; m < count; m++)
			if (vd->vdev_ms[m] != NULL)
				metaslab_fini(vd->vdev_ms[m]);
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
	}
}

typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset, zio->io_size, zio->io_data,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			zio_buf_free(zio->io_data, zio->io_size);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		zio_buf_free(zio->io_data, zio->io_size);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, 0, 0);
			zio->io_error = ENXIO;
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		while ((pio = zio_walk_parents(zio)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = ENXIO;

		kmem_free(vps, sizeof (*vps));
	}
}

/*
 * Determine whether this device is accessible by reading and writing
 * to several known locations: the pad regions of each vdev label
 * but the first (which we leave alone in case it contains a VTOC).
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time.  All zios that want to probe
	 * this vdev will become parents of the probe io.
	 */
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_TRYHARD;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE.  This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O.  That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad2)),
		    VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}

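/*
 * Illustrative usage of vdev_probe() (a sketch derived from the logic
 * above, not part of the original source):
 *
 *	error = zio_wait(vdev_probe(vd, NULL));
 *		synchronous probe, as done in vdev_open() below; the root
 *		probe zio is returned for the caller to wait on.
 *
 *	(void) vdev_probe(vd, zio);
 *		called with a zio from the I/O path; that zio becomes a
 *		parent of the single shared probe zio and NULL is returned,
 *		so the caller's zio completes only after the probe does.
 */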
/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	int c;
	uint64_t osize = 0;
	uint64_t asize, psize;
	uint64_t ashift = 0;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	vd->vdev_cant_read = B_FALSE;
	vd->vdev_cant_write = B_FALSE;

	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    VDEV_AUX_ERR_EXCEEDED);
		return (ENXIO);
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (ENXIO);
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);

	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, NULL, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    vd->vdev_stat.vs_aux);
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vd->vdev_state = VDEV_STATE_HEALTHY;
	}

	for (c = 0; c < vd->vdev_children; c++)
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = 0;
		asize = osize;
	}

	vd->vdev_psize = psize;

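	/*
	 * Clarifying note (an assumption about the standard label layout,
	 * not from the original source): for a leaf, psize is the raw
	 * device size while asize is what remains for allocation after
	 * the label overhead -- two labels plus the boot block at the
	 * front (VDEV_LABEL_START_SIZE) and two more labels at the end
	 * (VDEV_LABEL_END_SIZE).
	 */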
	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For testing purposes, a higher ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
	} else {
		/*
		 * Make sure the alignment requirement hasn't increased.
		 */
		if (ashift > vd->vdev_top->vdev_ashift) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}

		/*
		 * Make sure the device hasn't shrunk.
		 */
		if (asize < vd->vdev_asize) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}

		/*
		 * If all children are healthy and the asize has increased,
		 * then we've experienced dynamic LUN growth.
		 */
		if (vd->vdev_state == VDEV_STATE_HEALTHY &&
		    asize > vd->vdev_asize) {
			vd->vdev_asize = asize;
		}
	}

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_IO_FAILURE);
		return (error);
	}

	/*
	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
	 * resilver.  But don't do this if we are doing a reopen for a scrub,
	 * since this would just restart the scrub we are already doing.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
	    vdev_resilver_needed(vd, NULL, NULL))
		spa_async_request(spa, SPA_ASYNC_RESILVER);

	return (0);
}

/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int c;
	nvlist_t *label;
	uint64_t guid, top_guid;
	uint64_t state;

	for (c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c]) != 0)
			return (EBADF);

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {

		if ((label = vdev_label_read_config(vd)) == NULL) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
		    &guid) != 0 || guid != spa_guid(spa)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		/*
		 * If this vdev just became a top-level vdev because its
		 * sibling was detached, it will have adopted the parent's
		 * vdev guid -- but the label may or may not be on disk yet.
		 * Fortunately, either version of the label will have the
		 * same top guid, so if we're a top-level vdev, we can
		 * safely compare to that instead.
		 */
		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
		    &guid) != 0 ||
		    nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
		    &top_guid) != 0 ||
		    (vd->vdev_guid != guid &&
		    (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		nvlist_free(label);

		if (spa->spa_load_state == SPA_LOAD_OPEN &&
		    state != POOL_STATE_ACTIVE)
			return (EBADF);

		/*
		 * If we were able to open and validate a vdev that was
		 * previously marked permanently unavailable, clear that state
		 * now.
		 */
		if (vd->vdev_not_present)
			vd->vdev_not_present = 0;
	}

	return (0);
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vd->vdev_ops->vdev_op_close(vd);

	vdev_cache_purge(vd);

	/*
	 * We record the previous state before we close it, so that if we are
	 * doing a reopen(), we don't generate FMA ereports if we notice that
	 * it's still faulted.
	 */
	vd->vdev_prevstate = vd->vdev_state;

	if (vd->vdev_offline)
		vd->vdev_state = VDEV_STATE_OFFLINE;
	else
		vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}

void
vdev_reopen(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vdev_close(vd);
	(void) vdev_open(vd);

	/*
	 * Call vdev_validate() here to make sure we have the same device.
	 * Otherwise, a device with an invalid label could be successfully
	 * opened in response to vdev_reopen().
	 */
	if (vd->vdev_aux) {
		(void) vdev_validate_aux(vd);
		if (vdev_readable(vd) && vdev_writeable(vd) &&
		    vd->vdev_aux == &spa->spa_l2cache &&
		    !l2arc_vdev_present(vd)) {
			uint64_t size = vdev_get_rsize(vd);
			l2arc_add_vdev(spa, vd,
			    VDEV_LABEL_START_SIZE,
			    size - VDEV_LABEL_START_SIZE);
		}
	} else {
		(void) vdev_validate(vd);
	}

	/*
	 * Reassess parent vdev's health.
	 */
	vdev_propagate_state(vd);
}

int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
	int error;

	/*
	 * Normally, partial opens (e.g. of a mirror) are allowed.
	 * For a create, however, we want to fail the request if
	 * there are any components we can't open.
	 */
	error = vdev_open(vd);

	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
		vdev_close(vd);
		return (error ? error : ENXIO);
	}

	/*
	 * Recursively initialize all labels.
	 */
	if ((error = vdev_label_init(vd, txg, isreplacing ?
	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
		vdev_close(vd);
		return (error);
	}

	return (0);
}

/*
 * This is the latter half of vdev_create().  It is distinct because it
 * involves initiating transactions in order to do metaslab creation.
 * For creation, we want to try to create all vdevs at once and then undo it
 * if anything fails; this is much harder if we have pending transactions.
 */
void
vdev_init(vdev_t *vd, uint64_t txg)
{
	/*
	 * Aim for roughly 200 metaslabs per vdev.
	 */
	vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);

	/*
	 * Initialize the vdev's metaslabs.  This can't fail because
	 * there's nothing to read when creating all new metaslabs.
	 */
	VERIFY(vdev_metaslab_init(vd, txg) == 0);
}
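
/*
 * A worked example of the sizing above (illustrative, assuming highbit()
 * returns the 1-based position of the highest set bit; not part of the
 * original source): for a 1 TB (2^40 byte) vdev, asize / 200 is about
 * 5.5 GB, whose highest set bit gives vdev_ms_shift = 33, so each
 * metaslab covers 2^33 = 8 GB and the vdev gets 2^40 >> 33 = 128
 * metaslabs -- "roughly 200" after rounding the metaslab size up to a
 * power of two.  The MAX() with SPA_MAXBLOCKSHIFT keeps a tiny vdev from
 * getting metaslabs smaller than the maximum block size.
 */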
13848241SJeff.Bonwick@Sun.COM * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
13858241SJeff.Bonwick@Sun.COM * if any child is less than fully replicated, then so is its parent.
13868241SJeff.Bonwick@Sun.COM * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
13878241SJeff.Bonwick@Sun.COM * comprising only those txgs which appear in more than 'maxfaults' children;
13888241SJeff.Bonwick@Sun.COM * those are the txgs we don't have enough replication to read. For example,
13898241SJeff.Bonwick@Sun.COM * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
13908241SJeff.Bonwick@Sun.COM * thus, its DTL_MISSING consists of the set of txgs that appear in more than
13918241SJeff.Bonwick@Sun.COM * two child DTL_MISSING maps.
13928241SJeff.Bonwick@Sun.COM *
13938241SJeff.Bonwick@Sun.COM * It should be clear from the above that to compute the DTLs and outage maps
13948241SJeff.Bonwick@Sun.COM * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
13958241SJeff.Bonwick@Sun.COM * Therefore, that is all we keep on disk. When loading the pool, or after
13968241SJeff.Bonwick@Sun.COM * a configuration change, we generate all other DTLs from first principles.
13978241SJeff.Bonwick@Sun.COM */
1398789Sahrens void
13998241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1400789Sahrens {
14018241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[t];
14028241SJeff.Bonwick@Sun.COM
14038241SJeff.Bonwick@Sun.COM ASSERT(t < DTL_TYPES);
14048241SJeff.Bonwick@Sun.COM ASSERT(vd != vd->vdev_spa->spa_root_vdev);
14058241SJeff.Bonwick@Sun.COM
1406789Sahrens mutex_enter(sm->sm_lock);
1407789Sahrens if (!space_map_contains(sm, txg, size))
1408789Sahrens space_map_add(sm, txg, size);
1409789Sahrens mutex_exit(sm->sm_lock);
1410789Sahrens }
1411789Sahrens
14128241SJeff.Bonwick@Sun.COM boolean_t
14138241SJeff.Bonwick@Sun.COM vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1414789Sahrens {
14158241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[t];
14168241SJeff.Bonwick@Sun.COM boolean_t dirty = B_FALSE;
14178241SJeff.Bonwick@Sun.COM
14188241SJeff.Bonwick@Sun.COM ASSERT(t < DTL_TYPES);
14198241SJeff.Bonwick@Sun.COM ASSERT(vd != vd->vdev_spa->spa_root_vdev);
1420789Sahrens
1421789Sahrens mutex_enter(sm->sm_lock);
14228241SJeff.Bonwick@Sun.COM if (sm->sm_space != 0)
14238241SJeff.Bonwick@Sun.COM dirty = space_map_contains(sm, txg, size);
1424789Sahrens mutex_exit(sm->sm_lock);
1425789Sahrens
1426789Sahrens return (dirty);
1427789Sahrens }
1428789Sahrens
14298241SJeff.Bonwick@Sun.COM boolean_t
14308241SJeff.Bonwick@Sun.COM vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
14318241SJeff.Bonwick@Sun.COM {
14328241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[t];
14338241SJeff.Bonwick@Sun.COM boolean_t empty;
14348241SJeff.Bonwick@Sun.COM
14358241SJeff.Bonwick@Sun.COM mutex_enter(sm->sm_lock);
14368241SJeff.Bonwick@Sun.COM empty = (sm->sm_space == 0);
14378241SJeff.Bonwick@Sun.COM mutex_exit(sm->sm_lock);
14388241SJeff.Bonwick@Sun.COM
14398241SJeff.Bonwick@Sun.COM return (empty);
14408241SJeff.Bonwick@Sun.COM }
14418241SJeff.Bonwick@Sun.COM
1442789Sahrens /*
1443789Sahrens * Reassess DTLs after a config change or scrub completion.
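 *
 * A worked example of the rules above (illustrative configurations):
 * in a 3-way mirror, minref for DTL_MISSING is 3 (see the minref
 * computation below), so a txg missing on a single child appears only
 * in the parent's DTL_PARTIAL (minref == 1), while a txg missing on
 * all three children appears in DTL_MISSING as well. For raidz2
 * (vdev_nparity == 2, minref == 3), a txg missing on any two children
 * is still readable and therefore only PARTIAL; missing on three or
 * more children makes it MISSING.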
1444789Sahrens */
1445789Sahrens void
1446789Sahrens vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
1447789Sahrens {
14481544Seschrock spa_t *spa = vd->vdev_spa;
14498241SJeff.Bonwick@Sun.COM avl_tree_t reftree;
14508241SJeff.Bonwick@Sun.COM int minref;
14518241SJeff.Bonwick@Sun.COM
14528241SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
14538241SJeff.Bonwick@Sun.COM
14548241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++)
14558241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(vd->vdev_child[c], txg,
14568241SJeff.Bonwick@Sun.COM scrub_txg, scrub_done);
14578241SJeff.Bonwick@Sun.COM
14588241SJeff.Bonwick@Sun.COM if (vd == spa->spa_root_vdev)
14598241SJeff.Bonwick@Sun.COM return;
14608241SJeff.Bonwick@Sun.COM
14618241SJeff.Bonwick@Sun.COM if (vd->vdev_ops->vdev_op_leaf) {
1462789Sahrens mutex_enter(&vd->vdev_dtl_lock);
14637046Sahrens if (scrub_txg != 0 &&
14647046Sahrens (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) {
14657046Sahrens /* XXX should check scrub_done? */
14667046Sahrens /*
14677046Sahrens * We completed a scrub up to scrub_txg. If we
14687046Sahrens * did it without rebooting, then the scrub dtl
14697046Sahrens * will be valid, so excise the old region and
14707046Sahrens * fold in the scrub dtl. Otherwise, leave the
14717046Sahrens * dtl as-is if there was an error.
14728241SJeff.Bonwick@Sun.COM *
14738241SJeff.Bonwick@Sun.COM * There's a little trick here: to excise the beginning
14748241SJeff.Bonwick@Sun.COM * of the DTL_MISSING map, we put it into a reference
14758241SJeff.Bonwick@Sun.COM * tree and then add a segment with refcnt -1 that
14768241SJeff.Bonwick@Sun.COM * covers the range [0, scrub_txg). This means
14778241SJeff.Bonwick@Sun.COM * that each txg in that range has refcnt -1 or 0.
14788241SJeff.Bonwick@Sun.COM * We then add DTL_SCRUB with a refcnt of 2, so that
14798241SJeff.Bonwick@Sun.COM * entries in the range [0, scrub_txg) will have a
14808241SJeff.Bonwick@Sun.COM * positive refcnt -- either 1 or 2. We then convert
14818241SJeff.Bonwick@Sun.COM * the reference tree into the new DTL_MISSING map.
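 *
 * Concretely, with scrub_txg == 100 (illustrative numbers):
 * a txg 50 that was in DTL_MISSING but not in DTL_SCRUB nets
 * 1 - 1 == 0 and is excised (the scrub repaired it); a txg 60
 * in both maps nets 1 - 1 + 2 == 2 and remains missing; a txg
 * 200 outside the scrubbed range keeps its refcnt of 1 and
 * remains missing as well.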
14827046Sahrens */ 14838241SJeff.Bonwick@Sun.COM space_map_ref_create(&reftree); 14848241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree, 14858241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_MISSING], 1); 14868241SJeff.Bonwick@Sun.COM space_map_ref_add_seg(&reftree, 0, scrub_txg, -1); 14878241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree, 14888241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_SCRUB], 2); 14898241SJeff.Bonwick@Sun.COM space_map_ref_generate_map(&reftree, 14908241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_MISSING], 1); 14918241SJeff.Bonwick@Sun.COM space_map_ref_destroy(&reftree); 1492789Sahrens } 14938241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); 14948241SJeff.Bonwick@Sun.COM space_map_walk(&vd->vdev_dtl[DTL_MISSING], 14958241SJeff.Bonwick@Sun.COM space_map_add, &vd->vdev_dtl[DTL_PARTIAL]); 1496789Sahrens if (scrub_done) 14978241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL); 14988241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); 14998241SJeff.Bonwick@Sun.COM if (!vdev_readable(vd)) 15008241SJeff.Bonwick@Sun.COM space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); 15018241SJeff.Bonwick@Sun.COM else 15028241SJeff.Bonwick@Sun.COM space_map_walk(&vd->vdev_dtl[DTL_MISSING], 15038241SJeff.Bonwick@Sun.COM space_map_add, &vd->vdev_dtl[DTL_OUTAGE]); 1504789Sahrens mutex_exit(&vd->vdev_dtl_lock); 15057046Sahrens 15061732Sbonwick if (txg != 0) 15071732Sbonwick vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); 1508789Sahrens return; 1509789Sahrens } 1510789Sahrens 1511789Sahrens mutex_enter(&vd->vdev_dtl_lock); 15128241SJeff.Bonwick@Sun.COM for (int t = 0; t < DTL_TYPES; t++) { 15138241SJeff.Bonwick@Sun.COM if (t == DTL_SCRUB) 15148241SJeff.Bonwick@Sun.COM continue; /* leaf vdevs only */ 15158241SJeff.Bonwick@Sun.COM if (t == DTL_PARTIAL) 15168241SJeff.Bonwick@Sun.COM minref = 1; /* i.e. 
non-zero */ 15178241SJeff.Bonwick@Sun.COM else if (vd->vdev_nparity != 0) 15188241SJeff.Bonwick@Sun.COM minref = vd->vdev_nparity + 1; /* RAID-Z */ 15198241SJeff.Bonwick@Sun.COM else 15208241SJeff.Bonwick@Sun.COM minref = vd->vdev_children; /* any kind of mirror */ 15218241SJeff.Bonwick@Sun.COM space_map_ref_create(&reftree); 15228241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) { 15238241SJeff.Bonwick@Sun.COM vdev_t *cvd = vd->vdev_child[c]; 15248241SJeff.Bonwick@Sun.COM mutex_enter(&cvd->vdev_dtl_lock); 15258241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree, &cvd->vdev_dtl[t], 1); 15268241SJeff.Bonwick@Sun.COM mutex_exit(&cvd->vdev_dtl_lock); 15278241SJeff.Bonwick@Sun.COM } 15288241SJeff.Bonwick@Sun.COM space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref); 15298241SJeff.Bonwick@Sun.COM space_map_ref_destroy(&reftree); 15308241SJeff.Bonwick@Sun.COM } 1531789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1532789Sahrens } 1533789Sahrens 1534789Sahrens static int 1535789Sahrens vdev_dtl_load(vdev_t *vd) 1536789Sahrens { 1537789Sahrens spa_t *spa = vd->vdev_spa; 15388241SJeff.Bonwick@Sun.COM space_map_obj_t *smo = &vd->vdev_dtl_smo; 15391732Sbonwick objset_t *mos = spa->spa_meta_objset; 1540789Sahrens dmu_buf_t *db; 1541789Sahrens int error; 1542789Sahrens 1543789Sahrens ASSERT(vd->vdev_children == 0); 1544789Sahrens 1545789Sahrens if (smo->smo_object == 0) 1546789Sahrens return (0); 1547789Sahrens 15481732Sbonwick if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0) 15491544Seschrock return (error); 15501732Sbonwick 15514944Smaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 15524944Smaybee bcopy(db->db_data, smo, sizeof (*smo)); 15531544Seschrock dmu_buf_rele(db, FTAG); 1554789Sahrens 1555789Sahrens mutex_enter(&vd->vdev_dtl_lock); 15568241SJeff.Bonwick@Sun.COM error = space_map_load(&vd->vdev_dtl[DTL_MISSING], 15578241SJeff.Bonwick@Sun.COM NULL, SM_ALLOC, smo, mos); 1558789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1559789Sahrens 1560789Sahrens return (error); 1561789Sahrens } 1562789Sahrens 1563789Sahrens void 1564789Sahrens vdev_dtl_sync(vdev_t *vd, uint64_t txg) 1565789Sahrens { 1566789Sahrens spa_t *spa = vd->vdev_spa; 15678241SJeff.Bonwick@Sun.COM space_map_obj_t *smo = &vd->vdev_dtl_smo; 15688241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[DTL_MISSING]; 15691732Sbonwick objset_t *mos = spa->spa_meta_objset; 1570789Sahrens space_map_t smsync; 1571789Sahrens kmutex_t smlock; 1572789Sahrens dmu_buf_t *db; 1573789Sahrens dmu_tx_t *tx; 1574789Sahrens 1575789Sahrens tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 1576789Sahrens 1577789Sahrens if (vd->vdev_detached) { 1578789Sahrens if (smo->smo_object != 0) { 15791732Sbonwick int err = dmu_object_free(mos, smo->smo_object, tx); 1580789Sahrens ASSERT3U(err, ==, 0); 1581789Sahrens smo->smo_object = 0; 1582789Sahrens } 1583789Sahrens dmu_tx_commit(tx); 1584789Sahrens return; 1585789Sahrens } 1586789Sahrens 1587789Sahrens if (smo->smo_object == 0) { 1588789Sahrens ASSERT(smo->smo_objsize == 0); 1589789Sahrens ASSERT(smo->smo_alloc == 0); 15901732Sbonwick smo->smo_object = dmu_object_alloc(mos, 1591789Sahrens DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT, 1592789Sahrens DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx); 1593789Sahrens ASSERT(smo->smo_object != 0); 1594789Sahrens vdev_config_dirty(vd->vdev_top); 1595789Sahrens } 1596789Sahrens 1597789Sahrens mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL); 1598789Sahrens 1599789Sahrens space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift, 
1600789Sahrens &smlock); 1601789Sahrens 1602789Sahrens mutex_enter(&smlock); 1603789Sahrens 1604789Sahrens mutex_enter(&vd->vdev_dtl_lock); 16051732Sbonwick space_map_walk(sm, space_map_add, &smsync); 1606789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1607789Sahrens 16081732Sbonwick space_map_truncate(smo, mos, tx); 16091732Sbonwick space_map_sync(&smsync, SM_ALLOC, smo, mos, tx); 1610789Sahrens 1611789Sahrens space_map_destroy(&smsync); 1612789Sahrens 1613789Sahrens mutex_exit(&smlock); 1614789Sahrens mutex_destroy(&smlock); 1615789Sahrens 16161732Sbonwick VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)); 1617789Sahrens dmu_buf_will_dirty(db, tx); 16184944Smaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 16194944Smaybee bcopy(smo, db->db_data, sizeof (*smo)); 16201544Seschrock dmu_buf_rele(db, FTAG); 1621789Sahrens 1622789Sahrens dmu_tx_commit(tx); 1623789Sahrens } 1624789Sahrens 16257046Sahrens /* 16268241SJeff.Bonwick@Sun.COM * Determine whether the specified vdev can be offlined/detached/removed 16278241SJeff.Bonwick@Sun.COM * without losing data. 16288241SJeff.Bonwick@Sun.COM */ 16298241SJeff.Bonwick@Sun.COM boolean_t 16308241SJeff.Bonwick@Sun.COM vdev_dtl_required(vdev_t *vd) 16318241SJeff.Bonwick@Sun.COM { 16328241SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 16338241SJeff.Bonwick@Sun.COM vdev_t *tvd = vd->vdev_top; 16348241SJeff.Bonwick@Sun.COM uint8_t cant_read = vd->vdev_cant_read; 16358241SJeff.Bonwick@Sun.COM boolean_t required; 16368241SJeff.Bonwick@Sun.COM 16378241SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 16388241SJeff.Bonwick@Sun.COM 16398241SJeff.Bonwick@Sun.COM if (vd == spa->spa_root_vdev || vd == tvd) 16408241SJeff.Bonwick@Sun.COM return (B_TRUE); 16418241SJeff.Bonwick@Sun.COM 16428241SJeff.Bonwick@Sun.COM /* 16438241SJeff.Bonwick@Sun.COM * Temporarily mark the device as unreadable, and then determine 16448241SJeff.Bonwick@Sun.COM * whether this results in any DTL outages in the top-level vdev. 16458241SJeff.Bonwick@Sun.COM * If not, we can safely offline/detach/remove the device. 16468241SJeff.Bonwick@Sun.COM */ 16478241SJeff.Bonwick@Sun.COM vd->vdev_cant_read = B_TRUE; 16488241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 16498241SJeff.Bonwick@Sun.COM required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 16508241SJeff.Bonwick@Sun.COM vd->vdev_cant_read = cant_read; 16518241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 16528241SJeff.Bonwick@Sun.COM 16538241SJeff.Bonwick@Sun.COM return (required); 16548241SJeff.Bonwick@Sun.COM } 16558241SJeff.Bonwick@Sun.COM 16568241SJeff.Bonwick@Sun.COM /* 16577046Sahrens * Determine if resilver is needed, and if so the txg range. 
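 *
 * For example (illustrative segments), if a leaf's DTL_MISSING holds
 * [100, 150) and [200, 220), *minp comes back as 99 (ss_start - 1 of
 * the first segment) and *maxp as 220 (ss_end of the last),
 * bracketing every missing txg.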
16587046Sahrens */
16597046Sahrens boolean_t
16607046Sahrens vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
16617046Sahrens {
16627046Sahrens boolean_t needed = B_FALSE;
16637046Sahrens uint64_t thismin = UINT64_MAX;
16647046Sahrens uint64_t thismax = 0;
16657046Sahrens
16667046Sahrens if (vd->vdev_children == 0) {
16677046Sahrens mutex_enter(&vd->vdev_dtl_lock);
16688241SJeff.Bonwick@Sun.COM if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 &&
16698241SJeff.Bonwick@Sun.COM vdev_writeable(vd)) {
16707046Sahrens space_seg_t *ss;
16717046Sahrens
16728241SJeff.Bonwick@Sun.COM ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root);
16737046Sahrens thismin = ss->ss_start - 1;
16748241SJeff.Bonwick@Sun.COM ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root);
16757046Sahrens thismax = ss->ss_end;
16767046Sahrens needed = B_TRUE;
16777046Sahrens }
16787046Sahrens mutex_exit(&vd->vdev_dtl_lock);
16797046Sahrens } else {
16808241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) {
16817046Sahrens vdev_t *cvd = vd->vdev_child[c];
16827046Sahrens uint64_t cmin, cmax;
16837046Sahrens
16847046Sahrens if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
16857046Sahrens thismin = MIN(thismin, cmin);
16867046Sahrens thismax = MAX(thismax, cmax);
16877046Sahrens needed = B_TRUE;
16887046Sahrens }
16897046Sahrens }
16907046Sahrens }
16917046Sahrens
16927046Sahrens if (needed && minp) {
16937046Sahrens *minp = thismin;
16947046Sahrens *maxp = thismax;
16957046Sahrens }
16967046Sahrens return (needed);
16977046Sahrens }
16987046Sahrens
16991986Seschrock void
17001544Seschrock vdev_load(vdev_t *vd)
1701789Sahrens {
1702789Sahrens /*
1703789Sahrens * Recursively load all children.
1704789Sahrens */
17058241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++)
17061986Seschrock vdev_load(vd->vdev_child[c]);
1707789Sahrens
1708789Sahrens /*
17091585Sbonwick * If this is a top-level vdev, initialize its metaslabs.
1710789Sahrens */
17111986Seschrock if (vd == vd->vdev_top &&
17121986Seschrock (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
17131986Seschrock vdev_metaslab_init(vd, 0) != 0))
17141986Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
17151986Seschrock VDEV_AUX_CORRUPT_DATA);
1716789Sahrens
1717789Sahrens /*
1718789Sahrens * If this is a leaf vdev, load its DTL.
1719789Sahrens */
17201986Seschrock if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
17211986Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
17221986Seschrock VDEV_AUX_CORRUPT_DATA);
1723789Sahrens }
1724789Sahrens
17252082Seschrock /*
17265450Sbrendan * The special vdev case is used for hot spares and l2cache devices. Its
17275450Sbrendan * sole purpose is to set the vdev state for the associated vdev. To do this,
17285450Sbrendan * we make sure that we can open the underlying device, then try to read the
17295450Sbrendan * label, and make sure that the label is sane and that it hasn't been
17305450Sbrendan * repurposed to another pool.
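 *
 * "Sane" here means the label carries a version this software
 * understands, a guid matching the vdev we opened, and a pool state
 * entry; a label failing any of these checks gets
 * VDEV_AUX_CORRUPT_DATA below.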
17312082Seschrock */ 17322082Seschrock int 17335450Sbrendan vdev_validate_aux(vdev_t *vd) 17342082Seschrock { 17352082Seschrock nvlist_t *label; 17362082Seschrock uint64_t guid, version; 17372082Seschrock uint64_t state; 17382082Seschrock 17397754SJeff.Bonwick@Sun.COM if (!vdev_readable(vd)) 17406643Seschrock return (0); 17416643Seschrock 17422082Seschrock if ((label = vdev_label_read_config(vd)) == NULL) { 17432082Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 17442082Seschrock VDEV_AUX_CORRUPT_DATA); 17452082Seschrock return (-1); 17462082Seschrock } 17472082Seschrock 17482082Seschrock if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 || 17494577Sahrens version > SPA_VERSION || 17502082Seschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 || 17512082Seschrock guid != vd->vdev_guid || 17522082Seschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) { 17532082Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 17542082Seschrock VDEV_AUX_CORRUPT_DATA); 17552082Seschrock nvlist_free(label); 17562082Seschrock return (-1); 17572082Seschrock } 17582082Seschrock 17592082Seschrock /* 17602082Seschrock * We don't actually check the pool state here. If it's in fact in 17612082Seschrock * use by another pool, we update this fact on the fly when requested. 17622082Seschrock */ 17632082Seschrock nvlist_free(label); 17642082Seschrock return (0); 17652082Seschrock } 17662082Seschrock 1767789Sahrens void 1768789Sahrens vdev_sync_done(vdev_t *vd, uint64_t txg) 1769789Sahrens { 1770789Sahrens metaslab_t *msp; 1771789Sahrens 1772789Sahrens while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) 1773789Sahrens metaslab_sync_done(msp, txg); 1774789Sahrens } 1775789Sahrens 1776789Sahrens void 1777789Sahrens vdev_sync(vdev_t *vd, uint64_t txg) 1778789Sahrens { 1779789Sahrens spa_t *spa = vd->vdev_spa; 1780789Sahrens vdev_t *lvd; 1781789Sahrens metaslab_t *msp; 17821732Sbonwick dmu_tx_t *tx; 1783789Sahrens 17841732Sbonwick if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) { 17851732Sbonwick ASSERT(vd == vd->vdev_top); 17861732Sbonwick tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 17871732Sbonwick vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, 17881732Sbonwick DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); 17891732Sbonwick ASSERT(vd->vdev_ms_array != 0); 17901732Sbonwick vdev_config_dirty(vd); 17911732Sbonwick dmu_tx_commit(tx); 17921732Sbonwick } 1793789Sahrens 17941732Sbonwick while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { 1795789Sahrens metaslab_sync(msp, txg); 17961732Sbonwick (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); 17971732Sbonwick } 1798789Sahrens 1799789Sahrens while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) 1800789Sahrens vdev_dtl_sync(lvd, txg); 1801789Sahrens 1802789Sahrens (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); 1803789Sahrens } 1804789Sahrens 1805789Sahrens uint64_t 1806789Sahrens vdev_psize_to_asize(vdev_t *vd, uint64_t psize) 1807789Sahrens { 1808789Sahrens return (vd->vdev_ops->vdev_op_asize(vd, psize)); 1809789Sahrens } 1810789Sahrens 18114451Seschrock /* 18124451Seschrock * Mark the given vdev faulted. A faulted vdev behaves as if the device could 18134451Seschrock * not be opened, and no I/O is attempted. 
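 *
 * Note that faulting is subject to a redundancy check: if faulting
 * this leaf would render its top-level vdev unopenable, the code
 * below backs off and merely marks the device degraded.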
18144451Seschrock */
1815789Sahrens int
18164451Seschrock vdev_fault(spa_t *spa, uint64_t guid)
18174451Seschrock {
18186643Seschrock vdev_t *vd;
18194451Seschrock
18207754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa);
18214451Seschrock
18226643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
18237754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV));
18247754SJeff.Bonwick@Sun.COM
18254451Seschrock if (!vd->vdev_ops->vdev_op_leaf)
18267754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
18274451Seschrock
18284451Seschrock /*
18294451Seschrock * Faulted state takes precedence over degraded.
18304451Seschrock */
18314451Seschrock vd->vdev_faulted = 1ULL;
18324451Seschrock vd->vdev_degraded = 0ULL;
18337754SJeff.Bonwick@Sun.COM vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED);
18344451Seschrock
18354451Seschrock /*
18368123SDavid.Marker@sun.com * If marking the vdev as faulted causes the top-level vdev to become
18374451Seschrock * unavailable, then back off and simply mark the vdev as degraded
18384451Seschrock * instead.
18394451Seschrock */
18406643Seschrock if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) {
18414451Seschrock vd->vdev_degraded = 1ULL;
18424451Seschrock vd->vdev_faulted = 0ULL;
18434451Seschrock
18444451Seschrock /*
18454451Seschrock * If we reopen the device and it's not dead, only then do we
18464451Seschrock * mark it degraded.
18474451Seschrock */
18484451Seschrock vdev_reopen(vd);
18494451Seschrock
18505329Sgw25295 if (vdev_readable(vd)) {
18514451Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
18524451Seschrock VDEV_AUX_ERR_EXCEEDED);
18534451Seschrock }
18544451Seschrock }
18554451Seschrock
18567754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0));
18574451Seschrock }
18584451Seschrock
18594451Seschrock /*
18604451Seschrock * Mark the given vdev degraded. A degraded vdev is purely an indication to the
18614451Seschrock * user that something is wrong. The vdev continues to operate as normal as far
18624451Seschrock * as I/O is concerned.
18634451Seschrock */
18644451Seschrock int
18654451Seschrock vdev_degrade(spa_t *spa, uint64_t guid)
18664451Seschrock {
18676643Seschrock vdev_t *vd;
18684451Seschrock
18697754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa);
18704451Seschrock
18716643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
18727754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV));
18737754SJeff.Bonwick@Sun.COM
18744451Seschrock if (!vd->vdev_ops->vdev_op_leaf)
18757754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
18764451Seschrock
18774451Seschrock /*
18784451Seschrock * If the vdev is already faulted, then don't do anything.
18794451Seschrock */
18807754SJeff.Bonwick@Sun.COM if (vd->vdev_faulted || vd->vdev_degraded)
18817754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, 0));
18824451Seschrock
18834451Seschrock vd->vdev_degraded = 1ULL;
18844451Seschrock if (!vdev_is_dead(vd))
18854451Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
18864451Seschrock VDEV_AUX_ERR_EXCEEDED);
18874451Seschrock
18887754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0));
18894451Seschrock }
18904451Seschrock
18914451Seschrock /*
18924451Seschrock * Online the given vdev. If 'unspare' is set, it implies two things. First,
18934451Seschrock * any attached spare device should be detached when the device finishes
18944451Seschrock * resilvering.
Second, the online should be treated like a 'test' online case, 18954451Seschrock * so no FMA events are generated if the device fails to open. 18964451Seschrock */ 18974451Seschrock int 18987754SJeff.Bonwick@Sun.COM vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 1899789Sahrens { 19006643Seschrock vdev_t *vd; 1901789Sahrens 19027754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa); 19031485Slling 19046643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 19057754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV)); 1906789Sahrens 19071585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 19087754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 19091585Sbonwick 1910789Sahrens vd->vdev_offline = B_FALSE; 19111485Slling vd->vdev_tmpoffline = B_FALSE; 19127754SJeff.Bonwick@Sun.COM vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 19137754SJeff.Bonwick@Sun.COM vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 19141544Seschrock vdev_reopen(vd->vdev_top); 19154451Seschrock vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 19164451Seschrock 19174451Seschrock if (newstate) 19184451Seschrock *newstate = vd->vdev_state; 19194451Seschrock if ((flags & ZFS_ONLINE_UNSPARE) && 19204451Seschrock !vdev_is_dead(vd) && vd->vdev_parent && 19214451Seschrock vd->vdev_parent->vdev_ops == &vdev_spare_ops && 19224451Seschrock vd->vdev_parent->vdev_child[0] == vd) 19234451Seschrock vd->vdev_unspare = B_TRUE; 1924789Sahrens 19258241SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 1926789Sahrens } 1927789Sahrens 1928789Sahrens int 19294451Seschrock vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 1930789Sahrens { 19319701SGeorge.Wilson@Sun.COM vdev_t *vd, *tvd; 19329701SGeorge.Wilson@Sun.COM int error; 1933789Sahrens 19347754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa); 1935789Sahrens 19366643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 19377754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV)); 1938789Sahrens 19391585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 19407754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 19411585Sbonwick 19429701SGeorge.Wilson@Sun.COM tvd = vd->vdev_top; 19439701SGeorge.Wilson@Sun.COM 1944789Sahrens /* 19451732Sbonwick * If the device isn't already offline, try to offline it. 1946789Sahrens */ 19471732Sbonwick if (!vd->vdev_offline) { 19481732Sbonwick /* 19498241SJeff.Bonwick@Sun.COM * If this device has the only valid copy of some data, 19509701SGeorge.Wilson@Sun.COM * don't allow it to be offlined. Log devices are always 19519701SGeorge.Wilson@Sun.COM * expendable. 19521732Sbonwick */ 19539701SGeorge.Wilson@Sun.COM if (!tvd->vdev_islog && vd->vdev_aux == NULL && 19549701SGeorge.Wilson@Sun.COM vdev_dtl_required(vd)) 19557754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, EBUSY)); 1956789Sahrens 19571732Sbonwick /* 19581732Sbonwick * Offline this device and reopen its top-level vdev. 19599701SGeorge.Wilson@Sun.COM * If the top-level vdev is a log device then just offline 19609701SGeorge.Wilson@Sun.COM * it. Otherwise, if this action results in the top-level 19619701SGeorge.Wilson@Sun.COM * vdev becoming unusable, undo it and fail the request. 
19621732Sbonwick */ 19631732Sbonwick vd->vdev_offline = B_TRUE; 19649701SGeorge.Wilson@Sun.COM vdev_reopen(tvd); 19659701SGeorge.Wilson@Sun.COM 19669701SGeorge.Wilson@Sun.COM if (!tvd->vdev_islog && vd->vdev_aux == NULL && 19679701SGeorge.Wilson@Sun.COM vdev_is_dead(tvd)) { 19681732Sbonwick vd->vdev_offline = B_FALSE; 19699701SGeorge.Wilson@Sun.COM vdev_reopen(tvd); 19707754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, EBUSY)); 19711732Sbonwick } 1972789Sahrens } 1973789Sahrens 19747754SJeff.Bonwick@Sun.COM vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 19751732Sbonwick 19769701SGeorge.Wilson@Sun.COM if (!tvd->vdev_islog || !vdev_is_dead(tvd)) 19779701SGeorge.Wilson@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 19789701SGeorge.Wilson@Sun.COM 19799701SGeorge.Wilson@Sun.COM (void) spa_vdev_state_exit(spa, vd, 0); 19809701SGeorge.Wilson@Sun.COM 19819701SGeorge.Wilson@Sun.COM error = dmu_objset_find(spa_name(spa), zil_vdev_offline, 19829701SGeorge.Wilson@Sun.COM NULL, DS_FIND_CHILDREN); 19839701SGeorge.Wilson@Sun.COM if (error) { 19849701SGeorge.Wilson@Sun.COM (void) vdev_online(spa, guid, 0, NULL); 19859701SGeorge.Wilson@Sun.COM return (error); 19869701SGeorge.Wilson@Sun.COM } 19879701SGeorge.Wilson@Sun.COM /* 19889701SGeorge.Wilson@Sun.COM * If we successfully offlined the log device then we need to 19899701SGeorge.Wilson@Sun.COM * sync out the current txg so that the "stubby" block can be 19909701SGeorge.Wilson@Sun.COM * removed by zil_sync(). 19919701SGeorge.Wilson@Sun.COM */ 19929701SGeorge.Wilson@Sun.COM txg_wait_synced(spa->spa_dsl_pool, 0); 19939701SGeorge.Wilson@Sun.COM return (0); 1994789Sahrens } 1995789Sahrens 19961544Seschrock /* 19971544Seschrock * Clear the error counts associated with this vdev. Unlike vdev_online() and 19981544Seschrock * vdev_offline(), we assume the spa config is locked. We also clear all 19991544Seschrock * children. If 'vd' is NULL, then the user wants to clear all vdevs. 20001544Seschrock */ 20011544Seschrock void 20027754SJeff.Bonwick@Sun.COM vdev_clear(spa_t *spa, vdev_t *vd) 2003789Sahrens { 20047754SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 20057754SJeff.Bonwick@Sun.COM 20067754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2007789Sahrens 20081544Seschrock if (vd == NULL) 20097754SJeff.Bonwick@Sun.COM vd = rvd; 2010789Sahrens 20111544Seschrock vd->vdev_stat.vs_read_errors = 0; 20121544Seschrock vd->vdev_stat.vs_write_errors = 0; 20131544Seschrock vd->vdev_stat.vs_checksum_errors = 0; 2014789Sahrens 20157754SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) 20167754SJeff.Bonwick@Sun.COM vdev_clear(spa, vd->vdev_child[c]); 20174451Seschrock 20184451Seschrock /* 20196959Sek110237 * If we're in the FAULTED state or have experienced failed I/O, then 20206959Sek110237 * clear the persistent state and attempt to reopen the device. We 20216959Sek110237 * also mark the vdev config dirty, so that the new faulted state is 20226959Sek110237 * written out to disk. 
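 *
 * If the reopened device turns out not to be dead (and is not an aux
 * vdev), we also request SPA_ASYNC_RESILVER below so that any writes
 * it missed while faulted can be repaired.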
20234451Seschrock */ 20247754SJeff.Bonwick@Sun.COM if (vd->vdev_faulted || vd->vdev_degraded || 20257754SJeff.Bonwick@Sun.COM !vdev_readable(vd) || !vdev_writeable(vd)) { 20266959Sek110237 20274451Seschrock vd->vdev_faulted = vd->vdev_degraded = 0; 20287754SJeff.Bonwick@Sun.COM vd->vdev_cant_read = B_FALSE; 20297754SJeff.Bonwick@Sun.COM vd->vdev_cant_write = B_FALSE; 20307754SJeff.Bonwick@Sun.COM 20314451Seschrock vdev_reopen(vd); 20324451Seschrock 20337754SJeff.Bonwick@Sun.COM if (vd != rvd) 20347754SJeff.Bonwick@Sun.COM vdev_state_dirty(vd->vdev_top); 20357754SJeff.Bonwick@Sun.COM 20367754SJeff.Bonwick@Sun.COM if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) 20374808Sek110237 spa_async_request(spa, SPA_ASYNC_RESILVER); 20384451Seschrock 20394451Seschrock spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR); 20404451Seschrock } 2041789Sahrens } 2042789Sahrens 20437754SJeff.Bonwick@Sun.COM boolean_t 20447754SJeff.Bonwick@Sun.COM vdev_is_dead(vdev_t *vd) 20455329Sgw25295 { 20467754SJeff.Bonwick@Sun.COM return (vd->vdev_state < VDEV_STATE_DEGRADED); 20475329Sgw25295 } 20485329Sgw25295 20497754SJeff.Bonwick@Sun.COM boolean_t 20507754SJeff.Bonwick@Sun.COM vdev_readable(vdev_t *vd) 2051789Sahrens { 20527754SJeff.Bonwick@Sun.COM return (!vdev_is_dead(vd) && !vd->vdev_cant_read); 2053789Sahrens } 2054789Sahrens 20557754SJeff.Bonwick@Sun.COM boolean_t 20567754SJeff.Bonwick@Sun.COM vdev_writeable(vdev_t *vd) 2057789Sahrens { 20587754SJeff.Bonwick@Sun.COM return (!vdev_is_dead(vd) && !vd->vdev_cant_write); 20597754SJeff.Bonwick@Sun.COM } 2060789Sahrens 20617754SJeff.Bonwick@Sun.COM boolean_t 20627980SGeorge.Wilson@Sun.COM vdev_allocatable(vdev_t *vd) 20637980SGeorge.Wilson@Sun.COM { 20648241SJeff.Bonwick@Sun.COM uint64_t state = vd->vdev_state; 20658241SJeff.Bonwick@Sun.COM 20667980SGeorge.Wilson@Sun.COM /* 20678241SJeff.Bonwick@Sun.COM * We currently allow allocations from vdevs which may be in the 20687980SGeorge.Wilson@Sun.COM * process of reopening (i.e. VDEV_STATE_CLOSED). If the device 20697980SGeorge.Wilson@Sun.COM * fails to reopen then we'll catch it later when we're holding 20708241SJeff.Bonwick@Sun.COM * the proper locks. Note that we have to get the vdev state 20718241SJeff.Bonwick@Sun.COM * in a local variable because although it changes atomically, 20728241SJeff.Bonwick@Sun.COM * we're asking two separate questions about it. 20737980SGeorge.Wilson@Sun.COM */ 20748241SJeff.Bonwick@Sun.COM return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) && 20757980SGeorge.Wilson@Sun.COM !vd->vdev_cant_write); 20767980SGeorge.Wilson@Sun.COM } 20777980SGeorge.Wilson@Sun.COM 20787980SGeorge.Wilson@Sun.COM boolean_t 20797754SJeff.Bonwick@Sun.COM vdev_accessible(vdev_t *vd, zio_t *zio) 20807754SJeff.Bonwick@Sun.COM { 20817754SJeff.Bonwick@Sun.COM ASSERT(zio->io_vd == vd); 2082789Sahrens 20837754SJeff.Bonwick@Sun.COM if (vdev_is_dead(vd) || vd->vdev_remove_wanted) 20847754SJeff.Bonwick@Sun.COM return (B_FALSE); 2085789Sahrens 20867754SJeff.Bonwick@Sun.COM if (zio->io_type == ZIO_TYPE_READ) 20877754SJeff.Bonwick@Sun.COM return (!vd->vdev_cant_read); 2088789Sahrens 20897754SJeff.Bonwick@Sun.COM if (zio->io_type == ZIO_TYPE_WRITE) 20907754SJeff.Bonwick@Sun.COM return (!vd->vdev_cant_write); 20917754SJeff.Bonwick@Sun.COM 20927754SJeff.Bonwick@Sun.COM return (B_TRUE); 2093789Sahrens } 2094789Sahrens 2095789Sahrens /* 2096789Sahrens * Get statistics for the given vdev. 
2097789Sahrens */ 2098789Sahrens void 2099789Sahrens vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) 2100789Sahrens { 2101789Sahrens vdev_t *rvd = vd->vdev_spa->spa_root_vdev; 2102789Sahrens 2103789Sahrens mutex_enter(&vd->vdev_stat_lock); 2104789Sahrens bcopy(&vd->vdev_stat, vs, sizeof (*vs)); 21057046Sahrens vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors; 2106789Sahrens vs->vs_timestamp = gethrtime() - vs->vs_timestamp; 2107789Sahrens vs->vs_state = vd->vdev_state; 21081175Slling vs->vs_rsize = vdev_get_rsize(vd); 2109789Sahrens mutex_exit(&vd->vdev_stat_lock); 2110789Sahrens 2111789Sahrens /* 2112789Sahrens * If we're getting stats on the root vdev, aggregate the I/O counts 2113789Sahrens * over all top-level vdevs (i.e. the direct children of the root). 2114789Sahrens */ 2115789Sahrens if (vd == rvd) { 21167754SJeff.Bonwick@Sun.COM for (int c = 0; c < rvd->vdev_children; c++) { 2117789Sahrens vdev_t *cvd = rvd->vdev_child[c]; 2118789Sahrens vdev_stat_t *cvs = &cvd->vdev_stat; 2119789Sahrens 2120789Sahrens mutex_enter(&vd->vdev_stat_lock); 21217754SJeff.Bonwick@Sun.COM for (int t = 0; t < ZIO_TYPES; t++) { 2122789Sahrens vs->vs_ops[t] += cvs->vs_ops[t]; 2123789Sahrens vs->vs_bytes[t] += cvs->vs_bytes[t]; 2124789Sahrens } 2125789Sahrens vs->vs_scrub_examined += cvs->vs_scrub_examined; 2126789Sahrens mutex_exit(&vd->vdev_stat_lock); 2127789Sahrens } 2128789Sahrens } 2129789Sahrens } 2130789Sahrens 2131789Sahrens void 21325450Sbrendan vdev_clear_stats(vdev_t *vd) 21335450Sbrendan { 21345450Sbrendan mutex_enter(&vd->vdev_stat_lock); 21355450Sbrendan vd->vdev_stat.vs_space = 0; 21365450Sbrendan vd->vdev_stat.vs_dspace = 0; 21375450Sbrendan vd->vdev_stat.vs_alloc = 0; 21385450Sbrendan mutex_exit(&vd->vdev_stat_lock); 21395450Sbrendan } 21405450Sbrendan 21415450Sbrendan void 21427754SJeff.Bonwick@Sun.COM vdev_stat_update(zio_t *zio, uint64_t psize) 2143789Sahrens { 21448241SJeff.Bonwick@Sun.COM spa_t *spa = zio->io_spa; 21458241SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 21467754SJeff.Bonwick@Sun.COM vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; 2147789Sahrens vdev_t *pvd; 2148789Sahrens uint64_t txg = zio->io_txg; 2149789Sahrens vdev_stat_t *vs = &vd->vdev_stat; 2150789Sahrens zio_type_t type = zio->io_type; 2151789Sahrens int flags = zio->io_flags; 2152789Sahrens 21537754SJeff.Bonwick@Sun.COM /* 21547754SJeff.Bonwick@Sun.COM * If this i/o is a gang leader, it didn't do any actual work. 21557754SJeff.Bonwick@Sun.COM */ 21567754SJeff.Bonwick@Sun.COM if (zio->io_gang_tree) 21577754SJeff.Bonwick@Sun.COM return; 21587754SJeff.Bonwick@Sun.COM 2159789Sahrens if (zio->io_error == 0) { 21607754SJeff.Bonwick@Sun.COM /* 21617754SJeff.Bonwick@Sun.COM * If this is a root i/o, don't count it -- we've already 21627754SJeff.Bonwick@Sun.COM * counted the top-level vdevs, and vdev_get_stats() will 21637754SJeff.Bonwick@Sun.COM * aggregate them when asked. This reduces contention on 21647754SJeff.Bonwick@Sun.COM * the root vdev_stat_lock and implicitly handles blocks 21657754SJeff.Bonwick@Sun.COM * that compress away to holes, for which there is no i/o. 21667754SJeff.Bonwick@Sun.COM * (Holes never create vdev children, so all the counters 21677754SJeff.Bonwick@Sun.COM * remain zero, which is what we want.) 21687754SJeff.Bonwick@Sun.COM * 21697754SJeff.Bonwick@Sun.COM * Note: this only applies to successful i/o (io_error == 0) 21707754SJeff.Bonwick@Sun.COM * because unlike i/o counts, errors are not additive. 
21717754SJeff.Bonwick@Sun.COM * When reading a ditto block, for example, failure of 21727754SJeff.Bonwick@Sun.COM * one top-level vdev does not imply a root-level error. 21737754SJeff.Bonwick@Sun.COM */ 21747754SJeff.Bonwick@Sun.COM if (vd == rvd) 21757754SJeff.Bonwick@Sun.COM return; 21767754SJeff.Bonwick@Sun.COM 21777754SJeff.Bonwick@Sun.COM ASSERT(vd == zio->io_vd); 21788241SJeff.Bonwick@Sun.COM 21798241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_IO_BYPASS) 21808241SJeff.Bonwick@Sun.COM return; 21818241SJeff.Bonwick@Sun.COM 21828241SJeff.Bonwick@Sun.COM mutex_enter(&vd->vdev_stat_lock); 21838241SJeff.Bonwick@Sun.COM 21847754SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_IO_REPAIR) { 21851807Sbonwick if (flags & ZIO_FLAG_SCRUB_THREAD) 21867754SJeff.Bonwick@Sun.COM vs->vs_scrub_repaired += psize; 21878241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_SELF_HEAL) 21887754SJeff.Bonwick@Sun.COM vs->vs_self_healed += psize; 2189789Sahrens } 21908241SJeff.Bonwick@Sun.COM 21918241SJeff.Bonwick@Sun.COM vs->vs_ops[type]++; 21928241SJeff.Bonwick@Sun.COM vs->vs_bytes[type] += psize; 21938241SJeff.Bonwick@Sun.COM 21948241SJeff.Bonwick@Sun.COM mutex_exit(&vd->vdev_stat_lock); 2195789Sahrens return; 2196789Sahrens } 2197789Sahrens 2198789Sahrens if (flags & ZIO_FLAG_SPECULATIVE) 2199789Sahrens return; 2200789Sahrens 22019725SEric.Schrock@Sun.COM /* 22029725SEric.Schrock@Sun.COM * If this is an I/O error that is going to be retried, then ignore the 22039725SEric.Schrock@Sun.COM * error. Otherwise, the user may interpret B_FAILFAST I/O errors as 22049725SEric.Schrock@Sun.COM * hard errors, when in reality they can happen for any number of 22059725SEric.Schrock@Sun.COM * innocuous reasons (bus resets, MPxIO link failure, etc). 22069725SEric.Schrock@Sun.COM */ 22079725SEric.Schrock@Sun.COM if (zio->io_error == EIO && 22089725SEric.Schrock@Sun.COM !(zio->io_flags & ZIO_FLAG_IO_RETRY)) 22099725SEric.Schrock@Sun.COM return; 22109725SEric.Schrock@Sun.COM 22117754SJeff.Bonwick@Sun.COM mutex_enter(&vd->vdev_stat_lock); 22129230SGeorge.Wilson@Sun.COM if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) { 22137754SJeff.Bonwick@Sun.COM if (zio->io_error == ECKSUM) 22147754SJeff.Bonwick@Sun.COM vs->vs_checksum_errors++; 22157754SJeff.Bonwick@Sun.COM else 22167754SJeff.Bonwick@Sun.COM vs->vs_read_errors++; 2217789Sahrens } 22189230SGeorge.Wilson@Sun.COM if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd)) 22197754SJeff.Bonwick@Sun.COM vs->vs_write_errors++; 22207754SJeff.Bonwick@Sun.COM mutex_exit(&vd->vdev_stat_lock); 2221789Sahrens 22228241SJeff.Bonwick@Sun.COM if (type == ZIO_TYPE_WRITE && txg != 0 && 22238241SJeff.Bonwick@Sun.COM (!(flags & ZIO_FLAG_IO_REPAIR) || 22248241SJeff.Bonwick@Sun.COM (flags & ZIO_FLAG_SCRUB_THREAD))) { 22258241SJeff.Bonwick@Sun.COM /* 22268241SJeff.Bonwick@Sun.COM * This is either a normal write (not a repair), or it's a 22278241SJeff.Bonwick@Sun.COM * repair induced by the scrub thread. In the normal case, 22288241SJeff.Bonwick@Sun.COM * we commit the DTL change in the same txg as the block 22298241SJeff.Bonwick@Sun.COM * was born. In the scrub-induced repair case, we know that 22308241SJeff.Bonwick@Sun.COM * scrubs run in first-pass syncing context, so we commit 22318241SJeff.Bonwick@Sun.COM * the DTL change in spa->spa_syncing_txg. 
22328241SJeff.Bonwick@Sun.COM *
22338241SJeff.Bonwick@Sun.COM * We currently do not make DTL entries for failed spontaneous
22348241SJeff.Bonwick@Sun.COM * self-healing writes triggered by normal (non-scrubbing)
22358241SJeff.Bonwick@Sun.COM * reads, because we have no transactional context in which to
22368241SJeff.Bonwick@Sun.COM * do so -- and it's not clear that it'd be desirable anyway.
22378241SJeff.Bonwick@Sun.COM */
22388241SJeff.Bonwick@Sun.COM if (vd->vdev_ops->vdev_op_leaf) {
22398241SJeff.Bonwick@Sun.COM uint64_t commit_txg = txg;
22408241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_SCRUB_THREAD) {
22418241SJeff.Bonwick@Sun.COM ASSERT(flags & ZIO_FLAG_IO_REPAIR);
22428241SJeff.Bonwick@Sun.COM ASSERT(spa_sync_pass(spa) == 1);
22438241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
22448241SJeff.Bonwick@Sun.COM commit_txg = spa->spa_syncing_txg;
22458241SJeff.Bonwick@Sun.COM }
22468241SJeff.Bonwick@Sun.COM ASSERT(commit_txg >= spa->spa_syncing_txg);
22478241SJeff.Bonwick@Sun.COM if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
22488241SJeff.Bonwick@Sun.COM return;
22498241SJeff.Bonwick@Sun.COM for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
22508241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
22518241SJeff.Bonwick@Sun.COM vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
2252789Sahrens }
22538241SJeff.Bonwick@Sun.COM if (vd != rvd)
22548241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
2255789Sahrens }
2256789Sahrens }
2257789Sahrens
2258789Sahrens void
2259789Sahrens vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete)
2260789Sahrens {
2261789Sahrens int c;
2262789Sahrens vdev_stat_t *vs = &vd->vdev_stat;
2263789Sahrens
2264789Sahrens for (c = 0; c < vd->vdev_children; c++)
2265789Sahrens vdev_scrub_stat_update(vd->vdev_child[c], type, complete);
2266789Sahrens
2267789Sahrens mutex_enter(&vd->vdev_stat_lock);
2268789Sahrens
2269789Sahrens if (type == POOL_SCRUB_NONE) {
2270789Sahrens /*
2271789Sahrens * Update completion and end time. Leave everything else alone
2272789Sahrens * so we can report what happened during the previous scrub.
2273789Sahrens */
2274789Sahrens vs->vs_scrub_complete = complete;
2275789Sahrens vs->vs_scrub_end = gethrestime_sec();
2276789Sahrens } else {
2277789Sahrens vs->vs_scrub_type = type;
2278789Sahrens vs->vs_scrub_complete = 0;
2279789Sahrens vs->vs_scrub_examined = 0;
2280789Sahrens vs->vs_scrub_repaired = 0;
2281789Sahrens vs->vs_scrub_start = gethrestime_sec();
2282789Sahrens vs->vs_scrub_end = 0;
2283789Sahrens }
2284789Sahrens
2285789Sahrens mutex_exit(&vd->vdev_stat_lock);
2286789Sahrens }
2287789Sahrens
2288789Sahrens /*
2289789Sahrens * Update the in-core space usage stats for this vdev and the root vdev.
2290789Sahrens */
2291789Sahrens void
22925450Sbrendan vdev_space_update(vdev_t *vd, int64_t space_delta, int64_t alloc_delta,
22935450Sbrendan boolean_t update_root)
2294789Sahrens {
22954527Sperrin int64_t dspace_delta = space_delta;
22964527Sperrin spa_t *spa = vd->vdev_spa;
22974527Sperrin vdev_t *rvd = spa->spa_root_vdev;
22984527Sperrin
2299789Sahrens ASSERT(vd == vd->vdev_top);
23004527Sperrin
23014527Sperrin /*
23024527Sperrin * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
23034527Sperrin * factor. We must calculate this here and not at the root vdev
23044527Sperrin * because the root vdev's psize-to-asize is simply the max of its
23054527Sperrin * children's, thus not accurate enough for us.
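 *
 * Illustrative arithmetic (assumed ratio value): a deflate ratio of
 * 512 is the "no expansion" case given SPA_MINBLOCKSHIFT == 9, so a
 * space_delta of 1M yields (1M >> 9) * 512 == 1M of dspace; a raidz
 * vdev's parity overhead gives it a smaller ratio, deflating the
 * delta proportionally.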
23064527Sperrin */
23074527Sperrin ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
23089701SGeorge.Wilson@Sun.COM ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
23094527Sperrin dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
23104527Sperrin vd->vdev_deflate_ratio;
2311789Sahrens
23124527Sperrin mutex_enter(&vd->vdev_stat_lock);
23134527Sperrin vd->vdev_stat.vs_space += space_delta;
23144527Sperrin vd->vdev_stat.vs_alloc += alloc_delta;
23154527Sperrin vd->vdev_stat.vs_dspace += dspace_delta;
23164527Sperrin mutex_exit(&vd->vdev_stat_lock);
23172082Seschrock
23185450Sbrendan if (update_root) {
23195450Sbrendan ASSERT(rvd == vd->vdev_parent);
23205450Sbrendan ASSERT(vd->vdev_ms_count != 0);
23214527Sperrin
23225450Sbrendan /*
23235450Sbrendan * Don't count non-normal (e.g. intent log) space as part of
23245450Sbrendan * the pool's capacity.
23255450Sbrendan */
23265450Sbrendan if (vd->vdev_mg->mg_class != spa->spa_normal_class)
23275450Sbrendan return;
23285450Sbrendan
23295450Sbrendan mutex_enter(&rvd->vdev_stat_lock);
23305450Sbrendan rvd->vdev_stat.vs_space += space_delta;
23315450Sbrendan rvd->vdev_stat.vs_alloc += alloc_delta;
23325450Sbrendan rvd->vdev_stat.vs_dspace += dspace_delta;
23335450Sbrendan mutex_exit(&rvd->vdev_stat_lock);
23345450Sbrendan }
2335789Sahrens }
2336789Sahrens
2337789Sahrens /*
2338789Sahrens * Mark a top-level vdev's config as dirty, placing it on the dirty list
2339789Sahrens * so that it will be written out next time the vdev configuration is synced.
2340789Sahrens * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
2341789Sahrens */
2342789Sahrens void
2343789Sahrens vdev_config_dirty(vdev_t *vd)
2344789Sahrens {
2345789Sahrens spa_t *spa = vd->vdev_spa;
2346789Sahrens vdev_t *rvd = spa->spa_root_vdev;
2347789Sahrens int c;
2348789Sahrens
23491601Sbonwick /*
23509425SEric.Schrock@Sun.COM * If this is an aux vdev (as with l2cache and spare devices), then we
23519425SEric.Schrock@Sun.COM * update the vdev config manually and set the sync flag.
23526643Seschrock */
23536643Seschrock if (vd->vdev_aux != NULL) {
23546643Seschrock spa_aux_vdev_t *sav = vd->vdev_aux;
23556643Seschrock nvlist_t **aux;
23566643Seschrock uint_t naux;
23576643Seschrock
23586643Seschrock for (c = 0; c < sav->sav_count; c++) {
23596643Seschrock if (sav->sav_vdevs[c] == vd)
23606643Seschrock break;
23616643Seschrock }
23626643Seschrock
23637754SJeff.Bonwick@Sun.COM if (c == sav->sav_count) {
23647754SJeff.Bonwick@Sun.COM /*
23657754SJeff.Bonwick@Sun.COM * We're being removed. There's nothing more to do.
23667754SJeff.Bonwick@Sun.COM */
23677754SJeff.Bonwick@Sun.COM ASSERT(sav->sav_sync == B_TRUE);
23687754SJeff.Bonwick@Sun.COM return;
23697754SJeff.Bonwick@Sun.COM }
23707754SJeff.Bonwick@Sun.COM
23716643Seschrock sav->sav_sync = B_TRUE;
23726643Seschrock
23739425SEric.Schrock@Sun.COM if (nvlist_lookup_nvlist_array(sav->sav_config,
23749425SEric.Schrock@Sun.COM ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
23759425SEric.Schrock@Sun.COM VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
23769425SEric.Schrock@Sun.COM ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
23779425SEric.Schrock@Sun.COM }
23786643Seschrock
23796643Seschrock ASSERT(c < naux);
23806643Seschrock
23816643Seschrock /*
23826643Seschrock * Setting the nvlist in the middle of the array is a little
23836643Seschrock * sketchy, but it will work.
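 *
 * (The lookup above tries ZPOOL_CONFIG_L2CACHE first and falls back
 * to ZPOOL_CONFIG_SPARES; an aux vdev necessarily lives on one of
 * those two lists.)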
23846643Seschrock */ 23856643Seschrock nvlist_free(aux[c]); 23866643Seschrock aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE); 23876643Seschrock 23886643Seschrock return; 23896643Seschrock } 23906643Seschrock 23916643Seschrock /* 23927754SJeff.Bonwick@Sun.COM * The dirty list is protected by the SCL_CONFIG lock. The caller 23937754SJeff.Bonwick@Sun.COM * must either hold SCL_CONFIG as writer, or must be the sync thread 23947754SJeff.Bonwick@Sun.COM * (which holds SCL_CONFIG as reader). There's only one sync thread, 23951601Sbonwick * so this is sufficient to ensure mutual exclusion. 23961601Sbonwick */ 23977754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 23987754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 23997754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_CONFIG, RW_READER))); 24001601Sbonwick 2401789Sahrens if (vd == rvd) { 2402789Sahrens for (c = 0; c < rvd->vdev_children; c++) 2403789Sahrens vdev_config_dirty(rvd->vdev_child[c]); 2404789Sahrens } else { 2405789Sahrens ASSERT(vd == vd->vdev_top); 2406789Sahrens 24077754SJeff.Bonwick@Sun.COM if (!list_link_active(&vd->vdev_config_dirty_node)) 24087754SJeff.Bonwick@Sun.COM list_insert_head(&spa->spa_config_dirty_list, vd); 2409789Sahrens } 2410789Sahrens } 2411789Sahrens 2412789Sahrens void 2413789Sahrens vdev_config_clean(vdev_t *vd) 2414789Sahrens { 24151601Sbonwick spa_t *spa = vd->vdev_spa; 24161601Sbonwick 24177754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 24187754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 24197754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_CONFIG, RW_READER))); 24207754SJeff.Bonwick@Sun.COM 24217754SJeff.Bonwick@Sun.COM ASSERT(list_link_active(&vd->vdev_config_dirty_node)); 24227754SJeff.Bonwick@Sun.COM list_remove(&spa->spa_config_dirty_list, vd); 24237754SJeff.Bonwick@Sun.COM } 24247754SJeff.Bonwick@Sun.COM 24257754SJeff.Bonwick@Sun.COM /* 24267754SJeff.Bonwick@Sun.COM * Mark a top-level vdev's state as dirty, so that the next pass of 24277754SJeff.Bonwick@Sun.COM * spa_sync() can convert this into vdev_config_dirty(). We distinguish 24287754SJeff.Bonwick@Sun.COM * the state changes from larger config changes because they require 24297754SJeff.Bonwick@Sun.COM * much less locking, and are often needed for administrative actions. 24307754SJeff.Bonwick@Sun.COM */ 24317754SJeff.Bonwick@Sun.COM void 24327754SJeff.Bonwick@Sun.COM vdev_state_dirty(vdev_t *vd) 24337754SJeff.Bonwick@Sun.COM { 24347754SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 24357754SJeff.Bonwick@Sun.COM 24367754SJeff.Bonwick@Sun.COM ASSERT(vd == vd->vdev_top); 24371601Sbonwick 24387754SJeff.Bonwick@Sun.COM /* 24397754SJeff.Bonwick@Sun.COM * The state list is protected by the SCL_STATE lock. The caller 24407754SJeff.Bonwick@Sun.COM * must either hold SCL_STATE as writer, or must be the sync thread 24417754SJeff.Bonwick@Sun.COM * (which holds SCL_STATE as reader). There's only one sync thread, 24427754SJeff.Bonwick@Sun.COM * so this is sufficient to ensure mutual exclusion. 
24437754SJeff.Bonwick@Sun.COM */ 24447754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 24457754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 24467754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_STATE, RW_READER))); 24477754SJeff.Bonwick@Sun.COM 24487754SJeff.Bonwick@Sun.COM if (!list_link_active(&vd->vdev_state_dirty_node)) 24497754SJeff.Bonwick@Sun.COM list_insert_head(&spa->spa_state_dirty_list, vd); 24507754SJeff.Bonwick@Sun.COM } 24517754SJeff.Bonwick@Sun.COM 24527754SJeff.Bonwick@Sun.COM void 24537754SJeff.Bonwick@Sun.COM vdev_state_clean(vdev_t *vd) 24547754SJeff.Bonwick@Sun.COM { 24557754SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 24567754SJeff.Bonwick@Sun.COM 24577754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 24587754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 24597754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_STATE, RW_READER))); 24607754SJeff.Bonwick@Sun.COM 24617754SJeff.Bonwick@Sun.COM ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 24627754SJeff.Bonwick@Sun.COM list_remove(&spa->spa_state_dirty_list, vd); 2463789Sahrens } 2464789Sahrens 24656523Sek110237 /* 24666523Sek110237 * Propagate vdev state up from children to parent. 24676523Sek110237 */ 24681775Sbillm void 24691775Sbillm vdev_propagate_state(vdev_t *vd) 24701775Sbillm { 24718241SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 24728241SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 24731775Sbillm int degraded = 0, faulted = 0; 24741775Sbillm int corrupted = 0; 24751775Sbillm int c; 24761775Sbillm vdev_t *child; 24771775Sbillm 24784451Seschrock if (vd->vdev_children > 0) { 24794451Seschrock for (c = 0; c < vd->vdev_children; c++) { 24804451Seschrock child = vd->vdev_child[c]; 24816976Seschrock 24827754SJeff.Bonwick@Sun.COM if (!vdev_readable(child) || 24838241SJeff.Bonwick@Sun.COM (!vdev_writeable(child) && spa_writeable(spa))) { 24846976Seschrock /* 24856976Seschrock * Root special: if there is a top-level log 24866976Seschrock * device, treat the root vdev as if it were 24876976Seschrock * degraded. 24886976Seschrock */ 24896976Seschrock if (child->vdev_islog && vd == rvd) 24906976Seschrock degraded++; 24916976Seschrock else 24926976Seschrock faulted++; 24936976Seschrock } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { 24944451Seschrock degraded++; 24956976Seschrock } 24964451Seschrock 24974451Seschrock if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) 24984451Seschrock corrupted++; 24994451Seschrock } 25001775Sbillm 25014451Seschrock vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); 25024451Seschrock 25034451Seschrock /* 25047754SJeff.Bonwick@Sun.COM * Root special: if there is a top-level vdev that cannot be 25054451Seschrock * opened due to corrupted metadata, then propagate the root 25064451Seschrock * vdev's aux state as 'corrupt' rather than 'insufficient 25074451Seschrock * replicas'. 25084451Seschrock */ 25094451Seschrock if (corrupted && vd == rvd && 25104451Seschrock rvd->vdev_state == VDEV_STATE_CANT_OPEN) 25114451Seschrock vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, 25124451Seschrock VDEV_AUX_CORRUPT_DATA); 25131775Sbillm } 25141775Sbillm 25156976Seschrock if (vd->vdev_parent) 25164451Seschrock vdev_propagate_state(vd->vdev_parent); 25171775Sbillm } 25181775Sbillm 2519789Sahrens /* 25201544Seschrock * Set a vdev's state. 
If this is during an open, we don't update the parent 25211544Seschrock * state, because we're in the process of opening children depth-first. 25221544Seschrock * Otherwise, we propagate the change to the parent. 25231544Seschrock * 25241544Seschrock * If this routine places a device in a faulted state, an appropriate ereport is 25251544Seschrock * generated. 2526789Sahrens */ 2527789Sahrens void 25281544Seschrock vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) 2529789Sahrens { 25301986Seschrock uint64_t save_state; 25316643Seschrock spa_t *spa = vd->vdev_spa; 25321544Seschrock 25331544Seschrock if (state == vd->vdev_state) { 25341544Seschrock vd->vdev_stat.vs_aux = aux; 2535789Sahrens return; 25361544Seschrock } 25371544Seschrock 25381986Seschrock save_state = vd->vdev_state; 2539789Sahrens 2540789Sahrens vd->vdev_state = state; 2541789Sahrens vd->vdev_stat.vs_aux = aux; 2542789Sahrens 25434451Seschrock /* 25444451Seschrock * If we are setting the vdev state to anything but an open state, then 25454451Seschrock * always close the underlying device. Otherwise, we keep accessible 25464451Seschrock * but invalid devices open forever. We don't call vdev_close() itself, 25474451Seschrock * because that implies some extra checks (offline, etc) that we don't 25484451Seschrock * want here. This is limited to leaf devices, because otherwise 25494451Seschrock * closing the device will affect other children. 25504451Seschrock */ 25517780SJeff.Bonwick@Sun.COM if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf) 25524451Seschrock vd->vdev_ops->vdev_op_close(vd); 25534451Seschrock 25544451Seschrock if (vd->vdev_removed && 25554451Seschrock state == VDEV_STATE_CANT_OPEN && 25564451Seschrock (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { 25574451Seschrock /* 25584451Seschrock * If the previous state is set to VDEV_STATE_REMOVED, then this 25594451Seschrock * device was previously marked removed and someone attempted to 25604451Seschrock * reopen it. If this failed due to a nonexistent device, then 25614451Seschrock * keep the device in the REMOVED state. We also let this be if 25624451Seschrock * it is one of our special test online cases, which is only 25634451Seschrock * attempting to online the device and shouldn't generate an FMA 25644451Seschrock * fault. 25654451Seschrock */ 25664451Seschrock vd->vdev_state = VDEV_STATE_REMOVED; 25674451Seschrock vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 25684451Seschrock } else if (state == VDEV_STATE_REMOVED) { 25694451Seschrock /* 25704451Seschrock * Indicate to the ZFS DE that this device has been removed, and 25714451Seschrock * any recent errors should be ignored. 25724451Seschrock */ 25736643Seschrock zfs_post_remove(spa, vd); 25744451Seschrock vd->vdev_removed = B_TRUE; 25754451Seschrock } else if (state == VDEV_STATE_CANT_OPEN) { 25761544Seschrock /* 25771544Seschrock * If we fail to open a vdev during an import, we mark it as 25781544Seschrock * "not available", which signifies that it was never there to 25791544Seschrock * begin with. Failure to open such a device is not considered 25801544Seschrock * an error. 25811544Seschrock */ 25826643Seschrock if (spa->spa_load_state == SPA_LOAD_IMPORT && 25831986Seschrock vd->vdev_ops->vdev_op_leaf) 25841986Seschrock vd->vdev_not_present = 1; 25851986Seschrock 25861986Seschrock /* 25871986Seschrock * Post the appropriate ereport. 
If the 'prevstate' field is 25881986Seschrock * set to something other than VDEV_STATE_UNKNOWN, it indicates 25891986Seschrock * that this is part of a vdev_reopen(). In this case, we don't 25901986Seschrock * want to post the ereport if the device was already in the 25911986Seschrock * CANT_OPEN state beforehand. 25924451Seschrock * 25934451Seschrock * If the 'checkremove' flag is set, then this is an attempt to 25944451Seschrock * online the device in response to an insertion event. If we 25954451Seschrock * hit this case, then we have detected an insertion event for a 25964451Seschrock * faulted or offline device that wasn't in the removed state. 25974451Seschrock * In this scenario, we don't post an ereport because we are 25984451Seschrock * about to replace the device, or attempt an online with 25994451Seschrock * vdev_forcefault, which will generate the fault for us. 26001986Seschrock */ 26014451Seschrock if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && 26024451Seschrock !vd->vdev_not_present && !vd->vdev_checkremove && 26036643Seschrock vd != spa->spa_root_vdev) { 26041544Seschrock const char *class; 26051544Seschrock 26061544Seschrock switch (aux) { 26071544Seschrock case VDEV_AUX_OPEN_FAILED: 26081544Seschrock class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; 26091544Seschrock break; 26101544Seschrock case VDEV_AUX_CORRUPT_DATA: 26111544Seschrock class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; 26121544Seschrock break; 26131544Seschrock case VDEV_AUX_NO_REPLICAS: 26141544Seschrock class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; 26151544Seschrock break; 26161544Seschrock case VDEV_AUX_BAD_GUID_SUM: 26171544Seschrock class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; 26181544Seschrock break; 26191544Seschrock case VDEV_AUX_TOO_SMALL: 26201544Seschrock class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; 26211544Seschrock break; 26221544Seschrock case VDEV_AUX_BAD_LABEL: 26231544Seschrock class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; 26241544Seschrock break; 26257754SJeff.Bonwick@Sun.COM case VDEV_AUX_IO_FAILURE: 26267754SJeff.Bonwick@Sun.COM class = FM_EREPORT_ZFS_IO_FAILURE; 26277754SJeff.Bonwick@Sun.COM break; 26281544Seschrock default: 26291544Seschrock class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; 26301544Seschrock } 26311544Seschrock 26326643Seschrock zfs_ereport_post(class, spa, vd, NULL, save_state, 0); 26331544Seschrock } 26344451Seschrock 26354451Seschrock /* Erase any notion of persistent removed state */ 26364451Seschrock vd->vdev_removed = B_FALSE; 26374451Seschrock } else { 26384451Seschrock vd->vdev_removed = B_FALSE; 26391544Seschrock } 26401544Seschrock 26419583STim.Haley@Sun.COM if (!isopen && vd->vdev_parent) 26429583STim.Haley@Sun.COM vdev_propagate_state(vd->vdev_parent); 2643789Sahrens } 26447042Sgw25295 26457042Sgw25295 /* 26467042Sgw25295 * Check the vdev configuration to ensure that it's capable of supporting 26477042Sgw25295 * a root pool. Currently, we do not support RAID-Z or partial configuration. 26487042Sgw25295 * In addition, only a single top-level vdev is allowed and none of the leaves 26497042Sgw25295 * can be wholedisks. 
26507042Sgw25295 */ 26517042Sgw25295 boolean_t 26527042Sgw25295 vdev_is_bootable(vdev_t *vd) 26537042Sgw25295 { 26547042Sgw25295 int c; 26557042Sgw25295 26567042Sgw25295 if (!vd->vdev_ops->vdev_op_leaf) { 26577042Sgw25295 char *vdev_type = vd->vdev_ops->vdev_op_type; 26587042Sgw25295 26597042Sgw25295 if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 && 26607042Sgw25295 vd->vdev_children > 1) { 26617042Sgw25295 return (B_FALSE); 26627042Sgw25295 } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 || 26637042Sgw25295 strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) { 26647042Sgw25295 return (B_FALSE); 26657042Sgw25295 } 26667042Sgw25295 } else if (vd->vdev_wholedisk == 1) { 26677042Sgw25295 return (B_FALSE); 26687042Sgw25295 } 26697042Sgw25295 26707042Sgw25295 for (c = 0; c < vd->vdev_children; c++) { 26717042Sgw25295 if (!vdev_is_bootable(vd->vdev_child[c])) 26727042Sgw25295 return (B_FALSE); 26737042Sgw25295 } 26747042Sgw25295 return (B_TRUE); 26757042Sgw25295 } 26769701SGeorge.Wilson@Sun.COM 26779701SGeorge.Wilson@Sun.COM void 26789701SGeorge.Wilson@Sun.COM vdev_load_log_state(vdev_t *vd, nvlist_t *nv) 26799701SGeorge.Wilson@Sun.COM { 26809701SGeorge.Wilson@Sun.COM uint_t c, children; 26819701SGeorge.Wilson@Sun.COM nvlist_t **child; 26829701SGeorge.Wilson@Sun.COM uint64_t val; 26839701SGeorge.Wilson@Sun.COM spa_t *spa = vd->vdev_spa; 26849701SGeorge.Wilson@Sun.COM 26859701SGeorge.Wilson@Sun.COM if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 26869701SGeorge.Wilson@Sun.COM &child, &children) == 0) { 26879701SGeorge.Wilson@Sun.COM for (c = 0; c < children; c++) 26889701SGeorge.Wilson@Sun.COM vdev_load_log_state(vd->vdev_child[c], child[c]); 26899701SGeorge.Wilson@Sun.COM } 26909701SGeorge.Wilson@Sun.COM 26919701SGeorge.Wilson@Sun.COM if (vd->vdev_ops->vdev_op_leaf && nvlist_lookup_uint64(nv, 26929701SGeorge.Wilson@Sun.COM ZPOOL_CONFIG_OFFLINE, &val) == 0 && val) { 26939701SGeorge.Wilson@Sun.COM 26949701SGeorge.Wilson@Sun.COM /* 26959701SGeorge.Wilson@Sun.COM * It would be nice to call vdev_offline() 26969701SGeorge.Wilson@Sun.COM * directly but the pool isn't fully loaded and 26979701SGeorge.Wilson@Sun.COM * the txg threads have not been started yet. 26989701SGeorge.Wilson@Sun.COM */ 26999701SGeorge.Wilson@Sun.COM spa_config_enter(spa, SCL_STATE_ALL, FTAG, RW_WRITER); 27009701SGeorge.Wilson@Sun.COM vd->vdev_offline = val; 27019701SGeorge.Wilson@Sun.COM vdev_reopen(vd->vdev_top); 27029701SGeorge.Wilson@Sun.COM spa_config_exit(spa, SCL_STATE_ALL, FTAG); 27039701SGeorge.Wilson@Sun.COM } 27049701SGeorge.Wilson@Sun.COM } 2705