/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	NULL
};

/* maximum scrub/resilver I/O queue per leaf vdev */
int zfs_scrub_limit = 10;

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

/*
 * Get the minimum allocatable size.  We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab.  This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	/*
	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
	 * so each child must provide at least 1/Nth of its asize.
	 */
	if (pvd->vdev_ops == &vdev_raidz_ops)
		return (pvd->vdev_min_asize / pvd->vdev_children);

	return (pvd->vdev_min_asize);
}

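/*
 * Recursively derive and cache the minimum allocatable size for this vdev
 * and all of its children.
 */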
void
vdev_set_min_asize(vdev_t *vd)
{
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
}

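/*
 * Unlink cvd from its parent pvd, updating ancestor guid sums and freeing
 * the child array once the last child is removed.
 */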
void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (int c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (int c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
static vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
	}

	if (guid == 0) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			while (guid == 0 || spa_guid_exists(guid, 0))
				guid = spa_get_random(-1ULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			while (guid == 0 ||
			    spa_guid_exists(spa_guid(spa), guid))
				guid = spa_get_random(-1ULL);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
		    &vd->vdev_dtl_lock);
	}
	txg_list_create(&vd->vdev_ms_list,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EINVAL);

	if ((ops = vdev_getops(type)) == NULL)
		return (EINVAL);

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (EINVAL);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (EINVAL);

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (ENOTSUP);

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			/*
			 * Currently, we can only support 2 parity devices.
			 */
			if (nparity == 0 || nparity > 2)
				return (EINVAL);
			/*
			 * Older versions can only support 1 parity device.
			 */
			if (nparity == 2 &&
			    spa_version(spa) < SPA_VERSION_RAID6)
				return (ENOTSUP);
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAID6)
				return (EINVAL);
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_smo.smo_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		/*
		 * When importing a pool, we want to ignore the persistent fault
		 * state, as the diagnosis made on another system may not be
		 * valid in the current context.
		 */
		if (spa->spa_load_state == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

void
vdev_free(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));

	/*
	 * Free all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd == vd->vdev_top)
		vdev_metaslab_fini(vd);

	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);
	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_unload(&vd->vdev_dtl[t]);
		space_map_destroy(&vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

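/*
 * Point vd and all of its descendants at the new top-level vdev tvd.
 */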
static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_min_asize = cvd->vdev_min_asize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

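/*
 * Create or grow the metaslab array for a top-level vdev.  For each new
 * metaslab, read its space map object from the MOS if we're loading
 * (txg == 0) and initialize the metaslab.
 */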
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	metaslab_class_t *mc;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	if (vd->vdev_ms_shift == 0)	/* not being allocated from yet */
		return (0);

	/*
	 * Compute the raidz-deflation ratio.  Note, we hard-code
	 * in 128k (1 << 17) because it is the current "typical" blocksize.
	 * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change,
	 * or we will inconsistently account for existing bp's.
	 */
	vd->vdev_deflate_ratio = (1 << 17) /
	    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);

	ASSERT(oldc <= newc);

	if (vd->vdev_islog)
		mc = spa->spa_log_class;
	else
		mc = spa->spa_normal_class;

	if (vd->vdev_mg == NULL)
		vd->vdev_mg = metaslab_group_create(mc, vd);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (m = oldc; m < newc; m++) {
		space_map_obj_t smo = { 0, 0, 0 };
		if (txg == 0) {
			uint64_t object = 0;
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error)
				return (error);
			if (object != 0) {
				dmu_buf_t *db;
				error = dmu_bonus_hold(mos, object, FTAG, &db);
				if (error)
					return (error);
				ASSERT3U(db->db_size, >=, sizeof (smo));
				bcopy(db->db_data, &smo, sizeof (smo));
				ASSERT3U(smo.smo_object, ==, object);
				dmu_buf_rele(db, FTAG);
			}
		}
		vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
		    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
	}

	return (0);
}

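/*
 * Tear down all metaslabs for this vdev and free the metaslab array.
 */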
void
vdev_metaslab_fini(vdev_t *vd)
{
	uint64_t m;
	uint64_t count = vd->vdev_ms_count;

	if (vd->vdev_ms != NULL) {
		for (m = 0; m < count; m++)
			if (vd->vdev_ms[m] != NULL)
				metaslab_fini(vd->vdev_ms[m]);
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
	}
}

typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset, zio->io_size, zio->io_data,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			zio_buf_free(zio->io_data, zio->io_size);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		zio_buf_free(zio->io_data, zio->io_size);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, 0, 0);
			zio->io_error = ENXIO;
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		while ((pio = zio_walk_parents(zio)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = ENXIO;

		kmem_free(vps, sizeof (*vps));
	}
}

/*
 * Determine whether this device is accessible by reading and writing
 * to several known locations: the pad regions of each vdev label
 * but the first (which we leave alone in case it contains a VTOC).
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time.  All zios that want to probe
	 * this vdev will become parents of the probe io.
	 */
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_TRYHARD;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE.  This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O.  That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad2)),
		    VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}

static void
vdev_open_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_open_thread = curthread;
	vd->vdev_open_error = vdev_open(vd);
	vd->vdev_open_thread = NULL;
}

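/*
 * Open the children of vd in parallel using a taskq with one thread per
 * child; taskq_destroy() waits for all of the dispatched opens to complete.
 */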
void
vdev_open_children(vdev_t *vd)
{
	taskq_t *tq;
	int children = vd->vdev_children;

	tq = taskq_create("vdev_open", children, minclsyspri,
	    children, children, TASKQ_PREPOPULATE);

	for (int c = 0; c < children; c++)
		VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
		    TQ_SLEEP) != NULL);

	taskq_destroy(tq);
}

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	uint64_t osize = 0;
	uint64_t asize, psize;
	uint64_t ashift = 0;

	ASSERT(vd->vdev_open_thread == curthread ||
	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	vd->vdev_cant_read = B_FALSE;
	vd->vdev_cant_write = B_FALSE;
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    VDEV_AUX_ERR_EXCEEDED);
		return (ENXIO);
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (ENXIO);
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);

	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, NULL, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    vd->vdev_stat.vs_aux);
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vd->vdev_state = VDEV_STATE_HEALTHY;
	}

	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}
	}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = 0;
		asize = osize;
	}

	vd->vdev_psize = psize;

	/*
	 * Make sure the allocatable size hasn't shrunk.
	 */
	if (asize < vd->vdev_min_asize) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LABEL);
		return (EINVAL);
	}

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For testing purposes, a higher ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
	} else {
		/*
		 * Make sure the alignment requirement hasn't increased.
		 */
		if (ashift > vd->vdev_top->vdev_ashift) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}
	}

	/*
	 * If all children are healthy and the asize has increased,
	 * then we've experienced dynamic LUN growth.  If automatic
	 * expansion is enabled then use the additional space.
	 */
	if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize &&
	    (vd->vdev_expanding || spa->spa_autoexpand))
		vd->vdev_asize = asize;

	vdev_set_min_asize(vd);

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_IO_FAILURE);
		return (error);
	}

	/*
	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
	 * resilver.  But don't do this if we are doing a reopen for a scrub,
	 * since this would just restart the scrub we are already doing.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
	    vdev_resilver_needed(vd, NULL, NULL))
		spa_async_request(spa, SPA_ASYNC_RESILVER);

	return (0);
}

/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *label;
	uint64_t guid, top_guid;
	uint64_t state;

	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c]) != 0)
			return (EBADF);

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {

		if ((label = vdev_label_read_config(vd)) == NULL) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
		    &guid) != 0 || guid != spa_guid(spa)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		/*
		 * If this vdev just became a top-level vdev because its
		 * sibling was detached, it will have adopted the parent's
		 * vdev guid -- but the label may or may not be on disk yet.
		 * Fortunately, either version of the label will have the
		 * same top guid, so if we're a top-level vdev, we can
		 * safely compare to that instead.
		 */
		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
		    &guid) != 0 ||
		    nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
		    &top_guid) != 0 ||
		    (vd->vdev_guid != guid &&
		    (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		nvlist_free(label);

		if (spa->spa_load_state == SPA_LOAD_OPEN &&
		    state != POOL_STATE_ACTIVE)
			return (EBADF);

		/*
		 * If we were able to open and validate a vdev that was
		 * previously marked permanently unavailable, clear that state
		 * now.
		 */
		if (vd->vdev_not_present)
			vd->vdev_not_present = 0;
	}

	return (0);
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vd->vdev_ops->vdev_op_close(vd);

	vdev_cache_purge(vd);

	/*
	 * We record the previous state before we close it, so that if we are
	 * doing a reopen(), we don't generate FMA ereports if we notice that
	 * it's still faulted.
	 */
	vd->vdev_prevstate = vd->vdev_state;

	if (vd->vdev_offline)
		vd->vdev_state = VDEV_STATE_OFFLINE;
	else
		vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}

void
vdev_reopen(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vdev_close(vd);
	(void) vdev_open(vd);

	/*
	 * Call vdev_validate() here to make sure we have the same device.
	 * Otherwise, a device with an invalid label could be successfully
	 * opened in response to vdev_reopen().
	 */
	if (vd->vdev_aux) {
		(void) vdev_validate_aux(vd);
		if (vdev_readable(vd) && vdev_writeable(vd) &&
		    vd->vdev_aux == &spa->spa_l2cache &&
		    !l2arc_vdev_present(vd))
			l2arc_add_vdev(spa, vd);
	} else {
		(void) vdev_validate(vd);
	}

	/*
	 * Reassess parent vdev's health.
	 */
	vdev_propagate_state(vd);
}

int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
	int error;

	/*
	 * Normally, partial opens (e.g. of a mirror) are allowed.
	 * For a create, however, we want to fail the request if
	 * there are any components we can't open.
	 */
	error = vdev_open(vd);

	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
		vdev_close(vd);
		return (error ? error : ENXIO);
	}

	/*
	 * Recursively initialize all labels.
	 */
	if ((error = vdev_label_init(vd, txg, isreplacing ?
	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
		vdev_close(vd);
		return (error);
	}

	return (0);
}

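/*
 * Pick the metaslab size (vdev_ms_shift) for this vdev based on its asize.
 */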
void
vdev_metaslab_set_size(vdev_t *vd)
{
	/*
	 * Aim for roughly 200 metaslabs per vdev.
	 */
	vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
}

void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
	ASSERT(vd == vd->vdev_top);
	ASSERT(ISP2(flags));

	if (flags & VDD_METASLAB)
		(void) txg_list_add(&vd->vdev_ms_list, arg, txg);

	if (flags & VDD_DTL)
		(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);

	(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}

/*
 * DTLs.
 *
 * A vdev's DTL (dirty time log) is the set of transaction groups for which
 * the vdev has less than perfect replication.  There are four kinds of DTL:
 *
 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
 *
 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
 *
 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
 *	scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
 *	txgs that was scrubbed.
 *
 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
 *	persistent errors or just some device being offline.
 *	Unlike the other three, the DTL_OUTAGE map is not generally
 *	maintained; it's only computed when needed, typically to
 *	determine whether a device can be detached.
 *
 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
 * either has the data or it doesn't.
 *
 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
 * if any child is less than fully replicated, then so is its parent.
 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
 * comprising only those txgs which appear in 'maxfaults' or more children;
 * those are the txgs we don't have enough replication to read.  For example,
For example, 14048241SJeff.Bonwick@Sun.COM * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2); 14058241SJeff.Bonwick@Sun.COM * thus, its DTL_MISSING consists of the set of txgs that appear in more than 14068241SJeff.Bonwick@Sun.COM * two child DTL_MISSING maps. 14078241SJeff.Bonwick@Sun.COM * 14088241SJeff.Bonwick@Sun.COM * It should be clear from the above that to compute the DTLs and outage maps 14098241SJeff.Bonwick@Sun.COM * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps. 14108241SJeff.Bonwick@Sun.COM * Therefore, that is all we keep on disk. When loading the pool, or after 14118241SJeff.Bonwick@Sun.COM * a configuration change, we generate all other DTLs from first principles. 14128241SJeff.Bonwick@Sun.COM */ 1413789Sahrens void 14148241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 1415789Sahrens { 14168241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[t]; 14178241SJeff.Bonwick@Sun.COM 14188241SJeff.Bonwick@Sun.COM ASSERT(t < DTL_TYPES); 14198241SJeff.Bonwick@Sun.COM ASSERT(vd != vd->vdev_spa->spa_root_vdev); 14208241SJeff.Bonwick@Sun.COM 1421789Sahrens mutex_enter(sm->sm_lock); 1422789Sahrens if (!space_map_contains(sm, txg, size)) 1423789Sahrens space_map_add(sm, txg, size); 1424789Sahrens mutex_exit(sm->sm_lock); 1425789Sahrens } 1426789Sahrens 14278241SJeff.Bonwick@Sun.COM boolean_t 14288241SJeff.Bonwick@Sun.COM vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 1429789Sahrens { 14308241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[t]; 14318241SJeff.Bonwick@Sun.COM boolean_t dirty = B_FALSE; 14328241SJeff.Bonwick@Sun.COM 14338241SJeff.Bonwick@Sun.COM ASSERT(t < DTL_TYPES); 14348241SJeff.Bonwick@Sun.COM ASSERT(vd != vd->vdev_spa->spa_root_vdev); 1435789Sahrens 1436789Sahrens mutex_enter(sm->sm_lock); 14378241SJeff.Bonwick@Sun.COM if (sm->sm_space != 0) 14388241SJeff.Bonwick@Sun.COM dirty = space_map_contains(sm, txg, size); 1439789Sahrens mutex_exit(sm->sm_lock); 1440789Sahrens 1441789Sahrens return (dirty); 1442789Sahrens } 1443789Sahrens 14448241SJeff.Bonwick@Sun.COM boolean_t 14458241SJeff.Bonwick@Sun.COM vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t) 14468241SJeff.Bonwick@Sun.COM { 14478241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[t]; 14488241SJeff.Bonwick@Sun.COM boolean_t empty; 14498241SJeff.Bonwick@Sun.COM 14508241SJeff.Bonwick@Sun.COM mutex_enter(sm->sm_lock); 14518241SJeff.Bonwick@Sun.COM empty = (sm->sm_space == 0); 14528241SJeff.Bonwick@Sun.COM mutex_exit(sm->sm_lock); 14538241SJeff.Bonwick@Sun.COM 14548241SJeff.Bonwick@Sun.COM return (empty); 14558241SJeff.Bonwick@Sun.COM } 14568241SJeff.Bonwick@Sun.COM 1457789Sahrens /* 1458789Sahrens * Reassess DTLs after a config change or scrub completion. 
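 * For a leaf vdev this folds a completed scrub into DTL_MISSING (see the
 * excision trick described below), rebuilds DTL_PARTIAL and DTL_OUTAGE from
 * it, and dirties the top-level vdev so the new DTL gets synced out. For an
 * interior vdev it recomputes each DTL type from the children's maps using
 * the appropriate minref.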
1459789Sahrens */
1460789Sahrens void
1461789Sahrens vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
1462789Sahrens {
14631544Seschrock spa_t *spa = vd->vdev_spa;
14648241SJeff.Bonwick@Sun.COM avl_tree_t reftree;
14658241SJeff.Bonwick@Sun.COM int minref;
14668241SJeff.Bonwick@Sun.COM
14678241SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
14688241SJeff.Bonwick@Sun.COM
14698241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++)
14708241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(vd->vdev_child[c], txg,
14718241SJeff.Bonwick@Sun.COM scrub_txg, scrub_done);
14728241SJeff.Bonwick@Sun.COM
14738241SJeff.Bonwick@Sun.COM if (vd == spa->spa_root_vdev)
14748241SJeff.Bonwick@Sun.COM return;
14758241SJeff.Bonwick@Sun.COM
14768241SJeff.Bonwick@Sun.COM if (vd->vdev_ops->vdev_op_leaf) {
1477789Sahrens mutex_enter(&vd->vdev_dtl_lock);
14787046Sahrens if (scrub_txg != 0 &&
14797046Sahrens (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) {
14807046Sahrens /* XXX should check scrub_done? */
14817046Sahrens /*
14827046Sahrens * We completed a scrub up to scrub_txg. If we
14837046Sahrens * did it without rebooting, then the scrub dtl
14847046Sahrens * will be valid, so excise the old region and
14857046Sahrens * fold in the scrub dtl. Otherwise, leave the
14867046Sahrens * dtl as-is if there was an error.
14878241SJeff.Bonwick@Sun.COM *
14888241SJeff.Bonwick@Sun.COM * There's a little trick here: to excise the beginning
14898241SJeff.Bonwick@Sun.COM * of the DTL_MISSING map, we put it into a reference
14908241SJeff.Bonwick@Sun.COM * tree and then add a segment with refcnt -1 that
14918241SJeff.Bonwick@Sun.COM * covers the range [0, scrub_txg). This means
14928241SJeff.Bonwick@Sun.COM * that each txg in that range has refcnt -1 or 0.
14938241SJeff.Bonwick@Sun.COM * We then add DTL_SCRUB with a refcnt of 2, so that
14948241SJeff.Bonwick@Sun.COM * entries in the range [0, scrub_txg) will have a
14958241SJeff.Bonwick@Sun.COM * positive refcnt -- either 1 or 2. We then convert
14968241SJeff.Bonwick@Sun.COM * the reference tree into the new DTL_MISSING map.
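 *
 * For example, suppose DTL_MISSING covers txgs [10, 50), we scrubbed up
 * to txg 40 (scrub_txg == 40), and txgs [10, 20) could not be repaired
 * (they remain in DTL_SCRUB). The reference tree then holds +1 over
 * [10, 50), -1 over [0, 40) and +2 over [10, 20). Txgs [20, 40) net to 0
 * and are excised; [10, 20) nets to 2 and [40, 50) nets to 1, so both
 * survive into the regenerated DTL_MISSING (minref of 1).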
14977046Sahrens */ 14988241SJeff.Bonwick@Sun.COM space_map_ref_create(&reftree); 14998241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree, 15008241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_MISSING], 1); 15018241SJeff.Bonwick@Sun.COM space_map_ref_add_seg(&reftree, 0, scrub_txg, -1); 15028241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree, 15038241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_SCRUB], 2); 15048241SJeff.Bonwick@Sun.COM space_map_ref_generate_map(&reftree, 15058241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_MISSING], 1); 15068241SJeff.Bonwick@Sun.COM space_map_ref_destroy(&reftree); 1507789Sahrens } 15088241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); 15098241SJeff.Bonwick@Sun.COM space_map_walk(&vd->vdev_dtl[DTL_MISSING], 15108241SJeff.Bonwick@Sun.COM space_map_add, &vd->vdev_dtl[DTL_PARTIAL]); 1511789Sahrens if (scrub_done) 15128241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL); 15138241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); 15148241SJeff.Bonwick@Sun.COM if (!vdev_readable(vd)) 15158241SJeff.Bonwick@Sun.COM space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); 15168241SJeff.Bonwick@Sun.COM else 15178241SJeff.Bonwick@Sun.COM space_map_walk(&vd->vdev_dtl[DTL_MISSING], 15188241SJeff.Bonwick@Sun.COM space_map_add, &vd->vdev_dtl[DTL_OUTAGE]); 1519789Sahrens mutex_exit(&vd->vdev_dtl_lock); 15207046Sahrens 15211732Sbonwick if (txg != 0) 15221732Sbonwick vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); 1523789Sahrens return; 1524789Sahrens } 1525789Sahrens 1526789Sahrens mutex_enter(&vd->vdev_dtl_lock); 15278241SJeff.Bonwick@Sun.COM for (int t = 0; t < DTL_TYPES; t++) { 15288241SJeff.Bonwick@Sun.COM if (t == DTL_SCRUB) 15298241SJeff.Bonwick@Sun.COM continue; /* leaf vdevs only */ 15308241SJeff.Bonwick@Sun.COM if (t == DTL_PARTIAL) 15318241SJeff.Bonwick@Sun.COM minref = 1; /* i.e. 
non-zero */ 15328241SJeff.Bonwick@Sun.COM else if (vd->vdev_nparity != 0) 15338241SJeff.Bonwick@Sun.COM minref = vd->vdev_nparity + 1; /* RAID-Z */ 15348241SJeff.Bonwick@Sun.COM else 15358241SJeff.Bonwick@Sun.COM minref = vd->vdev_children; /* any kind of mirror */ 15368241SJeff.Bonwick@Sun.COM space_map_ref_create(&reftree); 15378241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) { 15388241SJeff.Bonwick@Sun.COM vdev_t *cvd = vd->vdev_child[c]; 15398241SJeff.Bonwick@Sun.COM mutex_enter(&cvd->vdev_dtl_lock); 15408241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree, &cvd->vdev_dtl[t], 1); 15418241SJeff.Bonwick@Sun.COM mutex_exit(&cvd->vdev_dtl_lock); 15428241SJeff.Bonwick@Sun.COM } 15438241SJeff.Bonwick@Sun.COM space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref); 15448241SJeff.Bonwick@Sun.COM space_map_ref_destroy(&reftree); 15458241SJeff.Bonwick@Sun.COM } 1546789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1547789Sahrens } 1548789Sahrens 1549789Sahrens static int 1550789Sahrens vdev_dtl_load(vdev_t *vd) 1551789Sahrens { 1552789Sahrens spa_t *spa = vd->vdev_spa; 15538241SJeff.Bonwick@Sun.COM space_map_obj_t *smo = &vd->vdev_dtl_smo; 15541732Sbonwick objset_t *mos = spa->spa_meta_objset; 1555789Sahrens dmu_buf_t *db; 1556789Sahrens int error; 1557789Sahrens 1558789Sahrens ASSERT(vd->vdev_children == 0); 1559789Sahrens 1560789Sahrens if (smo->smo_object == 0) 1561789Sahrens return (0); 1562789Sahrens 15631732Sbonwick if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0) 15641544Seschrock return (error); 15651732Sbonwick 15664944Smaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 15674944Smaybee bcopy(db->db_data, smo, sizeof (*smo)); 15681544Seschrock dmu_buf_rele(db, FTAG); 1569789Sahrens 1570789Sahrens mutex_enter(&vd->vdev_dtl_lock); 15718241SJeff.Bonwick@Sun.COM error = space_map_load(&vd->vdev_dtl[DTL_MISSING], 15728241SJeff.Bonwick@Sun.COM NULL, SM_ALLOC, smo, mos); 1573789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1574789Sahrens 1575789Sahrens return (error); 1576789Sahrens } 1577789Sahrens 1578789Sahrens void 1579789Sahrens vdev_dtl_sync(vdev_t *vd, uint64_t txg) 1580789Sahrens { 1581789Sahrens spa_t *spa = vd->vdev_spa; 15828241SJeff.Bonwick@Sun.COM space_map_obj_t *smo = &vd->vdev_dtl_smo; 15838241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[DTL_MISSING]; 15841732Sbonwick objset_t *mos = spa->spa_meta_objset; 1585789Sahrens space_map_t smsync; 1586789Sahrens kmutex_t smlock; 1587789Sahrens dmu_buf_t *db; 1588789Sahrens dmu_tx_t *tx; 1589789Sahrens 1590789Sahrens tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 1591789Sahrens 1592789Sahrens if (vd->vdev_detached) { 1593789Sahrens if (smo->smo_object != 0) { 15941732Sbonwick int err = dmu_object_free(mos, smo->smo_object, tx); 1595789Sahrens ASSERT3U(err, ==, 0); 1596789Sahrens smo->smo_object = 0; 1597789Sahrens } 1598789Sahrens dmu_tx_commit(tx); 1599789Sahrens return; 1600789Sahrens } 1601789Sahrens 1602789Sahrens if (smo->smo_object == 0) { 1603789Sahrens ASSERT(smo->smo_objsize == 0); 1604789Sahrens ASSERT(smo->smo_alloc == 0); 16051732Sbonwick smo->smo_object = dmu_object_alloc(mos, 1606789Sahrens DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT, 1607789Sahrens DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx); 1608789Sahrens ASSERT(smo->smo_object != 0); 1609789Sahrens vdev_config_dirty(vd->vdev_top); 1610789Sahrens } 1611789Sahrens 1612789Sahrens mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL); 1613789Sahrens 1614789Sahrens space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift, 
1615789Sahrens &smlock); 1616789Sahrens 1617789Sahrens mutex_enter(&smlock); 1618789Sahrens 1619789Sahrens mutex_enter(&vd->vdev_dtl_lock); 16201732Sbonwick space_map_walk(sm, space_map_add, &smsync); 1621789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1622789Sahrens 16231732Sbonwick space_map_truncate(smo, mos, tx); 16241732Sbonwick space_map_sync(&smsync, SM_ALLOC, smo, mos, tx); 1625789Sahrens 1626789Sahrens space_map_destroy(&smsync); 1627789Sahrens 1628789Sahrens mutex_exit(&smlock); 1629789Sahrens mutex_destroy(&smlock); 1630789Sahrens 16311732Sbonwick VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)); 1632789Sahrens dmu_buf_will_dirty(db, tx); 16334944Smaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 16344944Smaybee bcopy(smo, db->db_data, sizeof (*smo)); 16351544Seschrock dmu_buf_rele(db, FTAG); 1636789Sahrens 1637789Sahrens dmu_tx_commit(tx); 1638789Sahrens } 1639789Sahrens 16407046Sahrens /* 16418241SJeff.Bonwick@Sun.COM * Determine whether the specified vdev can be offlined/detached/removed 16428241SJeff.Bonwick@Sun.COM * without losing data. 16438241SJeff.Bonwick@Sun.COM */ 16448241SJeff.Bonwick@Sun.COM boolean_t 16458241SJeff.Bonwick@Sun.COM vdev_dtl_required(vdev_t *vd) 16468241SJeff.Bonwick@Sun.COM { 16478241SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 16488241SJeff.Bonwick@Sun.COM vdev_t *tvd = vd->vdev_top; 16498241SJeff.Bonwick@Sun.COM uint8_t cant_read = vd->vdev_cant_read; 16508241SJeff.Bonwick@Sun.COM boolean_t required; 16518241SJeff.Bonwick@Sun.COM 16528241SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 16538241SJeff.Bonwick@Sun.COM 16548241SJeff.Bonwick@Sun.COM if (vd == spa->spa_root_vdev || vd == tvd) 16558241SJeff.Bonwick@Sun.COM return (B_TRUE); 16568241SJeff.Bonwick@Sun.COM 16578241SJeff.Bonwick@Sun.COM /* 16588241SJeff.Bonwick@Sun.COM * Temporarily mark the device as unreadable, and then determine 16598241SJeff.Bonwick@Sun.COM * whether this results in any DTL outages in the top-level vdev. 16608241SJeff.Bonwick@Sun.COM * If not, we can safely offline/detach/remove the device. 16618241SJeff.Bonwick@Sun.COM */ 16628241SJeff.Bonwick@Sun.COM vd->vdev_cant_read = B_TRUE; 16638241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 16648241SJeff.Bonwick@Sun.COM required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 16658241SJeff.Bonwick@Sun.COM vd->vdev_cant_read = cant_read; 16668241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 16678241SJeff.Bonwick@Sun.COM 16688241SJeff.Bonwick@Sun.COM return (required); 16698241SJeff.Bonwick@Sun.COM } 16708241SJeff.Bonwick@Sun.COM 16718241SJeff.Bonwick@Sun.COM /* 16727046Sahrens * Determine if resilver is needed, and if so the txg range. 
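 * For a leaf this is (roughly) the span of its DTL_MISSING map, provided
 * the device is writeable; for interior vdevs it is the union of the
 * children's ranges. *minp and *maxp are only written when a resilver is
 * in fact needed.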
16737046Sahrens */
16747046Sahrens boolean_t
16757046Sahrens vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
16767046Sahrens {
16777046Sahrens boolean_t needed = B_FALSE;
16787046Sahrens uint64_t thismin = UINT64_MAX;
16797046Sahrens uint64_t thismax = 0;
16807046Sahrens
16817046Sahrens if (vd->vdev_children == 0) {
16827046Sahrens mutex_enter(&vd->vdev_dtl_lock);
16838241SJeff.Bonwick@Sun.COM if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 &&
16848241SJeff.Bonwick@Sun.COM vdev_writeable(vd)) {
16857046Sahrens space_seg_t *ss;
16867046Sahrens
16878241SJeff.Bonwick@Sun.COM ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root);
16887046Sahrens thismin = ss->ss_start - 1;
16898241SJeff.Bonwick@Sun.COM ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root);
16907046Sahrens thismax = ss->ss_end;
16917046Sahrens needed = B_TRUE;
16927046Sahrens }
16937046Sahrens mutex_exit(&vd->vdev_dtl_lock);
16947046Sahrens } else {
16958241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) {
16967046Sahrens vdev_t *cvd = vd->vdev_child[c];
16977046Sahrens uint64_t cmin, cmax;
16987046Sahrens
16997046Sahrens if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
17007046Sahrens thismin = MIN(thismin, cmin);
17017046Sahrens thismax = MAX(thismax, cmax);
17027046Sahrens needed = B_TRUE;
17037046Sahrens }
17047046Sahrens }
17057046Sahrens }
17067046Sahrens
17077046Sahrens if (needed && minp) {
17087046Sahrens *minp = thismin;
17097046Sahrens *maxp = thismax;
17107046Sahrens }
17117046Sahrens return (needed);
17127046Sahrens }
17137046Sahrens
17141986Seschrock void
17151544Seschrock vdev_load(vdev_t *vd)
1716789Sahrens {
1717789Sahrens /*
1718789Sahrens * Recursively load all children.
1719789Sahrens */
17208241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++)
17211986Seschrock vdev_load(vd->vdev_child[c]);
1722789Sahrens
1723789Sahrens /*
17241585Sbonwick * If this is a top-level vdev, initialize its metaslabs.
1725789Sahrens */
17261986Seschrock if (vd == vd->vdev_top &&
17271986Seschrock (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
17281986Seschrock vdev_metaslab_init(vd, 0) != 0))
17291986Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
17301986Seschrock VDEV_AUX_CORRUPT_DATA);
1731789Sahrens
1732789Sahrens /*
1733789Sahrens * If this is a leaf vdev, load its DTL.
1734789Sahrens */
17351986Seschrock if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
17361986Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
17371986Seschrock VDEV_AUX_CORRUPT_DATA);
1738789Sahrens }
1739789Sahrens
17402082Seschrock /*
17415450Sbrendan * The special vdev case is used for hot spares and l2cache devices. Its
17425450Sbrendan * sole purpose is to set the vdev state for the associated vdev. To do this,
17435450Sbrendan * we make sure that we can open the underlying device, then try to read the
17445450Sbrendan * label, and make sure that the label is sane and that it hasn't been
17455450Sbrendan * repurposed to another pool.
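 * If the device is not currently readable we just return success and
 * leave its state alone; validation will be retried the next time the
 * device is reopened.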
17462082Seschrock */ 17472082Seschrock int 17485450Sbrendan vdev_validate_aux(vdev_t *vd) 17492082Seschrock { 17502082Seschrock nvlist_t *label; 17512082Seschrock uint64_t guid, version; 17522082Seschrock uint64_t state; 17532082Seschrock 17547754SJeff.Bonwick@Sun.COM if (!vdev_readable(vd)) 17556643Seschrock return (0); 17566643Seschrock 17572082Seschrock if ((label = vdev_label_read_config(vd)) == NULL) { 17582082Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 17592082Seschrock VDEV_AUX_CORRUPT_DATA); 17602082Seschrock return (-1); 17612082Seschrock } 17622082Seschrock 17632082Seschrock if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 || 17644577Sahrens version > SPA_VERSION || 17652082Seschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 || 17662082Seschrock guid != vd->vdev_guid || 17672082Seschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) { 17682082Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 17692082Seschrock VDEV_AUX_CORRUPT_DATA); 17702082Seschrock nvlist_free(label); 17712082Seschrock return (-1); 17722082Seschrock } 17732082Seschrock 17742082Seschrock /* 17752082Seschrock * We don't actually check the pool state here. If it's in fact in 17762082Seschrock * use by another pool, we update this fact on the fly when requested. 17772082Seschrock */ 17782082Seschrock nvlist_free(label); 17792082Seschrock return (0); 17802082Seschrock } 17812082Seschrock 1782789Sahrens void 1783789Sahrens vdev_sync_done(vdev_t *vd, uint64_t txg) 1784789Sahrens { 1785789Sahrens metaslab_t *msp; 1786789Sahrens 1787789Sahrens while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) 1788789Sahrens metaslab_sync_done(msp, txg); 1789789Sahrens } 1790789Sahrens 1791789Sahrens void 1792789Sahrens vdev_sync(vdev_t *vd, uint64_t txg) 1793789Sahrens { 1794789Sahrens spa_t *spa = vd->vdev_spa; 1795789Sahrens vdev_t *lvd; 1796789Sahrens metaslab_t *msp; 17971732Sbonwick dmu_tx_t *tx; 1798789Sahrens 17991732Sbonwick if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) { 18001732Sbonwick ASSERT(vd == vd->vdev_top); 18011732Sbonwick tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 18021732Sbonwick vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, 18031732Sbonwick DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); 18041732Sbonwick ASSERT(vd->vdev_ms_array != 0); 18051732Sbonwick vdev_config_dirty(vd); 18061732Sbonwick dmu_tx_commit(tx); 18071732Sbonwick } 1808789Sahrens 18091732Sbonwick while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { 1810789Sahrens metaslab_sync(msp, txg); 18111732Sbonwick (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); 18121732Sbonwick } 1813789Sahrens 1814789Sahrens while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) 1815789Sahrens vdev_dtl_sync(lvd, txg); 1816789Sahrens 1817789Sahrens (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); 1818789Sahrens } 1819789Sahrens 1820789Sahrens uint64_t 1821789Sahrens vdev_psize_to_asize(vdev_t *vd, uint64_t psize) 1822789Sahrens { 1823789Sahrens return (vd->vdev_ops->vdev_op_asize(vd, psize)); 1824789Sahrens } 1825789Sahrens 18264451Seschrock /* 18274451Seschrock * Mark the given vdev faulted. A faulted vdev behaves as if the device could 18284451Seschrock * not be opened, and no I/O is attempted. 
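 * If faulting this leaf would make its top-level vdev unopenable, the
 * fault is downgraded to a degraded state instead (see below).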
18294451Seschrock */
1830789Sahrens int
18314451Seschrock vdev_fault(spa_t *spa, uint64_t guid)
18324451Seschrock {
18336643Seschrock vdev_t *vd;
18344451Seschrock
18357754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa);
18364451Seschrock
18376643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
18387754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV));
18397754SJeff.Bonwick@Sun.COM
18404451Seschrock if (!vd->vdev_ops->vdev_op_leaf)
18417754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
18424451Seschrock
18434451Seschrock /*
18444451Seschrock * Faulted state takes precedence over degraded.
18454451Seschrock */
18464451Seschrock vd->vdev_faulted = 1ULL;
18474451Seschrock vd->vdev_degraded = 0ULL;
18487754SJeff.Bonwick@Sun.COM vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED);
18494451Seschrock
18504451Seschrock /*
18518123SDavid.Marker@sun.com * If marking the vdev as faulted causes the top-level vdev to become
18524451Seschrock * unavailable, then back off and simply mark the vdev as degraded
18534451Seschrock * instead.
18544451Seschrock */
18556643Seschrock if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) {
18564451Seschrock vd->vdev_degraded = 1ULL;
18574451Seschrock vd->vdev_faulted = 0ULL;
18584451Seschrock
18594451Seschrock /*
18604451Seschrock * If we reopen the device and it's not dead, only then do we
18614451Seschrock * mark it degraded.
18624451Seschrock */
18634451Seschrock vdev_reopen(vd);
18644451Seschrock
18655329Sgw25295 if (vdev_readable(vd)) {
18664451Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
18674451Seschrock VDEV_AUX_ERR_EXCEEDED);
18684451Seschrock }
18694451Seschrock }
18704451Seschrock
18717754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0));
18724451Seschrock }
18734451Seschrock
18744451Seschrock /*
18754451Seschrock * Mark the given vdev degraded. A degraded vdev is purely an indication to the
18764451Seschrock * user that something is wrong. The vdev continues to operate as normal as far
18774451Seschrock * as I/O is concerned.
18784451Seschrock */
18794451Seschrock int
18804451Seschrock vdev_degrade(spa_t *spa, uint64_t guid)
18814451Seschrock {
18826643Seschrock vdev_t *vd;
18834451Seschrock
18847754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa);
18854451Seschrock
18866643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
18877754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV));
18887754SJeff.Bonwick@Sun.COM
18894451Seschrock if (!vd->vdev_ops->vdev_op_leaf)
18907754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
18914451Seschrock
18924451Seschrock /*
18934451Seschrock * If the vdev is already faulted, then don't do anything.
18944451Seschrock */
18957754SJeff.Bonwick@Sun.COM if (vd->vdev_faulted || vd->vdev_degraded)
18967754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, 0));
18974451Seschrock
18984451Seschrock vd->vdev_degraded = 1ULL;
18994451Seschrock if (!vdev_is_dead(vd))
19004451Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
19014451Seschrock VDEV_AUX_ERR_EXCEEDED);
19024451Seschrock
19037754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0));
19044451Seschrock }
19054451Seschrock
19064451Seschrock /*
19074451Seschrock * Online the given vdev. If 'unspare' is set, it implies two things. First,
19084451Seschrock * any attached spare device should be detached when the device finishes
19094451Seschrock * resilvering.
Second, the online should be treated like a 'test' online case, 19104451Seschrock * so no FMA events are generated if the device fails to open. 19114451Seschrock */ 19124451Seschrock int 19137754SJeff.Bonwick@Sun.COM vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 1914789Sahrens { 19159816SGeorge.Wilson@Sun.COM vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; 1916789Sahrens 19177754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa); 19181485Slling 19196643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 19207754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV)); 1921789Sahrens 19221585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 19237754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 19241585Sbonwick 19259816SGeorge.Wilson@Sun.COM tvd = vd->vdev_top; 1926789Sahrens vd->vdev_offline = B_FALSE; 19271485Slling vd->vdev_tmpoffline = B_FALSE; 19287754SJeff.Bonwick@Sun.COM vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 19297754SJeff.Bonwick@Sun.COM vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 19309816SGeorge.Wilson@Sun.COM 19319816SGeorge.Wilson@Sun.COM /* XXX - L2ARC 1.0 does not support expansion */ 19329816SGeorge.Wilson@Sun.COM if (!vd->vdev_aux) { 19339816SGeorge.Wilson@Sun.COM for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 19349816SGeorge.Wilson@Sun.COM pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND); 19359816SGeorge.Wilson@Sun.COM } 19369816SGeorge.Wilson@Sun.COM 19379816SGeorge.Wilson@Sun.COM vdev_reopen(tvd); 19384451Seschrock vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 19394451Seschrock 19409816SGeorge.Wilson@Sun.COM if (!vd->vdev_aux) { 19419816SGeorge.Wilson@Sun.COM for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 19429816SGeorge.Wilson@Sun.COM pvd->vdev_expanding = B_FALSE; 19439816SGeorge.Wilson@Sun.COM } 19449816SGeorge.Wilson@Sun.COM 19454451Seschrock if (newstate) 19464451Seschrock *newstate = vd->vdev_state; 19474451Seschrock if ((flags & ZFS_ONLINE_UNSPARE) && 19484451Seschrock !vdev_is_dead(vd) && vd->vdev_parent && 19494451Seschrock vd->vdev_parent->vdev_ops == &vdev_spare_ops && 19504451Seschrock vd->vdev_parent->vdev_child[0] == vd) 19514451Seschrock vd->vdev_unspare = B_TRUE; 1952789Sahrens 19539816SGeorge.Wilson@Sun.COM if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { 19549816SGeorge.Wilson@Sun.COM 19559816SGeorge.Wilson@Sun.COM /* XXX - L2ARC 1.0 does not support expansion */ 19569816SGeorge.Wilson@Sun.COM if (vd->vdev_aux) 19579816SGeorge.Wilson@Sun.COM return (spa_vdev_state_exit(spa, vd, ENOTSUP)); 19589816SGeorge.Wilson@Sun.COM spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 19599816SGeorge.Wilson@Sun.COM } 19608241SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 1961789Sahrens } 1962789Sahrens 1963789Sahrens int 19644451Seschrock vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 1965789Sahrens { 19669701SGeorge.Wilson@Sun.COM vdev_t *vd, *tvd; 19679701SGeorge.Wilson@Sun.COM int error; 1968789Sahrens 19697754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa); 1970789Sahrens 19716643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 19727754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV)); 1973789Sahrens 19741585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 19757754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 19761585Sbonwick 19779701SGeorge.Wilson@Sun.COM tvd = vd->vdev_top; 19789701SGeorge.Wilson@Sun.COM 1979789Sahrens /* 19801732Sbonwick * If the 
device isn't already offline, try to offline it. 1981789Sahrens */ 19821732Sbonwick if (!vd->vdev_offline) { 19831732Sbonwick /* 19848241SJeff.Bonwick@Sun.COM * If this device has the only valid copy of some data, 19859701SGeorge.Wilson@Sun.COM * don't allow it to be offlined. Log devices are always 19869701SGeorge.Wilson@Sun.COM * expendable. 19871732Sbonwick */ 19889701SGeorge.Wilson@Sun.COM if (!tvd->vdev_islog && vd->vdev_aux == NULL && 19899701SGeorge.Wilson@Sun.COM vdev_dtl_required(vd)) 19907754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, EBUSY)); 1991789Sahrens 19921732Sbonwick /* 19931732Sbonwick * Offline this device and reopen its top-level vdev. 19949701SGeorge.Wilson@Sun.COM * If the top-level vdev is a log device then just offline 19959701SGeorge.Wilson@Sun.COM * it. Otherwise, if this action results in the top-level 19969701SGeorge.Wilson@Sun.COM * vdev becoming unusable, undo it and fail the request. 19971732Sbonwick */ 19981732Sbonwick vd->vdev_offline = B_TRUE; 19999701SGeorge.Wilson@Sun.COM vdev_reopen(tvd); 20009701SGeorge.Wilson@Sun.COM 20019701SGeorge.Wilson@Sun.COM if (!tvd->vdev_islog && vd->vdev_aux == NULL && 20029701SGeorge.Wilson@Sun.COM vdev_is_dead(tvd)) { 20031732Sbonwick vd->vdev_offline = B_FALSE; 20049701SGeorge.Wilson@Sun.COM vdev_reopen(tvd); 20057754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, EBUSY)); 20061732Sbonwick } 2007789Sahrens } 2008789Sahrens 20097754SJeff.Bonwick@Sun.COM vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 20101732Sbonwick 20119701SGeorge.Wilson@Sun.COM if (!tvd->vdev_islog || !vdev_is_dead(tvd)) 20129701SGeorge.Wilson@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 20139701SGeorge.Wilson@Sun.COM 20149701SGeorge.Wilson@Sun.COM (void) spa_vdev_state_exit(spa, vd, 0); 20159701SGeorge.Wilson@Sun.COM 20169701SGeorge.Wilson@Sun.COM error = dmu_objset_find(spa_name(spa), zil_vdev_offline, 20179701SGeorge.Wilson@Sun.COM NULL, DS_FIND_CHILDREN); 20189701SGeorge.Wilson@Sun.COM if (error) { 20199701SGeorge.Wilson@Sun.COM (void) vdev_online(spa, guid, 0, NULL); 20209701SGeorge.Wilson@Sun.COM return (error); 20219701SGeorge.Wilson@Sun.COM } 20229701SGeorge.Wilson@Sun.COM /* 20239701SGeorge.Wilson@Sun.COM * If we successfully offlined the log device then we need to 20249701SGeorge.Wilson@Sun.COM * sync out the current txg so that the "stubby" block can be 20259701SGeorge.Wilson@Sun.COM * removed by zil_sync(). 20269701SGeorge.Wilson@Sun.COM */ 20279701SGeorge.Wilson@Sun.COM txg_wait_synced(spa->spa_dsl_pool, 0); 20289701SGeorge.Wilson@Sun.COM return (0); 2029789Sahrens } 2030789Sahrens 20311544Seschrock /* 20321544Seschrock * Clear the error counts associated with this vdev. Unlike vdev_online() and 20331544Seschrock * vdev_offline(), we assume the spa config is locked. We also clear all 20341544Seschrock * children. If 'vd' is NULL, then the user wants to clear all vdevs. 
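 * Clearing a faulted or degraded vdev also reopens it and, if the device
 * is usable again, requests a resilver to repair anything that was missed
 * while it was out.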
20351544Seschrock */ 20361544Seschrock void 20377754SJeff.Bonwick@Sun.COM vdev_clear(spa_t *spa, vdev_t *vd) 2038789Sahrens { 20397754SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 20407754SJeff.Bonwick@Sun.COM 20417754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2042789Sahrens 20431544Seschrock if (vd == NULL) 20447754SJeff.Bonwick@Sun.COM vd = rvd; 2045789Sahrens 20461544Seschrock vd->vdev_stat.vs_read_errors = 0; 20471544Seschrock vd->vdev_stat.vs_write_errors = 0; 20481544Seschrock vd->vdev_stat.vs_checksum_errors = 0; 2049789Sahrens 20507754SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) 20517754SJeff.Bonwick@Sun.COM vdev_clear(spa, vd->vdev_child[c]); 20524451Seschrock 20534451Seschrock /* 20546959Sek110237 * If we're in the FAULTED state or have experienced failed I/O, then 20556959Sek110237 * clear the persistent state and attempt to reopen the device. We 20566959Sek110237 * also mark the vdev config dirty, so that the new faulted state is 20576959Sek110237 * written out to disk. 20584451Seschrock */ 20597754SJeff.Bonwick@Sun.COM if (vd->vdev_faulted || vd->vdev_degraded || 20607754SJeff.Bonwick@Sun.COM !vdev_readable(vd) || !vdev_writeable(vd)) { 20616959Sek110237 20624451Seschrock vd->vdev_faulted = vd->vdev_degraded = 0; 20637754SJeff.Bonwick@Sun.COM vd->vdev_cant_read = B_FALSE; 20647754SJeff.Bonwick@Sun.COM vd->vdev_cant_write = B_FALSE; 20657754SJeff.Bonwick@Sun.COM 20664451Seschrock vdev_reopen(vd); 20674451Seschrock 20687754SJeff.Bonwick@Sun.COM if (vd != rvd) 20697754SJeff.Bonwick@Sun.COM vdev_state_dirty(vd->vdev_top); 20707754SJeff.Bonwick@Sun.COM 20717754SJeff.Bonwick@Sun.COM if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) 20724808Sek110237 spa_async_request(spa, SPA_ASYNC_RESILVER); 20734451Seschrock 20744451Seschrock spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR); 20754451Seschrock } 2076789Sahrens } 2077789Sahrens 20787754SJeff.Bonwick@Sun.COM boolean_t 20797754SJeff.Bonwick@Sun.COM vdev_is_dead(vdev_t *vd) 20805329Sgw25295 { 20817754SJeff.Bonwick@Sun.COM return (vd->vdev_state < VDEV_STATE_DEGRADED); 20825329Sgw25295 } 20835329Sgw25295 20847754SJeff.Bonwick@Sun.COM boolean_t 20857754SJeff.Bonwick@Sun.COM vdev_readable(vdev_t *vd) 2086789Sahrens { 20877754SJeff.Bonwick@Sun.COM return (!vdev_is_dead(vd) && !vd->vdev_cant_read); 2088789Sahrens } 2089789Sahrens 20907754SJeff.Bonwick@Sun.COM boolean_t 20917754SJeff.Bonwick@Sun.COM vdev_writeable(vdev_t *vd) 2092789Sahrens { 20937754SJeff.Bonwick@Sun.COM return (!vdev_is_dead(vd) && !vd->vdev_cant_write); 20947754SJeff.Bonwick@Sun.COM } 2095789Sahrens 20967754SJeff.Bonwick@Sun.COM boolean_t 20977980SGeorge.Wilson@Sun.COM vdev_allocatable(vdev_t *vd) 20987980SGeorge.Wilson@Sun.COM { 20998241SJeff.Bonwick@Sun.COM uint64_t state = vd->vdev_state; 21008241SJeff.Bonwick@Sun.COM 21017980SGeorge.Wilson@Sun.COM /* 21028241SJeff.Bonwick@Sun.COM * We currently allow allocations from vdevs which may be in the 21037980SGeorge.Wilson@Sun.COM * process of reopening (i.e. VDEV_STATE_CLOSED). If the device 21047980SGeorge.Wilson@Sun.COM * fails to reopen then we'll catch it later when we're holding 21058241SJeff.Bonwick@Sun.COM * the proper locks. Note that we have to get the vdev state 21068241SJeff.Bonwick@Sun.COM * in a local variable because although it changes atomically, 21078241SJeff.Bonwick@Sun.COM * we're asking two separate questions about it. 
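 * In other words, a vdev is allocatable if it is not dead (or is merely
 * closed pending a reopen) and is not flagged cant_write.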
21087980SGeorge.Wilson@Sun.COM */ 21098241SJeff.Bonwick@Sun.COM return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) && 21107980SGeorge.Wilson@Sun.COM !vd->vdev_cant_write); 21117980SGeorge.Wilson@Sun.COM } 21127980SGeorge.Wilson@Sun.COM 21137980SGeorge.Wilson@Sun.COM boolean_t 21147754SJeff.Bonwick@Sun.COM vdev_accessible(vdev_t *vd, zio_t *zio) 21157754SJeff.Bonwick@Sun.COM { 21167754SJeff.Bonwick@Sun.COM ASSERT(zio->io_vd == vd); 2117789Sahrens 21187754SJeff.Bonwick@Sun.COM if (vdev_is_dead(vd) || vd->vdev_remove_wanted) 21197754SJeff.Bonwick@Sun.COM return (B_FALSE); 2120789Sahrens 21217754SJeff.Bonwick@Sun.COM if (zio->io_type == ZIO_TYPE_READ) 21227754SJeff.Bonwick@Sun.COM return (!vd->vdev_cant_read); 2123789Sahrens 21247754SJeff.Bonwick@Sun.COM if (zio->io_type == ZIO_TYPE_WRITE) 21257754SJeff.Bonwick@Sun.COM return (!vd->vdev_cant_write); 21267754SJeff.Bonwick@Sun.COM 21277754SJeff.Bonwick@Sun.COM return (B_TRUE); 2128789Sahrens } 2129789Sahrens 2130789Sahrens /* 2131789Sahrens * Get statistics for the given vdev. 2132789Sahrens */ 2133789Sahrens void 2134789Sahrens vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) 2135789Sahrens { 2136789Sahrens vdev_t *rvd = vd->vdev_spa->spa_root_vdev; 2137789Sahrens 2138789Sahrens mutex_enter(&vd->vdev_stat_lock); 2139789Sahrens bcopy(&vd->vdev_stat, vs, sizeof (*vs)); 21407046Sahrens vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors; 2141789Sahrens vs->vs_timestamp = gethrtime() - vs->vs_timestamp; 2142789Sahrens vs->vs_state = vd->vdev_state; 21439816SGeorge.Wilson@Sun.COM vs->vs_rsize = vdev_get_min_asize(vd); 21449816SGeorge.Wilson@Sun.COM if (vd->vdev_ops->vdev_op_leaf) 21459816SGeorge.Wilson@Sun.COM vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; 2146789Sahrens mutex_exit(&vd->vdev_stat_lock); 2147789Sahrens 2148789Sahrens /* 2149789Sahrens * If we're getting stats on the root vdev, aggregate the I/O counts 2150789Sahrens * over all top-level vdevs (i.e. the direct children of the root). 2151789Sahrens */ 2152789Sahrens if (vd == rvd) { 21537754SJeff.Bonwick@Sun.COM for (int c = 0; c < rvd->vdev_children; c++) { 2154789Sahrens vdev_t *cvd = rvd->vdev_child[c]; 2155789Sahrens vdev_stat_t *cvs = &cvd->vdev_stat; 2156789Sahrens 2157789Sahrens mutex_enter(&vd->vdev_stat_lock); 21587754SJeff.Bonwick@Sun.COM for (int t = 0; t < ZIO_TYPES; t++) { 2159789Sahrens vs->vs_ops[t] += cvs->vs_ops[t]; 2160789Sahrens vs->vs_bytes[t] += cvs->vs_bytes[t]; 2161789Sahrens } 2162789Sahrens vs->vs_scrub_examined += cvs->vs_scrub_examined; 2163789Sahrens mutex_exit(&vd->vdev_stat_lock); 2164789Sahrens } 2165789Sahrens } 2166789Sahrens } 2167789Sahrens 2168789Sahrens void 21695450Sbrendan vdev_clear_stats(vdev_t *vd) 21705450Sbrendan { 21715450Sbrendan mutex_enter(&vd->vdev_stat_lock); 21725450Sbrendan vd->vdev_stat.vs_space = 0; 21735450Sbrendan vd->vdev_stat.vs_dspace = 0; 21745450Sbrendan vd->vdev_stat.vs_alloc = 0; 21755450Sbrendan mutex_exit(&vd->vdev_stat_lock); 21765450Sbrendan } 21775450Sbrendan 21785450Sbrendan void 21797754SJeff.Bonwick@Sun.COM vdev_stat_update(zio_t *zio, uint64_t psize) 2180789Sahrens { 21818241SJeff.Bonwick@Sun.COM spa_t *spa = zio->io_spa; 21828241SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 21837754SJeff.Bonwick@Sun.COM vdev_t *vd = zio->io_vd ? 
zio->io_vd : rvd; 2184789Sahrens vdev_t *pvd; 2185789Sahrens uint64_t txg = zio->io_txg; 2186789Sahrens vdev_stat_t *vs = &vd->vdev_stat; 2187789Sahrens zio_type_t type = zio->io_type; 2188789Sahrens int flags = zio->io_flags; 2189789Sahrens 21907754SJeff.Bonwick@Sun.COM /* 21917754SJeff.Bonwick@Sun.COM * If this i/o is a gang leader, it didn't do any actual work. 21927754SJeff.Bonwick@Sun.COM */ 21937754SJeff.Bonwick@Sun.COM if (zio->io_gang_tree) 21947754SJeff.Bonwick@Sun.COM return; 21957754SJeff.Bonwick@Sun.COM 2196789Sahrens if (zio->io_error == 0) { 21977754SJeff.Bonwick@Sun.COM /* 21987754SJeff.Bonwick@Sun.COM * If this is a root i/o, don't count it -- we've already 21997754SJeff.Bonwick@Sun.COM * counted the top-level vdevs, and vdev_get_stats() will 22007754SJeff.Bonwick@Sun.COM * aggregate them when asked. This reduces contention on 22017754SJeff.Bonwick@Sun.COM * the root vdev_stat_lock and implicitly handles blocks 22027754SJeff.Bonwick@Sun.COM * that compress away to holes, for which there is no i/o. 22037754SJeff.Bonwick@Sun.COM * (Holes never create vdev children, so all the counters 22047754SJeff.Bonwick@Sun.COM * remain zero, which is what we want.) 22057754SJeff.Bonwick@Sun.COM * 22067754SJeff.Bonwick@Sun.COM * Note: this only applies to successful i/o (io_error == 0) 22077754SJeff.Bonwick@Sun.COM * because unlike i/o counts, errors are not additive. 22087754SJeff.Bonwick@Sun.COM * When reading a ditto block, for example, failure of 22097754SJeff.Bonwick@Sun.COM * one top-level vdev does not imply a root-level error. 22107754SJeff.Bonwick@Sun.COM */ 22117754SJeff.Bonwick@Sun.COM if (vd == rvd) 22127754SJeff.Bonwick@Sun.COM return; 22137754SJeff.Bonwick@Sun.COM 22147754SJeff.Bonwick@Sun.COM ASSERT(vd == zio->io_vd); 22158241SJeff.Bonwick@Sun.COM 22168241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_IO_BYPASS) 22178241SJeff.Bonwick@Sun.COM return; 22188241SJeff.Bonwick@Sun.COM 22198241SJeff.Bonwick@Sun.COM mutex_enter(&vd->vdev_stat_lock); 22208241SJeff.Bonwick@Sun.COM 22217754SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_IO_REPAIR) { 22221807Sbonwick if (flags & ZIO_FLAG_SCRUB_THREAD) 22237754SJeff.Bonwick@Sun.COM vs->vs_scrub_repaired += psize; 22248241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_SELF_HEAL) 22257754SJeff.Bonwick@Sun.COM vs->vs_self_healed += psize; 2226789Sahrens } 22278241SJeff.Bonwick@Sun.COM 22288241SJeff.Bonwick@Sun.COM vs->vs_ops[type]++; 22298241SJeff.Bonwick@Sun.COM vs->vs_bytes[type] += psize; 22308241SJeff.Bonwick@Sun.COM 22318241SJeff.Bonwick@Sun.COM mutex_exit(&vd->vdev_stat_lock); 2232789Sahrens return; 2233789Sahrens } 2234789Sahrens 2235789Sahrens if (flags & ZIO_FLAG_SPECULATIVE) 2236789Sahrens return; 2237789Sahrens 22389725SEric.Schrock@Sun.COM /* 22399725SEric.Schrock@Sun.COM * If this is an I/O error that is going to be retried, then ignore the 22409725SEric.Schrock@Sun.COM * error. Otherwise, the user may interpret B_FAILFAST I/O errors as 22419725SEric.Schrock@Sun.COM * hard errors, when in reality they can happen for any number of 22429725SEric.Schrock@Sun.COM * innocuous reasons (bus resets, MPxIO link failure, etc). 
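 * Such errors are not counted against the vdev here; the error is only
 * charged once the I/O has been retried (ZIO_FLAG_IO_RETRY) and has
 * failed again.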
22439725SEric.Schrock@Sun.COM */ 22449725SEric.Schrock@Sun.COM if (zio->io_error == EIO && 22459725SEric.Schrock@Sun.COM !(zio->io_flags & ZIO_FLAG_IO_RETRY)) 22469725SEric.Schrock@Sun.COM return; 22479725SEric.Schrock@Sun.COM 22487754SJeff.Bonwick@Sun.COM mutex_enter(&vd->vdev_stat_lock); 22499230SGeorge.Wilson@Sun.COM if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) { 22507754SJeff.Bonwick@Sun.COM if (zio->io_error == ECKSUM) 22517754SJeff.Bonwick@Sun.COM vs->vs_checksum_errors++; 22527754SJeff.Bonwick@Sun.COM else 22537754SJeff.Bonwick@Sun.COM vs->vs_read_errors++; 2254789Sahrens } 22559230SGeorge.Wilson@Sun.COM if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd)) 22567754SJeff.Bonwick@Sun.COM vs->vs_write_errors++; 22577754SJeff.Bonwick@Sun.COM mutex_exit(&vd->vdev_stat_lock); 2258789Sahrens 22598241SJeff.Bonwick@Sun.COM if (type == ZIO_TYPE_WRITE && txg != 0 && 22608241SJeff.Bonwick@Sun.COM (!(flags & ZIO_FLAG_IO_REPAIR) || 22618241SJeff.Bonwick@Sun.COM (flags & ZIO_FLAG_SCRUB_THREAD))) { 22628241SJeff.Bonwick@Sun.COM /* 22638241SJeff.Bonwick@Sun.COM * This is either a normal write (not a repair), or it's a 22648241SJeff.Bonwick@Sun.COM * repair induced by the scrub thread. In the normal case, 22658241SJeff.Bonwick@Sun.COM * we commit the DTL change in the same txg as the block 22668241SJeff.Bonwick@Sun.COM * was born. In the scrub-induced repair case, we know that 22678241SJeff.Bonwick@Sun.COM * scrubs run in first-pass syncing context, so we commit 22688241SJeff.Bonwick@Sun.COM * the DTL change in spa->spa_syncing_txg. 22698241SJeff.Bonwick@Sun.COM * 22708241SJeff.Bonwick@Sun.COM * We currently do not make DTL entries for failed spontaneous 22718241SJeff.Bonwick@Sun.COM * self-healing writes triggered by normal (non-scrubbing) 22728241SJeff.Bonwick@Sun.COM * reads, because we have no transactional context in which to 22738241SJeff.Bonwick@Sun.COM * do so -- and it's not clear that it'd be desirable anyway. 
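 * In short: the affected txg is added to the leaf's DTL_MISSING (and to
 * DTL_SCRUB for scrub-induced repairs), recorded as DTL_PARTIAL up the
 * tree, and the top-level vdev is dirtied in commit_txg as computed below.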
22748241SJeff.Bonwick@Sun.COM */ 22758241SJeff.Bonwick@Sun.COM if (vd->vdev_ops->vdev_op_leaf) { 22768241SJeff.Bonwick@Sun.COM uint64_t commit_txg = txg; 22778241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_SCRUB_THREAD) { 22788241SJeff.Bonwick@Sun.COM ASSERT(flags & ZIO_FLAG_IO_REPAIR); 22798241SJeff.Bonwick@Sun.COM ASSERT(spa_sync_pass(spa) == 1); 22808241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1); 22818241SJeff.Bonwick@Sun.COM commit_txg = spa->spa_syncing_txg; 22828241SJeff.Bonwick@Sun.COM } 22838241SJeff.Bonwick@Sun.COM ASSERT(commit_txg >= spa->spa_syncing_txg); 22848241SJeff.Bonwick@Sun.COM if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1)) 22858241SJeff.Bonwick@Sun.COM return; 22868241SJeff.Bonwick@Sun.COM for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 22878241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1); 22888241SJeff.Bonwick@Sun.COM vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); 2289789Sahrens } 22908241SJeff.Bonwick@Sun.COM if (vd != rvd) 22918241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vd, DTL_MISSING, txg, 1); 2292789Sahrens } 2293789Sahrens } 2294789Sahrens 2295789Sahrens void 2296789Sahrens vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete) 2297789Sahrens { 2298789Sahrens vdev_stat_t *vs = &vd->vdev_stat; 2299789Sahrens 23009816SGeorge.Wilson@Sun.COM for (int c = 0; c < vd->vdev_children; c++) 2301789Sahrens vdev_scrub_stat_update(vd->vdev_child[c], type, complete); 2302789Sahrens 2303789Sahrens mutex_enter(&vd->vdev_stat_lock); 2304789Sahrens 2305789Sahrens if (type == POOL_SCRUB_NONE) { 2306789Sahrens /* 2307789Sahrens * Update completion and end time. Leave everything else alone 2308789Sahrens * so we can report what happened during the previous scrub. 2309789Sahrens */ 2310789Sahrens vs->vs_scrub_complete = complete; 2311789Sahrens vs->vs_scrub_end = gethrestime_sec(); 2312789Sahrens } else { 2313789Sahrens vs->vs_scrub_type = type; 2314789Sahrens vs->vs_scrub_complete = 0; 2315789Sahrens vs->vs_scrub_examined = 0; 2316789Sahrens vs->vs_scrub_repaired = 0; 2317789Sahrens vs->vs_scrub_start = gethrestime_sec(); 2318789Sahrens vs->vs_scrub_end = 0; 2319789Sahrens } 2320789Sahrens 2321789Sahrens mutex_exit(&vd->vdev_stat_lock); 2322789Sahrens } 2323789Sahrens 2324789Sahrens /* 2325789Sahrens * Update the in-core space usage stats for this vdev and the root vdev. 2326789Sahrens */ 2327789Sahrens void 23285450Sbrendan vdev_space_update(vdev_t *vd, int64_t space_delta, int64_t alloc_delta, 23295450Sbrendan boolean_t update_root) 2330789Sahrens { 23314527Sperrin int64_t dspace_delta = space_delta; 23324527Sperrin spa_t *spa = vd->vdev_spa; 23334527Sperrin vdev_t *rvd = spa->spa_root_vdev; 23344527Sperrin 2335789Sahrens ASSERT(vd == vd->vdev_top); 23364527Sperrin 23374527Sperrin /* 23384527Sperrin * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion 23394527Sperrin * factor. We must calculate this here and not at the root vdev 23404527Sperrin * because the root vdev's psize-to-asize is simply the max of its 23414527Sperrin * childrens', thus not accurate enough for us. 
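 * The deflated delta computed below is what gets folded into vs_dspace,
 * i.e. the space delta with that expansion factored back out.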
23424527Sperrin */
23434527Sperrin ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
23449701SGeorge.Wilson@Sun.COM ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
23454527Sperrin dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
23464527Sperrin vd->vdev_deflate_ratio;
2347789Sahrens
23484527Sperrin mutex_enter(&vd->vdev_stat_lock);
23494527Sperrin vd->vdev_stat.vs_space += space_delta;
23504527Sperrin vd->vdev_stat.vs_alloc += alloc_delta;
23514527Sperrin vd->vdev_stat.vs_dspace += dspace_delta;
23524527Sperrin mutex_exit(&vd->vdev_stat_lock);
23532082Seschrock
23545450Sbrendan if (update_root) {
23555450Sbrendan ASSERT(rvd == vd->vdev_parent);
23565450Sbrendan ASSERT(vd->vdev_ms_count != 0);
23574527Sperrin
23585450Sbrendan /*
23595450Sbrendan * Don't count non-normal (e.g. intent log) space as part of
23605450Sbrendan * the pool's capacity.
23615450Sbrendan */
23625450Sbrendan if (vd->vdev_mg->mg_class != spa->spa_normal_class)
23635450Sbrendan return;
23645450Sbrendan
23655450Sbrendan mutex_enter(&rvd->vdev_stat_lock);
23665450Sbrendan rvd->vdev_stat.vs_space += space_delta;
23675450Sbrendan rvd->vdev_stat.vs_alloc += alloc_delta;
23685450Sbrendan rvd->vdev_stat.vs_dspace += dspace_delta;
23695450Sbrendan mutex_exit(&rvd->vdev_stat_lock);
23705450Sbrendan }
2371789Sahrens }
2372789Sahrens
2373789Sahrens /*
2374789Sahrens * Mark a top-level vdev's config as dirty, placing it on the dirty list
2375789Sahrens * so that it will be written out next time the vdev configuration is synced.
2376789Sahrens * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
2377789Sahrens */
2378789Sahrens void
2379789Sahrens vdev_config_dirty(vdev_t *vd)
2380789Sahrens {
2381789Sahrens spa_t *spa = vd->vdev_spa;
2382789Sahrens vdev_t *rvd = spa->spa_root_vdev;
2383789Sahrens int c;
2384789Sahrens
23851601Sbonwick /*
23869425SEric.Schrock@Sun.COM * If this is an aux vdev (as with l2cache and spare devices), then we
23879425SEric.Schrock@Sun.COM * update the vdev config manually and set the sync flag.
23886643Seschrock */
23896643Seschrock if (vd->vdev_aux != NULL) {
23906643Seschrock spa_aux_vdev_t *sav = vd->vdev_aux;
23916643Seschrock nvlist_t **aux;
23926643Seschrock uint_t naux;
23936643Seschrock
23946643Seschrock for (c = 0; c < sav->sav_count; c++) {
23956643Seschrock if (sav->sav_vdevs[c] == vd)
23966643Seschrock break;
23976643Seschrock }
23986643Seschrock
23997754SJeff.Bonwick@Sun.COM if (c == sav->sav_count) {
24007754SJeff.Bonwick@Sun.COM /*
24017754SJeff.Bonwick@Sun.COM * We're being removed. There's nothing more to do.
24027754SJeff.Bonwick@Sun.COM */
24037754SJeff.Bonwick@Sun.COM ASSERT(sav->sav_sync == B_TRUE);
24047754SJeff.Bonwick@Sun.COM return;
24057754SJeff.Bonwick@Sun.COM }
24067754SJeff.Bonwick@Sun.COM
24076643Seschrock sav->sav_sync = B_TRUE;
24086643Seschrock
24099425SEric.Schrock@Sun.COM if (nvlist_lookup_nvlist_array(sav->sav_config,
24109425SEric.Schrock@Sun.COM ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
24119425SEric.Schrock@Sun.COM VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
24129425SEric.Schrock@Sun.COM ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
24139425SEric.Schrock@Sun.COM }
24146643Seschrock
24156643Seschrock ASSERT(c < naux);
24166643Seschrock
24176643Seschrock /*
24186643Seschrock * Setting the nvlist in the middle of the array is a little
24196643Seschrock * sketchy, but it will work.
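 * We regenerate the config for just this one aux vdev; sav_sync (set
 * above) makes sure the updated spares/l2cache array is written out with
 * the next config sync.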
24206643Seschrock */ 24216643Seschrock nvlist_free(aux[c]); 24226643Seschrock aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE); 24236643Seschrock 24246643Seschrock return; 24256643Seschrock } 24266643Seschrock 24276643Seschrock /* 24287754SJeff.Bonwick@Sun.COM * The dirty list is protected by the SCL_CONFIG lock. The caller 24297754SJeff.Bonwick@Sun.COM * must either hold SCL_CONFIG as writer, or must be the sync thread 24307754SJeff.Bonwick@Sun.COM * (which holds SCL_CONFIG as reader). There's only one sync thread, 24311601Sbonwick * so this is sufficient to ensure mutual exclusion. 24321601Sbonwick */ 24337754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 24347754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 24357754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_CONFIG, RW_READER))); 24361601Sbonwick 2437789Sahrens if (vd == rvd) { 2438789Sahrens for (c = 0; c < rvd->vdev_children; c++) 2439789Sahrens vdev_config_dirty(rvd->vdev_child[c]); 2440789Sahrens } else { 2441789Sahrens ASSERT(vd == vd->vdev_top); 2442789Sahrens 24437754SJeff.Bonwick@Sun.COM if (!list_link_active(&vd->vdev_config_dirty_node)) 24447754SJeff.Bonwick@Sun.COM list_insert_head(&spa->spa_config_dirty_list, vd); 2445789Sahrens } 2446789Sahrens } 2447789Sahrens 2448789Sahrens void 2449789Sahrens vdev_config_clean(vdev_t *vd) 2450789Sahrens { 24511601Sbonwick spa_t *spa = vd->vdev_spa; 24521601Sbonwick 24537754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 24547754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 24557754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_CONFIG, RW_READER))); 24567754SJeff.Bonwick@Sun.COM 24577754SJeff.Bonwick@Sun.COM ASSERT(list_link_active(&vd->vdev_config_dirty_node)); 24587754SJeff.Bonwick@Sun.COM list_remove(&spa->spa_config_dirty_list, vd); 24597754SJeff.Bonwick@Sun.COM } 24607754SJeff.Bonwick@Sun.COM 24617754SJeff.Bonwick@Sun.COM /* 24627754SJeff.Bonwick@Sun.COM * Mark a top-level vdev's state as dirty, so that the next pass of 24637754SJeff.Bonwick@Sun.COM * spa_sync() can convert this into vdev_config_dirty(). We distinguish 24647754SJeff.Bonwick@Sun.COM * the state changes from larger config changes because they require 24657754SJeff.Bonwick@Sun.COM * much less locking, and are often needed for administrative actions. 24667754SJeff.Bonwick@Sun.COM */ 24677754SJeff.Bonwick@Sun.COM void 24687754SJeff.Bonwick@Sun.COM vdev_state_dirty(vdev_t *vd) 24697754SJeff.Bonwick@Sun.COM { 24707754SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 24717754SJeff.Bonwick@Sun.COM 24727754SJeff.Bonwick@Sun.COM ASSERT(vd == vd->vdev_top); 24731601Sbonwick 24747754SJeff.Bonwick@Sun.COM /* 24757754SJeff.Bonwick@Sun.COM * The state list is protected by the SCL_STATE lock. The caller 24767754SJeff.Bonwick@Sun.COM * must either hold SCL_STATE as writer, or must be the sync thread 24777754SJeff.Bonwick@Sun.COM * (which holds SCL_STATE as reader). There's only one sync thread, 24787754SJeff.Bonwick@Sun.COM * so this is sufficient to ensure mutual exclusion. 
24797754SJeff.Bonwick@Sun.COM */ 24807754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 24817754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 24827754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_STATE, RW_READER))); 24837754SJeff.Bonwick@Sun.COM 24847754SJeff.Bonwick@Sun.COM if (!list_link_active(&vd->vdev_state_dirty_node)) 24857754SJeff.Bonwick@Sun.COM list_insert_head(&spa->spa_state_dirty_list, vd); 24867754SJeff.Bonwick@Sun.COM } 24877754SJeff.Bonwick@Sun.COM 24887754SJeff.Bonwick@Sun.COM void 24897754SJeff.Bonwick@Sun.COM vdev_state_clean(vdev_t *vd) 24907754SJeff.Bonwick@Sun.COM { 24917754SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 24927754SJeff.Bonwick@Sun.COM 24937754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 24947754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 24957754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_STATE, RW_READER))); 24967754SJeff.Bonwick@Sun.COM 24977754SJeff.Bonwick@Sun.COM ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 24987754SJeff.Bonwick@Sun.COM list_remove(&spa->spa_state_dirty_list, vd); 2499789Sahrens } 2500789Sahrens 25016523Sek110237 /* 25026523Sek110237 * Propagate vdev state up from children to parent. 25036523Sek110237 */ 25041775Sbillm void 25051775Sbillm vdev_propagate_state(vdev_t *vd) 25061775Sbillm { 25078241SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 25088241SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 25091775Sbillm int degraded = 0, faulted = 0; 25101775Sbillm int corrupted = 0; 25111775Sbillm vdev_t *child; 25121775Sbillm 25134451Seschrock if (vd->vdev_children > 0) { 25149816SGeorge.Wilson@Sun.COM for (int c = 0; c < vd->vdev_children; c++) { 25154451Seschrock child = vd->vdev_child[c]; 25166976Seschrock 25177754SJeff.Bonwick@Sun.COM if (!vdev_readable(child) || 25188241SJeff.Bonwick@Sun.COM (!vdev_writeable(child) && spa_writeable(spa))) { 25196976Seschrock /* 25206976Seschrock * Root special: if there is a top-level log 25216976Seschrock * device, treat the root vdev as if it were 25226976Seschrock * degraded. 25236976Seschrock */ 25246976Seschrock if (child->vdev_islog && vd == rvd) 25256976Seschrock degraded++; 25266976Seschrock else 25276976Seschrock faulted++; 25286976Seschrock } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { 25294451Seschrock degraded++; 25306976Seschrock } 25314451Seschrock 25324451Seschrock if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) 25334451Seschrock corrupted++; 25344451Seschrock } 25351775Sbillm 25364451Seschrock vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); 25374451Seschrock 25384451Seschrock /* 25397754SJeff.Bonwick@Sun.COM * Root special: if there is a top-level vdev that cannot be 25404451Seschrock * opened due to corrupted metadata, then propagate the root 25414451Seschrock * vdev's aux state as 'corrupt' rather than 'insufficient 25424451Seschrock * replicas'. 25434451Seschrock */ 25444451Seschrock if (corrupted && vd == rvd && 25454451Seschrock rvd->vdev_state == VDEV_STATE_CANT_OPEN) 25464451Seschrock vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, 25474451Seschrock VDEV_AUX_CORRUPT_DATA); 25481775Sbillm } 25491775Sbillm 25506976Seschrock if (vd->vdev_parent) 25514451Seschrock vdev_propagate_state(vd->vdev_parent); 25521775Sbillm } 25531775Sbillm 2554789Sahrens /* 25551544Seschrock * Set a vdev's state. 
If this is during an open, we don't update the parent 25561544Seschrock * state, because we're in the process of opening children depth-first. 25571544Seschrock * Otherwise, we propagate the change to the parent. 25581544Seschrock * 25591544Seschrock * If this routine places a device in a faulted state, an appropriate ereport is 25601544Seschrock * generated. 2561789Sahrens */ 2562789Sahrens void 25631544Seschrock vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) 2564789Sahrens { 25651986Seschrock uint64_t save_state; 25666643Seschrock spa_t *spa = vd->vdev_spa; 25671544Seschrock 25681544Seschrock if (state == vd->vdev_state) { 25691544Seschrock vd->vdev_stat.vs_aux = aux; 2570789Sahrens return; 25711544Seschrock } 25721544Seschrock 25731986Seschrock save_state = vd->vdev_state; 2574789Sahrens 2575789Sahrens vd->vdev_state = state; 2576789Sahrens vd->vdev_stat.vs_aux = aux; 2577789Sahrens 25784451Seschrock /* 25794451Seschrock * If we are setting the vdev state to anything but an open state, then 25804451Seschrock * always close the underlying device. Otherwise, we keep accessible 25814451Seschrock * but invalid devices open forever. We don't call vdev_close() itself, 25824451Seschrock * because that implies some extra checks (offline, etc) that we don't 25834451Seschrock * want here. This is limited to leaf devices, because otherwise 25844451Seschrock * closing the device will affect other children. 25854451Seschrock */ 25867780SJeff.Bonwick@Sun.COM if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf) 25874451Seschrock vd->vdev_ops->vdev_op_close(vd); 25884451Seschrock 25894451Seschrock if (vd->vdev_removed && 25904451Seschrock state == VDEV_STATE_CANT_OPEN && 25914451Seschrock (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { 25924451Seschrock /* 25934451Seschrock * If the previous state is set to VDEV_STATE_REMOVED, then this 25944451Seschrock * device was previously marked removed and someone attempted to 25954451Seschrock * reopen it. If this failed due to a nonexistent device, then 25964451Seschrock * keep the device in the REMOVED state. We also let this be if 25974451Seschrock * it is one of our special test online cases, which is only 25984451Seschrock * attempting to online the device and shouldn't generate an FMA 25994451Seschrock * fault. 26004451Seschrock */ 26014451Seschrock vd->vdev_state = VDEV_STATE_REMOVED; 26024451Seschrock vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 26034451Seschrock } else if (state == VDEV_STATE_REMOVED) { 26044451Seschrock /* 26054451Seschrock * Indicate to the ZFS DE that this device has been removed, and 26064451Seschrock * any recent errors should be ignored. 26074451Seschrock */ 26086643Seschrock zfs_post_remove(spa, vd); 26094451Seschrock vd->vdev_removed = B_TRUE; 26104451Seschrock } else if (state == VDEV_STATE_CANT_OPEN) { 26111544Seschrock /* 26121544Seschrock * If we fail to open a vdev during an import, we mark it as 26131544Seschrock * "not available", which signifies that it was never there to 26141544Seschrock * begin with. Failure to open such a device is not considered 26151544Seschrock * an error. 26161544Seschrock */ 26176643Seschrock if (spa->spa_load_state == SPA_LOAD_IMPORT && 26181986Seschrock vd->vdev_ops->vdev_op_leaf) 26191986Seschrock vd->vdev_not_present = 1; 26201986Seschrock 26211986Seschrock /* 26221986Seschrock * Post the appropriate ereport. 
		/*
		 * Post the appropriate ereport.  If the 'prevstate' field is
		 * set to something other than VDEV_STATE_UNKNOWN, it indicates
		 * that this is part of a vdev_reopen().  In this case, we don't
		 * want to post the ereport if the device was already in the
		 * CANT_OPEN state beforehand.
		 *
		 * If the 'checkremove' flag is set, then this is an attempt to
		 * online the device in response to an insertion event.  If we
		 * hit this case, then we have detected an insertion event for a
		 * faulted or offline device that wasn't in the removed state.
		 * In this scenario, we don't post an ereport because we are
		 * about to replace the device, or attempt an online with
		 * vdev_forcefault, which will generate the fault for us.
		 */
		if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
		    !vd->vdev_not_present && !vd->vdev_checkremove &&
		    vd != spa->spa_root_vdev) {
			const char *class;

			switch (aux) {
			case VDEV_AUX_OPEN_FAILED:
				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
				break;
			case VDEV_AUX_CORRUPT_DATA:
				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
				break;
			case VDEV_AUX_NO_REPLICAS:
				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
				break;
			case VDEV_AUX_BAD_GUID_SUM:
				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
				break;
			case VDEV_AUX_TOO_SMALL:
				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
				break;
			case VDEV_AUX_BAD_LABEL:
				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
				break;
			case VDEV_AUX_IO_FAILURE:
				class = FM_EREPORT_ZFS_IO_FAILURE;
				break;
			default:
				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
			}

			zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
		}

		/* Erase any notion of persistent removed state */
		vd->vdev_removed = B_FALSE;
	} else {
		vd->vdev_removed = B_FALSE;
	}

	if (!isopen && vd->vdev_parent)
		vdev_propagate_state(vd->vdev_parent);
}

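/*
 * Minimal usage sketch (illustration only; the function name and the
 * ZFS_VDEV_EXAMPLES guard are hypothetical and not part of this file).
 * It shows a common open-failure call into vdev_set_state() with
 * isopen == B_TRUE, so parent state is not propagated while children
 * are still being opened depth-first.
 */
#ifdef ZFS_VDEV_EXAMPLES
static void
vdev_open_failed_example(vdev_t *vd)
{
	/*
	 * Mark the leaf as unopenable; vdev_set_state() may post an
	 * FM_EREPORT_ZFS_DEVICE_OPEN_FAILED ereport, subject to the
	 * suppression rules above (import, reopen, checkremove, etc.).
	 */
	vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
	    VDEV_AUX_OPEN_FAILED);
}
#endif	/* ZFS_VDEV_EXAMPLES */
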
/*
 * Check the vdev configuration to ensure that it's capable of supporting
 * a root pool.  Currently, we do not support RAID-Z or partial configuration.
 * In addition, only a single top-level vdev is allowed and none of the leaves
 * can be wholedisks.
 */
boolean_t
vdev_is_bootable(vdev_t *vd)
{
	if (!vd->vdev_ops->vdev_op_leaf) {
		char *vdev_type = vd->vdev_ops->vdev_op_type;

		if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
		    vd->vdev_children > 1) {
			return (B_FALSE);
		} else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
		    strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
			return (B_FALSE);
		}
	} else if (vd->vdev_wholedisk == 1) {
		return (B_FALSE);
	}

	for (int c = 0; c < vd->vdev_children; c++) {
		if (!vdev_is_bootable(vd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Recursively restore each leaf vdev's persistent 'offline' state from the
 * given vdev config nvlist; used to recover log device state at pool load.
 */
void
vdev_load_log_state(vdev_t *vd, nvlist_t *nv)
{
	uint_t children;
	nvlist_t **child;
	uint64_t val;
	spa_t *spa = vd->vdev_spa;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (int c = 0; c < children; c++)
			vdev_load_log_state(vd->vdev_child[c], child[c]);
	}

	if (vd->vdev_ops->vdev_op_leaf && nvlist_lookup_uint64(nv,
	    ZPOOL_CONFIG_OFFLINE, &val) == 0 && val) {

		/*
		 * It would be nice to call vdev_offline()
		 * directly but the pool isn't fully loaded and
		 * the txg threads have not been started yet.
		 */
		spa_config_enter(spa, SCL_STATE_ALL, FTAG, RW_WRITER);
		vd->vdev_offline = val;
		vdev_reopen(vd->vdev_top);
		spa_config_exit(spa, SCL_STATE_ALL, FTAG);
	}
}

/*
 * Expand a vdev if possible.
 */
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
	ASSERT(vd->vdev_top == vd);
	ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) {
		VERIFY(vdev_metaslab_init(vd, txg) == 0);
		vdev_config_dirty(vd);
	}
}
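
/*
 * Illustration only (hypothetical helper, not part of the original file):
 * the expansion test in vdev_expand() simply asks whether the current asize
 * now spans more whole metaslabs than have been initialized.  For example,
 * assuming a 128MB metaslab (ms_shift == 27), growing from 100 to 120
 * metaslabs' worth of space makes the comparison true and triggers
 * vdev_metaslab_init() for the new range.
 */
#ifdef ZFS_VDEV_EXAMPLES
static boolean_t
vdev_needs_expand_example(vdev_t *vd)
{
	/* Are there more addressable metaslabs than currently allocated? */
	return ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count);
}
#endif	/* ZFS_VDEV_EXAMPLES */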