/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	&vdev_hole_ops,
	NULL
};

/* maximum scrub/resilver I/O queue per leaf vdev */
int zfs_scrub_limit = 10;

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

/*
 * Get the minimum allocatable size.  We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab.  This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	/*
	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
	 * so each child must provide at least 1/Nth of its asize.
	 */
	if (pvd->vdev_ops == &vdev_raidz_ops)
		return (pvd->vdev_min_asize / pvd->vdev_children);

	return (pvd->vdev_min_asize);
}
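/*
 * Worked example (illustrative): for a 4-child raidz top-level vdev whose
 * vdev_min_asize is 4G, each leaf must supply at least 4G / 4 = 1G of
 * allocatable space, so a replacement disk only has to cover its 1/Nth
 * share of the top-level vdev rather than match the original's size.
 */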

void
vdev_set_min_asize(vdev_t *vd)
{
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
}
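/*
 * Note: vdev_guid_sum is the recursive sum of a vdev's own guid and those
 * of all its descendants, so the walk above (and its mirror image in
 * vdev_remove_child() below) keeps every ancestor's sum current.  This is
 * what lets vdev_free() assert that a fully-pruned vdev's guid sum has
 * collapsed back to just its own guid, and what import-time code can check
 * against the guid sum recorded on disk to detect missing devices.
 */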

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (int c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (int c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
	}

	if (guid == 0 && ops != &vdev_hole_ops) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			while (guid == 0 || spa_guid_exists(guid, 0))
				guid = spa_get_random(-1ULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			while (guid == 0 ||
			    spa_guid_exists(spa_guid(spa), guid))
				guid = spa_get_random(-1ULL);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_ishole = (ops == &vdev_hole_ops);

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
		    &vd->vdev_dtl_lock);
	}
	txg_list_create(&vd->vdev_ms_list,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EINVAL);

	if ((ops = vdev_getops(type)) == NULL)
		return (EINVAL);

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
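	/*
	 * Every label-carrying alloctype handled below (LOAD, SPARE, L2CACHE,
	 * ROOTPOOL) insists on an explicit ZPOOL_CONFIG_GUID; for any other
	 * alloctype guid stays 0, and vdev_alloc_common() mints a fresh one.
	 */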
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (EINVAL);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (EINVAL);

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (ENOTSUP);

	if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
		return (ENOTSUP);

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			/*
			 * Currently, we support up to 3 parity devices.
			 */
			if (nparity == 0 || nparity > 3)
				return (EINVAL);
			/*
			 * Previous versions could only support 1 or 2 parity
			 * devices.
			 */
			if (nparity > 1 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ2)
				return (ENOTSUP);
			if (nparity > 2 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ3)
				return (ENOTSUP);
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
				return (EINVAL);
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);
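	/*
	 * Summary of the version gating above:
	 *
	 *	nparity		minimum SPA version required
	 *	1		any version that supports raidz
	 *	2		SPA_VERSION_RAIDZ2
	 *	3		SPA_VERSION_RAIDZ3
	 */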

	vd = vdev_alloc_common(spa, id, guid, ops);

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * Retrieve the vdev creation time.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
	    &vd->vdev_crtxg);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_smo.smo_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		/*
		 * When importing a pool, we want to ignore the persistent fault
		 * state, as the diagnosis made on another system may not be
		 * valid in the current context.
		 */
		if (spa->spa_load_state == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

void
vdev_free(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));

	/*
	 * Free all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd == vd->vdev_top)
		vdev_metaslab_fini(vd);

	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);
	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_unload(&vd->vdev_dtl[t]);
		space_map_destroy(&vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_min_asize = cvd->vdev_min_asize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;
	mvd->vdev_crtxg = cvd->vdev_crtxg;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
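	/*
	 * Concretely: adding guid_delta below leaves cvd->vdev_guid equal to
	 * mvd->vdev_guid, so after (say) detaching one half of a mirror the
	 * surviving child answers to the guid that existing labels already
	 * record for this top-level vdev.
	 */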
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	metaslab_class_t *mc;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	/*
	 * This vdev is not being allocated from yet or is a hole.
	 */
	if (vd->vdev_ms_shift == 0)
		return (0);

	ASSERT(!vd->vdev_ishole);

	/*
	 * Compute the raidz-deflation ratio.  Note, we hard-code
	 * in 128k (1 << 17) because it is the current "typical" blocksize.
	 * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change,
	 * or we will inconsistently account for existing bp's.
	 */
	vd->vdev_deflate_ratio = (1 << 17) /
	    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
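	/*
	 * Worked example (illustrative): on a plain disk,
	 * vdev_psize_to_asize(vd, 128K) == 128K, i.e. 256 512-byte sectors,
	 * so vdev_deflate_ratio == 131072 / 256 == 512.  A raidz vdev
	 * inflates asize with parity sectors, giving a larger denominator
	 * and hence a ratio below 512.
	 */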

	ASSERT(oldc <= newc);

	if (vd->vdev_islog)
		mc = spa->spa_log_class;
	else
		mc = spa->spa_normal_class;

	if (vd->vdev_mg == NULL)
		vd->vdev_mg = metaslab_group_create(mc, vd);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (m = oldc; m < newc; m++) {
		space_map_obj_t smo = { 0, 0, 0 };
		if (txg == 0) {
			uint64_t object = 0;
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error)
				return (error);
			if (object != 0) {
				dmu_buf_t *db;
				error = dmu_bonus_hold(mos, object, FTAG, &db);
				if (error)
					return (error);
				ASSERT3U(db->db_size, >=, sizeof (smo));
				bcopy(db->db_data, &smo, sizeof (smo));
				ASSERT3U(smo.smo_object, ==, object);
				dmu_buf_rele(db, FTAG);
			}
		}
		vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
		    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
	}

	return (0);
}

void
vdev_metaslab_fini(vdev_t *vd)
{
	uint64_t m;
	uint64_t count = vd->vdev_ms_count;

	if (vd->vdev_ms != NULL) {
		for (m = 0; m < count; m++)
			if (vd->vdev_ms[m] != NULL)
				metaslab_fini(vd->vdev_ms[m]);
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
	}
}

typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset,
			    zio->io_size, zio->io_data,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			zio_buf_free(zio->io_data, zio->io_size);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		zio_buf_free(zio->io_data, zio->io_size);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, 0, 0);
			zio->io_error = ENXIO;
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		while ((pio = zio_walk_parents(zio)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = ENXIO;

		kmem_free(vps, sizeof (*vps));
	}
}

/*
 * Determine whether this device is accessible by reading and writing
 * to several known locations: the pad regions of each vdev label
 * but the first (which we leave alone in case it contains a VTOC).
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time.  All zios that want to probe
	 * this vdev will become parents of the probe io.
	 */
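	/*
	 * For example, if three in-flight zios all hit errors on this vdev,
	 * the first one creates vd->vdev_probe_zio below and the other two
	 * simply attach themselves to it via zio_add_child(); all three then
	 * share the single verdict delivered by vdev_probe_done().
	 */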
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_TRYHARD;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE.  This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O.  That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad2)),
		    VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}

static void
vdev_open_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_open_thread = curthread;
	vd->vdev_open_error = vdev_open(vd);
	vd->vdev_open_thread = NULL;
}

boolean_t
vdev_uses_zvols(vdev_t *vd)
{
	if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
	    strlen(ZVOL_DIR)) == 0)
		return (B_TRUE);
	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_uses_zvols(vd->vdev_child[c]))
			return (B_TRUE);
	return (B_FALSE);
}

void
vdev_open_children(vdev_t *vd)
{
	taskq_t *tq;
	int children = vd->vdev_children;

	/*
	 * In order to handle pools on top of zvols, do the opens
	 * in a single thread so that the same thread holds the
	 * spa_namespace_lock.
	 */
	if (vdev_uses_zvols(vd)) {
		for (int c = 0; c < children; c++)
			vd->vdev_child[c]->vdev_open_error =
			    vdev_open(vd->vdev_child[c]);
		return;
	}
	tq = taskq_create("vdev_open", children, minclsyspri,
	    children, children, TASKQ_PREPOPULATE);

	for (int c = 0; c < children; c++)
		VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
		    TQ_SLEEP) != NULL);

	taskq_destroy(tq);
}
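/*
 * Note: vdev_open() below accepts callers that either hold SCL_STATE_ALL
 * as writer or run on one of these taskq threads; the vdev_open_thread
 * marker set in vdev_open_child() is what satisfies that assertion, which
 * lets the children be opened in parallel without each re-taking the
 * config lock.
 */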
/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	uint64_t osize = 0;
	uint64_t asize, psize;
	uint64_t ashift = 0;

	ASSERT(vd->vdev_open_thread == curthread ||
	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	vd->vdev_cant_read = B_FALSE;
	vd->vdev_cant_write = B_FALSE;
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    VDEV_AUX_ERR_EXCEEDED);
		return (ENXIO);
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (ENXIO);
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);

	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, NULL, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    vd->vdev_stat.vs_aux);
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vd->vdev_state = VDEV_STATE_HEALTHY;
	}

	/*
	 * For hole or missing vdevs we just return success.
	 */
	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
		return (0);

	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}
	}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = 0;
		asize = osize;
	}

	vd->vdev_psize = psize;

	/*
	 * Make sure the allocatable size hasn't shrunk.
	 */
	if (asize < vd->vdev_min_asize) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LABEL);
		return (EINVAL);
	}

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For testing purposes, a higher ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
	} else {
		/*
		 * Make sure the alignment requirement hasn't increased.
		 */
		if (ashift > vd->vdev_top->vdev_ashift) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}
	}

	/*
	 * If all children are healthy and the asize has increased,
	 * then we've experienced dynamic LUN growth.  If automatic
	 * expansion is enabled then use the additional space.
	 */
	if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize &&
	    (vd->vdev_expanding || spa->spa_autoexpand))
		vd->vdev_asize = asize;

	vdev_set_min_asize(vd);

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
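	/*
	 * Note: with a NULL parent zio, vdev_probe() returns its root probe
	 * zio (see above), so zio_wait() makes the health check synchronous
	 * here: the open fails with VDEV_AUX_IO_FAILURE rather than
	 * succeeding on a device we cannot actually read or write.
	 */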
	if (vd->vdev_ops->vdev_op_leaf &&
	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_IO_FAILURE);
		return (error);
	}

	/*
	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
	 * resilver.  But don't do this if we are doing a reopen for a scrub,
	 * since this would just restart the scrub we are already doing.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
	    vdev_resilver_needed(vd, NULL, NULL))
		spa_async_request(spa, SPA_ASYNC_RESILVER);

	return (0);
}

/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *label;
	uint64_t guid, top_guid;
	uint64_t state;

	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c]) != 0)
			return (EBADF);

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {

		if ((label = vdev_label_read_config(vd)) == NULL) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
		    &guid) != 0 || guid != spa_guid(spa)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		/*
		 * If this vdev just became a top-level vdev because its
		 * sibling was detached, it will have adopted the parent's
		 * vdev guid -- but the label may or may not be on disk yet.
		 * Fortunately, either version of the label will have the
		 * same top guid, so if we're a top-level vdev, we can
		 * safely compare to that instead.
		 */
		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
		    &guid) != 0 ||
		    nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
		    &top_guid) != 0 ||
		    (vd->vdev_guid != guid &&
		    (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		nvlist_free(label);

		/*
		 * If spa->spa_load_verbatim is true, no need to check the
		 * state of the pool.
		 */
		if (!spa->spa_load_verbatim &&
		    spa->spa_load_state == SPA_LOAD_OPEN &&
		    state != POOL_STATE_ACTIVE)
			return (EBADF);

		/*
		 * If we were able to open and validate a vdev that was
		 * previously marked permanently unavailable, clear that state
		 * now.
		 */
		if (vd->vdev_not_present)
			vd->vdev_not_present = 0;
	}

	return (0);
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vd->vdev_ops->vdev_op_close(vd);

	vdev_cache_purge(vd);

	/*
	 * We record the previous state before we close it, so that if we are
	 * doing a reopen(), we don't generate FMA ereports if we notice that
	 * it's still faulted.
13351986Seschrock */ 13361986Seschrock vd->vdev_prevstate = vd->vdev_state; 13371986Seschrock 1338789Sahrens if (vd->vdev_offline) 1339789Sahrens vd->vdev_state = VDEV_STATE_OFFLINE; 1340789Sahrens else 1341789Sahrens vd->vdev_state = VDEV_STATE_CLOSED; 13421544Seschrock vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 1343789Sahrens } 1344789Sahrens 1345789Sahrens void 13461544Seschrock vdev_reopen(vdev_t *vd) 1347789Sahrens { 13481544Seschrock spa_t *spa = vd->vdev_spa; 1349789Sahrens 13507754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 13511544Seschrock 1352789Sahrens vdev_close(vd); 1353789Sahrens (void) vdev_open(vd); 1354789Sahrens 1355789Sahrens /* 13563377Seschrock * Call vdev_validate() here to make sure we have the same device. 13573377Seschrock * Otherwise, a device with an invalid label could be successfully 13583377Seschrock * opened in response to vdev_reopen(). 13593377Seschrock */ 13606643Seschrock if (vd->vdev_aux) { 13616643Seschrock (void) vdev_validate_aux(vd); 13627754SJeff.Bonwick@Sun.COM if (vdev_readable(vd) && vdev_writeable(vd) && 13639425SEric.Schrock@Sun.COM vd->vdev_aux == &spa->spa_l2cache && 13649816SGeorge.Wilson@Sun.COM !l2arc_vdev_present(vd)) 13659816SGeorge.Wilson@Sun.COM l2arc_add_vdev(spa, vd); 13666643Seschrock } else { 13676643Seschrock (void) vdev_validate(vd); 13686643Seschrock } 13693377Seschrock 13703377Seschrock /* 13714451Seschrock * Reassess parent vdev's health. 1372789Sahrens */ 13734451Seschrock vdev_propagate_state(vd); 1374789Sahrens } 1375789Sahrens 1376789Sahrens int 13772082Seschrock vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) 1378789Sahrens { 1379789Sahrens int error; 1380789Sahrens 1381789Sahrens /* 1382789Sahrens * Normally, partial opens (e.g. of a mirror) are allowed. 1383789Sahrens * For a create, however, we want to fail the request if 1384789Sahrens * there are any components we can't open. 1385789Sahrens */ 1386789Sahrens error = vdev_open(vd); 1387789Sahrens 1388789Sahrens if (error || vd->vdev_state != VDEV_STATE_HEALTHY) { 1389789Sahrens vdev_close(vd); 1390789Sahrens return (error ? error : ENXIO); 1391789Sahrens } 1392789Sahrens 1393789Sahrens /* 1394789Sahrens * Recursively initialize all labels. 1395789Sahrens */ 13963377Seschrock if ((error = vdev_label_init(vd, txg, isreplacing ? 13973377Seschrock VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) { 1398789Sahrens vdev_close(vd); 1399789Sahrens return (error); 1400789Sahrens } 1401789Sahrens 1402789Sahrens return (0); 1403789Sahrens } 1404789Sahrens 14051585Sbonwick void 14069816SGeorge.Wilson@Sun.COM vdev_metaslab_set_size(vdev_t *vd) 1407789Sahrens { 1408789Sahrens /* 1409789Sahrens * Aim for roughly 200 metaslabs per vdev. 
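 *
 * A worked example (editorial sketch, not normative): for a 1 TB
 * (2^40 byte) vdev, highbit(2^40 / 200) == 33, so each metaslab
 * spans 2^33 bytes and the vdev gets 2^40 / 2^33 == 128 metaslabs.
 * Because highbit() rounds the per-metaslab size up to a power of
 * two, the count always lands between 100 and 200; the MAX() below
 * merely keeps a metaslab from ever being smaller than one
 * maximum-sized block (SPA_MAXBLOCKSHIFT).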
1410789Sahrens */
1411789Sahrens vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
1412789Sahrens vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
1413789Sahrens }
1414789Sahrens 
1415789Sahrens void
14161732Sbonwick vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
1417789Sahrens {
14181732Sbonwick ASSERT(vd == vd->vdev_top);
1419*10594SGeorge.Wilson@Sun.COM ASSERT(!vd->vdev_ishole);
14201732Sbonwick ASSERT(ISP2(flags));
1421789Sahrens 
14221732Sbonwick if (flags & VDD_METASLAB)
14231732Sbonwick (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
14241732Sbonwick 
14251732Sbonwick if (flags & VDD_DTL)
14261732Sbonwick (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
14271732Sbonwick 
14281732Sbonwick (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
1429789Sahrens }
1430789Sahrens 
14318241SJeff.Bonwick@Sun.COM /*
14328241SJeff.Bonwick@Sun.COM * DTLs.
14338241SJeff.Bonwick@Sun.COM *
14348241SJeff.Bonwick@Sun.COM * A vdev's DTL (dirty time log) is the set of transaction groups for which
14358241SJeff.Bonwick@Sun.COM * the vdev has less than perfect replication. There are four kinds of DTL:
14368241SJeff.Bonwick@Sun.COM *
14378241SJeff.Bonwick@Sun.COM * DTL_MISSING: txgs for which the vdev has no valid copies of the data
14388241SJeff.Bonwick@Sun.COM *
14398241SJeff.Bonwick@Sun.COM * DTL_PARTIAL: txgs for which data is available, but not fully replicated
14408241SJeff.Bonwick@Sun.COM *
14418241SJeff.Bonwick@Sun.COM * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
14428241SJeff.Bonwick@Sun.COM * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
14438241SJeff.Bonwick@Sun.COM * txgs that was scrubbed.
14448241SJeff.Bonwick@Sun.COM *
14458241SJeff.Bonwick@Sun.COM * DTL_OUTAGE: txgs which cannot currently be read, whether due to
14468241SJeff.Bonwick@Sun.COM * persistent errors or just some device being offline.
14478241SJeff.Bonwick@Sun.COM * Unlike the other three, the DTL_OUTAGE map is not generally
14488241SJeff.Bonwick@Sun.COM * maintained; it's only computed when needed, typically to
14498241SJeff.Bonwick@Sun.COM * determine whether a device can be detached.
14508241SJeff.Bonwick@Sun.COM *
14518241SJeff.Bonwick@Sun.COM * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
14528241SJeff.Bonwick@Sun.COM * either has the data or it doesn't.
14538241SJeff.Bonwick@Sun.COM *
14548241SJeff.Bonwick@Sun.COM * For interior vdevs such as mirror and RAID-Z the picture is more complex.
14558241SJeff.Bonwick@Sun.COM * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
14568241SJeff.Bonwick@Sun.COM * if any child is less than fully replicated, then so is its parent.
14578241SJeff.Bonwick@Sun.COM * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
14588241SJeff.Bonwick@Sun.COM * comprising only those txgs which appear in more than 'maxfaults' children;
14598241SJeff.Bonwick@Sun.COM * those are the txgs we don't have enough replication to read. For example,
14608241SJeff.Bonwick@Sun.COM * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
14618241SJeff.Bonwick@Sun.COM * thus, its DTL_MISSING consists of the set of txgs that appear in more than
14628241SJeff.Bonwick@Sun.COM * two child DTL_MISSING maps.
14638241SJeff.Bonwick@Sun.COM *
14648241SJeff.Bonwick@Sun.COM * It should be clear from the above that to compute the DTLs and outage maps
14658241SJeff.Bonwick@Sun.COM * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
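 *
 * To make the 'maxfaults' rule concrete (illustrative numbers only):
 * in a 5-wide double-parity RAID-Z top-level vdev, a txg present in
 * exactly two children's DTL_MISSING maps is still reconstructable,
 * so it lands only in the parent's DTL_PARTIAL; the same txg in
 * three children's maps lands in the parent's DTL_MISSING as well.
 * Both facts are recomputable from the children's maps alone.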
14668241SJeff.Bonwick@Sun.COM * Therefore, that is all we keep on disk. When loading the pool, or after 14678241SJeff.Bonwick@Sun.COM * a configuration change, we generate all other DTLs from first principles. 14688241SJeff.Bonwick@Sun.COM */ 1469789Sahrens void 14708241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 1471789Sahrens { 14728241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[t]; 14738241SJeff.Bonwick@Sun.COM 14748241SJeff.Bonwick@Sun.COM ASSERT(t < DTL_TYPES); 14758241SJeff.Bonwick@Sun.COM ASSERT(vd != vd->vdev_spa->spa_root_vdev); 14768241SJeff.Bonwick@Sun.COM 1477789Sahrens mutex_enter(sm->sm_lock); 1478789Sahrens if (!space_map_contains(sm, txg, size)) 1479789Sahrens space_map_add(sm, txg, size); 1480789Sahrens mutex_exit(sm->sm_lock); 1481789Sahrens } 1482789Sahrens 14838241SJeff.Bonwick@Sun.COM boolean_t 14848241SJeff.Bonwick@Sun.COM vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 1485789Sahrens { 14868241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[t]; 14878241SJeff.Bonwick@Sun.COM boolean_t dirty = B_FALSE; 14888241SJeff.Bonwick@Sun.COM 14898241SJeff.Bonwick@Sun.COM ASSERT(t < DTL_TYPES); 14908241SJeff.Bonwick@Sun.COM ASSERT(vd != vd->vdev_spa->spa_root_vdev); 1491789Sahrens 1492789Sahrens mutex_enter(sm->sm_lock); 14938241SJeff.Bonwick@Sun.COM if (sm->sm_space != 0) 14948241SJeff.Bonwick@Sun.COM dirty = space_map_contains(sm, txg, size); 1495789Sahrens mutex_exit(sm->sm_lock); 1496789Sahrens 1497789Sahrens return (dirty); 1498789Sahrens } 1499789Sahrens 15008241SJeff.Bonwick@Sun.COM boolean_t 15018241SJeff.Bonwick@Sun.COM vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t) 15028241SJeff.Bonwick@Sun.COM { 15038241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[t]; 15048241SJeff.Bonwick@Sun.COM boolean_t empty; 15058241SJeff.Bonwick@Sun.COM 15068241SJeff.Bonwick@Sun.COM mutex_enter(sm->sm_lock); 15078241SJeff.Bonwick@Sun.COM empty = (sm->sm_space == 0); 15088241SJeff.Bonwick@Sun.COM mutex_exit(sm->sm_lock); 15098241SJeff.Bonwick@Sun.COM 15108241SJeff.Bonwick@Sun.COM return (empty); 15118241SJeff.Bonwick@Sun.COM } 15128241SJeff.Bonwick@Sun.COM 1513789Sahrens /* 1514789Sahrens * Reassess DTLs after a config change or scrub completion. 1515789Sahrens */ 1516789Sahrens void 1517789Sahrens vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done) 1518789Sahrens { 15191544Seschrock spa_t *spa = vd->vdev_spa; 15208241SJeff.Bonwick@Sun.COM avl_tree_t reftree; 15218241SJeff.Bonwick@Sun.COM int minref; 15228241SJeff.Bonwick@Sun.COM 15238241SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 15248241SJeff.Bonwick@Sun.COM 15258241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) 15268241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(vd->vdev_child[c], txg, 15278241SJeff.Bonwick@Sun.COM scrub_txg, scrub_done); 15288241SJeff.Bonwick@Sun.COM 1529*10594SGeorge.Wilson@Sun.COM if (vd == spa->spa_root_vdev || vd->vdev_ishole) 15308241SJeff.Bonwick@Sun.COM return; 15318241SJeff.Bonwick@Sun.COM 15328241SJeff.Bonwick@Sun.COM if (vd->vdev_ops->vdev_op_leaf) { 1533789Sahrens mutex_enter(&vd->vdev_dtl_lock); 15347046Sahrens if (scrub_txg != 0 && 15357046Sahrens (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) { 15367046Sahrens /* XXX should check scrub_done? */ 15377046Sahrens /* 15387046Sahrens * We completed a scrub up to scrub_txg. 
If we
15397046Sahrens * did it without rebooting, then the scrub dtl
15407046Sahrens * will be valid, so excise the old region and
15417046Sahrens * fold in the scrub dtl. Otherwise (i.e. if there
15427046Sahrens * was an error), leave the dtl as-is.
15438241SJeff.Bonwick@Sun.COM *
15448241SJeff.Bonwick@Sun.COM * There's a little trick here: to excise the beginning
15458241SJeff.Bonwick@Sun.COM * of the DTL_MISSING map, we put it into a reference
15468241SJeff.Bonwick@Sun.COM * tree and then add a segment with refcnt -1 that
15478241SJeff.Bonwick@Sun.COM * covers the range [0, scrub_txg). This means
15488241SJeff.Bonwick@Sun.COM * that each txg in that range has refcnt -1 or 0.
15498241SJeff.Bonwick@Sun.COM * We then add DTL_SCRUB with a refcnt of 2, so that
15508241SJeff.Bonwick@Sun.COM * entries in the range [0, scrub_txg) will have a
15518241SJeff.Bonwick@Sun.COM * positive refcnt -- either 1 or 2. We then convert
15528241SJeff.Bonwick@Sun.COM * the reference tree into the new DTL_MISSING map,
 * keeping only those txgs whose refcnt is at least 1 (the
 * minref argument passed to space_map_ref_generate_map() below).
15537046Sahrens */
15548241SJeff.Bonwick@Sun.COM space_map_ref_create(&reftree);
15558241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree,
15568241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_MISSING], 1);
15578241SJeff.Bonwick@Sun.COM space_map_ref_add_seg(&reftree, 0, scrub_txg, -1);
15588241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree,
15598241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_SCRUB], 2);
15608241SJeff.Bonwick@Sun.COM space_map_ref_generate_map(&reftree,
15618241SJeff.Bonwick@Sun.COM &vd->vdev_dtl[DTL_MISSING], 1);
15628241SJeff.Bonwick@Sun.COM space_map_ref_destroy(&reftree);
1563789Sahrens }
15648241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
15658241SJeff.Bonwick@Sun.COM space_map_walk(&vd->vdev_dtl[DTL_MISSING],
15668241SJeff.Bonwick@Sun.COM space_map_add, &vd->vdev_dtl[DTL_PARTIAL]);
1567789Sahrens if (scrub_done)
15688241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
15698241SJeff.Bonwick@Sun.COM space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
15708241SJeff.Bonwick@Sun.COM if (!vdev_readable(vd))
15718241SJeff.Bonwick@Sun.COM space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
15728241SJeff.Bonwick@Sun.COM else
15738241SJeff.Bonwick@Sun.COM space_map_walk(&vd->vdev_dtl[DTL_MISSING],
15748241SJeff.Bonwick@Sun.COM space_map_add, &vd->vdev_dtl[DTL_OUTAGE]);
1575789Sahrens mutex_exit(&vd->vdev_dtl_lock);
15767046Sahrens 
15771732Sbonwick if (txg != 0)
15781732Sbonwick vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
1579789Sahrens return;
1580789Sahrens }
1581789Sahrens 
1582789Sahrens mutex_enter(&vd->vdev_dtl_lock);
15838241SJeff.Bonwick@Sun.COM for (int t = 0; t < DTL_TYPES; t++) {
15848241SJeff.Bonwick@Sun.COM if (t == DTL_SCRUB)
15858241SJeff.Bonwick@Sun.COM continue; /* leaf vdevs only */
15868241SJeff.Bonwick@Sun.COM if (t == DTL_PARTIAL)
15878241SJeff.Bonwick@Sun.COM minref = 1; /* i.e.
non-zero */ 15888241SJeff.Bonwick@Sun.COM else if (vd->vdev_nparity != 0) 15898241SJeff.Bonwick@Sun.COM minref = vd->vdev_nparity + 1; /* RAID-Z */ 15908241SJeff.Bonwick@Sun.COM else 15918241SJeff.Bonwick@Sun.COM minref = vd->vdev_children; /* any kind of mirror */ 15928241SJeff.Bonwick@Sun.COM space_map_ref_create(&reftree); 15938241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) { 15948241SJeff.Bonwick@Sun.COM vdev_t *cvd = vd->vdev_child[c]; 15958241SJeff.Bonwick@Sun.COM mutex_enter(&cvd->vdev_dtl_lock); 15968241SJeff.Bonwick@Sun.COM space_map_ref_add_map(&reftree, &cvd->vdev_dtl[t], 1); 15978241SJeff.Bonwick@Sun.COM mutex_exit(&cvd->vdev_dtl_lock); 15988241SJeff.Bonwick@Sun.COM } 15998241SJeff.Bonwick@Sun.COM space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref); 16008241SJeff.Bonwick@Sun.COM space_map_ref_destroy(&reftree); 16018241SJeff.Bonwick@Sun.COM } 1602789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1603789Sahrens } 1604789Sahrens 1605789Sahrens static int 1606789Sahrens vdev_dtl_load(vdev_t *vd) 1607789Sahrens { 1608789Sahrens spa_t *spa = vd->vdev_spa; 16098241SJeff.Bonwick@Sun.COM space_map_obj_t *smo = &vd->vdev_dtl_smo; 16101732Sbonwick objset_t *mos = spa->spa_meta_objset; 1611789Sahrens dmu_buf_t *db; 1612789Sahrens int error; 1613789Sahrens 1614789Sahrens ASSERT(vd->vdev_children == 0); 1615789Sahrens 1616789Sahrens if (smo->smo_object == 0) 1617789Sahrens return (0); 1618789Sahrens 1619*10594SGeorge.Wilson@Sun.COM ASSERT(!vd->vdev_ishole); 1620*10594SGeorge.Wilson@Sun.COM 16211732Sbonwick if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0) 16221544Seschrock return (error); 16231732Sbonwick 16244944Smaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 16254944Smaybee bcopy(db->db_data, smo, sizeof (*smo)); 16261544Seschrock dmu_buf_rele(db, FTAG); 1627789Sahrens 1628789Sahrens mutex_enter(&vd->vdev_dtl_lock); 16298241SJeff.Bonwick@Sun.COM error = space_map_load(&vd->vdev_dtl[DTL_MISSING], 16308241SJeff.Bonwick@Sun.COM NULL, SM_ALLOC, smo, mos); 1631789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1632789Sahrens 1633789Sahrens return (error); 1634789Sahrens } 1635789Sahrens 1636789Sahrens void 1637789Sahrens vdev_dtl_sync(vdev_t *vd, uint64_t txg) 1638789Sahrens { 1639789Sahrens spa_t *spa = vd->vdev_spa; 16408241SJeff.Bonwick@Sun.COM space_map_obj_t *smo = &vd->vdev_dtl_smo; 16418241SJeff.Bonwick@Sun.COM space_map_t *sm = &vd->vdev_dtl[DTL_MISSING]; 16421732Sbonwick objset_t *mos = spa->spa_meta_objset; 1643789Sahrens space_map_t smsync; 1644789Sahrens kmutex_t smlock; 1645789Sahrens dmu_buf_t *db; 1646789Sahrens dmu_tx_t *tx; 1647789Sahrens 1648*10594SGeorge.Wilson@Sun.COM ASSERT(!vd->vdev_ishole); 1649*10594SGeorge.Wilson@Sun.COM 1650789Sahrens tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 1651789Sahrens 1652789Sahrens if (vd->vdev_detached) { 1653789Sahrens if (smo->smo_object != 0) { 16541732Sbonwick int err = dmu_object_free(mos, smo->smo_object, tx); 1655789Sahrens ASSERT3U(err, ==, 0); 1656789Sahrens smo->smo_object = 0; 1657789Sahrens } 1658789Sahrens dmu_tx_commit(tx); 1659789Sahrens return; 1660789Sahrens } 1661789Sahrens 1662789Sahrens if (smo->smo_object == 0) { 1663789Sahrens ASSERT(smo->smo_objsize == 0); 1664789Sahrens ASSERT(smo->smo_alloc == 0); 16651732Sbonwick smo->smo_object = dmu_object_alloc(mos, 1666789Sahrens DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT, 1667789Sahrens DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx); 1668789Sahrens ASSERT(smo->smo_object != 0); 1669789Sahrens vdev_config_dirty(vd->vdev_top); 
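		/*
		 * Editorial note (sketch): the space map object number
		 * allocated above is recorded in the vdev's portion of the
		 * on-disk config (cf. vdev_config_generate()), which is why
		 * the top-level vdev is marked config-dirty here -- the new
		 * DTL object number must go out with the next config sync.
		 */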
1670789Sahrens } 1671789Sahrens 1672789Sahrens mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL); 1673789Sahrens 1674789Sahrens space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift, 1675789Sahrens &smlock); 1676789Sahrens 1677789Sahrens mutex_enter(&smlock); 1678789Sahrens 1679789Sahrens mutex_enter(&vd->vdev_dtl_lock); 16801732Sbonwick space_map_walk(sm, space_map_add, &smsync); 1681789Sahrens mutex_exit(&vd->vdev_dtl_lock); 1682789Sahrens 16831732Sbonwick space_map_truncate(smo, mos, tx); 16841732Sbonwick space_map_sync(&smsync, SM_ALLOC, smo, mos, tx); 1685789Sahrens 1686789Sahrens space_map_destroy(&smsync); 1687789Sahrens 1688789Sahrens mutex_exit(&smlock); 1689789Sahrens mutex_destroy(&smlock); 1690789Sahrens 16911732Sbonwick VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)); 1692789Sahrens dmu_buf_will_dirty(db, tx); 16934944Smaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 16944944Smaybee bcopy(smo, db->db_data, sizeof (*smo)); 16951544Seschrock dmu_buf_rele(db, FTAG); 1696789Sahrens 1697789Sahrens dmu_tx_commit(tx); 1698789Sahrens } 1699789Sahrens 17007046Sahrens /* 17018241SJeff.Bonwick@Sun.COM * Determine whether the specified vdev can be offlined/detached/removed 17028241SJeff.Bonwick@Sun.COM * without losing data. 17038241SJeff.Bonwick@Sun.COM */ 17048241SJeff.Bonwick@Sun.COM boolean_t 17058241SJeff.Bonwick@Sun.COM vdev_dtl_required(vdev_t *vd) 17068241SJeff.Bonwick@Sun.COM { 17078241SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 17088241SJeff.Bonwick@Sun.COM vdev_t *tvd = vd->vdev_top; 17098241SJeff.Bonwick@Sun.COM uint8_t cant_read = vd->vdev_cant_read; 17108241SJeff.Bonwick@Sun.COM boolean_t required; 17118241SJeff.Bonwick@Sun.COM 17128241SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 17138241SJeff.Bonwick@Sun.COM 17148241SJeff.Bonwick@Sun.COM if (vd == spa->spa_root_vdev || vd == tvd) 17158241SJeff.Bonwick@Sun.COM return (B_TRUE); 17168241SJeff.Bonwick@Sun.COM 17178241SJeff.Bonwick@Sun.COM /* 17188241SJeff.Bonwick@Sun.COM * Temporarily mark the device as unreadable, and then determine 17198241SJeff.Bonwick@Sun.COM * whether this results in any DTL outages in the top-level vdev. 17208241SJeff.Bonwick@Sun.COM * If not, we can safely offline/detach/remove the device. 17218241SJeff.Bonwick@Sun.COM */ 17228241SJeff.Bonwick@Sun.COM vd->vdev_cant_read = B_TRUE; 17238241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 17248241SJeff.Bonwick@Sun.COM required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 17258241SJeff.Bonwick@Sun.COM vd->vdev_cant_read = cant_read; 17268241SJeff.Bonwick@Sun.COM vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 17278241SJeff.Bonwick@Sun.COM 17288241SJeff.Bonwick@Sun.COM return (required); 17298241SJeff.Bonwick@Sun.COM } 17308241SJeff.Bonwick@Sun.COM 17318241SJeff.Bonwick@Sun.COM /* 17327046Sahrens * Determine if resilver is needed, and if so the txg range. 
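 *
 * A minimal caller sketch (mirroring the use at the end of
 * vdev_open() above):
 *
 *	uint64_t mintxg, maxtxg;
 *
 *	if (vdev_resilver_needed(vd, &mintxg, &maxtxg))
 *		spa_async_request(spa, SPA_ASYNC_RESILVER);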
17337046Sahrens */
17347046Sahrens boolean_t
17357046Sahrens vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
17367046Sahrens {
17377046Sahrens boolean_t needed = B_FALSE;
17387046Sahrens uint64_t thismin = UINT64_MAX;
17397046Sahrens uint64_t thismax = 0;
17407046Sahrens 
17417046Sahrens if (vd->vdev_children == 0) {
17427046Sahrens mutex_enter(&vd->vdev_dtl_lock);
17438241SJeff.Bonwick@Sun.COM if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 &&
17448241SJeff.Bonwick@Sun.COM vdev_writeable(vd)) {
17457046Sahrens space_seg_t *ss;
17467046Sahrens 
17478241SJeff.Bonwick@Sun.COM ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root);
17487046Sahrens thismin = ss->ss_start - 1;
17498241SJeff.Bonwick@Sun.COM ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root);
17507046Sahrens thismax = ss->ss_end;
17517046Sahrens needed = B_TRUE;
17527046Sahrens }
17537046Sahrens mutex_exit(&vd->vdev_dtl_lock);
17547046Sahrens } else {
17558241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) {
17567046Sahrens vdev_t *cvd = vd->vdev_child[c];
17577046Sahrens uint64_t cmin, cmax;
17587046Sahrens 
17597046Sahrens if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
17607046Sahrens thismin = MIN(thismin, cmin);
17617046Sahrens thismax = MAX(thismax, cmax);
17627046Sahrens needed = B_TRUE;
17637046Sahrens }
17647046Sahrens }
17657046Sahrens }
17667046Sahrens 
17677046Sahrens if (needed && minp) {
17687046Sahrens *minp = thismin;
17697046Sahrens *maxp = thismax;
17707046Sahrens }
17717046Sahrens return (needed);
17727046Sahrens }
17737046Sahrens 
17741986Seschrock void
17751544Seschrock vdev_load(vdev_t *vd)
1776789Sahrens {
1777789Sahrens /*
1778789Sahrens * Recursively load all children.
1779789Sahrens */
17808241SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++)
17811986Seschrock vdev_load(vd->vdev_child[c]);
1782789Sahrens 
1783789Sahrens /*
17841585Sbonwick * If this is a top-level vdev, initialize its metaslabs.
1785789Sahrens */
1786*10594SGeorge.Wilson@Sun.COM if (vd == vd->vdev_top && !vd->vdev_ishole &&
17871986Seschrock (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
17881986Seschrock vdev_metaslab_init(vd, 0) != 0))
17891986Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
17901986Seschrock VDEV_AUX_CORRUPT_DATA);
1791789Sahrens 
1792789Sahrens /*
1793789Sahrens * If this is a leaf vdev, load its DTL.
1794789Sahrens */
17951986Seschrock if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
17961986Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
17971986Seschrock VDEV_AUX_CORRUPT_DATA);
1798789Sahrens }
1799789Sahrens 
18002082Seschrock /*
18015450Sbrendan * The special vdev case is used for hot spares and l2cache devices. Its
18025450Sbrendan * sole purpose is to set the vdev state for the associated vdev. To do this,
18035450Sbrendan * we make sure that we can open the underlying device, then try to read the
18045450Sbrendan * label, and make sure that the label is sane and that it hasn't been
18055450Sbrendan * repurposed to another pool.
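 *
 * Concretely (summarizing the checks below): if the device is
 * readable at all, its label must parse, carry a ZPOOL_CONFIG_VERSION
 * no newer than SPA_VERSION, carry a ZPOOL_CONFIG_GUID matching
 * vd->vdev_guid, and record a ZPOOL_CONFIG_POOL_STATE; any other
 * outcome is treated as corrupt data.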
18062082Seschrock */ 18072082Seschrock int 18085450Sbrendan vdev_validate_aux(vdev_t *vd) 18092082Seschrock { 18102082Seschrock nvlist_t *label; 18112082Seschrock uint64_t guid, version; 18122082Seschrock uint64_t state; 18132082Seschrock 18147754SJeff.Bonwick@Sun.COM if (!vdev_readable(vd)) 18156643Seschrock return (0); 18166643Seschrock 18172082Seschrock if ((label = vdev_label_read_config(vd)) == NULL) { 18182082Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 18192082Seschrock VDEV_AUX_CORRUPT_DATA); 18202082Seschrock return (-1); 18212082Seschrock } 18222082Seschrock 18232082Seschrock if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 || 18244577Sahrens version > SPA_VERSION || 18252082Seschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 || 18262082Seschrock guid != vd->vdev_guid || 18272082Seschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) { 18282082Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 18292082Seschrock VDEV_AUX_CORRUPT_DATA); 18302082Seschrock nvlist_free(label); 18312082Seschrock return (-1); 18322082Seschrock } 18332082Seschrock 18342082Seschrock /* 18352082Seschrock * We don't actually check the pool state here. If it's in fact in 18362082Seschrock * use by another pool, we update this fact on the fly when requested. 18372082Seschrock */ 18382082Seschrock nvlist_free(label); 18392082Seschrock return (0); 18402082Seschrock } 18412082Seschrock 1842789Sahrens void 1843*10594SGeorge.Wilson@Sun.COM vdev_remove(vdev_t *vd, uint64_t txg) 1844*10594SGeorge.Wilson@Sun.COM { 1845*10594SGeorge.Wilson@Sun.COM spa_t *spa = vd->vdev_spa; 1846*10594SGeorge.Wilson@Sun.COM objset_t *mos = spa->spa_meta_objset; 1847*10594SGeorge.Wilson@Sun.COM dmu_tx_t *tx; 1848*10594SGeorge.Wilson@Sun.COM 1849*10594SGeorge.Wilson@Sun.COM tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 1850*10594SGeorge.Wilson@Sun.COM 1851*10594SGeorge.Wilson@Sun.COM if (vd->vdev_dtl_smo.smo_object) { 1852*10594SGeorge.Wilson@Sun.COM ASSERT3U(vd->vdev_dtl_smo.smo_alloc, ==, 0); 1853*10594SGeorge.Wilson@Sun.COM (void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx); 1854*10594SGeorge.Wilson@Sun.COM vd->vdev_dtl_smo.smo_object = 0; 1855*10594SGeorge.Wilson@Sun.COM } 1856*10594SGeorge.Wilson@Sun.COM 1857*10594SGeorge.Wilson@Sun.COM if (vd->vdev_ms != NULL) { 1858*10594SGeorge.Wilson@Sun.COM for (int m = 0; m < vd->vdev_ms_count; m++) { 1859*10594SGeorge.Wilson@Sun.COM metaslab_t *msp = vd->vdev_ms[m]; 1860*10594SGeorge.Wilson@Sun.COM 1861*10594SGeorge.Wilson@Sun.COM if (msp == NULL || msp->ms_smo.smo_object == 0) 1862*10594SGeorge.Wilson@Sun.COM continue; 1863*10594SGeorge.Wilson@Sun.COM 1864*10594SGeorge.Wilson@Sun.COM ASSERT3U(msp->ms_smo.smo_alloc, ==, 0); 1865*10594SGeorge.Wilson@Sun.COM (void) dmu_object_free(mos, msp->ms_smo.smo_object, tx); 1866*10594SGeorge.Wilson@Sun.COM msp->ms_smo.smo_object = 0; 1867*10594SGeorge.Wilson@Sun.COM } 1868*10594SGeorge.Wilson@Sun.COM } 1869*10594SGeorge.Wilson@Sun.COM 1870*10594SGeorge.Wilson@Sun.COM if (vd->vdev_ms_array) { 1871*10594SGeorge.Wilson@Sun.COM (void) dmu_object_free(mos, vd->vdev_ms_array, tx); 1872*10594SGeorge.Wilson@Sun.COM vd->vdev_ms_array = 0; 1873*10594SGeorge.Wilson@Sun.COM vd->vdev_ms_shift = 0; 1874*10594SGeorge.Wilson@Sun.COM } 1875*10594SGeorge.Wilson@Sun.COM dmu_tx_commit(tx); 1876*10594SGeorge.Wilson@Sun.COM } 1877*10594SGeorge.Wilson@Sun.COM 1878*10594SGeorge.Wilson@Sun.COM void 1879789Sahrens vdev_sync_done(vdev_t *vd, uint64_t txg) 1880789Sahrens { 1881789Sahrens 
metaslab_t *msp;
1882789Sahrens 
1883*10594SGeorge.Wilson@Sun.COM ASSERT(!vd->vdev_ishole);
1884*10594SGeorge.Wilson@Sun.COM 
1885789Sahrens while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
1886789Sahrens metaslab_sync_done(msp, txg);
1887789Sahrens }
1888789Sahrens 
1889789Sahrens void
1890789Sahrens vdev_sync(vdev_t *vd, uint64_t txg)
1891789Sahrens {
1892789Sahrens spa_t *spa = vd->vdev_spa;
1893789Sahrens vdev_t *lvd;
1894789Sahrens metaslab_t *msp;
18951732Sbonwick dmu_tx_t *tx;
1896789Sahrens 
1897*10594SGeorge.Wilson@Sun.COM ASSERT(!vd->vdev_ishole);
1898*10594SGeorge.Wilson@Sun.COM 
18991732Sbonwick if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
19001732Sbonwick ASSERT(vd == vd->vdev_top);
19011732Sbonwick tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
19021732Sbonwick vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
19031732Sbonwick DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
19041732Sbonwick ASSERT(vd->vdev_ms_array != 0);
19051732Sbonwick vdev_config_dirty(vd);
19061732Sbonwick dmu_tx_commit(tx);
19071732Sbonwick }
1908789Sahrens 
1909*10594SGeorge.Wilson@Sun.COM if (vd->vdev_removing)
1910*10594SGeorge.Wilson@Sun.COM vdev_remove(vd, txg);
1911*10594SGeorge.Wilson@Sun.COM 
19121732Sbonwick while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
1913789Sahrens metaslab_sync(msp, txg);
19141732Sbonwick (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
19151732Sbonwick }
1916789Sahrens 
1917789Sahrens while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
1918789Sahrens vdev_dtl_sync(lvd, txg);
1919789Sahrens 
1920789Sahrens (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
1921789Sahrens }
1922789Sahrens 
1923789Sahrens uint64_t
1924789Sahrens vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
1925789Sahrens {
1926789Sahrens return (vd->vdev_ops->vdev_op_asize(vd, psize));
1927789Sahrens }
1928789Sahrens 
19294451Seschrock /*
19304451Seschrock * Mark the given vdev faulted. A faulted vdev behaves as if the device could
19314451Seschrock * not be opened, and no I/O is attempted.
19324451Seschrock */
1933789Sahrens int
19344451Seschrock vdev_fault(spa_t *spa, uint64_t guid)
19354451Seschrock {
19366643Seschrock vdev_t *vd;
19374451Seschrock 
19387754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa);
19394451Seschrock 
19406643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
19417754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV));
19427754SJeff.Bonwick@Sun.COM 
19434451Seschrock if (!vd->vdev_ops->vdev_op_leaf)
19447754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
19454451Seschrock 
19464451Seschrock /*
19474451Seschrock * Faulted state takes precedence over degraded.
19484451Seschrock */
19494451Seschrock vd->vdev_faulted = 1ULL;
19504451Seschrock vd->vdev_degraded = 0ULL;
19517754SJeff.Bonwick@Sun.COM vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED);
19524451Seschrock 
19534451Seschrock /*
19548123SDavid.Marker@sun.com * If marking the vdev as faulted causes the top-level vdev to become
19554451Seschrock * unavailable, then back off and simply mark the vdev as degraded
19564451Seschrock * instead.
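 *
 * For example (sketch): faulting the last healthy side of a two-way
 * mirror would leave the top-level vdev unopenable, so the request
 * is demoted -- the leaf ends up merely DEGRADED, provided a reopen
 * shows it is still readable.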
19574451Seschrock */ 19586643Seschrock if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) { 19594451Seschrock vd->vdev_degraded = 1ULL; 19604451Seschrock vd->vdev_faulted = 0ULL; 19614451Seschrock 19624451Seschrock /* 19634451Seschrock * If we reopen the device and it's not dead, only then do we 19644451Seschrock * mark it degraded. 19654451Seschrock */ 19664451Seschrock vdev_reopen(vd); 19674451Seschrock 19685329Sgw25295 if (vdev_readable(vd)) { 19694451Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 19704451Seschrock VDEV_AUX_ERR_EXCEEDED); 19714451Seschrock } 19724451Seschrock } 19734451Seschrock 19747754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 19754451Seschrock } 19764451Seschrock 19774451Seschrock /* 19784451Seschrock * Mark the given vdev degraded. A degraded vdev is purely an indication to the 19794451Seschrock * user that something is wrong. The vdev continues to operate as normal as far 19804451Seschrock * as I/O is concerned. 19814451Seschrock */ 19824451Seschrock int 19834451Seschrock vdev_degrade(spa_t *spa, uint64_t guid) 19844451Seschrock { 19856643Seschrock vdev_t *vd; 19864451Seschrock 19877754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa); 19884451Seschrock 19896643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 19907754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV)); 19917754SJeff.Bonwick@Sun.COM 19924451Seschrock if (!vd->vdev_ops->vdev_op_leaf) 19937754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 19944451Seschrock 19954451Seschrock /* 19964451Seschrock * If the vdev is already faulted, then don't do anything. 19974451Seschrock */ 19987754SJeff.Bonwick@Sun.COM if (vd->vdev_faulted || vd->vdev_degraded) 19997754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, 0)); 20004451Seschrock 20014451Seschrock vd->vdev_degraded = 1ULL; 20024451Seschrock if (!vdev_is_dead(vd)) 20034451Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 20044451Seschrock VDEV_AUX_ERR_EXCEEDED); 20054451Seschrock 20067754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 20074451Seschrock } 20084451Seschrock 20094451Seschrock /* 20104451Seschrock * Online the given vdev. If 'unspare' is set, it implies two things. First, 20114451Seschrock * any attached spare device should be detached when the device finishes 20124451Seschrock * resilvering. Second, the online should be treated like a 'test' online case, 20134451Seschrock * so no FMA events are generated if the device fails to open. 
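 *
 * Like vdev_fault() and vdev_degrade() above, this entry point runs
 * under the vdev-state lock; the common shape is (sketch only):
 *
 *	spa_vdev_state_enter(spa);
 *	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
 *		return (spa_vdev_state_exit(spa, NULL, ENODEV));
 *	if (!vd->vdev_ops->vdev_op_leaf)
 *		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
 *	... mutate the leaf's state, vdev_reopen() as needed ...
 *	return (spa_vdev_state_exit(spa, vd, 0));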
20144451Seschrock */ 20154451Seschrock int 20167754SJeff.Bonwick@Sun.COM vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 2017789Sahrens { 20189816SGeorge.Wilson@Sun.COM vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; 2019789Sahrens 20207754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa); 20211485Slling 20226643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 20237754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2024789Sahrens 20251585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 20267754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 20271585Sbonwick 20289816SGeorge.Wilson@Sun.COM tvd = vd->vdev_top; 2029789Sahrens vd->vdev_offline = B_FALSE; 20301485Slling vd->vdev_tmpoffline = B_FALSE; 20317754SJeff.Bonwick@Sun.COM vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 20327754SJeff.Bonwick@Sun.COM vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 20339816SGeorge.Wilson@Sun.COM 20349816SGeorge.Wilson@Sun.COM /* XXX - L2ARC 1.0 does not support expansion */ 20359816SGeorge.Wilson@Sun.COM if (!vd->vdev_aux) { 20369816SGeorge.Wilson@Sun.COM for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 20379816SGeorge.Wilson@Sun.COM pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND); 20389816SGeorge.Wilson@Sun.COM } 20399816SGeorge.Wilson@Sun.COM 20409816SGeorge.Wilson@Sun.COM vdev_reopen(tvd); 20414451Seschrock vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 20424451Seschrock 20439816SGeorge.Wilson@Sun.COM if (!vd->vdev_aux) { 20449816SGeorge.Wilson@Sun.COM for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 20459816SGeorge.Wilson@Sun.COM pvd->vdev_expanding = B_FALSE; 20469816SGeorge.Wilson@Sun.COM } 20479816SGeorge.Wilson@Sun.COM 20484451Seschrock if (newstate) 20494451Seschrock *newstate = vd->vdev_state; 20504451Seschrock if ((flags & ZFS_ONLINE_UNSPARE) && 20514451Seschrock !vdev_is_dead(vd) && vd->vdev_parent && 20524451Seschrock vd->vdev_parent->vdev_ops == &vdev_spare_ops && 20534451Seschrock vd->vdev_parent->vdev_child[0] == vd) 20544451Seschrock vd->vdev_unspare = B_TRUE; 2055789Sahrens 20569816SGeorge.Wilson@Sun.COM if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { 20579816SGeorge.Wilson@Sun.COM 20589816SGeorge.Wilson@Sun.COM /* XXX - L2ARC 1.0 does not support expansion */ 20599816SGeorge.Wilson@Sun.COM if (vd->vdev_aux) 20609816SGeorge.Wilson@Sun.COM return (spa_vdev_state_exit(spa, vd, ENOTSUP)); 20619816SGeorge.Wilson@Sun.COM spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 20629816SGeorge.Wilson@Sun.COM } 20638241SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 2064789Sahrens } 2065789Sahrens 2066789Sahrens int 20674451Seschrock vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 2068789Sahrens { 20699701SGeorge.Wilson@Sun.COM vdev_t *vd, *tvd; 20709701SGeorge.Wilson@Sun.COM int error; 2071789Sahrens 20727754SJeff.Bonwick@Sun.COM spa_vdev_state_enter(spa); 2073789Sahrens 20746643Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 20757754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2076789Sahrens 20771585Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 20787754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 20791585Sbonwick 20809701SGeorge.Wilson@Sun.COM tvd = vd->vdev_top; 20819701SGeorge.Wilson@Sun.COM 2082789Sahrens /* 20831732Sbonwick * If the device isn't already offline, try to offline it. 
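 *
 * (For a log device, the tail of this function goes further: after
 * dropping the state lock it walks the datasets via
 * dmu_objset_find(..., zil_vdev_offline, ...) and syncs the txg so
 * zil_sync() can reclaim the "stubby" block.)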
2084789Sahrens */ 20851732Sbonwick if (!vd->vdev_offline) { 20861732Sbonwick /* 20878241SJeff.Bonwick@Sun.COM * If this device has the only valid copy of some data, 20889701SGeorge.Wilson@Sun.COM * don't allow it to be offlined. Log devices are always 20899701SGeorge.Wilson@Sun.COM * expendable. 20901732Sbonwick */ 20919701SGeorge.Wilson@Sun.COM if (!tvd->vdev_islog && vd->vdev_aux == NULL && 20929701SGeorge.Wilson@Sun.COM vdev_dtl_required(vd)) 20937754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, EBUSY)); 2094789Sahrens 20951732Sbonwick /* 20961732Sbonwick * Offline this device and reopen its top-level vdev. 20979701SGeorge.Wilson@Sun.COM * If the top-level vdev is a log device then just offline 20989701SGeorge.Wilson@Sun.COM * it. Otherwise, if this action results in the top-level 20999701SGeorge.Wilson@Sun.COM * vdev becoming unusable, undo it and fail the request. 21001732Sbonwick */ 21011732Sbonwick vd->vdev_offline = B_TRUE; 21029701SGeorge.Wilson@Sun.COM vdev_reopen(tvd); 21039701SGeorge.Wilson@Sun.COM 21049701SGeorge.Wilson@Sun.COM if (!tvd->vdev_islog && vd->vdev_aux == NULL && 21059701SGeorge.Wilson@Sun.COM vdev_is_dead(tvd)) { 21061732Sbonwick vd->vdev_offline = B_FALSE; 21079701SGeorge.Wilson@Sun.COM vdev_reopen(tvd); 21087754SJeff.Bonwick@Sun.COM return (spa_vdev_state_exit(spa, NULL, EBUSY)); 21091732Sbonwick } 2110789Sahrens } 2111789Sahrens 21127754SJeff.Bonwick@Sun.COM vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 21131732Sbonwick 21149701SGeorge.Wilson@Sun.COM if (!tvd->vdev_islog || !vdev_is_dead(tvd)) 21159701SGeorge.Wilson@Sun.COM return (spa_vdev_state_exit(spa, vd, 0)); 21169701SGeorge.Wilson@Sun.COM 21179701SGeorge.Wilson@Sun.COM (void) spa_vdev_state_exit(spa, vd, 0); 21189701SGeorge.Wilson@Sun.COM 21199701SGeorge.Wilson@Sun.COM error = dmu_objset_find(spa_name(spa), zil_vdev_offline, 21209701SGeorge.Wilson@Sun.COM NULL, DS_FIND_CHILDREN); 21219701SGeorge.Wilson@Sun.COM if (error) { 21229701SGeorge.Wilson@Sun.COM (void) vdev_online(spa, guid, 0, NULL); 21239701SGeorge.Wilson@Sun.COM return (error); 21249701SGeorge.Wilson@Sun.COM } 21259701SGeorge.Wilson@Sun.COM /* 21269701SGeorge.Wilson@Sun.COM * If we successfully offlined the log device then we need to 21279701SGeorge.Wilson@Sun.COM * sync out the current txg so that the "stubby" block can be 21289701SGeorge.Wilson@Sun.COM * removed by zil_sync(). 21299701SGeorge.Wilson@Sun.COM */ 21309701SGeorge.Wilson@Sun.COM txg_wait_synced(spa->spa_dsl_pool, 0); 21319701SGeorge.Wilson@Sun.COM return (0); 2132789Sahrens } 2133789Sahrens 21341544Seschrock /* 21351544Seschrock * Clear the error counts associated with this vdev. Unlike vdev_online() and 21361544Seschrock * vdev_offline(), we assume the spa config is locked. We also clear all 21371544Seschrock * children. If 'vd' is NULL, then the user wants to clear all vdevs. 
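 *
 * A hypothetical caller sketch (the actual ioctl plumbing lives
 * outside this file):
 *
 *	spa_vdev_state_enter(spa);
 *	vdev_clear(spa, vd);	(pass vd == NULL to clear every vdev)
 *	(void) spa_vdev_state_exit(spa, NULL, 0);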
21381544Seschrock */ 21391544Seschrock void 21407754SJeff.Bonwick@Sun.COM vdev_clear(spa_t *spa, vdev_t *vd) 2141789Sahrens { 21427754SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 21437754SJeff.Bonwick@Sun.COM 21447754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2145789Sahrens 21461544Seschrock if (vd == NULL) 21477754SJeff.Bonwick@Sun.COM vd = rvd; 2148789Sahrens 21491544Seschrock vd->vdev_stat.vs_read_errors = 0; 21501544Seschrock vd->vdev_stat.vs_write_errors = 0; 21511544Seschrock vd->vdev_stat.vs_checksum_errors = 0; 2152789Sahrens 21537754SJeff.Bonwick@Sun.COM for (int c = 0; c < vd->vdev_children; c++) 21547754SJeff.Bonwick@Sun.COM vdev_clear(spa, vd->vdev_child[c]); 21554451Seschrock 21564451Seschrock /* 21576959Sek110237 * If we're in the FAULTED state or have experienced failed I/O, then 21586959Sek110237 * clear the persistent state and attempt to reopen the device. We 21596959Sek110237 * also mark the vdev config dirty, so that the new faulted state is 21606959Sek110237 * written out to disk. 21614451Seschrock */ 21627754SJeff.Bonwick@Sun.COM if (vd->vdev_faulted || vd->vdev_degraded || 21637754SJeff.Bonwick@Sun.COM !vdev_readable(vd) || !vdev_writeable(vd)) { 21646959Sek110237 21654451Seschrock vd->vdev_faulted = vd->vdev_degraded = 0; 21667754SJeff.Bonwick@Sun.COM vd->vdev_cant_read = B_FALSE; 21677754SJeff.Bonwick@Sun.COM vd->vdev_cant_write = B_FALSE; 21687754SJeff.Bonwick@Sun.COM 21694451Seschrock vdev_reopen(vd); 21704451Seschrock 21717754SJeff.Bonwick@Sun.COM if (vd != rvd) 21727754SJeff.Bonwick@Sun.COM vdev_state_dirty(vd->vdev_top); 21737754SJeff.Bonwick@Sun.COM 21747754SJeff.Bonwick@Sun.COM if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) 21754808Sek110237 spa_async_request(spa, SPA_ASYNC_RESILVER); 21764451Seschrock 21774451Seschrock spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR); 21784451Seschrock } 2179789Sahrens } 2180789Sahrens 21817754SJeff.Bonwick@Sun.COM boolean_t 21827754SJeff.Bonwick@Sun.COM vdev_is_dead(vdev_t *vd) 21835329Sgw25295 { 2184*10594SGeorge.Wilson@Sun.COM /* 2185*10594SGeorge.Wilson@Sun.COM * Holes and missing devices are always considered "dead". 2186*10594SGeorge.Wilson@Sun.COM * This simplifies the code since we don't have to check for 2187*10594SGeorge.Wilson@Sun.COM * these types of devices in the various code paths. 2188*10594SGeorge.Wilson@Sun.COM * Instead we rely on the fact that we skip over dead devices 2189*10594SGeorge.Wilson@Sun.COM * before issuing I/O to them. 
2190*10594SGeorge.Wilson@Sun.COM */ 2191*10594SGeorge.Wilson@Sun.COM return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole || 2192*10594SGeorge.Wilson@Sun.COM vd->vdev_ops == &vdev_missing_ops); 21935329Sgw25295 } 21945329Sgw25295 21957754SJeff.Bonwick@Sun.COM boolean_t 21967754SJeff.Bonwick@Sun.COM vdev_readable(vdev_t *vd) 2197789Sahrens { 21987754SJeff.Bonwick@Sun.COM return (!vdev_is_dead(vd) && !vd->vdev_cant_read); 2199789Sahrens } 2200789Sahrens 22017754SJeff.Bonwick@Sun.COM boolean_t 22027754SJeff.Bonwick@Sun.COM vdev_writeable(vdev_t *vd) 2203789Sahrens { 22047754SJeff.Bonwick@Sun.COM return (!vdev_is_dead(vd) && !vd->vdev_cant_write); 22057754SJeff.Bonwick@Sun.COM } 2206789Sahrens 22077754SJeff.Bonwick@Sun.COM boolean_t 22087980SGeorge.Wilson@Sun.COM vdev_allocatable(vdev_t *vd) 22097980SGeorge.Wilson@Sun.COM { 22108241SJeff.Bonwick@Sun.COM uint64_t state = vd->vdev_state; 22118241SJeff.Bonwick@Sun.COM 22127980SGeorge.Wilson@Sun.COM /* 22138241SJeff.Bonwick@Sun.COM * We currently allow allocations from vdevs which may be in the 22147980SGeorge.Wilson@Sun.COM * process of reopening (i.e. VDEV_STATE_CLOSED). If the device 22157980SGeorge.Wilson@Sun.COM * fails to reopen then we'll catch it later when we're holding 22168241SJeff.Bonwick@Sun.COM * the proper locks. Note that we have to get the vdev state 22178241SJeff.Bonwick@Sun.COM * in a local variable because although it changes atomically, 22188241SJeff.Bonwick@Sun.COM * we're asking two separate questions about it. 22197980SGeorge.Wilson@Sun.COM */ 22208241SJeff.Bonwick@Sun.COM return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) && 2221*10594SGeorge.Wilson@Sun.COM !vd->vdev_cant_write && !vd->vdev_ishole && !vd->vdev_removing); 22227980SGeorge.Wilson@Sun.COM } 22237980SGeorge.Wilson@Sun.COM 22247980SGeorge.Wilson@Sun.COM boolean_t 22257754SJeff.Bonwick@Sun.COM vdev_accessible(vdev_t *vd, zio_t *zio) 22267754SJeff.Bonwick@Sun.COM { 22277754SJeff.Bonwick@Sun.COM ASSERT(zio->io_vd == vd); 2228789Sahrens 22297754SJeff.Bonwick@Sun.COM if (vdev_is_dead(vd) || vd->vdev_remove_wanted) 22307754SJeff.Bonwick@Sun.COM return (B_FALSE); 2231789Sahrens 22327754SJeff.Bonwick@Sun.COM if (zio->io_type == ZIO_TYPE_READ) 22337754SJeff.Bonwick@Sun.COM return (!vd->vdev_cant_read); 2234789Sahrens 22357754SJeff.Bonwick@Sun.COM if (zio->io_type == ZIO_TYPE_WRITE) 22367754SJeff.Bonwick@Sun.COM return (!vd->vdev_cant_write); 22377754SJeff.Bonwick@Sun.COM 22387754SJeff.Bonwick@Sun.COM return (B_TRUE); 2239789Sahrens } 2240789Sahrens 2241789Sahrens /* 2242789Sahrens * Get statistics for the given vdev. 2243789Sahrens */ 2244789Sahrens void 2245789Sahrens vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) 2246789Sahrens { 2247789Sahrens vdev_t *rvd = vd->vdev_spa->spa_root_vdev; 2248789Sahrens 2249789Sahrens mutex_enter(&vd->vdev_stat_lock); 2250789Sahrens bcopy(&vd->vdev_stat, vs, sizeof (*vs)); 22517046Sahrens vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors; 2252789Sahrens vs->vs_timestamp = gethrtime() - vs->vs_timestamp; 2253789Sahrens vs->vs_state = vd->vdev_state; 22549816SGeorge.Wilson@Sun.COM vs->vs_rsize = vdev_get_min_asize(vd); 22559816SGeorge.Wilson@Sun.COM if (vd->vdev_ops->vdev_op_leaf) 22569816SGeorge.Wilson@Sun.COM vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; 2257789Sahrens mutex_exit(&vd->vdev_stat_lock); 2258789Sahrens 2259789Sahrens /* 2260789Sahrens * If we're getting stats on the root vdev, aggregate the I/O counts 2261789Sahrens * over all top-level vdevs (i.e. 
the direct children of the root). 2262789Sahrens */ 2263789Sahrens if (vd == rvd) { 22647754SJeff.Bonwick@Sun.COM for (int c = 0; c < rvd->vdev_children; c++) { 2265789Sahrens vdev_t *cvd = rvd->vdev_child[c]; 2266789Sahrens vdev_stat_t *cvs = &cvd->vdev_stat; 2267789Sahrens 2268789Sahrens mutex_enter(&vd->vdev_stat_lock); 22697754SJeff.Bonwick@Sun.COM for (int t = 0; t < ZIO_TYPES; t++) { 2270789Sahrens vs->vs_ops[t] += cvs->vs_ops[t]; 2271789Sahrens vs->vs_bytes[t] += cvs->vs_bytes[t]; 2272789Sahrens } 2273789Sahrens vs->vs_scrub_examined += cvs->vs_scrub_examined; 2274789Sahrens mutex_exit(&vd->vdev_stat_lock); 2275789Sahrens } 2276789Sahrens } 2277789Sahrens } 2278789Sahrens 2279789Sahrens void 22805450Sbrendan vdev_clear_stats(vdev_t *vd) 22815450Sbrendan { 22825450Sbrendan mutex_enter(&vd->vdev_stat_lock); 22835450Sbrendan vd->vdev_stat.vs_space = 0; 22845450Sbrendan vd->vdev_stat.vs_dspace = 0; 22855450Sbrendan vd->vdev_stat.vs_alloc = 0; 22865450Sbrendan mutex_exit(&vd->vdev_stat_lock); 22875450Sbrendan } 22885450Sbrendan 22895450Sbrendan void 22907754SJeff.Bonwick@Sun.COM vdev_stat_update(zio_t *zio, uint64_t psize) 2291789Sahrens { 22928241SJeff.Bonwick@Sun.COM spa_t *spa = zio->io_spa; 22938241SJeff.Bonwick@Sun.COM vdev_t *rvd = spa->spa_root_vdev; 22947754SJeff.Bonwick@Sun.COM vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; 2295789Sahrens vdev_t *pvd; 2296789Sahrens uint64_t txg = zio->io_txg; 2297789Sahrens vdev_stat_t *vs = &vd->vdev_stat; 2298789Sahrens zio_type_t type = zio->io_type; 2299789Sahrens int flags = zio->io_flags; 2300789Sahrens 23017754SJeff.Bonwick@Sun.COM /* 23027754SJeff.Bonwick@Sun.COM * If this i/o is a gang leader, it didn't do any actual work. 23037754SJeff.Bonwick@Sun.COM */ 23047754SJeff.Bonwick@Sun.COM if (zio->io_gang_tree) 23057754SJeff.Bonwick@Sun.COM return; 23067754SJeff.Bonwick@Sun.COM 2307789Sahrens if (zio->io_error == 0) { 23087754SJeff.Bonwick@Sun.COM /* 23097754SJeff.Bonwick@Sun.COM * If this is a root i/o, don't count it -- we've already 23107754SJeff.Bonwick@Sun.COM * counted the top-level vdevs, and vdev_get_stats() will 23117754SJeff.Bonwick@Sun.COM * aggregate them when asked. This reduces contention on 23127754SJeff.Bonwick@Sun.COM * the root vdev_stat_lock and implicitly handles blocks 23137754SJeff.Bonwick@Sun.COM * that compress away to holes, for which there is no i/o. 23147754SJeff.Bonwick@Sun.COM * (Holes never create vdev children, so all the counters 23157754SJeff.Bonwick@Sun.COM * remain zero, which is what we want.) 23167754SJeff.Bonwick@Sun.COM * 23177754SJeff.Bonwick@Sun.COM * Note: this only applies to successful i/o (io_error == 0) 23187754SJeff.Bonwick@Sun.COM * because unlike i/o counts, errors are not additive. 23197754SJeff.Bonwick@Sun.COM * When reading a ditto block, for example, failure of 23207754SJeff.Bonwick@Sun.COM * one top-level vdev does not imply a root-level error. 
23217754SJeff.Bonwick@Sun.COM */ 23227754SJeff.Bonwick@Sun.COM if (vd == rvd) 23237754SJeff.Bonwick@Sun.COM return; 23247754SJeff.Bonwick@Sun.COM 23257754SJeff.Bonwick@Sun.COM ASSERT(vd == zio->io_vd); 23268241SJeff.Bonwick@Sun.COM 23278241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_IO_BYPASS) 23288241SJeff.Bonwick@Sun.COM return; 23298241SJeff.Bonwick@Sun.COM 23308241SJeff.Bonwick@Sun.COM mutex_enter(&vd->vdev_stat_lock); 23318241SJeff.Bonwick@Sun.COM 23327754SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_IO_REPAIR) { 23331807Sbonwick if (flags & ZIO_FLAG_SCRUB_THREAD) 23347754SJeff.Bonwick@Sun.COM vs->vs_scrub_repaired += psize; 23358241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_SELF_HEAL) 23367754SJeff.Bonwick@Sun.COM vs->vs_self_healed += psize; 2337789Sahrens } 23388241SJeff.Bonwick@Sun.COM 23398241SJeff.Bonwick@Sun.COM vs->vs_ops[type]++; 23408241SJeff.Bonwick@Sun.COM vs->vs_bytes[type] += psize; 23418241SJeff.Bonwick@Sun.COM 23428241SJeff.Bonwick@Sun.COM mutex_exit(&vd->vdev_stat_lock); 2343789Sahrens return; 2344789Sahrens } 2345789Sahrens 2346789Sahrens if (flags & ZIO_FLAG_SPECULATIVE) 2347789Sahrens return; 2348789Sahrens 23499725SEric.Schrock@Sun.COM /* 23509725SEric.Schrock@Sun.COM * If this is an I/O error that is going to be retried, then ignore the 23519725SEric.Schrock@Sun.COM * error. Otherwise, the user may interpret B_FAILFAST I/O errors as 23529725SEric.Schrock@Sun.COM * hard errors, when in reality they can happen for any number of 23539725SEric.Schrock@Sun.COM * innocuous reasons (bus resets, MPxIO link failure, etc). 23549725SEric.Schrock@Sun.COM */ 23559725SEric.Schrock@Sun.COM if (zio->io_error == EIO && 23569725SEric.Schrock@Sun.COM !(zio->io_flags & ZIO_FLAG_IO_RETRY)) 23579725SEric.Schrock@Sun.COM return; 23589725SEric.Schrock@Sun.COM 23597754SJeff.Bonwick@Sun.COM mutex_enter(&vd->vdev_stat_lock); 23609230SGeorge.Wilson@Sun.COM if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) { 23617754SJeff.Bonwick@Sun.COM if (zio->io_error == ECKSUM) 23627754SJeff.Bonwick@Sun.COM vs->vs_checksum_errors++; 23637754SJeff.Bonwick@Sun.COM else 23647754SJeff.Bonwick@Sun.COM vs->vs_read_errors++; 2365789Sahrens } 23669230SGeorge.Wilson@Sun.COM if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd)) 23677754SJeff.Bonwick@Sun.COM vs->vs_write_errors++; 23687754SJeff.Bonwick@Sun.COM mutex_exit(&vd->vdev_stat_lock); 2369789Sahrens 23708241SJeff.Bonwick@Sun.COM if (type == ZIO_TYPE_WRITE && txg != 0 && 23718241SJeff.Bonwick@Sun.COM (!(flags & ZIO_FLAG_IO_REPAIR) || 23728241SJeff.Bonwick@Sun.COM (flags & ZIO_FLAG_SCRUB_THREAD))) { 23738241SJeff.Bonwick@Sun.COM /* 23748241SJeff.Bonwick@Sun.COM * This is either a normal write (not a repair), or it's a 23758241SJeff.Bonwick@Sun.COM * repair induced by the scrub thread. In the normal case, 23768241SJeff.Bonwick@Sun.COM * we commit the DTL change in the same txg as the block 23778241SJeff.Bonwick@Sun.COM * was born. In the scrub-induced repair case, we know that 23788241SJeff.Bonwick@Sun.COM * scrubs run in first-pass syncing context, so we commit 23798241SJeff.Bonwick@Sun.COM * the DTL change in spa->spa_syncing_txg. 23808241SJeff.Bonwick@Sun.COM * 23818241SJeff.Bonwick@Sun.COM * We currently do not make DTL entries for failed spontaneous 23828241SJeff.Bonwick@Sun.COM * self-healing writes triggered by normal (non-scrubbing) 23838241SJeff.Bonwick@Sun.COM * reads, because we have no transactional context in which to 23848241SJeff.Bonwick@Sun.COM * do so -- and it's not clear that it'd be desirable anyway. 
23858241SJeff.Bonwick@Sun.COM */
23868241SJeff.Bonwick@Sun.COM if (vd->vdev_ops->vdev_op_leaf) {
23878241SJeff.Bonwick@Sun.COM uint64_t commit_txg = txg;
23888241SJeff.Bonwick@Sun.COM if (flags & ZIO_FLAG_SCRUB_THREAD) {
23898241SJeff.Bonwick@Sun.COM ASSERT(flags & ZIO_FLAG_IO_REPAIR);
23908241SJeff.Bonwick@Sun.COM ASSERT(spa_sync_pass(spa) == 1);
23918241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
23928241SJeff.Bonwick@Sun.COM commit_txg = spa->spa_syncing_txg;
23938241SJeff.Bonwick@Sun.COM }
23948241SJeff.Bonwick@Sun.COM ASSERT(commit_txg >= spa->spa_syncing_txg);
23958241SJeff.Bonwick@Sun.COM if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
23968241SJeff.Bonwick@Sun.COM return;
23978241SJeff.Bonwick@Sun.COM for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
23988241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
23998241SJeff.Bonwick@Sun.COM vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
2400789Sahrens }
24018241SJeff.Bonwick@Sun.COM if (vd != rvd)
24028241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
2403789Sahrens }
2404789Sahrens }
2405789Sahrens 
2406789Sahrens void
2407789Sahrens vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete)
2408789Sahrens {
2409789Sahrens vdev_stat_t *vs = &vd->vdev_stat;
2410789Sahrens 
24119816SGeorge.Wilson@Sun.COM for (int c = 0; c < vd->vdev_children; c++)
2412789Sahrens vdev_scrub_stat_update(vd->vdev_child[c], type, complete);
2413789Sahrens 
2414789Sahrens mutex_enter(&vd->vdev_stat_lock);
2415789Sahrens 
2416789Sahrens if (type == POOL_SCRUB_NONE) {
2417789Sahrens /*
2418789Sahrens * Update completion and end time. Leave everything else alone
2419789Sahrens * so we can report what happened during the previous scrub.
2420789Sahrens */
2421789Sahrens vs->vs_scrub_complete = complete;
2422789Sahrens vs->vs_scrub_end = gethrestime_sec();
2423789Sahrens } else {
2424789Sahrens vs->vs_scrub_type = type;
2425789Sahrens vs->vs_scrub_complete = 0;
2426789Sahrens vs->vs_scrub_examined = 0;
2427789Sahrens vs->vs_scrub_repaired = 0;
2428789Sahrens vs->vs_scrub_start = gethrestime_sec();
2429789Sahrens vs->vs_scrub_end = 0;
2430789Sahrens }
2431789Sahrens 
2432789Sahrens mutex_exit(&vd->vdev_stat_lock);
2433789Sahrens }
2434789Sahrens 
2435789Sahrens /*
2436789Sahrens * Update the in-core space usage stats for this vdev and the root vdev.
2437789Sahrens */
2438789Sahrens void
24395450Sbrendan vdev_space_update(vdev_t *vd, int64_t space_delta, int64_t alloc_delta,
24405450Sbrendan boolean_t update_root)
2441789Sahrens {
24424527Sperrin int64_t dspace_delta = space_delta;
24434527Sperrin spa_t *spa = vd->vdev_spa;
24444527Sperrin vdev_t *rvd = spa->spa_root_vdev;
24454527Sperrin 
2446789Sahrens ASSERT(vd == vd->vdev_top);
24474527Sperrin 
24484527Sperrin /*
24494527Sperrin * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
24504527Sperrin * factor. We must calculate this here and not at the root vdev
24514527Sperrin * because the root vdev's psize-to-asize is simply the max of its
24524527Sperrin * children's, thus not accurate enough for us.
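 *
 * Numeric sketch (assuming vdev_deflate_ratio is scaled so that
 * 1 << SPA_MINBLOCKSHIFT, i.e. 512, means "no expansion"): on a
 * plain mirror, a delta of 8192 bytes deflates to
 * (8192 >> SPA_MINBLOCKSHIFT) * 512 == 8192, while a RAID-Z vdev
 * with a ratio of, say, 409 would record (8192 >> 9) * 409 == 6544
 * deflated bytes.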
24534527Sperrin */
24544527Sperrin ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
24559701SGeorge.Wilson@Sun.COM ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
24564527Sperrin dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
24574527Sperrin vd->vdev_deflate_ratio;
2458789Sahrens 
24594527Sperrin mutex_enter(&vd->vdev_stat_lock);
24604527Sperrin vd->vdev_stat.vs_space += space_delta;
24614527Sperrin vd->vdev_stat.vs_alloc += alloc_delta;
24624527Sperrin vd->vdev_stat.vs_dspace += dspace_delta;
24634527Sperrin mutex_exit(&vd->vdev_stat_lock);
24642082Seschrock 
24655450Sbrendan if (update_root) {
24665450Sbrendan ASSERT(rvd == vd->vdev_parent);
24675450Sbrendan ASSERT(vd->vdev_ms_count != 0);
24684527Sperrin 
24695450Sbrendan /*
24705450Sbrendan * Don't count non-normal (e.g. intent log) space as part of
24715450Sbrendan * the pool's capacity.
24725450Sbrendan */
2473*10594SGeorge.Wilson@Sun.COM if (vd->vdev_islog)
24745450Sbrendan return;
24755450Sbrendan 
24765450Sbrendan mutex_enter(&rvd->vdev_stat_lock);
24775450Sbrendan rvd->vdev_stat.vs_space += space_delta;
24785450Sbrendan rvd->vdev_stat.vs_alloc += alloc_delta;
24795450Sbrendan rvd->vdev_stat.vs_dspace += dspace_delta;
24805450Sbrendan mutex_exit(&rvd->vdev_stat_lock);
24815450Sbrendan }
2482789Sahrens }
2483789Sahrens 
2484789Sahrens /*
2485789Sahrens * Mark a top-level vdev's config as dirty, placing it on the dirty list
2486789Sahrens * so that it will be written out next time the vdev configuration is synced.
2487789Sahrens * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
2488789Sahrens */
2489789Sahrens void
2490789Sahrens vdev_config_dirty(vdev_t *vd)
2491789Sahrens {
2492789Sahrens spa_t *spa = vd->vdev_spa;
2493789Sahrens vdev_t *rvd = spa->spa_root_vdev;
2494789Sahrens int c;
2495789Sahrens 
24961601Sbonwick /*
24979425SEric.Schrock@Sun.COM * If this is an aux vdev (as with l2cache and spare devices), then we
24989425SEric.Schrock@Sun.COM * update the vdev config manually and set the sync flag.
24996643Seschrock */
25006643Seschrock if (vd->vdev_aux != NULL) {
25016643Seschrock spa_aux_vdev_t *sav = vd->vdev_aux;
25026643Seschrock nvlist_t **aux;
25036643Seschrock uint_t naux;
25046643Seschrock 
25056643Seschrock for (c = 0; c < sav->sav_count; c++) {
25066643Seschrock if (sav->sav_vdevs[c] == vd)
25076643Seschrock break;
25086643Seschrock }
25096643Seschrock 
25107754SJeff.Bonwick@Sun.COM if (c == sav->sav_count) {
25117754SJeff.Bonwick@Sun.COM /*
25127754SJeff.Bonwick@Sun.COM * We're being removed. There's nothing more to do.
25137754SJeff.Bonwick@Sun.COM */
25147754SJeff.Bonwick@Sun.COM ASSERT(sav->sav_sync == B_TRUE);
25157754SJeff.Bonwick@Sun.COM return;
25167754SJeff.Bonwick@Sun.COM }
25177754SJeff.Bonwick@Sun.COM 
25186643Seschrock sav->sav_sync = B_TRUE;
25196643Seschrock 
25209425SEric.Schrock@Sun.COM if (nvlist_lookup_nvlist_array(sav->sav_config,
25219425SEric.Schrock@Sun.COM ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
25229425SEric.Schrock@Sun.COM VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
25239425SEric.Schrock@Sun.COM ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
25249425SEric.Schrock@Sun.COM }
25256643Seschrock 
25266643Seschrock ASSERT(c < naux);
25276643Seschrock 
25286643Seschrock /*
25296643Seschrock * Setting the nvlist in the middle of the array is a little
25306643Seschrock * sketchy, but it will work.
25316643Seschrock */ 25326643Seschrock nvlist_free(aux[c]); 25336643Seschrock aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE); 25346643Seschrock 25356643Seschrock return; 25366643Seschrock } 25376643Seschrock 25386643Seschrock /* 25397754SJeff.Bonwick@Sun.COM * The dirty list is protected by the SCL_CONFIG lock. The caller 25407754SJeff.Bonwick@Sun.COM * must either hold SCL_CONFIG as writer, or must be the sync thread 25417754SJeff.Bonwick@Sun.COM * (which holds SCL_CONFIG as reader). There's only one sync thread, 25421601Sbonwick * so this is sufficient to ensure mutual exclusion. 25431601Sbonwick */ 25447754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 25457754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 25467754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_CONFIG, RW_READER))); 25471601Sbonwick 2548789Sahrens if (vd == rvd) { 2549789Sahrens for (c = 0; c < rvd->vdev_children; c++) 2550789Sahrens vdev_config_dirty(rvd->vdev_child[c]); 2551789Sahrens } else { 2552789Sahrens ASSERT(vd == vd->vdev_top); 2553789Sahrens 2554*10594SGeorge.Wilson@Sun.COM if (!list_link_active(&vd->vdev_config_dirty_node) && 2555*10594SGeorge.Wilson@Sun.COM !vd->vdev_ishole) 25567754SJeff.Bonwick@Sun.COM list_insert_head(&spa->spa_config_dirty_list, vd); 2557789Sahrens } 2558789Sahrens } 2559789Sahrens 2560789Sahrens void 2561789Sahrens vdev_config_clean(vdev_t *vd) 2562789Sahrens { 25631601Sbonwick spa_t *spa = vd->vdev_spa; 25641601Sbonwick 25657754SJeff.Bonwick@Sun.COM ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 25667754SJeff.Bonwick@Sun.COM (dsl_pool_sync_context(spa_get_dsl(spa)) && 25677754SJeff.Bonwick@Sun.COM spa_config_held(spa, SCL_CONFIG, RW_READER))); 25687754SJeff.Bonwick@Sun.COM 25697754SJeff.Bonwick@Sun.COM ASSERT(list_link_active(&vd->vdev_config_dirty_node)); 25707754SJeff.Bonwick@Sun.COM list_remove(&spa->spa_config_dirty_list, vd); 25717754SJeff.Bonwick@Sun.COM } 25727754SJeff.Bonwick@Sun.COM 25737754SJeff.Bonwick@Sun.COM /* 25747754SJeff.Bonwick@Sun.COM * Mark a top-level vdev's state as dirty, so that the next pass of 25757754SJeff.Bonwick@Sun.COM * spa_sync() can convert this into vdev_config_dirty(). We distinguish 25767754SJeff.Bonwick@Sun.COM * the state changes from larger config changes because they require 25777754SJeff.Bonwick@Sun.COM * much less locking, and are often needed for administrative actions. 25787754SJeff.Bonwick@Sun.COM */ 25797754SJeff.Bonwick@Sun.COM void 25807754SJeff.Bonwick@Sun.COM vdev_state_dirty(vdev_t *vd) 25817754SJeff.Bonwick@Sun.COM { 25827754SJeff.Bonwick@Sun.COM spa_t *spa = vd->vdev_spa; 25837754SJeff.Bonwick@Sun.COM 25847754SJeff.Bonwick@Sun.COM ASSERT(vd == vd->vdev_top); 25851601Sbonwick 25867754SJeff.Bonwick@Sun.COM /* 25877754SJeff.Bonwick@Sun.COM * The state list is protected by the SCL_STATE lock. The caller 25887754SJeff.Bonwick@Sun.COM * must either hold SCL_STATE as writer, or must be the sync thread 25897754SJeff.Bonwick@Sun.COM * (which holds SCL_STATE as reader). There's only one sync thread, 25907754SJeff.Bonwick@Sun.COM * so this is sufficient to ensure mutual exclusion. 
/*
 * Propagate vdev state up from children to parent.
 */
void
vdev_propagate_state(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int degraded = 0, faulted = 0;
	int corrupted = 0;
	vdev_t *child;

	if (vd->vdev_children > 0) {
		for (int c = 0; c < vd->vdev_children; c++) {
			child = vd->vdev_child[c];

			/*
			 * Don't factor holes into the decision.
			 */
			if (child->vdev_ishole)
				continue;

			if (!vdev_readable(child) ||
			    (!vdev_writeable(child) && spa_writeable(spa))) {
				/*
				 * Root special: if there is a top-level log
				 * device, treat the root vdev as if it were
				 * degraded.
				 */
				if (child->vdev_islog && vd == rvd)
					degraded++;
				else
					faulted++;
			} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
				degraded++;
			}

			if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
				corrupted++;
		}

		vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);

		/*
		 * Root special: if there is a top-level vdev that cannot be
		 * opened due to corrupted metadata, then propagate the root
		 * vdev's aux state as 'corrupt' rather than 'insufficient
		 * replicas'.
		 */
		if (corrupted && vd == rvd &&
		    rvd->vdev_state == VDEV_STATE_CANT_OPEN)
			vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
	}

	if (vd->vdev_parent)
		vdev_propagate_state(vd->vdev_parent);
}
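/*
 * Illustrative sketch, not part of the original source: a simplified
 * rendering of the child-counting logic above, omitting the writeable
 * check and the root/log special cases.  For a two-way mirror with one
 * unreadable child this yields faulted == 1, degraded == 0, which the
 * mirror's vdev_op_state_change callback would typically report as
 * DEGRADED, since the remaining child can still satisfy I/O.
 * Hypothetical, compiled only under VDEV_EXAMPLES.
 */
#ifdef VDEV_EXAMPLES
static void
vdev_example_count_children(vdev_t *vd, int *faultedp, int *degradedp)
{
	*faultedp = *degradedp = 0;

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *child = vd->vdev_child[c];

		if (child->vdev_ishole)		/* holes don't count */
			continue;
		if (!vdev_readable(child))
			(*faultedp)++;		/* unreadable child */
		else if (child->vdev_state <= VDEV_STATE_DEGRADED)
			(*degradedp)++;		/* readable but impaired */
	}
}
#endif	/* VDEV_EXAMPLES */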
/*
 * Set a vdev's state.  If this is during an open, we don't update the parent
 * state, because we're in the process of opening children depth-first.
 * Otherwise, we propagate the change to the parent.
 *
 * If this routine places a device in a faulted state, an appropriate ereport
 * is generated.
 */
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
	uint64_t save_state;
	spa_t *spa = vd->vdev_spa;

	if (state == vd->vdev_state) {
		vd->vdev_stat.vs_aux = aux;
		return;
	}

	save_state = vd->vdev_state;

	vd->vdev_state = state;
	vd->vdev_stat.vs_aux = aux;

	/*
	 * If we are setting the vdev state to anything but an open state, then
	 * always close the underlying device.  Otherwise, we keep accessible
	 * but invalid devices open forever.  We don't call vdev_close() itself,
	 * because that implies some extra checks (offline, etc) that we don't
	 * want here.  This is limited to leaf devices, because otherwise
	 * closing the device will affect other children.
	 */
	if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf)
		vd->vdev_ops->vdev_op_close(vd);

	if (vd->vdev_removed &&
	    state == VDEV_STATE_CANT_OPEN &&
	    (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
		/*
		 * If the previous state is set to VDEV_STATE_REMOVED, then this
		 * device was previously marked removed and someone attempted to
		 * reopen it.  If this failed due to a nonexistent device, then
		 * keep the device in the REMOVED state.  We also allow this if
		 * it is one of our special test online cases, which is only
		 * attempting to online the device and shouldn't generate an FMA
		 * fault.
		 */
		vd->vdev_state = VDEV_STATE_REMOVED;
		vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	} else if (state == VDEV_STATE_REMOVED) {
		/*
		 * Indicate to the ZFS DE that this device has been removed, and
		 * any recent errors should be ignored.
		 */
		zfs_post_remove(spa, vd);
		vd->vdev_removed = B_TRUE;
	} else if (state == VDEV_STATE_CANT_OPEN) {
		/*
		 * If we fail to open a vdev during an import, we mark it as
		 * "not available", which signifies that it was never there to
		 * begin with.  Failure to open such a device is not considered
		 * an error.
		 */
		if (spa->spa_load_state == SPA_LOAD_IMPORT &&
		    vd->vdev_ops->vdev_op_leaf)
			vd->vdev_not_present = 1;

		/*
		 * Post the appropriate ereport.  If the 'prevstate' field is
		 * set to something other than VDEV_STATE_UNKNOWN, it indicates
		 * that this is part of a vdev_reopen().  In this case, we don't
		 * want to post the ereport if the device was already in the
		 * CANT_OPEN state beforehand.
		 *
		 * If the 'checkremove' flag is set, then this is an attempt to
		 * online the device in response to an insertion event.  If we
		 * hit this case, then we have detected an insertion event for a
		 * faulted or offline device that wasn't in the removed state.
		 * In this scenario, we don't post an ereport because we are
		 * about to replace the device, or attempt an online with
		 * vdev_forcefault, which will generate the fault for us.
		 */
		if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
		    !vd->vdev_not_present && !vd->vdev_checkremove &&
		    vd != spa->spa_root_vdev) {
			const char *class;

			switch (aux) {
			case VDEV_AUX_OPEN_FAILED:
				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
				break;
			case VDEV_AUX_CORRUPT_DATA:
				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
				break;
			case VDEV_AUX_NO_REPLICAS:
				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
				break;
			case VDEV_AUX_BAD_GUID_SUM:
				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
				break;
			case VDEV_AUX_TOO_SMALL:
				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
				break;
			case VDEV_AUX_BAD_LABEL:
				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
				break;
			case VDEV_AUX_IO_FAILURE:
				class = FM_EREPORT_ZFS_IO_FAILURE;
				break;
			default:
				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
			}

			zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
		}

		/* Erase any notion of persistent removed state */
		vd->vdev_removed = B_FALSE;
	} else {
		vd->vdev_removed = B_FALSE;
	}

	if (!isopen && vd->vdev_parent)
		vdev_propagate_state(vd->vdev_parent);
}
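/*
 * Illustrative sketch, not part of the original source: faulting a leaf
 * outside of an open.  With isopen == B_FALSE the state change propagates
 * to the parent, and the aux value records why the leaf was faulted
 * (VDEV_AUX_ERR_EXCEEDED is used here on the assumption that the error
 * count crossed a threshold).  The wrapper name is hypothetical.
 */
#ifdef VDEV_EXAMPLES
static void
vdev_example_fault_leaf(vdev_t *vd)
{
	ASSERT(vd->vdev_ops->vdev_op_leaf);

	vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED);
}
#endif	/* VDEV_EXAMPLES */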
/*
 * Check the vdev configuration to ensure that it's capable of supporting
 * a root pool.  Currently, we do not support RAID-Z or partial configuration.
 * In addition, only a single top-level vdev is allowed and none of the leaves
 * can be wholedisks.
 */
boolean_t
vdev_is_bootable(vdev_t *vd)
{
	if (!vd->vdev_ops->vdev_op_leaf) {
		char *vdev_type = vd->vdev_ops->vdev_op_type;

		if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
		    vd->vdev_children > 1) {
			return (B_FALSE);
		} else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
		    strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
			return (B_FALSE);
		}
	} else if (vd->vdev_wholedisk == 1) {
		return (B_FALSE);
	}

	for (int c = 0; c < vd->vdev_children; c++) {
		if (!vdev_is_bootable(vd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}
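/*
 * Illustrative sketch, not part of the original source: bootability is
 * naturally checked from the root of the tree, so a pool qualifies only
 * if every interior vdev and leaf passes.  Hypothetical wrapper.
 */
#ifdef VDEV_EXAMPLES
static boolean_t
vdev_example_pool_is_bootable(spa_t *spa)
{
	/* A single disk or an N-way mirror passes; raidz does not. */
	return (vdev_is_bootable(spa->spa_root_vdev));
}
#endif	/* VDEV_EXAMPLES */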
/*
 * Load the state from the original vdev tree (ovd) which
 * we've retrieved from the MOS config object.  If the original
 * vdev was offline then we transfer that state to the device
 * in the current vdev tree (nvd).
 */
void
vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
{
	spa_t *spa = nvd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
	ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);

	for (int c = 0; c < nvd->vdev_children; c++)
		vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]);

	if (nvd->vdev_ops->vdev_op_leaf && ovd->vdev_offline) {
		/*
		 * It would be nice to call vdev_offline()
		 * directly but the pool isn't fully loaded and
		 * the txg threads have not been started yet.
		 */
		nvd->vdev_offline = ovd->vdev_offline;
		vdev_reopen(nvd->vdev_top);
	}
}

/*
 * Expand a vdev if possible.
 */
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
	ASSERT(vd->vdev_top == vd);
	ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) {
		VERIFY(vdev_metaslab_init(vd, txg) == 0);
		vdev_config_dirty(vd);
	}
}
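/*
 * Illustrative sketch, not part of the original source: the expansion
 * test above is plain shift arithmetic.  Assuming a hypothetical device
 * grown from 256MB to 512MB with vdev_ms_shift == 24 (16MB metaslabs),
 * asize >> ms_shift rises from 16 to 32, exceeding the existing
 * vdev_ms_count of 16, so vdev_expand() would grow the metaslab array.
 */
#ifdef VDEV_EXAMPLES
static boolean_t
vdev_example_can_expand(vdev_t *vd)
{
	/* Same predicate vdev_expand() uses before adding metaslabs. */
	return ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count);
}
#endif	/* VDEV_EXAMPLES */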