xref: /onnv-gate/usr/src/uts/common/fs/zfs/vdev.c (revision 8632)
1789Sahrens /*
2789Sahrens  * CDDL HEADER START
3789Sahrens  *
4789Sahrens  * The contents of this file are subject to the terms of the
51485Slling  * Common Development and Distribution License (the "License").
61485Slling  * You may not use this file except in compliance with the License.
7789Sahrens  *
8789Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9789Sahrens  * or http://www.opensolaris.org/os/licensing.
10789Sahrens  * See the License for the specific language governing permissions
11789Sahrens  * and limitations under the License.
12789Sahrens  *
13789Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14789Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15789Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16789Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17789Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18789Sahrens  *
19789Sahrens  * CDDL HEADER END
20789Sahrens  */
212082Seschrock 
22789Sahrens /*
23*8632SBill.Moore@Sun.COM  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24789Sahrens  * Use is subject to license terms.
25789Sahrens  */
26789Sahrens 
27789Sahrens #include <sys/zfs_context.h>
281544Seschrock #include <sys/fm/fs/zfs.h>
29789Sahrens #include <sys/spa.h>
30789Sahrens #include <sys/spa_impl.h>
31789Sahrens #include <sys/dmu.h>
32789Sahrens #include <sys/dmu_tx.h>
33789Sahrens #include <sys/vdev_impl.h>
34789Sahrens #include <sys/uberblock_impl.h>
35789Sahrens #include <sys/metaslab.h>
36789Sahrens #include <sys/metaslab_impl.h>
37789Sahrens #include <sys/space_map.h>
38789Sahrens #include <sys/zio.h>
39789Sahrens #include <sys/zap.h>
40789Sahrens #include <sys/fs/zfs.h>
416643Seschrock #include <sys/arc.h>
42789Sahrens 
43789Sahrens /*
44789Sahrens  * Virtual device management.
45789Sahrens  */
46789Sahrens 
47789Sahrens static vdev_ops_t *vdev_ops_table[] = {
48789Sahrens 	&vdev_root_ops,
49789Sahrens 	&vdev_raidz_ops,
50789Sahrens 	&vdev_mirror_ops,
51789Sahrens 	&vdev_replacing_ops,
522082Seschrock 	&vdev_spare_ops,
53789Sahrens 	&vdev_disk_ops,
54789Sahrens 	&vdev_file_ops,
55789Sahrens 	&vdev_missing_ops,
56789Sahrens 	NULL
57789Sahrens };
58789Sahrens 
597046Sahrens /* maximum scrub/resilver I/Os in flight per leaf vdev */
607046Sahrens int zfs_scrub_limit = 10;
613697Smishra 
62789Sahrens /*
63789Sahrens  * Given a vdev type, return the appropriate ops vector.
64789Sahrens  */
65789Sahrens static vdev_ops_t *
66789Sahrens vdev_getops(const char *type)
67789Sahrens {
68789Sahrens 	vdev_ops_t *ops, **opspp;
69789Sahrens 
70789Sahrens 	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
71789Sahrens 		if (strcmp(ops->vdev_op_type, type) == 0)
72789Sahrens 			break;
73789Sahrens 
74789Sahrens 	return (ops);
75789Sahrens }
76789Sahrens 
77789Sahrens /*
78789Sahrens  * Default asize function: return the MAX of psize with the asize of
79789Sahrens  * all children.  This is what's used by anything other than RAID-Z.
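 * For example, if the top-level vdev's ashift is 12 (4K sectors), a psize
 * of 5000 bytes is first rounded up to 8192 before taking the MAX across
 * the children.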
80789Sahrens  */
81789Sahrens uint64_t
82789Sahrens vdev_default_asize(vdev_t *vd, uint64_t psize)
83789Sahrens {
841732Sbonwick 	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
85789Sahrens 	uint64_t csize;
86789Sahrens 	uint64_t c;
87789Sahrens 
88789Sahrens 	for (c = 0; c < vd->vdev_children; c++) {
89789Sahrens 		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
90789Sahrens 		asize = MAX(asize, csize);
91789Sahrens 	}
92789Sahrens 
93789Sahrens 	return (asize);
94789Sahrens }
95789Sahrens 
961175Slling /*
971175Slling  * Get the replaceable or attachable device size.
981175Slling  * If the parent is a mirror or raidz, the replaceable size is the minimum
991175Slling  * psize of all its children. For the rest, just return our own psize.
1001175Slling  *
1011175Slling  * e.g.
1021175Slling  *			psize	rsize
1031175Slling  * root			-	-
1041175Slling  *	mirror/raidz	-	-
1051175Slling  *	    disk1	20g	20g
1061175Slling  *	    disk2 	40g	20g
1071175Slling  *	disk3 		80g	80g
1081175Slling  */
1091175Slling uint64_t
1101175Slling vdev_get_rsize(vdev_t *vd)
1111175Slling {
1121175Slling 	vdev_t *pvd, *cvd;
1131175Slling 	uint64_t c, rsize;
1141175Slling 
1151175Slling 	pvd = vd->vdev_parent;
1161175Slling 
1171175Slling 	/*
1181175Slling 	 * If our parent is NULL or the root, just return our own psize.
1191175Slling 	 */
1201175Slling 	if (pvd == NULL || pvd->vdev_parent == NULL)
1211175Slling 		return (vd->vdev_psize);
1221175Slling 
1231175Slling 	rsize = 0;
1241175Slling 
1251175Slling 	for (c = 0; c < pvd->vdev_children; c++) {
1261175Slling 		cvd = pvd->vdev_child[c];
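		/*
		 * The -1/+1 arithmetic lets the initial rsize of 0 act as
		 * "infinity": 0 - 1 wraps to UINT64_MAX, so the first
		 * child's psize - 1 always wins the MIN.
		 */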
1271175Slling 		rsize = MIN(rsize - 1, cvd->vdev_psize - 1) + 1;
1281175Slling 	}
1291175Slling 
1301175Slling 	return (rsize);
1311175Slling }
1321175Slling 
133789Sahrens vdev_t *
134789Sahrens vdev_lookup_top(spa_t *spa, uint64_t vdev)
135789Sahrens {
136789Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
137789Sahrens 
1387754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1395530Sbonwick 
1407046Sahrens 	if (vdev < rvd->vdev_children) {
1417046Sahrens 		ASSERT(rvd->vdev_child[vdev] != NULL);
142789Sahrens 		return (rvd->vdev_child[vdev]);
1437046Sahrens 	}
144789Sahrens 
145789Sahrens 	return (NULL);
146789Sahrens }
147789Sahrens 
148789Sahrens vdev_t *
149789Sahrens vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
150789Sahrens {
151789Sahrens 	int c;
152789Sahrens 	vdev_t *mvd;
153789Sahrens 
1541585Sbonwick 	if (vd->vdev_guid == guid)
155789Sahrens 		return (vd);
156789Sahrens 
157789Sahrens 	for (c = 0; c < vd->vdev_children; c++)
158789Sahrens 		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
159789Sahrens 		    NULL)
160789Sahrens 			return (mvd);
161789Sahrens 
162789Sahrens 	return (NULL);
163789Sahrens }
164789Sahrens 
165789Sahrens void
166789Sahrens vdev_add_child(vdev_t *pvd, vdev_t *cvd)
167789Sahrens {
168789Sahrens 	size_t oldsize, newsize;
169789Sahrens 	uint64_t id = cvd->vdev_id;
170789Sahrens 	vdev_t **newchild;
171789Sahrens 
1727754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
173789Sahrens 	ASSERT(cvd->vdev_parent == NULL);
174789Sahrens 
175789Sahrens 	cvd->vdev_parent = pvd;
176789Sahrens 
177789Sahrens 	if (pvd == NULL)
178789Sahrens 		return;
179789Sahrens 
180789Sahrens 	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
181789Sahrens 
182789Sahrens 	oldsize = pvd->vdev_children * sizeof (vdev_t *);
183789Sahrens 	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
184789Sahrens 	newsize = pvd->vdev_children * sizeof (vdev_t *);
185789Sahrens 
186789Sahrens 	newchild = kmem_zalloc(newsize, KM_SLEEP);
187789Sahrens 	if (pvd->vdev_child != NULL) {
188789Sahrens 		bcopy(pvd->vdev_child, newchild, oldsize);
189789Sahrens 		kmem_free(pvd->vdev_child, oldsize);
190789Sahrens 	}
191789Sahrens 
192789Sahrens 	pvd->vdev_child = newchild;
193789Sahrens 	pvd->vdev_child[id] = cvd;
194789Sahrens 
195789Sahrens 	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
196789Sahrens 	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
197789Sahrens 
198789Sahrens 	/*
199789Sahrens 	 * Walk up all ancestors to update guid sum.
200789Sahrens 	 */
201789Sahrens 	for (; pvd != NULL; pvd = pvd->vdev_parent)
202789Sahrens 		pvd->vdev_guid_sum += cvd->vdev_guid_sum;
2033697Smishra 
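	/*
	 * Each leaf vdev in the tree contributes zfs_scrub_limit to the
	 * pool-wide cap on in-flight scrub/resilver I/Os.
	 */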
2043697Smishra 	if (cvd->vdev_ops->vdev_op_leaf)
2053697Smishra 		cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
206789Sahrens }
207789Sahrens 
208789Sahrens void
209789Sahrens vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
210789Sahrens {
211789Sahrens 	int c;
212789Sahrens 	uint_t id = cvd->vdev_id;
213789Sahrens 
214789Sahrens 	ASSERT(cvd->vdev_parent == pvd);
215789Sahrens 
216789Sahrens 	if (pvd == NULL)
217789Sahrens 		return;
218789Sahrens 
219789Sahrens 	ASSERT(id < pvd->vdev_children);
220789Sahrens 	ASSERT(pvd->vdev_child[id] == cvd);
221789Sahrens 
222789Sahrens 	pvd->vdev_child[id] = NULL;
223789Sahrens 	cvd->vdev_parent = NULL;
224789Sahrens 
225789Sahrens 	for (c = 0; c < pvd->vdev_children; c++)
226789Sahrens 		if (pvd->vdev_child[c])
227789Sahrens 			break;
228789Sahrens 
229789Sahrens 	if (c == pvd->vdev_children) {
230789Sahrens 		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
231789Sahrens 		pvd->vdev_child = NULL;
232789Sahrens 		pvd->vdev_children = 0;
233789Sahrens 	}
234789Sahrens 
235789Sahrens 	/*
236789Sahrens 	 * Walk up all ancestors to update guid sum.
237789Sahrens 	 */
238789Sahrens 	for (; pvd != NULL; pvd = pvd->vdev_parent)
239789Sahrens 		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
2403697Smishra 
2413697Smishra 	if (cvd->vdev_ops->vdev_op_leaf)
2423697Smishra 		cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
243789Sahrens }
244789Sahrens 
245789Sahrens /*
246789Sahrens  * Remove any holes in the child array.
247789Sahrens  */
248789Sahrens void
249789Sahrens vdev_compact_children(vdev_t *pvd)
250789Sahrens {
251789Sahrens 	vdev_t **newchild, *cvd;
252789Sahrens 	int oldc = pvd->vdev_children;
253789Sahrens 	int newc, c;
254789Sahrens 
2557754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
256789Sahrens 
257789Sahrens 	for (c = newc = 0; c < oldc; c++)
258789Sahrens 		if (pvd->vdev_child[c])
259789Sahrens 			newc++;
260789Sahrens 
261789Sahrens 	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);
262789Sahrens 
263789Sahrens 	for (c = newc = 0; c < oldc; c++) {
264789Sahrens 		if ((cvd = pvd->vdev_child[c]) != NULL) {
265789Sahrens 			newchild[newc] = cvd;
266789Sahrens 			cvd->vdev_id = newc++;
267789Sahrens 		}
268789Sahrens 	}
269789Sahrens 
270789Sahrens 	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
271789Sahrens 	pvd->vdev_child = newchild;
272789Sahrens 	pvd->vdev_children = newc;
273789Sahrens }
274789Sahrens 
275789Sahrens /*
276789Sahrens  * Allocate and minimally initialize a vdev_t.
277789Sahrens  */
278789Sahrens static vdev_t *
279789Sahrens vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
280789Sahrens {
281789Sahrens 	vdev_t *vd;
282789Sahrens 
2831585Sbonwick 	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
2841585Sbonwick 
2851585Sbonwick 	if (spa->spa_root_vdev == NULL) {
2861585Sbonwick 		ASSERT(ops == &vdev_root_ops);
2871585Sbonwick 		spa->spa_root_vdev = vd;
2881585Sbonwick 	}
289789Sahrens 
2901585Sbonwick 	if (guid == 0) {
2911585Sbonwick 		if (spa->spa_root_vdev == vd) {
2921585Sbonwick 			/*
2931585Sbonwick 			 * The root vdev's guid will also be the pool guid,
2941585Sbonwick 			 * which must be unique among all pools.
2951585Sbonwick 			 */
2961585Sbonwick 			while (guid == 0 || spa_guid_exists(guid, 0))
2971585Sbonwick 				guid = spa_get_random(-1ULL);
2981585Sbonwick 		} else {
2991585Sbonwick 			/*
3001585Sbonwick 			 * Any other vdev's guid must be unique within the pool.
3011585Sbonwick 			 */
3021585Sbonwick 			while (guid == 0 ||
3031585Sbonwick 			    spa_guid_exists(spa_guid(spa), guid))
3041585Sbonwick 				guid = spa_get_random(-1ULL);
3051585Sbonwick 		}
3061585Sbonwick 		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
3071585Sbonwick 	}
308789Sahrens 
309789Sahrens 	vd->vdev_spa = spa;
310789Sahrens 	vd->vdev_id = id;
311789Sahrens 	vd->vdev_guid = guid;
312789Sahrens 	vd->vdev_guid_sum = guid;
313789Sahrens 	vd->vdev_ops = ops;
314789Sahrens 	vd->vdev_state = VDEV_STATE_CLOSED;
315789Sahrens 
316789Sahrens 	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
3172856Snd150628 	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
3187754SJeff.Bonwick@Sun.COM 	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
3198241SJeff.Bonwick@Sun.COM 	for (int t = 0; t < DTL_TYPES; t++) {
3208241SJeff.Bonwick@Sun.COM 		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
3218241SJeff.Bonwick@Sun.COM 		    &vd->vdev_dtl_lock);
3228241SJeff.Bonwick@Sun.COM 	}
323789Sahrens 	txg_list_create(&vd->vdev_ms_list,
324789Sahrens 	    offsetof(struct metaslab, ms_txg_node));
325789Sahrens 	txg_list_create(&vd->vdev_dtl_list,
326789Sahrens 	    offsetof(struct vdev, vdev_dtl_node));
327789Sahrens 	vd->vdev_stat.vs_timestamp = gethrtime();
3284451Seschrock 	vdev_queue_init(vd);
3294451Seschrock 	vdev_cache_init(vd);
330789Sahrens 
331789Sahrens 	return (vd);
332789Sahrens }
333789Sahrens 
334789Sahrens /*
335789Sahrens  * Allocate a new vdev.  The 'alloctype' is used to control whether we are
336789Sahrens  * creating a new vdev or loading an existing one - the behavior is slightly
337789Sahrens  * different for each case.
338789Sahrens  */
3392082Seschrock int
3402082Seschrock vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
3412082Seschrock     int alloctype)
342789Sahrens {
343789Sahrens 	vdev_ops_t *ops;
344789Sahrens 	char *type;
3454527Sperrin 	uint64_t guid = 0, islog, nparity;
346789Sahrens 	vdev_t *vd;
347789Sahrens 
3487754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
349789Sahrens 
350789Sahrens 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
3512082Seschrock 		return (EINVAL);
352789Sahrens 
353789Sahrens 	if ((ops = vdev_getops(type)) == NULL)
3542082Seschrock 		return (EINVAL);
355789Sahrens 
356789Sahrens 	/*
357789Sahrens 	 * If this is a load, get the vdev guid from the nvlist.
358789Sahrens 	 * Otherwise, vdev_alloc_common() will generate one for us.
359789Sahrens 	 */
360789Sahrens 	if (alloctype == VDEV_ALLOC_LOAD) {
361789Sahrens 		uint64_t label_id;
362789Sahrens 
363789Sahrens 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
364789Sahrens 		    label_id != id)
3652082Seschrock 			return (EINVAL);
366789Sahrens 
367789Sahrens 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
3682082Seschrock 			return (EINVAL);
3692082Seschrock 	} else if (alloctype == VDEV_ALLOC_SPARE) {
3702082Seschrock 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
3712082Seschrock 			return (EINVAL);
3725450Sbrendan 	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
3735450Sbrendan 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
3745450Sbrendan 			return (EINVAL);
375789Sahrens 	}
376789Sahrens 
3772082Seschrock 	/*
3782082Seschrock 	 * The first allocated vdev must be of type 'root'.
3792082Seschrock 	 */
3802082Seschrock 	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
3812082Seschrock 		return (EINVAL);
3822082Seschrock 
3834527Sperrin 	/*
3844527Sperrin 	 * Determine whether we're a log vdev.
3854527Sperrin 	 */
3864527Sperrin 	islog = 0;
3874527Sperrin 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
3885094Slling 	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
3894527Sperrin 		return (ENOTSUP);
3904527Sperrin 
3914527Sperrin 	/*
3924527Sperrin 	 * Set the nparity property for RAID-Z vdevs.
3934527Sperrin 	 */
3944527Sperrin 	nparity = -1ULL;
3954527Sperrin 	if (ops == &vdev_raidz_ops) {
3964527Sperrin 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3974527Sperrin 		    &nparity) == 0) {
3984527Sperrin 			/*
3994527Sperrin 			 * Currently, we can only support 2 parity devices.
4004527Sperrin 			 */
4014527Sperrin 			if (nparity == 0 || nparity > 2)
4024527Sperrin 				return (EINVAL);
4034527Sperrin 			/*
4044527Sperrin 			 * Older versions can only support 1 parity device.
4054527Sperrin 			 */
4064527Sperrin 			if (nparity == 2 &&
4074577Sahrens 			    spa_version(spa) < SPA_VERSION_RAID6)
4084527Sperrin 				return (ENOTSUP);
4094527Sperrin 		} else {
4104527Sperrin 			/*
4114527Sperrin 			 * We require the parity to be specified for SPAs that
4124527Sperrin 			 * support multiple parity levels.
4134527Sperrin 			 */
4144577Sahrens 			if (spa_version(spa) >= SPA_VERSION_RAID6)
4154527Sperrin 				return (EINVAL);
4164527Sperrin 			/*
4174527Sperrin 			 * Otherwise, we default to 1 parity device for RAID-Z.
4184527Sperrin 			 */
4194527Sperrin 			nparity = 1;
4204527Sperrin 		}
4214527Sperrin 	} else {
4224527Sperrin 		nparity = 0;
4234527Sperrin 	}
4244527Sperrin 	ASSERT(nparity != -1ULL);
4254527Sperrin 
426789Sahrens 	vd = vdev_alloc_common(spa, id, guid, ops);
427789Sahrens 
4284527Sperrin 	vd->vdev_islog = islog;
4294527Sperrin 	vd->vdev_nparity = nparity;
4304527Sperrin 
431789Sahrens 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
432789Sahrens 		vd->vdev_path = spa_strdup(vd->vdev_path);
433789Sahrens 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
434789Sahrens 		vd->vdev_devid = spa_strdup(vd->vdev_devid);
4354451Seschrock 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
4364451Seschrock 	    &vd->vdev_physpath) == 0)
4374451Seschrock 		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
438789Sahrens 
439789Sahrens 	/*
4401171Seschrock 	 * Set the whole_disk property.  If it's not specified, leave the value
4411171Seschrock 	 * as -1.
4421171Seschrock 	 */
4431171Seschrock 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
4441171Seschrock 	    &vd->vdev_wholedisk) != 0)
4451171Seschrock 		vd->vdev_wholedisk = -1ULL;
4461171Seschrock 
4471171Seschrock 	/*
4481544Seschrock 	 * Look for the 'not present' flag.  This will only be set if the device
4491544Seschrock 	 * was not present at the time of import.
4501544Seschrock 	 */
4516643Seschrock 	if (!spa->spa_import_faulted)
4526643Seschrock 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
4536643Seschrock 		    &vd->vdev_not_present);
4541544Seschrock 
4551544Seschrock 	/*
4561732Sbonwick 	 * Get the alignment requirement.
4571732Sbonwick 	 */
4581732Sbonwick 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);
4591732Sbonwick 
4601732Sbonwick 	/*
461789Sahrens 	 * If we're a top-level vdev, try to load the allocation parameters.
462789Sahrens 	 */
463789Sahrens 	if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
464789Sahrens 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
465789Sahrens 		    &vd->vdev_ms_array);
466789Sahrens 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
467789Sahrens 		    &vd->vdev_ms_shift);
468789Sahrens 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
469789Sahrens 		    &vd->vdev_asize);
470789Sahrens 	}
471789Sahrens 
472789Sahrens 	/*
4734451Seschrock 	 * If we're a leaf vdev, try to load the DTL object and other state.
474789Sahrens 	 */
4756643Seschrock 	if (vd->vdev_ops->vdev_op_leaf &&
4766643Seschrock 	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE)) {
4776643Seschrock 		if (alloctype == VDEV_ALLOC_LOAD) {
4786643Seschrock 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
4798241SJeff.Bonwick@Sun.COM 			    &vd->vdev_dtl_smo.smo_object);
4806643Seschrock 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
4816643Seschrock 			    &vd->vdev_unspare);
4826643Seschrock 		}
4831732Sbonwick 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
4841732Sbonwick 		    &vd->vdev_offline);
4856643Seschrock 
4864451Seschrock 		/*
4874451Seschrock 		 * When importing a pool, we want to ignore the persistent fault
4884451Seschrock 		 * state, as the diagnosis made on another system may not be
4894451Seschrock 		 * valid in the current context.
4904451Seschrock 		 */
4914451Seschrock 		if (spa->spa_load_state == SPA_LOAD_OPEN) {
4924451Seschrock 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
4934451Seschrock 			    &vd->vdev_faulted);
4944451Seschrock 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
4954451Seschrock 			    &vd->vdev_degraded);
4964451Seschrock 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
4974451Seschrock 			    &vd->vdev_removed);
4984451Seschrock 		}
499789Sahrens 	}
500789Sahrens 
501789Sahrens 	/*
502789Sahrens 	 * Add ourselves to the parent's list of children.
503789Sahrens 	 */
504789Sahrens 	vdev_add_child(parent, vd);
505789Sahrens 
5062082Seschrock 	*vdp = vd;
5072082Seschrock 
5082082Seschrock 	return (0);
509789Sahrens }
510789Sahrens 
511789Sahrens void
512789Sahrens vdev_free(vdev_t *vd)
513789Sahrens {
514789Sahrens 	int c;
5154451Seschrock 	spa_t *spa = vd->vdev_spa;
516789Sahrens 
517789Sahrens 	/*
518789Sahrens 	 * vdev_free() implies closing the vdev first.  This is simpler than
519789Sahrens 	 * trying to ensure complicated semantics for all callers.
520789Sahrens 	 */
521789Sahrens 	vdev_close(vd);
522789Sahrens 
5237754SJeff.Bonwick@Sun.COM 	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
524789Sahrens 
525789Sahrens 	/*
526789Sahrens 	 * Free all children.
527789Sahrens 	 */
528789Sahrens 	for (c = 0; c < vd->vdev_children; c++)
529789Sahrens 		vdev_free(vd->vdev_child[c]);
530789Sahrens 
531789Sahrens 	ASSERT(vd->vdev_child == NULL);
532789Sahrens 	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
533789Sahrens 
534789Sahrens 	/*
535789Sahrens 	 * Discard allocation state.
536789Sahrens 	 */
537789Sahrens 	if (vd == vd->vdev_top)
538789Sahrens 		vdev_metaslab_fini(vd);
539789Sahrens 
540789Sahrens 	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
5412082Seschrock 	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
542789Sahrens 	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);
543789Sahrens 
544789Sahrens 	/*
545789Sahrens 	 * Remove this vdev from its parent's child list.
546789Sahrens 	 */
547789Sahrens 	vdev_remove_child(vd->vdev_parent, vd);
548789Sahrens 
549789Sahrens 	ASSERT(vd->vdev_parent == NULL);
550789Sahrens 
5514451Seschrock 	/*
5524451Seschrock 	 * Clean up vdev structure.
5534451Seschrock 	 */
5544451Seschrock 	vdev_queue_fini(vd);
5554451Seschrock 	vdev_cache_fini(vd);
5564451Seschrock 
5574451Seschrock 	if (vd->vdev_path)
5584451Seschrock 		spa_strfree(vd->vdev_path);
5594451Seschrock 	if (vd->vdev_devid)
5604451Seschrock 		spa_strfree(vd->vdev_devid);
5614451Seschrock 	if (vd->vdev_physpath)
5624451Seschrock 		spa_strfree(vd->vdev_physpath);
5634451Seschrock 
5644451Seschrock 	if (vd->vdev_isspare)
5654451Seschrock 		spa_spare_remove(vd);
5665450Sbrendan 	if (vd->vdev_isl2cache)
5675450Sbrendan 		spa_l2cache_remove(vd);
5684451Seschrock 
5694451Seschrock 	txg_list_destroy(&vd->vdev_ms_list);
5704451Seschrock 	txg_list_destroy(&vd->vdev_dtl_list);
5718241SJeff.Bonwick@Sun.COM 
5724451Seschrock 	mutex_enter(&vd->vdev_dtl_lock);
5738241SJeff.Bonwick@Sun.COM 	for (int t = 0; t < DTL_TYPES; t++) {
5748241SJeff.Bonwick@Sun.COM 		space_map_unload(&vd->vdev_dtl[t]);
5758241SJeff.Bonwick@Sun.COM 		space_map_destroy(&vd->vdev_dtl[t]);
5768241SJeff.Bonwick@Sun.COM 	}
5774451Seschrock 	mutex_exit(&vd->vdev_dtl_lock);
5788241SJeff.Bonwick@Sun.COM 
5794451Seschrock 	mutex_destroy(&vd->vdev_dtl_lock);
5804451Seschrock 	mutex_destroy(&vd->vdev_stat_lock);
5817754SJeff.Bonwick@Sun.COM 	mutex_destroy(&vd->vdev_probe_lock);
5824451Seschrock 
5834451Seschrock 	if (vd == spa->spa_root_vdev)
5844451Seschrock 		spa->spa_root_vdev = NULL;
5854451Seschrock 
5864451Seschrock 	kmem_free(vd, sizeof (vdev_t));
587789Sahrens }
588789Sahrens 
589789Sahrens /*
590789Sahrens  * Transfer top-level vdev state from svd to tvd.
591789Sahrens  */
592789Sahrens static void
593789Sahrens vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
594789Sahrens {
595789Sahrens 	spa_t *spa = svd->vdev_spa;
596789Sahrens 	metaslab_t *msp;
597789Sahrens 	vdev_t *vd;
598789Sahrens 	int t;
599789Sahrens 
600789Sahrens 	ASSERT(tvd == tvd->vdev_top);
601789Sahrens 
602789Sahrens 	tvd->vdev_ms_array = svd->vdev_ms_array;
603789Sahrens 	tvd->vdev_ms_shift = svd->vdev_ms_shift;
604789Sahrens 	tvd->vdev_ms_count = svd->vdev_ms_count;
605789Sahrens 
606789Sahrens 	svd->vdev_ms_array = 0;
607789Sahrens 	svd->vdev_ms_shift = 0;
608789Sahrens 	svd->vdev_ms_count = 0;
609789Sahrens 
610789Sahrens 	tvd->vdev_mg = svd->vdev_mg;
611789Sahrens 	tvd->vdev_ms = svd->vdev_ms;
612789Sahrens 
613789Sahrens 	svd->vdev_mg = NULL;
614789Sahrens 	svd->vdev_ms = NULL;
6151732Sbonwick 
6161732Sbonwick 	if (tvd->vdev_mg != NULL)
6171732Sbonwick 		tvd->vdev_mg->mg_vd = tvd;
618789Sahrens 
619789Sahrens 	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
620789Sahrens 	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
6212082Seschrock 	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
622789Sahrens 
623789Sahrens 	svd->vdev_stat.vs_alloc = 0;
624789Sahrens 	svd->vdev_stat.vs_space = 0;
6252082Seschrock 	svd->vdev_stat.vs_dspace = 0;
626789Sahrens 
627789Sahrens 	for (t = 0; t < TXG_SIZE; t++) {
628789Sahrens 		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
629789Sahrens 			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
630789Sahrens 		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
631789Sahrens 			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
632789Sahrens 		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
633789Sahrens 			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
634789Sahrens 	}
635789Sahrens 
6367754SJeff.Bonwick@Sun.COM 	if (list_link_active(&svd->vdev_config_dirty_node)) {
637789Sahrens 		vdev_config_clean(svd);
638789Sahrens 		vdev_config_dirty(tvd);
639789Sahrens 	}
640789Sahrens 
6417754SJeff.Bonwick@Sun.COM 	if (list_link_active(&svd->vdev_state_dirty_node)) {
6427754SJeff.Bonwick@Sun.COM 		vdev_state_clean(svd);
6437754SJeff.Bonwick@Sun.COM 		vdev_state_dirty(tvd);
6447754SJeff.Bonwick@Sun.COM 	}
6457754SJeff.Bonwick@Sun.COM 
6462082Seschrock 	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
6472082Seschrock 	svd->vdev_deflate_ratio = 0;
6484527Sperrin 
6494527Sperrin 	tvd->vdev_islog = svd->vdev_islog;
6504527Sperrin 	svd->vdev_islog = 0;
651789Sahrens }
652789Sahrens 
653789Sahrens static void
654789Sahrens vdev_top_update(vdev_t *tvd, vdev_t *vd)
655789Sahrens {
656789Sahrens 	int c;
657789Sahrens 
658789Sahrens 	if (vd == NULL)
659789Sahrens 		return;
660789Sahrens 
661789Sahrens 	vd->vdev_top = tvd;
662789Sahrens 
663789Sahrens 	for (c = 0; c < vd->vdev_children; c++)
664789Sahrens 		vdev_top_update(tvd, vd->vdev_child[c]);
665789Sahrens }
666789Sahrens 
667789Sahrens /*
668789Sahrens  * Add a mirror/replacing vdev above an existing vdev.
669789Sahrens  */
670789Sahrens vdev_t *
671789Sahrens vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
672789Sahrens {
673789Sahrens 	spa_t *spa = cvd->vdev_spa;
674789Sahrens 	vdev_t *pvd = cvd->vdev_parent;
675789Sahrens 	vdev_t *mvd;
676789Sahrens 
6777754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
678789Sahrens 
679789Sahrens 	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
6801732Sbonwick 
6811732Sbonwick 	mvd->vdev_asize = cvd->vdev_asize;
6821732Sbonwick 	mvd->vdev_ashift = cvd->vdev_ashift;
6831732Sbonwick 	mvd->vdev_state = cvd->vdev_state;
6841732Sbonwick 
685789Sahrens 	vdev_remove_child(pvd, cvd);
686789Sahrens 	vdev_add_child(pvd, mvd);
687789Sahrens 	cvd->vdev_id = mvd->vdev_children;
688789Sahrens 	vdev_add_child(mvd, cvd);
689789Sahrens 	vdev_top_update(cvd->vdev_top, cvd->vdev_top);
690789Sahrens 
691789Sahrens 	if (mvd == mvd->vdev_top)
692789Sahrens 		vdev_top_transfer(cvd, mvd);
693789Sahrens 
694789Sahrens 	return (mvd);
695789Sahrens }
696789Sahrens 
697789Sahrens /*
698789Sahrens  * Remove a 1-way mirror/replacing vdev from the tree.
699789Sahrens  */
700789Sahrens void
701789Sahrens vdev_remove_parent(vdev_t *cvd)
702789Sahrens {
703789Sahrens 	vdev_t *mvd = cvd->vdev_parent;
704789Sahrens 	vdev_t *pvd = mvd->vdev_parent;
705789Sahrens 
7067754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
707789Sahrens 
708789Sahrens 	ASSERT(mvd->vdev_children == 1);
709789Sahrens 	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
7102082Seschrock 	    mvd->vdev_ops == &vdev_replacing_ops ||
7112082Seschrock 	    mvd->vdev_ops == &vdev_spare_ops);
7121732Sbonwick 	cvd->vdev_ashift = mvd->vdev_ashift;
713789Sahrens 
714789Sahrens 	vdev_remove_child(mvd, cvd);
715789Sahrens 	vdev_remove_child(pvd, mvd);
7168241SJeff.Bonwick@Sun.COM 
7177754SJeff.Bonwick@Sun.COM 	/*
7187754SJeff.Bonwick@Sun.COM 	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
7197754SJeff.Bonwick@Sun.COM 	 * Otherwise, we could have detached an offline device, and when we
7207754SJeff.Bonwick@Sun.COM 	 * go to import the pool we'll think we have two top-level vdevs,
7217754SJeff.Bonwick@Sun.COM 	 * instead of a different version of the same top-level vdev.
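	 *
	 * The guid_delta below is computed with unsigned (modulo 2^64)
	 * arithmetic, so the adjustment is correct even when cvd's guid
	 * is numerically larger than mvd's.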
7227754SJeff.Bonwick@Sun.COM 	 */
7238241SJeff.Bonwick@Sun.COM 	if (mvd->vdev_top == mvd) {
7248241SJeff.Bonwick@Sun.COM 		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
7258241SJeff.Bonwick@Sun.COM 		cvd->vdev_guid += guid_delta;
7268241SJeff.Bonwick@Sun.COM 		cvd->vdev_guid_sum += guid_delta;
7278241SJeff.Bonwick@Sun.COM 	}
728789Sahrens 	cvd->vdev_id = mvd->vdev_id;
729789Sahrens 	vdev_add_child(pvd, cvd);
730789Sahrens 	vdev_top_update(cvd->vdev_top, cvd->vdev_top);
731789Sahrens 
732789Sahrens 	if (cvd == cvd->vdev_top)
733789Sahrens 		vdev_top_transfer(mvd, cvd);
734789Sahrens 
735789Sahrens 	ASSERT(mvd->vdev_children == 0);
736789Sahrens 	vdev_free(mvd);
737789Sahrens }
738789Sahrens 
7391544Seschrock int
740789Sahrens vdev_metaslab_init(vdev_t *vd, uint64_t txg)
741789Sahrens {
742789Sahrens 	spa_t *spa = vd->vdev_spa;
7431732Sbonwick 	objset_t *mos = spa->spa_meta_objset;
7444527Sperrin 	metaslab_class_t *mc;
7451732Sbonwick 	uint64_t m;
746789Sahrens 	uint64_t oldc = vd->vdev_ms_count;
747789Sahrens 	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
7481732Sbonwick 	metaslab_t **mspp;
7491732Sbonwick 	int error;
750789Sahrens 
7511585Sbonwick 	if (vd->vdev_ms_shift == 0)	/* not being allocated from yet */
7521585Sbonwick 		return (0);
7531585Sbonwick 
754789Sahrens 	ASSERT(oldc <= newc);
755789Sahrens 
7564527Sperrin 	if (vd->vdev_islog)
7574527Sperrin 		mc = spa->spa_log_class;
7584527Sperrin 	else
7594527Sperrin 		mc = spa->spa_normal_class;
7604527Sperrin 
7611732Sbonwick 	if (vd->vdev_mg == NULL)
7621732Sbonwick 		vd->vdev_mg = metaslab_group_create(mc, vd);
7631732Sbonwick 
7641732Sbonwick 	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
7651732Sbonwick 
7661732Sbonwick 	if (oldc != 0) {
7671732Sbonwick 		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
7681732Sbonwick 		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
7691732Sbonwick 	}
7701732Sbonwick 
7711732Sbonwick 	vd->vdev_ms = mspp;
772789Sahrens 	vd->vdev_ms_count = newc;
773789Sahrens 
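	/*
	 * When loading an existing vdev (txg == 0), read each new
	 * metaslab's space map object number from the on-disk metaslab
	 * array; at creation time there is nothing on disk to read yet.
	 */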
7741732Sbonwick 	for (m = oldc; m < newc; m++) {
7751732Sbonwick 		space_map_obj_t smo = { 0, 0, 0 };
776789Sahrens 		if (txg == 0) {
7771732Sbonwick 			uint64_t object = 0;
7781732Sbonwick 			error = dmu_read(mos, vd->vdev_ms_array,
7791732Sbonwick 			    m * sizeof (uint64_t), sizeof (uint64_t), &object);
7801732Sbonwick 			if (error)
7811732Sbonwick 				return (error);
7821732Sbonwick 			if (object != 0) {
7831732Sbonwick 				dmu_buf_t *db;
7841732Sbonwick 				error = dmu_bonus_hold(mos, object, FTAG, &db);
7851732Sbonwick 				if (error)
7861732Sbonwick 					return (error);
7874944Smaybee 				ASSERT3U(db->db_size, >=, sizeof (smo));
7884944Smaybee 				bcopy(db->db_data, &smo, sizeof (smo));
7891732Sbonwick 				ASSERT3U(smo.smo_object, ==, object);
7901544Seschrock 				dmu_buf_rele(db, FTAG);
791789Sahrens 			}
792789Sahrens 		}
7931732Sbonwick 		vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
7941732Sbonwick 		    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
795789Sahrens 	}
796789Sahrens 
7971544Seschrock 	return (0);
798789Sahrens }
799789Sahrens 
800789Sahrens void
801789Sahrens vdev_metaslab_fini(vdev_t *vd)
802789Sahrens {
803789Sahrens 	uint64_t m;
804789Sahrens 	uint64_t count = vd->vdev_ms_count;
805789Sahrens 
806789Sahrens 	if (vd->vdev_ms != NULL) {
807789Sahrens 		for (m = 0; m < count; m++)
8081732Sbonwick 			if (vd->vdev_ms[m] != NULL)
8091732Sbonwick 				metaslab_fini(vd->vdev_ms[m]);
810789Sahrens 		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
811789Sahrens 		vd->vdev_ms = NULL;
812789Sahrens 	}
813789Sahrens }
814789Sahrens 
8157754SJeff.Bonwick@Sun.COM typedef struct vdev_probe_stats {
8167754SJeff.Bonwick@Sun.COM 	boolean_t	vps_readable;
8177754SJeff.Bonwick@Sun.COM 	boolean_t	vps_writeable;
8187754SJeff.Bonwick@Sun.COM 	int		vps_flags;
8197754SJeff.Bonwick@Sun.COM } vdev_probe_stats_t;
8207754SJeff.Bonwick@Sun.COM 
8217754SJeff.Bonwick@Sun.COM static void
8227754SJeff.Bonwick@Sun.COM vdev_probe_done(zio_t *zio)
8235329Sgw25295 {
8248241SJeff.Bonwick@Sun.COM 	spa_t *spa = zio->io_spa;
825*8632SBill.Moore@Sun.COM 	vdev_t *vd = zio->io_vd;
8267754SJeff.Bonwick@Sun.COM 	vdev_probe_stats_t *vps = zio->io_private;
827*8632SBill.Moore@Sun.COM 
828*8632SBill.Moore@Sun.COM 	ASSERT(vd->vdev_probe_zio != NULL);
8297754SJeff.Bonwick@Sun.COM 
8307754SJeff.Bonwick@Sun.COM 	if (zio->io_type == ZIO_TYPE_READ) {
8317754SJeff.Bonwick@Sun.COM 		if (zio->io_error == 0)
8327754SJeff.Bonwick@Sun.COM 			vps->vps_readable = 1;
8338241SJeff.Bonwick@Sun.COM 		if (zio->io_error == 0 && spa_writeable(spa)) {
834*8632SBill.Moore@Sun.COM 			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
8357754SJeff.Bonwick@Sun.COM 			    zio->io_offset, zio->io_size, zio->io_data,
8367754SJeff.Bonwick@Sun.COM 			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
8377754SJeff.Bonwick@Sun.COM 			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
8387754SJeff.Bonwick@Sun.COM 		} else {
8397754SJeff.Bonwick@Sun.COM 			zio_buf_free(zio->io_data, zio->io_size);
8407754SJeff.Bonwick@Sun.COM 		}
8417754SJeff.Bonwick@Sun.COM 	} else if (zio->io_type == ZIO_TYPE_WRITE) {
8427754SJeff.Bonwick@Sun.COM 		if (zio->io_error == 0)
8437754SJeff.Bonwick@Sun.COM 			vps->vps_writeable = 1;
8447754SJeff.Bonwick@Sun.COM 		zio_buf_free(zio->io_data, zio->io_size);
8457754SJeff.Bonwick@Sun.COM 	} else if (zio->io_type == ZIO_TYPE_NULL) {
846*8632SBill.Moore@Sun.COM 		zio_t *pio;
8477754SJeff.Bonwick@Sun.COM 
8487754SJeff.Bonwick@Sun.COM 		vd->vdev_cant_read |= !vps->vps_readable;
8497754SJeff.Bonwick@Sun.COM 		vd->vdev_cant_write |= !vps->vps_writeable;
8507754SJeff.Bonwick@Sun.COM 
8517754SJeff.Bonwick@Sun.COM 		if (vdev_readable(vd) &&
8528241SJeff.Bonwick@Sun.COM 		    (vdev_writeable(vd) || !spa_writeable(spa))) {
8537754SJeff.Bonwick@Sun.COM 			zio->io_error = 0;
8547754SJeff.Bonwick@Sun.COM 		} else {
8557754SJeff.Bonwick@Sun.COM 			ASSERT(zio->io_error != 0);
8567754SJeff.Bonwick@Sun.COM 			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
8578241SJeff.Bonwick@Sun.COM 			    spa, vd, NULL, 0, 0);
8587754SJeff.Bonwick@Sun.COM 			zio->io_error = ENXIO;
8597754SJeff.Bonwick@Sun.COM 		}
860*8632SBill.Moore@Sun.COM 
861*8632SBill.Moore@Sun.COM 		mutex_enter(&vd->vdev_probe_lock);
862*8632SBill.Moore@Sun.COM 		ASSERT(vd->vdev_probe_zio == zio);
863*8632SBill.Moore@Sun.COM 		vd->vdev_probe_zio = NULL;
864*8632SBill.Moore@Sun.COM 		mutex_exit(&vd->vdev_probe_lock);
865*8632SBill.Moore@Sun.COM 
866*8632SBill.Moore@Sun.COM 		while ((pio = zio_walk_parents(zio)) != NULL)
867*8632SBill.Moore@Sun.COM 			if (!vdev_accessible(vd, pio))
868*8632SBill.Moore@Sun.COM 				pio->io_error = ENXIO;
869*8632SBill.Moore@Sun.COM 
8707754SJeff.Bonwick@Sun.COM 		kmem_free(vps, sizeof (*vps));
8717754SJeff.Bonwick@Sun.COM 	}
8727754SJeff.Bonwick@Sun.COM }
8735329Sgw25295 
8747754SJeff.Bonwick@Sun.COM /*
8757754SJeff.Bonwick@Sun.COM  * Determine whether this device is accessible by reading and writing
8767754SJeff.Bonwick@Sun.COM  * to several known locations: the pad regions of each vdev label
8777754SJeff.Bonwick@Sun.COM  * but the first (which we leave alone in case it contains a VTOC).
8787754SJeff.Bonwick@Sun.COM  */
8797754SJeff.Bonwick@Sun.COM zio_t *
880*8632SBill.Moore@Sun.COM vdev_probe(vdev_t *vd, zio_t *zio)
8817754SJeff.Bonwick@Sun.COM {
8827754SJeff.Bonwick@Sun.COM 	spa_t *spa = vd->vdev_spa;
883*8632SBill.Moore@Sun.COM 	vdev_probe_stats_t *vps = NULL;
884*8632SBill.Moore@Sun.COM 	zio_t *pio;
8857754SJeff.Bonwick@Sun.COM 
8867754SJeff.Bonwick@Sun.COM 	ASSERT(vd->vdev_ops->vdev_op_leaf);
8877754SJeff.Bonwick@Sun.COM 
888*8632SBill.Moore@Sun.COM 	/*
889*8632SBill.Moore@Sun.COM 	 * Don't probe the probe.
890*8632SBill.Moore@Sun.COM 	 */
891*8632SBill.Moore@Sun.COM 	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
892*8632SBill.Moore@Sun.COM 		return (NULL);
893*8632SBill.Moore@Sun.COM 
894*8632SBill.Moore@Sun.COM 	/*
895*8632SBill.Moore@Sun.COM 	 * To prevent 'probe storms' when a device fails, we create
896*8632SBill.Moore@Sun.COM 	 * just one probe i/o at a time.  All zios that want to probe
897*8632SBill.Moore@Sun.COM 	 * this vdev will become parents of the probe io.
898*8632SBill.Moore@Sun.COM 	 */
899*8632SBill.Moore@Sun.COM 	mutex_enter(&vd->vdev_probe_lock);
900*8632SBill.Moore@Sun.COM 
901*8632SBill.Moore@Sun.COM 	if ((pio = vd->vdev_probe_zio) == NULL) {
902*8632SBill.Moore@Sun.COM 		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
903*8632SBill.Moore@Sun.COM 
904*8632SBill.Moore@Sun.COM 		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
905*8632SBill.Moore@Sun.COM 		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
906*8632SBill.Moore@Sun.COM 		    ZIO_FLAG_DONT_RETRY;
907*8632SBill.Moore@Sun.COM 
908*8632SBill.Moore@Sun.COM 		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
909*8632SBill.Moore@Sun.COM 			/*
910*8632SBill.Moore@Sun.COM 			 * vdev_cant_read and vdev_cant_write can only
911*8632SBill.Moore@Sun.COM 			 * transition from TRUE to FALSE when we have the
912*8632SBill.Moore@Sun.COM 			 * SCL_ZIO lock as writer; otherwise they can only
913*8632SBill.Moore@Sun.COM 			 * transition from FALSE to TRUE.  This ensures that
914*8632SBill.Moore@Sun.COM 			 * any zio looking at these values can assume that
915*8632SBill.Moore@Sun.COM 			 * failures persist for the life of the I/O.  That's
916*8632SBill.Moore@Sun.COM 			 * important because when a device has intermittent
917*8632SBill.Moore@Sun.COM 			 * connectivity problems, we want to ensure that
918*8632SBill.Moore@Sun.COM 			 * they're ascribed to the device (ENXIO) and not
919*8632SBill.Moore@Sun.COM 			 * the zio (EIO).
920*8632SBill.Moore@Sun.COM 			 *
921*8632SBill.Moore@Sun.COM 			 * Since we hold SCL_ZIO as writer here, clear both
922*8632SBill.Moore@Sun.COM 			 * values so the probe can reevaluate from first
923*8632SBill.Moore@Sun.COM 			 * principles.
924*8632SBill.Moore@Sun.COM 			 */
925*8632SBill.Moore@Sun.COM 			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
926*8632SBill.Moore@Sun.COM 			vd->vdev_cant_read = B_FALSE;
927*8632SBill.Moore@Sun.COM 			vd->vdev_cant_write = B_FALSE;
928*8632SBill.Moore@Sun.COM 		}
929*8632SBill.Moore@Sun.COM 
930*8632SBill.Moore@Sun.COM 		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
931*8632SBill.Moore@Sun.COM 		    vdev_probe_done, vps,
932*8632SBill.Moore@Sun.COM 		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
933*8632SBill.Moore@Sun.COM 
934*8632SBill.Moore@Sun.COM 		if (zio != NULL) {
935*8632SBill.Moore@Sun.COM 			vd->vdev_probe_wanted = B_TRUE;
936*8632SBill.Moore@Sun.COM 			spa_async_request(spa, SPA_ASYNC_PROBE);
937*8632SBill.Moore@Sun.COM 		}
938*8632SBill.Moore@Sun.COM 	}
939*8632SBill.Moore@Sun.COM 
940*8632SBill.Moore@Sun.COM 	if (zio != NULL)
941*8632SBill.Moore@Sun.COM 		zio_add_child(zio, pio);
942*8632SBill.Moore@Sun.COM 
943*8632SBill.Moore@Sun.COM 	mutex_exit(&vd->vdev_probe_lock);
944*8632SBill.Moore@Sun.COM 
945*8632SBill.Moore@Sun.COM 	if (vps == NULL) {
946*8632SBill.Moore@Sun.COM 		ASSERT(zio != NULL);
947*8632SBill.Moore@Sun.COM 		return (NULL);
948*8632SBill.Moore@Sun.COM 	}
9497754SJeff.Bonwick@Sun.COM 
9507754SJeff.Bonwick@Sun.COM 	for (int l = 1; l < VDEV_LABELS; l++) {
951*8632SBill.Moore@Sun.COM 		zio_nowait(zio_read_phys(pio, vd,
9527754SJeff.Bonwick@Sun.COM 		    vdev_label_offset(vd->vdev_psize, l,
9537754SJeff.Bonwick@Sun.COM 		    offsetof(vdev_label_t, vl_pad)),
9547754SJeff.Bonwick@Sun.COM 		    VDEV_SKIP_SIZE, zio_buf_alloc(VDEV_SKIP_SIZE),
9557754SJeff.Bonwick@Sun.COM 		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
9567754SJeff.Bonwick@Sun.COM 		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
9577754SJeff.Bonwick@Sun.COM 	}
9587754SJeff.Bonwick@Sun.COM 
959*8632SBill.Moore@Sun.COM 	if (zio == NULL)
960*8632SBill.Moore@Sun.COM 		return (pio);
961*8632SBill.Moore@Sun.COM 
962*8632SBill.Moore@Sun.COM 	zio_nowait(pio);
963*8632SBill.Moore@Sun.COM 	return (NULL);
9645329Sgw25295 }
9655329Sgw25295 
966789Sahrens /*
967789Sahrens  * Prepare a virtual device for access.
968789Sahrens  */
969789Sahrens int
970789Sahrens vdev_open(vdev_t *vd)
971789Sahrens {
9728241SJeff.Bonwick@Sun.COM 	spa_t *spa = vd->vdev_spa;
973789Sahrens 	int error;
974789Sahrens 	int c;
975789Sahrens 	uint64_t osize = 0;
976789Sahrens 	uint64_t asize, psize;
9771732Sbonwick 	uint64_t ashift = 0;
978789Sahrens 
9798241SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
9808241SJeff.Bonwick@Sun.COM 
981789Sahrens 	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
982789Sahrens 	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
983789Sahrens 	    vd->vdev_state == VDEV_STATE_OFFLINE);
984789Sahrens 
985789Sahrens 	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
986789Sahrens 
9874451Seschrock 	if (!vd->vdev_removed && vd->vdev_faulted) {
9884451Seschrock 		ASSERT(vd->vdev_children == 0);
9894451Seschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
9904451Seschrock 		    VDEV_AUX_ERR_EXCEEDED);
9914451Seschrock 		return (ENXIO);
9924451Seschrock 	} else if (vd->vdev_offline) {
993789Sahrens 		ASSERT(vd->vdev_children == 0);
9941544Seschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
995789Sahrens 		return (ENXIO);
996789Sahrens 	}
997789Sahrens 
998789Sahrens 	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);
999789Sahrens 
10001544Seschrock 	if (zio_injection_enabled && error == 0)
10011544Seschrock 		error = zio_handle_device_injection(vd, ENXIO);
10021544Seschrock 
10034451Seschrock 	if (error) {
10044451Seschrock 		if (vd->vdev_removed &&
10054451Seschrock 		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
10064451Seschrock 			vd->vdev_removed = B_FALSE;
1007789Sahrens 
10081544Seschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1009789Sahrens 		    vd->vdev_stat.vs_aux);
1010789Sahrens 		return (error);
1011789Sahrens 	}
1012789Sahrens 
10134451Seschrock 	vd->vdev_removed = B_FALSE;
10144451Seschrock 
10154451Seschrock 	if (vd->vdev_degraded) {
10164451Seschrock 		ASSERT(vd->vdev_children == 0);
10174451Seschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
10184451Seschrock 		    VDEV_AUX_ERR_EXCEEDED);
10194451Seschrock 	} else {
10204451Seschrock 		vd->vdev_state = VDEV_STATE_HEALTHY;
10214451Seschrock 	}
1022789Sahrens 
1023789Sahrens 	for (c = 0; c < vd->vdev_children; c++)
10241544Seschrock 		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
10251544Seschrock 			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
10261544Seschrock 			    VDEV_AUX_NONE);
10271544Seschrock 			break;
10281544Seschrock 		}
1029789Sahrens 
1030789Sahrens 	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
1031789Sahrens 
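	/*
	 * A leaf's usable asize excludes the regions reserved at the
	 * front and back of the device for the vdev labels.
	 */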
1032789Sahrens 	if (vd->vdev_children == 0) {
1033789Sahrens 		if (osize < SPA_MINDEVSIZE) {
10341544Seschrock 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
10351544Seschrock 			    VDEV_AUX_TOO_SMALL);
1036789Sahrens 			return (EOVERFLOW);
1037789Sahrens 		}
1038789Sahrens 		psize = osize;
1039789Sahrens 		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
1040789Sahrens 	} else {
10411732Sbonwick 		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
1042789Sahrens 		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
10431544Seschrock 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
10441544Seschrock 			    VDEV_AUX_TOO_SMALL);
1045789Sahrens 			return (EOVERFLOW);
1046789Sahrens 		}
1047789Sahrens 		psize = 0;
1048789Sahrens 		asize = osize;
1049789Sahrens 	}
1050789Sahrens 
1051789Sahrens 	vd->vdev_psize = psize;
1052789Sahrens 
1053789Sahrens 	if (vd->vdev_asize == 0) {
1054789Sahrens 		/*
1055789Sahrens 		 * This is the first-ever open, so use the computed values.
10561732Sbonwick 		 * For testing purposes, a higher ashift can be requested.
1057789Sahrens 		 */
1058789Sahrens 		vd->vdev_asize = asize;
10591732Sbonwick 		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
1060789Sahrens 	} else {
1061789Sahrens 		/*
1062789Sahrens 		 * Make sure the alignment requirement hasn't increased.
1063789Sahrens 		 */
10641732Sbonwick 		if (ashift > vd->vdev_top->vdev_ashift) {
10651544Seschrock 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
10661544Seschrock 			    VDEV_AUX_BAD_LABEL);
1067789Sahrens 			return (EINVAL);
1068789Sahrens 		}
1069789Sahrens 
1070789Sahrens 		/*
1071789Sahrens 		 * Make sure the device hasn't shrunk.
1072789Sahrens 		 */
1073789Sahrens 		if (asize < vd->vdev_asize) {
10741544Seschrock 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
10751544Seschrock 			    VDEV_AUX_BAD_LABEL);
1076789Sahrens 			return (EINVAL);
1077789Sahrens 		}
1078789Sahrens 
1079789Sahrens 		/*
1080789Sahrens 		 * If all children are healthy and the asize has increased,
1081789Sahrens 		 * then we've experienced dynamic LUN growth.
1082789Sahrens 		 */
1083789Sahrens 		if (vd->vdev_state == VDEV_STATE_HEALTHY &&
1084789Sahrens 		    asize > vd->vdev_asize) {
1085789Sahrens 			vd->vdev_asize = asize;
1086789Sahrens 		}
1087789Sahrens 	}
1088789Sahrens 
10891544Seschrock 	/*
10905329Sgw25295 	 * Ensure we can issue some IO before declaring the
10915329Sgw25295 	 * vdev open for business.
10925329Sgw25295 	 */
10937754SJeff.Bonwick@Sun.COM 	if (vd->vdev_ops->vdev_op_leaf &&
10947754SJeff.Bonwick@Sun.COM 	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
10955329Sgw25295 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
10967754SJeff.Bonwick@Sun.COM 		    VDEV_AUX_IO_FAILURE);
10975329Sgw25295 		return (error);
10985329Sgw25295 	}
10995329Sgw25295 
11005329Sgw25295 	/*
11012082Seschrock 	 * If this is a top-level vdev, compute the raidz-deflation
11022082Seschrock 	 * ratio.  Note, we hard-code in 128k (1<<17) because it is the
11032082Seschrock 	 * current "typical" blocksize.  Even if SPA_MAXBLOCKSIZE
11042082Seschrock 	 * changes, this algorithm must never change, or we will
11052082Seschrock 	 * inconsistently account for existing bp's.
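	 *
	 * For a plain disk or mirror, psize_to_asize() of 128k is just
	 * 128k, so the ratio works out to (1<<17) / (1<<8) == 512;
	 * RAID-Z vdevs get a proportionally smaller value reflecting
	 * their parity overhead.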
11062082Seschrock 	 */
11072082Seschrock 	if (vd->vdev_top == vd) {
11082082Seschrock 		vd->vdev_deflate_ratio = (1<<17) /
11092082Seschrock 		    (vdev_psize_to_asize(vd, 1<<17) >> SPA_MINBLOCKSHIFT);
11102082Seschrock 	}
11112082Seschrock 
11127046Sahrens 	/*
11137046Sahrens 	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
11148241SJeff.Bonwick@Sun.COM 	 * resilver.  But don't do this if we are doing a reopen for a scrub,
11158241SJeff.Bonwick@Sun.COM 	 * since this would just restart the scrub we are already doing.
11167046Sahrens 	 */
11178241SJeff.Bonwick@Sun.COM 	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
11188241SJeff.Bonwick@Sun.COM 	    vdev_resilver_needed(vd, NULL, NULL))
11198241SJeff.Bonwick@Sun.COM 		spa_async_request(spa, SPA_ASYNC_RESILVER);
11207046Sahrens 
1121789Sahrens 	return (0);
1122789Sahrens }
1123789Sahrens 
1124789Sahrens /*
11251986Seschrock  * Called once the vdevs are all opened, this routine validates the label
11261986Seschrock  * contents.  This needs to be done before vdev_load() so that we don't
11274451Seschrock  * inadvertently do repair I/Os to the wrong device.
11281986Seschrock  *
11291986Seschrock  * This function will only return failure if one of the vdevs indicates that it
11301986Seschrock  * has since been destroyed or exported.  This is only possible if
11311986Seschrock  * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
11321986Seschrock  * will be updated but the function will return 0.
11331986Seschrock  */
11341986Seschrock int
11351986Seschrock vdev_validate(vdev_t *vd)
11361986Seschrock {
11371986Seschrock 	spa_t *spa = vd->vdev_spa;
11381986Seschrock 	int c;
11391986Seschrock 	nvlist_t *label;
11407754SJeff.Bonwick@Sun.COM 	uint64_t guid, top_guid;
11411986Seschrock 	uint64_t state;
11421986Seschrock 
11431986Seschrock 	for (c = 0; c < vd->vdev_children; c++)
11441986Seschrock 		if (vdev_validate(vd->vdev_child[c]) != 0)
11454070Smc142369 			return (EBADF);
11461986Seschrock 
11472174Seschrock 	/*
11482174Seschrock 	 * If the device has already failed, or was marked offline, don't do
11492174Seschrock 	 * any further validation.  Otherwise, label I/O will fail and we will
11502174Seschrock 	 * overwrite the previous state.
11512174Seschrock 	 */
11527754SJeff.Bonwick@Sun.COM 	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
11531986Seschrock 
11541986Seschrock 		if ((label = vdev_label_read_config(vd)) == NULL) {
11551986Seschrock 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
11561986Seschrock 			    VDEV_AUX_BAD_LABEL);
11571986Seschrock 			return (0);
11581986Seschrock 		}
11591986Seschrock 
11601986Seschrock 		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
11611986Seschrock 		    &guid) != 0 || guid != spa_guid(spa)) {
11621986Seschrock 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
11631986Seschrock 			    VDEV_AUX_CORRUPT_DATA);
11641986Seschrock 			nvlist_free(label);
11651986Seschrock 			return (0);
11661986Seschrock 		}
11671986Seschrock 
11687754SJeff.Bonwick@Sun.COM 		/*
11697754SJeff.Bonwick@Sun.COM 		 * If this vdev just became a top-level vdev because its
11707754SJeff.Bonwick@Sun.COM 		 * sibling was detached, it will have adopted the parent's
11717754SJeff.Bonwick@Sun.COM 		 * vdev guid -- but the label may or may not be on disk yet.
11727754SJeff.Bonwick@Sun.COM 		 * Fortunately, either version of the label will have the
11737754SJeff.Bonwick@Sun.COM 		 * same top guid, so if we're a top-level vdev, we can
11747754SJeff.Bonwick@Sun.COM 		 * safely compare to that instead.
11757754SJeff.Bonwick@Sun.COM 		 */
11761986Seschrock 		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
11777754SJeff.Bonwick@Sun.COM 		    &guid) != 0 ||
11787754SJeff.Bonwick@Sun.COM 		    nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
11797754SJeff.Bonwick@Sun.COM 		    &top_guid) != 0 ||
11807754SJeff.Bonwick@Sun.COM 		    (vd->vdev_guid != guid &&
11817754SJeff.Bonwick@Sun.COM 		    (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
11821986Seschrock 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
11831986Seschrock 			    VDEV_AUX_CORRUPT_DATA);
11841986Seschrock 			nvlist_free(label);
11851986Seschrock 			return (0);
11861986Seschrock 		}
11871986Seschrock 
11881986Seschrock 		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
11891986Seschrock 		    &state) != 0) {
11901986Seschrock 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
11911986Seschrock 			    VDEV_AUX_CORRUPT_DATA);
11921986Seschrock 			nvlist_free(label);
11931986Seschrock 			return (0);
11941986Seschrock 		}
11951986Seschrock 
11961986Seschrock 		nvlist_free(label);
11971986Seschrock 
11981986Seschrock 		if (spa->spa_load_state == SPA_LOAD_OPEN &&
11991986Seschrock 		    state != POOL_STATE_ACTIVE)
12004070Smc142369 			return (EBADF);
12016976Seschrock 
12026976Seschrock 		/*
12036976Seschrock 		 * If we were able to open and validate a vdev that was
12046976Seschrock 		 * previously marked permanently unavailable, clear that state
12056976Seschrock 		 * now.
12066976Seschrock 		 */
12076976Seschrock 		if (vd->vdev_not_present)
12086976Seschrock 			vd->vdev_not_present = 0;
12091986Seschrock 	}
12101986Seschrock 
12111986Seschrock 	return (0);
12121986Seschrock }
12131986Seschrock 
12141986Seschrock /*
1215789Sahrens  * Close a virtual device.
1216789Sahrens  */
1217789Sahrens void
1218789Sahrens vdev_close(vdev_t *vd)
1219789Sahrens {
12208241SJeff.Bonwick@Sun.COM 	spa_t *spa = vd->vdev_spa;
12218241SJeff.Bonwick@Sun.COM 
12228241SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
12238241SJeff.Bonwick@Sun.COM 
1224789Sahrens 	vd->vdev_ops->vdev_op_close(vd);
1225789Sahrens 
12264451Seschrock 	vdev_cache_purge(vd);
1227789Sahrens 
12281986Seschrock 	/*
12291986Seschrock 	 * We record the previous state before we close it, so that if we are
12301986Seschrock 	 * doing a reopen(), we don't generate FMA ereports if we notice that
12311986Seschrock 	 * it's still faulted.
12321986Seschrock 	 */
12331986Seschrock 	vd->vdev_prevstate = vd->vdev_state;
12341986Seschrock 
1235789Sahrens 	if (vd->vdev_offline)
1236789Sahrens 		vd->vdev_state = VDEV_STATE_OFFLINE;
1237789Sahrens 	else
1238789Sahrens 		vd->vdev_state = VDEV_STATE_CLOSED;
12391544Seschrock 	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1240789Sahrens }
1241789Sahrens 
1242789Sahrens void
12431544Seschrock vdev_reopen(vdev_t *vd)
1244789Sahrens {
12451544Seschrock 	spa_t *spa = vd->vdev_spa;
1246789Sahrens 
12477754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
12481544Seschrock 
1249789Sahrens 	vdev_close(vd);
1250789Sahrens 	(void) vdev_open(vd);
1251789Sahrens 
1252789Sahrens 	/*
12533377Seschrock 	 * Call vdev_validate() here to make sure we have the same device.
12543377Seschrock 	 * Otherwise, a device with an invalid label could be successfully
12553377Seschrock 	 * opened in response to vdev_reopen().
12563377Seschrock 	 */
12576643Seschrock 	if (vd->vdev_aux) {
12586643Seschrock 		(void) vdev_validate_aux(vd);
12597754SJeff.Bonwick@Sun.COM 		if (vdev_readable(vd) && vdev_writeable(vd) &&
12606643Seschrock 		    !l2arc_vdev_present(vd)) {
12616643Seschrock 			uint64_t size = vdev_get_rsize(vd);
12626643Seschrock 			l2arc_add_vdev(spa, vd,
12636643Seschrock 			    VDEV_LABEL_START_SIZE,
12646643Seschrock 			    size - VDEV_LABEL_START_SIZE);
12656643Seschrock 		}
12666643Seschrock 	} else {
12676643Seschrock 		(void) vdev_validate(vd);
12686643Seschrock 	}
12693377Seschrock 
12703377Seschrock 	/*
12714451Seschrock 	 * Reassess parent vdev's health.
1272789Sahrens 	 */
12734451Seschrock 	vdev_propagate_state(vd);
1274789Sahrens }
1275789Sahrens 
1276789Sahrens int
12772082Seschrock vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
1278789Sahrens {
1279789Sahrens 	int error;
1280789Sahrens 
1281789Sahrens 	/*
1282789Sahrens 	 * Normally, partial opens (e.g. of a mirror) are allowed.
1283789Sahrens 	 * For a create, however, we want to fail the request if
1284789Sahrens 	 * there are any components we can't open.
1285789Sahrens 	 */
1286789Sahrens 	error = vdev_open(vd);
1287789Sahrens 
1288789Sahrens 	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
1289789Sahrens 		vdev_close(vd);
1290789Sahrens 		return (error ? error : ENXIO);
1291789Sahrens 	}
1292789Sahrens 
1293789Sahrens 	/*
1294789Sahrens 	 * Recursively initialize all labels.
1295789Sahrens 	 */
12963377Seschrock 	if ((error = vdev_label_init(vd, txg, isreplacing ?
12973377Seschrock 	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
1298789Sahrens 		vdev_close(vd);
1299789Sahrens 		return (error);
1300789Sahrens 	}
1301789Sahrens 
1302789Sahrens 	return (0);
1303789Sahrens }
1304789Sahrens 
1305789Sahrens /*
1306789Sahrens  * This is the latter half of vdev_create().  It is distinct because it
1307789Sahrens  * involves initiating transactions in order to do metaslab creation.
1308789Sahrens  * For creation, we want to try to create all vdevs at once and then undo it
1309789Sahrens  * if anything fails; this is much harder if we have pending transactions.
1310789Sahrens  */
13111585Sbonwick void
1312789Sahrens vdev_init(vdev_t *vd, uint64_t txg)
1313789Sahrens {
1314789Sahrens 	/*
1315789Sahrens 	 * Aim for roughly 200 metaslabs per vdev.
1316789Sahrens 	 */
1317789Sahrens 	vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
1318789Sahrens 	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
1319789Sahrens 
1320789Sahrens 	/*
13211585Sbonwick 	 * Initialize the vdev's metaslabs.  This can't fail because
13221585Sbonwick 	 * there's nothing to read when creating all new metaslabs.
1323789Sahrens 	 */
13241585Sbonwick 	VERIFY(vdev_metaslab_init(vd, txg) == 0);
1325789Sahrens }
1326789Sahrens 
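/*
 * Mark the given metaslab and/or DTL dirty on this top-level vdev for the
 * given txg, and add the vdev itself to the per-txg dirty vdev list.
 */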
1327789Sahrens void
13281732Sbonwick vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
1329789Sahrens {
13301732Sbonwick 	ASSERT(vd == vd->vdev_top);
13311732Sbonwick 	ASSERT(ISP2(flags));
1332789Sahrens 
13331732Sbonwick 	if (flags & VDD_METASLAB)
13341732Sbonwick 		(void) txg_list_add(&vd->vdev_ms_list, arg, txg);
13351732Sbonwick 
13361732Sbonwick 	if (flags & VDD_DTL)
13371732Sbonwick 		(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
13381732Sbonwick 
13391732Sbonwick 	(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
1340789Sahrens }
1341789Sahrens 
13428241SJeff.Bonwick@Sun.COM /*
13438241SJeff.Bonwick@Sun.COM  * DTLs.
13448241SJeff.Bonwick@Sun.COM  *
13458241SJeff.Bonwick@Sun.COM  * A vdev's DTL (dirty time log) is the set of transaction groups for which
13468241SJeff.Bonwick@Sun.COM  * the vdev has less than perfect replication.  There are four kinds of DTL:
13478241SJeff.Bonwick@Sun.COM  *
13488241SJeff.Bonwick@Sun.COM  * DTL_MISSING: txgs for which the vdev has no valid copies of the data
13498241SJeff.Bonwick@Sun.COM  *
13508241SJeff.Bonwick@Sun.COM  * DTL_PARTIAL: txgs for which data is available, but not fully replicated
13518241SJeff.Bonwick@Sun.COM  *
13528241SJeff.Bonwick@Sun.COM  * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
13538241SJeff.Bonwick@Sun.COM  *	scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
13548241SJeff.Bonwick@Sun.COM  *	txgs that was scrubbed.
13558241SJeff.Bonwick@Sun.COM  *
13568241SJeff.Bonwick@Sun.COM  * DTL_OUTAGE: txgs which cannot currently be read, whether due to
13578241SJeff.Bonwick@Sun.COM  *	persistent errors or just some device being offline.
13588241SJeff.Bonwick@Sun.COM  *	Unlike the other three, the DTL_OUTAGE map is not generally
13598241SJeff.Bonwick@Sun.COM  *	maintained; it's only computed when needed, typically to
13608241SJeff.Bonwick@Sun.COM  *	determine whether a device can be detached.
13618241SJeff.Bonwick@Sun.COM  *
13628241SJeff.Bonwick@Sun.COM  * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
13638241SJeff.Bonwick@Sun.COM  * either has the data or it doesn't.
13648241SJeff.Bonwick@Sun.COM  *
13658241SJeff.Bonwick@Sun.COM  * For interior vdevs such as mirror and RAID-Z the picture is more complex.
13668241SJeff.Bonwick@Sun.COM  * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
13678241SJeff.Bonwick@Sun.COM  * if any child is less than fully replicated, then so is its parent.
13688241SJeff.Bonwick@Sun.COM  * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
13698241SJeff.Bonwick@Sun.COM  * comprising only those txgs which appear in 'maxfaults + 1' or more children;
13708241SJeff.Bonwick@Sun.COM  * those are the txgs we don't have enough replication to read.  For example,
13718241SJeff.Bonwick@Sun.COM  * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
13728241SJeff.Bonwick@Sun.COM  * thus, its DTL_MISSING consists of the set of txgs that appear in more than
13738241SJeff.Bonwick@Sun.COM  * two child DTL_MISSING maps.
13748241SJeff.Bonwick@Sun.COM  *
13758241SJeff.Bonwick@Sun.COM  * It should be clear from the above that to compute the DTLs and outage maps
13768241SJeff.Bonwick@Sun.COM  * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
13778241SJeff.Bonwick@Sun.COM  * Therefore, that is all we keep on disk.  When loading the pool, or after
13788241SJeff.Bonwick@Sun.COM  * a configuration change, we generate all other DTLs from first principles.
13798241SJeff.Bonwick@Sun.COM  */
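/*
 * Editor's worked example (not part of the original source): for a
 * six-disk raidz2 (vdev_nparity == 2, so DTL_MISSING uses minref == 3
 * in vdev_dtl_reassess() below), a txg missing on one or two children
 * appears only in the parent's DTL_PARTIAL (minref == 1), since two
 * parities can still reconstruct it; once the same txg is missing on
 * three children it also enters the parent's DTL_MISSING.  For a 2-way
 * mirror, minref == vdev_children == 2: a txg must be missing on both
 * sides before the mirror itself is missing it.
 */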
1380789Sahrens void
13818241SJeff.Bonwick@Sun.COM vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1382789Sahrens {
13838241SJeff.Bonwick@Sun.COM 	space_map_t *sm = &vd->vdev_dtl[t];
13848241SJeff.Bonwick@Sun.COM 
13858241SJeff.Bonwick@Sun.COM 	ASSERT(t < DTL_TYPES);
13868241SJeff.Bonwick@Sun.COM 	ASSERT(vd != vd->vdev_spa->spa_root_vdev);
13878241SJeff.Bonwick@Sun.COM 
1388789Sahrens 	mutex_enter(sm->sm_lock);
1389789Sahrens 	if (!space_map_contains(sm, txg, size))
1390789Sahrens 		space_map_add(sm, txg, size);
1391789Sahrens 	mutex_exit(sm->sm_lock);
1392789Sahrens }
1393789Sahrens 
13948241SJeff.Bonwick@Sun.COM boolean_t
13958241SJeff.Bonwick@Sun.COM vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1396789Sahrens {
13978241SJeff.Bonwick@Sun.COM 	space_map_t *sm = &vd->vdev_dtl[t];
13988241SJeff.Bonwick@Sun.COM 	boolean_t dirty = B_FALSE;
13998241SJeff.Bonwick@Sun.COM 
14008241SJeff.Bonwick@Sun.COM 	ASSERT(t < DTL_TYPES);
14018241SJeff.Bonwick@Sun.COM 	ASSERT(vd != vd->vdev_spa->spa_root_vdev);
1402789Sahrens 
1403789Sahrens 	mutex_enter(sm->sm_lock);
14048241SJeff.Bonwick@Sun.COM 	if (sm->sm_space != 0)
14058241SJeff.Bonwick@Sun.COM 		dirty = space_map_contains(sm, txg, size);
1406789Sahrens 	mutex_exit(sm->sm_lock);
1407789Sahrens 
1408789Sahrens 	return (dirty);
1409789Sahrens }
1410789Sahrens 
14118241SJeff.Bonwick@Sun.COM boolean_t
14128241SJeff.Bonwick@Sun.COM vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
14138241SJeff.Bonwick@Sun.COM {
14148241SJeff.Bonwick@Sun.COM 	space_map_t *sm = &vd->vdev_dtl[t];
14158241SJeff.Bonwick@Sun.COM 	boolean_t empty;
14168241SJeff.Bonwick@Sun.COM 
14178241SJeff.Bonwick@Sun.COM 	mutex_enter(sm->sm_lock);
14188241SJeff.Bonwick@Sun.COM 	empty = (sm->sm_space == 0);
14198241SJeff.Bonwick@Sun.COM 	mutex_exit(sm->sm_lock);
14208241SJeff.Bonwick@Sun.COM 
14218241SJeff.Bonwick@Sun.COM 	return (empty);
14228241SJeff.Bonwick@Sun.COM }
14238241SJeff.Bonwick@Sun.COM 
1424789Sahrens /*
1425789Sahrens  * Reassess DTLs after a config change or scrub completion.
1426789Sahrens  */
1427789Sahrens void
1428789Sahrens vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
1429789Sahrens {
14301544Seschrock 	spa_t *spa = vd->vdev_spa;
14318241SJeff.Bonwick@Sun.COM 	avl_tree_t reftree;
14328241SJeff.Bonwick@Sun.COM 	int minref;
14338241SJeff.Bonwick@Sun.COM 
14348241SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
14358241SJeff.Bonwick@Sun.COM 
14368241SJeff.Bonwick@Sun.COM 	for (int c = 0; c < vd->vdev_children; c++)
14378241SJeff.Bonwick@Sun.COM 		vdev_dtl_reassess(vd->vdev_child[c], txg,
14388241SJeff.Bonwick@Sun.COM 		    scrub_txg, scrub_done);
14398241SJeff.Bonwick@Sun.COM 
14408241SJeff.Bonwick@Sun.COM 	if (vd == spa->spa_root_vdev)
14418241SJeff.Bonwick@Sun.COM 		return;
14428241SJeff.Bonwick@Sun.COM 
14438241SJeff.Bonwick@Sun.COM 	if (vd->vdev_ops->vdev_op_leaf) {
1444789Sahrens 		mutex_enter(&vd->vdev_dtl_lock);
14457046Sahrens 		if (scrub_txg != 0 &&
14467046Sahrens 		    (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) {
14477046Sahrens 			/* XXX should check scrub_done? */
14487046Sahrens 			/*
14497046Sahrens 			 * We completed a scrub up to scrub_txg.  If we
14507046Sahrens 			 * did it without rebooting, then the scrub dtl
14517046Sahrens 			 * will be valid, so excise the old region and
14527046Sahrens 			 * fold in the scrub dtl.  Otherwise, leave the
14537046Sahrens 			 * dtl as-is if there was an error.
14548241SJeff.Bonwick@Sun.COM 			 *
14558241SJeff.Bonwick@Sun.COM 			 * There's a little trick here: to excise the beginning
14568241SJeff.Bonwick@Sun.COM 			 * of the DTL_MISSING map, we put it into a reference
14578241SJeff.Bonwick@Sun.COM 			 * tree and then add a segment with refcnt -1 that
14588241SJeff.Bonwick@Sun.COM 			 * covers the range [0, scrub_txg).  This means
14598241SJeff.Bonwick@Sun.COM 			 * that each txg in that range has refcnt -1 or 0.
14608241SJeff.Bonwick@Sun.COM 			 * We then add DTL_SCRUB with a refcnt of 2, so that
14618241SJeff.Bonwick@Sun.COM 			 * entries in the range [0, scrub_txg) will have a
14628241SJeff.Bonwick@Sun.COM 			 * positive refcnt -- either 1 or 2.  We then convert
14638241SJeff.Bonwick@Sun.COM 			 * the reference tree into the new DTL_MISSING map.
14647046Sahrens 			 */
14658241SJeff.Bonwick@Sun.COM 			space_map_ref_create(&reftree);
14668241SJeff.Bonwick@Sun.COM 			space_map_ref_add_map(&reftree,
14678241SJeff.Bonwick@Sun.COM 			    &vd->vdev_dtl[DTL_MISSING], 1);
14688241SJeff.Bonwick@Sun.COM 			space_map_ref_add_seg(&reftree, 0, scrub_txg, -1);
14698241SJeff.Bonwick@Sun.COM 			space_map_ref_add_map(&reftree,
14708241SJeff.Bonwick@Sun.COM 			    &vd->vdev_dtl[DTL_SCRUB], 2);
14718241SJeff.Bonwick@Sun.COM 			space_map_ref_generate_map(&reftree,
14728241SJeff.Bonwick@Sun.COM 			    &vd->vdev_dtl[DTL_MISSING], 1);
14738241SJeff.Bonwick@Sun.COM 			space_map_ref_destroy(&reftree);
1474789Sahrens 		}
14758241SJeff.Bonwick@Sun.COM 		space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
14768241SJeff.Bonwick@Sun.COM 		space_map_walk(&vd->vdev_dtl[DTL_MISSING],
14778241SJeff.Bonwick@Sun.COM 		    space_map_add, &vd->vdev_dtl[DTL_PARTIAL]);
1478789Sahrens 		if (scrub_done)
14798241SJeff.Bonwick@Sun.COM 			space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
14808241SJeff.Bonwick@Sun.COM 		space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
14818241SJeff.Bonwick@Sun.COM 		if (!vdev_readable(vd))
14828241SJeff.Bonwick@Sun.COM 			space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
14838241SJeff.Bonwick@Sun.COM 		else
14848241SJeff.Bonwick@Sun.COM 			space_map_walk(&vd->vdev_dtl[DTL_MISSING],
14858241SJeff.Bonwick@Sun.COM 			    space_map_add, &vd->vdev_dtl[DTL_OUTAGE]);
1486789Sahrens 		mutex_exit(&vd->vdev_dtl_lock);
14877046Sahrens 
14881732Sbonwick 		if (txg != 0)
14891732Sbonwick 			vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
1490789Sahrens 		return;
1491789Sahrens 	}
1492789Sahrens 
1493789Sahrens 	mutex_enter(&vd->vdev_dtl_lock);
14948241SJeff.Bonwick@Sun.COM 	for (int t = 0; t < DTL_TYPES; t++) {
14958241SJeff.Bonwick@Sun.COM 		if (t == DTL_SCRUB)
14968241SJeff.Bonwick@Sun.COM 			continue;			/* leaf vdevs only */
14978241SJeff.Bonwick@Sun.COM 		if (t == DTL_PARTIAL)
14988241SJeff.Bonwick@Sun.COM 			minref = 1;			/* i.e. non-zero */
14998241SJeff.Bonwick@Sun.COM 		else if (vd->vdev_nparity != 0)
15008241SJeff.Bonwick@Sun.COM 			minref = vd->vdev_nparity + 1;	/* RAID-Z */
15018241SJeff.Bonwick@Sun.COM 		else
15028241SJeff.Bonwick@Sun.COM 			minref = vd->vdev_children;	/* any kind of mirror */
15038241SJeff.Bonwick@Sun.COM 		space_map_ref_create(&reftree);
15048241SJeff.Bonwick@Sun.COM 		for (int c = 0; c < vd->vdev_children; c++) {
15058241SJeff.Bonwick@Sun.COM 			vdev_t *cvd = vd->vdev_child[c];
15068241SJeff.Bonwick@Sun.COM 			mutex_enter(&cvd->vdev_dtl_lock);
15078241SJeff.Bonwick@Sun.COM 			space_map_ref_add_map(&reftree, &cvd->vdev_dtl[t], 1);
15088241SJeff.Bonwick@Sun.COM 			mutex_exit(&cvd->vdev_dtl_lock);
15098241SJeff.Bonwick@Sun.COM 		}
15108241SJeff.Bonwick@Sun.COM 		space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref);
15118241SJeff.Bonwick@Sun.COM 		space_map_ref_destroy(&reftree);
15128241SJeff.Bonwick@Sun.COM 	}
1513789Sahrens 	mutex_exit(&vd->vdev_dtl_lock);
1514789Sahrens }
1515789Sahrens 
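/*
 * Editor's worked example of the reference-tree excision performed in
 * vdev_dtl_reassess() above (illustration only, not original code).
 * Suppose DTL_MISSING covers txgs [10, 50), a scrub completed up to
 * scrub_txg == 30, and DTL_SCRUB recorded [20, 25) as unrepairable:
 *
 *	segment		refcnt contributions			net
 *	[10, 20)	+1 (MISSING), -1 ([0, 30) seg)		 0  dropped
 *	[20, 25)	+1 (MISSING), -1, +2 (SCRUB)		 2  kept
 *	[25, 30)	+1 (MISSING), -1			 0  dropped
 *	[30, 50)	+1 (MISSING)				 1  kept
 *
 * Generating the new map with minref == 1 keeps exactly the txgs that
 * are still missing: the unscrubbed tail plus whatever the scrub could
 * not repair.
 */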
1516789Sahrens static int
1517789Sahrens vdev_dtl_load(vdev_t *vd)
1518789Sahrens {
1519789Sahrens 	spa_t *spa = vd->vdev_spa;
15208241SJeff.Bonwick@Sun.COM 	space_map_obj_t *smo = &vd->vdev_dtl_smo;
15211732Sbonwick 	objset_t *mos = spa->spa_meta_objset;
1522789Sahrens 	dmu_buf_t *db;
1523789Sahrens 	int error;
1524789Sahrens 
1525789Sahrens 	ASSERT(vd->vdev_children == 0);
1526789Sahrens 
1527789Sahrens 	if (smo->smo_object == 0)
1528789Sahrens 		return (0);
1529789Sahrens 
15301732Sbonwick 	if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0)
15311544Seschrock 		return (error);
15321732Sbonwick 
15334944Smaybee 	ASSERT3U(db->db_size, >=, sizeof (*smo));
15344944Smaybee 	bcopy(db->db_data, smo, sizeof (*smo));
15351544Seschrock 	dmu_buf_rele(db, FTAG);
1536789Sahrens 
1537789Sahrens 	mutex_enter(&vd->vdev_dtl_lock);
15388241SJeff.Bonwick@Sun.COM 	error = space_map_load(&vd->vdev_dtl[DTL_MISSING],
15398241SJeff.Bonwick@Sun.COM 	    NULL, SM_ALLOC, smo, mos);
1540789Sahrens 	mutex_exit(&vd->vdev_dtl_lock);
1541789Sahrens 
1542789Sahrens 	return (error);
1543789Sahrens }
1544789Sahrens 
1545789Sahrens void
1546789Sahrens vdev_dtl_sync(vdev_t *vd, uint64_t txg)
1547789Sahrens {
1548789Sahrens 	spa_t *spa = vd->vdev_spa;
15498241SJeff.Bonwick@Sun.COM 	space_map_obj_t *smo = &vd->vdev_dtl_smo;
15508241SJeff.Bonwick@Sun.COM 	space_map_t *sm = &vd->vdev_dtl[DTL_MISSING];
15511732Sbonwick 	objset_t *mos = spa->spa_meta_objset;
1552789Sahrens 	space_map_t smsync;
1553789Sahrens 	kmutex_t smlock;
1554789Sahrens 	dmu_buf_t *db;
1555789Sahrens 	dmu_tx_t *tx;
1556789Sahrens 
1557789Sahrens 	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
1558789Sahrens 
1559789Sahrens 	if (vd->vdev_detached) {
1560789Sahrens 		if (smo->smo_object != 0) {
15611732Sbonwick 			int err = dmu_object_free(mos, smo->smo_object, tx);
1562789Sahrens 			ASSERT3U(err, ==, 0);
1563789Sahrens 			smo->smo_object = 0;
1564789Sahrens 		}
1565789Sahrens 		dmu_tx_commit(tx);
1566789Sahrens 		return;
1567789Sahrens 	}
1568789Sahrens 
1569789Sahrens 	if (smo->smo_object == 0) {
1570789Sahrens 		ASSERT(smo->smo_objsize == 0);
1571789Sahrens 		ASSERT(smo->smo_alloc == 0);
15721732Sbonwick 		smo->smo_object = dmu_object_alloc(mos,
1573789Sahrens 		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
1574789Sahrens 		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
1575789Sahrens 		ASSERT(smo->smo_object != 0);
1576789Sahrens 		vdev_config_dirty(vd->vdev_top);
1577789Sahrens 	}
1578789Sahrens 
1579789Sahrens 	mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL);
1580789Sahrens 
1581789Sahrens 	space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift,
1582789Sahrens 	    &smlock);
1583789Sahrens 
1584789Sahrens 	mutex_enter(&smlock);
1585789Sahrens 
1586789Sahrens 	mutex_enter(&vd->vdev_dtl_lock);
15871732Sbonwick 	space_map_walk(sm, space_map_add, &smsync);
1588789Sahrens 	mutex_exit(&vd->vdev_dtl_lock);
1589789Sahrens 
15901732Sbonwick 	space_map_truncate(smo, mos, tx);
15911732Sbonwick 	space_map_sync(&smsync, SM_ALLOC, smo, mos, tx);
1592789Sahrens 
1593789Sahrens 	space_map_destroy(&smsync);
1594789Sahrens 
1595789Sahrens 	mutex_exit(&smlock);
1596789Sahrens 	mutex_destroy(&smlock);
1597789Sahrens 
15981732Sbonwick 	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
1599789Sahrens 	dmu_buf_will_dirty(db, tx);
16004944Smaybee 	ASSERT3U(db->db_size, >=, sizeof (*smo));
16014944Smaybee 	bcopy(smo, db->db_data, sizeof (*smo));
16021544Seschrock 	dmu_buf_rele(db, FTAG);
1603789Sahrens 
1604789Sahrens 	dmu_tx_commit(tx);
1605789Sahrens }
1606789Sahrens 
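/*
 * Editor's note on the idiom above (not part of the original source):
 * vdev_dtl_sync() snapshots the live DTL_MISSING map into a private
 * copy (smsync) while holding vdev_dtl_lock, then truncates and
 * rewrites the on-disk space map object from that snapshot with the
 * DTL lock dropped, so lock hold time is bounded by an in-core walk
 * rather than by object I/O.  A minimal sketch of the pattern, with
 * hypothetical names:
 */
#if 0	/* illustration only */
	mutex_enter(&live_lock);
	snapshot_in_core(live, private);	/* cheap */
	mutex_exit(&live_lock);
	write_to_disk(private, tx);		/* slow, lock not held */
#endif
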
16077046Sahrens /*
16088241SJeff.Bonwick@Sun.COM  * Determine whether the specified vdev can be offlined/detached/removed
16098241SJeff.Bonwick@Sun.COM  * without losing data.
16108241SJeff.Bonwick@Sun.COM  */
16118241SJeff.Bonwick@Sun.COM boolean_t
16128241SJeff.Bonwick@Sun.COM vdev_dtl_required(vdev_t *vd)
16138241SJeff.Bonwick@Sun.COM {
16148241SJeff.Bonwick@Sun.COM 	spa_t *spa = vd->vdev_spa;
16158241SJeff.Bonwick@Sun.COM 	vdev_t *tvd = vd->vdev_top;
16168241SJeff.Bonwick@Sun.COM 	uint8_t cant_read = vd->vdev_cant_read;
16178241SJeff.Bonwick@Sun.COM 	boolean_t required;
16188241SJeff.Bonwick@Sun.COM 
16198241SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
16208241SJeff.Bonwick@Sun.COM 
16218241SJeff.Bonwick@Sun.COM 	if (vd == spa->spa_root_vdev || vd == tvd)
16228241SJeff.Bonwick@Sun.COM 		return (B_TRUE);
16238241SJeff.Bonwick@Sun.COM 
16248241SJeff.Bonwick@Sun.COM 	/*
16258241SJeff.Bonwick@Sun.COM 	 * Temporarily mark the device as unreadable, and then determine
16268241SJeff.Bonwick@Sun.COM 	 * whether this results in any DTL outages in the top-level vdev.
16278241SJeff.Bonwick@Sun.COM 	 * If not, we can safely offline/detach/remove the device.
16288241SJeff.Bonwick@Sun.COM 	 */
16298241SJeff.Bonwick@Sun.COM 	vd->vdev_cant_read = B_TRUE;
16308241SJeff.Bonwick@Sun.COM 	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
16318241SJeff.Bonwick@Sun.COM 	required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
16328241SJeff.Bonwick@Sun.COM 	vd->vdev_cant_read = cant_read;
16338241SJeff.Bonwick@Sun.COM 	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
16348241SJeff.Bonwick@Sun.COM 
16358241SJeff.Bonwick@Sun.COM 	return (required);
16368241SJeff.Bonwick@Sun.COM }
16378241SJeff.Bonwick@Sun.COM 
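/*
 * Editor's note (not part of the original source): vdev_offline() below
 * relies on this probe -- it refuses to offline a non-aux leaf when
 * vdev_dtl_required() returns B_TRUE, failing the request with EBUSY
 * rather than discarding the only valid copy of some data.
 */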
16388241SJeff.Bonwick@Sun.COM /*
16397046Sahrens  * Determine if resilver is needed, and if so the txg range.
16407046Sahrens  */
16417046Sahrens boolean_t
16427046Sahrens vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
16437046Sahrens {
16447046Sahrens 	boolean_t needed = B_FALSE;
16457046Sahrens 	uint64_t thismin = UINT64_MAX;
16467046Sahrens 	uint64_t thismax = 0;
16477046Sahrens 
16487046Sahrens 	if (vd->vdev_children == 0) {
16497046Sahrens 		mutex_enter(&vd->vdev_dtl_lock);
16508241SJeff.Bonwick@Sun.COM 		if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 &&
16518241SJeff.Bonwick@Sun.COM 		    vdev_writeable(vd)) {
16527046Sahrens 			space_seg_t *ss;
16537046Sahrens 
16548241SJeff.Bonwick@Sun.COM 			ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root);
16557046Sahrens 			thismin = ss->ss_start - 1;
16568241SJeff.Bonwick@Sun.COM 			ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root);
16577046Sahrens 			thismax = ss->ss_end;
16587046Sahrens 			needed = B_TRUE;
16597046Sahrens 		}
16607046Sahrens 		mutex_exit(&vd->vdev_dtl_lock);
16617046Sahrens 	} else {
16628241SJeff.Bonwick@Sun.COM 		for (int c = 0; c < vd->vdev_children; c++) {
16637046Sahrens 			vdev_t *cvd = vd->vdev_child[c];
16647046Sahrens 			uint64_t cmin, cmax;
16657046Sahrens 
16667046Sahrens 			if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
16677046Sahrens 				thismin = MIN(thismin, cmin);
16687046Sahrens 				thismax = MAX(thismax, cmax);
16697046Sahrens 				needed = B_TRUE;
16707046Sahrens 			}
16717046Sahrens 		}
16727046Sahrens 	}
16737046Sahrens 
16747046Sahrens 	if (needed && minp) {
16757046Sahrens 		*minp = thismin;
16767046Sahrens 		*maxp = thismax;
16777046Sahrens 	}
16787046Sahrens 	return (needed);
16797046Sahrens }
16807046Sahrens 
16811986Seschrock void
16821544Seschrock vdev_load(vdev_t *vd)
1683789Sahrens {
1684789Sahrens 	/*
1685789Sahrens 	 * Recursively load all children.
1686789Sahrens 	 */
16878241SJeff.Bonwick@Sun.COM 	for (int c = 0; c < vd->vdev_children; c++)
16881986Seschrock 		vdev_load(vd->vdev_child[c]);
1689789Sahrens 
1690789Sahrens 	/*
16911585Sbonwick 	 * If this is a top-level vdev, initialize its metaslabs.
1692789Sahrens 	 */
16931986Seschrock 	if (vd == vd->vdev_top &&
16941986Seschrock 	    (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
16951986Seschrock 	    vdev_metaslab_init(vd, 0) != 0))
16961986Seschrock 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
16971986Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1698789Sahrens 
1699789Sahrens 	/*
1700789Sahrens 	 * If this is a leaf vdev, load its DTL.
1701789Sahrens 	 */
17021986Seschrock 	if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
17031986Seschrock 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
17041986Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1705789Sahrens }
1706789Sahrens 
17072082Seschrock /*
17085450Sbrendan  * The special vdev case is used for hot spares and l2cache devices.  Its
17095450Sbrendan  * sole purpose is to set the vdev state for the associated vdev.  To do this,
17105450Sbrendan  * we make sure that we can open the underlying device, then try to read the
17115450Sbrendan  * label, and make sure that the label is sane and that it hasn't been
17125450Sbrendan  * repurposed to another pool.
17132082Seschrock  */
17142082Seschrock int
17155450Sbrendan vdev_validate_aux(vdev_t *vd)
17162082Seschrock {
17172082Seschrock 	nvlist_t *label;
17182082Seschrock 	uint64_t guid, version;
17192082Seschrock 	uint64_t state;
17202082Seschrock 
17217754SJeff.Bonwick@Sun.COM 	if (!vdev_readable(vd))
17226643Seschrock 		return (0);
17236643Seschrock 
17242082Seschrock 	if ((label = vdev_label_read_config(vd)) == NULL) {
17252082Seschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
17262082Seschrock 		    VDEV_AUX_CORRUPT_DATA);
17272082Seschrock 		return (-1);
17282082Seschrock 	}
17292082Seschrock 
17302082Seschrock 	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
17314577Sahrens 	    version > SPA_VERSION ||
17322082Seschrock 	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
17332082Seschrock 	    guid != vd->vdev_guid ||
17342082Seschrock 	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
17352082Seschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
17362082Seschrock 		    VDEV_AUX_CORRUPT_DATA);
17372082Seschrock 		nvlist_free(label);
17382082Seschrock 		return (-1);
17392082Seschrock 	}
17402082Seschrock 
17412082Seschrock 	/*
17422082Seschrock 	 * We don't actually check the pool state here.  If it's in fact in
17432082Seschrock 	 * use by another pool, we update this fact on the fly when requested.
17442082Seschrock 	 */
17452082Seschrock 	nvlist_free(label);
17462082Seschrock 	return (0);
17472082Seschrock }
17482082Seschrock 
1749789Sahrens void
1750789Sahrens vdev_sync_done(vdev_t *vd, uint64_t txg)
1751789Sahrens {
1752789Sahrens 	metaslab_t *msp;
1753789Sahrens 
1754789Sahrens 	while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
1755789Sahrens 		metaslab_sync_done(msp, txg);
1756789Sahrens }
1757789Sahrens 
1758789Sahrens void
1759789Sahrens vdev_sync(vdev_t *vd, uint64_t txg)
1760789Sahrens {
1761789Sahrens 	spa_t *spa = vd->vdev_spa;
1762789Sahrens 	vdev_t *lvd;
1763789Sahrens 	metaslab_t *msp;
17641732Sbonwick 	dmu_tx_t *tx;
1765789Sahrens 
17661732Sbonwick 	if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
17671732Sbonwick 		ASSERT(vd == vd->vdev_top);
17681732Sbonwick 		tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
17691732Sbonwick 		vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
17701732Sbonwick 		    DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
17711732Sbonwick 		ASSERT(vd->vdev_ms_array != 0);
17721732Sbonwick 		vdev_config_dirty(vd);
17731732Sbonwick 		dmu_tx_commit(tx);
17741732Sbonwick 	}
1775789Sahrens 
17761732Sbonwick 	while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
1777789Sahrens 		metaslab_sync(msp, txg);
17781732Sbonwick 		(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
17791732Sbonwick 	}
1780789Sahrens 
1781789Sahrens 	while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
1782789Sahrens 		vdev_dtl_sync(lvd, txg);
1783789Sahrens 
1784789Sahrens 	(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
1785789Sahrens }
1786789Sahrens 
1787789Sahrens uint64_t
1788789Sahrens vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
1789789Sahrens {
1790789Sahrens 	return (vd->vdev_ops->vdev_op_asize(vd, psize));
1791789Sahrens }
1792789Sahrens 
17934451Seschrock /*
17944451Seschrock  * Mark the given vdev faulted.  A faulted vdev behaves as if the device could
17954451Seschrock  * not be opened, and no I/O is attempted.
17964451Seschrock  */
1797789Sahrens int
17984451Seschrock vdev_fault(spa_t *spa, uint64_t guid)
17994451Seschrock {
18006643Seschrock 	vdev_t *vd;
18014451Seschrock 
18027754SJeff.Bonwick@Sun.COM 	spa_vdev_state_enter(spa);
18034451Seschrock 
18046643Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
18057754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENODEV));
18067754SJeff.Bonwick@Sun.COM 
18074451Seschrock 	if (!vd->vdev_ops->vdev_op_leaf)
18087754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
18094451Seschrock 
18104451Seschrock 	/*
18114451Seschrock 	 * Faulted state takes precedence over degraded.
18124451Seschrock 	 */
18134451Seschrock 	vd->vdev_faulted = 1ULL;
18144451Seschrock 	vd->vdev_degraded = 0ULL;
18157754SJeff.Bonwick@Sun.COM 	vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED);
18164451Seschrock 
18174451Seschrock 	/*
18188123SDavid.Marker@sun.com 	 * If marking the vdev as faulted causes the top-level vdev to become
18194451Seschrock 	 * unavailable, then back off and simply mark the vdev as degraded
18204451Seschrock 	 * instead.
18214451Seschrock 	 */
18226643Seschrock 	if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) {
18234451Seschrock 		vd->vdev_degraded = 1ULL;
18244451Seschrock 		vd->vdev_faulted = 0ULL;
18254451Seschrock 
18264451Seschrock 		/*
18274451Seschrock 		 * If we reopen the device and it's not dead, only then do we
18284451Seschrock 		 * mark it degraded.
18294451Seschrock 		 */
18304451Seschrock 		vdev_reopen(vd);
18314451Seschrock 
18325329Sgw25295 		if (vdev_readable(vd)) {
18334451Seschrock 			vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
18344451Seschrock 			    VDEV_AUX_ERR_EXCEEDED);
18354451Seschrock 		}
18364451Seschrock 	}
18374451Seschrock 
18387754SJeff.Bonwick@Sun.COM 	return (spa_vdev_state_exit(spa, vd, 0));
18394451Seschrock }
18404451Seschrock 
18414451Seschrock /*
18424451Seschrock  * Mark the given vdev degraded.  A degraded vdev is purely an indication to the
18434451Seschrock  * user that something is wrong.  The vdev continues to operate as normal as far
18444451Seschrock  * as I/O is concerned.
18454451Seschrock  */
18464451Seschrock int
18474451Seschrock vdev_degrade(spa_t *spa, uint64_t guid)
18484451Seschrock {
18496643Seschrock 	vdev_t *vd;
18504451Seschrock 
18517754SJeff.Bonwick@Sun.COM 	spa_vdev_state_enter(spa);
18524451Seschrock 
18536643Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
18547754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENODEV));
18557754SJeff.Bonwick@Sun.COM 
18564451Seschrock 	if (!vd->vdev_ops->vdev_op_leaf)
18577754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
18584451Seschrock 
18594451Seschrock 	/*
18604451Seschrock 	 * If the vdev is already faulted, then don't do anything.
18614451Seschrock 	 */
18627754SJeff.Bonwick@Sun.COM 	if (vd->vdev_faulted || vd->vdev_degraded)
18637754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, 0));
18644451Seschrock 
18654451Seschrock 	vd->vdev_degraded = 1ULL;
18664451Seschrock 	if (!vdev_is_dead(vd))
18674451Seschrock 		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
18684451Seschrock 		    VDEV_AUX_ERR_EXCEEDED);
18694451Seschrock 
18707754SJeff.Bonwick@Sun.COM 	return (spa_vdev_state_exit(spa, vd, 0));
18714451Seschrock }
18724451Seschrock 
18734451Seschrock /*
18744451Seschrock  * Online the given vdev.  If 'unspare' is set, it implies two things.  First,
18754451Seschrock  * any attached spare device should be detached when the device finishes
18764451Seschrock  * resilvering.  Second, the online should be treated like a 'test' online case,
18774451Seschrock  * so no FMA events are generated if the device fails to open.
18784451Seschrock  */
18794451Seschrock int
18807754SJeff.Bonwick@Sun.COM vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
1881789Sahrens {
18826643Seschrock 	vdev_t *vd;
1883789Sahrens 
18847754SJeff.Bonwick@Sun.COM 	spa_vdev_state_enter(spa);
18851485Slling 
18866643Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
18877754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENODEV));
1888789Sahrens 
18891585Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
18907754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
18911585Sbonwick 
1892789Sahrens 	vd->vdev_offline = B_FALSE;
18931485Slling 	vd->vdev_tmpoffline = B_FALSE;
18947754SJeff.Bonwick@Sun.COM 	vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
18957754SJeff.Bonwick@Sun.COM 	vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
18961544Seschrock 	vdev_reopen(vd->vdev_top);
18974451Seschrock 	vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
18984451Seschrock 
18994451Seschrock 	if (newstate)
19004451Seschrock 		*newstate = vd->vdev_state;
19014451Seschrock 	if ((flags & ZFS_ONLINE_UNSPARE) &&
19024451Seschrock 	    !vdev_is_dead(vd) && vd->vdev_parent &&
19034451Seschrock 	    vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
19044451Seschrock 	    vd->vdev_parent->vdev_child[0] == vd)
19054451Seschrock 		vd->vdev_unspare = B_TRUE;
1906789Sahrens 
19078241SJeff.Bonwick@Sun.COM 	return (spa_vdev_state_exit(spa, vd, 0));
1908789Sahrens }
1909789Sahrens 
1910789Sahrens int
19114451Seschrock vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
1912789Sahrens {
19136643Seschrock 	vdev_t *vd;
1914789Sahrens 
19157754SJeff.Bonwick@Sun.COM 	spa_vdev_state_enter(spa);
1916789Sahrens 
19176643Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
19187754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENODEV));
1919789Sahrens 
19201585Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
19217754SJeff.Bonwick@Sun.COM 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
19221585Sbonwick 
1923789Sahrens 	/*
19241732Sbonwick 	 * If the device isn't already offline, try to offline it.
1925789Sahrens 	 */
19261732Sbonwick 	if (!vd->vdev_offline) {
19271732Sbonwick 		/*
19288241SJeff.Bonwick@Sun.COM 		 * If this device has the only valid copy of some data,
19298241SJeff.Bonwick@Sun.COM 		 * don't allow it to be offlined.
19301732Sbonwick 		 */
19318241SJeff.Bonwick@Sun.COM 		if (vd->vdev_aux == NULL && vdev_dtl_required(vd))
19327754SJeff.Bonwick@Sun.COM 			return (spa_vdev_state_exit(spa, NULL, EBUSY));
1933789Sahrens 
19341732Sbonwick 		/*
19351732Sbonwick 		 * Offline this device and reopen its top-level vdev.
19361732Sbonwick 		 * If this action results in the top-level vdev becoming
19371732Sbonwick 		 * unusable, undo it and fail the request.
19381732Sbonwick 		 */
19391732Sbonwick 		vd->vdev_offline = B_TRUE;
19401544Seschrock 		vdev_reopen(vd->vdev_top);
19418241SJeff.Bonwick@Sun.COM 		if (vd->vdev_aux == NULL && vdev_is_dead(vd->vdev_top)) {
19421732Sbonwick 			vd->vdev_offline = B_FALSE;
19431732Sbonwick 			vdev_reopen(vd->vdev_top);
19447754SJeff.Bonwick@Sun.COM 			return (spa_vdev_state_exit(spa, NULL, EBUSY));
19451732Sbonwick 		}
1946789Sahrens 	}
1947789Sahrens 
19487754SJeff.Bonwick@Sun.COM 	vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
19491732Sbonwick 
19507754SJeff.Bonwick@Sun.COM 	return (spa_vdev_state_exit(spa, vd, 0));
1951789Sahrens }
1952789Sahrens 
19531544Seschrock /*
19541544Seschrock  * Clear the error counts associated with this vdev.  Unlike vdev_online() and
19551544Seschrock  * vdev_offline(), we assume the spa config is locked.  We also clear all
19561544Seschrock  * children.  If 'vd' is NULL, then the user wants to clear all vdevs.
19571544Seschrock  */
19581544Seschrock void
19597754SJeff.Bonwick@Sun.COM vdev_clear(spa_t *spa, vdev_t *vd)
1960789Sahrens {
19617754SJeff.Bonwick@Sun.COM 	vdev_t *rvd = spa->spa_root_vdev;
19627754SJeff.Bonwick@Sun.COM 
19637754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1964789Sahrens 
19651544Seschrock 	if (vd == NULL)
19667754SJeff.Bonwick@Sun.COM 		vd = rvd;
1967789Sahrens 
19681544Seschrock 	vd->vdev_stat.vs_read_errors = 0;
19691544Seschrock 	vd->vdev_stat.vs_write_errors = 0;
19701544Seschrock 	vd->vdev_stat.vs_checksum_errors = 0;
1971789Sahrens 
19727754SJeff.Bonwick@Sun.COM 	for (int c = 0; c < vd->vdev_children; c++)
19737754SJeff.Bonwick@Sun.COM 		vdev_clear(spa, vd->vdev_child[c]);
19744451Seschrock 
19754451Seschrock 	/*
19766959Sek110237 	 * If we're in the FAULTED state or have experienced failed I/O, then
19776959Sek110237 	 * clear the persistent state and attempt to reopen the device.  We
19786959Sek110237 	 * also mark the vdev config dirty, so that the new faulted state is
19796959Sek110237 	 * written out to disk.
19804451Seschrock 	 */
19817754SJeff.Bonwick@Sun.COM 	if (vd->vdev_faulted || vd->vdev_degraded ||
19827754SJeff.Bonwick@Sun.COM 	    !vdev_readable(vd) || !vdev_writeable(vd)) {
19836959Sek110237 
19844451Seschrock 		vd->vdev_faulted = vd->vdev_degraded = 0;
19857754SJeff.Bonwick@Sun.COM 		vd->vdev_cant_read = B_FALSE;
19867754SJeff.Bonwick@Sun.COM 		vd->vdev_cant_write = B_FALSE;
19877754SJeff.Bonwick@Sun.COM 
19884451Seschrock 		vdev_reopen(vd);
19894451Seschrock 
19907754SJeff.Bonwick@Sun.COM 		if (vd != rvd)
19917754SJeff.Bonwick@Sun.COM 			vdev_state_dirty(vd->vdev_top);
19927754SJeff.Bonwick@Sun.COM 
19937754SJeff.Bonwick@Sun.COM 		if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
19944808Sek110237 			spa_async_request(spa, SPA_ASYNC_RESILVER);
19954451Seschrock 
19964451Seschrock 		spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
19974451Seschrock 	}
1998789Sahrens }
1999789Sahrens 
20007754SJeff.Bonwick@Sun.COM boolean_t
20017754SJeff.Bonwick@Sun.COM vdev_is_dead(vdev_t *vd)
20025329Sgw25295 {
20037754SJeff.Bonwick@Sun.COM 	return (vd->vdev_state < VDEV_STATE_DEGRADED);
20045329Sgw25295 }
20055329Sgw25295 
20067754SJeff.Bonwick@Sun.COM boolean_t
20077754SJeff.Bonwick@Sun.COM vdev_readable(vdev_t *vd)
2008789Sahrens {
20097754SJeff.Bonwick@Sun.COM 	return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
2010789Sahrens }
2011789Sahrens 
20127754SJeff.Bonwick@Sun.COM boolean_t
20137754SJeff.Bonwick@Sun.COM vdev_writeable(vdev_t *vd)
2014789Sahrens {
20157754SJeff.Bonwick@Sun.COM 	return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
20167754SJeff.Bonwick@Sun.COM }
2017789Sahrens 
20187754SJeff.Bonwick@Sun.COM boolean_t
20197980SGeorge.Wilson@Sun.COM vdev_allocatable(vdev_t *vd)
20207980SGeorge.Wilson@Sun.COM {
20218241SJeff.Bonwick@Sun.COM 	uint64_t state = vd->vdev_state;
20228241SJeff.Bonwick@Sun.COM 
20237980SGeorge.Wilson@Sun.COM 	/*
20248241SJeff.Bonwick@Sun.COM 	 * We currently allow allocations from vdevs which may be in the
20257980SGeorge.Wilson@Sun.COM 	 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
20267980SGeorge.Wilson@Sun.COM 	 * fails to reopen then we'll catch it later when we're holding
20278241SJeff.Bonwick@Sun.COM 	 * the proper locks.  Note that we have to get the vdev state
20288241SJeff.Bonwick@Sun.COM 	 * in a local variable because although it changes atomically,
20298241SJeff.Bonwick@Sun.COM 	 * we're asking two separate questions about it.
20307980SGeorge.Wilson@Sun.COM 	 */
20318241SJeff.Bonwick@Sun.COM 	return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
20327980SGeorge.Wilson@Sun.COM 	    !vd->vdev_cant_write);
20337980SGeorge.Wilson@Sun.COM }
20347980SGeorge.Wilson@Sun.COM 
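/*
 * Editor's illustrative sketch (not part of the original source): why
 * vdev_allocatable() snapshots vdev_state into a local variable.  The
 * field changes atomically, but reading it twice could observe two
 * different states and pass a combined test that neither state alone
 * would satisfy:
 */
#if 0	/* racy variant -- what the local variable avoids */
	return (!(vd->vdev_state < VDEV_STATE_DEGRADED &&	/* read #1 */
	    vd->vdev_state != VDEV_STATE_CLOSED) &&		/* read #2 */
	    !vd->vdev_cant_write);
#endif
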
20357980SGeorge.Wilson@Sun.COM boolean_t
20367754SJeff.Bonwick@Sun.COM vdev_accessible(vdev_t *vd, zio_t *zio)
20377754SJeff.Bonwick@Sun.COM {
20387754SJeff.Bonwick@Sun.COM 	ASSERT(zio->io_vd == vd);
2039789Sahrens 
20407754SJeff.Bonwick@Sun.COM 	if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
20417754SJeff.Bonwick@Sun.COM 		return (B_FALSE);
2042789Sahrens 
20437754SJeff.Bonwick@Sun.COM 	if (zio->io_type == ZIO_TYPE_READ)
20447754SJeff.Bonwick@Sun.COM 		return (!vd->vdev_cant_read);
2045789Sahrens 
20467754SJeff.Bonwick@Sun.COM 	if (zio->io_type == ZIO_TYPE_WRITE)
20477754SJeff.Bonwick@Sun.COM 		return (!vd->vdev_cant_write);
20487754SJeff.Bonwick@Sun.COM 
20497754SJeff.Bonwick@Sun.COM 	return (B_TRUE);
2050789Sahrens }
2051789Sahrens 
2052789Sahrens /*
2053789Sahrens  * Get statistics for the given vdev.
2054789Sahrens  */
2055789Sahrens void
2056789Sahrens vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
2057789Sahrens {
2058789Sahrens 	vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
2059789Sahrens 
2060789Sahrens 	mutex_enter(&vd->vdev_stat_lock);
2061789Sahrens 	bcopy(&vd->vdev_stat, vs, sizeof (*vs));
20627046Sahrens 	vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors;
2063789Sahrens 	vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
2064789Sahrens 	vs->vs_state = vd->vdev_state;
20651175Slling 	vs->vs_rsize = vdev_get_rsize(vd);
2066789Sahrens 	mutex_exit(&vd->vdev_stat_lock);
2067789Sahrens 
2068789Sahrens 	/*
2069789Sahrens 	 * If we're getting stats on the root vdev, aggregate the I/O counts
2070789Sahrens 	 * over all top-level vdevs (i.e. the direct children of the root).
2071789Sahrens 	 */
2072789Sahrens 	if (vd == rvd) {
20737754SJeff.Bonwick@Sun.COM 		for (int c = 0; c < rvd->vdev_children; c++) {
2074789Sahrens 			vdev_t *cvd = rvd->vdev_child[c];
2075789Sahrens 			vdev_stat_t *cvs = &cvd->vdev_stat;
2076789Sahrens 
2077789Sahrens 			mutex_enter(&vd->vdev_stat_lock);
20787754SJeff.Bonwick@Sun.COM 			for (int t = 0; t < ZIO_TYPES; t++) {
2079789Sahrens 				vs->vs_ops[t] += cvs->vs_ops[t];
2080789Sahrens 				vs->vs_bytes[t] += cvs->vs_bytes[t];
2081789Sahrens 			}
2082789Sahrens 			vs->vs_scrub_examined += cvs->vs_scrub_examined;
2083789Sahrens 			mutex_exit(&vd->vdev_stat_lock);
2084789Sahrens 		}
2085789Sahrens 	}
2086789Sahrens }
2087789Sahrens 
2088789Sahrens void
20895450Sbrendan vdev_clear_stats(vdev_t *vd)
20905450Sbrendan {
20915450Sbrendan 	mutex_enter(&vd->vdev_stat_lock);
20925450Sbrendan 	vd->vdev_stat.vs_space = 0;
20935450Sbrendan 	vd->vdev_stat.vs_dspace = 0;
20945450Sbrendan 	vd->vdev_stat.vs_alloc = 0;
20955450Sbrendan 	mutex_exit(&vd->vdev_stat_lock);
20965450Sbrendan }
20975450Sbrendan 
20985450Sbrendan void
20997754SJeff.Bonwick@Sun.COM vdev_stat_update(zio_t *zio, uint64_t psize)
2100789Sahrens {
21018241SJeff.Bonwick@Sun.COM 	spa_t *spa = zio->io_spa;
21028241SJeff.Bonwick@Sun.COM 	vdev_t *rvd = spa->spa_root_vdev;
21037754SJeff.Bonwick@Sun.COM 	vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
2104789Sahrens 	vdev_t *pvd;
2105789Sahrens 	uint64_t txg = zio->io_txg;
2106789Sahrens 	vdev_stat_t *vs = &vd->vdev_stat;
2107789Sahrens 	zio_type_t type = zio->io_type;
2108789Sahrens 	int flags = zio->io_flags;
2109789Sahrens 
21107754SJeff.Bonwick@Sun.COM 	/*
21117754SJeff.Bonwick@Sun.COM 	 * If this i/o is a gang leader, it didn't do any actual work.
21127754SJeff.Bonwick@Sun.COM 	 */
21137754SJeff.Bonwick@Sun.COM 	if (zio->io_gang_tree)
21147754SJeff.Bonwick@Sun.COM 		return;
21157754SJeff.Bonwick@Sun.COM 
2116789Sahrens 	if (zio->io_error == 0) {
21177754SJeff.Bonwick@Sun.COM 		/*
21187754SJeff.Bonwick@Sun.COM 		 * If this is a root i/o, don't count it -- we've already
21197754SJeff.Bonwick@Sun.COM 		 * counted the top-level vdevs, and vdev_get_stats() will
21207754SJeff.Bonwick@Sun.COM 		 * aggregate them when asked.  This reduces contention on
21217754SJeff.Bonwick@Sun.COM 		 * the root vdev_stat_lock and implicitly handles blocks
21227754SJeff.Bonwick@Sun.COM 		 * that compress away to holes, for which there is no i/o.
21237754SJeff.Bonwick@Sun.COM 		 * (Holes never create vdev children, so all the counters
21247754SJeff.Bonwick@Sun.COM 		 * remain zero, which is what we want.)
21257754SJeff.Bonwick@Sun.COM 		 *
21267754SJeff.Bonwick@Sun.COM 		 * Note: this only applies to successful i/o (io_error == 0)
21277754SJeff.Bonwick@Sun.COM 		 * because unlike i/o counts, errors are not additive.
21287754SJeff.Bonwick@Sun.COM 		 * When reading a ditto block, for example, failure of
21297754SJeff.Bonwick@Sun.COM 		 * one top-level vdev does not imply a root-level error.
21307754SJeff.Bonwick@Sun.COM 		 */
21317754SJeff.Bonwick@Sun.COM 		if (vd == rvd)
21327754SJeff.Bonwick@Sun.COM 			return;
21337754SJeff.Bonwick@Sun.COM 
21347754SJeff.Bonwick@Sun.COM 		ASSERT(vd == zio->io_vd);
21358241SJeff.Bonwick@Sun.COM 
21368241SJeff.Bonwick@Sun.COM 		if (flags & ZIO_FLAG_IO_BYPASS)
21378241SJeff.Bonwick@Sun.COM 			return;
21388241SJeff.Bonwick@Sun.COM 
21398241SJeff.Bonwick@Sun.COM 		mutex_enter(&vd->vdev_stat_lock);
21408241SJeff.Bonwick@Sun.COM 
21417754SJeff.Bonwick@Sun.COM 		if (flags & ZIO_FLAG_IO_REPAIR) {
21421807Sbonwick 			if (flags & ZIO_FLAG_SCRUB_THREAD)
21437754SJeff.Bonwick@Sun.COM 				vs->vs_scrub_repaired += psize;
21448241SJeff.Bonwick@Sun.COM 			if (flags & ZIO_FLAG_SELF_HEAL)
21457754SJeff.Bonwick@Sun.COM 				vs->vs_self_healed += psize;
2146789Sahrens 		}
21478241SJeff.Bonwick@Sun.COM 
21488241SJeff.Bonwick@Sun.COM 		vs->vs_ops[type]++;
21498241SJeff.Bonwick@Sun.COM 		vs->vs_bytes[type] += psize;
21508241SJeff.Bonwick@Sun.COM 
21518241SJeff.Bonwick@Sun.COM 		mutex_exit(&vd->vdev_stat_lock);
2152789Sahrens 		return;
2153789Sahrens 	}
2154789Sahrens 
2155789Sahrens 	if (flags & ZIO_FLAG_SPECULATIVE)
2156789Sahrens 		return;
2157789Sahrens 
21587754SJeff.Bonwick@Sun.COM 	mutex_enter(&vd->vdev_stat_lock);
21597754SJeff.Bonwick@Sun.COM 	if (type == ZIO_TYPE_READ) {
21607754SJeff.Bonwick@Sun.COM 		if (zio->io_error == ECKSUM)
21617754SJeff.Bonwick@Sun.COM 			vs->vs_checksum_errors++;
21627754SJeff.Bonwick@Sun.COM 		else
21637754SJeff.Bonwick@Sun.COM 			vs->vs_read_errors++;
2164789Sahrens 	}
21657754SJeff.Bonwick@Sun.COM 	if (type == ZIO_TYPE_WRITE)
21667754SJeff.Bonwick@Sun.COM 		vs->vs_write_errors++;
21677754SJeff.Bonwick@Sun.COM 	mutex_exit(&vd->vdev_stat_lock);
2168789Sahrens 
21698241SJeff.Bonwick@Sun.COM 	if (type == ZIO_TYPE_WRITE && txg != 0 &&
21708241SJeff.Bonwick@Sun.COM 	    (!(flags & ZIO_FLAG_IO_REPAIR) ||
21718241SJeff.Bonwick@Sun.COM 	    (flags & ZIO_FLAG_SCRUB_THREAD))) {
21728241SJeff.Bonwick@Sun.COM 		/*
21738241SJeff.Bonwick@Sun.COM 		 * This is either a normal write (not a repair), or it's a
21748241SJeff.Bonwick@Sun.COM 		 * repair induced by the scrub thread.  In the normal case,
21758241SJeff.Bonwick@Sun.COM 		 * we commit the DTL change in the same txg as the block
21768241SJeff.Bonwick@Sun.COM 		 * was born.  In the scrub-induced repair case, we know that
21778241SJeff.Bonwick@Sun.COM 		 * scrubs run in first-pass syncing context, so we commit
21788241SJeff.Bonwick@Sun.COM 		 * the DTL change in spa->spa_syncing_txg.
21798241SJeff.Bonwick@Sun.COM 		 *
21808241SJeff.Bonwick@Sun.COM 		 * We currently do not make DTL entries for failed spontaneous
21818241SJeff.Bonwick@Sun.COM 		 * self-healing writes triggered by normal (non-scrubbing)
21828241SJeff.Bonwick@Sun.COM 		 * reads, because we have no transactional context in which to
21838241SJeff.Bonwick@Sun.COM 		 * do so -- and it's not clear that it'd be desirable anyway.
21848241SJeff.Bonwick@Sun.COM 		 */
21858241SJeff.Bonwick@Sun.COM 		if (vd->vdev_ops->vdev_op_leaf) {
21868241SJeff.Bonwick@Sun.COM 			uint64_t commit_txg = txg;
21878241SJeff.Bonwick@Sun.COM 			if (flags & ZIO_FLAG_SCRUB_THREAD) {
21888241SJeff.Bonwick@Sun.COM 				ASSERT(flags & ZIO_FLAG_IO_REPAIR);
21898241SJeff.Bonwick@Sun.COM 				ASSERT(spa_sync_pass(spa) == 1);
21908241SJeff.Bonwick@Sun.COM 				vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
21918241SJeff.Bonwick@Sun.COM 				commit_txg = spa->spa_syncing_txg;
21928241SJeff.Bonwick@Sun.COM 			}
21938241SJeff.Bonwick@Sun.COM 			ASSERT(commit_txg >= spa->spa_syncing_txg);
21948241SJeff.Bonwick@Sun.COM 			if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
21958241SJeff.Bonwick@Sun.COM 				return;
21968241SJeff.Bonwick@Sun.COM 			for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
21978241SJeff.Bonwick@Sun.COM 				vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
21988241SJeff.Bonwick@Sun.COM 			vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
2199789Sahrens 		}
22008241SJeff.Bonwick@Sun.COM 		if (vd != rvd)
22018241SJeff.Bonwick@Sun.COM 			vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
2202789Sahrens 	}
2203789Sahrens }
2204789Sahrens 
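/*
 * Editor's worked example (not part of the original source): suppose a
 * scrub-induced repair write to a block born in txg 100 fails while the
 * pool is syncing txg 250.  In both the scrub and normal cases the code
 * above adds txg 100 to the leaf's DTL_MISSING and to DTL_PARTIAL on the
 * vdev and its ancestors; the scrub case additionally records txg 100 in
 * DTL_SCRUB (the scrub could not repair it) and passes commit_txg == 250
 * to vdev_dirty(), because scrubs run in first-pass syncing context and
 * txg 100 has long since synced.  A failed normal write instead commits
 * the DTL change in the block's own birth txg.
 */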
2205789Sahrens void
2206789Sahrens vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete)
2207789Sahrens {
2208789Sahrens 	int c;
2209789Sahrens 	vdev_stat_t *vs = &vd->vdev_stat;
2210789Sahrens 
2211789Sahrens 	for (c = 0; c < vd->vdev_children; c++)
2212789Sahrens 		vdev_scrub_stat_update(vd->vdev_child[c], type, complete);
2213789Sahrens 
2214789Sahrens 	mutex_enter(&vd->vdev_stat_lock);
2215789Sahrens 
2216789Sahrens 	if (type == POOL_SCRUB_NONE) {
2217789Sahrens 		/*
2218789Sahrens 		 * Update completion and end time.  Leave everything else alone
2219789Sahrens 		 * so we can report what happened during the previous scrub.
2220789Sahrens 		 */
2221789Sahrens 		vs->vs_scrub_complete = complete;
2222789Sahrens 		vs->vs_scrub_end = gethrestime_sec();
2223789Sahrens 	} else {
2224789Sahrens 		vs->vs_scrub_type = type;
2225789Sahrens 		vs->vs_scrub_complete = 0;
2226789Sahrens 		vs->vs_scrub_examined = 0;
2227789Sahrens 		vs->vs_scrub_repaired = 0;
2228789Sahrens 		vs->vs_scrub_start = gethrestime_sec();
2229789Sahrens 		vs->vs_scrub_end = 0;
2230789Sahrens 	}
2231789Sahrens 
2232789Sahrens 	mutex_exit(&vd->vdev_stat_lock);
2233789Sahrens }
2234789Sahrens 
2235789Sahrens /*
2236789Sahrens  * Update the in-core space usage stats for this vdev and the root vdev.
2237789Sahrens  */
2238789Sahrens void
22395450Sbrendan vdev_space_update(vdev_t *vd, int64_t space_delta, int64_t alloc_delta,
22405450Sbrendan     boolean_t update_root)
2241789Sahrens {
22424527Sperrin 	int64_t dspace_delta = space_delta;
22434527Sperrin 	spa_t *spa = vd->vdev_spa;
22444527Sperrin 	vdev_t *rvd = spa->spa_root_vdev;
22454527Sperrin 
2246789Sahrens 	ASSERT(vd == vd->vdev_top);
22474527Sperrin 
22484527Sperrin 	/*
22494527Sperrin 	 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion
22504527Sperrin 	 * factor.  We must calculate this here and not at the root vdev
22514527Sperrin 	 * because the root vdev's psize-to-asize is simply the max of its
22524527Sperrin 	 * children's, thus not accurate enough for us.
22534527Sperrin 	 */
22544527Sperrin 	ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
22554527Sperrin 	dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
22564527Sperrin 	    vd->vdev_deflate_ratio;
2257789Sahrens 
22584527Sperrin 	mutex_enter(&vd->vdev_stat_lock);
22594527Sperrin 	vd->vdev_stat.vs_space += space_delta;
22604527Sperrin 	vd->vdev_stat.vs_alloc += alloc_delta;
22614527Sperrin 	vd->vdev_stat.vs_dspace += dspace_delta;
22624527Sperrin 	mutex_exit(&vd->vdev_stat_lock);
22632082Seschrock 
22645450Sbrendan 	if (update_root) {
22655450Sbrendan 		ASSERT(rvd == vd->vdev_parent);
22665450Sbrendan 		ASSERT(vd->vdev_ms_count != 0);
22674527Sperrin 
22685450Sbrendan 		/*
22695450Sbrendan 		 * Don't count non-normal (e.g. intent log) space as part of
22705450Sbrendan 		 * the pool's capacity.
22715450Sbrendan 		 */
22725450Sbrendan 		if (vd->vdev_mg->mg_class != spa->spa_normal_class)
22735450Sbrendan 			return;
22745450Sbrendan 
22755450Sbrendan 		mutex_enter(&rvd->vdev_stat_lock);
22765450Sbrendan 		rvd->vdev_stat.vs_space += space_delta;
22775450Sbrendan 		rvd->vdev_stat.vs_alloc += alloc_delta;
22785450Sbrendan 		rvd->vdev_stat.vs_dspace += dspace_delta;
22795450Sbrendan 		mutex_exit(&rvd->vdev_stat_lock);
22805450Sbrendan 	}
2281789Sahrens }
2282789Sahrens 
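/*
 * Editor's worked example (illustration only; assumes the deflate-ratio
 * scaling set up at vdev open time, where 1 << SPA_MINBLOCKSHIFT == 512
 * represents no expansion): on a five-disk raidz1 the ratio works out
 * to roughly 409, so a 1 MB allocation deflates to
 *
 *	(1048576 >> 9) * 409 = 2048 * 409 = 837632 bytes (~818 KB)
 *
 * of reported capacity -- the parity overhead is divided back out so
 * vs_dspace tracks usable space.
 */
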
2283789Sahrens /*
2284789Sahrens  * Mark a top-level vdev's config as dirty, placing it on the dirty list
2285789Sahrens  * so that it will be written out next time the vdev configuration is synced.
2286789Sahrens  * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
2287789Sahrens  */
2288789Sahrens void
2289789Sahrens vdev_config_dirty(vdev_t *vd)
2290789Sahrens {
2291789Sahrens 	spa_t *spa = vd->vdev_spa;
2292789Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
2293789Sahrens 	int c;
2294789Sahrens 
22951601Sbonwick 	/*
22966643Seschrock 	 * If this is an aux vdev (as with l2cache devices), then we update the
22976643Seschrock 	 * vdev config manually and set the sync flag.
22986643Seschrock 	 */
22996643Seschrock 	if (vd->vdev_aux != NULL) {
23006643Seschrock 		spa_aux_vdev_t *sav = vd->vdev_aux;
23016643Seschrock 		nvlist_t **aux;
23026643Seschrock 		uint_t naux;
23036643Seschrock 
23046643Seschrock 		for (c = 0; c < sav->sav_count; c++) {
23056643Seschrock 			if (sav->sav_vdevs[c] == vd)
23066643Seschrock 				break;
23076643Seschrock 		}
23086643Seschrock 
23097754SJeff.Bonwick@Sun.COM 		if (c == sav->sav_count) {
23107754SJeff.Bonwick@Sun.COM 			/*
23117754SJeff.Bonwick@Sun.COM 			 * We're being removed.  There's nothing more to do.
23127754SJeff.Bonwick@Sun.COM 			 */
23137754SJeff.Bonwick@Sun.COM 			ASSERT(sav->sav_sync == B_TRUE);
23147754SJeff.Bonwick@Sun.COM 			return;
23157754SJeff.Bonwick@Sun.COM 		}
23167754SJeff.Bonwick@Sun.COM 
23176643Seschrock 		sav->sav_sync = B_TRUE;
23186643Seschrock 
23196643Seschrock 		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
23206643Seschrock 		    ZPOOL_CONFIG_L2CACHE, &aux, &naux) == 0);
23216643Seschrock 
23226643Seschrock 		ASSERT(c < naux);
23236643Seschrock 
23246643Seschrock 		/*
23256643Seschrock 		 * Setting the nvlist in the middle of the array is a little
23266643Seschrock 		 * sketchy, but it will work.
23276643Seschrock 		 */
23286643Seschrock 		nvlist_free(aux[c]);
23296643Seschrock 		aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE);
23306643Seschrock 
23316643Seschrock 		return;
23326643Seschrock 	}
23336643Seschrock 
23346643Seschrock 	/*
23357754SJeff.Bonwick@Sun.COM 	 * The dirty list is protected by the SCL_CONFIG lock.  The caller
23367754SJeff.Bonwick@Sun.COM 	 * must either hold SCL_CONFIG as writer, or must be the sync thread
23377754SJeff.Bonwick@Sun.COM 	 * (which holds SCL_CONFIG as reader).  There's only one sync thread,
23381601Sbonwick 	 * so this is sufficient to ensure mutual exclusion.
23391601Sbonwick 	 */
23407754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
23417754SJeff.Bonwick@Sun.COM 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
23427754SJeff.Bonwick@Sun.COM 	    spa_config_held(spa, SCL_CONFIG, RW_READER)));
23431601Sbonwick 
2344789Sahrens 	if (vd == rvd) {
2345789Sahrens 		for (c = 0; c < rvd->vdev_children; c++)
2346789Sahrens 			vdev_config_dirty(rvd->vdev_child[c]);
2347789Sahrens 	} else {
2348789Sahrens 		ASSERT(vd == vd->vdev_top);
2349789Sahrens 
23507754SJeff.Bonwick@Sun.COM 		if (!list_link_active(&vd->vdev_config_dirty_node))
23517754SJeff.Bonwick@Sun.COM 			list_insert_head(&spa->spa_config_dirty_list, vd);
2352789Sahrens 	}
2353789Sahrens }
2354789Sahrens 
2355789Sahrens void
2356789Sahrens vdev_config_clean(vdev_t *vd)
2357789Sahrens {
23581601Sbonwick 	spa_t *spa = vd->vdev_spa;
23591601Sbonwick 
23607754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
23617754SJeff.Bonwick@Sun.COM 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
23627754SJeff.Bonwick@Sun.COM 	    spa_config_held(spa, SCL_CONFIG, RW_READER)));
23637754SJeff.Bonwick@Sun.COM 
23647754SJeff.Bonwick@Sun.COM 	ASSERT(list_link_active(&vd->vdev_config_dirty_node));
23657754SJeff.Bonwick@Sun.COM 	list_remove(&spa->spa_config_dirty_list, vd);
23667754SJeff.Bonwick@Sun.COM }
23677754SJeff.Bonwick@Sun.COM 
23687754SJeff.Bonwick@Sun.COM /*
23697754SJeff.Bonwick@Sun.COM  * Mark a top-level vdev's state as dirty, so that the next pass of
23707754SJeff.Bonwick@Sun.COM  * spa_sync() can convert this into vdev_config_dirty().  We distinguish
23717754SJeff.Bonwick@Sun.COM  * the state changes from larger config changes because they require
23727754SJeff.Bonwick@Sun.COM  * much less locking, and are often needed for administrative actions.
23737754SJeff.Bonwick@Sun.COM  */
23747754SJeff.Bonwick@Sun.COM void
23757754SJeff.Bonwick@Sun.COM vdev_state_dirty(vdev_t *vd)
23767754SJeff.Bonwick@Sun.COM {
23777754SJeff.Bonwick@Sun.COM 	spa_t *spa = vd->vdev_spa;
23787754SJeff.Bonwick@Sun.COM 
23797754SJeff.Bonwick@Sun.COM 	ASSERT(vd == vd->vdev_top);
23801601Sbonwick 
23817754SJeff.Bonwick@Sun.COM 	/*
23827754SJeff.Bonwick@Sun.COM 	 * The state list is protected by the SCL_STATE lock.  The caller
23837754SJeff.Bonwick@Sun.COM 	 * must either hold SCL_STATE as writer, or must be the sync thread
23847754SJeff.Bonwick@Sun.COM 	 * (which holds SCL_STATE as reader).  There's only one sync thread,
23857754SJeff.Bonwick@Sun.COM 	 * so this is sufficient to ensure mutual exclusion.
23867754SJeff.Bonwick@Sun.COM 	 */
23877754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
23887754SJeff.Bonwick@Sun.COM 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
23897754SJeff.Bonwick@Sun.COM 	    spa_config_held(spa, SCL_STATE, RW_READER)));
23907754SJeff.Bonwick@Sun.COM 
23917754SJeff.Bonwick@Sun.COM 	if (!list_link_active(&vd->vdev_state_dirty_node))
23927754SJeff.Bonwick@Sun.COM 		list_insert_head(&spa->spa_state_dirty_list, vd);
23937754SJeff.Bonwick@Sun.COM }
23947754SJeff.Bonwick@Sun.COM 
23957754SJeff.Bonwick@Sun.COM void
23967754SJeff.Bonwick@Sun.COM vdev_state_clean(vdev_t *vd)
23977754SJeff.Bonwick@Sun.COM {
23987754SJeff.Bonwick@Sun.COM 	spa_t *spa = vd->vdev_spa;
23997754SJeff.Bonwick@Sun.COM 
24007754SJeff.Bonwick@Sun.COM 	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
24017754SJeff.Bonwick@Sun.COM 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
24027754SJeff.Bonwick@Sun.COM 	    spa_config_held(spa, SCL_STATE, RW_READER)));
24037754SJeff.Bonwick@Sun.COM 
24047754SJeff.Bonwick@Sun.COM 	ASSERT(list_link_active(&vd->vdev_state_dirty_node));
24057754SJeff.Bonwick@Sun.COM 	list_remove(&spa->spa_state_dirty_list, vd);
2406789Sahrens }
2407789Sahrens 
24086523Sek110237 /*
24096523Sek110237  * Propagate vdev state up from children to parent.
24106523Sek110237  */
24111775Sbillm void
24121775Sbillm vdev_propagate_state(vdev_t *vd)
24131775Sbillm {
24148241SJeff.Bonwick@Sun.COM 	spa_t *spa = vd->vdev_spa;
24158241SJeff.Bonwick@Sun.COM 	vdev_t *rvd = spa->spa_root_vdev;
24161775Sbillm 	int degraded = 0, faulted = 0;
24171775Sbillm 	int corrupted = 0;
24181775Sbillm 	int c;
24191775Sbillm 	vdev_t *child;
24201775Sbillm 
24214451Seschrock 	if (vd->vdev_children > 0) {
24224451Seschrock 		for (c = 0; c < vd->vdev_children; c++) {
24234451Seschrock 			child = vd->vdev_child[c];
24246976Seschrock 
24257754SJeff.Bonwick@Sun.COM 			if (!vdev_readable(child) ||
24268241SJeff.Bonwick@Sun.COM 			    (!vdev_writeable(child) && spa_writeable(spa))) {
24276976Seschrock 				/*
24286976Seschrock 				 * Root special: if there is a top-level log
24296976Seschrock 				 * device, treat the root vdev as if it were
24306976Seschrock 				 * degraded.
24316976Seschrock 				 */
24326976Seschrock 				if (child->vdev_islog && vd == rvd)
24336976Seschrock 					degraded++;
24346976Seschrock 				else
24356976Seschrock 					faulted++;
24366976Seschrock 			} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
24374451Seschrock 				degraded++;
24386976Seschrock 			}
24394451Seschrock 
24404451Seschrock 			if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
24414451Seschrock 				corrupted++;
24424451Seschrock 		}
24431775Sbillm 
24444451Seschrock 		vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
24454451Seschrock 
24464451Seschrock 		/*
24477754SJeff.Bonwick@Sun.COM 		 * Root special: if there is a top-level vdev that cannot be
24484451Seschrock 		 * opened due to corrupted metadata, then propagate the root
24494451Seschrock 		 * vdev's aux state as 'corrupt' rather than 'insufficient
24504451Seschrock 		 * replicas'.
24514451Seschrock 		 */
24524451Seschrock 		if (corrupted && vd == rvd &&
24534451Seschrock 		    rvd->vdev_state == VDEV_STATE_CANT_OPEN)
24544451Seschrock 			vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
24554451Seschrock 			    VDEV_AUX_CORRUPT_DATA);
24561775Sbillm 	}
24571775Sbillm 
24586976Seschrock 	if (vd->vdev_parent)
24594451Seschrock 		vdev_propagate_state(vd->vdev_parent);
24601775Sbillm }
24611775Sbillm 
2462789Sahrens /*
24631544Seschrock  * Set a vdev's state.  If this is during an open, we don't update the parent
24641544Seschrock  * state, because we're in the process of opening children depth-first.
24651544Seschrock  * Otherwise, we propagate the change to the parent.
24661544Seschrock  *
24671544Seschrock  * If this routine places a device in a faulted state, an appropriate ereport is
24681544Seschrock  * generated.
2469789Sahrens  */
2470789Sahrens void
24711544Seschrock vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
2472789Sahrens {
24731986Seschrock 	uint64_t save_state;
24746643Seschrock 	spa_t *spa = vd->vdev_spa;
24751544Seschrock 
24761544Seschrock 	if (state == vd->vdev_state) {
24771544Seschrock 		vd->vdev_stat.vs_aux = aux;
2478789Sahrens 		return;
24791544Seschrock 	}
24801544Seschrock 
24811986Seschrock 	save_state = vd->vdev_state;
2482789Sahrens 
2483789Sahrens 	vd->vdev_state = state;
2484789Sahrens 	vd->vdev_stat.vs_aux = aux;
2485789Sahrens 
24864451Seschrock 	/*
24874451Seschrock 	 * If we are setting the vdev state to anything but an open state, then
24884451Seschrock 	 * always close the underlying device.  Otherwise, we keep accessible
24894451Seschrock 	 * but invalid devices open forever.  We don't call vdev_close() itself,
24904451Seschrock 	 * because that implies some extra checks (offline, etc) that we don't
24914451Seschrock 	 * want here.  This is limited to leaf devices, because otherwise
24924451Seschrock 	 * closing the device will affect other children.
24934451Seschrock 	 */
24947780SJeff.Bonwick@Sun.COM 	if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf)
24954451Seschrock 		vd->vdev_ops->vdev_op_close(vd);
24964451Seschrock 
24974451Seschrock 	if (vd->vdev_removed &&
24984451Seschrock 	    state == VDEV_STATE_CANT_OPEN &&
24994451Seschrock 	    (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
25004451Seschrock 		/*
25014451Seschrock 		 * If the previous state is set to VDEV_STATE_REMOVED, then this
25024451Seschrock 		 * device was previously marked removed and someone attempted to
25034451Seschrock 		 * reopen it.  If this failed due to a nonexistent device, then
25044451Seschrock 		 * keep the device in the REMOVED state.  We also let this be if
25054451Seschrock 		 * it is one of our special test online cases, which is only
25064451Seschrock 		 * attempting to online the device and shouldn't generate an FMA
25074451Seschrock 		 * fault.
25084451Seschrock 		 */
25094451Seschrock 		vd->vdev_state = VDEV_STATE_REMOVED;
25104451Seschrock 		vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
25114451Seschrock 	} else if (state == VDEV_STATE_REMOVED) {
25124451Seschrock 		/*
25134451Seschrock 		 * Indicate to the ZFS DE that this device has been removed, and
25144451Seschrock 		 * any recent errors should be ignored.
25154451Seschrock 		 */
25166643Seschrock 		zfs_post_remove(spa, vd);
25174451Seschrock 		vd->vdev_removed = B_TRUE;
25184451Seschrock 	} else if (state == VDEV_STATE_CANT_OPEN) {
25191544Seschrock 		/*
25201544Seschrock 		 * If we fail to open a vdev during an import, we mark it as
25211544Seschrock 		 * "not available", which signifies that it was never there to
25221544Seschrock 		 * begin with.  Failure to open such a device is not considered
25231544Seschrock 		 * an error.
25241544Seschrock 		 */
25256643Seschrock 		if (spa->spa_load_state == SPA_LOAD_IMPORT &&
25266643Seschrock 		    !spa->spa_import_faulted &&
25271986Seschrock 		    vd->vdev_ops->vdev_op_leaf)
25281986Seschrock 			vd->vdev_not_present = 1;
25291986Seschrock 
25301986Seschrock 		/*
25311986Seschrock 		 * Post the appropriate ereport.  If the 'prevstate' field is
25321986Seschrock 		 * set to something other than VDEV_STATE_UNKNOWN, it indicates
25331986Seschrock 		 * that this is part of a vdev_reopen().  In this case, we don't
25341986Seschrock 		 * want to post the ereport if the device was already in the
25351986Seschrock 		 * CANT_OPEN state beforehand.
25364451Seschrock 		 *
25374451Seschrock 		 * If the 'checkremove' flag is set, then this is an attempt to
25384451Seschrock 		 * online the device in response to an insertion event.  If we
25394451Seschrock 		 * hit this case, then we have detected an insertion event for a
25404451Seschrock 		 * faulted or offline device that wasn't in the removed state.
25414451Seschrock 		 * In this scenario, we don't post an ereport because we are
25424451Seschrock 		 * about to replace the device, or attempt an online with
25434451Seschrock 		 * vdev_forcefault, which will generate the fault for us.
25441986Seschrock 		 */
25454451Seschrock 		if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
25464451Seschrock 		    !vd->vdev_not_present && !vd->vdev_checkremove &&
25476643Seschrock 		    vd != spa->spa_root_vdev) {
25481544Seschrock 			const char *class;
25491544Seschrock 
25501544Seschrock 			switch (aux) {
25511544Seschrock 			case VDEV_AUX_OPEN_FAILED:
25521544Seschrock 				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
25531544Seschrock 				break;
25541544Seschrock 			case VDEV_AUX_CORRUPT_DATA:
25551544Seschrock 				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
25561544Seschrock 				break;
25571544Seschrock 			case VDEV_AUX_NO_REPLICAS:
25581544Seschrock 				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
25591544Seschrock 				break;
25601544Seschrock 			case VDEV_AUX_BAD_GUID_SUM:
25611544Seschrock 				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
25621544Seschrock 				break;
25631544Seschrock 			case VDEV_AUX_TOO_SMALL:
25641544Seschrock 				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
25651544Seschrock 				break;
25661544Seschrock 			case VDEV_AUX_BAD_LABEL:
25671544Seschrock 				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
25681544Seschrock 				break;
25697754SJeff.Bonwick@Sun.COM 			case VDEV_AUX_IO_FAILURE:
25707754SJeff.Bonwick@Sun.COM 				class = FM_EREPORT_ZFS_IO_FAILURE;
25717754SJeff.Bonwick@Sun.COM 				break;
25721544Seschrock 			default:
25731544Seschrock 				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
25741544Seschrock 			}
25751544Seschrock 
25766643Seschrock 			zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
25771544Seschrock 		}
25784451Seschrock 
25794451Seschrock 		/* Erase any notion of persistent removed state */
25804451Seschrock 		vd->vdev_removed = B_FALSE;
25814451Seschrock 	} else {
25824451Seschrock 		vd->vdev_removed = B_FALSE;
25831544Seschrock 	}
25841544Seschrock 
25854451Seschrock 	if (!isopen)
25864451Seschrock 		vdev_propagate_state(vd);
2587789Sahrens }
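
/*
 * Usage sketch, for illustration only; "example_open_failed" and the
 * VDEV_STATE_EXAMPLES guard are hypothetical.  An open-path failure
 * passes isopen == B_TRUE so that parent state is not recomputed while
 * the tree is still being opened depth-first; state changes made after
 * the open pass B_FALSE so they propagate to the root immediately.
 */
#ifdef	VDEV_STATE_EXAMPLES
static void
example_open_failed(vdev_t *vd)
{
	vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
	    VDEV_AUX_OPEN_FAILED);
}
#endif	/* VDEV_STATE_EXAMPLES */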
25887042Sgw25295 
25897042Sgw25295 /*
25907042Sgw25295  * Check the vdev configuration to ensure that it's capable of supporting
25917042Sgw25295  * a root pool. Currently, we do not support RAID-Z or configurations
25927042Sgw25295  * with missing devices.  In addition, only a single top-level vdev is
25937042Sgw25295  * allowed and none of the leaves can be whole disks.
25947042Sgw25295  */
25957042Sgw25295 boolean_t
25967042Sgw25295 vdev_is_bootable(vdev_t *vd)
25977042Sgw25295 {
25987042Sgw25295 	int c;
25997042Sgw25295 
26007042Sgw25295 	if (!vd->vdev_ops->vdev_op_leaf) {
26017042Sgw25295 		char *vdev_type = vd->vdev_ops->vdev_op_type;
26027042Sgw25295 
26037042Sgw25295 		if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
26047042Sgw25295 		    vd->vdev_children > 1) {
26057042Sgw25295 			return (B_FALSE);
26067042Sgw25295 		} else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
26077042Sgw25295 		    strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
26087042Sgw25295 			return (B_FALSE);
26097042Sgw25295 		}
26107042Sgw25295 	} else if (vd->vdev_wholedisk == 1) {
26117042Sgw25295 		return (B_FALSE);
26127042Sgw25295 	}
26137042Sgw25295 
26147042Sgw25295 	for (c = 0; c < vd->vdev_children; c++) {
26157042Sgw25295 		if (!vdev_is_bootable(vd->vdev_child[c]))
26167042Sgw25295 			return (B_FALSE);
26177042Sgw25295 	}
26187042Sgw25295 	return (B_TRUE);
26197042Sgw25295 }
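
/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * a pool-creation or import path intended for a root pool can reject
 * an unbootable configuration up front.
 */
#ifdef	VDEV_STATE_EXAMPLES
static int
example_verify_root_pool(spa_t *spa)
{
	if (!vdev_is_bootable(spa->spa_root_vdev))
		return (ENOTSUP);
	return (0);
}
#endif	/* VDEV_STATE_EXAMPLES */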
2620