xref: /freebsd-src/sys/contrib/openzfs/module/zfs/spa.c (revision 16d6b3b3da62aa5baaf3c66c8d4e6f8c8f70aeb7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
25  * Copyright (c) 2018, Nexenta Systems, Inc.  All rights reserved.
26  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27  * Copyright 2013 Saso Kiselkov. All rights reserved.
28  * Copyright (c) 2014 Integros [integros.com]
29  * Copyright 2016 Toomas Soome <tsoome@me.com>
30  * Copyright (c) 2016 Actifio, Inc. All rights reserved.
31  * Copyright 2018 Joyent, Inc.
32  * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
33  * Copyright 2017 Joyent, Inc.
34  * Copyright (c) 2017, Intel Corporation.
35  */
36 
37 /*
38  * SPA: Storage Pool Allocator
39  *
40  * This file contains all the routines used when modifying on-disk SPA state.
41  * This includes opening, importing, destroying, exporting a pool, and syncing a
42  * pool.
43  */
44 
45 #include <sys/zfs_context.h>
46 #include <sys/fm/fs/zfs.h>
47 #include <sys/spa_impl.h>
48 #include <sys/zio.h>
49 #include <sys/zio_checksum.h>
50 #include <sys/dmu.h>
51 #include <sys/dmu_tx.h>
52 #include <sys/zap.h>
53 #include <sys/zil.h>
54 #include <sys/ddt.h>
55 #include <sys/vdev_impl.h>
56 #include <sys/vdev_removal.h>
57 #include <sys/vdev_indirect_mapping.h>
58 #include <sys/vdev_indirect_births.h>
59 #include <sys/vdev_initialize.h>
60 #include <sys/vdev_rebuild.h>
61 #include <sys/vdev_trim.h>
62 #include <sys/vdev_disk.h>
63 #include <sys/metaslab.h>
64 #include <sys/metaslab_impl.h>
65 #include <sys/mmp.h>
66 #include <sys/uberblock_impl.h>
67 #include <sys/txg.h>
68 #include <sys/avl.h>
69 #include <sys/bpobj.h>
70 #include <sys/dmu_traverse.h>
71 #include <sys/dmu_objset.h>
72 #include <sys/unique.h>
73 #include <sys/dsl_pool.h>
74 #include <sys/dsl_dataset.h>
75 #include <sys/dsl_dir.h>
76 #include <sys/dsl_prop.h>
77 #include <sys/dsl_synctask.h>
78 #include <sys/fs/zfs.h>
79 #include <sys/arc.h>
80 #include <sys/callb.h>
81 #include <sys/systeminfo.h>
82 #include <sys/spa_boot.h>
83 #include <sys/zfs_ioctl.h>
84 #include <sys/dsl_scan.h>
85 #include <sys/zfeature.h>
86 #include <sys/dsl_destroy.h>
87 #include <sys/zvol.h>
88 
89 #ifdef	_KERNEL
90 #include <sys/fm/protocol.h>
91 #include <sys/fm/util.h>
92 #include <sys/callb.h>
93 #include <sys/zone.h>
94 #include <sys/vmsystm.h>
95 #endif	/* _KERNEL */
96 
97 #include "zfs_prop.h"
98 #include "zfs_comutil.h"
99 
100 /*
101  * The interval, in seconds, at which failed configuration cache file writes
102  * should be retried.
103  */
104 int zfs_ccw_retry_interval = 300;
105 
106 typedef enum zti_modes {
107 	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
108 	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
109 	ZTI_MODE_NULL,			/* don't create a taskq */
110 	ZTI_NMODES
111 } zti_modes_t;
112 
113 #define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
114 #define	ZTI_PCT(n)	{ ZTI_MODE_ONLINE_PERCENT, (n), 1 }
115 #define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
116 #define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }
117 
118 #define	ZTI_N(n)	ZTI_P(n, 1)
119 #define	ZTI_ONE		ZTI_N(1)
120 
121 typedef struct zio_taskq_info {
122 	zti_modes_t zti_mode;
123 	uint_t zti_value;
124 	uint_t zti_count;
125 } zio_taskq_info_t;
126 
127 static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
128 	"iss", "iss_h", "int", "int_h"
129 };
130 
131 /*
132  * This table defines the taskq settings for each ZFS I/O type. When
133  * initializing a pool, we use this table to create an appropriately sized
134  * taskq. Some operations are low volume and therefore have a small, static
135  * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
136  * macros. Other operations process a large amount of data; the ZTI_BATCH
137  * macro causes us to create a taskq oriented for throughput. Some operations
138  * are so high frequency and short-lived that the taskq itself can become a
139  * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
140  * additional degree of parallelism specified by the number of threads per-
141  * taskq and the number of taskqs; when dispatching an event in this case, the
142  * particular taskq is chosen at random.
143  *
144  * The different taskq priorities are to handle the different contexts (issue
145  * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
146  * need to be handled with minimum delay.
147  */
148 const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
149 	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
150 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
151 	{ ZTI_N(8),	ZTI_NULL,	ZTI_P(12, 8),	ZTI_NULL }, /* READ */
152 	{ ZTI_BATCH,	ZTI_N(5),	ZTI_P(12, 8),	ZTI_N(5) }, /* WRITE */
153 	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
154 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
155 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
156 	{ ZTI_N(4),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* TRIM */
157 };
158 
159 static void spa_sync_version(void *arg, dmu_tx_t *tx);
160 static void spa_sync_props(void *arg, dmu_tx_t *tx);
161 static boolean_t spa_has_active_shared_spare(spa_t *spa);
162 static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
163 static void spa_vdev_resilver_done(spa_t *spa);
164 
165 uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
166 boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
167 uint_t		zio_taskq_basedc = 80;		/* base duty cycle */
168 
169 boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
170 
171 /*
172  * Report any spa_load_verify errors found, but do not fail spa_load.
173  * This is used by zdb to analyze non-idle pools.
174  */
175 boolean_t	spa_load_verify_dryrun = B_FALSE;
176 
177 /*
178  * This (illegal) pool name is used when temporarily importing a spa_t in order
179  * to get the vdev stats associated with the imported devices.
180  */
181 #define	TRYIMPORT_NAME	"$import"
182 
183 /*
184  * For debugging purposes: print out vdev tree during pool import.
185  */
186 int		spa_load_print_vdev_tree = B_FALSE;
187 
188 /*
189  * A non-zero value for zfs_max_missing_tvds means that we allow importing
190  * pools with missing top-level vdevs. This is strictly intended for advanced
191  * pool recovery cases since missing data is almost inevitable. Pools with
192  * missing devices can only be imported read-only for safety reasons, and their
193  * fail-mode will be automatically set to "continue".
194  *
195  * With 1 missing vdev we should be able to import the pool and mount all
196  * datasets. User data that was not modified after the missing device has been
197  * added should be recoverable. This means that snapshots created prior to the
198  * addition of that device should be completely intact.
199  *
200  * With 2 missing vdevs, some datasets may fail to mount since there are
201  * dataset statistics that are stored as regular metadata. Some data might be
202  * recoverable if those vdevs were added recently.
203  *
204  * With 3 or more missing vdevs, the pool is severely damaged and MOS entries
205  * may be missing entirely. Chances of data recovery are very low. Note that
206  * there are also risks of performing an inadvertent rewind as we might be
207  * missing all the vdevs with the latest uberblocks.
208  */
209 unsigned long	zfs_max_missing_tvds = 0;
210 
211 /*
212  * The parameters below are similar to zfs_max_missing_tvds but are only
213  * intended for a preliminary open of the pool with an untrusted config which
214  * might be incomplete or out-dated.
215  *
216  * We are more tolerant for pools opened from a cachefile since we could have
217  * an out-dated cachefile where a device removal was not registered.
218  * We could have set the limit arbitrarily high but in the case where devices
219  * are really missing we would want to return the proper error codes; we chose
220  * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
221  * and we get a chance to retrieve the trusted config.
222  */
223 uint64_t	zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
224 
225 /*
226  * In the case where config was assembled by scanning device paths (/dev/dsks
227  * by default) we are less tolerant since all the existing devices should have
228  * been detected and we want spa_load to return the right error codes.
229  */
230 uint64_t	zfs_max_missing_tvds_scan = 0;
231 
232 /*
233  * Debugging aid that pauses spa_sync() towards the end.
234  */
235 boolean_t	zfs_pause_spa_sync = B_FALSE;
236 
237 /*
238  * Variables to indicate the livelist condense zthr func should wait at certain
239  * points for the livelist to be removed - used to test condense/destroy races
240  */
241 int zfs_livelist_condense_zthr_pause = 0;
242 int zfs_livelist_condense_sync_pause = 0;
243 
244 /*
245  * Variables to track whether or not condense cancellation has been
246  * triggered in testing.
247  */
248 int zfs_livelist_condense_sync_cancel = 0;
249 int zfs_livelist_condense_zthr_cancel = 0;
250 
251 /*
252  * Variable to track whether or not extra ALLOC blkptrs were added to a
253  * livelist entry while it was being condensed (caused by the way we track
254  * remapped blkptrs in dbuf_remap_impl)
255  */
256 int zfs_livelist_condense_new_alloc = 0;
257 
258 /*
259  * ==========================================================================
260  * SPA properties routines
261  * ==========================================================================
262  */
263 
264 /*
265  * Add a (source=src, propname=propval) list to an nvlist.
266  */
267 static void
268 spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
269     uint64_t intval, zprop_source_t src)
270 {
271 	const char *propname = zpool_prop_to_name(prop);
272 	nvlist_t *propval;
273 
274 	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
275 	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
276 
277 	if (strval != NULL)
278 		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
279 	else
280 		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
281 
282 	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
283 	nvlist_free(propval);
284 }
285 
286 /*
287  * Get property values from the spa configuration.
288  */
289 static void
290 spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
291 {
292 	vdev_t *rvd = spa->spa_root_vdev;
293 	dsl_pool_t *pool = spa->spa_dsl_pool;
294 	uint64_t size, alloc, cap, version;
295 	const zprop_source_t src = ZPROP_SRC_NONE;
296 	spa_config_dirent_t *dp;
297 	metaslab_class_t *mc = spa_normal_class(spa);
298 
299 	ASSERT(MUTEX_HELD(&spa->spa_props_lock));
300 
301 	if (rvd != NULL) {
302 		alloc = metaslab_class_get_alloc(mc);
303 		alloc += metaslab_class_get_alloc(spa_special_class(spa));
304 		alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
305 
306 		size = metaslab_class_get_space(mc);
307 		size += metaslab_class_get_space(spa_special_class(spa));
308 		size += metaslab_class_get_space(spa_dedup_class(spa));
309 
310 		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
311 		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
312 		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
313 		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
314 		    size - alloc, src);
315 		spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
316 		    spa->spa_checkpoint_info.sci_dspace, src);
317 
318 		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
319 		    metaslab_class_fragmentation(mc), src);
320 		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
321 		    metaslab_class_expandable_space(mc), src);
322 		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
323 		    (spa_mode(spa) == SPA_MODE_READ), src);
324 
325 		cap = (size == 0) ? 0 : (alloc * 100 / size);
326 		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
327 
328 		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
329 		    ddt_get_pool_dedup_ratio(spa), src);
330 
331 		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
332 		    rvd->vdev_state, src);
333 
334 		version = spa_version(spa);
335 		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
336 			spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
337 			    version, ZPROP_SRC_DEFAULT);
338 		} else {
339 			spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
340 			    version, ZPROP_SRC_LOCAL);
341 		}
342 		spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
343 		    NULL, spa_load_guid(spa), src);
344 	}
345 
346 	if (pool != NULL) {
347 		/*
348 		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS,
349 		 * when opening pools before this version freedir will be NULL.
350 		 */
351 		if (pool->dp_free_dir != NULL) {
352 			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
353 			    dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
354 			    src);
355 		} else {
356 			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
357 			    NULL, 0, src);
358 		}
359 
360 		if (pool->dp_leak_dir != NULL) {
361 			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
362 			    dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
363 			    src);
364 		} else {
365 			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
366 			    NULL, 0, src);
367 		}
368 	}
369 
370 	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
371 
372 	if (spa->spa_comment != NULL) {
373 		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
374 		    0, ZPROP_SRC_LOCAL);
375 	}
376 
377 	if (spa->spa_root != NULL)
378 		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
379 		    0, ZPROP_SRC_LOCAL);
380 
381 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
382 		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
383 		    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
384 	} else {
385 		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
386 		    SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
387 	}
388 
389 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
390 		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
391 		    DNODE_MAX_SIZE, ZPROP_SRC_NONE);
392 	} else {
393 		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
394 		    DNODE_MIN_SIZE, ZPROP_SRC_NONE);
395 	}
396 
397 	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
398 		if (dp->scd_path == NULL) {
399 			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
400 			    "none", 0, ZPROP_SRC_LOCAL);
401 		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
402 			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
403 			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
404 		}
405 	}
406 }
407 
408 /*
409  * Get zpool property values.
410  */
411 int
412 spa_prop_get(spa_t *spa, nvlist_t **nvp)
413 {
414 	objset_t *mos = spa->spa_meta_objset;
415 	zap_cursor_t zc;
416 	zap_attribute_t za;
417 	dsl_pool_t *dp;
418 	int err;
419 
420 	err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
421 	if (err)
422 		return (err);
423 
424 	dp = spa_get_dsl(spa);
425 	dsl_pool_config_enter(dp, FTAG);
426 	mutex_enter(&spa->spa_props_lock);
427 
428 	/*
429 	 * Get properties from the spa config.
430 	 */
431 	spa_prop_get_config(spa, nvp);
432 
433 	/* If no pool property object, no more prop to get. */
434 	if (mos == NULL || spa->spa_pool_props_object == 0)
435 		goto out;
436 
437 	/*
438 	 * Get properties from the MOS pool property object.
439 	 */
440 	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
441 	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
442 	    zap_cursor_advance(&zc)) {
443 		uint64_t intval = 0;
444 		char *strval = NULL;
445 		zprop_source_t src = ZPROP_SRC_DEFAULT;
446 		zpool_prop_t prop;
447 
448 		if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)
449 			continue;
450 
451 		switch (za.za_integer_length) {
452 		case 8:
453 			/* integer property */
454 			if (za.za_first_integer !=
455 			    zpool_prop_default_numeric(prop))
456 				src = ZPROP_SRC_LOCAL;
457 
458 			if (prop == ZPOOL_PROP_BOOTFS) {
459 				dsl_dataset_t *ds = NULL;
460 
461 				err = dsl_dataset_hold_obj(dp,
462 				    za.za_first_integer, FTAG, &ds);
463 				if (err != 0)
464 					break;
465 
466 				strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
467 				    KM_SLEEP);
468 				dsl_dataset_name(ds, strval);
469 				dsl_dataset_rele(ds, FTAG);
470 			} else {
471 				strval = NULL;
472 				intval = za.za_first_integer;
473 			}
474 
475 			spa_prop_add_list(*nvp, prop, strval, intval, src);
476 
477 			if (strval != NULL)
478 				kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
479 
480 			break;
481 
482 		case 1:
483 			/* string property */
484 			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
485 			err = zap_lookup(mos, spa->spa_pool_props_object,
486 			    za.za_name, 1, za.za_num_integers, strval);
487 			if (err) {
488 				kmem_free(strval, za.za_num_integers);
489 				break;
490 			}
491 			spa_prop_add_list(*nvp, prop, strval, 0, src);
492 			kmem_free(strval, za.za_num_integers);
493 			break;
494 
495 		default:
496 			break;
497 		}
498 	}
499 	zap_cursor_fini(&zc);
500 out:
501 	mutex_exit(&spa->spa_props_lock);
502 	dsl_pool_config_exit(dp, FTAG);
503 	if (err && err != ENOENT) {
504 		nvlist_free(*nvp);
505 		*nvp = NULL;
506 		return (err);
507 	}
508 
509 	return (0);
510 }
511 
512 /*
513  * Validate the given pool properties nvlist and modify the list
514  * for the property values to be set.
515  */
516 static int
517 spa_prop_validate(spa_t *spa, nvlist_t *props)
518 {
519 	nvpair_t *elem;
520 	int error = 0, reset_bootfs = 0;
521 	uint64_t objnum = 0;
522 	boolean_t has_feature = B_FALSE;
523 
524 	elem = NULL;
525 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
526 		uint64_t intval;
527 		char *strval, *slash, *check, *fname;
528 		const char *propname = nvpair_name(elem);
529 		zpool_prop_t prop = zpool_name_to_prop(propname);
530 
531 		switch (prop) {
532 		case ZPOOL_PROP_INVAL:
533 			if (!zpool_prop_feature(propname)) {
534 				error = SET_ERROR(EINVAL);
535 				break;
536 			}
537 
538 			/*
539 			 * Sanitize the input.
540 			 */
541 			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
542 				error = SET_ERROR(EINVAL);
543 				break;
544 			}
545 
546 			if (nvpair_value_uint64(elem, &intval) != 0) {
547 				error = SET_ERROR(EINVAL);
548 				break;
549 			}
550 
551 			if (intval != 0) {
552 				error = SET_ERROR(EINVAL);
553 				break;
554 			}
555 
556 			fname = strchr(propname, '@') + 1;
557 			if (zfeature_lookup_name(fname, NULL) != 0) {
558 				error = SET_ERROR(EINVAL);
559 				break;
560 			}
561 
562 			has_feature = B_TRUE;
563 			break;
564 
565 		case ZPOOL_PROP_VERSION:
566 			error = nvpair_value_uint64(elem, &intval);
567 			if (!error &&
568 			    (intval < spa_version(spa) ||
569 			    intval > SPA_VERSION_BEFORE_FEATURES ||
570 			    has_feature))
571 				error = SET_ERROR(EINVAL);
572 			break;
573 
574 		case ZPOOL_PROP_DELEGATION:
575 		case ZPOOL_PROP_AUTOREPLACE:
576 		case ZPOOL_PROP_LISTSNAPS:
577 		case ZPOOL_PROP_AUTOEXPAND:
578 		case ZPOOL_PROP_AUTOTRIM:
579 			error = nvpair_value_uint64(elem, &intval);
580 			if (!error && intval > 1)
581 				error = SET_ERROR(EINVAL);
582 			break;
583 
584 		case ZPOOL_PROP_MULTIHOST:
585 			error = nvpair_value_uint64(elem, &intval);
586 			if (!error && intval > 1)
587 				error = SET_ERROR(EINVAL);
588 
589 			if (!error) {
590 				uint32_t hostid = zone_get_hostid(NULL);
591 				if (hostid)
592 					spa->spa_hostid = hostid;
593 				else
594 					error = SET_ERROR(ENOTSUP);
595 			}
596 
597 			break;
598 
599 		case ZPOOL_PROP_BOOTFS:
600 			/*
601 			 * If the pool version is less than SPA_VERSION_BOOTFS,
602 			 * or the pool is still being created (version == 0),
603 			 * the bootfs property cannot be set.
604 			 */
605 			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
606 				error = SET_ERROR(ENOTSUP);
607 				break;
608 			}
609 
610 			/*
611 			 * Make sure the vdev config is bootable
612 			 */
613 			if (!vdev_is_bootable(spa->spa_root_vdev)) {
614 				error = SET_ERROR(ENOTSUP);
615 				break;
616 			}
617 
618 			reset_bootfs = 1;
619 
620 			error = nvpair_value_string(elem, &strval);
621 
622 			if (!error) {
623 				objset_t *os;
624 
625 				if (strval == NULL || strval[0] == '\0') {
626 					objnum = zpool_prop_default_numeric(
627 					    ZPOOL_PROP_BOOTFS);
628 					break;
629 				}
630 
631 				error = dmu_objset_hold(strval, FTAG, &os);
632 				if (error != 0)
633 					break;
634 
635 				/* Must be ZPL. */
636 				if (dmu_objset_type(os) != DMU_OST_ZFS) {
637 					error = SET_ERROR(ENOTSUP);
638 				} else {
639 					objnum = dmu_objset_id(os);
640 				}
641 				dmu_objset_rele(os, FTAG);
642 			}
643 			break;
644 
645 		case ZPOOL_PROP_FAILUREMODE:
646 			error = nvpair_value_uint64(elem, &intval);
647 			if (!error && intval > ZIO_FAILURE_MODE_PANIC)
648 				error = SET_ERROR(EINVAL);
649 
650 			/*
651 			 * This is a special case which only occurs when
652 			 * the pool has completely failed. This allows
653 			 * the user to change the in-core failmode property
654 			 * without syncing it out to disk (I/Os might
655 			 * currently be blocked). We do this by returning
656 			 * EIO to the caller (spa_prop_set) to trick it
657 			 * into thinking we encountered a property validation
658 			 * error.
659 			 */
660 			if (!error && spa_suspended(spa)) {
661 				spa->spa_failmode = intval;
662 				error = SET_ERROR(EIO);
663 			}
664 			break;
665 
666 		case ZPOOL_PROP_CACHEFILE:
667 			if ((error = nvpair_value_string(elem, &strval)) != 0)
668 				break;
669 
670 			if (strval[0] == '\0')
671 				break;
672 
673 			if (strcmp(strval, "none") == 0)
674 				break;
675 
676 			if (strval[0] != '/') {
677 				error = SET_ERROR(EINVAL);
678 				break;
679 			}
680 
681 			slash = strrchr(strval, '/');
682 			ASSERT(slash != NULL);
683 
684 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
685 			    strcmp(slash, "/..") == 0)
686 				error = SET_ERROR(EINVAL);
687 			break;
688 
689 		case ZPOOL_PROP_COMMENT:
690 			if ((error = nvpair_value_string(elem, &strval)) != 0)
691 				break;
692 			for (check = strval; *check != '\0'; check++) {
693 				if (!isprint(*check)) {
694 					error = SET_ERROR(EINVAL);
695 					break;
696 				}
697 			}
698 			if (strlen(strval) > ZPROP_MAX_COMMENT)
699 				error = SET_ERROR(E2BIG);
700 			break;
701 
702 		default:
703 			break;
704 		}
705 
706 		if (error)
707 			break;
708 	}
709 
710 	(void) nvlist_remove_all(props,
711 	    zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));
712 
713 	if (!error && reset_bootfs) {
714 		error = nvlist_remove(props,
715 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
716 
717 		if (!error) {
718 			error = nvlist_add_uint64(props,
719 			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
720 		}
721 	}
722 
723 	return (error);
724 }
725 
726 void
727 spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
728 {
729 	char *cachefile;
730 	spa_config_dirent_t *dp;
731 
732 	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
733 	    &cachefile) != 0)
734 		return;
735 
736 	dp = kmem_alloc(sizeof (spa_config_dirent_t),
737 	    KM_SLEEP);
738 
739 	if (cachefile[0] == '\0')
740 		dp->scd_path = spa_strdup(spa_config_path);
741 	else if (strcmp(cachefile, "none") == 0)
742 		dp->scd_path = NULL;
743 	else
744 		dp->scd_path = spa_strdup(cachefile);
745 
746 	list_insert_head(&spa->spa_config_list, dp);
747 	if (need_sync)
748 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
749 }
750 
751 int
752 spa_prop_set(spa_t *spa, nvlist_t *nvp)
753 {
754 	int error;
755 	nvpair_t *elem = NULL;
756 	boolean_t need_sync = B_FALSE;
757 
758 	if ((error = spa_prop_validate(spa, nvp)) != 0)
759 		return (error);
760 
761 	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
762 		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
763 
764 		if (prop == ZPOOL_PROP_CACHEFILE ||
765 		    prop == ZPOOL_PROP_ALTROOT ||
766 		    prop == ZPOOL_PROP_READONLY)
767 			continue;
768 
769 		if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
770 			uint64_t ver;
771 
772 			if (prop == ZPOOL_PROP_VERSION) {
773 				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
774 			} else {
775 				ASSERT(zpool_prop_feature(nvpair_name(elem)));
776 				ver = SPA_VERSION_FEATURES;
777 				need_sync = B_TRUE;
778 			}
779 
780 			/* Save time if the version is already set. */
781 			if (ver == spa_version(spa))
782 				continue;
783 
784 			/*
785 			 * In addition to the pool directory object, we might
786 			 * create the pool properties object, the features for
787 			 * read object, the features for write object, or the
788 			 * feature descriptions object.
789 			 */
790 			error = dsl_sync_task(spa->spa_name, NULL,
791 			    spa_sync_version, &ver,
792 			    6, ZFS_SPACE_CHECK_RESERVED);
793 			if (error)
794 				return (error);
795 			continue;
796 		}
797 
798 		need_sync = B_TRUE;
799 		break;
800 	}
801 
802 	if (need_sync) {
803 		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
804 		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
805 	}
806 
807 	return (0);
808 }
809 
810 /*
811  * If the bootfs property value is dsobj, clear it.
812  */
813 void
814 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
815 {
816 	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
817 		VERIFY(zap_remove(spa->spa_meta_objset,
818 		    spa->spa_pool_props_object,
819 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
820 		spa->spa_bootfs = 0;
821 	}
822 }
823 
824 /*ARGSUSED*/
825 static int
826 spa_change_guid_check(void *arg, dmu_tx_t *tx)
827 {
828 	uint64_t *newguid __maybe_unused = arg;
829 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
830 	vdev_t *rvd = spa->spa_root_vdev;
831 	uint64_t vdev_state;
832 
833 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
834 		int error = (spa_has_checkpoint(spa)) ?
835 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
836 		return (SET_ERROR(error));
837 	}
838 
839 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
840 	vdev_state = rvd->vdev_state;
841 	spa_config_exit(spa, SCL_STATE, FTAG);
842 
843 	if (vdev_state != VDEV_STATE_HEALTHY)
844 		return (SET_ERROR(ENXIO));
845 
846 	ASSERT3U(spa_guid(spa), !=, *newguid);
847 
848 	return (0);
849 }
850 
851 static void
852 spa_change_guid_sync(void *arg, dmu_tx_t *tx)
853 {
854 	uint64_t *newguid = arg;
855 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
856 	uint64_t oldguid;
857 	vdev_t *rvd = spa->spa_root_vdev;
858 
859 	oldguid = spa_guid(spa);
860 
861 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
862 	rvd->vdev_guid = *newguid;
863 	rvd->vdev_guid_sum += (*newguid - oldguid);
864 	vdev_config_dirty(rvd);
865 	spa_config_exit(spa, SCL_STATE, FTAG);
866 
867 	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
868 	    (u_longlong_t)oldguid, (u_longlong_t)*newguid);
869 }
870 
871 /*
872  * Change the GUID for the pool.  This is done so that we can later
873  * re-import a pool built from a clone of our own vdevs.  We will modify
874  * the root vdev's guid, our own pool guid, and then mark all of our
875  * vdevs dirty.  Note that we must make sure that all our vdevs are
876  * online when we do this, or else any vdevs that weren't present
877  * would be orphaned from our pool.  We are also going to issue a
878  * sysevent to update any watchers.
879  */
880 int
881 spa_change_guid(spa_t *spa)
882 {
883 	int error;
884 	uint64_t guid;
885 
886 	mutex_enter(&spa->spa_vdev_top_lock);
887 	mutex_enter(&spa_namespace_lock);
888 	guid = spa_generate_guid(NULL);
889 
890 	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
891 	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
892 
893 	if (error == 0) {
894 		spa_write_cachefile(spa, B_FALSE, B_TRUE);
895 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
896 	}
897 
898 	mutex_exit(&spa_namespace_lock);
899 	mutex_exit(&spa->spa_vdev_top_lock);
900 
901 	return (error);
902 }
903 
904 /*
905  * ==========================================================================
906  * SPA state manipulation (open/create/destroy/import/export)
907  * ==========================================================================
908  */
909 
910 static int
911 spa_error_entry_compare(const void *a, const void *b)
912 {
913 	const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
914 	const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
915 	int ret;
916 
917 	ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
918 	    sizeof (zbookmark_phys_t));
919 
920 	return (TREE_ISIGN(ret));
921 }
922 
923 /*
924  * Utility function which retrieves copies of the current logs and
925  * re-initializes them in the process.
926  */
927 void
928 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
929 {
930 	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
931 
932 	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
933 	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
934 
935 	avl_create(&spa->spa_errlist_scrub,
936 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
937 	    offsetof(spa_error_entry_t, se_avl));
938 	avl_create(&spa->spa_errlist_last,
939 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
940 	    offsetof(spa_error_entry_t, se_avl));
941 }
942 
943 static void
944 spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
945 {
946 	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
947 	enum zti_modes mode = ztip->zti_mode;
948 	uint_t value = ztip->zti_value;
949 	uint_t count = ztip->zti_count;
950 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
951 	uint_t flags = 0;
952 	boolean_t batch = B_FALSE;
953 
954 	if (mode == ZTI_MODE_NULL) {
955 		tqs->stqs_count = 0;
956 		tqs->stqs_taskq = NULL;
957 		return;
958 	}
959 
960 	ASSERT3U(count, >, 0);
961 
962 	tqs->stqs_count = count;
963 	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
964 
965 	switch (mode) {
966 	case ZTI_MODE_FIXED:
967 		ASSERT3U(value, >=, 1);
968 		value = MAX(value, 1);
969 		flags |= TASKQ_DYNAMIC;
970 		break;
971 
972 	case ZTI_MODE_BATCH:
973 		batch = B_TRUE;
974 		flags |= TASKQ_THREADS_CPU_PCT;
975 		value = MIN(zio_taskq_batch_pct, 100);
976 		break;
977 
978 	default:
979 		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
980 		    "spa_activate()",
981 		    zio_type_name[t], zio_taskq_types[q], mode, value);
982 		break;
983 	}
984 
985 	for (uint_t i = 0; i < count; i++) {
986 		taskq_t *tq;
987 		char name[32];
988 
989 		(void) snprintf(name, sizeof (name), "%s_%s",
990 		    zio_type_name[t], zio_taskq_types[q]);
991 
992 		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
993 			if (batch)
994 				flags |= TASKQ_DC_BATCH;
995 
996 			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
997 			    spa->spa_proc, zio_taskq_basedc, flags);
998 		} else {
999 			pri_t pri = maxclsyspri;
1000 			/*
1001 			 * The write issue taskq can be extremely CPU
1002 			 * intensive.  Run it at slightly less important
1003 			 * priority than the other taskqs.  Under Linux this
1004 			 * means incrementing the priority value on platforms
1005 			 * like illumos it should be decremented.
1006 			 */
1007 			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
1008 				pri++;
1009 
1010 			tq = taskq_create_proc(name, value, pri, 50,
1011 			    INT_MAX, spa->spa_proc, flags);
1012 		}
1013 
1014 		tqs->stqs_taskq[i] = tq;
1015 	}
1016 }
1017 
1018 static void
1019 spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
1020 {
1021 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1022 
1023 	if (tqs->stqs_taskq == NULL) {
1024 		ASSERT3U(tqs->stqs_count, ==, 0);
1025 		return;
1026 	}
1027 
1028 	for (uint_t i = 0; i < tqs->stqs_count; i++) {
1029 		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
1030 		taskq_destroy(tqs->stqs_taskq[i]);
1031 	}
1032 
1033 	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
1034 	tqs->stqs_taskq = NULL;
1035 }
1036 
1037 /*
1038  * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
1039  * Note that a type may have multiple discrete taskqs to avoid lock contention
1040  * on the taskq itself. In that case we choose which taskq at random by using
1041  * the low bits of gethrtime().
1042  */
1043 void
1044 spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
1045     task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
1046 {
1047 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1048 	taskq_t *tq;
1049 
1050 	ASSERT3P(tqs->stqs_taskq, !=, NULL);
1051 	ASSERT3U(tqs->stqs_count, !=, 0);
1052 
1053 	if (tqs->stqs_count == 1) {
1054 		tq = tqs->stqs_taskq[0];
1055 	} else {
1056 		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
1057 	}
1058 
1059 	taskq_dispatch_ent(tq, func, arg, flags, ent);
1060 }
1061 
1062 /*
1063  * Same as spa_taskq_dispatch_ent() but block on the task until completion.
1064  */
1065 void
1066 spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
1067     task_func_t *func, void *arg, uint_t flags)
1068 {
1069 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1070 	taskq_t *tq;
1071 	taskqid_t id;
1072 
1073 	ASSERT3P(tqs->stqs_taskq, !=, NULL);
1074 	ASSERT3U(tqs->stqs_count, !=, 0);
1075 
1076 	if (tqs->stqs_count == 1) {
1077 		tq = tqs->stqs_taskq[0];
1078 	} else {
1079 		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
1080 	}
1081 
1082 	id = taskq_dispatch(tq, func, arg, flags);
1083 	if (id)
1084 		taskq_wait_id(tq, id);
1085 }
1086 
1087 static void
1088 spa_create_zio_taskqs(spa_t *spa)
1089 {
1090 	for (int t = 0; t < ZIO_TYPES; t++) {
1091 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1092 			spa_taskqs_init(spa, t, q);
1093 		}
1094 	}
1095 }
1096 
1097 /*
1098  * Disabled until spa_thread() can be adapted for Linux.
1099  */
1100 #undef HAVE_SPA_THREAD
1101 
1102 #if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
1103 static void
1104 spa_thread(void *arg)
1105 {
1106 	psetid_t zio_taskq_psrset_bind = PS_NONE;
1107 	callb_cpr_t cprinfo;
1108 
1109 	spa_t *spa = arg;
1110 	user_t *pu = PTOU(curproc);
1111 
1112 	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
1113 	    spa->spa_name);
1114 
1115 	ASSERT(curproc != &p0);
1116 	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
1117 	    "zpool-%s", spa->spa_name);
1118 	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
1119 
1120 	/* bind this thread to the requested psrset */
1121 	if (zio_taskq_psrset_bind != PS_NONE) {
1122 		pool_lock();
1123 		mutex_enter(&cpu_lock);
1124 		mutex_enter(&pidlock);
1125 		mutex_enter(&curproc->p_lock);
1126 
1127 		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
1128 		    0, NULL, NULL) == 0)  {
1129 			curthread->t_bind_pset = zio_taskq_psrset_bind;
1130 		} else {
1131 			cmn_err(CE_WARN,
1132 			    "Couldn't bind process for zfs pool \"%s\" to "
1133 			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
1134 		}
1135 
1136 		mutex_exit(&curproc->p_lock);
1137 		mutex_exit(&pidlock);
1138 		mutex_exit(&cpu_lock);
1139 		pool_unlock();
1140 	}
1141 
1142 	if (zio_taskq_sysdc) {
1143 		sysdc_thread_enter(curthread, 100, 0);
1144 	}
1145 
1146 	spa->spa_proc = curproc;
1147 	spa->spa_did = curthread->t_did;
1148 
1149 	spa_create_zio_taskqs(spa);
1150 
1151 	mutex_enter(&spa->spa_proc_lock);
1152 	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
1153 
1154 	spa->spa_proc_state = SPA_PROC_ACTIVE;
1155 	cv_broadcast(&spa->spa_proc_cv);
1156 
1157 	CALLB_CPR_SAFE_BEGIN(&cprinfo);
1158 	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
1159 		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1160 	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
1161 
1162 	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
1163 	spa->spa_proc_state = SPA_PROC_GONE;
1164 	spa->spa_proc = &p0;
1165 	cv_broadcast(&spa->spa_proc_cv);
1166 	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */
1167 
1168 	mutex_enter(&curproc->p_lock);
1169 	lwp_exit();
1170 }
1171 #endif
1172 
1173 /*
1174  * Activate an uninitialized pool.
1175  */
1176 static void
1177 spa_activate(spa_t *spa, spa_mode_t mode)
1178 {
1179 	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
1180 
1181 	spa->spa_state = POOL_STATE_ACTIVE;
1182 	spa->spa_mode = mode;
1183 
1184 	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
1185 	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
1186 	spa->spa_special_class = metaslab_class_create(spa, zfs_metaslab_ops);
1187 	spa->spa_dedup_class = metaslab_class_create(spa, zfs_metaslab_ops);
1188 
1189 	/* Try to create a covering process */
1190 	mutex_enter(&spa->spa_proc_lock);
1191 	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
1192 	ASSERT(spa->spa_proc == &p0);
1193 	spa->spa_did = 0;
1194 
1195 #ifdef HAVE_SPA_THREAD
1196 	/* Only create a process if we're going to be around a while. */
1197 	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
1198 		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
1199 		    NULL, 0) == 0) {
1200 			spa->spa_proc_state = SPA_PROC_CREATED;
1201 			while (spa->spa_proc_state == SPA_PROC_CREATED) {
1202 				cv_wait(&spa->spa_proc_cv,
1203 				    &spa->spa_proc_lock);
1204 			}
1205 			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1206 			ASSERT(spa->spa_proc != &p0);
1207 			ASSERT(spa->spa_did != 0);
1208 		} else {
1209 #ifdef _KERNEL
1210 			cmn_err(CE_WARN,
1211 			    "Couldn't create process for zfs pool \"%s\"\n",
1212 			    spa->spa_name);
1213 #endif
1214 		}
1215 	}
1216 #endif /* HAVE_SPA_THREAD */
1217 	mutex_exit(&spa->spa_proc_lock);
1218 
1219 	/* If we didn't create a process, we need to create our taskqs. */
1220 	if (spa->spa_proc == &p0) {
1221 		spa_create_zio_taskqs(spa);
1222 	}
1223 
1224 	for (size_t i = 0; i < TXG_SIZE; i++) {
1225 		spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
1226 		    ZIO_FLAG_CANFAIL);
1227 	}
1228 
1229 	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
1230 	    offsetof(vdev_t, vdev_config_dirty_node));
1231 	list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
1232 	    offsetof(objset_t, os_evicting_node));
1233 	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
1234 	    offsetof(vdev_t, vdev_state_dirty_node));
1235 
1236 	txg_list_create(&spa->spa_vdev_txg_list, spa,
1237 	    offsetof(struct vdev, vdev_txg_node));
1238 
1239 	avl_create(&spa->spa_errlist_scrub,
1240 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1241 	    offsetof(spa_error_entry_t, se_avl));
1242 	avl_create(&spa->spa_errlist_last,
1243 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1244 	    offsetof(spa_error_entry_t, se_avl));
1245 
1246 	spa_keystore_init(&spa->spa_keystore);
1247 
1248 	/*
1249 	 * This taskq is used to perform zvol-minor-related tasks
1250 	 * asynchronously. This has several advantages, including easy
1251 	 * resolution of various deadlocks (zfsonlinux bug #3681).
1252 	 *
1253 	 * The taskq must be single threaded to ensure tasks are always
1254 	 * processed in the order in which they were dispatched.
1255 	 *
1256 	 * A taskq per pool allows one to keep the pools independent.
1257 	 * This way if one pool is suspended, it will not impact another.
1258 	 *
1259 	 * The preferred location to dispatch a zvol minor task is a sync
1260 	 * task. In this context, there is easy access to the spa_t and minimal
1261 	 * error handling is required because the sync task must succeed.
1262 	 */
1263 	spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1264 	    1, INT_MAX, 0);
1265 
1266 	/*
1267 	 * Taskq dedicated to prefetcher threads: this is used to prevent the
1268 	 * pool traverse code from monopolizing the global (and limited)
1269 	 * system_taskq by inappropriately scheduling long running tasks on it.
1270 	 */
1271 	spa->spa_prefetch_taskq = taskq_create("z_prefetch", boot_ncpus,
1272 	    defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);
1273 
1274 	/*
1275 	 * The taskq to upgrade datasets in this pool. Currently used by
1276 	 * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
1277 	 */
1278 	spa->spa_upgrade_taskq = taskq_create("z_upgrade", boot_ncpus,
1279 	    defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);
1280 }
1281 
1282 /*
1283  * Opposite of spa_activate().
1284  */
1285 static void
1286 spa_deactivate(spa_t *spa)
1287 {
1288 	ASSERT(spa->spa_sync_on == B_FALSE);
1289 	ASSERT(spa->spa_dsl_pool == NULL);
1290 	ASSERT(spa->spa_root_vdev == NULL);
1291 	ASSERT(spa->spa_async_zio_root == NULL);
1292 	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
1293 
1294 	spa_evicting_os_wait(spa);
1295 
1296 	if (spa->spa_zvol_taskq) {
1297 		taskq_destroy(spa->spa_zvol_taskq);
1298 		spa->spa_zvol_taskq = NULL;
1299 	}
1300 
1301 	if (spa->spa_prefetch_taskq) {
1302 		taskq_destroy(spa->spa_prefetch_taskq);
1303 		spa->spa_prefetch_taskq = NULL;
1304 	}
1305 
1306 	if (spa->spa_upgrade_taskq) {
1307 		taskq_destroy(spa->spa_upgrade_taskq);
1308 		spa->spa_upgrade_taskq = NULL;
1309 	}
1310 
1311 	txg_list_destroy(&spa->spa_vdev_txg_list);
1312 
1313 	list_destroy(&spa->spa_config_dirty_list);
1314 	list_destroy(&spa->spa_evicting_os_list);
1315 	list_destroy(&spa->spa_state_dirty_list);
1316 
1317 	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
1318 
1319 	for (int t = 0; t < ZIO_TYPES; t++) {
1320 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1321 			spa_taskqs_fini(spa, t, q);
1322 		}
1323 	}
1324 
1325 	for (size_t i = 0; i < TXG_SIZE; i++) {
1326 		ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
1327 		VERIFY0(zio_wait(spa->spa_txg_zio[i]));
1328 		spa->spa_txg_zio[i] = NULL;
1329 	}
1330 
1331 	metaslab_class_destroy(spa->spa_normal_class);
1332 	spa->spa_normal_class = NULL;
1333 
1334 	metaslab_class_destroy(spa->spa_log_class);
1335 	spa->spa_log_class = NULL;
1336 
1337 	metaslab_class_destroy(spa->spa_special_class);
1338 	spa->spa_special_class = NULL;
1339 
1340 	metaslab_class_destroy(spa->spa_dedup_class);
1341 	spa->spa_dedup_class = NULL;
1342 
1343 	/*
1344 	 * If this was part of an import or the open otherwise failed, we may
1345 	 * still have errors left in the queues.  Empty them just in case.
1346 	 */
1347 	spa_errlog_drain(spa);
1348 	avl_destroy(&spa->spa_errlist_scrub);
1349 	avl_destroy(&spa->spa_errlist_last);
1350 
1351 	spa_keystore_fini(&spa->spa_keystore);
1352 
1353 	spa->spa_state = POOL_STATE_UNINITIALIZED;
1354 
1355 	mutex_enter(&spa->spa_proc_lock);
1356 	if (spa->spa_proc_state != SPA_PROC_NONE) {
1357 		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1358 		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
1359 		cv_broadcast(&spa->spa_proc_cv);
1360 		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
1361 			ASSERT(spa->spa_proc != &p0);
1362 			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1363 		}
1364 		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
1365 		spa->spa_proc_state = SPA_PROC_NONE;
1366 	}
1367 	ASSERT(spa->spa_proc == &p0);
1368 	mutex_exit(&spa->spa_proc_lock);
1369 
1370 	/*
1371 	 * We want to make sure spa_thread() has actually exited the ZFS
1372 	 * module, so that the module can't be unloaded out from underneath
1373 	 * it.
1374 	 */
1375 	if (spa->spa_did != 0) {
1376 		thread_join(spa->spa_did);
1377 		spa->spa_did = 0;
1378 	}
1379 }
1380 
1381 /*
1382  * Verify a pool configuration, and construct the vdev tree appropriately.  This
1383  * will create all the necessary vdevs in the appropriate layout, with each vdev
1384  * in the CLOSED state.  This will prep the pool before open/creation/import.
1385  * All vdev validation is done by the vdev_alloc() routine.
1386  */
1387 int
1388 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
1389     uint_t id, int atype)
1390 {
1391 	nvlist_t **child;
1392 	uint_t children;
1393 	int error;
1394 
1395 	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
1396 		return (error);
1397 
1398 	if ((*vdp)->vdev_ops->vdev_op_leaf)
1399 		return (0);
1400 
1401 	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1402 	    &child, &children);
1403 
1404 	if (error == ENOENT)
1405 		return (0);
1406 
1407 	if (error) {
1408 		vdev_free(*vdp);
1409 		*vdp = NULL;
1410 		return (SET_ERROR(EINVAL));
1411 	}
1412 
1413 	for (int c = 0; c < children; c++) {
1414 		vdev_t *vd;
1415 		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
1416 		    atype)) != 0) {
1417 			vdev_free(*vdp);
1418 			*vdp = NULL;
1419 			return (error);
1420 		}
1421 	}
1422 
1423 	ASSERT(*vdp != NULL);
1424 
1425 	return (0);
1426 }
1427 
1428 static boolean_t
1429 spa_should_flush_logs_on_unload(spa_t *spa)
1430 {
1431 	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
1432 		return (B_FALSE);
1433 
1434 	if (!spa_writeable(spa))
1435 		return (B_FALSE);
1436 
1437 	if (!spa->spa_sync_on)
1438 		return (B_FALSE);
1439 
1440 	if (spa_state(spa) != POOL_STATE_EXPORTED)
1441 		return (B_FALSE);
1442 
1443 	if (zfs_keep_log_spacemaps_at_export)
1444 		return (B_FALSE);
1445 
1446 	return (B_TRUE);
1447 }
1448 
1449 /*
1450  * Opens a transaction that will set the flag that will instruct
1451  * spa_sync to attempt to flush all the metaslabs for that txg.
1452  */
1453 static void
1454 spa_unload_log_sm_flush_all(spa_t *spa)
1455 {
1456 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
1457 	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
1458 
1459 	ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
1460 	spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
1461 
1462 	dmu_tx_commit(tx);
1463 	txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
1464 }
1465 
1466 static void
1467 spa_unload_log_sm_metadata(spa_t *spa)
1468 {
1469 	void *cookie = NULL;
1470 	spa_log_sm_t *sls;
1471 	while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
1472 	    &cookie)) != NULL) {
1473 		VERIFY0(sls->sls_mscount);
1474 		kmem_free(sls, sizeof (spa_log_sm_t));
1475 	}
1476 
1477 	for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
1478 	    e != NULL; e = list_head(&spa->spa_log_summary)) {
1479 		VERIFY0(e->lse_mscount);
1480 		list_remove(&spa->spa_log_summary, e);
1481 		kmem_free(e, sizeof (log_summary_entry_t));
1482 	}
1483 
1484 	spa->spa_unflushed_stats.sus_nblocks = 0;
1485 	spa->spa_unflushed_stats.sus_memused = 0;
1486 	spa->spa_unflushed_stats.sus_blocklimit = 0;
1487 }
1488 
1489 static void
1490 spa_destroy_aux_threads(spa_t *spa)
1491 {
1492 	if (spa->spa_condense_zthr != NULL) {
1493 		zthr_destroy(spa->spa_condense_zthr);
1494 		spa->spa_condense_zthr = NULL;
1495 	}
1496 	if (spa->spa_checkpoint_discard_zthr != NULL) {
1497 		zthr_destroy(spa->spa_checkpoint_discard_zthr);
1498 		spa->spa_checkpoint_discard_zthr = NULL;
1499 	}
1500 	if (spa->spa_livelist_delete_zthr != NULL) {
1501 		zthr_destroy(spa->spa_livelist_delete_zthr);
1502 		spa->spa_livelist_delete_zthr = NULL;
1503 	}
1504 	if (spa->spa_livelist_condense_zthr != NULL) {
1505 		zthr_destroy(spa->spa_livelist_condense_zthr);
1506 		spa->spa_livelist_condense_zthr = NULL;
1507 	}
1508 }
1509 
1510 /*
1511  * Opposite of spa_load().
1512  */
1513 static void
1514 spa_unload(spa_t *spa)
1515 {
1516 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1517 	ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
1518 
1519 	spa_import_progress_remove(spa_guid(spa));
1520 	spa_load_note(spa, "UNLOADING");
1521 
1522 	spa_wake_waiters(spa);
1523 
1524 	/*
1525 	 * If the log space map feature is enabled and the pool is getting
1526 	 * exported (but not destroyed), we want to spend some time flushing
1527 	 * as many metaslabs as we can in an attempt to destroy log space
1528 	 * maps and save import time.
1529 	 */
1530 	if (spa_should_flush_logs_on_unload(spa))
1531 		spa_unload_log_sm_flush_all(spa);
1532 
1533 	/*
1534 	 * Stop async tasks.
1535 	 */
1536 	spa_async_suspend(spa);
1537 
1538 	if (spa->spa_root_vdev) {
1539 		vdev_t *root_vdev = spa->spa_root_vdev;
1540 		vdev_initialize_stop_all(root_vdev, VDEV_INITIALIZE_ACTIVE);
1541 		vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
1542 		vdev_autotrim_stop_all(spa);
1543 		vdev_rebuild_stop_all(spa);
1544 	}
1545 
1546 	/*
1547 	 * Stop syncing.
1548 	 */
1549 	if (spa->spa_sync_on) {
1550 		txg_sync_stop(spa->spa_dsl_pool);
1551 		spa->spa_sync_on = B_FALSE;
1552 	}
1553 
1554 	/*
1555 	 * This ensures that there is no async metaslab prefetching
1556 	 * while we attempt to unload the spa.
1557 	 */
1558 	if (spa->spa_root_vdev != NULL) {
1559 		for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
1560 			vdev_t *vc = spa->spa_root_vdev->vdev_child[c];
1561 			if (vc->vdev_mg != NULL)
1562 				taskq_wait(vc->vdev_mg->mg_taskq);
1563 		}
1564 	}
1565 
1566 	if (spa->spa_mmp.mmp_thread)
1567 		mmp_thread_stop(spa);
1568 
1569 	/*
1570 	 * Wait for any outstanding async I/O to complete.
1571 	 */
1572 	if (spa->spa_async_zio_root != NULL) {
1573 		for (int i = 0; i < max_ncpus; i++)
1574 			(void) zio_wait(spa->spa_async_zio_root[i]);
1575 		kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
1576 		spa->spa_async_zio_root = NULL;
1577 	}
1578 
1579 	if (spa->spa_vdev_removal != NULL) {
1580 		spa_vdev_removal_destroy(spa->spa_vdev_removal);
1581 		spa->spa_vdev_removal = NULL;
1582 	}
1583 
1584 	spa_destroy_aux_threads(spa);
1585 
1586 	spa_condense_fini(spa);
1587 
1588 	bpobj_close(&spa->spa_deferred_bpobj);
1589 
1590 	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1591 
1592 	/*
1593 	 * Close all vdevs.
1594 	 */
1595 	if (spa->spa_root_vdev)
1596 		vdev_free(spa->spa_root_vdev);
1597 	ASSERT(spa->spa_root_vdev == NULL);
1598 
1599 	/*
1600 	 * Close the dsl pool.
1601 	 */
1602 	if (spa->spa_dsl_pool) {
1603 		dsl_pool_close(spa->spa_dsl_pool);
1604 		spa->spa_dsl_pool = NULL;
1605 		spa->spa_meta_objset = NULL;
1606 	}
1607 
1608 	ddt_unload(spa);
1609 	spa_unload_log_sm_metadata(spa);
1610 
1611 	/*
1612 	 * Drop and purge level 2 cache
1613 	 */
1614 	spa_l2cache_drop(spa);
1615 
1616 	for (int i = 0; i < spa->spa_spares.sav_count; i++)
1617 		vdev_free(spa->spa_spares.sav_vdevs[i]);
1618 	if (spa->spa_spares.sav_vdevs) {
1619 		kmem_free(spa->spa_spares.sav_vdevs,
1620 		    spa->spa_spares.sav_count * sizeof (void *));
1621 		spa->spa_spares.sav_vdevs = NULL;
1622 	}
1623 	if (spa->spa_spares.sav_config) {
1624 		nvlist_free(spa->spa_spares.sav_config);
1625 		spa->spa_spares.sav_config = NULL;
1626 	}
1627 	spa->spa_spares.sav_count = 0;
1628 
1629 	for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
1630 		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
1631 		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
1632 	}
1633 	if (spa->spa_l2cache.sav_vdevs) {
1634 		kmem_free(spa->spa_l2cache.sav_vdevs,
1635 		    spa->spa_l2cache.sav_count * sizeof (void *));
1636 		spa->spa_l2cache.sav_vdevs = NULL;
1637 	}
1638 	if (spa->spa_l2cache.sav_config) {
1639 		nvlist_free(spa->spa_l2cache.sav_config);
1640 		spa->spa_l2cache.sav_config = NULL;
1641 	}
1642 	spa->spa_l2cache.sav_count = 0;
1643 
1644 	spa->spa_async_suspended = 0;
1645 
1646 	spa->spa_indirect_vdevs_loaded = B_FALSE;
1647 
1648 	if (spa->spa_comment != NULL) {
1649 		spa_strfree(spa->spa_comment);
1650 		spa->spa_comment = NULL;
1651 	}
1652 
1653 	spa_config_exit(spa, SCL_ALL, spa);
1654 }
1655 
1656 /*
1657  * Load (or re-load) the current list of vdevs describing the active spares for
1658  * this pool.  When this is called, we have some form of basic information in
1659  * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
1660  * then re-generate a more complete list including status information.
1661  */
1662 void
1663 spa_load_spares(spa_t *spa)
1664 {
1665 	nvlist_t **spares;
1666 	uint_t nspares;
1667 	int i;
1668 	vdev_t *vd, *tvd;
1669 
1670 #ifndef _KERNEL
1671 	/*
1672 	 * zdb opens both the current state of the pool and the
1673 	 * checkpointed state (if present), with a different spa_t.
1674 	 *
1675 	 * As spare vdevs are shared among open pools, we skip loading
1676 	 * them when we load the checkpointed state of the pool.
1677 	 */
1678 	if (!spa_writeable(spa))
1679 		return;
1680 #endif
1681 
1682 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1683 
1684 	/*
1685 	 * First, close and free any existing spare vdevs.
1686 	 */
1687 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
1688 		vd = spa->spa_spares.sav_vdevs[i];
1689 
1690 		/* Undo the call to spa_activate() below */
1691 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1692 		    B_FALSE)) != NULL && tvd->vdev_isspare)
1693 			spa_spare_remove(tvd);
1694 		vdev_close(vd);
1695 		vdev_free(vd);
1696 	}
1697 
1698 	if (spa->spa_spares.sav_vdevs)
1699 		kmem_free(spa->spa_spares.sav_vdevs,
1700 		    spa->spa_spares.sav_count * sizeof (void *));
1701 
1702 	if (spa->spa_spares.sav_config == NULL)
1703 		nspares = 0;
1704 	else
1705 		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1706 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1707 
1708 	spa->spa_spares.sav_count = (int)nspares;
1709 	spa->spa_spares.sav_vdevs = NULL;
1710 
1711 	if (nspares == 0)
1712 		return;
1713 
1714 	/*
1715 	 * Construct the array of vdevs, opening them to get status in the
1716 	 * process.   For each spare, there is potentially two different vdev_t
1717 	 * structures associated with it: one in the list of spares (used only
1718 	 * for basic validation purposes) and one in the active vdev
1719 	 * configuration (if it's spared in).  During this phase we open and
1720 	 * validate each vdev on the spare list.  If the vdev also exists in the
1721 	 * active configuration, then we also mark this vdev as an active spare.
1722 	 */
1723 	spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
1724 	    KM_SLEEP);
1725 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
1726 		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
1727 		    VDEV_ALLOC_SPARE) == 0);
1728 		ASSERT(vd != NULL);
1729 
1730 		spa->spa_spares.sav_vdevs[i] = vd;
1731 
1732 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1733 		    B_FALSE)) != NULL) {
1734 			if (!tvd->vdev_isspare)
1735 				spa_spare_add(tvd);
1736 
1737 			/*
1738 			 * We only mark the spare active if we were successfully
1739 			 * able to load the vdev.  Otherwise, importing a pool
1740 			 * with a bad active spare would result in strange
1741 			 * behavior, because multiple pool would think the spare
1742 			 * is actively in use.
1743 			 *
1744 			 * There is a vulnerability here to an equally bizarre
1745 			 * circumstance, where a dead active spare is later
1746 			 * brought back to life (onlined or otherwise).  Given
1747 			 * the rarity of this scenario, and the extra complexity
1748 			 * it adds, we ignore the possibility.
1749 			 */
1750 			if (!vdev_is_dead(tvd))
1751 				spa_spare_activate(tvd);
1752 		}
1753 
1754 		vd->vdev_top = vd;
1755 		vd->vdev_aux = &spa->spa_spares;
1756 
1757 		if (vdev_open(vd) != 0)
1758 			continue;
1759 
1760 		if (vdev_validate_aux(vd) == 0)
1761 			spa_spare_add(vd);
1762 	}
1763 
1764 	/*
1765 	 * Recompute the stashed list of spares, with status information
1766 	 * this time.
1767 	 */
1768 	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
1769 	    DATA_TYPE_NVLIST_ARRAY) == 0);
1770 
1771 	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
1772 	    KM_SLEEP);
1773 	for (i = 0; i < spa->spa_spares.sav_count; i++)
1774 		spares[i] = vdev_config_generate(spa,
1775 		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
1776 	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1777 	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
1778 	for (i = 0; i < spa->spa_spares.sav_count; i++)
1779 		nvlist_free(spares[i]);
1780 	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
1781 }
1782 
1783 /*
1784  * Load (or re-load) the current list of vdevs describing the active l2cache for
1785  * this pool.  When this is called, we have some form of basic information in
1786  * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
1787  * then re-generate a more complete list including status information.
1788  * Devices which are already active have their details maintained, and are
1789  * not re-opened.
1790  */
1791 void
1792 spa_load_l2cache(spa_t *spa)
1793 {
1794 	nvlist_t **l2cache = NULL;
1795 	uint_t nl2cache;
1796 	int i, j, oldnvdevs;
1797 	uint64_t guid;
1798 	vdev_t *vd, **oldvdevs, **newvdevs;
1799 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
1800 
1801 #ifndef _KERNEL
1802 	/*
1803 	 * zdb opens both the current state of the pool and the
1804 	 * checkpointed state (if present), with a different spa_t.
1805 	 *
1806 	 * As L2 caches are part of the ARC which is shared among open
1807 	 * pools, we skip loading them when we load the checkpointed
1808 	 * state of the pool.
1809 	 */
1810 	if (!spa_writeable(spa))
1811 		return;
1812 #endif
1813 
1814 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1815 
1816 	oldvdevs = sav->sav_vdevs;
1817 	oldnvdevs = sav->sav_count;
1818 	sav->sav_vdevs = NULL;
1819 	sav->sav_count = 0;
1820 
1821 	if (sav->sav_config == NULL) {
1822 		nl2cache = 0;
1823 		newvdevs = NULL;
1824 		goto out;
1825 	}
1826 
1827 	VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
1828 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1829 	newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
1830 
1831 	/*
1832 	 * Process new nvlist of vdevs.
1833 	 */
1834 	for (i = 0; i < nl2cache; i++) {
1835 		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
1836 		    &guid) == 0);
1837 
1838 		newvdevs[i] = NULL;
1839 		for (j = 0; j < oldnvdevs; j++) {
1840 			vd = oldvdevs[j];
1841 			if (vd != NULL && guid == vd->vdev_guid) {
1842 				/*
1843 				 * Retain previous vdev for add/remove ops.
1844 				 */
1845 				newvdevs[i] = vd;
1846 				oldvdevs[j] = NULL;
1847 				break;
1848 			}
1849 		}
1850 
1851 		if (newvdevs[i] == NULL) {
1852 			/*
1853 			 * Create new vdev
1854 			 */
1855 			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
1856 			    VDEV_ALLOC_L2CACHE) == 0);
1857 			ASSERT(vd != NULL);
1858 			newvdevs[i] = vd;
1859 
1860 			/*
1861 			 * Commit this vdev as an l2cache device,
1862 			 * even if it fails to open.
1863 			 */
1864 			spa_l2cache_add(vd);
1865 
1866 			vd->vdev_top = vd;
1867 			vd->vdev_aux = sav;
1868 
1869 			spa_l2cache_activate(vd);
1870 
1871 			if (vdev_open(vd) != 0)
1872 				continue;
1873 
1874 			(void) vdev_validate_aux(vd);
1875 
1876 			if (!vdev_is_dead(vd))
1877 				l2arc_add_vdev(spa, vd);
1878 
1879 			/*
1880 			 * Upon cache device addition to a pool or pool
1881 			 * creation with a cache device or if the header
1882 			 * of the device is invalid we issue an async
1883 			 * TRIM command for the whole device which will
1884 			 * execute if l2arc_trim_ahead > 0.
1885 			 */
1886 			spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
1887 		}
1888 	}
1889 
1890 	sav->sav_vdevs = newvdevs;
1891 	sav->sav_count = (int)nl2cache;
1892 
1893 	/*
1894 	 * Recompute the stashed list of l2cache devices, with status
1895 	 * information this time.
1896 	 */
1897 	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1898 	    DATA_TYPE_NVLIST_ARRAY) == 0);
1899 
1900 	if (sav->sav_count > 0)
1901 		l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
1902 		    KM_SLEEP);
1903 	for (i = 0; i < sav->sav_count; i++)
1904 		l2cache[i] = vdev_config_generate(spa,
1905 		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
1906 	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1907 	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1908 
1909 out:
1910 	/*
1911 	 * Purge vdevs that were dropped
1912 	 */
1913 	for (i = 0; i < oldnvdevs; i++) {
1914 		uint64_t pool;
1915 
1916 		vd = oldvdevs[i];
1917 		if (vd != NULL) {
1918 			ASSERT(vd->vdev_isl2cache);
1919 
1920 			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1921 			    pool != 0ULL && l2arc_vdev_present(vd))
1922 				l2arc_remove_vdev(vd);
1923 			vdev_clear_stats(vd);
1924 			vdev_free(vd);
1925 		}
1926 	}
1927 
1928 	if (oldvdevs)
1929 		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1930 
1931 	for (i = 0; i < sav->sav_count; i++)
1932 		nvlist_free(l2cache[i]);
1933 	if (sav->sav_count)
1934 		kmem_free(l2cache, sav->sav_count * sizeof (void *));
1935 }
1936 
1937 static int
1938 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
1939 {
1940 	dmu_buf_t *db;
1941 	char *packed = NULL;
1942 	size_t nvsize = 0;
1943 	int error;
1944 	*value = NULL;
1945 
1946 	error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
1947 	if (error)
1948 		return (error);
1949 
1950 	nvsize = *(uint64_t *)db->db_data;
1951 	dmu_buf_rele(db, FTAG);
1952 
1953 	packed = vmem_alloc(nvsize, KM_SLEEP);
1954 	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
1955 	    DMU_READ_PREFETCH);
1956 	if (error == 0)
1957 		error = nvlist_unpack(packed, nvsize, value, 0);
1958 	vmem_free(packed, nvsize);
1959 
1960 	return (error);
1961 }
1962 
1963 /*
1964  * Concrete top-level vdevs that are not missing and are not logs. At every
1965  * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
1966  */
1967 static uint64_t
1968 spa_healthy_core_tvds(spa_t *spa)
1969 {
1970 	vdev_t *rvd = spa->spa_root_vdev;
1971 	uint64_t tvds = 0;
1972 
1973 	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
1974 		vdev_t *vd = rvd->vdev_child[i];
1975 		if (vd->vdev_islog)
1976 			continue;
1977 		if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
1978 			tvds++;
1979 	}
1980 
1981 	return (tvds);
1982 }
1983 
1984 /*
1985  * Checks to see if the given vdev could not be opened, in which case we post a
1986  * sysevent to notify the autoreplace code that the device has been removed.
1987  */
1988 static void
1989 spa_check_removed(vdev_t *vd)
1990 {
1991 	for (uint64_t c = 0; c < vd->vdev_children; c++)
1992 		spa_check_removed(vd->vdev_child[c]);
1993 
1994 	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
1995 	    vdev_is_concrete(vd)) {
1996 		zfs_post_autoreplace(vd->vdev_spa, vd);
1997 		spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
1998 	}
1999 }
2000 
2001 static int
2002 spa_check_for_missing_logs(spa_t *spa)
2003 {
2004 	vdev_t *rvd = spa->spa_root_vdev;
2005 
2006 	/*
2007 	 * If we're doing a normal import, then build up any additional
2008 	 * diagnostic information about missing log devices.
2009 	 * We'll pass this up to the user for further processing.
2010 	 */
2011 	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
2012 		nvlist_t **child, *nv;
2013 		uint64_t idx = 0;
2014 
2015 		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
2016 		    KM_SLEEP);
2017 		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2018 
2019 		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2020 			vdev_t *tvd = rvd->vdev_child[c];
2021 
2022 			/*
2023 			 * We consider a device as missing only if it failed
2024 			 * to open (i.e. offline or faulted is not considered
2025 			 * as missing).
2026 			 */
2027 			if (tvd->vdev_islog &&
2028 			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
2029 				child[idx++] = vdev_config_generate(spa, tvd,
2030 				    B_FALSE, VDEV_CONFIG_MISSING);
2031 			}
2032 		}
2033 
2034 		if (idx > 0) {
2035 			fnvlist_add_nvlist_array(nv,
2036 			    ZPOOL_CONFIG_CHILDREN, child, idx);
2037 			fnvlist_add_nvlist(spa->spa_load_info,
2038 			    ZPOOL_CONFIG_MISSING_DEVICES, nv);
2039 
2040 			for (uint64_t i = 0; i < idx; i++)
2041 				nvlist_free(child[i]);
2042 		}
2043 		nvlist_free(nv);
2044 		kmem_free(child, rvd->vdev_children * sizeof (char **));
2045 
2046 		if (idx > 0) {
2047 			spa_load_failed(spa, "some log devices are missing");
2048 			vdev_dbgmsg_print_tree(rvd, 2);
2049 			return (SET_ERROR(ENXIO));
2050 		}
2051 	} else {
2052 		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2053 			vdev_t *tvd = rvd->vdev_child[c];
2054 
2055 			if (tvd->vdev_islog &&
2056 			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
2057 				spa_set_log_state(spa, SPA_LOG_CLEAR);
2058 				spa_load_note(spa, "some log devices are "
2059 				    "missing, ZIL is dropped.");
2060 				vdev_dbgmsg_print_tree(rvd, 2);
2061 				break;
2062 			}
2063 		}
2064 	}
2065 
2066 	return (0);
2067 }
2068 
2069 /*
2070  * Check for missing log devices
2071  */
2072 static boolean_t
2073 spa_check_logs(spa_t *spa)
2074 {
2075 	boolean_t rv = B_FALSE;
2076 	dsl_pool_t *dp = spa_get_dsl(spa);
2077 
2078 	switch (spa->spa_log_state) {
2079 	default:
2080 		break;
2081 	case SPA_LOG_MISSING:
2082 		/* need to recheck in case slog has been restored */
2083 	case SPA_LOG_UNKNOWN:
2084 		rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2085 		    zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
2086 		if (rv)
2087 			spa_set_log_state(spa, SPA_LOG_MISSING);
2088 		break;
2089 	}
2090 	return (rv);
2091 }
2092 
2093 static boolean_t
2094 spa_passivate_log(spa_t *spa)
2095 {
2096 	vdev_t *rvd = spa->spa_root_vdev;
2097 	boolean_t slog_found = B_FALSE;
2098 
2099 	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
2100 
2101 	if (!spa_has_slogs(spa))
2102 		return (B_FALSE);
2103 
2104 	for (int c = 0; c < rvd->vdev_children; c++) {
2105 		vdev_t *tvd = rvd->vdev_child[c];
2106 		metaslab_group_t *mg = tvd->vdev_mg;
2107 
2108 		if (tvd->vdev_islog) {
2109 			metaslab_group_passivate(mg);
2110 			slog_found = B_TRUE;
2111 		}
2112 	}
2113 
2114 	return (slog_found);
2115 }
2116 
2117 static void
2118 spa_activate_log(spa_t *spa)
2119 {
2120 	vdev_t *rvd = spa->spa_root_vdev;
2121 
2122 	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
2123 
2124 	for (int c = 0; c < rvd->vdev_children; c++) {
2125 		vdev_t *tvd = rvd->vdev_child[c];
2126 		metaslab_group_t *mg = tvd->vdev_mg;
2127 
2128 		if (tvd->vdev_islog)
2129 			metaslab_group_activate(mg);
2130 	}
2131 }
2132 
2133 int
2134 spa_reset_logs(spa_t *spa)
2135 {
2136 	int error;
2137 
2138 	error = dmu_objset_find(spa_name(spa), zil_reset,
2139 	    NULL, DS_FIND_CHILDREN);
2140 	if (error == 0) {
2141 		/*
2142 		 * We successfully offlined the log device, sync out the
2143 		 * current txg so that the "stubby" block can be removed
2144 		 * by zil_sync().
2145 		 */
2146 		txg_wait_synced(spa->spa_dsl_pool, 0);
2147 	}
2148 	return (error);
2149 }
2150 
2151 static void
2152 spa_aux_check_removed(spa_aux_vdev_t *sav)
2153 {
2154 	for (int i = 0; i < sav->sav_count; i++)
2155 		spa_check_removed(sav->sav_vdevs[i]);
2156 }
2157 
2158 void
2159 spa_claim_notify(zio_t *zio)
2160 {
2161 	spa_t *spa = zio->io_spa;
2162 
2163 	if (zio->io_error)
2164 		return;
2165 
2166 	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
2167 	if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
2168 		spa->spa_claim_max_txg = zio->io_bp->blk_birth;
2169 	mutex_exit(&spa->spa_props_lock);
2170 }
2171 
2172 typedef struct spa_load_error {
2173 	uint64_t	sle_meta_count;
2174 	uint64_t	sle_data_count;
2175 } spa_load_error_t;
2176 
2177 static void
2178 spa_load_verify_done(zio_t *zio)
2179 {
2180 	blkptr_t *bp = zio->io_bp;
2181 	spa_load_error_t *sle = zio->io_private;
2182 	dmu_object_type_t type = BP_GET_TYPE(bp);
2183 	int error = zio->io_error;
2184 	spa_t *spa = zio->io_spa;
2185 
2186 	abd_free(zio->io_abd);
2187 	if (error) {
2188 		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
2189 		    type != DMU_OT_INTENT_LOG)
2190 			atomic_inc_64(&sle->sle_meta_count);
2191 		else
2192 			atomic_inc_64(&sle->sle_data_count);
2193 	}
2194 
2195 	mutex_enter(&spa->spa_scrub_lock);
2196 	spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
2197 	cv_broadcast(&spa->spa_scrub_io_cv);
2198 	mutex_exit(&spa->spa_scrub_lock);
2199 }
2200 
2201 /*
2202  * Maximum number of inflight bytes is the log2 fraction of the arc size.
2203  * By default, we set it to 1/16th of the arc.
2204  */
2205 int spa_load_verify_shift = 4;
2206 int spa_load_verify_metadata = B_TRUE;
2207 int spa_load_verify_data = B_TRUE;
2208 
2209 /*ARGSUSED*/
2210 static int
2211 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
2212     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
2213 {
2214 	if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
2215 	    BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
2216 		return (0);
2217 	/*
2218 	 * Note: normally this routine will not be called if
2219 	 * spa_load_verify_metadata is not set.  However, it may be useful
2220 	 * to manually set the flag after the traversal has begun.
2221 	 */
2222 	if (!spa_load_verify_metadata)
2223 		return (0);
2224 	if (!BP_IS_METADATA(bp) && !spa_load_verify_data)
2225 		return (0);
2226 
2227 	uint64_t maxinflight_bytes =
2228 	    arc_target_bytes() >> spa_load_verify_shift;
2229 	zio_t *rio = arg;
2230 	size_t size = BP_GET_PSIZE(bp);
2231 
2232 	mutex_enter(&spa->spa_scrub_lock);
2233 	while (spa->spa_load_verify_bytes >= maxinflight_bytes)
2234 		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2235 	spa->spa_load_verify_bytes += size;
2236 	mutex_exit(&spa->spa_scrub_lock);
2237 
2238 	zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
2239 	    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
2240 	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
2241 	    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
2242 	return (0);
2243 }
2244 
2245 /* ARGSUSED */
2246 static int
2247 verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
2248 {
2249 	if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
2250 		return (SET_ERROR(ENAMETOOLONG));
2251 
2252 	return (0);
2253 }
2254 
2255 static int
2256 spa_load_verify(spa_t *spa)
2257 {
2258 	zio_t *rio;
2259 	spa_load_error_t sle = { 0 };
2260 	zpool_load_policy_t policy;
2261 	boolean_t verify_ok = B_FALSE;
2262 	int error = 0;
2263 
2264 	zpool_get_load_policy(spa->spa_config, &policy);
2265 
2266 	if (policy.zlp_rewind & ZPOOL_NEVER_REWIND)
2267 		return (0);
2268 
2269 	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
2270 	error = dmu_objset_find_dp(spa->spa_dsl_pool,
2271 	    spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
2272 	    DS_FIND_CHILDREN);
2273 	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
2274 	if (error != 0)
2275 		return (error);
2276 
2277 	rio = zio_root(spa, NULL, &sle,
2278 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
2279 
2280 	if (spa_load_verify_metadata) {
2281 		if (spa->spa_extreme_rewind) {
2282 			spa_load_note(spa, "performing a complete scan of the "
2283 			    "pool since extreme rewind is on. This may take "
2284 			    "a very long time.\n  (spa_load_verify_data=%u, "
2285 			    "spa_load_verify_metadata=%u)",
2286 			    spa_load_verify_data, spa_load_verify_metadata);
2287 		}
2288 
2289 		error = traverse_pool(spa, spa->spa_verify_min_txg,
2290 		    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
2291 		    TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
2292 	}
2293 
2294 	(void) zio_wait(rio);
2295 	ASSERT0(spa->spa_load_verify_bytes);
2296 
2297 	spa->spa_load_meta_errors = sle.sle_meta_count;
2298 	spa->spa_load_data_errors = sle.sle_data_count;
2299 
2300 	if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
2301 		spa_load_note(spa, "spa_load_verify found %llu metadata errors "
2302 		    "and %llu data errors", (u_longlong_t)sle.sle_meta_count,
2303 		    (u_longlong_t)sle.sle_data_count);
2304 	}
2305 
2306 	if (spa_load_verify_dryrun ||
2307 	    (!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
2308 	    sle.sle_data_count <= policy.zlp_maxdata)) {
2309 		int64_t loss = 0;
2310 
2311 		verify_ok = B_TRUE;
2312 		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
2313 		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
2314 
2315 		loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
2316 		VERIFY(nvlist_add_uint64(spa->spa_load_info,
2317 		    ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
2318 		VERIFY(nvlist_add_int64(spa->spa_load_info,
2319 		    ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
2320 		VERIFY(nvlist_add_uint64(spa->spa_load_info,
2321 		    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
2322 	} else {
2323 		spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
2324 	}
2325 
2326 	if (spa_load_verify_dryrun)
2327 		return (0);
2328 
2329 	if (error) {
2330 		if (error != ENXIO && error != EIO)
2331 			error = SET_ERROR(EIO);
2332 		return (error);
2333 	}
2334 
2335 	return (verify_ok ? 0 : EIO);
2336 }
2337 
2338 /*
2339  * Find a value in the pool props object.
2340  */
2341 static void
2342 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
2343 {
2344 	(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
2345 	    zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
2346 }
2347 
2348 /*
2349  * Find a value in the pool directory object.
2350  */
2351 static int
2352 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
2353 {
2354 	int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2355 	    name, sizeof (uint64_t), 1, val);
2356 
2357 	if (error != 0 && (error != ENOENT || log_enoent)) {
2358 		spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
2359 		    "[error=%d]", name, error);
2360 	}
2361 
2362 	return (error);
2363 }
2364 
2365 static int
2366 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
2367 {
2368 	vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
2369 	return (SET_ERROR(err));
2370 }
2371 
2372 boolean_t
2373 spa_livelist_delete_check(spa_t *spa)
2374 {
2375 	return (spa->spa_livelists_to_delete != 0);
2376 }
2377 
2378 /* ARGSUSED */
2379 static boolean_t
2380 spa_livelist_delete_cb_check(void *arg, zthr_t *z)
2381 {
2382 	spa_t *spa = arg;
2383 	return (spa_livelist_delete_check(spa));
2384 }
2385 
2386 static int
2387 delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
2388 {
2389 	spa_t *spa = arg;
2390 	zio_free(spa, tx->tx_txg, bp);
2391 	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
2392 	    -bp_get_dsize_sync(spa, bp),
2393 	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
2394 	return (0);
2395 }
2396 
2397 static int
2398 dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp)
2399 {
2400 	int err;
2401 	zap_cursor_t zc;
2402 	zap_attribute_t za;
2403 	zap_cursor_init(&zc, os, zap_obj);
2404 	err = zap_cursor_retrieve(&zc, &za);
2405 	zap_cursor_fini(&zc);
2406 	if (err == 0)
2407 		*llp = za.za_first_integer;
2408 	return (err);
2409 }
2410 
2411 /*
2412  * Components of livelist deletion that must be performed in syncing
2413  * context: freeing block pointers and updating the pool-wide data
2414  * structures to indicate how much work is left to do
2415  */
2416 typedef struct sublist_delete_arg {
2417 	spa_t *spa;
2418 	dsl_deadlist_t *ll;
2419 	uint64_t key;
2420 	bplist_t *to_free;
2421 } sublist_delete_arg_t;
2422 
2423 static void
2424 sublist_delete_sync(void *arg, dmu_tx_t *tx)
2425 {
2426 	sublist_delete_arg_t *sda = arg;
2427 	spa_t *spa = sda->spa;
2428 	dsl_deadlist_t *ll = sda->ll;
2429 	uint64_t key = sda->key;
2430 	bplist_t *to_free = sda->to_free;
2431 
2432 	bplist_iterate(to_free, delete_blkptr_cb, spa, tx);
2433 	dsl_deadlist_remove_entry(ll, key, tx);
2434 }
2435 
2436 typedef struct livelist_delete_arg {
2437 	spa_t *spa;
2438 	uint64_t ll_obj;
2439 	uint64_t zap_obj;
2440 } livelist_delete_arg_t;
2441 
2442 static void
2443 livelist_delete_sync(void *arg, dmu_tx_t *tx)
2444 {
2445 	livelist_delete_arg_t *lda = arg;
2446 	spa_t *spa = lda->spa;
2447 	uint64_t ll_obj = lda->ll_obj;
2448 	uint64_t zap_obj = lda->zap_obj;
2449 	objset_t *mos = spa->spa_meta_objset;
2450 	uint64_t count;
2451 
2452 	/* free the livelist and decrement the feature count */
2453 	VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx));
2454 	dsl_deadlist_free(mos, ll_obj, tx);
2455 	spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
2456 	VERIFY0(zap_count(mos, zap_obj, &count));
2457 	if (count == 0) {
2458 		/* no more livelists to delete */
2459 		VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
2460 		    DMU_POOL_DELETED_CLONES, tx));
2461 		VERIFY0(zap_destroy(mos, zap_obj, tx));
2462 		spa->spa_livelists_to_delete = 0;
2463 		spa_notify_waiters(spa);
2464 	}
2465 }
2466 
2467 /*
2468  * Load in the value for the livelist to be removed and open it. Then,
2469  * load its first sublist and determine which block pointers should actually
2470  * be freed. Then, call a synctask which performs the actual frees and updates
2471  * the pool-wide livelist data.
2472  */
2473 /* ARGSUSED */
2474 static void
2475 spa_livelist_delete_cb(void *arg, zthr_t *z)
2476 {
2477 	spa_t *spa = arg;
2478 	uint64_t ll_obj = 0, count;
2479 	objset_t *mos = spa->spa_meta_objset;
2480 	uint64_t zap_obj = spa->spa_livelists_to_delete;
2481 	/*
2482 	 * Determine the next livelist to delete. This function should only
2483 	 * be called if there is at least one deleted clone.
2484 	 */
2485 	VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj));
2486 	VERIFY0(zap_count(mos, ll_obj, &count));
2487 	if (count > 0) {
2488 		dsl_deadlist_t ll = { 0 };
2489 		dsl_deadlist_entry_t *dle;
2490 		bplist_t to_free;
2491 		dsl_deadlist_open(&ll, mos, ll_obj);
2492 		dle = dsl_deadlist_first(&ll);
2493 		ASSERT3P(dle, !=, NULL);
2494 		bplist_create(&to_free);
2495 		int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
2496 		    z, NULL);
2497 		if (err == 0) {
2498 			sublist_delete_arg_t sync_arg = {
2499 			    .spa = spa,
2500 			    .ll = &ll,
2501 			    .key = dle->dle_mintxg,
2502 			    .to_free = &to_free
2503 			};
2504 			zfs_dbgmsg("deleting sublist (id %llu) from"
2505 			    " livelist %llu, %d remaining",
2506 			    dle->dle_bpobj.bpo_object, ll_obj, count - 1);
2507 			VERIFY0(dsl_sync_task(spa_name(spa), NULL,
2508 			    sublist_delete_sync, &sync_arg, 0,
2509 			    ZFS_SPACE_CHECK_DESTROY));
2510 		} else {
2511 			VERIFY3U(err, ==, EINTR);
2512 		}
2513 		bplist_clear(&to_free);
2514 		bplist_destroy(&to_free);
2515 		dsl_deadlist_close(&ll);
2516 	} else {
2517 		livelist_delete_arg_t sync_arg = {
2518 		    .spa = spa,
2519 		    .ll_obj = ll_obj,
2520 		    .zap_obj = zap_obj
2521 		};
2522 		zfs_dbgmsg("deletion of livelist %llu completed", ll_obj);
2523 		VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
2524 		    &sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
2525 	}
2526 }
2527 
2528 static void
2529 spa_start_livelist_destroy_thread(spa_t *spa)
2530 {
2531 	ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
2532 	spa->spa_livelist_delete_zthr =
2533 	    zthr_create("z_livelist_destroy",
2534 	    spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa);
2535 }
2536 
2537 typedef struct livelist_new_arg {
2538 	bplist_t *allocs;
2539 	bplist_t *frees;
2540 } livelist_new_arg_t;
2541 
2542 static int
2543 livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
2544     dmu_tx_t *tx)
2545 {
2546 	ASSERT(tx == NULL);
2547 	livelist_new_arg_t *lna = arg;
2548 	if (bp_freed) {
2549 		bplist_append(lna->frees, bp);
2550 	} else {
2551 		bplist_append(lna->allocs, bp);
2552 		zfs_livelist_condense_new_alloc++;
2553 	}
2554 	return (0);
2555 }
2556 
2557 typedef struct livelist_condense_arg {
2558 	spa_t *spa;
2559 	bplist_t to_keep;
2560 	uint64_t first_size;
2561 	uint64_t next_size;
2562 } livelist_condense_arg_t;
2563 
2564 static void
2565 spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
2566 {
2567 	livelist_condense_arg_t *lca = arg;
2568 	spa_t *spa = lca->spa;
2569 	bplist_t new_frees;
2570 	dsl_dataset_t *ds = spa->spa_to_condense.ds;
2571 
2572 	/* Have we been cancelled? */
2573 	if (spa->spa_to_condense.cancelled) {
2574 		zfs_livelist_condense_sync_cancel++;
2575 		goto out;
2576 	}
2577 
2578 	dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
2579 	dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
2580 	dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
2581 
2582 	/*
2583 	 * It's possible that the livelist was changed while the zthr was
2584 	 * running. Therefore, we need to check for new blkptrs in the two
2585 	 * entries being condensed and continue to track them in the livelist.
2586 	 * Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
2587 	 * it's possible that the newly added blkptrs are FREEs or ALLOCs so
2588 	 * we need to sort them into two different bplists.
2589 	 */
2590 	uint64_t first_obj = first->dle_bpobj.bpo_object;
2591 	uint64_t next_obj = next->dle_bpobj.bpo_object;
2592 	uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
2593 	uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
2594 
2595 	bplist_create(&new_frees);
2596 	livelist_new_arg_t new_bps = {
2597 	    .allocs = &lca->to_keep,
2598 	    .frees = &new_frees,
2599 	};
2600 
2601 	if (cur_first_size > lca->first_size) {
2602 		VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
2603 		    livelist_track_new_cb, &new_bps, lca->first_size));
2604 	}
2605 	if (cur_next_size > lca->next_size) {
2606 		VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
2607 		    livelist_track_new_cb, &new_bps, lca->next_size));
2608 	}
2609 
2610 	dsl_deadlist_clear_entry(first, ll, tx);
2611 	ASSERT(bpobj_is_empty(&first->dle_bpobj));
2612 	dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
2613 
2614 	bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
2615 	bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
2616 	bplist_destroy(&new_frees);
2617 
2618 	char dsname[ZFS_MAX_DATASET_NAME_LEN];
2619 	dsl_dataset_name(ds, dsname);
2620 	zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
2621 	    "(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
2622 	    "(%llu blkptrs)", tx->tx_txg, dsname, ds->ds_object, first_obj,
2623 	    cur_first_size, next_obj, cur_next_size,
2624 	    first->dle_bpobj.bpo_object,
2625 	    first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
2626 out:
2627 	dmu_buf_rele(ds->ds_dbuf, spa);
2628 	spa->spa_to_condense.ds = NULL;
2629 	bplist_clear(&lca->to_keep);
2630 	bplist_destroy(&lca->to_keep);
2631 	kmem_free(lca, sizeof (livelist_condense_arg_t));
2632 	spa->spa_to_condense.syncing = B_FALSE;
2633 }
2634 
2635 static void
2636 spa_livelist_condense_cb(void *arg, zthr_t *t)
2637 {
2638 	while (zfs_livelist_condense_zthr_pause &&
2639 	    !(zthr_has_waiters(t) || zthr_iscancelled(t)))
2640 		delay(1);
2641 
2642 	spa_t *spa = arg;
2643 	dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
2644 	dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
2645 	uint64_t first_size, next_size;
2646 
2647 	livelist_condense_arg_t *lca =
2648 	    kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
2649 	bplist_create(&lca->to_keep);
2650 
2651 	/*
2652 	 * Process the livelists (matching FREEs and ALLOCs) in open context
2653 	 * so we have minimal work in syncing context to condense.
2654 	 *
2655 	 * We save bpobj sizes (first_size and next_size) to use later in
2656 	 * syncing context to determine if entries were added to these sublists
2657 	 * while in open context. This is possible because the clone is still
2658 	 * active and open for normal writes and we want to make sure the new,
2659 	 * unprocessed blockpointers are inserted into the livelist normally.
2660 	 *
2661 	 * Note that dsl_process_sub_livelist() both stores the size number of
2662 	 * blockpointers and iterates over them while the bpobj's lock held, so
2663 	 * the sizes returned to us are consistent which what was actually
2664 	 * processed.
2665 	 */
2666 	int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t,
2667 	    &first_size);
2668 	if (err == 0)
2669 		err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep,
2670 		    t, &next_size);
2671 
2672 	if (err == 0) {
2673 		while (zfs_livelist_condense_sync_pause &&
2674 		    !(zthr_has_waiters(t) || zthr_iscancelled(t)))
2675 			delay(1);
2676 
2677 		dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
2678 		dmu_tx_mark_netfree(tx);
2679 		dmu_tx_hold_space(tx, 1);
2680 		err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
2681 		if (err == 0) {
2682 			/*
2683 			 * Prevent the condense zthr restarting before
2684 			 * the synctask completes.
2685 			 */
2686 			spa->spa_to_condense.syncing = B_TRUE;
2687 			lca->spa = spa;
2688 			lca->first_size = first_size;
2689 			lca->next_size = next_size;
2690 			dsl_sync_task_nowait(spa_get_dsl(spa),
2691 			    spa_livelist_condense_sync, lca, 0,
2692 			    ZFS_SPACE_CHECK_NONE, tx);
2693 			dmu_tx_commit(tx);
2694 			return;
2695 		}
2696 	}
2697 	/*
2698 	 * Condensing can not continue: either it was externally stopped or
2699 	 * we were unable to assign to a tx because the pool has run out of
2700 	 * space. In the second case, we'll just end up trying to condense
2701 	 * again in a later txg.
2702 	 */
2703 	ASSERT(err != 0);
2704 	bplist_clear(&lca->to_keep);
2705 	bplist_destroy(&lca->to_keep);
2706 	kmem_free(lca, sizeof (livelist_condense_arg_t));
2707 	dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa);
2708 	spa->spa_to_condense.ds = NULL;
2709 	if (err == EINTR)
2710 		zfs_livelist_condense_zthr_cancel++;
2711 }
2712 
2713 /* ARGSUSED */
2714 /*
2715  * Check that there is something to condense but that a condense is not
2716  * already in progress and that condensing has not been cancelled.
2717  */
2718 static boolean_t
2719 spa_livelist_condense_cb_check(void *arg, zthr_t *z)
2720 {
2721 	spa_t *spa = arg;
2722 	if ((spa->spa_to_condense.ds != NULL) &&
2723 	    (spa->spa_to_condense.syncing == B_FALSE) &&
2724 	    (spa->spa_to_condense.cancelled == B_FALSE)) {
2725 		return (B_TRUE);
2726 	}
2727 	return (B_FALSE);
2728 }
2729 
2730 static void
2731 spa_start_livelist_condensing_thread(spa_t *spa)
2732 {
2733 	spa->spa_to_condense.ds = NULL;
2734 	spa->spa_to_condense.first = NULL;
2735 	spa->spa_to_condense.next = NULL;
2736 	spa->spa_to_condense.syncing = B_FALSE;
2737 	spa->spa_to_condense.cancelled = B_FALSE;
2738 
2739 	ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
2740 	spa->spa_livelist_condense_zthr =
2741 	    zthr_create("z_livelist_condense",
2742 	    spa_livelist_condense_cb_check,
2743 	    spa_livelist_condense_cb, spa);
2744 }
2745 
2746 static void
2747 spa_spawn_aux_threads(spa_t *spa)
2748 {
2749 	ASSERT(spa_writeable(spa));
2750 
2751 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
2752 
2753 	spa_start_indirect_condensing_thread(spa);
2754 	spa_start_livelist_destroy_thread(spa);
2755 	spa_start_livelist_condensing_thread(spa);
2756 
2757 	ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
2758 	spa->spa_checkpoint_discard_zthr =
2759 	    zthr_create("z_checkpoint_discard",
2760 	    spa_checkpoint_discard_thread_check,
2761 	    spa_checkpoint_discard_thread, spa);
2762 }
2763 
2764 /*
2765  * Fix up config after a partly-completed split.  This is done with the
2766  * ZPOOL_CONFIG_SPLIT nvlist.  Both the splitting pool and the split-off
2767  * pool have that entry in their config, but only the splitting one contains
2768  * a list of all the guids of the vdevs that are being split off.
2769  *
2770  * This function determines what to do with that list: either rejoin
2771  * all the disks to the pool, or complete the splitting process.  To attempt
2772  * the rejoin, each disk that is offlined is marked online again, and
2773  * we do a reopen() call.  If the vdev label for every disk that was
2774  * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
2775  * then we call vdev_split() on each disk, and complete the split.
2776  *
2777  * Otherwise we leave the config alone, with all the vdevs in place in
2778  * the original pool.
2779  */
2780 static void
2781 spa_try_repair(spa_t *spa, nvlist_t *config)
2782 {
2783 	uint_t extracted;
2784 	uint64_t *glist;
2785 	uint_t i, gcount;
2786 	nvlist_t *nvl;
2787 	vdev_t **vd;
2788 	boolean_t attempt_reopen;
2789 
2790 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
2791 		return;
2792 
2793 	/* check that the config is complete */
2794 	if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
2795 	    &glist, &gcount) != 0)
2796 		return;
2797 
2798 	vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
2799 
2800 	/* attempt to online all the vdevs & validate */
2801 	attempt_reopen = B_TRUE;
2802 	for (i = 0; i < gcount; i++) {
2803 		if (glist[i] == 0)	/* vdev is hole */
2804 			continue;
2805 
2806 		vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
2807 		if (vd[i] == NULL) {
2808 			/*
2809 			 * Don't bother attempting to reopen the disks;
2810 			 * just do the split.
2811 			 */
2812 			attempt_reopen = B_FALSE;
2813 		} else {
2814 			/* attempt to re-online it */
2815 			vd[i]->vdev_offline = B_FALSE;
2816 		}
2817 	}
2818 
2819 	if (attempt_reopen) {
2820 		vdev_reopen(spa->spa_root_vdev);
2821 
2822 		/* check each device to see what state it's in */
2823 		for (extracted = 0, i = 0; i < gcount; i++) {
2824 			if (vd[i] != NULL &&
2825 			    vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
2826 				break;
2827 			++extracted;
2828 		}
2829 	}
2830 
2831 	/*
2832 	 * If every disk has been moved to the new pool, or if we never
2833 	 * even attempted to look at them, then we split them off for
2834 	 * good.
2835 	 */
2836 	if (!attempt_reopen || gcount == extracted) {
2837 		for (i = 0; i < gcount; i++)
2838 			if (vd[i] != NULL)
2839 				vdev_split(vd[i]);
2840 		vdev_reopen(spa->spa_root_vdev);
2841 	}
2842 
2843 	kmem_free(vd, gcount * sizeof (vdev_t *));
2844 }
2845 
2846 static int
2847 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
2848 {
2849 	char *ereport = FM_EREPORT_ZFS_POOL;
2850 	int error;
2851 
2852 	spa->spa_load_state = state;
2853 	(void) spa_import_progress_set_state(spa_guid(spa),
2854 	    spa_load_state(spa));
2855 
2856 	gethrestime(&spa->spa_loaded_ts);
2857 	error = spa_load_impl(spa, type, &ereport);
2858 
2859 	/*
2860 	 * Don't count references from objsets that are already closed
2861 	 * and are making their way through the eviction process.
2862 	 */
2863 	spa_evicting_os_wait(spa);
2864 	spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
2865 	if (error) {
2866 		if (error != EEXIST) {
2867 			spa->spa_loaded_ts.tv_sec = 0;
2868 			spa->spa_loaded_ts.tv_nsec = 0;
2869 		}
2870 		if (error != EBADF) {
2871 			(void) zfs_ereport_post(ereport, spa,
2872 			    NULL, NULL, NULL, 0, 0);
2873 		}
2874 	}
2875 	spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
2876 	spa->spa_ena = 0;
2877 
2878 	(void) spa_import_progress_set_state(spa_guid(spa),
2879 	    spa_load_state(spa));
2880 
2881 	return (error);
2882 }
2883 
2884 #ifdef ZFS_DEBUG
2885 /*
2886  * Count the number of per-vdev ZAPs associated with all of the vdevs in the
2887  * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
2888  * spa's per-vdev ZAP list.
2889  */
2890 static uint64_t
2891 vdev_count_verify_zaps(vdev_t *vd)
2892 {
2893 	spa_t *spa = vd->vdev_spa;
2894 	uint64_t total = 0;
2895 
2896 	if (vd->vdev_top_zap != 0) {
2897 		total++;
2898 		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2899 		    spa->spa_all_vdev_zaps, vd->vdev_top_zap));
2900 	}
2901 	if (vd->vdev_leaf_zap != 0) {
2902 		total++;
2903 		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2904 		    spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
2905 	}
2906 
2907 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
2908 		total += vdev_count_verify_zaps(vd->vdev_child[i]);
2909 	}
2910 
2911 	return (total);
2912 }
2913 #endif
2914 
2915 /*
2916  * Determine whether the activity check is required.
2917  */
2918 static boolean_t
2919 spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
2920     nvlist_t *config)
2921 {
2922 	uint64_t state = 0;
2923 	uint64_t hostid = 0;
2924 	uint64_t tryconfig_txg = 0;
2925 	uint64_t tryconfig_timestamp = 0;
2926 	uint16_t tryconfig_mmp_seq = 0;
2927 	nvlist_t *nvinfo;
2928 
2929 	if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
2930 		nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
2931 		(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
2932 		    &tryconfig_txg);
2933 		(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
2934 		    &tryconfig_timestamp);
2935 		(void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
2936 		    &tryconfig_mmp_seq);
2937 	}
2938 
2939 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
2940 
2941 	/*
2942 	 * Disable the MMP activity check - This is used by zdb which
2943 	 * is intended to be used on potentially active pools.
2944 	 */
2945 	if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
2946 		return (B_FALSE);
2947 
2948 	/*
2949 	 * Skip the activity check when the MMP feature is disabled.
2950 	 */
2951 	if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
2952 		return (B_FALSE);
2953 
2954 	/*
2955 	 * If the tryconfig_ values are nonzero, they are the results of an
2956 	 * earlier tryimport.  If they all match the uberblock we just found,
2957 	 * then the pool has not changed and we return false so we do not test
2958 	 * a second time.
2959 	 */
2960 	if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
2961 	    tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp &&
2962 	    tryconfig_mmp_seq && tryconfig_mmp_seq ==
2963 	    (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0))
2964 		return (B_FALSE);
2965 
2966 	/*
2967 	 * Allow the activity check to be skipped when importing the pool
2968 	 * on the same host which last imported it.  Since the hostid from
2969 	 * configuration may be stale use the one read from the label.
2970 	 */
2971 	if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
2972 		hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
2973 
2974 	if (hostid == spa_get_hostid(spa))
2975 		return (B_FALSE);
2976 
2977 	/*
2978 	 * Skip the activity test when the pool was cleanly exported.
2979 	 */
2980 	if (state != POOL_STATE_ACTIVE)
2981 		return (B_FALSE);
2982 
2983 	return (B_TRUE);
2984 }
2985 
2986 /*
2987  * Nanoseconds the activity check must watch for changes on-disk.
2988  */
2989 static uint64_t
2990 spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
2991 {
2992 	uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
2993 	uint64_t multihost_interval = MSEC2NSEC(
2994 	    MMP_INTERVAL_OK(zfs_multihost_interval));
2995 	uint64_t import_delay = MAX(NANOSEC, import_intervals *
2996 	    multihost_interval);
2997 
2998 	/*
2999 	 * Local tunables determine a minimum duration except for the case
3000 	 * where we know when the remote host will suspend the pool if MMP
3001 	 * writes do not land.
3002 	 *
3003 	 * See Big Theory comment at the top of mmp.c for the reasoning behind
3004 	 * these cases and times.
3005 	 */
3006 
3007 	ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);
3008 
3009 	if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
3010 	    MMP_FAIL_INT(ub) > 0) {
3011 
3012 		/* MMP on remote host will suspend pool after failed writes */
3013 		import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
3014 		    MMP_IMPORT_SAFETY_FACTOR / 100;
3015 
3016 		zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
3017 		    "mmp_fails=%llu ub_mmp mmp_interval=%llu "
3018 		    "import_intervals=%u", import_delay, MMP_FAIL_INT(ub),
3019 		    MMP_INTERVAL(ub), import_intervals);
3020 
3021 	} else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
3022 	    MMP_FAIL_INT(ub) == 0) {
3023 
3024 		/* MMP on remote host will never suspend pool */
3025 		import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
3026 		    ub->ub_mmp_delay) * import_intervals);
3027 
3028 		zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
3029 		    "mmp_interval=%llu ub_mmp_delay=%llu "
3030 		    "import_intervals=%u", import_delay, MMP_INTERVAL(ub),
3031 		    ub->ub_mmp_delay, import_intervals);
3032 
3033 	} else if (MMP_VALID(ub)) {
3034 		/*
3035 		 * zfs-0.7 compatibility case
3036 		 */
3037 
3038 		import_delay = MAX(import_delay, (multihost_interval +
3039 		    ub->ub_mmp_delay) * import_intervals);
3040 
3041 		zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
3042 		    "import_intervals=%u leaves=%u", import_delay,
3043 		    ub->ub_mmp_delay, import_intervals,
3044 		    vdev_count_leaves(spa));
3045 	} else {
3046 		/* Using local tunings is the only reasonable option */
3047 		zfs_dbgmsg("pool last imported on non-MMP aware "
3048 		    "host using import_delay=%llu multihost_interval=%llu "
3049 		    "import_intervals=%u", import_delay, multihost_interval,
3050 		    import_intervals);
3051 	}
3052 
3053 	return (import_delay);
3054 }
3055 
3056 /*
3057  * Perform the import activity check.  If the user canceled the import or
3058  * we detected activity then fail.
3059  */
3060 static int
3061 spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
3062 {
3063 	uint64_t txg = ub->ub_txg;
3064 	uint64_t timestamp = ub->ub_timestamp;
3065 	uint64_t mmp_config = ub->ub_mmp_config;
3066 	uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0;
3067 	uint64_t import_delay;
3068 	hrtime_t import_expire;
3069 	nvlist_t *mmp_label = NULL;
3070 	vdev_t *rvd = spa->spa_root_vdev;
3071 	kcondvar_t cv;
3072 	kmutex_t mtx;
3073 	int error = 0;
3074 
3075 	cv_init(&cv, NULL, CV_DEFAULT, NULL);
3076 	mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
3077 	mutex_enter(&mtx);
3078 
3079 	/*
3080 	 * If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed
3081 	 * during the earlier tryimport.  If the txg recorded there is 0 then
3082 	 * the pool is known to be active on another host.
3083 	 *
3084 	 * Otherwise, the pool might be in use on another host.  Check for
3085 	 * changes in the uberblocks on disk if necessary.
3086 	 */
3087 	if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
3088 		nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
3089 		    ZPOOL_CONFIG_LOAD_INFO);
3090 
3091 		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
3092 		    fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
3093 			vdev_uberblock_load(rvd, ub, &mmp_label);
3094 			error = SET_ERROR(EREMOTEIO);
3095 			goto out;
3096 		}
3097 	}
3098 
3099 	import_delay = spa_activity_check_duration(spa, ub);
3100 
3101 	/* Add a small random factor in case of simultaneous imports (0-25%) */
3102 	import_delay += import_delay * spa_get_random(250) / 1000;
3103 
3104 	import_expire = gethrtime() + import_delay;
3105 
3106 	while (gethrtime() < import_expire) {
3107 		(void) spa_import_progress_set_mmp_check(spa_guid(spa),
3108 		    NSEC2SEC(import_expire - gethrtime()));
3109 
3110 		vdev_uberblock_load(rvd, ub, &mmp_label);
3111 
3112 		if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
3113 		    mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) {
3114 			zfs_dbgmsg("multihost activity detected "
3115 			    "txg %llu ub_txg  %llu "
3116 			    "timestamp %llu ub_timestamp  %llu "
3117 			    "mmp_config %#llx ub_mmp_config %#llx",
3118 			    txg, ub->ub_txg, timestamp, ub->ub_timestamp,
3119 			    mmp_config, ub->ub_mmp_config);
3120 
3121 			error = SET_ERROR(EREMOTEIO);
3122 			break;
3123 		}
3124 
3125 		if (mmp_label) {
3126 			nvlist_free(mmp_label);
3127 			mmp_label = NULL;
3128 		}
3129 
3130 		error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
3131 		if (error != -1) {
3132 			error = SET_ERROR(EINTR);
3133 			break;
3134 		}
3135 		error = 0;
3136 	}
3137 
3138 out:
3139 	mutex_exit(&mtx);
3140 	mutex_destroy(&mtx);
3141 	cv_destroy(&cv);
3142 
3143 	/*
3144 	 * If the pool is determined to be active store the status in the
3145 	 * spa->spa_load_info nvlist.  If the remote hostname or hostid are
3146 	 * available from configuration read from disk store them as well.
3147 	 * This allows 'zpool import' to generate a more useful message.
3148 	 *
3149 	 * ZPOOL_CONFIG_MMP_STATE    - observed pool status (mandatory)
3150 	 * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
3151 	 * ZPOOL_CONFIG_MMP_HOSTID   - hostid from the active pool
3152 	 */
3153 	if (error == EREMOTEIO) {
3154 		char *hostname = "<unknown>";
3155 		uint64_t hostid = 0;
3156 
3157 		if (mmp_label) {
3158 			if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
3159 				hostname = fnvlist_lookup_string(mmp_label,
3160 				    ZPOOL_CONFIG_HOSTNAME);
3161 				fnvlist_add_string(spa->spa_load_info,
3162 				    ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
3163 			}
3164 
3165 			if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
3166 				hostid = fnvlist_lookup_uint64(mmp_label,
3167 				    ZPOOL_CONFIG_HOSTID);
3168 				fnvlist_add_uint64(spa->spa_load_info,
3169 				    ZPOOL_CONFIG_MMP_HOSTID, hostid);
3170 			}
3171 		}
3172 
3173 		fnvlist_add_uint64(spa->spa_load_info,
3174 		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
3175 		fnvlist_add_uint64(spa->spa_load_info,
3176 		    ZPOOL_CONFIG_MMP_TXG, 0);
3177 
3178 		error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
3179 	}
3180 
3181 	if (mmp_label)
3182 		nvlist_free(mmp_label);
3183 
3184 	return (error);
3185 }
3186 
3187 static int
3188 spa_verify_host(spa_t *spa, nvlist_t *mos_config)
3189 {
3190 	uint64_t hostid;
3191 	char *hostname;
3192 	uint64_t myhostid = 0;
3193 
3194 	if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
3195 	    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
3196 		hostname = fnvlist_lookup_string(mos_config,
3197 		    ZPOOL_CONFIG_HOSTNAME);
3198 
3199 		myhostid = zone_get_hostid(NULL);
3200 
3201 		if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
3202 			cmn_err(CE_WARN, "pool '%s' could not be "
3203 			    "loaded as it was last accessed by "
3204 			    "another system (host: %s hostid: 0x%llx). "
3205 			    "See: https://openzfs.github.io/openzfs-docs/msg/"
3206 			    "ZFS-8000-EY",
3207 			    spa_name(spa), hostname, (u_longlong_t)hostid);
3208 			spa_load_failed(spa, "hostid verification failed: pool "
3209 			    "last accessed by host: %s (hostid: 0x%llx)",
3210 			    hostname, (u_longlong_t)hostid);
3211 			return (SET_ERROR(EBADF));
3212 		}
3213 	}
3214 
3215 	return (0);
3216 }
3217 
3218 static int
3219 spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
3220 {
3221 	int error = 0;
3222 	nvlist_t *nvtree, *nvl, *config = spa->spa_config;
3223 	int parse;
3224 	vdev_t *rvd;
3225 	uint64_t pool_guid;
3226 	char *comment;
3227 
3228 	/*
3229 	 * Versioning wasn't explicitly added to the label until later, so if
3230 	 * it's not present treat it as the initial version.
3231 	 */
3232 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
3233 	    &spa->spa_ubsync.ub_version) != 0)
3234 		spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
3235 
3236 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
3237 		spa_load_failed(spa, "invalid config provided: '%s' missing",
3238 		    ZPOOL_CONFIG_POOL_GUID);
3239 		return (SET_ERROR(EINVAL));
3240 	}
3241 
3242 	/*
3243 	 * If we are doing an import, ensure that the pool is not already
3244 	 * imported by checking if its pool guid already exists in the
3245 	 * spa namespace.
3246 	 *
3247 	 * The only case that we allow an already imported pool to be
3248 	 * imported again, is when the pool is checkpointed and we want to
3249 	 * look at its checkpointed state from userland tools like zdb.
3250 	 */
3251 #ifdef _KERNEL
3252 	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
3253 	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
3254 	    spa_guid_exists(pool_guid, 0)) {
3255 #else
3256 	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
3257 	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
3258 	    spa_guid_exists(pool_guid, 0) &&
3259 	    !spa_importing_readonly_checkpoint(spa)) {
3260 #endif
3261 		spa_load_failed(spa, "a pool with guid %llu is already open",
3262 		    (u_longlong_t)pool_guid);
3263 		return (SET_ERROR(EEXIST));
3264 	}
3265 
3266 	spa->spa_config_guid = pool_guid;
3267 
3268 	nvlist_free(spa->spa_load_info);
3269 	spa->spa_load_info = fnvlist_alloc();
3270 
3271 	ASSERT(spa->spa_comment == NULL);
3272 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
3273 		spa->spa_comment = spa_strdup(comment);
3274 
3275 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
3276 	    &spa->spa_config_txg);
3277 
3278 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
3279 		spa->spa_config_splitting = fnvlist_dup(nvl);
3280 
3281 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
3282 		spa_load_failed(spa, "invalid config provided: '%s' missing",
3283 		    ZPOOL_CONFIG_VDEV_TREE);
3284 		return (SET_ERROR(EINVAL));
3285 	}
3286 
3287 	/*
3288 	 * Create "The Godfather" zio to hold all async IOs
3289 	 */
3290 	spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
3291 	    KM_SLEEP);
3292 	for (int i = 0; i < max_ncpus; i++) {
3293 		spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
3294 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3295 		    ZIO_FLAG_GODFATHER);
3296 	}
3297 
3298 	/*
3299 	 * Parse the configuration into a vdev tree.  We explicitly set the
3300 	 * value that will be returned by spa_version() since parsing the
3301 	 * configuration requires knowing the version number.
3302 	 */
3303 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3304 	parse = (type == SPA_IMPORT_EXISTING ?
3305 	    VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
3306 	error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
3307 	spa_config_exit(spa, SCL_ALL, FTAG);
3308 
3309 	if (error != 0) {
3310 		spa_load_failed(spa, "unable to parse config [error=%d]",
3311 		    error);
3312 		return (error);
3313 	}
3314 
3315 	ASSERT(spa->spa_root_vdev == rvd);
3316 	ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
3317 	ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
3318 
3319 	if (type != SPA_IMPORT_ASSEMBLE) {
3320 		ASSERT(spa_guid(spa) == pool_guid);
3321 	}
3322 
3323 	return (0);
3324 }
3325 
3326 /*
3327  * Recursively open all vdevs in the vdev tree. This function is called twice:
3328  * first with the untrusted config, then with the trusted config.
3329  */
3330 static int
3331 spa_ld_open_vdevs(spa_t *spa)
3332 {
3333 	int error = 0;
3334 
3335 	/*
3336 	 * spa_missing_tvds_allowed defines how many top-level vdevs can be
3337 	 * missing/unopenable for the root vdev to be still considered openable.
3338 	 */
3339 	if (spa->spa_trust_config) {
3340 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
3341 	} else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
3342 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
3343 	} else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
3344 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
3345 	} else {
3346 		spa->spa_missing_tvds_allowed = 0;
3347 	}
3348 
3349 	spa->spa_missing_tvds_allowed =
3350 	    MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
3351 
3352 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3353 	error = vdev_open(spa->spa_root_vdev);
3354 	spa_config_exit(spa, SCL_ALL, FTAG);
3355 
3356 	if (spa->spa_missing_tvds != 0) {
3357 		spa_load_note(spa, "vdev tree has %lld missing top-level "
3358 		    "vdevs.", (u_longlong_t)spa->spa_missing_tvds);
3359 		if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) {
3360 			/*
3361 			 * Although theoretically we could allow users to open
3362 			 * incomplete pools in RW mode, we'd need to add a lot
3363 			 * of extra logic (e.g. adjust pool space to account
3364 			 * for missing vdevs).
3365 			 * This limitation also prevents users from accidentally
3366 			 * opening the pool in RW mode during data recovery and
3367 			 * damaging it further.
3368 			 */
3369 			spa_load_note(spa, "pools with missing top-level "
3370 			    "vdevs can only be opened in read-only mode.");
3371 			error = SET_ERROR(ENXIO);
3372 		} else {
3373 			spa_load_note(spa, "current settings allow for maximum "
3374 			    "%lld missing top-level vdevs at this stage.",
3375 			    (u_longlong_t)spa->spa_missing_tvds_allowed);
3376 		}
3377 	}
3378 	if (error != 0) {
3379 		spa_load_failed(spa, "unable to open vdev tree [error=%d]",
3380 		    error);
3381 	}
3382 	if (spa->spa_missing_tvds != 0 || error != 0)
3383 		vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
3384 
3385 	return (error);
3386 }
3387 
3388 /*
3389  * We need to validate the vdev labels against the configuration that
3390  * we have in hand. This function is called twice: first with an untrusted
3391  * config, then with a trusted config. The validation is more strict when the
3392  * config is trusted.
3393  */
3394 static int
3395 spa_ld_validate_vdevs(spa_t *spa)
3396 {
3397 	int error = 0;
3398 	vdev_t *rvd = spa->spa_root_vdev;
3399 
3400 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3401 	error = vdev_validate(rvd);
3402 	spa_config_exit(spa, SCL_ALL, FTAG);
3403 
3404 	if (error != 0) {
3405 		spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
3406 		return (error);
3407 	}
3408 
3409 	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
3410 		spa_load_failed(spa, "cannot open vdev tree after invalidating "
3411 		    "some vdevs");
3412 		vdev_dbgmsg_print_tree(rvd, 2);
3413 		return (SET_ERROR(ENXIO));
3414 	}
3415 
3416 	return (0);
3417 }
3418 
3419 static void
3420 spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
3421 {
3422 	spa->spa_state = POOL_STATE_ACTIVE;
3423 	spa->spa_ubsync = spa->spa_uberblock;
3424 	spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
3425 	    TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
3426 	spa->spa_first_txg = spa->spa_last_ubsync_txg ?
3427 	    spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
3428 	spa->spa_claim_max_txg = spa->spa_first_txg;
3429 	spa->spa_prev_software_version = ub->ub_software_version;
3430 }
3431 
3432 static int
3433 spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
3434 {
3435 	vdev_t *rvd = spa->spa_root_vdev;
3436 	nvlist_t *label;
3437 	uberblock_t *ub = &spa->spa_uberblock;
3438 	boolean_t activity_check = B_FALSE;
3439 
3440 	/*
3441 	 * If we are opening the checkpointed state of the pool by
3442 	 * rewinding to it, at this point we will have written the
3443 	 * checkpointed uberblock to the vdev labels, so searching
3444 	 * the labels will find the right uberblock.  However, if
3445 	 * we are opening the checkpointed state read-only, we have
3446 	 * not modified the labels. Therefore, we must ignore the
3447 	 * labels and continue using the spa_uberblock that was set
3448 	 * by spa_ld_checkpoint_rewind.
3449 	 *
3450 	 * Note that it would be fine to ignore the labels when
3451 	 * rewinding (opening writeable) as well. However, if we
3452 	 * crash just after writing the labels, we will end up
3453 	 * searching the labels. Doing so in the common case means
3454 	 * that this code path gets exercised normally, rather than
3455 	 * just in the edge case.
3456 	 */
3457 	if (ub->ub_checkpoint_txg != 0 &&
3458 	    spa_importing_readonly_checkpoint(spa)) {
3459 		spa_ld_select_uberblock_done(spa, ub);
3460 		return (0);
3461 	}
3462 
3463 	/*
3464 	 * Find the best uberblock.
3465 	 */
3466 	vdev_uberblock_load(rvd, ub, &label);
3467 
3468 	/*
3469 	 * If we weren't able to find a single valid uberblock, return failure.
3470 	 */
3471 	if (ub->ub_txg == 0) {
3472 		nvlist_free(label);
3473 		spa_load_failed(spa, "no valid uberblock found");
3474 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
3475 	}
3476 
3477 	if (spa->spa_load_max_txg != UINT64_MAX) {
3478 		(void) spa_import_progress_set_max_txg(spa_guid(spa),
3479 		    (u_longlong_t)spa->spa_load_max_txg);
3480 	}
3481 	spa_load_note(spa, "using uberblock with txg=%llu",
3482 	    (u_longlong_t)ub->ub_txg);
3483 
3484 
3485 	/*
3486 	 * For pools which have the multihost property on determine if the
3487 	 * pool is truly inactive and can be safely imported.  Prevent
3488 	 * hosts which don't have a hostid set from importing the pool.
3489 	 */
3490 	activity_check = spa_activity_check_required(spa, ub, label,
3491 	    spa->spa_config);
3492 	if (activity_check) {
3493 		if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
3494 		    spa_get_hostid(spa) == 0) {
3495 			nvlist_free(label);
3496 			fnvlist_add_uint64(spa->spa_load_info,
3497 			    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
3498 			return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
3499 		}
3500 
3501 		int error = spa_activity_check(spa, ub, spa->spa_config);
3502 		if (error) {
3503 			nvlist_free(label);
3504 			return (error);
3505 		}
3506 
3507 		fnvlist_add_uint64(spa->spa_load_info,
3508 		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
3509 		fnvlist_add_uint64(spa->spa_load_info,
3510 		    ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
3511 		fnvlist_add_uint16(spa->spa_load_info,
3512 		    ZPOOL_CONFIG_MMP_SEQ,
3513 		    (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0));
3514 	}
3515 
3516 	/*
3517 	 * If the pool has an unsupported version we can't open it.
3518 	 */
3519 	if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
3520 		nvlist_free(label);
3521 		spa_load_failed(spa, "version %llu is not supported",
3522 		    (u_longlong_t)ub->ub_version);
3523 		return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
3524 	}
3525 
3526 	if (ub->ub_version >= SPA_VERSION_FEATURES) {
3527 		nvlist_t *features;
3528 
3529 		/*
3530 		 * If we weren't able to find what's necessary for reading the
3531 		 * MOS in the label, return failure.
3532 		 */
3533 		if (label == NULL) {
3534 			spa_load_failed(spa, "label config unavailable");
3535 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
3536 			    ENXIO));
3537 		}
3538 
3539 		if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
3540 		    &features) != 0) {
3541 			nvlist_free(label);
3542 			spa_load_failed(spa, "invalid label: '%s' missing",
3543 			    ZPOOL_CONFIG_FEATURES_FOR_READ);
3544 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
3545 			    ENXIO));
3546 		}
3547 
3548 		/*
3549 		 * Update our in-core representation with the definitive values
3550 		 * from the label.
3551 		 */
3552 		nvlist_free(spa->spa_label_features);
3553 		VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
3554 	}
3555 
3556 	nvlist_free(label);
3557 
3558 	/*
3559 	 * Look through entries in the label nvlist's features_for_read. If
3560 	 * there is a feature listed there which we don't understand then we
3561 	 * cannot open a pool.
3562 	 */
3563 	if (ub->ub_version >= SPA_VERSION_FEATURES) {
3564 		nvlist_t *unsup_feat;
3565 
3566 		VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
3567 		    0);
3568 
3569 		for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
3570 		    NULL); nvp != NULL;
3571 		    nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
3572 			if (!zfeature_is_supported(nvpair_name(nvp))) {
3573 				VERIFY(nvlist_add_string(unsup_feat,
3574 				    nvpair_name(nvp), "") == 0);
3575 			}
3576 		}
3577 
3578 		if (!nvlist_empty(unsup_feat)) {
3579 			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
3580 			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
3581 			nvlist_free(unsup_feat);
3582 			spa_load_failed(spa, "some features are unsupported");
3583 			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
3584 			    ENOTSUP));
3585 		}
3586 
3587 		nvlist_free(unsup_feat);
3588 	}
3589 
3590 	if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
3591 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3592 		spa_try_repair(spa, spa->spa_config);
3593 		spa_config_exit(spa, SCL_ALL, FTAG);
3594 		nvlist_free(spa->spa_config_splitting);
3595 		spa->spa_config_splitting = NULL;
3596 	}
3597 
3598 	/*
3599 	 * Initialize internal SPA structures.
3600 	 */
3601 	spa_ld_select_uberblock_done(spa, ub);
3602 
3603 	return (0);
3604 }
3605 
3606 static int
3607 spa_ld_open_rootbp(spa_t *spa)
3608 {
3609 	int error = 0;
3610 	vdev_t *rvd = spa->spa_root_vdev;
3611 
3612 	error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
3613 	if (error != 0) {
3614 		spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
3615 		    "[error=%d]", error);
3616 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3617 	}
3618 	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
3619 
3620 	return (0);
3621 }
3622 
3623 static int
3624 spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
3625     boolean_t reloading)
3626 {
3627 	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
3628 	nvlist_t *nv, *mos_config, *policy;
3629 	int error = 0, copy_error;
3630 	uint64_t healthy_tvds, healthy_tvds_mos;
3631 	uint64_t mos_config_txg;
3632 
3633 	if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
3634 	    != 0)
3635 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3636 
3637 	/*
3638 	 * If we're assembling a pool from a split, the config provided is
3639 	 * already trusted so there is nothing to do.
3640 	 */
3641 	if (type == SPA_IMPORT_ASSEMBLE)
3642 		return (0);
3643 
3644 	healthy_tvds = spa_healthy_core_tvds(spa);
3645 
3646 	if (load_nvlist(spa, spa->spa_config_object, &mos_config)
3647 	    != 0) {
3648 		spa_load_failed(spa, "unable to retrieve MOS config");
3649 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3650 	}
3651 
3652 	/*
3653 	 * If we are doing an open, pool owner wasn't verified yet, thus do
3654 	 * the verification here.
3655 	 */
3656 	if (spa->spa_load_state == SPA_LOAD_OPEN) {
3657 		error = spa_verify_host(spa, mos_config);
3658 		if (error != 0) {
3659 			nvlist_free(mos_config);
3660 			return (error);
3661 		}
3662 	}
3663 
3664 	nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
3665 
3666 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3667 
3668 	/*
3669 	 * Build a new vdev tree from the trusted config
3670 	 */
3671 	VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
3672 
3673 	/*
3674 	 * Vdev paths in the MOS may be obsolete. If the untrusted config was
3675 	 * obtained by scanning /dev/dsk, then it will have the right vdev
3676 	 * paths. We update the trusted MOS config with this information.
3677 	 * We first try to copy the paths with vdev_copy_path_strict, which
3678 	 * succeeds only when both configs have exactly the same vdev tree.
3679 	 * If that fails, we fall back to a more flexible method that has a
3680 	 * best effort policy.
3681 	 */
3682 	copy_error = vdev_copy_path_strict(rvd, mrvd);
3683 	if (copy_error != 0 || spa_load_print_vdev_tree) {
3684 		spa_load_note(spa, "provided vdev tree:");
3685 		vdev_dbgmsg_print_tree(rvd, 2);
3686 		spa_load_note(spa, "MOS vdev tree:");
3687 		vdev_dbgmsg_print_tree(mrvd, 2);
3688 	}
3689 	if (copy_error != 0) {
3690 		spa_load_note(spa, "vdev_copy_path_strict failed, falling "
3691 		    "back to vdev_copy_path_relaxed");
3692 		vdev_copy_path_relaxed(rvd, mrvd);
3693 	}
3694 
3695 	vdev_close(rvd);
3696 	vdev_free(rvd);
3697 	spa->spa_root_vdev = mrvd;
3698 	rvd = mrvd;
3699 	spa_config_exit(spa, SCL_ALL, FTAG);
3700 
3701 	/*
3702 	 * We will use spa_config if we decide to reload the spa or if spa_load
3703 	 * fails and we rewind. We must thus regenerate the config using the
3704 	 * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
3705 	 * pass settings on how to load the pool and is not stored in the MOS.
3706 	 * We copy it over to our new, trusted config.
3707 	 */
3708 	mos_config_txg = fnvlist_lookup_uint64(mos_config,
3709 	    ZPOOL_CONFIG_POOL_TXG);
3710 	nvlist_free(mos_config);
3711 	mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
3712 	if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
3713 	    &policy) == 0)
3714 		fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
3715 	spa_config_set(spa, mos_config);
3716 	spa->spa_config_source = SPA_CONFIG_SRC_MOS;
3717 
3718 	/*
3719 	 * Now that we got the config from the MOS, we should be more strict
3720 	 * in checking blkptrs and can make assumptions about the consistency
3721 	 * of the vdev tree. spa_trust_config must be set to true before opening
3722 	 * vdevs in order for them to be writeable.
3723 	 */
3724 	spa->spa_trust_config = B_TRUE;
3725 
3726 	/*
3727 	 * Open and validate the new vdev tree
3728 	 */
3729 	error = spa_ld_open_vdevs(spa);
3730 	if (error != 0)
3731 		return (error);
3732 
3733 	error = spa_ld_validate_vdevs(spa);
3734 	if (error != 0)
3735 		return (error);
3736 
3737 	if (copy_error != 0 || spa_load_print_vdev_tree) {
3738 		spa_load_note(spa, "final vdev tree:");
3739 		vdev_dbgmsg_print_tree(rvd, 2);
3740 	}
3741 
3742 	if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
3743 	    !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
3744 		/*
3745 		 * Sanity check to make sure that we are indeed loading the
3746 		 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
3747 		 * in the config provided and they happened to be the only ones
3748 		 * to have the latest uberblock, we could involuntarily perform
3749 		 * an extreme rewind.
3750 		 */
3751 		healthy_tvds_mos = spa_healthy_core_tvds(spa);
3752 		if (healthy_tvds_mos - healthy_tvds >=
3753 		    SPA_SYNC_MIN_VDEVS) {
3754 			spa_load_note(spa, "config provided misses too many "
3755 			    "top-level vdevs compared to MOS (%lld vs %lld). ",
3756 			    (u_longlong_t)healthy_tvds,
3757 			    (u_longlong_t)healthy_tvds_mos);
3758 			spa_load_note(spa, "vdev tree:");
3759 			vdev_dbgmsg_print_tree(rvd, 2);
3760 			if (reloading) {
3761 				spa_load_failed(spa, "config was already "
3762 				    "provided from MOS. Aborting.");
3763 				return (spa_vdev_err(rvd,
3764 				    VDEV_AUX_CORRUPT_DATA, EIO));
3765 			}
3766 			spa_load_note(spa, "spa must be reloaded using MOS "
3767 			    "config");
3768 			return (SET_ERROR(EAGAIN));
3769 		}
3770 	}
3771 
3772 	error = spa_check_for_missing_logs(spa);
3773 	if (error != 0)
3774 		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
3775 
3776 	if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
3777 		spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
3778 		    "guid sum (%llu != %llu)",
3779 		    (u_longlong_t)spa->spa_uberblock.ub_guid_sum,
3780 		    (u_longlong_t)rvd->vdev_guid_sum);
3781 		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
3782 		    ENXIO));
3783 	}
3784 
3785 	return (0);
3786 }
3787 
3788 static int
3789 spa_ld_open_indirect_vdev_metadata(spa_t *spa)
3790 {
3791 	int error = 0;
3792 	vdev_t *rvd = spa->spa_root_vdev;
3793 
3794 	/*
3795 	 * Everything that we read before spa_remove_init() must be stored
3796 	 * on concreted vdevs.  Therefore we do this as early as possible.
3797 	 */
3798 	error = spa_remove_init(spa);
3799 	if (error != 0) {
3800 		spa_load_failed(spa, "spa_remove_init failed [error=%d]",
3801 		    error);
3802 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3803 	}
3804 
3805 	/*
3806 	 * Retrieve information needed to condense indirect vdev mappings.
3807 	 */
3808 	error = spa_condense_init(spa);
3809 	if (error != 0) {
3810 		spa_load_failed(spa, "spa_condense_init failed [error=%d]",
3811 		    error);
3812 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
3813 	}
3814 
3815 	return (0);
3816 }
3817 
3818 static int
3819 spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
3820 {
3821 	int error = 0;
3822 	vdev_t *rvd = spa->spa_root_vdev;
3823 
3824 	if (spa_version(spa) >= SPA_VERSION_FEATURES) {
3825 		boolean_t missing_feat_read = B_FALSE;
3826 		nvlist_t *unsup_feat, *enabled_feat;
3827 
3828 		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
3829 		    &spa->spa_feat_for_read_obj, B_TRUE) != 0) {
3830 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3831 		}
3832 
3833 		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
3834 		    &spa->spa_feat_for_write_obj, B_TRUE) != 0) {
3835 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3836 		}
3837 
3838 		if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
3839 		    &spa->spa_feat_desc_obj, B_TRUE) != 0) {
3840 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3841 		}
3842 
3843 		enabled_feat = fnvlist_alloc();
3844 		unsup_feat = fnvlist_alloc();
3845 
3846 		if (!spa_features_check(spa, B_FALSE,
3847 		    unsup_feat, enabled_feat))
3848 			missing_feat_read = B_TRUE;
3849 
3850 		if (spa_writeable(spa) ||
3851 		    spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
3852 			if (!spa_features_check(spa, B_TRUE,
3853 			    unsup_feat, enabled_feat)) {
3854 				*missing_feat_writep = B_TRUE;
3855 			}
3856 		}
3857 
3858 		fnvlist_add_nvlist(spa->spa_load_info,
3859 		    ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
3860 
3861 		if (!nvlist_empty(unsup_feat)) {
3862 			fnvlist_add_nvlist(spa->spa_load_info,
3863 			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
3864 		}
3865 
3866 		fnvlist_free(enabled_feat);
3867 		fnvlist_free(unsup_feat);
3868 
3869 		if (!missing_feat_read) {
3870 			fnvlist_add_boolean(spa->spa_load_info,
3871 			    ZPOOL_CONFIG_CAN_RDONLY);
3872 		}
3873 
3874 		/*
3875 		 * If the state is SPA_LOAD_TRYIMPORT, our objective is
3876 		 * twofold: to determine whether the pool is available for
3877 		 * import in read-write mode and (if it is not) whether the
3878 		 * pool is available for import in read-only mode. If the pool
3879 		 * is available for import in read-write mode, it is displayed
3880 		 * as available in userland; if it is not available for import
3881 		 * in read-only mode, it is displayed as unavailable in
3882 		 * userland. If the pool is available for import in read-only
3883 		 * mode but not read-write mode, it is displayed as unavailable
3884 		 * in userland with a special note that the pool is actually
3885 		 * available for open in read-only mode.
3886 		 *
3887 		 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
3888 		 * missing a feature for write, we must first determine whether
3889 		 * the pool can be opened read-only before returning to
3890 		 * userland in order to know whether to display the
3891 		 * abovementioned note.
3892 		 */
3893 		if (missing_feat_read || (*missing_feat_writep &&
3894 		    spa_writeable(spa))) {
3895 			spa_load_failed(spa, "pool uses unsupported features");
3896 			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
3897 			    ENOTSUP));
3898 		}
3899 
3900 		/*
3901 		 * Load refcounts for ZFS features from disk into an in-memory
3902 		 * cache during SPA initialization.
3903 		 */
3904 		for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
3905 			uint64_t refcount;
3906 
3907 			error = feature_get_refcount_from_disk(spa,
3908 			    &spa_feature_table[i], &refcount);
3909 			if (error == 0) {
3910 				spa->spa_feat_refcount_cache[i] = refcount;
3911 			} else if (error == ENOTSUP) {
3912 				spa->spa_feat_refcount_cache[i] =
3913 				    SPA_FEATURE_DISABLED;
3914 			} else {
3915 				spa_load_failed(spa, "error getting refcount "
3916 				    "for feature %s [error=%d]",
3917 				    spa_feature_table[i].fi_guid, error);
3918 				return (spa_vdev_err(rvd,
3919 				    VDEV_AUX_CORRUPT_DATA, EIO));
3920 			}
3921 		}
3922 	}
3923 
3924 	if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
3925 		if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
3926 		    &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
3927 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3928 	}
3929 
3930 	/*
3931 	 * Encryption was added before bookmark_v2, even though bookmark_v2
3932 	 * is now a dependency. If this pool has encryption enabled without
3933 	 * bookmark_v2, trigger an errata message.
3934 	 */
3935 	if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
3936 	    !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
3937 		spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
3938 	}
3939 
3940 	return (0);
3941 }
3942 
3943 static int
3944 spa_ld_load_special_directories(spa_t *spa)
3945 {
3946 	int error = 0;
3947 	vdev_t *rvd = spa->spa_root_vdev;
3948 
3949 	spa->spa_is_initializing = B_TRUE;
3950 	error = dsl_pool_open(spa->spa_dsl_pool);
3951 	spa->spa_is_initializing = B_FALSE;
3952 	if (error != 0) {
3953 		spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
3954 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3955 	}
3956 
3957 	return (0);
3958 }
3959 
3960 static int
3961 spa_ld_get_props(spa_t *spa)
3962 {
3963 	int error = 0;
3964 	uint64_t obj;
3965 	vdev_t *rvd = spa->spa_root_vdev;
3966 
3967 	/* Grab the checksum salt from the MOS. */
3968 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3969 	    DMU_POOL_CHECKSUM_SALT, 1,
3970 	    sizeof (spa->spa_cksum_salt.zcs_bytes),
3971 	    spa->spa_cksum_salt.zcs_bytes);
3972 	if (error == ENOENT) {
3973 		/* Generate a new salt for subsequent use */
3974 		(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
3975 		    sizeof (spa->spa_cksum_salt.zcs_bytes));
3976 	} else if (error != 0) {
3977 		spa_load_failed(spa, "unable to retrieve checksum salt from "
3978 		    "MOS [error=%d]", error);
3979 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3980 	}
3981 
3982 	if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
3983 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3984 	error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
3985 	if (error != 0) {
3986 		spa_load_failed(spa, "error opening deferred-frees bpobj "
3987 		    "[error=%d]", error);
3988 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3989 	}
3990 
3991 	/*
3992 	 * Load the bit that tells us to use the new accounting function
3993 	 * (raid-z deflation).  If we have an older pool, this will not
3994 	 * be present.
3995 	 */
3996 	error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
3997 	if (error != 0 && error != ENOENT)
3998 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3999 
4000 	error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
4001 	    &spa->spa_creation_version, B_FALSE);
4002 	if (error != 0 && error != ENOENT)
4003 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4004 
4005 	/*
4006 	 * Load the persistent error log.  If we have an older pool, this will
4007 	 * not be present.
4008 	 */
4009 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
4010 	    B_FALSE);
4011 	if (error != 0 && error != ENOENT)
4012 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4013 
4014 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
4015 	    &spa->spa_errlog_scrub, B_FALSE);
4016 	if (error != 0 && error != ENOENT)
4017 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4018 
4019 	/*
4020 	 * Load the livelist deletion field. If a livelist is queued for
4021 	 * deletion, indicate that in the spa
4022 	 */
4023 	error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
4024 	    &spa->spa_livelists_to_delete, B_FALSE);
4025 	if (error != 0 && error != ENOENT)
4026 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4027 
4028 	/*
4029 	 * Load the history object.  If we have an older pool, this
4030 	 * will not be present.
4031 	 */
4032 	error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
4033 	if (error != 0 && error != ENOENT)
4034 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4035 
4036 	/*
4037 	 * Load the per-vdev ZAP map. If we have an older pool, this will not
4038 	 * be present; in this case, defer its creation to a later time to
4039 	 * avoid dirtying the MOS this early / out of sync context. See
4040 	 * spa_sync_config_object.
4041 	 */
4042 
4043 	/* The sentinel is only available in the MOS config. */
4044 	nvlist_t *mos_config;
4045 	if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
4046 		spa_load_failed(spa, "unable to retrieve MOS config");
4047 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4048 	}
4049 
4050 	error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
4051 	    &spa->spa_all_vdev_zaps, B_FALSE);
4052 
4053 	if (error == ENOENT) {
4054 		VERIFY(!nvlist_exists(mos_config,
4055 		    ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
4056 		spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
4057 		ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
4058 	} else if (error != 0) {
4059 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4060 	} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
4061 		/*
4062 		 * An older version of ZFS overwrote the sentinel value, so
4063 		 * we have orphaned per-vdev ZAPs in the MOS. Defer their
4064 		 * destruction to later; see spa_sync_config_object.
4065 		 */
4066 		spa->spa_avz_action = AVZ_ACTION_DESTROY;
4067 		/*
4068 		 * We're assuming that no vdevs have had their ZAPs created
4069 		 * before this. Better be sure of it.
4070 		 */
4071 		ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
4072 	}
4073 	nvlist_free(mos_config);
4074 
4075 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
4076 
4077 	error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
4078 	    B_FALSE);
4079 	if (error && error != ENOENT)
4080 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4081 
4082 	if (error == 0) {
4083 		uint64_t autoreplace;
4084 
4085 		spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
4086 		spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
4087 		spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
4088 		spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
4089 		spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
4090 		spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
4091 		spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
4092 		spa->spa_autoreplace = (autoreplace != 0);
4093 	}
4094 
4095 	/*
4096 	 * If we are importing a pool with missing top-level vdevs,
4097 	 * we enforce that the pool doesn't panic or get suspended on
4098 	 * error since the likelihood of missing data is extremely high.
4099 	 */
4100 	if (spa->spa_missing_tvds > 0 &&
4101 	    spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
4102 	    spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
4103 		spa_load_note(spa, "forcing failmode to 'continue' "
4104 		    "as some top level vdevs are missing");
4105 		spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
4106 	}
4107 
4108 	return (0);
4109 }
4110 
4111 static int
4112 spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
4113 {
4114 	int error = 0;
4115 	vdev_t *rvd = spa->spa_root_vdev;
4116 
4117 	/*
4118 	 * If we're assembling the pool from the split-off vdevs of
4119 	 * an existing pool, we don't want to attach the spares & cache
4120 	 * devices.
4121 	 */
4122 
4123 	/*
4124 	 * Load any hot spares for this pool.
4125 	 */
4126 	error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
4127 	    B_FALSE);
4128 	if (error != 0 && error != ENOENT)
4129 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4130 	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
4131 		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
4132 		if (load_nvlist(spa, spa->spa_spares.sav_object,
4133 		    &spa->spa_spares.sav_config) != 0) {
4134 			spa_load_failed(spa, "error loading spares nvlist");
4135 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4136 		}
4137 
4138 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4139 		spa_load_spares(spa);
4140 		spa_config_exit(spa, SCL_ALL, FTAG);
4141 	} else if (error == 0) {
4142 		spa->spa_spares.sav_sync = B_TRUE;
4143 	}
4144 
4145 	/*
4146 	 * Load any level 2 ARC devices for this pool.
4147 	 */
4148 	error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
4149 	    &spa->spa_l2cache.sav_object, B_FALSE);
4150 	if (error != 0 && error != ENOENT)
4151 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4152 	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
4153 		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
4154 		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
4155 		    &spa->spa_l2cache.sav_config) != 0) {
4156 			spa_load_failed(spa, "error loading l2cache nvlist");
4157 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4158 		}
4159 
4160 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4161 		spa_load_l2cache(spa);
4162 		spa_config_exit(spa, SCL_ALL, FTAG);
4163 	} else if (error == 0) {
4164 		spa->spa_l2cache.sav_sync = B_TRUE;
4165 	}
4166 
4167 	return (0);
4168 }
4169 
4170 static int
4171 spa_ld_load_vdev_metadata(spa_t *spa)
4172 {
4173 	int error = 0;
4174 	vdev_t *rvd = spa->spa_root_vdev;
4175 
4176 	/*
4177 	 * If the 'multihost' property is set, then never allow a pool to
4178 	 * be imported when the system hostid is zero.  The exception to
4179 	 * this rule is zdb which is always allowed to access pools.
4180 	 */
4181 	if (spa_multihost(spa) && spa_get_hostid(spa) == 0 &&
4182 	    (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
4183 		fnvlist_add_uint64(spa->spa_load_info,
4184 		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
4185 		return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
4186 	}
4187 
4188 	/*
4189 	 * If the 'autoreplace' property is set, then post a resource notifying
4190 	 * the ZFS DE that it should not issue any faults for unopenable
4191 	 * devices.  We also iterate over the vdevs, and post a sysevent for any
4192 	 * unopenable vdevs so that the normal autoreplace handler can take
4193 	 * over.
4194 	 */
4195 	if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
4196 		spa_check_removed(spa->spa_root_vdev);
4197 		/*
4198 		 * For the import case, this is done in spa_import(), because
4199 		 * at this point we're using the spare definitions from
4200 		 * the MOS config, not necessarily from the userland config.
4201 		 */
4202 		if (spa->spa_load_state != SPA_LOAD_IMPORT) {
4203 			spa_aux_check_removed(&spa->spa_spares);
4204 			spa_aux_check_removed(&spa->spa_l2cache);
4205 		}
4206 	}
4207 
4208 	/*
4209 	 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
4210 	 */
4211 	error = vdev_load(rvd);
4212 	if (error != 0) {
4213 		spa_load_failed(spa, "vdev_load failed [error=%d]", error);
4214 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
4215 	}
4216 
4217 	error = spa_ld_log_spacemaps(spa);
4218 	if (error != 0) {
4219 		spa_load_failed(spa, "spa_ld_log_sm_data failed [error=%d]",
4220 		    error);
4221 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
4222 	}
4223 
4224 	/*
4225 	 * Propagate the leaf DTLs we just loaded all the way up the vdev tree.
4226 	 */
4227 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4228 	vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
4229 	spa_config_exit(spa, SCL_ALL, FTAG);
4230 
4231 	return (0);
4232 }
4233 
4234 static int
4235 spa_ld_load_dedup_tables(spa_t *spa)
4236 {
4237 	int error = 0;
4238 	vdev_t *rvd = spa->spa_root_vdev;
4239 
4240 	error = ddt_load(spa);
4241 	if (error != 0) {
4242 		spa_load_failed(spa, "ddt_load failed [error=%d]", error);
4243 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4244 	}
4245 
4246 	return (0);
4247 }
4248 
4249 static int
4250 spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport)
4251 {
4252 	vdev_t *rvd = spa->spa_root_vdev;
4253 
4254 	if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
4255 		boolean_t missing = spa_check_logs(spa);
4256 		if (missing) {
4257 			if (spa->spa_missing_tvds != 0) {
4258 				spa_load_note(spa, "spa_check_logs failed "
4259 				    "so dropping the logs");
4260 			} else {
4261 				*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
4262 				spa_load_failed(spa, "spa_check_logs failed");
4263 				return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
4264 				    ENXIO));
4265 			}
4266 		}
4267 	}
4268 
4269 	return (0);
4270 }
4271 
4272 static int
4273 spa_ld_verify_pool_data(spa_t *spa)
4274 {
4275 	int error = 0;
4276 	vdev_t *rvd = spa->spa_root_vdev;
4277 
4278 	/*
4279 	 * We've successfully opened the pool, verify that we're ready
4280 	 * to start pushing transactions.
4281 	 */
4282 	if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
4283 		error = spa_load_verify(spa);
4284 		if (error != 0) {
4285 			spa_load_failed(spa, "spa_load_verify failed "
4286 			    "[error=%d]", error);
4287 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
4288 			    error));
4289 		}
4290 	}
4291 
4292 	return (0);
4293 }
4294 
4295 static void
4296 spa_ld_claim_log_blocks(spa_t *spa)
4297 {
4298 	dmu_tx_t *tx;
4299 	dsl_pool_t *dp = spa_get_dsl(spa);
4300 
4301 	/*
4302 	 * Claim log blocks that haven't been committed yet.
4303 	 * This must all happen in a single txg.
4304 	 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
4305 	 * invoked from zil_claim_log_block()'s i/o done callback.
4306 	 * Price of rollback is that we abandon the log.
4307 	 */
4308 	spa->spa_claiming = B_TRUE;
4309 
4310 	tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
4311 	(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
4312 	    zil_claim, tx, DS_FIND_CHILDREN);
4313 	dmu_tx_commit(tx);
4314 
4315 	spa->spa_claiming = B_FALSE;
4316 
4317 	spa_set_log_state(spa, SPA_LOG_GOOD);
4318 }
4319 
4320 static void
4321 spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
4322     boolean_t update_config_cache)
4323 {
4324 	vdev_t *rvd = spa->spa_root_vdev;
4325 	int need_update = B_FALSE;
4326 
4327 	/*
4328 	 * If the config cache is stale, or we have uninitialized
4329 	 * metaslabs (see spa_vdev_add()), then update the config.
4330 	 *
4331 	 * If this is a verbatim import, trust the current
4332 	 * in-core spa_config and update the disk labels.
4333 	 */
4334 	if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
4335 	    spa->spa_load_state == SPA_LOAD_IMPORT ||
4336 	    spa->spa_load_state == SPA_LOAD_RECOVER ||
4337 	    (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
4338 		need_update = B_TRUE;
4339 
4340 	for (int c = 0; c < rvd->vdev_children; c++)
4341 		if (rvd->vdev_child[c]->vdev_ms_array == 0)
4342 			need_update = B_TRUE;
4343 
4344 	/*
4345 	 * Update the config cache asynchronously in case we're the
4346 	 * root pool, in which case the config cache isn't writable yet.
4347 	 */
4348 	if (need_update)
4349 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
4350 }
4351 
4352 static void
4353 spa_ld_prepare_for_reload(spa_t *spa)
4354 {
4355 	spa_mode_t mode = spa->spa_mode;
4356 	int async_suspended = spa->spa_async_suspended;
4357 
4358 	spa_unload(spa);
4359 	spa_deactivate(spa);
4360 	spa_activate(spa, mode);
4361 
4362 	/*
4363 	 * We save the value of spa_async_suspended as it gets reset to 0 by
4364 	 * spa_unload(). We want to restore it back to the original value before
4365 	 * returning as we might be calling spa_async_resume() later.
4366 	 */
4367 	spa->spa_async_suspended = async_suspended;
4368 }
4369 
4370 static int
4371 spa_ld_read_checkpoint_txg(spa_t *spa)
4372 {
4373 	uberblock_t checkpoint;
4374 	int error = 0;
4375 
4376 	ASSERT0(spa->spa_checkpoint_txg);
4377 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
4378 
4379 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
4380 	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
4381 	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
4382 
4383 	if (error == ENOENT)
4384 		return (0);
4385 
4386 	if (error != 0)
4387 		return (error);
4388 
4389 	ASSERT3U(checkpoint.ub_txg, !=, 0);
4390 	ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
4391 	ASSERT3U(checkpoint.ub_timestamp, !=, 0);
4392 	spa->spa_checkpoint_txg = checkpoint.ub_txg;
4393 	spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
4394 
4395 	return (0);
4396 }
4397 
4398 static int
4399 spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
4400 {
4401 	int error = 0;
4402 
4403 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
4404 	ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
4405 
4406 	/*
4407 	 * Never trust the config that is provided unless we are assembling
4408 	 * a pool following a split.
4409 	 * This means don't trust blkptrs and the vdev tree in general. This
4410 	 * also effectively puts the spa in read-only mode since
4411 	 * spa_writeable() checks for spa_trust_config to be true.
4412 	 * We will later load a trusted config from the MOS.
4413 	 */
4414 	if (type != SPA_IMPORT_ASSEMBLE)
4415 		spa->spa_trust_config = B_FALSE;
4416 
4417 	/*
4418 	 * Parse the config provided to create a vdev tree.
4419 	 */
4420 	error = spa_ld_parse_config(spa, type);
4421 	if (error != 0)
4422 		return (error);
4423 
4424 	spa_import_progress_add(spa);
4425 
4426 	/*
4427 	 * Now that we have the vdev tree, try to open each vdev. This involves
4428 	 * opening the underlying physical device, retrieving its geometry and
4429 	 * probing the vdev with a dummy I/O. The state of each vdev will be set
4430 	 * based on the success of those operations. After this we'll be ready
4431 	 * to read from the vdevs.
4432 	 */
4433 	error = spa_ld_open_vdevs(spa);
4434 	if (error != 0)
4435 		return (error);
4436 
4437 	/*
4438 	 * Read the label of each vdev and make sure that the GUIDs stored
4439 	 * there match the GUIDs in the config provided.
4440 	 * If we're assembling a new pool that's been split off from an
4441 	 * existing pool, the labels haven't yet been updated so we skip
4442 	 * validation for now.
4443 	 */
4444 	if (type != SPA_IMPORT_ASSEMBLE) {
4445 		error = spa_ld_validate_vdevs(spa);
4446 		if (error != 0)
4447 			return (error);
4448 	}
4449 
4450 	/*
4451 	 * Read all vdev labels to find the best uberblock (i.e. latest,
4452 	 * unless spa_load_max_txg is set) and store it in spa_uberblock. We
4453 	 * get the list of features required to read blkptrs in the MOS from
4454 	 * the vdev label with the best uberblock and verify that our version
4455 	 * of zfs supports them all.
4456 	 */
4457 	error = spa_ld_select_uberblock(spa, type);
4458 	if (error != 0)
4459 		return (error);
4460 
4461 	/*
4462 	 * Pass that uberblock to the dsl_pool layer which will open the root
4463 	 * blkptr. This blkptr points to the latest version of the MOS and will
4464 	 * allow us to read its contents.
4465 	 */
4466 	error = spa_ld_open_rootbp(spa);
4467 	if (error != 0)
4468 		return (error);
4469 
4470 	return (0);
4471 }
4472 
4473 static int
4474 spa_ld_checkpoint_rewind(spa_t *spa)
4475 {
4476 	uberblock_t checkpoint;
4477 	int error = 0;
4478 
4479 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
4480 	ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4481 
4482 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
4483 	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
4484 	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
4485 
4486 	if (error != 0) {
4487 		spa_load_failed(spa, "unable to retrieve checkpointed "
4488 		    "uberblock from the MOS config [error=%d]", error);
4489 
4490 		if (error == ENOENT)
4491 			error = ZFS_ERR_NO_CHECKPOINT;
4492 
4493 		return (error);
4494 	}
4495 
4496 	ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
4497 	ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
4498 
4499 	/*
4500 	 * We need to update the txg and timestamp of the checkpointed
4501 	 * uberblock to be higher than the latest one. This ensures that
4502 	 * the checkpointed uberblock is selected if we were to close and
4503 	 * reopen the pool right after we've written it in the vdev labels.
4504 	 * (also see block comment in vdev_uberblock_compare)
4505 	 */
4506 	checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
4507 	checkpoint.ub_timestamp = gethrestime_sec();
4508 
4509 	/*
4510 	 * Set current uberblock to be the checkpointed uberblock.
4511 	 */
4512 	spa->spa_uberblock = checkpoint;
4513 
4514 	/*
4515 	 * If we are doing a normal rewind, then the pool is open for
4516 	 * writing and we sync the "updated" checkpointed uberblock to
4517 	 * disk. Once this is done, we've basically rewound the whole
4518 	 * pool and there is no way back.
4519 	 *
4520 	 * There are cases when we don't want to attempt and sync the
4521 	 * checkpointed uberblock to disk because we are opening a
4522 	 * pool as read-only. Specifically, verifying the checkpointed
4523 	 * state with zdb, and importing the checkpointed state to get
4524 	 * a "preview" of its content.
4525 	 */
4526 	if (spa_writeable(spa)) {
4527 		vdev_t *rvd = spa->spa_root_vdev;
4528 
4529 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4530 		vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
4531 		int svdcount = 0;
4532 		int children = rvd->vdev_children;
4533 		int c0 = spa_get_random(children);
4534 
4535 		for (int c = 0; c < children; c++) {
4536 			vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
4537 
4538 			/* Stop when revisiting the first vdev */
4539 			if (c > 0 && svd[0] == vd)
4540 				break;
4541 
4542 			if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
4543 			    !vdev_is_concrete(vd))
4544 				continue;
4545 
4546 			svd[svdcount++] = vd;
4547 			if (svdcount == SPA_SYNC_MIN_VDEVS)
4548 				break;
4549 		}
4550 		error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
4551 		if (error == 0)
4552 			spa->spa_last_synced_guid = rvd->vdev_guid;
4553 		spa_config_exit(spa, SCL_ALL, FTAG);
4554 
4555 		if (error != 0) {
4556 			spa_load_failed(spa, "failed to write checkpointed "
4557 			    "uberblock to the vdev labels [error=%d]", error);
4558 			return (error);
4559 		}
4560 	}
4561 
4562 	return (0);
4563 }
4564 
4565 static int
4566 spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
4567     boolean_t *update_config_cache)
4568 {
4569 	int error;
4570 
4571 	/*
4572 	 * Parse the config for pool, open and validate vdevs,
4573 	 * select an uberblock, and use that uberblock to open
4574 	 * the MOS.
4575 	 */
4576 	error = spa_ld_mos_init(spa, type);
4577 	if (error != 0)
4578 		return (error);
4579 
4580 	/*
4581 	 * Retrieve the trusted config stored in the MOS and use it to create
4582 	 * a new, exact version of the vdev tree, then reopen all vdevs.
4583 	 */
4584 	error = spa_ld_trusted_config(spa, type, B_FALSE);
4585 	if (error == EAGAIN) {
4586 		if (update_config_cache != NULL)
4587 			*update_config_cache = B_TRUE;
4588 
4589 		/*
4590 		 * Redo the loading process with the trusted config if it is
4591 		 * too different from the untrusted config.
4592 		 */
4593 		spa_ld_prepare_for_reload(spa);
4594 		spa_load_note(spa, "RELOADING");
4595 		error = spa_ld_mos_init(spa, type);
4596 		if (error != 0)
4597 			return (error);
4598 
4599 		error = spa_ld_trusted_config(spa, type, B_TRUE);
4600 		if (error != 0)
4601 			return (error);
4602 
4603 	} else if (error != 0) {
4604 		return (error);
4605 	}
4606 
4607 	return (0);
4608 }
4609 
4610 /*
4611  * Load an existing storage pool, using the config provided. This config
4612  * describes which vdevs are part of the pool and is later validated against
4613  * partial configs present in each vdev's label and an entire copy of the
4614  * config stored in the MOS.
4615  */
4616 static int
4617 spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
4618 {
4619 	int error = 0;
4620 	boolean_t missing_feat_write = B_FALSE;
4621 	boolean_t checkpoint_rewind =
4622 	    (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4623 	boolean_t update_config_cache = B_FALSE;
4624 
4625 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
4626 	ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
4627 
4628 	spa_load_note(spa, "LOADING");
4629 
4630 	error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
4631 	if (error != 0)
4632 		return (error);
4633 
4634 	/*
4635 	 * If we are rewinding to the checkpoint then we need to repeat
4636 	 * everything we've done so far in this function but this time
4637 	 * selecting the checkpointed uberblock and using that to open
4638 	 * the MOS.
4639 	 */
4640 	if (checkpoint_rewind) {
4641 		/*
4642 		 * If we are rewinding to the checkpoint update config cache
4643 		 * anyway.
4644 		 */
4645 		update_config_cache = B_TRUE;
4646 
4647 		/*
4648 		 * Extract the checkpointed uberblock from the current MOS
4649 		 * and use this as the pool's uberblock from now on. If the
4650 		 * pool is imported as writeable we also write the checkpoint
4651 		 * uberblock to the labels, making the rewind permanent.
4652 		 */
4653 		error = spa_ld_checkpoint_rewind(spa);
4654 		if (error != 0)
4655 			return (error);
4656 
4657 		/*
4658 		 * Redo the loading process again with the
4659 		 * checkpointed uberblock.
4660 		 */
4661 		spa_ld_prepare_for_reload(spa);
4662 		spa_load_note(spa, "LOADING checkpointed uberblock");
4663 		error = spa_ld_mos_with_trusted_config(spa, type, NULL);
4664 		if (error != 0)
4665 			return (error);
4666 	}
4667 
4668 	/*
4669 	 * Retrieve the checkpoint txg if the pool has a checkpoint.
4670 	 */
4671 	error = spa_ld_read_checkpoint_txg(spa);
4672 	if (error != 0)
4673 		return (error);
4674 
4675 	/*
4676 	 * Retrieve the mapping of indirect vdevs. Those vdevs were removed
4677 	 * from the pool and their contents were re-mapped to other vdevs. Note
4678 	 * that everything that we read before this step must have been
4679 	 * rewritten on concrete vdevs after the last device removal was
4680 	 * initiated. Otherwise we could be reading from indirect vdevs before
4681 	 * we have loaded their mappings.
4682 	 */
4683 	error = spa_ld_open_indirect_vdev_metadata(spa);
4684 	if (error != 0)
4685 		return (error);
4686 
4687 	/*
4688 	 * Retrieve the full list of active features from the MOS and check if
4689 	 * they are all supported.
4690 	 */
4691 	error = spa_ld_check_features(spa, &missing_feat_write);
4692 	if (error != 0)
4693 		return (error);
4694 
4695 	/*
4696 	 * Load several special directories from the MOS needed by the dsl_pool
4697 	 * layer.
4698 	 */
4699 	error = spa_ld_load_special_directories(spa);
4700 	if (error != 0)
4701 		return (error);
4702 
4703 	/*
4704 	 * Retrieve pool properties from the MOS.
4705 	 */
4706 	error = spa_ld_get_props(spa);
4707 	if (error != 0)
4708 		return (error);
4709 
4710 	/*
4711 	 * Retrieve the list of auxiliary devices - cache devices and spares -
4712 	 * and open them.
4713 	 */
4714 	error = spa_ld_open_aux_vdevs(spa, type);
4715 	if (error != 0)
4716 		return (error);
4717 
4718 	/*
4719 	 * Load the metadata for all vdevs. Also check if unopenable devices
4720 	 * should be autoreplaced.
4721 	 */
4722 	error = spa_ld_load_vdev_metadata(spa);
4723 	if (error != 0)
4724 		return (error);
4725 
4726 	error = spa_ld_load_dedup_tables(spa);
4727 	if (error != 0)
4728 		return (error);
4729 
4730 	/*
4731 	 * Verify the logs now to make sure we don't have any unexpected errors
4732 	 * when we claim log blocks later.
4733 	 */
4734 	error = spa_ld_verify_logs(spa, type, ereport);
4735 	if (error != 0)
4736 		return (error);
4737 
4738 	if (missing_feat_write) {
4739 		ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
4740 
4741 		/*
4742 		 * At this point, we know that we can open the pool in
4743 		 * read-only mode but not read-write mode. We now have enough
4744 		 * information and can return to userland.
4745 		 */
4746 		return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
4747 		    ENOTSUP));
4748 	}
4749 
4750 	/*
4751 	 * Traverse the last txgs to make sure the pool was left off in a safe
4752 	 * state. When performing an extreme rewind, we verify the whole pool,
4753 	 * which can take a very long time.
4754 	 */
4755 	error = spa_ld_verify_pool_data(spa);
4756 	if (error != 0)
4757 		return (error);
4758 
4759 	/*
4760 	 * Calculate the deflated space for the pool. This must be done before
4761 	 * we write anything to the pool because we'd need to update the space
4762 	 * accounting using the deflated sizes.
4763 	 */
4764 	spa_update_dspace(spa);
4765 
4766 	/*
4767 	 * We have now retrieved all the information we needed to open the
4768 	 * pool. If we are importing the pool in read-write mode, a few
4769 	 * additional steps must be performed to finish the import.
4770 	 */
4771 	if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
4772 	    spa->spa_load_max_txg == UINT64_MAX)) {
4773 		uint64_t config_cache_txg = spa->spa_config_txg;
4774 
4775 		ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
4776 
4777 		/*
4778 		 * In case of a checkpoint rewind, log the original txg
4779 		 * of the checkpointed uberblock.
4780 		 */
4781 		if (checkpoint_rewind) {
4782 			spa_history_log_internal(spa, "checkpoint rewind",
4783 			    NULL, "rewound state to txg=%llu",
4784 			    (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
4785 		}
4786 
4787 		/*
4788 		 * Traverse the ZIL and claim all blocks.
4789 		 */
4790 		spa_ld_claim_log_blocks(spa);
4791 
4792 		/*
4793 		 * Kick-off the syncing thread.
4794 		 */
4795 		spa->spa_sync_on = B_TRUE;
4796 		txg_sync_start(spa->spa_dsl_pool);
4797 		mmp_thread_start(spa);
4798 
4799 		/*
4800 		 * Wait for all claims to sync.  We sync up to the highest
4801 		 * claimed log block birth time so that claimed log blocks
4802 		 * don't appear to be from the future.  spa_claim_max_txg
4803 		 * will have been set for us by ZIL traversal operations
4804 		 * performed above.
4805 		 */
4806 		txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
4807 
4808 		/*
4809 		 * Check if we need to request an update of the config. On the
4810 		 * next sync, we would update the config stored in vdev labels
4811 		 * and the cachefile (by default /etc/zfs/zpool.cache).
4812 		 */
4813 		spa_ld_check_for_config_update(spa, config_cache_txg,
4814 		    update_config_cache);
4815 
4816 		/*
4817 		 * Check if a rebuild was in progress and if so resume it.
4818 		 * Then check all DTLs to see if anything needs resilvering.
4819 		 * The resilver will be deferred if a rebuild was started.
4820 		 */
4821 		if (vdev_rebuild_active(spa->spa_root_vdev)) {
4822 			vdev_rebuild_restart(spa);
4823 		} else if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
4824 		    vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
4825 			spa_async_request(spa, SPA_ASYNC_RESILVER);
4826 		}
4827 
4828 		/*
4829 		 * Log the fact that we booted up (so that we can detect if
4830 		 * we rebooted in the middle of an operation).
4831 		 */
4832 		spa_history_log_version(spa, "open", NULL);
4833 
4834 		spa_restart_removal(spa);
4835 		spa_spawn_aux_threads(spa);
4836 
4837 		/*
4838 		 * Delete any inconsistent datasets.
4839 		 *
4840 		 * Note:
4841 		 * Since we may be issuing deletes for clones here,
4842 		 * we make sure to do so after we've spawned all the
4843 		 * auxiliary threads above (from which the livelist
4844 		 * deletion zthr is part of).
4845 		 */
4846 		(void) dmu_objset_find(spa_name(spa),
4847 		    dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
4848 
4849 		/*
4850 		 * Clean up any stale temporary dataset userrefs.
4851 		 */
4852 		dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
4853 
4854 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4855 		vdev_initialize_restart(spa->spa_root_vdev);
4856 		vdev_trim_restart(spa->spa_root_vdev);
4857 		vdev_autotrim_restart(spa);
4858 		spa_config_exit(spa, SCL_CONFIG, FTAG);
4859 	}
4860 
4861 	spa_import_progress_remove(spa_guid(spa));
4862 	spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
4863 
4864 	spa_load_note(spa, "LOADED");
4865 
4866 	return (0);
4867 }
4868 
4869 static int
4870 spa_load_retry(spa_t *spa, spa_load_state_t state)
4871 {
4872 	spa_mode_t mode = spa->spa_mode;
4873 
4874 	spa_unload(spa);
4875 	spa_deactivate(spa);
4876 
4877 	spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
4878 
4879 	spa_activate(spa, mode);
4880 	spa_async_suspend(spa);
4881 
4882 	spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
4883 	    (u_longlong_t)spa->spa_load_max_txg);
4884 
4885 	return (spa_load(spa, state, SPA_IMPORT_EXISTING));
4886 }
4887 
4888 /*
4889  * If spa_load() fails this function will try loading prior txg's. If
4890  * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
4891  * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
4892  * function will not rewind the pool and will return the same error as
4893  * spa_load().
4894  */
4895 static int
4896 spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
4897     int rewind_flags)
4898 {
4899 	nvlist_t *loadinfo = NULL;
4900 	nvlist_t *config = NULL;
4901 	int load_error, rewind_error;
4902 	uint64_t safe_rewind_txg;
4903 	uint64_t min_txg;
4904 
4905 	if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
4906 		spa->spa_load_max_txg = spa->spa_load_txg;
4907 		spa_set_log_state(spa, SPA_LOG_CLEAR);
4908 	} else {
4909 		spa->spa_load_max_txg = max_request;
4910 		if (max_request != UINT64_MAX)
4911 			spa->spa_extreme_rewind = B_TRUE;
4912 	}
4913 
4914 	load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
4915 	if (load_error == 0)
4916 		return (0);
4917 	if (load_error == ZFS_ERR_NO_CHECKPOINT) {
4918 		/*
4919 		 * When attempting checkpoint-rewind on a pool with no
4920 		 * checkpoint, we should not attempt to load uberblocks
4921 		 * from previous txgs when spa_load fails.
4922 		 */
4923 		ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4924 		spa_import_progress_remove(spa_guid(spa));
4925 		return (load_error);
4926 	}
4927 
4928 	if (spa->spa_root_vdev != NULL)
4929 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4930 
4931 	spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
4932 	spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
4933 
4934 	if (rewind_flags & ZPOOL_NEVER_REWIND) {
4935 		nvlist_free(config);
4936 		spa_import_progress_remove(spa_guid(spa));
4937 		return (load_error);
4938 	}
4939 
4940 	if (state == SPA_LOAD_RECOVER) {
4941 		/* Price of rolling back is discarding txgs, including log */
4942 		spa_set_log_state(spa, SPA_LOG_CLEAR);
4943 	} else {
4944 		/*
4945 		 * If we aren't rolling back save the load info from our first
4946 		 * import attempt so that we can restore it after attempting
4947 		 * to rewind.
4948 		 */
4949 		loadinfo = spa->spa_load_info;
4950 		spa->spa_load_info = fnvlist_alloc();
4951 	}
4952 
4953 	spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
4954 	safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
4955 	min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
4956 	    TXG_INITIAL : safe_rewind_txg;
4957 
4958 	/*
4959 	 * Continue as long as we're finding errors, we're still within
4960 	 * the acceptable rewind range, and we're still finding uberblocks
4961 	 */
4962 	while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
4963 	    spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
4964 		if (spa->spa_load_max_txg < safe_rewind_txg)
4965 			spa->spa_extreme_rewind = B_TRUE;
4966 		rewind_error = spa_load_retry(spa, state);
4967 	}
4968 
4969 	spa->spa_extreme_rewind = B_FALSE;
4970 	spa->spa_load_max_txg = UINT64_MAX;
4971 
4972 	if (config && (rewind_error || state != SPA_LOAD_RECOVER))
4973 		spa_config_set(spa, config);
4974 	else
4975 		nvlist_free(config);
4976 
4977 	if (state == SPA_LOAD_RECOVER) {
4978 		ASSERT3P(loadinfo, ==, NULL);
4979 		spa_import_progress_remove(spa_guid(spa));
4980 		return (rewind_error);
4981 	} else {
4982 		/* Store the rewind info as part of the initial load info */
4983 		fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
4984 		    spa->spa_load_info);
4985 
4986 		/* Restore the initial load info */
4987 		fnvlist_free(spa->spa_load_info);
4988 		spa->spa_load_info = loadinfo;
4989 
4990 		spa_import_progress_remove(spa_guid(spa));
4991 		return (load_error);
4992 	}
4993 }
4994 
4995 /*
4996  * Pool Open/Import
4997  *
4998  * The import case is identical to an open except that the configuration is sent
4999  * down from userland, instead of grabbed from the configuration cache.  For the
5000  * case of an open, the pool configuration will exist in the
5001  * POOL_STATE_UNINITIALIZED state.
5002  *
5003  * The stats information (gen/count/ustats) is used to gather vdev statistics at
5004  * the same time open the pool, without having to keep around the spa_t in some
5005  * ambiguous state.
5006  */
5007 static int
5008 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
5009     nvlist_t **config)
5010 {
5011 	spa_t *spa;
5012 	spa_load_state_t state = SPA_LOAD_OPEN;
5013 	int error;
5014 	int locked = B_FALSE;
5015 	int firstopen = B_FALSE;
5016 
5017 	*spapp = NULL;
5018 
5019 	/*
5020 	 * As disgusting as this is, we need to support recursive calls to this
5021 	 * function because dsl_dir_open() is called during spa_load(), and ends
5022 	 * up calling spa_open() again.  The real fix is to figure out how to
5023 	 * avoid dsl_dir_open() calling this in the first place.
5024 	 */
5025 	if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
5026 		mutex_enter(&spa_namespace_lock);
5027 		locked = B_TRUE;
5028 	}
5029 
5030 	if ((spa = spa_lookup(pool)) == NULL) {
5031 		if (locked)
5032 			mutex_exit(&spa_namespace_lock);
5033 		return (SET_ERROR(ENOENT));
5034 	}
5035 
5036 	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
5037 		zpool_load_policy_t policy;
5038 
5039 		firstopen = B_TRUE;
5040 
5041 		zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
5042 		    &policy);
5043 		if (policy.zlp_rewind & ZPOOL_DO_REWIND)
5044 			state = SPA_LOAD_RECOVER;
5045 
5046 		spa_activate(spa, spa_mode_global);
5047 
5048 		if (state != SPA_LOAD_RECOVER)
5049 			spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
5050 		spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
5051 
5052 		zfs_dbgmsg("spa_open_common: opening %s", pool);
5053 		error = spa_load_best(spa, state, policy.zlp_txg,
5054 		    policy.zlp_rewind);
5055 
5056 		if (error == EBADF) {
5057 			/*
5058 			 * If vdev_validate() returns failure (indicated by
5059 			 * EBADF), it indicates that one of the vdevs indicates
5060 			 * that the pool has been exported or destroyed.  If
5061 			 * this is the case, the config cache is out of sync and
5062 			 * we should remove the pool from the namespace.
5063 			 */
5064 			spa_unload(spa);
5065 			spa_deactivate(spa);
5066 			spa_write_cachefile(spa, B_TRUE, B_TRUE);
5067 			spa_remove(spa);
5068 			if (locked)
5069 				mutex_exit(&spa_namespace_lock);
5070 			return (SET_ERROR(ENOENT));
5071 		}
5072 
5073 		if (error) {
5074 			/*
5075 			 * We can't open the pool, but we still have useful
5076 			 * information: the state of each vdev after the
5077 			 * attempted vdev_open().  Return this to the user.
5078 			 */
5079 			if (config != NULL && spa->spa_config) {
5080 				VERIFY(nvlist_dup(spa->spa_config, config,
5081 				    KM_SLEEP) == 0);
5082 				VERIFY(nvlist_add_nvlist(*config,
5083 				    ZPOOL_CONFIG_LOAD_INFO,
5084 				    spa->spa_load_info) == 0);
5085 			}
5086 			spa_unload(spa);
5087 			spa_deactivate(spa);
5088 			spa->spa_last_open_failed = error;
5089 			if (locked)
5090 				mutex_exit(&spa_namespace_lock);
5091 			*spapp = NULL;
5092 			return (error);
5093 		}
5094 	}
5095 
5096 	spa_open_ref(spa, tag);
5097 
5098 	if (config != NULL)
5099 		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
5100 
5101 	/*
5102 	 * If we've recovered the pool, pass back any information we
5103 	 * gathered while doing the load.
5104 	 */
5105 	if (state == SPA_LOAD_RECOVER) {
5106 		VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
5107 		    spa->spa_load_info) == 0);
5108 	}
5109 
5110 	if (locked) {
5111 		spa->spa_last_open_failed = 0;
5112 		spa->spa_last_ubsync_txg = 0;
5113 		spa->spa_load_txg = 0;
5114 		mutex_exit(&spa_namespace_lock);
5115 	}
5116 
5117 	if (firstopen)
5118 		zvol_create_minors_recursive(spa_name(spa));
5119 
5120 	*spapp = spa;
5121 
5122 	return (0);
5123 }
5124 
5125 int
5126 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
5127     nvlist_t **config)
5128 {
5129 	return (spa_open_common(name, spapp, tag, policy, config));
5130 }
5131 
5132 int
5133 spa_open(const char *name, spa_t **spapp, void *tag)
5134 {
5135 	return (spa_open_common(name, spapp, tag, NULL, NULL));
5136 }
5137 
5138 /*
5139  * Lookup the given spa_t, incrementing the inject count in the process,
5140  * preventing it from being exported or destroyed.
5141  */
5142 spa_t *
5143 spa_inject_addref(char *name)
5144 {
5145 	spa_t *spa;
5146 
5147 	mutex_enter(&spa_namespace_lock);
5148 	if ((spa = spa_lookup(name)) == NULL) {
5149 		mutex_exit(&spa_namespace_lock);
5150 		return (NULL);
5151 	}
5152 	spa->spa_inject_ref++;
5153 	mutex_exit(&spa_namespace_lock);
5154 
5155 	return (spa);
5156 }
5157 
5158 void
5159 spa_inject_delref(spa_t *spa)
5160 {
5161 	mutex_enter(&spa_namespace_lock);
5162 	spa->spa_inject_ref--;
5163 	mutex_exit(&spa_namespace_lock);
5164 }
5165 
5166 /*
5167  * Add spares device information to the nvlist.
5168  */
5169 static void
5170 spa_add_spares(spa_t *spa, nvlist_t *config)
5171 {
5172 	nvlist_t **spares;
5173 	uint_t i, nspares;
5174 	nvlist_t *nvroot;
5175 	uint64_t guid;
5176 	vdev_stat_t *vs;
5177 	uint_t vsc;
5178 	uint64_t pool;
5179 
5180 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
5181 
5182 	if (spa->spa_spares.sav_count == 0)
5183 		return;
5184 
5185 	VERIFY(nvlist_lookup_nvlist(config,
5186 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
5187 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
5188 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
5189 	if (nspares != 0) {
5190 		VERIFY(nvlist_add_nvlist_array(nvroot,
5191 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
5192 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
5193 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
5194 
5195 		/*
5196 		 * Go through and find any spares which have since been
5197 		 * repurposed as an active spare.  If this is the case, update
5198 		 * their status appropriately.
5199 		 */
5200 		for (i = 0; i < nspares; i++) {
5201 			VERIFY(nvlist_lookup_uint64(spares[i],
5202 			    ZPOOL_CONFIG_GUID, &guid) == 0);
5203 			if (spa_spare_exists(guid, &pool, NULL) &&
5204 			    pool != 0ULL) {
5205 				VERIFY(nvlist_lookup_uint64_array(
5206 				    spares[i], ZPOOL_CONFIG_VDEV_STATS,
5207 				    (uint64_t **)&vs, &vsc) == 0);
5208 				vs->vs_state = VDEV_STATE_CANT_OPEN;
5209 				vs->vs_aux = VDEV_AUX_SPARED;
5210 			}
5211 		}
5212 	}
5213 }
5214 
5215 /*
5216  * Add l2cache device information to the nvlist, including vdev stats.
5217  */
5218 static void
5219 spa_add_l2cache(spa_t *spa, nvlist_t *config)
5220 {
5221 	nvlist_t **l2cache;
5222 	uint_t i, j, nl2cache;
5223 	nvlist_t *nvroot;
5224 	uint64_t guid;
5225 	vdev_t *vd;
5226 	vdev_stat_t *vs;
5227 	uint_t vsc;
5228 
5229 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
5230 
5231 	if (spa->spa_l2cache.sav_count == 0)
5232 		return;
5233 
5234 	VERIFY(nvlist_lookup_nvlist(config,
5235 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
5236 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
5237 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
5238 	if (nl2cache != 0) {
5239 		VERIFY(nvlist_add_nvlist_array(nvroot,
5240 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
5241 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
5242 		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
5243 
5244 		/*
5245 		 * Update level 2 cache device stats.
5246 		 */
5247 
5248 		for (i = 0; i < nl2cache; i++) {
5249 			VERIFY(nvlist_lookup_uint64(l2cache[i],
5250 			    ZPOOL_CONFIG_GUID, &guid) == 0);
5251 
5252 			vd = NULL;
5253 			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
5254 				if (guid ==
5255 				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
5256 					vd = spa->spa_l2cache.sav_vdevs[j];
5257 					break;
5258 				}
5259 			}
5260 			ASSERT(vd != NULL);
5261 
5262 			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
5263 			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
5264 			    == 0);
5265 			vdev_get_stats(vd, vs);
5266 			vdev_config_generate_stats(vd, l2cache[i]);
5267 
5268 		}
5269 	}
5270 }
5271 
5272 static void
5273 spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
5274 {
5275 	zap_cursor_t zc;
5276 	zap_attribute_t za;
5277 
5278 	if (spa->spa_feat_for_read_obj != 0) {
5279 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
5280 		    spa->spa_feat_for_read_obj);
5281 		    zap_cursor_retrieve(&zc, &za) == 0;
5282 		    zap_cursor_advance(&zc)) {
5283 			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
5284 			    za.za_num_integers == 1);
5285 			VERIFY0(nvlist_add_uint64(features, za.za_name,
5286 			    za.za_first_integer));
5287 		}
5288 		zap_cursor_fini(&zc);
5289 	}
5290 
5291 	if (spa->spa_feat_for_write_obj != 0) {
5292 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
5293 		    spa->spa_feat_for_write_obj);
5294 		    zap_cursor_retrieve(&zc, &za) == 0;
5295 		    zap_cursor_advance(&zc)) {
5296 			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
5297 			    za.za_num_integers == 1);
5298 			VERIFY0(nvlist_add_uint64(features, za.za_name,
5299 			    za.za_first_integer));
5300 		}
5301 		zap_cursor_fini(&zc);
5302 	}
5303 }
5304 
5305 static void
5306 spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
5307 {
5308 	int i;
5309 
5310 	for (i = 0; i < SPA_FEATURES; i++) {
5311 		zfeature_info_t feature = spa_feature_table[i];
5312 		uint64_t refcount;
5313 
5314 		if (feature_get_refcount(spa, &feature, &refcount) != 0)
5315 			continue;
5316 
5317 		VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
5318 	}
5319 }
5320 
5321 /*
5322  * Store a list of pool features and their reference counts in the
5323  * config.
5324  *
5325  * The first time this is called on a spa, allocate a new nvlist, fetch
5326  * the pool features and reference counts from disk, then save the list
5327  * in the spa. In subsequent calls on the same spa use the saved nvlist
5328  * and refresh its values from the cached reference counts.  This
5329  * ensures we don't block here on I/O on a suspended pool so 'zpool
5330  * clear' can resume the pool.
5331  */
5332 static void
5333 spa_add_feature_stats(spa_t *spa, nvlist_t *config)
5334 {
5335 	nvlist_t *features;
5336 
5337 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
5338 
5339 	mutex_enter(&spa->spa_feat_stats_lock);
5340 	features = spa->spa_feat_stats;
5341 
5342 	if (features != NULL) {
5343 		spa_feature_stats_from_cache(spa, features);
5344 	} else {
5345 		VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
5346 		spa->spa_feat_stats = features;
5347 		spa_feature_stats_from_disk(spa, features);
5348 	}
5349 
5350 	VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
5351 	    features));
5352 
5353 	mutex_exit(&spa->spa_feat_stats_lock);
5354 }
5355 
5356 int
5357 spa_get_stats(const char *name, nvlist_t **config,
5358     char *altroot, size_t buflen)
5359 {
5360 	int error;
5361 	spa_t *spa;
5362 
5363 	*config = NULL;
5364 	error = spa_open_common(name, &spa, FTAG, NULL, config);
5365 
5366 	if (spa != NULL) {
5367 		/*
5368 		 * This still leaves a window of inconsistency where the spares
5369 		 * or l2cache devices could change and the config would be
5370 		 * self-inconsistent.
5371 		 */
5372 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5373 
5374 		if (*config != NULL) {
5375 			uint64_t loadtimes[2];
5376 
5377 			loadtimes[0] = spa->spa_loaded_ts.tv_sec;
5378 			loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
5379 			VERIFY(nvlist_add_uint64_array(*config,
5380 			    ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
5381 
5382 			VERIFY(nvlist_add_uint64(*config,
5383 			    ZPOOL_CONFIG_ERRCOUNT,
5384 			    spa_get_errlog_size(spa)) == 0);
5385 
5386 			if (spa_suspended(spa)) {
5387 				VERIFY(nvlist_add_uint64(*config,
5388 				    ZPOOL_CONFIG_SUSPENDED,
5389 				    spa->spa_failmode) == 0);
5390 				VERIFY(nvlist_add_uint64(*config,
5391 				    ZPOOL_CONFIG_SUSPENDED_REASON,
5392 				    spa->spa_suspended) == 0);
5393 			}
5394 
5395 			spa_add_spares(spa, *config);
5396 			spa_add_l2cache(spa, *config);
5397 			spa_add_feature_stats(spa, *config);
5398 		}
5399 	}
5400 
5401 	/*
5402 	 * We want to get the alternate root even for faulted pools, so we cheat
5403 	 * and call spa_lookup() directly.
5404 	 */
5405 	if (altroot) {
5406 		if (spa == NULL) {
5407 			mutex_enter(&spa_namespace_lock);
5408 			spa = spa_lookup(name);
5409 			if (spa)
5410 				spa_altroot(spa, altroot, buflen);
5411 			else
5412 				altroot[0] = '\0';
5413 			spa = NULL;
5414 			mutex_exit(&spa_namespace_lock);
5415 		} else {
5416 			spa_altroot(spa, altroot, buflen);
5417 		}
5418 	}
5419 
5420 	if (spa != NULL) {
5421 		spa_config_exit(spa, SCL_CONFIG, FTAG);
5422 		spa_close(spa, FTAG);
5423 	}
5424 
5425 	return (error);
5426 }
5427 
5428 /*
5429  * Validate that the auxiliary device array is well formed.  We must have an
5430  * array of nvlists, each which describes a valid leaf vdev.  If this is an
5431  * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
5432  * specified, as long as they are well-formed.
5433  */
5434 static int
5435 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
5436     spa_aux_vdev_t *sav, const char *config, uint64_t version,
5437     vdev_labeltype_t label)
5438 {
5439 	nvlist_t **dev;
5440 	uint_t i, ndev;
5441 	vdev_t *vd;
5442 	int error;
5443 
5444 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5445 
5446 	/*
5447 	 * It's acceptable to have no devs specified.
5448 	 */
5449 	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
5450 		return (0);
5451 
5452 	if (ndev == 0)
5453 		return (SET_ERROR(EINVAL));
5454 
5455 	/*
5456 	 * Make sure the pool is formatted with a version that supports this
5457 	 * device type.
5458 	 */
5459 	if (spa_version(spa) < version)
5460 		return (SET_ERROR(ENOTSUP));
5461 
5462 	/*
5463 	 * Set the pending device list so we correctly handle device in-use
5464 	 * checking.
5465 	 */
5466 	sav->sav_pending = dev;
5467 	sav->sav_npending = ndev;
5468 
5469 	for (i = 0; i < ndev; i++) {
5470 		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
5471 		    mode)) != 0)
5472 			goto out;
5473 
5474 		if (!vd->vdev_ops->vdev_op_leaf) {
5475 			vdev_free(vd);
5476 			error = SET_ERROR(EINVAL);
5477 			goto out;
5478 		}
5479 
5480 		vd->vdev_top = vd;
5481 
5482 		if ((error = vdev_open(vd)) == 0 &&
5483 		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
5484 			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
5485 			    vd->vdev_guid) == 0);
5486 		}
5487 
5488 		vdev_free(vd);
5489 
5490 		if (error &&
5491 		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
5492 			goto out;
5493 		else
5494 			error = 0;
5495 	}
5496 
5497 out:
5498 	sav->sav_pending = NULL;
5499 	sav->sav_npending = 0;
5500 	return (error);
5501 }
5502 
5503 static int
5504 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
5505 {
5506 	int error;
5507 
5508 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5509 
5510 	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
5511 	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
5512 	    VDEV_LABEL_SPARE)) != 0) {
5513 		return (error);
5514 	}
5515 
5516 	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
5517 	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
5518 	    VDEV_LABEL_L2CACHE));
5519 }
5520 
5521 static void
5522 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
5523     const char *config)
5524 {
5525 	int i;
5526 
5527 	if (sav->sav_config != NULL) {
5528 		nvlist_t **olddevs;
5529 		uint_t oldndevs;
5530 		nvlist_t **newdevs;
5531 
5532 		/*
5533 		 * Generate new dev list by concatenating with the
5534 		 * current dev list.
5535 		 */
5536 		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
5537 		    &olddevs, &oldndevs) == 0);
5538 
5539 		newdevs = kmem_alloc(sizeof (void *) *
5540 		    (ndevs + oldndevs), KM_SLEEP);
5541 		for (i = 0; i < oldndevs; i++)
5542 			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
5543 			    KM_SLEEP) == 0);
5544 		for (i = 0; i < ndevs; i++)
5545 			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
5546 			    KM_SLEEP) == 0);
5547 
5548 		VERIFY(nvlist_remove(sav->sav_config, config,
5549 		    DATA_TYPE_NVLIST_ARRAY) == 0);
5550 
5551 		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
5552 		    config, newdevs, ndevs + oldndevs) == 0);
5553 		for (i = 0; i < oldndevs + ndevs; i++)
5554 			nvlist_free(newdevs[i]);
5555 		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
5556 	} else {
5557 		/*
5558 		 * Generate a new dev list.
5559 		 */
5560 		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
5561 		    KM_SLEEP) == 0);
5562 		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
5563 		    devs, ndevs) == 0);
5564 	}
5565 }
5566 
5567 /*
5568  * Stop and drop level 2 ARC devices
5569  */
5570 void
5571 spa_l2cache_drop(spa_t *spa)
5572 {
5573 	vdev_t *vd;
5574 	int i;
5575 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
5576 
5577 	for (i = 0; i < sav->sav_count; i++) {
5578 		uint64_t pool;
5579 
5580 		vd = sav->sav_vdevs[i];
5581 		ASSERT(vd != NULL);
5582 
5583 		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
5584 		    pool != 0ULL && l2arc_vdev_present(vd))
5585 			l2arc_remove_vdev(vd);
5586 	}
5587 }
5588 
5589 /*
5590  * Verify encryption parameters for spa creation. If we are encrypting, we must
5591  * have the encryption feature flag enabled.
5592  */
5593 static int
5594 spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
5595     boolean_t has_encryption)
5596 {
5597 	if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
5598 	    dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
5599 	    !has_encryption)
5600 		return (SET_ERROR(ENOTSUP));
5601 
5602 	return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
5603 }
5604 
5605 /*
5606  * Pool Creation
5607  */
5608 int
5609 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
5610     nvlist_t *zplprops, dsl_crypto_params_t *dcp)
5611 {
5612 	spa_t *spa;
5613 	char *altroot = NULL;
5614 	vdev_t *rvd;
5615 	dsl_pool_t *dp;
5616 	dmu_tx_t *tx;
5617 	int error = 0;
5618 	uint64_t txg = TXG_INITIAL;
5619 	nvlist_t **spares, **l2cache;
5620 	uint_t nspares, nl2cache;
5621 	uint64_t version, obj;
5622 	boolean_t has_features;
5623 	boolean_t has_encryption;
5624 	boolean_t has_allocclass;
5625 	spa_feature_t feat;
5626 	char *feat_name;
5627 	char *poolname;
5628 	nvlist_t *nvl;
5629 
5630 	if (props == NULL ||
5631 	    nvlist_lookup_string(props, "tname", &poolname) != 0)
5632 		poolname = (char *)pool;
5633 
5634 	/*
5635 	 * If this pool already exists, return failure.
5636 	 */
5637 	mutex_enter(&spa_namespace_lock);
5638 	if (spa_lookup(poolname) != NULL) {
5639 		mutex_exit(&spa_namespace_lock);
5640 		return (SET_ERROR(EEXIST));
5641 	}
5642 
5643 	/*
5644 	 * Allocate a new spa_t structure.
5645 	 */
5646 	nvl = fnvlist_alloc();
5647 	fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
5648 	(void) nvlist_lookup_string(props,
5649 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5650 	spa = spa_add(poolname, nvl, altroot);
5651 	fnvlist_free(nvl);
5652 	spa_activate(spa, spa_mode_global);
5653 
5654 	if (props && (error = spa_prop_validate(spa, props))) {
5655 		spa_deactivate(spa);
5656 		spa_remove(spa);
5657 		mutex_exit(&spa_namespace_lock);
5658 		return (error);
5659 	}
5660 
5661 	/*
5662 	 * Temporary pool names should never be written to disk.
5663 	 */
5664 	if (poolname != pool)
5665 		spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
5666 
5667 	has_features = B_FALSE;
5668 	has_encryption = B_FALSE;
5669 	has_allocclass = B_FALSE;
5670 	for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
5671 	    elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
5672 		if (zpool_prop_feature(nvpair_name(elem))) {
5673 			has_features = B_TRUE;
5674 
5675 			feat_name = strchr(nvpair_name(elem), '@') + 1;
5676 			VERIFY0(zfeature_lookup_name(feat_name, &feat));
5677 			if (feat == SPA_FEATURE_ENCRYPTION)
5678 				has_encryption = B_TRUE;
5679 			if (feat == SPA_FEATURE_ALLOCATION_CLASSES)
5680 				has_allocclass = B_TRUE;
5681 		}
5682 	}
5683 
5684 	/* verify encryption params, if they were provided */
5685 	if (dcp != NULL) {
5686 		error = spa_create_check_encryption_params(dcp, has_encryption);
5687 		if (error != 0) {
5688 			spa_deactivate(spa);
5689 			spa_remove(spa);
5690 			mutex_exit(&spa_namespace_lock);
5691 			return (error);
5692 		}
5693 	}
5694 	if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
5695 		spa_deactivate(spa);
5696 		spa_remove(spa);
5697 		mutex_exit(&spa_namespace_lock);
5698 		return (ENOTSUP);
5699 	}
5700 
5701 	if (has_features || nvlist_lookup_uint64(props,
5702 	    zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
5703 		version = SPA_VERSION;
5704 	}
5705 	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
5706 
5707 	spa->spa_first_txg = txg;
5708 	spa->spa_uberblock.ub_txg = txg - 1;
5709 	spa->spa_uberblock.ub_version = version;
5710 	spa->spa_ubsync = spa->spa_uberblock;
5711 	spa->spa_load_state = SPA_LOAD_CREATE;
5712 	spa->spa_removing_phys.sr_state = DSS_NONE;
5713 	spa->spa_removing_phys.sr_removing_vdev = -1;
5714 	spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
5715 	spa->spa_indirect_vdevs_loaded = B_TRUE;
5716 
5717 	/*
5718 	 * Create "The Godfather" zio to hold all async IOs
5719 	 */
5720 	spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
5721 	    KM_SLEEP);
5722 	for (int i = 0; i < max_ncpus; i++) {
5723 		spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
5724 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
5725 		    ZIO_FLAG_GODFATHER);
5726 	}
5727 
5728 	/*
5729 	 * Create the root vdev.
5730 	 */
5731 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5732 
5733 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
5734 
5735 	ASSERT(error != 0 || rvd != NULL);
5736 	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
5737 
5738 	if (error == 0 && !zfs_allocatable_devs(nvroot))
5739 		error = SET_ERROR(EINVAL);
5740 
5741 	if (error == 0 &&
5742 	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
5743 	    (error = spa_validate_aux(spa, nvroot, txg,
5744 	    VDEV_ALLOC_ADD)) == 0) {
5745 		/*
5746 		 * instantiate the metaslab groups (this will dirty the vdevs)
5747 		 * we can no longer error exit past this point
5748 		 */
5749 		for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
5750 			vdev_t *vd = rvd->vdev_child[c];
5751 
5752 			vdev_ashift_optimize(vd);
5753 			vdev_metaslab_set_size(vd);
5754 			vdev_expand(vd, txg);
5755 		}
5756 	}
5757 
5758 	spa_config_exit(spa, SCL_ALL, FTAG);
5759 
5760 	if (error != 0) {
5761 		spa_unload(spa);
5762 		spa_deactivate(spa);
5763 		spa_remove(spa);
5764 		mutex_exit(&spa_namespace_lock);
5765 		return (error);
5766 	}
5767 
5768 	/*
5769 	 * Get the list of spares, if specified.
5770 	 */
5771 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
5772 	    &spares, &nspares) == 0) {
5773 		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
5774 		    KM_SLEEP) == 0);
5775 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
5776 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
5777 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5778 		spa_load_spares(spa);
5779 		spa_config_exit(spa, SCL_ALL, FTAG);
5780 		spa->spa_spares.sav_sync = B_TRUE;
5781 	}
5782 
5783 	/*
5784 	 * Get the list of level 2 cache devices, if specified.
5785 	 */
5786 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
5787 	    &l2cache, &nl2cache) == 0) {
5788 		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
5789 		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
5790 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
5791 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
5792 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5793 		spa_load_l2cache(spa);
5794 		spa_config_exit(spa, SCL_ALL, FTAG);
5795 		spa->spa_l2cache.sav_sync = B_TRUE;
5796 	}
5797 
5798 	spa->spa_is_initializing = B_TRUE;
5799 	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
5800 	spa->spa_is_initializing = B_FALSE;
5801 
5802 	/*
5803 	 * Create DDTs (dedup tables).
5804 	 */
5805 	ddt_create(spa);
5806 
5807 	spa_update_dspace(spa);
5808 
5809 	tx = dmu_tx_create_assigned(dp, txg);
5810 
5811 	/*
5812 	 * Create the pool's history object.
5813 	 */
5814 	if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
5815 		spa_history_create_obj(spa, tx);
5816 
5817 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
5818 	spa_history_log_version(spa, "create", tx);
5819 
5820 	/*
5821 	 * Create the pool config object.
5822 	 */
5823 	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
5824 	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
5825 	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
5826 
5827 	if (zap_add(spa->spa_meta_objset,
5828 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
5829 	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
5830 		cmn_err(CE_PANIC, "failed to add pool config");
5831 	}
5832 
5833 	if (zap_add(spa->spa_meta_objset,
5834 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
5835 	    sizeof (uint64_t), 1, &version, tx) != 0) {
5836 		cmn_err(CE_PANIC, "failed to add pool version");
5837 	}
5838 
5839 	/* Newly created pools with the right version are always deflated. */
5840 	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
5841 		spa->spa_deflate = TRUE;
5842 		if (zap_add(spa->spa_meta_objset,
5843 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
5844 		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
5845 			cmn_err(CE_PANIC, "failed to add deflate");
5846 		}
5847 	}
5848 
5849 	/*
5850 	 * Create the deferred-free bpobj.  Turn off compression
5851 	 * because sync-to-convergence takes longer if the blocksize
5852 	 * keeps changing.
5853 	 */
5854 	obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
5855 	dmu_object_set_compress(spa->spa_meta_objset, obj,
5856 	    ZIO_COMPRESS_OFF, tx);
5857 	if (zap_add(spa->spa_meta_objset,
5858 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
5859 	    sizeof (uint64_t), 1, &obj, tx) != 0) {
5860 		cmn_err(CE_PANIC, "failed to add bpobj");
5861 	}
5862 	VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
5863 	    spa->spa_meta_objset, obj));
5864 
5865 	/*
5866 	 * Generate some random noise for salted checksums to operate on.
5867 	 */
5868 	(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
5869 	    sizeof (spa->spa_cksum_salt.zcs_bytes));
5870 
5871 	/*
5872 	 * Set pool properties.
5873 	 */
5874 	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
5875 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
5876 	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
5877 	spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
5878 	spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
5879 	spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
5880 
5881 	if (props != NULL) {
5882 		spa_configfile_set(spa, props, B_FALSE);
5883 		spa_sync_props(props, tx);
5884 	}
5885 
5886 	dmu_tx_commit(tx);
5887 
5888 	spa->spa_sync_on = B_TRUE;
5889 	txg_sync_start(dp);
5890 	mmp_thread_start(spa);
5891 	txg_wait_synced(dp, txg);
5892 
5893 	spa_spawn_aux_threads(spa);
5894 
5895 	spa_write_cachefile(spa, B_FALSE, B_TRUE);
5896 
5897 	/*
5898 	 * Don't count references from objsets that are already closed
5899 	 * and are making their way through the eviction process.
5900 	 */
5901 	spa_evicting_os_wait(spa);
5902 	spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
5903 	spa->spa_load_state = SPA_LOAD_NONE;
5904 
5905 	mutex_exit(&spa_namespace_lock);
5906 
5907 	return (0);
5908 }
5909 
5910 /*
5911  * Import a non-root pool into the system.
5912  */
5913 int
5914 spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
5915 {
5916 	spa_t *spa;
5917 	char *altroot = NULL;
5918 	spa_load_state_t state = SPA_LOAD_IMPORT;
5919 	zpool_load_policy_t policy;
5920 	spa_mode_t mode = spa_mode_global;
5921 	uint64_t readonly = B_FALSE;
5922 	int error;
5923 	nvlist_t *nvroot;
5924 	nvlist_t **spares, **l2cache;
5925 	uint_t nspares, nl2cache;
5926 
5927 	/*
5928 	 * If a pool with this name exists, return failure.
5929 	 */
5930 	mutex_enter(&spa_namespace_lock);
5931 	if (spa_lookup(pool) != NULL) {
5932 		mutex_exit(&spa_namespace_lock);
5933 		return (SET_ERROR(EEXIST));
5934 	}
5935 
5936 	/*
5937 	 * Create and initialize the spa structure.
5938 	 */
5939 	(void) nvlist_lookup_string(props,
5940 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5941 	(void) nvlist_lookup_uint64(props,
5942 	    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
5943 	if (readonly)
5944 		mode = SPA_MODE_READ;
5945 	spa = spa_add(pool, config, altroot);
5946 	spa->spa_import_flags = flags;
5947 
5948 	/*
5949 	 * Verbatim import - Take a pool and insert it into the namespace
5950 	 * as if it had been loaded at boot.
5951 	 */
5952 	if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
5953 		if (props != NULL)
5954 			spa_configfile_set(spa, props, B_FALSE);
5955 
5956 		spa_write_cachefile(spa, B_FALSE, B_TRUE);
5957 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
5958 		zfs_dbgmsg("spa_import: verbatim import of %s", pool);
5959 		mutex_exit(&spa_namespace_lock);
5960 		return (0);
5961 	}
5962 
5963 	spa_activate(spa, mode);
5964 
5965 	/*
5966 	 * Don't start async tasks until we know everything is healthy.
5967 	 */
5968 	spa_async_suspend(spa);
5969 
5970 	zpool_get_load_policy(config, &policy);
5971 	if (policy.zlp_rewind & ZPOOL_DO_REWIND)
5972 		state = SPA_LOAD_RECOVER;
5973 
5974 	spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
5975 
5976 	if (state != SPA_LOAD_RECOVER) {
5977 		spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
5978 		zfs_dbgmsg("spa_import: importing %s", pool);
5979 	} else {
5980 		zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
5981 		    "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
5982 	}
5983 	error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
5984 
5985 	/*
5986 	 * Propagate anything learned while loading the pool and pass it
5987 	 * back to caller (i.e. rewind info, missing devices, etc).
5988 	 */
5989 	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
5990 	    spa->spa_load_info) == 0);
5991 
5992 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5993 	/*
5994 	 * Toss any existing sparelist, as it doesn't have any validity
5995 	 * anymore, and conflicts with spa_has_spare().
5996 	 */
5997 	if (spa->spa_spares.sav_config) {
5998 		nvlist_free(spa->spa_spares.sav_config);
5999 		spa->spa_spares.sav_config = NULL;
6000 		spa_load_spares(spa);
6001 	}
6002 	if (spa->spa_l2cache.sav_config) {
6003 		nvlist_free(spa->spa_l2cache.sav_config);
6004 		spa->spa_l2cache.sav_config = NULL;
6005 		spa_load_l2cache(spa);
6006 	}
6007 
6008 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
6009 	    &nvroot) == 0);
6010 	spa_config_exit(spa, SCL_ALL, FTAG);
6011 
6012 	if (props != NULL)
6013 		spa_configfile_set(spa, props, B_FALSE);
6014 
6015 	if (error != 0 || (props && spa_writeable(spa) &&
6016 	    (error = spa_prop_set(spa, props)))) {
6017 		spa_unload(spa);
6018 		spa_deactivate(spa);
6019 		spa_remove(spa);
6020 		mutex_exit(&spa_namespace_lock);
6021 		return (error);
6022 	}
6023 
6024 	spa_async_resume(spa);
6025 
6026 	/*
6027 	 * Override any spares and level 2 cache devices as specified by
6028 	 * the user, as these may have correct device names/devids, etc.
6029 	 */
6030 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
6031 	    &spares, &nspares) == 0) {
6032 		if (spa->spa_spares.sav_config)
6033 			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
6034 			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
6035 		else
6036 			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
6037 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
6038 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
6039 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
6040 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6041 		spa_load_spares(spa);
6042 		spa_config_exit(spa, SCL_ALL, FTAG);
6043 		spa->spa_spares.sav_sync = B_TRUE;
6044 	}
6045 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
6046 	    &l2cache, &nl2cache) == 0) {
6047 		if (spa->spa_l2cache.sav_config)
6048 			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
6049 			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
6050 		else
6051 			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
6052 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
6053 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
6054 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
6055 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6056 		spa_load_l2cache(spa);
6057 		spa_config_exit(spa, SCL_ALL, FTAG);
6058 		spa->spa_l2cache.sav_sync = B_TRUE;
6059 	}
6060 
6061 	/*
6062 	 * Check for any removed devices.
6063 	 */
6064 	if (spa->spa_autoreplace) {
6065 		spa_aux_check_removed(&spa->spa_spares);
6066 		spa_aux_check_removed(&spa->spa_l2cache);
6067 	}
6068 
6069 	if (spa_writeable(spa)) {
6070 		/*
6071 		 * Update the config cache to include the newly-imported pool.
6072 		 */
6073 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6074 	}
6075 
6076 	/*
6077 	 * It's possible that the pool was expanded while it was exported.
6078 	 * We kick off an async task to handle this for us.
6079 	 */
6080 	spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
6081 
6082 	spa_history_log_version(spa, "import", NULL);
6083 
6084 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
6085 
6086 	mutex_exit(&spa_namespace_lock);
6087 
6088 	zvol_create_minors_recursive(pool);
6089 
6090 	return (0);
6091 }
6092 
6093 nvlist_t *
6094 spa_tryimport(nvlist_t *tryconfig)
6095 {
6096 	nvlist_t *config = NULL;
6097 	char *poolname, *cachefile;
6098 	spa_t *spa;
6099 	uint64_t state;
6100 	int error;
6101 	zpool_load_policy_t policy;
6102 
6103 	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
6104 		return (NULL);
6105 
6106 	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
6107 		return (NULL);
6108 
6109 	/*
6110 	 * Create and initialize the spa structure.
6111 	 */
6112 	mutex_enter(&spa_namespace_lock);
6113 	spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
6114 	spa_activate(spa, SPA_MODE_READ);
6115 
6116 	/*
6117 	 * Rewind pool if a max txg was provided.
6118 	 */
6119 	zpool_get_load_policy(spa->spa_config, &policy);
6120 	if (policy.zlp_txg != UINT64_MAX) {
6121 		spa->spa_load_max_txg = policy.zlp_txg;
6122 		spa->spa_extreme_rewind = B_TRUE;
6123 		zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
6124 		    poolname, (longlong_t)policy.zlp_txg);
6125 	} else {
6126 		zfs_dbgmsg("spa_tryimport: importing %s", poolname);
6127 	}
6128 
6129 	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
6130 	    == 0) {
6131 		zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
6132 		spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
6133 	} else {
6134 		spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
6135 	}
6136 
6137 	error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
6138 
6139 	/*
6140 	 * If 'tryconfig' was at least parsable, return the current config.
6141 	 */
6142 	if (spa->spa_root_vdev != NULL) {
6143 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
6144 		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
6145 		    poolname) == 0);
6146 		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
6147 		    state) == 0);
6148 		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
6149 		    spa->spa_uberblock.ub_timestamp) == 0);
6150 		VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
6151 		    spa->spa_load_info) == 0);
6152 		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
6153 		    spa->spa_errata) == 0);
6154 
6155 		/*
6156 		 * If the bootfs property exists on this pool then we
6157 		 * copy it out so that external consumers can tell which
6158 		 * pools are bootable.
6159 		 */
6160 		if ((!error || error == EEXIST) && spa->spa_bootfs) {
6161 			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
6162 
6163 			/*
6164 			 * We have to play games with the name since the
6165 			 * pool was opened as TRYIMPORT_NAME.
6166 			 */
6167 			if (dsl_dsobj_to_dsname(spa_name(spa),
6168 			    spa->spa_bootfs, tmpname) == 0) {
6169 				char *cp;
6170 				char *dsname;
6171 
6172 				dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
6173 
6174 				cp = strchr(tmpname, '/');
6175 				if (cp == NULL) {
6176 					(void) strlcpy(dsname, tmpname,
6177 					    MAXPATHLEN);
6178 				} else {
6179 					(void) snprintf(dsname, MAXPATHLEN,
6180 					    "%s/%s", poolname, ++cp);
6181 				}
6182 				VERIFY(nvlist_add_string(config,
6183 				    ZPOOL_CONFIG_BOOTFS, dsname) == 0);
6184 				kmem_free(dsname, MAXPATHLEN);
6185 			}
6186 			kmem_free(tmpname, MAXPATHLEN);
6187 		}
6188 
6189 		/*
6190 		 * Add the list of hot spares and level 2 cache devices.
6191 		 */
6192 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6193 		spa_add_spares(spa, config);
6194 		spa_add_l2cache(spa, config);
6195 		spa_config_exit(spa, SCL_CONFIG, FTAG);
6196 	}
6197 
6198 	spa_unload(spa);
6199 	spa_deactivate(spa);
6200 	spa_remove(spa);
6201 	mutex_exit(&spa_namespace_lock);
6202 
6203 	return (config);
6204 }
6205 
6206 /*
6207  * Pool export/destroy
6208  *
6209  * The act of destroying or exporting a pool is very simple.  We make sure there
6210  * is no more pending I/O and any references to the pool are gone.  Then, we
6211  * update the pool state and sync all the labels to disk, removing the
6212  * configuration from the cache afterwards. If the 'hardforce' flag is set, then
6213  * we don't sync the labels or remove the configuration cache.
6214  */
6215 static int
6216 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
6217     boolean_t force, boolean_t hardforce)
6218 {
6219 	spa_t *spa;
6220 
6221 	if (oldconfig)
6222 		*oldconfig = NULL;
6223 
6224 	if (!(spa_mode_global & SPA_MODE_WRITE))
6225 		return (SET_ERROR(EROFS));
6226 
6227 	mutex_enter(&spa_namespace_lock);
6228 	if ((spa = spa_lookup(pool)) == NULL) {
6229 		mutex_exit(&spa_namespace_lock);
6230 		return (SET_ERROR(ENOENT));
6231 	}
6232 
6233 	if (spa->spa_is_exporting) {
6234 		/* the pool is being exported by another thread */
6235 		mutex_exit(&spa_namespace_lock);
6236 		return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
6237 	}
6238 	spa->spa_is_exporting = B_TRUE;
6239 
6240 	/*
6241 	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
6242 	 * reacquire the namespace lock, and see if we can export.
6243 	 */
6244 	spa_open_ref(spa, FTAG);
6245 	mutex_exit(&spa_namespace_lock);
6246 	spa_async_suspend(spa);
6247 	if (spa->spa_zvol_taskq) {
6248 		zvol_remove_minors(spa, spa_name(spa), B_TRUE);
6249 		taskq_wait(spa->spa_zvol_taskq);
6250 	}
6251 	mutex_enter(&spa_namespace_lock);
6252 	spa_close(spa, FTAG);
6253 
6254 	if (spa->spa_state == POOL_STATE_UNINITIALIZED)
6255 		goto export_spa;
6256 	/*
6257 	 * The pool will be in core if it's openable, in which case we can
6258 	 * modify its state.  Objsets may be open only because they're dirty,
6259 	 * so we have to force it to sync before checking spa_refcnt.
6260 	 */
6261 	if (spa->spa_sync_on) {
6262 		txg_wait_synced(spa->spa_dsl_pool, 0);
6263 		spa_evicting_os_wait(spa);
6264 	}
6265 
6266 	/*
6267 	 * A pool cannot be exported or destroyed if there are active
6268 	 * references.  If we are resetting a pool, allow references by
6269 	 * fault injection handlers.
6270 	 */
6271 	if (!spa_refcount_zero(spa) ||
6272 	    (spa->spa_inject_ref != 0 &&
6273 	    new_state != POOL_STATE_UNINITIALIZED)) {
6274 		spa_async_resume(spa);
6275 		spa->spa_is_exporting = B_FALSE;
6276 		mutex_exit(&spa_namespace_lock);
6277 		return (SET_ERROR(EBUSY));
6278 	}
6279 
6280 	if (spa->spa_sync_on) {
6281 		/*
6282 		 * A pool cannot be exported if it has an active shared spare.
6283 		 * This is to prevent other pools stealing the active spare
6284 		 * from an exported pool. At user's own will, such pool can
6285 		 * be forcedly exported.
6286 		 */
6287 		if (!force && new_state == POOL_STATE_EXPORTED &&
6288 		    spa_has_active_shared_spare(spa)) {
6289 			spa_async_resume(spa);
6290 			spa->spa_is_exporting = B_FALSE;
6291 			mutex_exit(&spa_namespace_lock);
6292 			return (SET_ERROR(EXDEV));
6293 		}
6294 
6295 		/*
6296 		 * We're about to export or destroy this pool. Make sure
6297 		 * we stop all initialization and trim activity here before
6298 		 * we set the spa_final_txg. This will ensure that all
6299 		 * dirty data resulting from the initialization is
6300 		 * committed to disk before we unload the pool.
6301 		 */
6302 		if (spa->spa_root_vdev != NULL) {
6303 			vdev_t *rvd = spa->spa_root_vdev;
6304 			vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
6305 			vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
6306 			vdev_autotrim_stop_all(spa);
6307 			vdev_rebuild_stop_all(spa);
6308 		}
6309 
6310 		/*
6311 		 * We want this to be reflected on every label,
6312 		 * so mark them all dirty.  spa_unload() will do the
6313 		 * final sync that pushes these changes out.
6314 		 */
6315 		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
6316 			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6317 			spa->spa_state = new_state;
6318 			spa->spa_final_txg = spa_last_synced_txg(spa) +
6319 			    TXG_DEFER_SIZE + 1;
6320 			vdev_config_dirty(spa->spa_root_vdev);
6321 			spa_config_exit(spa, SCL_ALL, FTAG);
6322 		}
6323 	}
6324 
6325 export_spa:
6326 	if (new_state == POOL_STATE_DESTROYED)
6327 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
6328 	else if (new_state == POOL_STATE_EXPORTED)
6329 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
6330 
6331 	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
6332 		spa_unload(spa);
6333 		spa_deactivate(spa);
6334 	}
6335 
6336 	if (oldconfig && spa->spa_config)
6337 		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
6338 
6339 	if (new_state != POOL_STATE_UNINITIALIZED) {
6340 		if (!hardforce)
6341 			spa_write_cachefile(spa, B_TRUE, B_TRUE);
6342 		spa_remove(spa);
6343 	} else {
6344 		/*
6345 		 * If spa_remove() is not called for this spa_t and
6346 		 * there is any possibility that it can be reused,
6347 		 * we make sure to reset the exporting flag.
6348 		 */
6349 		spa->spa_is_exporting = B_FALSE;
6350 	}
6351 
6352 	mutex_exit(&spa_namespace_lock);
6353 	return (0);
6354 }
6355 
6356 /*
6357  * Destroy a storage pool.
6358  */
6359 int
6360 spa_destroy(char *pool)
6361 {
6362 	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
6363 	    B_FALSE, B_FALSE));
6364 }
6365 
6366 /*
6367  * Export a storage pool.
6368  */
6369 int
6370 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
6371     boolean_t hardforce)
6372 {
6373 	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
6374 	    force, hardforce));
6375 }
6376 
6377 /*
6378  * Similar to spa_export(), this unloads the spa_t without actually removing it
6379  * from the namespace in any way.
6380  */
6381 int
6382 spa_reset(char *pool)
6383 {
6384 	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
6385 	    B_FALSE, B_FALSE));
6386 }
6387 
6388 /*
6389  * ==========================================================================
6390  * Device manipulation
6391  * ==========================================================================
6392  */
6393 
6394 /*
6395  * Add a device to a storage pool.
6396  */
6397 int
6398 spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
6399 {
6400 	uint64_t txg;
6401 	int error;
6402 	vdev_t *rvd = spa->spa_root_vdev;
6403 	vdev_t *vd, *tvd;
6404 	nvlist_t **spares, **l2cache;
6405 	uint_t nspares, nl2cache;
6406 
6407 	ASSERT(spa_writeable(spa));
6408 
6409 	txg = spa_vdev_enter(spa);
6410 
6411 	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
6412 	    VDEV_ALLOC_ADD)) != 0)
6413 		return (spa_vdev_exit(spa, NULL, txg, error));
6414 
6415 	spa->spa_pending_vdev = vd;	/* spa_vdev_exit() will clear this */
6416 
6417 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
6418 	    &nspares) != 0)
6419 		nspares = 0;
6420 
6421 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
6422 	    &nl2cache) != 0)
6423 		nl2cache = 0;
6424 
6425 	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
6426 		return (spa_vdev_exit(spa, vd, txg, EINVAL));
6427 
6428 	if (vd->vdev_children != 0 &&
6429 	    (error = vdev_create(vd, txg, B_FALSE)) != 0)
6430 		return (spa_vdev_exit(spa, vd, txg, error));
6431 
6432 	/*
6433 	 * We must validate the spares and l2cache devices after checking the
6434 	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
6435 	 */
6436 	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
6437 		return (spa_vdev_exit(spa, vd, txg, error));
6438 
6439 	/*
6440 	 * If we are in the middle of a device removal, we can only add
6441 	 * devices which match the existing devices in the pool.
6442 	 * If we are in the middle of a removal, or have some indirect
6443 	 * vdevs, we can not add raidz toplevels.
6444 	 */
6445 	if (spa->spa_vdev_removal != NULL ||
6446 	    spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
6447 		for (int c = 0; c < vd->vdev_children; c++) {
6448 			tvd = vd->vdev_child[c];
6449 			if (spa->spa_vdev_removal != NULL &&
6450 			    tvd->vdev_ashift != spa->spa_max_ashift) {
6451 				return (spa_vdev_exit(spa, vd, txg, EINVAL));
6452 			}
6453 			/* Fail if top level vdev is raidz */
6454 			if (tvd->vdev_ops == &vdev_raidz_ops) {
6455 				return (spa_vdev_exit(spa, vd, txg, EINVAL));
6456 			}
6457 			/*
6458 			 * Need the top level mirror to be
6459 			 * a mirror of leaf vdevs only
6460 			 */
6461 			if (tvd->vdev_ops == &vdev_mirror_ops) {
6462 				for (uint64_t cid = 0;
6463 				    cid < tvd->vdev_children; cid++) {
6464 					vdev_t *cvd = tvd->vdev_child[cid];
6465 					if (!cvd->vdev_ops->vdev_op_leaf) {
6466 						return (spa_vdev_exit(spa, vd,
6467 						    txg, EINVAL));
6468 					}
6469 				}
6470 			}
6471 		}
6472 	}
6473 
6474 	for (int c = 0; c < vd->vdev_children; c++) {
6475 		tvd = vd->vdev_child[c];
6476 		vdev_remove_child(vd, tvd);
6477 		tvd->vdev_id = rvd->vdev_children;
6478 		vdev_add_child(rvd, tvd);
6479 		vdev_config_dirty(tvd);
6480 	}
6481 
6482 	if (nspares != 0) {
6483 		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
6484 		    ZPOOL_CONFIG_SPARES);
6485 		spa_load_spares(spa);
6486 		spa->spa_spares.sav_sync = B_TRUE;
6487 	}
6488 
6489 	if (nl2cache != 0) {
6490 		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
6491 		    ZPOOL_CONFIG_L2CACHE);
6492 		spa_load_l2cache(spa);
6493 		spa->spa_l2cache.sav_sync = B_TRUE;
6494 	}
6495 
6496 	/*
6497 	 * We have to be careful when adding new vdevs to an existing pool.
6498 	 * If other threads start allocating from these vdevs before we
6499 	 * sync the config cache, and we lose power, then upon reboot we may
6500 	 * fail to open the pool because there are DVAs that the config cache
6501 	 * can't translate.  Therefore, we first add the vdevs without
6502 	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
6503 	 * and then let spa_config_update() initialize the new metaslabs.
6504 	 *
6505 	 * spa_load() checks for added-but-not-initialized vdevs, so that
6506 	 * if we lose power at any point in this sequence, the remaining
6507 	 * steps will be completed the next time we load the pool.
6508 	 */
6509 	(void) spa_vdev_exit(spa, vd, txg, 0);
6510 
6511 	mutex_enter(&spa_namespace_lock);
6512 	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6513 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
6514 	mutex_exit(&spa_namespace_lock);
6515 
6516 	return (0);
6517 }
6518 
6519 /*
6520  * Attach a device to a mirror.  The arguments are the path to any device
6521  * in the mirror, and the nvroot for the new device.  If the path specifies
6522  * a device that is not mirrored, we automatically insert the mirror vdev.
6523  *
6524  * If 'replacing' is specified, the new device is intended to replace the
6525  * existing device; in this case the two devices are made into their own
6526  * mirror using the 'replacing' vdev, which is functionally identical to
6527  * the mirror vdev (it actually reuses all the same ops) but has a few
6528  * extra rules: you can't attach to it after it's been created, and upon
6529  * completion of resilvering, the first disk (the one being replaced)
6530  * is automatically detached.
6531  *
6532  * If 'rebuild' is specified, then sequential reconstruction (a.ka. rebuild)
6533  * should be performed instead of traditional healing reconstruction.  From
6534  * an administrators perspective these are both resilver operations.
6535  */
6536 int
6537 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
6538     int rebuild)
6539 {
6540 	uint64_t txg, dtl_max_txg;
6541 	vdev_t *rvd = spa->spa_root_vdev;
6542 	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
6543 	vdev_ops_t *pvops;
6544 	char *oldvdpath, *newvdpath;
6545 	int newvd_isspare;
6546 	int error;
6547 
6548 	ASSERT(spa_writeable(spa));
6549 
6550 	txg = spa_vdev_enter(spa);
6551 
6552 	oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
6553 
6554 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
6555 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
6556 		error = (spa_has_checkpoint(spa)) ?
6557 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
6558 		return (spa_vdev_exit(spa, NULL, txg, error));
6559 	}
6560 
6561 	if (rebuild) {
6562 		if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
6563 			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6564 
6565 		if (dsl_scan_resilvering(spa_get_dsl(spa)))
6566 			return (spa_vdev_exit(spa, NULL, txg,
6567 			    ZFS_ERR_RESILVER_IN_PROGRESS));
6568 	} else {
6569 		if (vdev_rebuild_active(rvd))
6570 			return (spa_vdev_exit(spa, NULL, txg,
6571 			    ZFS_ERR_REBUILD_IN_PROGRESS));
6572 	}
6573 
6574 	if (spa->spa_vdev_removal != NULL)
6575 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6576 
6577 	if (oldvd == NULL)
6578 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
6579 
6580 	if (!oldvd->vdev_ops->vdev_op_leaf)
6581 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6582 
6583 	pvd = oldvd->vdev_parent;
6584 
6585 	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
6586 	    VDEV_ALLOC_ATTACH)) != 0)
6587 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
6588 
6589 	if (newrootvd->vdev_children != 1)
6590 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
6591 
6592 	newvd = newrootvd->vdev_child[0];
6593 
6594 	if (!newvd->vdev_ops->vdev_op_leaf)
6595 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
6596 
6597 	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
6598 		return (spa_vdev_exit(spa, newrootvd, txg, error));
6599 
6600 	/*
6601 	 * Spares can't replace logs
6602 	 */
6603 	if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
6604 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6605 
6606 	if (rebuild) {
6607 		/*
6608 		 * For rebuilds, the parent vdev must support reconstruction
6609 		 * using only space maps.  This means the only allowable
6610 		 * parents are the root vdev or a mirror vdev.
6611 		 */
6612 		if (pvd->vdev_ops != &vdev_mirror_ops &&
6613 		    pvd->vdev_ops != &vdev_root_ops) {
6614 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6615 		}
6616 	}
6617 
6618 	if (!replacing) {
6619 		/*
6620 		 * For attach, the only allowable parent is a mirror or the root
6621 		 * vdev.
6622 		 */
6623 		if (pvd->vdev_ops != &vdev_mirror_ops &&
6624 		    pvd->vdev_ops != &vdev_root_ops)
6625 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6626 
6627 		pvops = &vdev_mirror_ops;
6628 	} else {
6629 		/*
6630 		 * Active hot spares can only be replaced by inactive hot
6631 		 * spares.
6632 		 */
6633 		if (pvd->vdev_ops == &vdev_spare_ops &&
6634 		    oldvd->vdev_isspare &&
6635 		    !spa_has_spare(spa, newvd->vdev_guid))
6636 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6637 
6638 		/*
6639 		 * If the source is a hot spare, and the parent isn't already a
6640 		 * spare, then we want to create a new hot spare.  Otherwise, we
6641 		 * want to create a replacing vdev.  The user is not allowed to
6642 		 * attach to a spared vdev child unless the 'isspare' state is
6643 		 * the same (spare replaces spare, non-spare replaces
6644 		 * non-spare).
6645 		 */
6646 		if (pvd->vdev_ops == &vdev_replacing_ops &&
6647 		    spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
6648 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6649 		} else if (pvd->vdev_ops == &vdev_spare_ops &&
6650 		    newvd->vdev_isspare != oldvd->vdev_isspare) {
6651 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6652 		}
6653 
6654 		if (newvd->vdev_isspare)
6655 			pvops = &vdev_spare_ops;
6656 		else
6657 			pvops = &vdev_replacing_ops;
6658 	}
6659 
6660 	/*
6661 	 * Make sure the new device is big enough.
6662 	 */
6663 	if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
6664 		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
6665 
6666 	/*
6667 	 * The new device cannot have a higher alignment requirement
6668 	 * than the top-level vdev.
6669 	 */
6670 	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
6671 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6672 
6673 	/*
6674 	 * If this is an in-place replacement, update oldvd's path and devid
6675 	 * to make it distinguishable from newvd, and unopenable from now on.
6676 	 */
6677 	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
6678 		spa_strfree(oldvd->vdev_path);
6679 		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
6680 		    KM_SLEEP);
6681 		(void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5,
6682 		    "%s/%s", newvd->vdev_path, "old");
6683 		if (oldvd->vdev_devid != NULL) {
6684 			spa_strfree(oldvd->vdev_devid);
6685 			oldvd->vdev_devid = NULL;
6686 		}
6687 	}
6688 
6689 	/*
6690 	 * If the parent is not a mirror, or if we're replacing, insert the new
6691 	 * mirror/replacing/spare vdev above oldvd.
6692 	 */
6693 	if (pvd->vdev_ops != pvops)
6694 		pvd = vdev_add_parent(oldvd, pvops);
6695 
6696 	ASSERT(pvd->vdev_top->vdev_parent == rvd);
6697 	ASSERT(pvd->vdev_ops == pvops);
6698 	ASSERT(oldvd->vdev_parent == pvd);
6699 
6700 	/*
6701 	 * Extract the new device from its root and add it to pvd.
6702 	 */
6703 	vdev_remove_child(newrootvd, newvd);
6704 	newvd->vdev_id = pvd->vdev_children;
6705 	newvd->vdev_crtxg = oldvd->vdev_crtxg;
6706 	vdev_add_child(pvd, newvd);
6707 
6708 	/*
6709 	 * Reevaluate the parent vdev state.
6710 	 */
6711 	vdev_propagate_state(pvd);
6712 
6713 	tvd = newvd->vdev_top;
6714 	ASSERT(pvd->vdev_top == tvd);
6715 	ASSERT(tvd->vdev_parent == rvd);
6716 
6717 	vdev_config_dirty(tvd);
6718 
6719 	/*
6720 	 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
6721 	 * for any dmu_sync-ed blocks.  It will propagate upward when
6722 	 * spa_vdev_exit() calls vdev_dtl_reassess().
6723 	 */
6724 	dtl_max_txg = txg + TXG_CONCURRENT_STATES;
6725 
6726 	vdev_dtl_dirty(newvd, DTL_MISSING,
6727 	    TXG_INITIAL, dtl_max_txg - TXG_INITIAL);
6728 
6729 	if (newvd->vdev_isspare) {
6730 		spa_spare_activate(newvd);
6731 		spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
6732 	}
6733 
6734 	oldvdpath = spa_strdup(oldvd->vdev_path);
6735 	newvdpath = spa_strdup(newvd->vdev_path);
6736 	newvd_isspare = newvd->vdev_isspare;
6737 
6738 	/*
6739 	 * Mark newvd's DTL dirty in this txg.
6740 	 */
6741 	vdev_dirty(tvd, VDD_DTL, newvd, txg);
6742 
6743 	/*
6744 	 * Schedule the resilver or rebuild to restart in the future. We do
6745 	 * this to ensure that dmu_sync-ed blocks have been stitched into the
6746 	 * respective datasets.
6747 	 */
6748 	if (rebuild) {
6749 		newvd->vdev_rebuild_txg = txg;
6750 
6751 		vdev_rebuild(tvd);
6752 	} else {
6753 		newvd->vdev_resilver_txg = txg;
6754 
6755 		if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
6756 		    spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) {
6757 			vdev_defer_resilver(newvd);
6758 		} else {
6759 			dsl_scan_restart_resilver(spa->spa_dsl_pool,
6760 			    dtl_max_txg);
6761 		}
6762 	}
6763 
6764 	if (spa->spa_bootfs)
6765 		spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
6766 
6767 	spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
6768 
6769 	/*
6770 	 * Commit the config
6771 	 */
6772 	(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
6773 
6774 	spa_history_log_internal(spa, "vdev attach", NULL,
6775 	    "%s vdev=%s %s vdev=%s",
6776 	    replacing && newvd_isspare ? "spare in" :
6777 	    replacing ? "replace" : "attach", newvdpath,
6778 	    replacing ? "for" : "to", oldvdpath);
6779 
6780 	spa_strfree(oldvdpath);
6781 	spa_strfree(newvdpath);
6782 
6783 	return (0);
6784 }
6785 
6786 /*
6787  * Detach a device from a mirror or replacing vdev.
6788  *
6789  * If 'replace_done' is specified, only detach if the parent
6790  * is a replacing vdev.
6791  */
6792 int
6793 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
6794 {
6795 	uint64_t txg;
6796 	int error;
6797 	vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
6798 	vdev_t *vd, *pvd, *cvd, *tvd;
6799 	boolean_t unspare = B_FALSE;
6800 	uint64_t unspare_guid = 0;
6801 	char *vdpath;
6802 
6803 	ASSERT(spa_writeable(spa));
6804 
6805 	txg = spa_vdev_detach_enter(spa, guid);
6806 
6807 	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
6808 
6809 	/*
6810 	 * Besides being called directly from the userland through the
6811 	 * ioctl interface, spa_vdev_detach() can be potentially called
6812 	 * at the end of spa_vdev_resilver_done().
6813 	 *
6814 	 * In the regular case, when we have a checkpoint this shouldn't
6815 	 * happen as we never empty the DTLs of a vdev during the scrub
6816 	 * [see comment in dsl_scan_done()]. Thus spa_vdev_resilvering_done()
6817 	 * should never get here when we have a checkpoint.
6818 	 *
6819 	 * That said, even in a case when we checkpoint the pool exactly
6820 	 * as spa_vdev_resilver_done() calls this function everything
6821 	 * should be fine as the resilver will return right away.
6822 	 */
6823 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
6824 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
6825 		error = (spa_has_checkpoint(spa)) ?
6826 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
6827 		return (spa_vdev_exit(spa, NULL, txg, error));
6828 	}
6829 
6830 	if (vd == NULL)
6831 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
6832 
6833 	if (!vd->vdev_ops->vdev_op_leaf)
6834 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6835 
6836 	pvd = vd->vdev_parent;
6837 
6838 	/*
6839 	 * If the parent/child relationship is not as expected, don't do it.
6840 	 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
6841 	 * vdev that's replacing B with C.  The user's intent in replacing
6842 	 * is to go from M(A,B) to M(A,C).  If the user decides to cancel
6843 	 * the replace by detaching C, the expected behavior is to end up
6844 	 * M(A,B).  But suppose that right after deciding to detach C,
6845 	 * the replacement of B completes.  We would have M(A,C), and then
6846 	 * ask to detach C, which would leave us with just A -- not what
6847 	 * the user wanted.  To prevent this, we make sure that the
6848 	 * parent/child relationship hasn't changed -- in this example,
6849 	 * that C's parent is still the replacing vdev R.
6850 	 */
6851 	if (pvd->vdev_guid != pguid && pguid != 0)
6852 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6853 
6854 	/*
6855 	 * Only 'replacing' or 'spare' vdevs can be replaced.
6856 	 */
6857 	if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
6858 	    pvd->vdev_ops != &vdev_spare_ops)
6859 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6860 
6861 	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
6862 	    spa_version(spa) >= SPA_VERSION_SPARES);
6863 
6864 	/*
6865 	 * Only mirror, replacing, and spare vdevs support detach.
6866 	 */
6867 	if (pvd->vdev_ops != &vdev_replacing_ops &&
6868 	    pvd->vdev_ops != &vdev_mirror_ops &&
6869 	    pvd->vdev_ops != &vdev_spare_ops)
6870 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6871 
6872 	/*
6873 	 * If this device has the only valid copy of some data,
6874 	 * we cannot safely detach it.
6875 	 */
6876 	if (vdev_dtl_required(vd))
6877 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6878 
6879 	ASSERT(pvd->vdev_children >= 2);
6880 
6881 	/*
6882 	 * If we are detaching the second disk from a replacing vdev, then
6883 	 * check to see if we changed the original vdev's path to have "/old"
6884 	 * at the end in spa_vdev_attach().  If so, undo that change now.
6885 	 */
6886 	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
6887 	    vd->vdev_path != NULL) {
6888 		size_t len = strlen(vd->vdev_path);
6889 
6890 		for (int c = 0; c < pvd->vdev_children; c++) {
6891 			cvd = pvd->vdev_child[c];
6892 
6893 			if (cvd == vd || cvd->vdev_path == NULL)
6894 				continue;
6895 
6896 			if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
6897 			    strcmp(cvd->vdev_path + len, "/old") == 0) {
6898 				spa_strfree(cvd->vdev_path);
6899 				cvd->vdev_path = spa_strdup(vd->vdev_path);
6900 				break;
6901 			}
6902 		}
6903 	}
6904 
6905 	/*
6906 	 * If we are detaching the original disk from a spare, then it implies
6907 	 * that the spare should become a real disk, and be removed from the
6908 	 * active spare list for the pool.
6909 	 */
6910 	if (pvd->vdev_ops == &vdev_spare_ops &&
6911 	    vd->vdev_id == 0 &&
6912 	    pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
6913 		unspare = B_TRUE;
6914 
6915 	/*
6916 	 * Erase the disk labels so the disk can be used for other things.
6917 	 * This must be done after all other error cases are handled,
6918 	 * but before we disembowel vd (so we can still do I/O to it).
6919 	 * But if we can't do it, don't treat the error as fatal --
6920 	 * it may be that the unwritability of the disk is the reason
6921 	 * it's being detached!
6922 	 */
6923 	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
6924 
6925 	/*
6926 	 * Remove vd from its parent and compact the parent's children.
6927 	 */
6928 	vdev_remove_child(pvd, vd);
6929 	vdev_compact_children(pvd);
6930 
6931 	/*
6932 	 * Remember one of the remaining children so we can get tvd below.
6933 	 */
6934 	cvd = pvd->vdev_child[pvd->vdev_children - 1];
6935 
6936 	/*
6937 	 * If we need to remove the remaining child from the list of hot spares,
6938 	 * do it now, marking the vdev as no longer a spare in the process.
6939 	 * We must do this before vdev_remove_parent(), because that can
6940 	 * change the GUID if it creates a new toplevel GUID.  For a similar
6941 	 * reason, we must remove the spare now, in the same txg as the detach;
6942 	 * otherwise someone could attach a new sibling, change the GUID, and
6943 	 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
6944 	 */
6945 	if (unspare) {
6946 		ASSERT(cvd->vdev_isspare);
6947 		spa_spare_remove(cvd);
6948 		unspare_guid = cvd->vdev_guid;
6949 		(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
6950 		cvd->vdev_unspare = B_TRUE;
6951 	}
6952 
6953 	/*
6954 	 * If the parent mirror/replacing vdev only has one child,
6955 	 * the parent is no longer needed.  Remove it from the tree.
6956 	 */
6957 	if (pvd->vdev_children == 1) {
6958 		if (pvd->vdev_ops == &vdev_spare_ops)
6959 			cvd->vdev_unspare = B_FALSE;
6960 		vdev_remove_parent(cvd);
6961 	}
6962 
6963 	/*
6964 	 * We don't set tvd until now because the parent we just removed
6965 	 * may have been the previous top-level vdev.
6966 	 */
6967 	tvd = cvd->vdev_top;
6968 	ASSERT(tvd->vdev_parent == rvd);
6969 
6970 	/*
6971 	 * Reevaluate the parent vdev state.
6972 	 */
6973 	vdev_propagate_state(cvd);
6974 
6975 	/*
6976 	 * If the 'autoexpand' property is set on the pool then automatically
6977 	 * try to expand the size of the pool. For example if the device we
6978 	 * just detached was smaller than the others, it may be possible to
6979 	 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
6980 	 * first so that we can obtain the updated sizes of the leaf vdevs.
6981 	 */
6982 	if (spa->spa_autoexpand) {
6983 		vdev_reopen(tvd);
6984 		vdev_expand(tvd, txg);
6985 	}
6986 
6987 	vdev_config_dirty(tvd);
6988 
6989 	/*
6990 	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
6991 	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
6992 	 * But first make sure we're not on any *other* txg's DTL list, to
6993 	 * prevent vd from being accessed after it's freed.
6994 	 */
6995 	vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
6996 	for (int t = 0; t < TXG_SIZE; t++)
6997 		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
6998 	vd->vdev_detached = B_TRUE;
6999 	vdev_dirty(tvd, VDD_DTL, vd, txg);
7000 
7001 	spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
7002 	spa_notify_waiters(spa);
7003 
7004 	/* hang on to the spa before we release the lock */
7005 	spa_open_ref(spa, FTAG);
7006 
7007 	error = spa_vdev_exit(spa, vd, txg, 0);
7008 
7009 	spa_history_log_internal(spa, "detach", NULL,
7010 	    "vdev=%s", vdpath);
7011 	spa_strfree(vdpath);
7012 
7013 	/*
7014 	 * If this was the removal of the original device in a hot spare vdev,
7015 	 * then we want to go through and remove the device from the hot spare
7016 	 * list of every other pool.
7017 	 */
7018 	if (unspare) {
7019 		spa_t *altspa = NULL;
7020 
7021 		mutex_enter(&spa_namespace_lock);
7022 		while ((altspa = spa_next(altspa)) != NULL) {
7023 			if (altspa->spa_state != POOL_STATE_ACTIVE ||
7024 			    altspa == spa)
7025 				continue;
7026 
7027 			spa_open_ref(altspa, FTAG);
7028 			mutex_exit(&spa_namespace_lock);
7029 			(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
7030 			mutex_enter(&spa_namespace_lock);
7031 			spa_close(altspa, FTAG);
7032 		}
7033 		mutex_exit(&spa_namespace_lock);
7034 
7035 		/* search the rest of the vdevs for spares to remove */
7036 		spa_vdev_resilver_done(spa);
7037 	}
7038 
7039 	/* all done with the spa; OK to release */
7040 	mutex_enter(&spa_namespace_lock);
7041 	spa_close(spa, FTAG);
7042 	mutex_exit(&spa_namespace_lock);
7043 
7044 	return (error);
7045 }
7046 
7047 static int
7048 spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
7049     list_t *vd_list)
7050 {
7051 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
7052 
7053 	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
7054 
7055 	/* Look up vdev and ensure it's a leaf. */
7056 	vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7057 	if (vd == NULL || vd->vdev_detached) {
7058 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7059 		return (SET_ERROR(ENODEV));
7060 	} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
7061 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7062 		return (SET_ERROR(EINVAL));
7063 	} else if (!vdev_writeable(vd)) {
7064 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7065 		return (SET_ERROR(EROFS));
7066 	}
7067 	mutex_enter(&vd->vdev_initialize_lock);
7068 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7069 
7070 	/*
7071 	 * When we activate an initialize action we check to see
7072 	 * if the vdev_initialize_thread is NULL. We do this instead
7073 	 * of using the vdev_initialize_state since there might be
7074 	 * a previous initialization process which has completed but
7075 	 * the thread is not exited.
7076 	 */
7077 	if (cmd_type == POOL_INITIALIZE_START &&
7078 	    (vd->vdev_initialize_thread != NULL ||
7079 	    vd->vdev_top->vdev_removing)) {
7080 		mutex_exit(&vd->vdev_initialize_lock);
7081 		return (SET_ERROR(EBUSY));
7082 	} else if (cmd_type == POOL_INITIALIZE_CANCEL &&
7083 	    (vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
7084 	    vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
7085 		mutex_exit(&vd->vdev_initialize_lock);
7086 		return (SET_ERROR(ESRCH));
7087 	} else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
7088 	    vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
7089 		mutex_exit(&vd->vdev_initialize_lock);
7090 		return (SET_ERROR(ESRCH));
7091 	}
7092 
7093 	switch (cmd_type) {
7094 	case POOL_INITIALIZE_START:
7095 		vdev_initialize(vd);
7096 		break;
7097 	case POOL_INITIALIZE_CANCEL:
7098 		vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
7099 		break;
7100 	case POOL_INITIALIZE_SUSPEND:
7101 		vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
7102 		break;
7103 	default:
7104 		panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
7105 	}
7106 	mutex_exit(&vd->vdev_initialize_lock);
7107 
7108 	return (0);
7109 }
7110 
7111 int
7112 spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
7113     nvlist_t *vdev_errlist)
7114 {
7115 	int total_errors = 0;
7116 	list_t vd_list;
7117 
7118 	list_create(&vd_list, sizeof (vdev_t),
7119 	    offsetof(vdev_t, vdev_initialize_node));
7120 
7121 	/*
7122 	 * We hold the namespace lock through the whole function
7123 	 * to prevent any changes to the pool while we're starting or
7124 	 * stopping initialization. The config and state locks are held so that
7125 	 * we can properly assess the vdev state before we commit to
7126 	 * the initializing operation.
7127 	 */
7128 	mutex_enter(&spa_namespace_lock);
7129 
7130 	for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
7131 	    pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
7132 		uint64_t vdev_guid = fnvpair_value_uint64(pair);
7133 
7134 		int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
7135 		    &vd_list);
7136 		if (error != 0) {
7137 			char guid_as_str[MAXNAMELEN];
7138 
7139 			(void) snprintf(guid_as_str, sizeof (guid_as_str),
7140 			    "%llu", (unsigned long long)vdev_guid);
7141 			fnvlist_add_int64(vdev_errlist, guid_as_str, error);
7142 			total_errors++;
7143 		}
7144 	}
7145 
7146 	/* Wait for all initialize threads to stop. */
7147 	vdev_initialize_stop_wait(spa, &vd_list);
7148 
7149 	/* Sync out the initializing state */
7150 	txg_wait_synced(spa->spa_dsl_pool, 0);
7151 	mutex_exit(&spa_namespace_lock);
7152 
7153 	list_destroy(&vd_list);
7154 
7155 	return (total_errors);
7156 }
7157 
7158 static int
7159 spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
7160     uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
7161 {
7162 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
7163 
7164 	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
7165 
7166 	/* Look up vdev and ensure it's a leaf. */
7167 	vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7168 	if (vd == NULL || vd->vdev_detached) {
7169 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7170 		return (SET_ERROR(ENODEV));
7171 	} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
7172 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7173 		return (SET_ERROR(EINVAL));
7174 	} else if (!vdev_writeable(vd)) {
7175 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7176 		return (SET_ERROR(EROFS));
7177 	} else if (!vd->vdev_has_trim) {
7178 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7179 		return (SET_ERROR(EOPNOTSUPP));
7180 	} else if (secure && !vd->vdev_has_securetrim) {
7181 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7182 		return (SET_ERROR(EOPNOTSUPP));
7183 	}
7184 	mutex_enter(&vd->vdev_trim_lock);
7185 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7186 
7187 	/*
7188 	 * When we activate a TRIM action we check to see if the
7189 	 * vdev_trim_thread is NULL. We do this instead of using the
7190 	 * vdev_trim_state since there might be a previous TRIM process
7191 	 * which has completed but the thread is not exited.
7192 	 */
7193 	if (cmd_type == POOL_TRIM_START &&
7194 	    (vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) {
7195 		mutex_exit(&vd->vdev_trim_lock);
7196 		return (SET_ERROR(EBUSY));
7197 	} else if (cmd_type == POOL_TRIM_CANCEL &&
7198 	    (vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
7199 	    vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
7200 		mutex_exit(&vd->vdev_trim_lock);
7201 		return (SET_ERROR(ESRCH));
7202 	} else if (cmd_type == POOL_TRIM_SUSPEND &&
7203 	    vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
7204 		mutex_exit(&vd->vdev_trim_lock);
7205 		return (SET_ERROR(ESRCH));
7206 	}
7207 
7208 	switch (cmd_type) {
7209 	case POOL_TRIM_START:
7210 		vdev_trim(vd, rate, partial, secure);
7211 		break;
7212 	case POOL_TRIM_CANCEL:
7213 		vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
7214 		break;
7215 	case POOL_TRIM_SUSPEND:
7216 		vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
7217 		break;
7218 	default:
7219 		panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
7220 	}
7221 	mutex_exit(&vd->vdev_trim_lock);
7222 
7223 	return (0);
7224 }
7225 
7226 /*
7227  * Initiates a manual TRIM for the requested vdevs. This kicks off individual
7228  * TRIM threads for each child vdev.  These threads pass over all of the free
7229  * space in the vdev's metaslabs and issues TRIM commands for that space.
7230  */
7231 int
7232 spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
7233     boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
7234 {
7235 	int total_errors = 0;
7236 	list_t vd_list;
7237 
7238 	list_create(&vd_list, sizeof (vdev_t),
7239 	    offsetof(vdev_t, vdev_trim_node));
7240 
7241 	/*
7242 	 * We hold the namespace lock through the whole function
7243 	 * to prevent any changes to the pool while we're starting or
7244 	 * stopping TRIM. The config and state locks are held so that
7245 	 * we can properly assess the vdev state before we commit to
7246 	 * the TRIM operation.
7247 	 */
7248 	mutex_enter(&spa_namespace_lock);
7249 
7250 	for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
7251 	    pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
7252 		uint64_t vdev_guid = fnvpair_value_uint64(pair);
7253 
7254 		int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
7255 		    rate, partial, secure, &vd_list);
7256 		if (error != 0) {
7257 			char guid_as_str[MAXNAMELEN];
7258 
7259 			(void) snprintf(guid_as_str, sizeof (guid_as_str),
7260 			    "%llu", (unsigned long long)vdev_guid);
7261 			fnvlist_add_int64(vdev_errlist, guid_as_str, error);
7262 			total_errors++;
7263 		}
7264 	}
7265 
7266 	/* Wait for all TRIM threads to stop. */
7267 	vdev_trim_stop_wait(spa, &vd_list);
7268 
7269 	/* Sync out the TRIM state */
7270 	txg_wait_synced(spa->spa_dsl_pool, 0);
7271 	mutex_exit(&spa_namespace_lock);
7272 
7273 	list_destroy(&vd_list);
7274 
7275 	return (total_errors);
7276 }
7277 
7278 /*
7279  * Split a set of devices from their mirrors, and create a new pool from them.
7280  */
7281 int
7282 spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
7283     nvlist_t *props, boolean_t exp)
7284 {
7285 	int error = 0;
7286 	uint64_t txg, *glist;
7287 	spa_t *newspa;
7288 	uint_t c, children, lastlog;
7289 	nvlist_t **child, *nvl, *tmp;
7290 	dmu_tx_t *tx;
7291 	char *altroot = NULL;
7292 	vdev_t *rvd, **vml = NULL;			/* vdev modify list */
7293 	boolean_t activate_slog;
7294 
7295 	ASSERT(spa_writeable(spa));
7296 
7297 	txg = spa_vdev_enter(spa);
7298 
7299 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
7300 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
7301 		error = (spa_has_checkpoint(spa)) ?
7302 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
7303 		return (spa_vdev_exit(spa, NULL, txg, error));
7304 	}
7305 
7306 	/* clear the log and flush everything up to now */
7307 	activate_slog = spa_passivate_log(spa);
7308 	(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
7309 	error = spa_reset_logs(spa);
7310 	txg = spa_vdev_config_enter(spa);
7311 
7312 	if (activate_slog)
7313 		spa_activate_log(spa);
7314 
7315 	if (error != 0)
7316 		return (spa_vdev_exit(spa, NULL, txg, error));
7317 
7318 	/* check new spa name before going any further */
7319 	if (spa_lookup(newname) != NULL)
7320 		return (spa_vdev_exit(spa, NULL, txg, EEXIST));
7321 
7322 	/*
7323 	 * scan through all the children to ensure they're all mirrors
7324 	 */
7325 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
7326 	    nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
7327 	    &children) != 0)
7328 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
7329 
7330 	/* first, check to ensure we've got the right child count */
7331 	rvd = spa->spa_root_vdev;
7332 	lastlog = 0;
7333 	for (c = 0; c < rvd->vdev_children; c++) {
7334 		vdev_t *vd = rvd->vdev_child[c];
7335 
7336 		/* don't count the holes & logs as children */
7337 		if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops &&
7338 		    !vdev_is_concrete(vd))) {
7339 			if (lastlog == 0)
7340 				lastlog = c;
7341 			continue;
7342 		}
7343 
7344 		lastlog = 0;
7345 	}
7346 	if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
7347 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
7348 
7349 	/* next, ensure no spare or cache devices are part of the split */
7350 	if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
7351 	    nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
7352 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
7353 
7354 	vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
7355 	glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
7356 
7357 	/* then, loop over each vdev and validate it */
7358 	for (c = 0; c < children; c++) {
7359 		uint64_t is_hole = 0;
7360 
7361 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
7362 		    &is_hole);
7363 
7364 		if (is_hole != 0) {
7365 			if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
7366 			    spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
7367 				continue;
7368 			} else {
7369 				error = SET_ERROR(EINVAL);
7370 				break;
7371 			}
7372 		}
7373 
7374 		/* deal with indirect vdevs */
7375 		if (spa->spa_root_vdev->vdev_child[c]->vdev_ops ==
7376 		    &vdev_indirect_ops)
7377 			continue;
7378 
7379 		/* which disk is going to be split? */
7380 		if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
7381 		    &glist[c]) != 0) {
7382 			error = SET_ERROR(EINVAL);
7383 			break;
7384 		}
7385 
7386 		/* look it up in the spa */
7387 		vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
7388 		if (vml[c] == NULL) {
7389 			error = SET_ERROR(ENODEV);
7390 			break;
7391 		}
7392 
7393 		/* make sure there's nothing stopping the split */
7394 		if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
7395 		    vml[c]->vdev_islog ||
7396 		    !vdev_is_concrete(vml[c]) ||
7397 		    vml[c]->vdev_isspare ||
7398 		    vml[c]->vdev_isl2cache ||
7399 		    !vdev_writeable(vml[c]) ||
7400 		    vml[c]->vdev_children != 0 ||
7401 		    vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
7402 		    c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
7403 			error = SET_ERROR(EINVAL);
7404 			break;
7405 		}
7406 
7407 		if (vdev_dtl_required(vml[c]) ||
7408 		    vdev_resilver_needed(vml[c], NULL, NULL)) {
7409 			error = SET_ERROR(EBUSY);
7410 			break;
7411 		}
7412 
7413 		/* we need certain info from the top level */
7414 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
7415 		    vml[c]->vdev_top->vdev_ms_array) == 0);
7416 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
7417 		    vml[c]->vdev_top->vdev_ms_shift) == 0);
7418 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
7419 		    vml[c]->vdev_top->vdev_asize) == 0);
7420 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
7421 		    vml[c]->vdev_top->vdev_ashift) == 0);
7422 
7423 		/* transfer per-vdev ZAPs */
7424 		ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
7425 		VERIFY0(nvlist_add_uint64(child[c],
7426 		    ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
7427 
7428 		ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
7429 		VERIFY0(nvlist_add_uint64(child[c],
7430 		    ZPOOL_CONFIG_VDEV_TOP_ZAP,
7431 		    vml[c]->vdev_parent->vdev_top_zap));
7432 	}
7433 
7434 	if (error != 0) {
7435 		kmem_free(vml, children * sizeof (vdev_t *));
7436 		kmem_free(glist, children * sizeof (uint64_t));
7437 		return (spa_vdev_exit(spa, NULL, txg, error));
7438 	}
7439 
7440 	/* stop writers from using the disks */
7441 	for (c = 0; c < children; c++) {
7442 		if (vml[c] != NULL)
7443 			vml[c]->vdev_offline = B_TRUE;
7444 	}
7445 	vdev_reopen(spa->spa_root_vdev);
7446 
7447 	/*
7448 	 * Temporarily record the splitting vdevs in the spa config.  This
7449 	 * will disappear once the config is regenerated.
7450 	 */
7451 	VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
7452 	VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
7453 	    glist, children) == 0);
7454 	kmem_free(glist, children * sizeof (uint64_t));
7455 
7456 	mutex_enter(&spa->spa_props_lock);
7457 	VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
7458 	    nvl) == 0);
7459 	mutex_exit(&spa->spa_props_lock);
7460 	spa->spa_config_splitting = nvl;
7461 	vdev_config_dirty(spa->spa_root_vdev);
7462 
7463 	/* configure and create the new pool */
7464 	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
7465 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
7466 	    exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
7467 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
7468 	    spa_version(spa)) == 0);
7469 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
7470 	    spa->spa_config_txg) == 0);
7471 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
7472 	    spa_generate_guid(NULL)) == 0);
7473 	VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
7474 	(void) nvlist_lookup_string(props,
7475 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
7476 
7477 	/* add the new pool to the namespace */
7478 	newspa = spa_add(newname, config, altroot);
7479 	newspa->spa_avz_action = AVZ_ACTION_REBUILD;
7480 	newspa->spa_config_txg = spa->spa_config_txg;
7481 	spa_set_log_state(newspa, SPA_LOG_CLEAR);
7482 
7483 	/* release the spa config lock, retaining the namespace lock */
7484 	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
7485 
7486 	if (zio_injection_enabled)
7487 		zio_handle_panic_injection(spa, FTAG, 1);
7488 
7489 	spa_activate(newspa, spa_mode_global);
7490 	spa_async_suspend(newspa);
7491 
7492 	/*
7493 	 * Temporarily stop the initializing and TRIM activity.  We set the
7494 	 * state to ACTIVE so that we know to resume initializing or TRIM
7495 	 * once the split has completed.
7496 	 */
7497 	list_t vd_initialize_list;
7498 	list_create(&vd_initialize_list, sizeof (vdev_t),
7499 	    offsetof(vdev_t, vdev_initialize_node));
7500 
7501 	list_t vd_trim_list;
7502 	list_create(&vd_trim_list, sizeof (vdev_t),
7503 	    offsetof(vdev_t, vdev_trim_node));
7504 
7505 	for (c = 0; c < children; c++) {
7506 		if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
7507 			mutex_enter(&vml[c]->vdev_initialize_lock);
7508 			vdev_initialize_stop(vml[c],
7509 			    VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
7510 			mutex_exit(&vml[c]->vdev_initialize_lock);
7511 
7512 			mutex_enter(&vml[c]->vdev_trim_lock);
7513 			vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
7514 			mutex_exit(&vml[c]->vdev_trim_lock);
7515 		}
7516 	}
7517 
7518 	vdev_initialize_stop_wait(spa, &vd_initialize_list);
7519 	vdev_trim_stop_wait(spa, &vd_trim_list);
7520 
7521 	list_destroy(&vd_initialize_list);
7522 	list_destroy(&vd_trim_list);
7523 
7524 	newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
7525 	newspa->spa_is_splitting = B_TRUE;
7526 
7527 	/* create the new pool from the disks of the original pool */
7528 	error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
7529 	if (error)
7530 		goto out;
7531 
7532 	/* if that worked, generate a real config for the new pool */
7533 	if (newspa->spa_root_vdev != NULL) {
7534 		VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
7535 		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
7536 		VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
7537 		    ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
7538 		spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
7539 		    B_TRUE));
7540 	}
7541 
7542 	/* set the props */
7543 	if (props != NULL) {
7544 		spa_configfile_set(newspa, props, B_FALSE);
7545 		error = spa_prop_set(newspa, props);
7546 		if (error)
7547 			goto out;
7548 	}
7549 
7550 	/* flush everything */
7551 	txg = spa_vdev_config_enter(newspa);
7552 	vdev_config_dirty(newspa->spa_root_vdev);
7553 	(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
7554 
7555 	if (zio_injection_enabled)
7556 		zio_handle_panic_injection(spa, FTAG, 2);
7557 
7558 	spa_async_resume(newspa);
7559 
7560 	/* finally, update the original pool's config */
7561 	txg = spa_vdev_config_enter(spa);
7562 	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
7563 	error = dmu_tx_assign(tx, TXG_WAIT);
7564 	if (error != 0)
7565 		dmu_tx_abort(tx);
7566 	for (c = 0; c < children; c++) {
7567 		if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
7568 			vdev_t *tvd = vml[c]->vdev_top;
7569 
7570 			/*
7571 			 * Need to be sure the detachable VDEV is not
7572 			 * on any *other* txg's DTL list to prevent it
7573 			 * from being accessed after it's freed.
7574 			 */
7575 			for (int t = 0; t < TXG_SIZE; t++) {
7576 				(void) txg_list_remove_this(
7577 				    &tvd->vdev_dtl_list, vml[c], t);
7578 			}
7579 
7580 			vdev_split(vml[c]);
7581 			if (error == 0)
7582 				spa_history_log_internal(spa, "detach", tx,
7583 				    "vdev=%s", vml[c]->vdev_path);
7584 
7585 			vdev_free(vml[c]);
7586 		}
7587 	}
7588 	spa->spa_avz_action = AVZ_ACTION_REBUILD;
7589 	vdev_config_dirty(spa->spa_root_vdev);
7590 	spa->spa_config_splitting = NULL;
7591 	nvlist_free(nvl);
7592 	if (error == 0)
7593 		dmu_tx_commit(tx);
7594 	(void) spa_vdev_exit(spa, NULL, txg, 0);
7595 
7596 	if (zio_injection_enabled)
7597 		zio_handle_panic_injection(spa, FTAG, 3);
7598 
7599 	/* split is complete; log a history record */
7600 	spa_history_log_internal(newspa, "split", NULL,
7601 	    "from pool %s", spa_name(spa));
7602 
7603 	newspa->spa_is_splitting = B_FALSE;
7604 	kmem_free(vml, children * sizeof (vdev_t *));
7605 
7606 	/* if we're not going to mount the filesystems in userland, export */
7607 	if (exp)
7608 		error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
7609 		    B_FALSE, B_FALSE);
7610 
7611 	return (error);
7612 
7613 out:
7614 	spa_unload(newspa);
7615 	spa_deactivate(newspa);
7616 	spa_remove(newspa);
7617 
7618 	txg = spa_vdev_config_enter(spa);
7619 
7620 	/* re-online all offlined disks */
7621 	for (c = 0; c < children; c++) {
7622 		if (vml[c] != NULL)
7623 			vml[c]->vdev_offline = B_FALSE;
7624 	}
7625 
7626 	/* restart initializing or trimming disks as necessary */
7627 	spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
7628 	spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
7629 	spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
7630 
7631 	vdev_reopen(spa->spa_root_vdev);
7632 
7633 	nvlist_free(spa->spa_config_splitting);
7634 	spa->spa_config_splitting = NULL;
7635 	(void) spa_vdev_exit(spa, NULL, txg, error);
7636 
7637 	kmem_free(vml, children * sizeof (vdev_t *));
7638 	return (error);
7639 }
7640 
7641 /*
7642  * Find any device that's done replacing, or a vdev marked 'unspare' that's
7643  * currently spared, so we can detach it.
7644  */
7645 static vdev_t *
7646 spa_vdev_resilver_done_hunt(vdev_t *vd)
7647 {
7648 	vdev_t *newvd, *oldvd;
7649 
7650 	for (int c = 0; c < vd->vdev_children; c++) {
7651 		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
7652 		if (oldvd != NULL)
7653 			return (oldvd);
7654 	}
7655 
7656 	/*
7657 	 * Check for a completed replacement.  We always consider the first
7658 	 * vdev in the list to be the oldest vdev, and the last one to be
7659 	 * the newest (see spa_vdev_attach() for how that works).  In
7660 	 * the case where the newest vdev is faulted, we will not automatically
7661 	 * remove it after a resilver completes.  This is OK as it will require
7662 	 * user intervention to determine which disk the admin wishes to keep.
7663 	 */
7664 	if (vd->vdev_ops == &vdev_replacing_ops) {
7665 		ASSERT(vd->vdev_children > 1);
7666 
7667 		newvd = vd->vdev_child[vd->vdev_children - 1];
7668 		oldvd = vd->vdev_child[0];
7669 
7670 		if (vdev_dtl_empty(newvd, DTL_MISSING) &&
7671 		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
7672 		    !vdev_dtl_required(oldvd))
7673 			return (oldvd);
7674 	}
7675 
7676 	/*
7677 	 * Check for a completed resilver with the 'unspare' flag set.
7678 	 * Also potentially update faulted state.
7679 	 */
7680 	if (vd->vdev_ops == &vdev_spare_ops) {
7681 		vdev_t *first = vd->vdev_child[0];
7682 		vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
7683 
7684 		if (last->vdev_unspare) {
7685 			oldvd = first;
7686 			newvd = last;
7687 		} else if (first->vdev_unspare) {
7688 			oldvd = last;
7689 			newvd = first;
7690 		} else {
7691 			oldvd = NULL;
7692 		}
7693 
7694 		if (oldvd != NULL &&
7695 		    vdev_dtl_empty(newvd, DTL_MISSING) &&
7696 		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
7697 		    !vdev_dtl_required(oldvd))
7698 			return (oldvd);
7699 
7700 		vdev_propagate_state(vd);
7701 
7702 		/*
7703 		 * If there are more than two spares attached to a disk,
7704 		 * and those spares are not required, then we want to
7705 		 * attempt to free them up now so that they can be used
7706 		 * by other pools.  Once we're back down to a single
7707 		 * disk+spare, we stop removing them.
7708 		 */
7709 		if (vd->vdev_children > 2) {
7710 			newvd = vd->vdev_child[1];
7711 
7712 			if (newvd->vdev_isspare && last->vdev_isspare &&
7713 			    vdev_dtl_empty(last, DTL_MISSING) &&
7714 			    vdev_dtl_empty(last, DTL_OUTAGE) &&
7715 			    !vdev_dtl_required(newvd))
7716 				return (newvd);
7717 		}
7718 	}
7719 
7720 	return (NULL);
7721 }
7722 
7723 static void
7724 spa_vdev_resilver_done(spa_t *spa)
7725 {
7726 	vdev_t *vd, *pvd, *ppvd;
7727 	uint64_t guid, sguid, pguid, ppguid;
7728 
7729 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7730 
7731 	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
7732 		pvd = vd->vdev_parent;
7733 		ppvd = pvd->vdev_parent;
7734 		guid = vd->vdev_guid;
7735 		pguid = pvd->vdev_guid;
7736 		ppguid = ppvd->vdev_guid;
7737 		sguid = 0;
7738 		/*
7739 		 * If we have just finished replacing a hot spared device, then
7740 		 * we need to detach the parent's first child (the original hot
7741 		 * spare) as well.
7742 		 */
7743 		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
7744 		    ppvd->vdev_children == 2) {
7745 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
7746 			sguid = ppvd->vdev_child[1]->vdev_guid;
7747 		}
7748 		ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
7749 
7750 		spa_config_exit(spa, SCL_ALL, FTAG);
7751 		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
7752 			return;
7753 		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
7754 			return;
7755 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7756 	}
7757 
7758 	spa_config_exit(spa, SCL_ALL, FTAG);
7759 
7760 	/*
7761 	 * If a detach was not performed above replace waiters will not have
7762 	 * been notified.  In which case we must do so now.
7763 	 */
7764 	spa_notify_waiters(spa);
7765 }
7766 
7767 /*
7768  * Update the stored path or FRU for this vdev.
7769  */
7770 static int
7771 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
7772     boolean_t ispath)
7773 {
7774 	vdev_t *vd;
7775 	boolean_t sync = B_FALSE;
7776 
7777 	ASSERT(spa_writeable(spa));
7778 
7779 	spa_vdev_state_enter(spa, SCL_ALL);
7780 
7781 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
7782 		return (spa_vdev_state_exit(spa, NULL, ENOENT));
7783 
7784 	if (!vd->vdev_ops->vdev_op_leaf)
7785 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
7786 
7787 	if (ispath) {
7788 		if (strcmp(value, vd->vdev_path) != 0) {
7789 			spa_strfree(vd->vdev_path);
7790 			vd->vdev_path = spa_strdup(value);
7791 			sync = B_TRUE;
7792 		}
7793 	} else {
7794 		if (vd->vdev_fru == NULL) {
7795 			vd->vdev_fru = spa_strdup(value);
7796 			sync = B_TRUE;
7797 		} else if (strcmp(value, vd->vdev_fru) != 0) {
7798 			spa_strfree(vd->vdev_fru);
7799 			vd->vdev_fru = spa_strdup(value);
7800 			sync = B_TRUE;
7801 		}
7802 	}
7803 
7804 	return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
7805 }
7806 
7807 int
7808 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
7809 {
7810 	return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
7811 }
7812 
7813 int
7814 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
7815 {
7816 	return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
7817 }
7818 
7819 /*
7820  * ==========================================================================
7821  * SPA Scanning
7822  * ==========================================================================
7823  */
7824 int
7825 spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
7826 {
7827 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
7828 
7829 	if (dsl_scan_resilvering(spa->spa_dsl_pool))
7830 		return (SET_ERROR(EBUSY));
7831 
7832 	return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
7833 }
7834 
7835 int
7836 spa_scan_stop(spa_t *spa)
7837 {
7838 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
7839 	if (dsl_scan_resilvering(spa->spa_dsl_pool))
7840 		return (SET_ERROR(EBUSY));
7841 	return (dsl_scan_cancel(spa->spa_dsl_pool));
7842 }
7843 
7844 int
7845 spa_scan(spa_t *spa, pool_scan_func_t func)
7846 {
7847 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
7848 
7849 	if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
7850 		return (SET_ERROR(ENOTSUP));
7851 
7852 	if (func == POOL_SCAN_RESILVER &&
7853 	    !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
7854 		return (SET_ERROR(ENOTSUP));
7855 
7856 	/*
7857 	 * If a resilver was requested, but there is no DTL on a
7858 	 * writeable leaf device, we have nothing to do.
7859 	 */
7860 	if (func == POOL_SCAN_RESILVER &&
7861 	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
7862 		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
7863 		return (0);
7864 	}
7865 
7866 	return (dsl_scan(spa->spa_dsl_pool, func));
7867 }
7868 
7869 /*
7870  * ==========================================================================
7871  * SPA async task processing
7872  * ==========================================================================
7873  */
7874 
7875 static void
7876 spa_async_remove(spa_t *spa, vdev_t *vd)
7877 {
7878 	if (vd->vdev_remove_wanted) {
7879 		vd->vdev_remove_wanted = B_FALSE;
7880 		vd->vdev_delayed_close = B_FALSE;
7881 		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
7882 
7883 		/*
7884 		 * We want to clear the stats, but we don't want to do a full
7885 		 * vdev_clear() as that will cause us to throw away
7886 		 * degraded/faulted state as well as attempt to reopen the
7887 		 * device, all of which is a waste.
7888 		 */
7889 		vd->vdev_stat.vs_read_errors = 0;
7890 		vd->vdev_stat.vs_write_errors = 0;
7891 		vd->vdev_stat.vs_checksum_errors = 0;
7892 
7893 		vdev_state_dirty(vd->vdev_top);
7894 	}
7895 
7896 	for (int c = 0; c < vd->vdev_children; c++)
7897 		spa_async_remove(spa, vd->vdev_child[c]);
7898 }
7899 
7900 static void
7901 spa_async_probe(spa_t *spa, vdev_t *vd)
7902 {
7903 	if (vd->vdev_probe_wanted) {
7904 		vd->vdev_probe_wanted = B_FALSE;
7905 		vdev_reopen(vd);	/* vdev_open() does the actual probe */
7906 	}
7907 
7908 	for (int c = 0; c < vd->vdev_children; c++)
7909 		spa_async_probe(spa, vd->vdev_child[c]);
7910 }
7911 
7912 static void
7913 spa_async_autoexpand(spa_t *spa, vdev_t *vd)
7914 {
7915 	if (!spa->spa_autoexpand)
7916 		return;
7917 
7918 	for (int c = 0; c < vd->vdev_children; c++) {
7919 		vdev_t *cvd = vd->vdev_child[c];
7920 		spa_async_autoexpand(spa, cvd);
7921 	}
7922 
7923 	if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
7924 		return;
7925 
7926 	spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
7927 }
7928 
7929 static void
7930 spa_async_thread(void *arg)
7931 {
7932 	spa_t *spa = (spa_t *)arg;
7933 	dsl_pool_t *dp = spa->spa_dsl_pool;
7934 	int tasks;
7935 
7936 	ASSERT(spa->spa_sync_on);
7937 
7938 	mutex_enter(&spa->spa_async_lock);
7939 	tasks = spa->spa_async_tasks;
7940 	spa->spa_async_tasks = 0;
7941 	mutex_exit(&spa->spa_async_lock);
7942 
7943 	/*
7944 	 * See if the config needs to be updated.
7945 	 */
7946 	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
7947 		uint64_t old_space, new_space;
7948 
7949 		mutex_enter(&spa_namespace_lock);
7950 		old_space = metaslab_class_get_space(spa_normal_class(spa));
7951 		old_space += metaslab_class_get_space(spa_special_class(spa));
7952 		old_space += metaslab_class_get_space(spa_dedup_class(spa));
7953 
7954 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
7955 
7956 		new_space = metaslab_class_get_space(spa_normal_class(spa));
7957 		new_space += metaslab_class_get_space(spa_special_class(spa));
7958 		new_space += metaslab_class_get_space(spa_dedup_class(spa));
7959 		mutex_exit(&spa_namespace_lock);
7960 
7961 		/*
7962 		 * If the pool grew as a result of the config update,
7963 		 * then log an internal history event.
7964 		 */
7965 		if (new_space != old_space) {
7966 			spa_history_log_internal(spa, "vdev online", NULL,
7967 			    "pool '%s' size: %llu(+%llu)",
7968 			    spa_name(spa), (u_longlong_t)new_space,
7969 			    (u_longlong_t)(new_space - old_space));
7970 		}
7971 	}
7972 
7973 	/*
7974 	 * See if any devices need to be marked REMOVED.
7975 	 */
7976 	if (tasks & SPA_ASYNC_REMOVE) {
7977 		spa_vdev_state_enter(spa, SCL_NONE);
7978 		spa_async_remove(spa, spa->spa_root_vdev);
7979 		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
7980 			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
7981 		for (int i = 0; i < spa->spa_spares.sav_count; i++)
7982 			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
7983 		(void) spa_vdev_state_exit(spa, NULL, 0);
7984 	}
7985 
7986 	if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
7987 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
7988 		spa_async_autoexpand(spa, spa->spa_root_vdev);
7989 		spa_config_exit(spa, SCL_CONFIG, FTAG);
7990 	}
7991 
7992 	/*
7993 	 * See if any devices need to be probed.
7994 	 */
7995 	if (tasks & SPA_ASYNC_PROBE) {
7996 		spa_vdev_state_enter(spa, SCL_NONE);
7997 		spa_async_probe(spa, spa->spa_root_vdev);
7998 		(void) spa_vdev_state_exit(spa, NULL, 0);
7999 	}
8000 
8001 	/*
8002 	 * If any devices are done replacing, detach them.
8003 	 */
8004 	if (tasks & SPA_ASYNC_RESILVER_DONE)
8005 		spa_vdev_resilver_done(spa);
8006 
8007 	/*
8008 	 * If any devices are done replacing, detach them.  Then if no
8009 	 * top-level vdevs are rebuilding attempt to kick off a scrub.
8010 	 */
8011 	if (tasks & SPA_ASYNC_REBUILD_DONE) {
8012 		spa_vdev_resilver_done(spa);
8013 
8014 		if (!vdev_rebuild_active(spa->spa_root_vdev))
8015 			(void) dsl_scan(spa->spa_dsl_pool, POOL_SCAN_SCRUB);
8016 	}
8017 
8018 	/*
8019 	 * Kick off a resilver.
8020 	 */
8021 	if (tasks & SPA_ASYNC_RESILVER &&
8022 	    !vdev_rebuild_active(spa->spa_root_vdev) &&
8023 	    (!dsl_scan_resilvering(dp) ||
8024 	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
8025 		dsl_scan_restart_resilver(dp, 0);
8026 
8027 	if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
8028 		mutex_enter(&spa_namespace_lock);
8029 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8030 		vdev_initialize_restart(spa->spa_root_vdev);
8031 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8032 		mutex_exit(&spa_namespace_lock);
8033 	}
8034 
8035 	if (tasks & SPA_ASYNC_TRIM_RESTART) {
8036 		mutex_enter(&spa_namespace_lock);
8037 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8038 		vdev_trim_restart(spa->spa_root_vdev);
8039 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8040 		mutex_exit(&spa_namespace_lock);
8041 	}
8042 
8043 	if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
8044 		mutex_enter(&spa_namespace_lock);
8045 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8046 		vdev_autotrim_restart(spa);
8047 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8048 		mutex_exit(&spa_namespace_lock);
8049 	}
8050 
8051 	/*
8052 	 * Kick off L2 cache whole device TRIM.
8053 	 */
8054 	if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
8055 		mutex_enter(&spa_namespace_lock);
8056 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8057 		vdev_trim_l2arc(spa);
8058 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8059 		mutex_exit(&spa_namespace_lock);
8060 	}
8061 
8062 	/*
8063 	 * Kick off L2 cache rebuilding.
8064 	 */
8065 	if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
8066 		mutex_enter(&spa_namespace_lock);
8067 		spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
8068 		l2arc_spa_rebuild_start(spa);
8069 		spa_config_exit(spa, SCL_L2ARC, FTAG);
8070 		mutex_exit(&spa_namespace_lock);
8071 	}
8072 
8073 	/*
8074 	 * Let the world know that we're done.
8075 	 */
8076 	mutex_enter(&spa->spa_async_lock);
8077 	spa->spa_async_thread = NULL;
8078 	cv_broadcast(&spa->spa_async_cv);
8079 	mutex_exit(&spa->spa_async_lock);
8080 	thread_exit();
8081 }
8082 
8083 void
8084 spa_async_suspend(spa_t *spa)
8085 {
8086 	mutex_enter(&spa->spa_async_lock);
8087 	spa->spa_async_suspended++;
8088 	while (spa->spa_async_thread != NULL)
8089 		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
8090 	mutex_exit(&spa->spa_async_lock);
8091 
8092 	spa_vdev_remove_suspend(spa);
8093 
8094 	zthr_t *condense_thread = spa->spa_condense_zthr;
8095 	if (condense_thread != NULL)
8096 		zthr_cancel(condense_thread);
8097 
8098 	zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
8099 	if (discard_thread != NULL)
8100 		zthr_cancel(discard_thread);
8101 
8102 	zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
8103 	if (ll_delete_thread != NULL)
8104 		zthr_cancel(ll_delete_thread);
8105 
8106 	zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
8107 	if (ll_condense_thread != NULL)
8108 		zthr_cancel(ll_condense_thread);
8109 }
8110 
8111 void
8112 spa_async_resume(spa_t *spa)
8113 {
8114 	mutex_enter(&spa->spa_async_lock);
8115 	ASSERT(spa->spa_async_suspended != 0);
8116 	spa->spa_async_suspended--;
8117 	mutex_exit(&spa->spa_async_lock);
8118 	spa_restart_removal(spa);
8119 
8120 	zthr_t *condense_thread = spa->spa_condense_zthr;
8121 	if (condense_thread != NULL)
8122 		zthr_resume(condense_thread);
8123 
8124 	zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
8125 	if (discard_thread != NULL)
8126 		zthr_resume(discard_thread);
8127 
8128 	zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
8129 	if (ll_delete_thread != NULL)
8130 		zthr_resume(ll_delete_thread);
8131 
8132 	zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
8133 	if (ll_condense_thread != NULL)
8134 		zthr_resume(ll_condense_thread);
8135 }
8136 
8137 static boolean_t
8138 spa_async_tasks_pending(spa_t *spa)
8139 {
8140 	uint_t non_config_tasks;
8141 	uint_t config_task;
8142 	boolean_t config_task_suspended;
8143 
8144 	non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
8145 	config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
8146 	if (spa->spa_ccw_fail_time == 0) {
8147 		config_task_suspended = B_FALSE;
8148 	} else {
8149 		config_task_suspended =
8150 		    (gethrtime() - spa->spa_ccw_fail_time) <
8151 		    ((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
8152 	}
8153 
8154 	return (non_config_tasks || (config_task && !config_task_suspended));
8155 }
8156 
8157 static void
8158 spa_async_dispatch(spa_t *spa)
8159 {
8160 	mutex_enter(&spa->spa_async_lock);
8161 	if (spa_async_tasks_pending(spa) &&
8162 	    !spa->spa_async_suspended &&
8163 	    spa->spa_async_thread == NULL)
8164 		spa->spa_async_thread = thread_create(NULL, 0,
8165 		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
8166 	mutex_exit(&spa->spa_async_lock);
8167 }
8168 
8169 void
8170 spa_async_request(spa_t *spa, int task)
8171 {
8172 	zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
8173 	mutex_enter(&spa->spa_async_lock);
8174 	spa->spa_async_tasks |= task;
8175 	mutex_exit(&spa->spa_async_lock);
8176 }
8177 
8178 int
8179 spa_async_tasks(spa_t *spa)
8180 {
8181 	return (spa->spa_async_tasks);
8182 }
8183 
8184 /*
8185  * ==========================================================================
8186  * SPA syncing routines
8187  * ==========================================================================
8188  */
8189 
8190 
8191 static int
8192 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
8193     dmu_tx_t *tx)
8194 {
8195 	bpobj_t *bpo = arg;
8196 	bpobj_enqueue(bpo, bp, bp_freed, tx);
8197 	return (0);
8198 }
8199 
8200 int
8201 bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
8202 {
8203 	return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
8204 }
8205 
8206 int
8207 bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
8208 {
8209 	return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
8210 }
8211 
8212 static int
8213 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
8214 {
8215 	zio_t *pio = arg;
8216 
8217 	zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
8218 	    pio->io_flags));
8219 	return (0);
8220 }
8221 
8222 static int
8223 bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
8224     dmu_tx_t *tx)
8225 {
8226 	ASSERT(!bp_freed);
8227 	return (spa_free_sync_cb(arg, bp, tx));
8228 }
8229 
8230 /*
8231  * Note: this simple function is not inlined to make it easier to dtrace the
8232  * amount of time spent syncing frees.
8233  */
8234 static void
8235 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
8236 {
8237 	zio_t *zio = zio_root(spa, NULL, NULL, 0);
8238 	bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
8239 	VERIFY(zio_wait(zio) == 0);
8240 }
8241 
8242 /*
8243  * Note: this simple function is not inlined to make it easier to dtrace the
8244  * amount of time spent syncing deferred frees.
8245  */
8246 static void
8247 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
8248 {
8249 	if (spa_sync_pass(spa) != 1)
8250 		return;
8251 
8252 	/*
8253 	 * Note:
8254 	 * If the log space map feature is active, we stop deferring
8255 	 * frees to the next TXG and therefore running this function
8256 	 * would be considered a no-op as spa_deferred_bpobj should
8257 	 * not have any entries.
8258 	 *
8259 	 * That said we run this function anyway (instead of returning
8260 	 * immediately) for the edge-case scenario where we just
8261 	 * activated the log space map feature in this TXG but we have
8262 	 * deferred frees from the previous TXG.
8263 	 */
8264 	zio_t *zio = zio_root(spa, NULL, NULL, 0);
8265 	VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
8266 	    bpobj_spa_free_sync_cb, zio, tx), ==, 0);
8267 	VERIFY0(zio_wait(zio));
8268 }
8269 
8270 static void
8271 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
8272 {
8273 	char *packed = NULL;
8274 	size_t bufsize;
8275 	size_t nvsize = 0;
8276 	dmu_buf_t *db;
8277 
8278 	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
8279 
8280 	/*
8281 	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
8282 	 * information.  This avoids the dmu_buf_will_dirty() path and
8283 	 * saves us a pre-read to get data we don't actually care about.
8284 	 */
8285 	bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
8286 	packed = vmem_alloc(bufsize, KM_SLEEP);
8287 
8288 	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
8289 	    KM_SLEEP) == 0);
8290 	bzero(packed + nvsize, bufsize - nvsize);
8291 
8292 	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
8293 
8294 	vmem_free(packed, bufsize);
8295 
8296 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
8297 	dmu_buf_will_dirty(db, tx);
8298 	*(uint64_t *)db->db_data = nvsize;
8299 	dmu_buf_rele(db, FTAG);
8300 }
8301 
8302 static void
8303 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
8304     const char *config, const char *entry)
8305 {
8306 	nvlist_t *nvroot;
8307 	nvlist_t **list;
8308 	int i;
8309 
8310 	if (!sav->sav_sync)
8311 		return;
8312 
8313 	/*
8314 	 * Update the MOS nvlist describing the list of available devices.
8315 	 * spa_validate_aux() will have already made sure this nvlist is
8316 	 * valid and the vdevs are labeled appropriately.
8317 	 */
8318 	if (sav->sav_object == 0) {
8319 		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
8320 		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
8321 		    sizeof (uint64_t), tx);
8322 		VERIFY(zap_update(spa->spa_meta_objset,
8323 		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
8324 		    &sav->sav_object, tx) == 0);
8325 	}
8326 
8327 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
8328 	if (sav->sav_count == 0) {
8329 		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
8330 	} else {
8331 		list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
8332 		for (i = 0; i < sav->sav_count; i++)
8333 			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
8334 			    B_FALSE, VDEV_CONFIG_L2CACHE);
8335 		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
8336 		    sav->sav_count) == 0);
8337 		for (i = 0; i < sav->sav_count; i++)
8338 			nvlist_free(list[i]);
8339 		kmem_free(list, sav->sav_count * sizeof (void *));
8340 	}
8341 
8342 	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
8343 	nvlist_free(nvroot);
8344 
8345 	sav->sav_sync = B_FALSE;
8346 }
8347 
8348 /*
8349  * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
8350  * The all-vdev ZAP must be empty.
8351  */
8352 static void
8353 spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
8354 {
8355 	spa_t *spa = vd->vdev_spa;
8356 
8357 	if (vd->vdev_top_zap != 0) {
8358 		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
8359 		    vd->vdev_top_zap, tx));
8360 	}
8361 	if (vd->vdev_leaf_zap != 0) {
8362 		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
8363 		    vd->vdev_leaf_zap, tx));
8364 	}
8365 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
8366 		spa_avz_build(vd->vdev_child[i], avz, tx);
8367 	}
8368 }
8369 
8370 static void
8371 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
8372 {
8373 	nvlist_t *config;
8374 
8375 	/*
8376 	 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
8377 	 * its config may not be dirty but we still need to build per-vdev ZAPs.
8378 	 * Similarly, if the pool is being assembled (e.g. after a split), we
8379 	 * need to rebuild the AVZ although the config may not be dirty.
8380 	 */
8381 	if (list_is_empty(&spa->spa_config_dirty_list) &&
8382 	    spa->spa_avz_action == AVZ_ACTION_NONE)
8383 		return;
8384 
8385 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
8386 
8387 	ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
8388 	    spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
8389 	    spa->spa_all_vdev_zaps != 0);
8390 
8391 	if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
8392 		/* Make and build the new AVZ */
8393 		uint64_t new_avz = zap_create(spa->spa_meta_objset,
8394 		    DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
8395 		spa_avz_build(spa->spa_root_vdev, new_avz, tx);
8396 
8397 		/* Diff old AVZ with new one */
8398 		zap_cursor_t zc;
8399 		zap_attribute_t za;
8400 
8401 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
8402 		    spa->spa_all_vdev_zaps);
8403 		    zap_cursor_retrieve(&zc, &za) == 0;
8404 		    zap_cursor_advance(&zc)) {
8405 			uint64_t vdzap = za.za_first_integer;
8406 			if (zap_lookup_int(spa->spa_meta_objset, new_avz,
8407 			    vdzap) == ENOENT) {
8408 				/*
8409 				 * ZAP is listed in old AVZ but not in new one;
8410 				 * destroy it
8411 				 */
8412 				VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
8413 				    tx));
8414 			}
8415 		}
8416 
8417 		zap_cursor_fini(&zc);
8418 
8419 		/* Destroy the old AVZ */
8420 		VERIFY0(zap_destroy(spa->spa_meta_objset,
8421 		    spa->spa_all_vdev_zaps, tx));
8422 
8423 		/* Replace the old AVZ in the dir obj with the new one */
8424 		VERIFY0(zap_update(spa->spa_meta_objset,
8425 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
8426 		    sizeof (new_avz), 1, &new_avz, tx));
8427 
8428 		spa->spa_all_vdev_zaps = new_avz;
8429 	} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
8430 		zap_cursor_t zc;
8431 		zap_attribute_t za;
8432 
8433 		/* Walk through the AVZ and destroy all listed ZAPs */
8434 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
8435 		    spa->spa_all_vdev_zaps);
8436 		    zap_cursor_retrieve(&zc, &za) == 0;
8437 		    zap_cursor_advance(&zc)) {
8438 			uint64_t zap = za.za_first_integer;
8439 			VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
8440 		}
8441 
8442 		zap_cursor_fini(&zc);
8443 
8444 		/* Destroy and unlink the AVZ itself */
8445 		VERIFY0(zap_destroy(spa->spa_meta_objset,
8446 		    spa->spa_all_vdev_zaps, tx));
8447 		VERIFY0(zap_remove(spa->spa_meta_objset,
8448 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
8449 		spa->spa_all_vdev_zaps = 0;
8450 	}
8451 
8452 	if (spa->spa_all_vdev_zaps == 0) {
8453 		spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
8454 		    DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
8455 		    DMU_POOL_VDEV_ZAP_MAP, tx);
8456 	}
8457 	spa->spa_avz_action = AVZ_ACTION_NONE;
8458 
8459 	/* Create ZAPs for vdevs that don't have them. */
8460 	vdev_construct_zaps(spa->spa_root_vdev, tx);
8461 
8462 	config = spa_config_generate(spa, spa->spa_root_vdev,
8463 	    dmu_tx_get_txg(tx), B_FALSE);
8464 
8465 	/*
8466 	 * If we're upgrading the spa version then make sure that
8467 	 * the config object gets updated with the correct version.
8468 	 */
8469 	if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
8470 		fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
8471 		    spa->spa_uberblock.ub_version);
8472 
8473 	spa_config_exit(spa, SCL_STATE, FTAG);
8474 
8475 	nvlist_free(spa->spa_config_syncing);
8476 	spa->spa_config_syncing = config;
8477 
8478 	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
8479 }
8480 
8481 static void
8482 spa_sync_version(void *arg, dmu_tx_t *tx)
8483 {
8484 	uint64_t *versionp = arg;
8485 	uint64_t version = *versionp;
8486 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
8487 
8488 	/*
8489 	 * Setting the version is special cased when first creating the pool.
8490 	 */
8491 	ASSERT(tx->tx_txg != TXG_INITIAL);
8492 
8493 	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
8494 	ASSERT(version >= spa_version(spa));
8495 
8496 	spa->spa_uberblock.ub_version = version;
8497 	vdev_config_dirty(spa->spa_root_vdev);
8498 	spa_history_log_internal(spa, "set", tx, "version=%lld",
8499 	    (longlong_t)version);
8500 }
8501 
8502 /*
8503  * Set zpool properties.
8504  */
8505 static void
8506 spa_sync_props(void *arg, dmu_tx_t *tx)
8507 {
8508 	nvlist_t *nvp = arg;
8509 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
8510 	objset_t *mos = spa->spa_meta_objset;
8511 	nvpair_t *elem = NULL;
8512 
8513 	mutex_enter(&spa->spa_props_lock);
8514 
8515 	while ((elem = nvlist_next_nvpair(nvp, elem))) {
8516 		uint64_t intval;
8517 		char *strval, *fname;
8518 		zpool_prop_t prop;
8519 		const char *propname;
8520 		zprop_type_t proptype;
8521 		spa_feature_t fid;
8522 
8523 		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
8524 		case ZPOOL_PROP_INVAL:
8525 			/*
8526 			 * We checked this earlier in spa_prop_validate().
8527 			 */
8528 			ASSERT(zpool_prop_feature(nvpair_name(elem)));
8529 
8530 			fname = strchr(nvpair_name(elem), '@') + 1;
8531 			VERIFY0(zfeature_lookup_name(fname, &fid));
8532 
8533 			spa_feature_enable(spa, fid, tx);
8534 			spa_history_log_internal(spa, "set", tx,
8535 			    "%s=enabled", nvpair_name(elem));
8536 			break;
8537 
8538 		case ZPOOL_PROP_VERSION:
8539 			intval = fnvpair_value_uint64(elem);
8540 			/*
8541 			 * The version is synced separately before other
8542 			 * properties and should be correct by now.
8543 			 */
8544 			ASSERT3U(spa_version(spa), >=, intval);
8545 			break;
8546 
8547 		case ZPOOL_PROP_ALTROOT:
8548 			/*
8549 			 * 'altroot' is a non-persistent property. It should
8550 			 * have been set temporarily at creation or import time.
8551 			 */
8552 			ASSERT(spa->spa_root != NULL);
8553 			break;
8554 
8555 		case ZPOOL_PROP_READONLY:
8556 		case ZPOOL_PROP_CACHEFILE:
8557 			/*
8558 			 * 'readonly' and 'cachefile' are also non-persistent
8559 			 * properties.
8560 			 */
8561 			break;
8562 		case ZPOOL_PROP_COMMENT:
8563 			strval = fnvpair_value_string(elem);
8564 			if (spa->spa_comment != NULL)
8565 				spa_strfree(spa->spa_comment);
8566 			spa->spa_comment = spa_strdup(strval);
8567 			/*
8568 			 * We need to dirty the configuration on all the vdevs
8569 			 * so that their labels get updated.  It's unnecessary
8570 			 * to do this for pool creation since the vdev's
8571 			 * configuration has already been dirtied.
8572 			 */
8573 			if (tx->tx_txg != TXG_INITIAL)
8574 				vdev_config_dirty(spa->spa_root_vdev);
8575 			spa_history_log_internal(spa, "set", tx,
8576 			    "%s=%s", nvpair_name(elem), strval);
8577 			break;
8578 		default:
8579 			/*
8580 			 * Set pool property values in the poolprops mos object.
8581 			 */
8582 			if (spa->spa_pool_props_object == 0) {
8583 				spa->spa_pool_props_object =
8584 				    zap_create_link(mos, DMU_OT_POOL_PROPS,
8585 				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
8586 				    tx);
8587 			}
8588 
8589 			/* normalize the property name */
8590 			propname = zpool_prop_to_name(prop);
8591 			proptype = zpool_prop_get_type(prop);
8592 
8593 			if (nvpair_type(elem) == DATA_TYPE_STRING) {
8594 				ASSERT(proptype == PROP_TYPE_STRING);
8595 				strval = fnvpair_value_string(elem);
8596 				VERIFY0(zap_update(mos,
8597 				    spa->spa_pool_props_object, propname,
8598 				    1, strlen(strval) + 1, strval, tx));
8599 				spa_history_log_internal(spa, "set", tx,
8600 				    "%s=%s", nvpair_name(elem), strval);
8601 			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
8602 				intval = fnvpair_value_uint64(elem);
8603 
8604 				if (proptype == PROP_TYPE_INDEX) {
8605 					const char *unused;
8606 					VERIFY0(zpool_prop_index_to_string(
8607 					    prop, intval, &unused));
8608 				}
8609 				VERIFY0(zap_update(mos,
8610 				    spa->spa_pool_props_object, propname,
8611 				    8, 1, &intval, tx));
8612 				spa_history_log_internal(spa, "set", tx,
8613 				    "%s=%lld", nvpair_name(elem),
8614 				    (longlong_t)intval);
8615 			} else {
8616 				ASSERT(0); /* not allowed */
8617 			}
8618 
8619 			switch (prop) {
8620 			case ZPOOL_PROP_DELEGATION:
8621 				spa->spa_delegation = intval;
8622 				break;
8623 			case ZPOOL_PROP_BOOTFS:
8624 				spa->spa_bootfs = intval;
8625 				break;
8626 			case ZPOOL_PROP_FAILUREMODE:
8627 				spa->spa_failmode = intval;
8628 				break;
8629 			case ZPOOL_PROP_AUTOTRIM:
8630 				spa->spa_autotrim = intval;
8631 				spa_async_request(spa,
8632 				    SPA_ASYNC_AUTOTRIM_RESTART);
8633 				break;
8634 			case ZPOOL_PROP_AUTOEXPAND:
8635 				spa->spa_autoexpand = intval;
8636 				if (tx->tx_txg != TXG_INITIAL)
8637 					spa_async_request(spa,
8638 					    SPA_ASYNC_AUTOEXPAND);
8639 				break;
8640 			case ZPOOL_PROP_MULTIHOST:
8641 				spa->spa_multihost = intval;
8642 				break;
8643 			default:
8644 				break;
8645 			}
8646 		}
8647 
8648 	}
8649 
8650 	mutex_exit(&spa->spa_props_lock);
8651 }
8652 
8653 /*
8654  * Perform one-time upgrade on-disk changes.  spa_version() does not
8655  * reflect the new version this txg, so there must be no changes this
8656  * txg to anything that the upgrade code depends on after it executes.
8657  * Therefore this must be called after dsl_pool_sync() does the sync
8658  * tasks.
8659  */
8660 static void
8661 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
8662 {
8663 	if (spa_sync_pass(spa) != 1)
8664 		return;
8665 
8666 	dsl_pool_t *dp = spa->spa_dsl_pool;
8667 	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
8668 
8669 	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
8670 	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
8671 		dsl_pool_create_origin(dp, tx);
8672 
8673 		/* Keeping the origin open increases spa_minref */
8674 		spa->spa_minref += 3;
8675 	}
8676 
8677 	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
8678 	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
8679 		dsl_pool_upgrade_clones(dp, tx);
8680 	}
8681 
8682 	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
8683 	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
8684 		dsl_pool_upgrade_dir_clones(dp, tx);
8685 
8686 		/* Keeping the freedir open increases spa_minref */
8687 		spa->spa_minref += 3;
8688 	}
8689 
8690 	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
8691 	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
8692 		spa_feature_create_zap_objects(spa, tx);
8693 	}
8694 
8695 	/*
8696 	 * LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
8697 	 * when possibility to use lz4 compression for metadata was added
8698 	 * Old pools that have this feature enabled must be upgraded to have
8699 	 * this feature active
8700 	 */
8701 	if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
8702 		boolean_t lz4_en = spa_feature_is_enabled(spa,
8703 		    SPA_FEATURE_LZ4_COMPRESS);
8704 		boolean_t lz4_ac = spa_feature_is_active(spa,
8705 		    SPA_FEATURE_LZ4_COMPRESS);
8706 
8707 		if (lz4_en && !lz4_ac)
8708 			spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
8709 	}
8710 
8711 	/*
8712 	 * If we haven't written the salt, do so now.  Note that the
8713 	 * feature may not be activated yet, but that's fine since
8714 	 * the presence of this ZAP entry is backwards compatible.
8715 	 */
8716 	if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
8717 	    DMU_POOL_CHECKSUM_SALT) == ENOENT) {
8718 		VERIFY0(zap_add(spa->spa_meta_objset,
8719 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
8720 		    sizeof (spa->spa_cksum_salt.zcs_bytes),
8721 		    spa->spa_cksum_salt.zcs_bytes, tx));
8722 	}
8723 
8724 	rrw_exit(&dp->dp_config_rwlock, FTAG);
8725 }
8726 
8727 static void
8728 vdev_indirect_state_sync_verify(vdev_t *vd)
8729 {
8730 	vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
8731 	vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;
8732 
8733 	if (vd->vdev_ops == &vdev_indirect_ops) {
8734 		ASSERT(vim != NULL);
8735 		ASSERT(vib != NULL);
8736 	}
8737 
8738 	uint64_t obsolete_sm_object = 0;
8739 	ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
8740 	if (obsolete_sm_object != 0) {
8741 		ASSERT(vd->vdev_obsolete_sm != NULL);
8742 		ASSERT(vd->vdev_removing ||
8743 		    vd->vdev_ops == &vdev_indirect_ops);
8744 		ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
8745 		ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
8746 		ASSERT3U(obsolete_sm_object, ==,
8747 		    space_map_object(vd->vdev_obsolete_sm));
8748 		ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
8749 		    space_map_allocated(vd->vdev_obsolete_sm));
8750 	}
8751 	ASSERT(vd->vdev_obsolete_segments != NULL);
8752 
8753 	/*
8754 	 * Since frees / remaps to an indirect vdev can only
8755 	 * happen in syncing context, the obsolete segments
8756 	 * tree must be empty when we start syncing.
8757 	 */
8758 	ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
8759 }
8760 
8761 /*
8762  * Set the top-level vdev's max queue depth. Evaluate each top-level's
8763  * async write queue depth in case it changed. The max queue depth will
8764  * not change in the middle of syncing out this txg.
8765  */
8766 static void
8767 spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
8768 {
8769 	ASSERT(spa_writeable(spa));
8770 
8771 	vdev_t *rvd = spa->spa_root_vdev;
8772 	uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
8773 	    zfs_vdev_queue_depth_pct / 100;
8774 	metaslab_class_t *normal = spa_normal_class(spa);
8775 	metaslab_class_t *special = spa_special_class(spa);
8776 	metaslab_class_t *dedup = spa_dedup_class(spa);
8777 
8778 	uint64_t slots_per_allocator = 0;
8779 	for (int c = 0; c < rvd->vdev_children; c++) {
8780 		vdev_t *tvd = rvd->vdev_child[c];
8781 
8782 		metaslab_group_t *mg = tvd->vdev_mg;
8783 		if (mg == NULL || !metaslab_group_initialized(mg))
8784 			continue;
8785 
8786 		metaslab_class_t *mc = mg->mg_class;
8787 		if (mc != normal && mc != special && mc != dedup)
8788 			continue;
8789 
8790 		/*
8791 		 * It is safe to do a lock-free check here because only async
8792 		 * allocations look at mg_max_alloc_queue_depth, and async
8793 		 * allocations all happen from spa_sync().
8794 		 */
8795 		for (int i = 0; i < mg->mg_allocators; i++) {
8796 			ASSERT0(zfs_refcount_count(
8797 			    &(mg->mg_allocator[i].mga_alloc_queue_depth)));
8798 		}
8799 		mg->mg_max_alloc_queue_depth = max_queue_depth;
8800 
8801 		for (int i = 0; i < mg->mg_allocators; i++) {
8802 			mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
8803 			    zfs_vdev_def_queue_depth;
8804 		}
8805 		slots_per_allocator += zfs_vdev_def_queue_depth;
8806 	}
8807 
8808 	for (int i = 0; i < spa->spa_alloc_count; i++) {
8809 		ASSERT0(zfs_refcount_count(&normal->mc_alloc_slots[i]));
8810 		ASSERT0(zfs_refcount_count(&special->mc_alloc_slots[i]));
8811 		ASSERT0(zfs_refcount_count(&dedup->mc_alloc_slots[i]));
8812 		normal->mc_alloc_max_slots[i] = slots_per_allocator;
8813 		special->mc_alloc_max_slots[i] = slots_per_allocator;
8814 		dedup->mc_alloc_max_slots[i] = slots_per_allocator;
8815 	}
8816 	normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
8817 	special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
8818 	dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
8819 }
8820 
8821 static void
8822 spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
8823 {
8824 	ASSERT(spa_writeable(spa));
8825 
8826 	vdev_t *rvd = spa->spa_root_vdev;
8827 	for (int c = 0; c < rvd->vdev_children; c++) {
8828 		vdev_t *vd = rvd->vdev_child[c];
8829 		vdev_indirect_state_sync_verify(vd);
8830 
8831 		if (vdev_indirect_should_condense(vd)) {
8832 			spa_condense_indirect_start_sync(vd, tx);
8833 			break;
8834 		}
8835 	}
8836 }
8837 
8838 static void
8839 spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
8840 {
8841 	objset_t *mos = spa->spa_meta_objset;
8842 	dsl_pool_t *dp = spa->spa_dsl_pool;
8843 	uint64_t txg = tx->tx_txg;
8844 	bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
8845 
8846 	do {
8847 		int pass = ++spa->spa_sync_pass;
8848 
8849 		spa_sync_config_object(spa, tx);
8850 		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
8851 		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
8852 		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
8853 		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
8854 		spa_errlog_sync(spa, txg);
8855 		dsl_pool_sync(dp, txg);
8856 
8857 		if (pass < zfs_sync_pass_deferred_free ||
8858 		    spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
8859 			/*
8860 			 * If the log space map feature is active we don't
8861 			 * care about deferred frees and the deferred bpobj
8862 			 * as the log space map should effectively have the
8863 			 * same results (i.e. appending only to one object).
8864 			 */
8865 			spa_sync_frees(spa, free_bpl, tx);
8866 		} else {
8867 			/*
8868 			 * We can not defer frees in pass 1, because
8869 			 * we sync the deferred frees later in pass 1.
8870 			 */
8871 			ASSERT3U(pass, >, 1);
8872 			bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
8873 			    &spa->spa_deferred_bpobj, tx);
8874 		}
8875 
8876 		ddt_sync(spa, txg);
8877 		dsl_scan_sync(dp, tx);
8878 		svr_sync(spa, tx);
8879 		spa_sync_upgrades(spa, tx);
8880 
8881 		spa_flush_metaslabs(spa, tx);
8882 
8883 		vdev_t *vd = NULL;
8884 		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
8885 		    != NULL)
8886 			vdev_sync(vd, txg);
8887 
8888 		/*
8889 		 * Note: We need to check if the MOS is dirty because we could
8890 		 * have marked the MOS dirty without updating the uberblock
8891 		 * (e.g. if we have sync tasks but no dirty user data). We need
8892 		 * to check the uberblock's rootbp because it is updated if we
8893 		 * have synced out dirty data (though in this case the MOS will
8894 		 * most likely also be dirty due to second order effects, we
8895 		 * don't want to rely on that here).
8896 		 */
8897 		if (pass == 1 &&
8898 		    spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
8899 		    !dmu_objset_is_dirty(mos, txg)) {
8900 			/*
8901 			 * Nothing changed on the first pass, therefore this
8902 			 * TXG is a no-op. Avoid syncing deferred frees, so
8903 			 * that we can keep this TXG as a no-op.
8904 			 */
8905 			ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
8906 			ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
8907 			ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
8908 			ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
8909 			break;
8910 		}
8911 
8912 		spa_sync_deferred_frees(spa, tx);
8913 	} while (dmu_objset_is_dirty(mos, txg));
8914 }
8915 
8916 /*
8917  * Rewrite the vdev configuration (which includes the uberblock) to
8918  * commit the transaction group.
8919  *
8920  * If there are no dirty vdevs, we sync the uberblock to a few random
8921  * top-level vdevs that are known to be visible in the config cache
8922  * (see spa_vdev_add() for a complete description). If there *are* dirty
8923  * vdevs, sync the uberblock to all vdevs.
8924  */
8925 static void
8926 spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
8927 {
8928 	vdev_t *rvd = spa->spa_root_vdev;
8929 	uint64_t txg = tx->tx_txg;
8930 
8931 	for (;;) {
8932 		int error = 0;
8933 
8934 		/*
8935 		 * We hold SCL_STATE to prevent vdev open/close/etc.
8936 		 * while we're attempting to write the vdev labels.
8937 		 */
8938 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
8939 
8940 		if (list_is_empty(&spa->spa_config_dirty_list)) {
8941 			vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
8942 			int svdcount = 0;
8943 			int children = rvd->vdev_children;
8944 			int c0 = spa_get_random(children);
8945 
8946 			for (int c = 0; c < children; c++) {
8947 				vdev_t *vd =
8948 				    rvd->vdev_child[(c0 + c) % children];
8949 
8950 				/* Stop when revisiting the first vdev */
8951 				if (c > 0 && svd[0] == vd)
8952 					break;
8953 
8954 				if (vd->vdev_ms_array == 0 ||
8955 				    vd->vdev_islog ||
8956 				    !vdev_is_concrete(vd))
8957 					continue;
8958 
8959 				svd[svdcount++] = vd;
8960 				if (svdcount == SPA_SYNC_MIN_VDEVS)
8961 					break;
8962 			}
8963 			error = vdev_config_sync(svd, svdcount, txg);
8964 		} else {
8965 			error = vdev_config_sync(rvd->vdev_child,
8966 			    rvd->vdev_children, txg);
8967 		}
8968 
8969 		if (error == 0)
8970 			spa->spa_last_synced_guid = rvd->vdev_guid;
8971 
8972 		spa_config_exit(spa, SCL_STATE, FTAG);
8973 
8974 		if (error == 0)
8975 			break;
8976 		zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
8977 		zio_resume_wait(spa);
8978 	}
8979 }
8980 
8981 /*
8982  * Sync the specified transaction group.  New blocks may be dirtied as
8983  * part of the process, so we iterate until it converges.
8984  */
8985 void
8986 spa_sync(spa_t *spa, uint64_t txg)
8987 {
8988 	vdev_t *vd = NULL;
8989 
8990 	VERIFY(spa_writeable(spa));
8991 
8992 	/*
8993 	 * Wait for i/os issued in open context that need to complete
8994 	 * before this txg syncs.
8995 	 */
8996 	(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
8997 	spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
8998 	    ZIO_FLAG_CANFAIL);
8999 
9000 	/*
9001 	 * Lock out configuration changes.
9002 	 */
9003 	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
9004 
9005 	spa->spa_syncing_txg = txg;
9006 	spa->spa_sync_pass = 0;
9007 
9008 	for (int i = 0; i < spa->spa_alloc_count; i++) {
9009 		mutex_enter(&spa->spa_alloc_locks[i]);
9010 		VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i]));
9011 		mutex_exit(&spa->spa_alloc_locks[i]);
9012 	}
9013 
9014 	/*
9015 	 * If there are any pending vdev state changes, convert them
9016 	 * into config changes that go out with this transaction group.
9017 	 */
9018 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
9019 	while (list_head(&spa->spa_state_dirty_list) != NULL) {
9020 		/*
9021 		 * We need the write lock here because, for aux vdevs,
9022 		 * calling vdev_config_dirty() modifies sav_config.
9023 		 * This is ugly and will become unnecessary when we
9024 		 * eliminate the aux vdev wart by integrating all vdevs
9025 		 * into the root vdev tree.
9026 		 */
9027 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9028 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
9029 		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
9030 			vdev_state_clean(vd);
9031 			vdev_config_dirty(vd);
9032 		}
9033 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9034 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
9035 	}
9036 	spa_config_exit(spa, SCL_STATE, FTAG);
9037 
9038 	dsl_pool_t *dp = spa->spa_dsl_pool;
9039 	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
9040 
9041 	spa->spa_sync_starttime = gethrtime();
9042 	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
9043 	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
9044 	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
9045 	    NSEC_TO_TICK(spa->spa_deadman_synctime));
9046 
9047 	/*
9048 	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
9049 	 * set spa_deflate if we have no raid-z vdevs.
9050 	 */
9051 	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
9052 	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
9053 		vdev_t *rvd = spa->spa_root_vdev;
9054 
9055 		int i;
9056 		for (i = 0; i < rvd->vdev_children; i++) {
9057 			vd = rvd->vdev_child[i];
9058 			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
9059 				break;
9060 		}
9061 		if (i == rvd->vdev_children) {
9062 			spa->spa_deflate = TRUE;
9063 			VERIFY0(zap_add(spa->spa_meta_objset,
9064 			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
9065 			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
9066 		}
9067 	}
9068 
9069 	spa_sync_adjust_vdev_max_queue_depth(spa);
9070 
9071 	spa_sync_condense_indirect(spa, tx);
9072 
9073 	spa_sync_iterate_to_convergence(spa, tx);
9074 
9075 #ifdef ZFS_DEBUG
9076 	if (!list_is_empty(&spa->spa_config_dirty_list)) {
9077 	/*
9078 	 * Make sure that the number of ZAPs for all the vdevs matches
9079 	 * the number of ZAPs in the per-vdev ZAP list. This only gets
9080 	 * called if the config is dirty; otherwise there may be
9081 	 * outstanding AVZ operations that weren't completed in
9082 	 * spa_sync_config_object.
9083 	 */
9084 		uint64_t all_vdev_zap_entry_count;
9085 		ASSERT0(zap_count(spa->spa_meta_objset,
9086 		    spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
9087 		ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
9088 		    all_vdev_zap_entry_count);
9089 	}
9090 #endif
9091 
9092 	if (spa->spa_vdev_removal != NULL) {
9093 		ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
9094 	}
9095 
9096 	spa_sync_rewrite_vdev_config(spa, tx);
9097 	dmu_tx_commit(tx);
9098 
9099 	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
9100 	spa->spa_deadman_tqid = 0;
9101 
9102 	/*
9103 	 * Clear the dirty config list.
9104 	 */
9105 	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
9106 		vdev_config_clean(vd);
9107 
9108 	/*
9109 	 * Now that the new config has synced transactionally,
9110 	 * let it become visible to the config cache.
9111 	 */
9112 	if (spa->spa_config_syncing != NULL) {
9113 		spa_config_set(spa, spa->spa_config_syncing);
9114 		spa->spa_config_txg = txg;
9115 		spa->spa_config_syncing = NULL;
9116 	}
9117 
9118 	dsl_pool_sync_done(dp, txg);
9119 
9120 	for (int i = 0; i < spa->spa_alloc_count; i++) {
9121 		mutex_enter(&spa->spa_alloc_locks[i]);
9122 		VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i]));
9123 		mutex_exit(&spa->spa_alloc_locks[i]);
9124 	}
9125 
9126 	/*
9127 	 * Update usable space statistics.
9128 	 */
9129 	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
9130 	    != NULL)
9131 		vdev_sync_done(vd, txg);
9132 
9133 	metaslab_class_evict_old(spa->spa_normal_class, txg);
9134 	metaslab_class_evict_old(spa->spa_log_class, txg);
9135 
9136 	spa_sync_close_syncing_log_sm(spa);
9137 
9138 	spa_update_dspace(spa);
9139 
9140 	/*
9141 	 * It had better be the case that we didn't dirty anything
9142 	 * since vdev_config_sync().
9143 	 */
9144 	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
9145 	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
9146 	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
9147 
9148 	while (zfs_pause_spa_sync)
9149 		delay(1);
9150 
9151 	spa->spa_sync_pass = 0;
9152 
9153 	/*
9154 	 * Update the last synced uberblock here. We want to do this at
9155 	 * the end of spa_sync() so that consumers of spa_last_synced_txg()
9156 	 * will be guaranteed that all the processing associated with
9157 	 * that txg has been completed.
9158 	 */
9159 	spa->spa_ubsync = spa->spa_uberblock;
9160 	spa_config_exit(spa, SCL_CONFIG, FTAG);
9161 
9162 	spa_handle_ignored_writes(spa);
9163 
9164 	/*
9165 	 * If any async tasks have been requested, kick them off.
9166 	 */
9167 	spa_async_dispatch(spa);
9168 }
9169 
9170 /*
9171  * Sync all pools.  We don't want to hold the namespace lock across these
9172  * operations, so we take a reference on the spa_t and drop the lock during the
9173  * sync.
9174  */
9175 void
9176 spa_sync_allpools(void)
9177 {
9178 	spa_t *spa = NULL;
9179 	mutex_enter(&spa_namespace_lock);
9180 	while ((spa = spa_next(spa)) != NULL) {
9181 		if (spa_state(spa) != POOL_STATE_ACTIVE ||
9182 		    !spa_writeable(spa) || spa_suspended(spa))
9183 			continue;
9184 		spa_open_ref(spa, FTAG);
9185 		mutex_exit(&spa_namespace_lock);
9186 		txg_wait_synced(spa_get_dsl(spa), 0);
9187 		mutex_enter(&spa_namespace_lock);
9188 		spa_close(spa, FTAG);
9189 	}
9190 	mutex_exit(&spa_namespace_lock);
9191 }
9192 
9193 /*
9194  * ==========================================================================
9195  * Miscellaneous routines
9196  * ==========================================================================
9197  */
9198 
9199 /*
9200  * Remove all pools in the system.
9201  */
9202 void
9203 spa_evict_all(void)
9204 {
9205 	spa_t *spa;
9206 
9207 	/*
9208 	 * Remove all cached state.  All pools should be closed now,
9209 	 * so every spa in the AVL tree should be unreferenced.
9210 	 */
9211 	mutex_enter(&spa_namespace_lock);
9212 	while ((spa = spa_next(NULL)) != NULL) {
9213 		/*
9214 		 * Stop async tasks.  The async thread may need to detach
9215 		 * a device that's been replaced, which requires grabbing
9216 		 * spa_namespace_lock, so we must drop it here.
9217 		 */
9218 		spa_open_ref(spa, FTAG);
9219 		mutex_exit(&spa_namespace_lock);
9220 		spa_async_suspend(spa);
9221 		mutex_enter(&spa_namespace_lock);
9222 		spa_close(spa, FTAG);
9223 
9224 		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
9225 			spa_unload(spa);
9226 			spa_deactivate(spa);
9227 		}
9228 		spa_remove(spa);
9229 	}
9230 	mutex_exit(&spa_namespace_lock);
9231 }
9232 
9233 vdev_t *
9234 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
9235 {
9236 	vdev_t *vd;
9237 	int i;
9238 
9239 	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
9240 		return (vd);
9241 
9242 	if (aux) {
9243 		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
9244 			vd = spa->spa_l2cache.sav_vdevs[i];
9245 			if (vd->vdev_guid == guid)
9246 				return (vd);
9247 		}
9248 
9249 		for (i = 0; i < spa->spa_spares.sav_count; i++) {
9250 			vd = spa->spa_spares.sav_vdevs[i];
9251 			if (vd->vdev_guid == guid)
9252 				return (vd);
9253 		}
9254 	}
9255 
9256 	return (NULL);
9257 }
9258 
9259 void
9260 spa_upgrade(spa_t *spa, uint64_t version)
9261 {
9262 	ASSERT(spa_writeable(spa));
9263 
9264 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
9265 
9266 	/*
9267 	 * This should only be called for a non-faulted pool, and since a
9268 	 * future version would result in an unopenable pool, this shouldn't be
9269 	 * possible.
9270 	 */
9271 	ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
9272 	ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
9273 
9274 	spa->spa_uberblock.ub_version = version;
9275 	vdev_config_dirty(spa->spa_root_vdev);
9276 
9277 	spa_config_exit(spa, SCL_ALL, FTAG);
9278 
9279 	txg_wait_synced(spa_get_dsl(spa), 0);
9280 }
9281 
9282 boolean_t
9283 spa_has_spare(spa_t *spa, uint64_t guid)
9284 {
9285 	int i;
9286 	uint64_t spareguid;
9287 	spa_aux_vdev_t *sav = &spa->spa_spares;
9288 
9289 	for (i = 0; i < sav->sav_count; i++)
9290 		if (sav->sav_vdevs[i]->vdev_guid == guid)
9291 			return (B_TRUE);
9292 
9293 	for (i = 0; i < sav->sav_npending; i++) {
9294 		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
9295 		    &spareguid) == 0 && spareguid == guid)
9296 			return (B_TRUE);
9297 	}
9298 
9299 	return (B_FALSE);
9300 }
9301 
9302 /*
9303  * Check if a pool has an active shared spare device.
9304  * Note: reference count of an active spare is 2, as a spare and as a replace
9305  */
9306 static boolean_t
9307 spa_has_active_shared_spare(spa_t *spa)
9308 {
9309 	int i, refcnt;
9310 	uint64_t pool;
9311 	spa_aux_vdev_t *sav = &spa->spa_spares;
9312 
9313 	for (i = 0; i < sav->sav_count; i++) {
9314 		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
9315 		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
9316 		    refcnt > 2)
9317 			return (B_TRUE);
9318 	}
9319 
9320 	return (B_FALSE);
9321 }
9322 
9323 uint64_t
9324 spa_total_metaslabs(spa_t *spa)
9325 {
9326 	vdev_t *rvd = spa->spa_root_vdev;
9327 
9328 	uint64_t m = 0;
9329 	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
9330 		vdev_t *vd = rvd->vdev_child[c];
9331 		if (!vdev_is_concrete(vd))
9332 			continue;
9333 		m += vd->vdev_ms_count;
9334 	}
9335 	return (m);
9336 }
9337 
9338 /*
9339  * Notify any waiting threads that some activity has switched from being in-
9340  * progress to not-in-progress so that the thread can wake up and determine
9341  * whether it is finished waiting.
9342  */
9343 void
9344 spa_notify_waiters(spa_t *spa)
9345 {
9346 	/*
9347 	 * Acquiring spa_activities_lock here prevents the cv_broadcast from
9348 	 * happening between the waiting thread's check and cv_wait.
9349 	 */
9350 	mutex_enter(&spa->spa_activities_lock);
9351 	cv_broadcast(&spa->spa_activities_cv);
9352 	mutex_exit(&spa->spa_activities_lock);
9353 }
9354 
9355 /*
9356  * Notify any waiting threads that the pool is exporting, and then block until
9357  * they are finished using the spa_t.
9358  */
9359 void
9360 spa_wake_waiters(spa_t *spa)
9361 {
9362 	mutex_enter(&spa->spa_activities_lock);
9363 	spa->spa_waiters_cancel = B_TRUE;
9364 	cv_broadcast(&spa->spa_activities_cv);
9365 	while (spa->spa_waiters != 0)
9366 		cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
9367 	spa->spa_waiters_cancel = B_FALSE;
9368 	mutex_exit(&spa->spa_activities_lock);
9369 }
9370 
9371 /* Whether the vdev or any of its descendants are being initialized/trimmed. */
9372 static boolean_t
9373 spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
9374 {
9375 	spa_t *spa = vd->vdev_spa;
9376 
9377 	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
9378 	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
9379 	ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
9380 	    activity == ZPOOL_WAIT_TRIM);
9381 
9382 	kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
9383 	    &vd->vdev_initialize_lock : &vd->vdev_trim_lock;
9384 
9385 	mutex_exit(&spa->spa_activities_lock);
9386 	mutex_enter(lock);
9387 	mutex_enter(&spa->spa_activities_lock);
9388 
9389 	boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
9390 	    (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
9391 	    (vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
9392 	mutex_exit(lock);
9393 
9394 	if (in_progress)
9395 		return (B_TRUE);
9396 
9397 	for (int i = 0; i < vd->vdev_children; i++) {
9398 		if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
9399 		    activity))
9400 			return (B_TRUE);
9401 	}
9402 
9403 	return (B_FALSE);
9404 }
9405 
9406 /*
9407  * If use_guid is true, this checks whether the vdev specified by guid is
9408  * being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
9409  * is being initialized/trimmed. The caller must hold the config lock and
9410  * spa_activities_lock.
9411  */
9412 static int
9413 spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid,
9414     zpool_wait_activity_t activity, boolean_t *in_progress)
9415 {
9416 	mutex_exit(&spa->spa_activities_lock);
9417 	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
9418 	mutex_enter(&spa->spa_activities_lock);
9419 
9420 	vdev_t *vd;
9421 	if (use_guid) {
9422 		vd = spa_lookup_by_guid(spa, guid, B_FALSE);
9423 		if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
9424 			spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9425 			return (EINVAL);
9426 		}
9427 	} else {
9428 		vd = spa->spa_root_vdev;
9429 	}
9430 
9431 	*in_progress = spa_vdev_activity_in_progress_impl(vd, activity);
9432 
9433 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9434 	return (0);
9435 }
9436 
9437 /*
9438  * Locking for waiting threads
9439  * ---------------------------
9440  *
9441  * Waiting threads need a way to check whether a given activity is in progress,
9442  * and then, if it is, wait for it to complete. Each activity will have some
9443  * in-memory representation of the relevant on-disk state which can be used to
9444  * determine whether or not the activity is in progress. The in-memory state and
9445  * the locking used to protect it will be different for each activity, and may
9446  * not be suitable for use with a cvar (e.g., some state is protected by the
9447  * config lock). To allow waiting threads to wait without any races, another
9448  * lock, spa_activities_lock, is used.
9449  *
9450  * When the state is checked, both the activity-specific lock (if there is one)
9451  * and spa_activities_lock are held. In some cases, the activity-specific lock
9452  * is acquired explicitly (e.g. the config lock). In others, the locking is
9453  * internal to some check (e.g. bpobj_is_empty). After checking, the waiting
9454  * thread releases the activity-specific lock and, if the activity is in
9455  * progress, then cv_waits using spa_activities_lock.
9456  *
9457  * The waiting thread is woken when another thread, one completing some
9458  * activity, updates the state of the activity and then calls
9459  * spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
9460  * needs to hold its activity-specific lock when updating the state, and this
9461  * lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
9462  *
9463  * Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
9464  * and because it is held when the waiting thread checks the state of the
9465  * activity, it can never be the case that the completing thread both updates
9466  * the activity state and cv_broadcasts in between the waiting thread's check
9467  * and cv_wait. Thus, a waiting thread can never miss a wakeup.
9468  *
9469  * In order to prevent deadlock, when the waiting thread does its check, in some
9470  * cases it will temporarily drop spa_activities_lock in order to acquire the
9471  * activity-specific lock. The order in which spa_activities_lock and the
9472  * activity specific lock are acquired in the waiting thread is determined by
9473  * the order in which they are acquired in the completing thread; if the
9474  * completing thread calls spa_notify_waiters with the activity-specific lock
9475  * held, then the waiting thread must also acquire the activity-specific lock
9476  * first.
9477  */
9478 
9479 static int
9480 spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
9481     boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
9482 {
9483 	int error = 0;
9484 
9485 	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
9486 
9487 	switch (activity) {
9488 	case ZPOOL_WAIT_CKPT_DISCARD:
9489 		*in_progress =
9490 		    (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
9491 		    zap_contains(spa_meta_objset(spa),
9492 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
9493 		    ENOENT);
9494 		break;
9495 	case ZPOOL_WAIT_FREE:
9496 		*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
9497 		    !bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
9498 		    spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
9499 		    spa_livelist_delete_check(spa));
9500 		break;
9501 	case ZPOOL_WAIT_INITIALIZE:
9502 	case ZPOOL_WAIT_TRIM:
9503 		error = spa_vdev_activity_in_progress(spa, use_tag, tag,
9504 		    activity, in_progress);
9505 		break;
9506 	case ZPOOL_WAIT_REPLACE:
9507 		mutex_exit(&spa->spa_activities_lock);
9508 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
9509 		mutex_enter(&spa->spa_activities_lock);
9510 
9511 		*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
9512 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9513 		break;
9514 	case ZPOOL_WAIT_REMOVE:
9515 		*in_progress = (spa->spa_removing_phys.sr_state ==
9516 		    DSS_SCANNING);
9517 		break;
9518 	case ZPOOL_WAIT_RESILVER:
9519 		if ((*in_progress = vdev_rebuild_active(spa->spa_root_vdev)))
9520 			break;
9521 		/* fall through */
9522 	case ZPOOL_WAIT_SCRUB:
9523 	{
9524 		boolean_t scanning, paused, is_scrub;
9525 		dsl_scan_t *scn =  spa->spa_dsl_pool->dp_scan;
9526 
9527 		is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
9528 		scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
9529 		paused = dsl_scan_is_paused_scrub(scn);
9530 		*in_progress = (scanning && !paused &&
9531 		    is_scrub == (activity == ZPOOL_WAIT_SCRUB));
9532 		break;
9533 	}
9534 	default:
9535 		panic("unrecognized value for activity %d", activity);
9536 	}
9537 
9538 	return (error);
9539 }
9540 
9541 static int
9542 spa_wait_common(const char *pool, zpool_wait_activity_t activity,
9543     boolean_t use_tag, uint64_t tag, boolean_t *waited)
9544 {
9545 	/*
9546 	 * The tag is used to distinguish between instances of an activity.
9547 	 * 'initialize' and 'trim' are the only activities that we use this for.
9548 	 * The other activities can only have a single instance in progress in a
9549 	 * pool at one time, making the tag unnecessary.
9550 	 *
9551 	 * There can be multiple devices being replaced at once, but since they
9552 	 * all finish once resilvering finishes, we don't bother keeping track
9553 	 * of them individually, we just wait for them all to finish.
9554 	 */
9555 	if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
9556 	    activity != ZPOOL_WAIT_TRIM)
9557 		return (EINVAL);
9558 
9559 	if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
9560 		return (EINVAL);
9561 
9562 	spa_t *spa;
9563 	int error = spa_open(pool, &spa, FTAG);
9564 	if (error != 0)
9565 		return (error);
9566 
9567 	/*
9568 	 * Increment the spa's waiter count so that we can call spa_close and
9569 	 * still ensure that the spa_t doesn't get freed before this thread is
9570 	 * finished with it when the pool is exported. We want to call spa_close
9571 	 * before we start waiting because otherwise the additional ref would
9572 	 * prevent the pool from being exported or destroyed throughout the
9573 	 * potentially long wait.
9574 	 */
9575 	mutex_enter(&spa->spa_activities_lock);
9576 	spa->spa_waiters++;
9577 	spa_close(spa, FTAG);
9578 
9579 	*waited = B_FALSE;
9580 	for (;;) {
9581 		boolean_t in_progress;
9582 		error = spa_activity_in_progress(spa, activity, use_tag, tag,
9583 		    &in_progress);
9584 
9585 		if (error || !in_progress || spa->spa_waiters_cancel)
9586 			break;
9587 
9588 		*waited = B_TRUE;
9589 
9590 		if (cv_wait_sig(&spa->spa_activities_cv,
9591 		    &spa->spa_activities_lock) == 0) {
9592 			error = EINTR;
9593 			break;
9594 		}
9595 	}
9596 
9597 	spa->spa_waiters--;
9598 	cv_signal(&spa->spa_waiters_cv);
9599 	mutex_exit(&spa->spa_activities_lock);
9600 
9601 	return (error);
9602 }
9603 
9604 /*
9605  * Wait for a particular instance of the specified activity to complete, where
9606  * the instance is identified by 'tag'
9607  */
9608 int
9609 spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
9610     boolean_t *waited)
9611 {
9612 	return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
9613 }
9614 
9615 /*
9616  * Wait for all instances of the specified activity complete
9617  */
9618 int
9619 spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
9620 {
9621 
9622 	return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
9623 }
9624 
9625 sysevent_t *
9626 spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
9627 {
9628 	sysevent_t *ev = NULL;
9629 #ifdef _KERNEL
9630 	nvlist_t *resource;
9631 
9632 	resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
9633 	if (resource) {
9634 		ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
9635 		ev->resource = resource;
9636 	}
9637 #endif
9638 	return (ev);
9639 }
9640 
9641 void
9642 spa_event_post(sysevent_t *ev)
9643 {
9644 #ifdef _KERNEL
9645 	if (ev) {
9646 		zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
9647 		kmem_free(ev, sizeof (*ev));
9648 	}
9649 #endif
9650 }
9651 
9652 /*
9653  * Post a zevent corresponding to the given sysevent.   The 'name' must be one
9654  * of the event definitions in sys/sysevent/eventdefs.h.  The payload will be
9655  * filled in from the spa and (optionally) the vdev.  This doesn't do anything
9656  * in the userland libzpool, as we don't want consumers to misinterpret ztest
9657  * or zdb as real changes.
9658  */
9659 void
9660 spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
9661 {
9662 	spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
9663 }
9664 
9665 /* state manipulation functions */
9666 EXPORT_SYMBOL(spa_open);
9667 EXPORT_SYMBOL(spa_open_rewind);
9668 EXPORT_SYMBOL(spa_get_stats);
9669 EXPORT_SYMBOL(spa_create);
9670 EXPORT_SYMBOL(spa_import);
9671 EXPORT_SYMBOL(spa_tryimport);
9672 EXPORT_SYMBOL(spa_destroy);
9673 EXPORT_SYMBOL(spa_export);
9674 EXPORT_SYMBOL(spa_reset);
9675 EXPORT_SYMBOL(spa_async_request);
9676 EXPORT_SYMBOL(spa_async_suspend);
9677 EXPORT_SYMBOL(spa_async_resume);
9678 EXPORT_SYMBOL(spa_inject_addref);
9679 EXPORT_SYMBOL(spa_inject_delref);
9680 EXPORT_SYMBOL(spa_scan_stat_init);
9681 EXPORT_SYMBOL(spa_scan_get_stats);
9682 
9683 /* device manipulation */
9684 EXPORT_SYMBOL(spa_vdev_add);
9685 EXPORT_SYMBOL(spa_vdev_attach);
9686 EXPORT_SYMBOL(spa_vdev_detach);
9687 EXPORT_SYMBOL(spa_vdev_setpath);
9688 EXPORT_SYMBOL(spa_vdev_setfru);
9689 EXPORT_SYMBOL(spa_vdev_split_mirror);
9690 
9691 /* spare statech is global across all pools) */
9692 EXPORT_SYMBOL(spa_spare_add);
9693 EXPORT_SYMBOL(spa_spare_remove);
9694 EXPORT_SYMBOL(spa_spare_exists);
9695 EXPORT_SYMBOL(spa_spare_activate);
9696 
9697 /* L2ARC statech is global across all pools) */
9698 EXPORT_SYMBOL(spa_l2cache_add);
9699 EXPORT_SYMBOL(spa_l2cache_remove);
9700 EXPORT_SYMBOL(spa_l2cache_exists);
9701 EXPORT_SYMBOL(spa_l2cache_activate);
9702 EXPORT_SYMBOL(spa_l2cache_drop);
9703 
9704 /* scanning */
9705 EXPORT_SYMBOL(spa_scan);
9706 EXPORT_SYMBOL(spa_scan_stop);
9707 
9708 /* spa syncing */
9709 EXPORT_SYMBOL(spa_sync); /* only for DMU use */
9710 EXPORT_SYMBOL(spa_sync_allpools);
9711 
9712 /* properties */
9713 EXPORT_SYMBOL(spa_prop_set);
9714 EXPORT_SYMBOL(spa_prop_get);
9715 EXPORT_SYMBOL(spa_prop_clear_bootfs);
9716 
9717 /* asynchronous event notification */
9718 EXPORT_SYMBOL(spa_event_notify);
9719 
9720 /* BEGIN CSTYLED */
9721 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, INT, ZMOD_RW,
9722 	"log2(fraction of arc that can be used by inflight I/Os when "
9723 	"verifying pool during import");
9724 
9725 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
9726 	"Set to traverse metadata on pool import");
9727 
9728 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
9729 	"Set to traverse data on pool import");
9730 
9731 ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
9732 	"Print vdev tree to zfs_dbgmsg during pool import");
9733 
9734 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RD,
9735 	"Percentage of CPUs to run an IO worker thread");
9736 
9737 ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, ULONG, ZMOD_RW,
9738 	"Allow importing pool with up to this number of missing top-level "
9739 	"vdevs (in read-only mode)");
9740 
9741 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT, ZMOD_RW,
9742 	"Set the livelist condense zthr to pause");
9743 
9744 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT, ZMOD_RW,
9745 	"Set the livelist condense synctask to pause");
9746 
9747 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel, INT, ZMOD_RW,
9748 	"Whether livelist condensing was canceled in the synctask");
9749 
9750 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel, INT, ZMOD_RW,
9751 	"Whether livelist condensing was canceled in the zthr function");
9752 
9753 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT, ZMOD_RW,
9754 	"Whether extra ALLOC blkptrs were added to a livelist entry while it "
9755 	"was being condensed");
9756 /* END CSTYLED */
9757