xref: /netbsd-src/external/cddl/osnet/dist/lib/libzfs/common/libzfs_pool.c (revision 796c32c94f6e154afc9de0f63da35c91bb739b45)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <ctype.h>
28 #include <errno.h>
29 #include <devid.h>
30 #include <fcntl.h>
31 #include <libintl.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <strings.h>
35 #include <unistd.h>
36 #include <sys/efi_partition.h>
37 #include <sys/vtoc.h>
38 #include <sys/zfs_ioctl.h>
39 #include <sys/stat.h>
40 #include <dlfcn.h>
41 
42 #include "zfs_namecheck.h"
43 #include "zfs_prop.h"
44 #include "libzfs_impl.h"
45 #include "zfs_comutil.h"
46 
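/*
 * Printable names for the internal pool-history events.  The table is
 * indexed by the kernel's internal history event code and bounded by
 * LOG_END.
 */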
47 const char *hist_event_table[LOG_END] = {
48 	"invalid event",
49 	"pool create",
50 	"vdev add",
51 	"pool remove",
52 	"pool destroy",
53 	"pool export",
54 	"pool import",
55 	"vdev attach",
56 	"vdev replace",
57 	"vdev detach",
58 	"vdev online",
59 	"vdev offline",
60 	"vdev upgrade",
61 	"pool clear",
62 	"pool scrub",
63 	"pool property set",
64 	"create",
65 	"clone",
66 	"destroy",
67 	"destroy_begin_sync",
68 	"inherit",
69 	"property set",
70 	"quota set",
71 	"permission update",
72 	"permission remove",
73 	"permission who remove",
74 	"promote",
75 	"receive",
76 	"rename",
77 	"reservation set",
78 	"replay_inc_sync",
79 	"replay_full_sync",
80 	"rollback",
81 	"snapshot",
82 	"filesystem version upgrade",
83 	"refquota set",
84 	"refreservation set",
85 	"pool scrub done",
86 	"user hold",
87 	"user release",
88 	"pool split",
89 };
90 
91 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
92 
93 #if defined(__i386) || defined(__amd64)
94 #define	BOOTCMD	"installgrub(1M)"
95 #else
96 #define	BOOTCMD	"installboot(1M)"
97 #endif
98 
99 #define	DISK_ROOT	"/dev/dsk"
100 #define	RDISK_ROOT	"/dev/rdsk"
101 #define	BACKUP_SLICE	"s2"
102 
103 /*
104  * ====================================================================
105  *   zpool property functions
106  * ====================================================================
107  */
108 
109 static int
110 zpool_get_all_props(zpool_handle_t *zhp)
111 {
112 	zfs_cmd_t zc = { 0 };
113 	libzfs_handle_t *hdl = zhp->zpool_hdl;
114 
115 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
116 
117 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
118 		return (-1);
119 
120 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
121 		if (errno == ENOMEM) {
122 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
123 				zcmd_free_nvlists(&zc);
124 				return (-1);
125 			}
126 		} else {
127 			zcmd_free_nvlists(&zc);
128 			return (-1);
129 		}
130 	}
131 
132 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
133 		zcmd_free_nvlists(&zc);
134 		return (-1);
135 	}
136 
137 	zcmd_free_nvlists(&zc);
138 
139 	return (0);
140 }
141 
142 static int
143 zpool_props_refresh(zpool_handle_t *zhp)
144 {
145 	nvlist_t *old_props;
146 
147 	old_props = zhp->zpool_props;
148 
149 	if (zpool_get_all_props(zhp) != 0)
150 		return (-1);
151 
152 	nvlist_free(old_props);
153 	return (0);
154 }
155 
156 static char *
157 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
158     zprop_source_t *src)
159 {
160 	nvlist_t *nv, *nvl;
161 	uint64_t ival;
162 	char *value;
163 	zprop_source_t source;
164 
165 	nvl = zhp->zpool_props;
166 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
167 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
168 		source = ival;
169 		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
170 	} else {
171 		source = ZPROP_SRC_DEFAULT;
172 		if ((value = __UNCONST(zpool_prop_default_string(prop))) == NULL)
173 			value = __UNCONST("-");
174 	}
175 
176 	if (src)
177 		*src = source;
178 
179 	return value;
180 }
181 
182 uint64_t
183 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
184 {
185 	nvlist_t *nv, *nvl;
186 	uint64_t value;
187 	zprop_source_t source;
188 
189 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
190 		/*
191 		 * zpool_get_all_props() has most likely failed because
192 		 * the pool is faulted, but if all we need is the top level
193 		 * vdev's guid then get it from the zhp config nvlist.
194 		 */
195 		if ((prop == ZPOOL_PROP_GUID) &&
196 		    (nvlist_lookup_nvlist(zhp->zpool_config,
197 		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
198 		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
199 		    == 0)) {
200 			return (value);
201 		}
202 		return (zpool_prop_default_numeric(prop));
203 	}
204 
205 	nvl = zhp->zpool_props;
206 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
207 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
208 		source = value;
209 		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
210 	} else {
211 		source = ZPROP_SRC_DEFAULT;
212 		value = zpool_prop_default_numeric(prop);
213 	}
214 
215 	if (src)
216 		*src = source;
217 
218 	return (value);
219 }
220 
221 /*
222  * Map VDEV STATE to printed strings.
223  */
224 const char *
225 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
226 {
227 	switch (state) {
228 	case VDEV_STATE_CLOSED:
229 	case VDEV_STATE_OFFLINE:
230 		return (gettext("OFFLINE"));
231 	case VDEV_STATE_REMOVED:
232 		return (gettext("REMOVED"));
233 	case VDEV_STATE_CANT_OPEN:
234 		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
235 			return (gettext("FAULTED"));
236 		else if (aux == VDEV_AUX_SPLIT_POOL)
237 			return (gettext("SPLIT"));
238 		else
239 			return (gettext("UNAVAIL"));
240 	case VDEV_STATE_FAULTED:
241 		return (gettext("FAULTED"));
242 	case VDEV_STATE_DEGRADED:
243 		return (gettext("DEGRADED"));
244 	case VDEV_STATE_HEALTHY:
245 		return (gettext("ONLINE"));
246 	case VDEV_STATE_UNKNOWN:
247 	default:
248 		return (gettext("UNKNOWN"));
249 	}
250 }
251 
252 /*
253  * Get a zpool property value for 'prop' and return the value in
254  * a pre-allocated buffer.
255  */
256 int
257 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
258     zprop_source_t *srctype)
259 {
260 	uint64_t intval;
261 	const char *strval;
262 	zprop_source_t src = ZPROP_SRC_NONE;
263 	nvlist_t *nvroot;
264 	vdev_stat_t *vs;
265 	uint_t vsc;
266 
267 	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
268 		switch (prop) {
269 		case ZPOOL_PROP_NAME:
270 			(void) strlcpy(buf, zpool_get_name(zhp), len);
271 			break;
272 
273 		case ZPOOL_PROP_HEALTH:
274 			(void) strlcpy(buf, "FAULTED", len);
275 			break;
276 
277 		case ZPOOL_PROP_GUID:
278 			intval = zpool_get_prop_int(zhp, prop, &src);
279 			(void) snprintf(buf, len, "%" PRIu64, intval);
280 			break;
281 
282 		case ZPOOL_PROP_ALTROOT:
283 		case ZPOOL_PROP_CACHEFILE:
284 			if (zhp->zpool_props != NULL ||
285 			    zpool_get_all_props(zhp) == 0) {
286 				(void) strlcpy(buf,
287 				    zpool_get_prop_string(zhp, prop, &src),
288 				    len);
289 				if (srctype != NULL)
290 					*srctype = src;
291 				return (0);
292 			}
293 			/* FALLTHROUGH */
294 		default:
295 			(void) strlcpy(buf, "-", len);
296 			break;
297 		}
298 
299 		if (srctype != NULL)
300 			*srctype = src;
301 		return (0);
302 	}
303 
304 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
305 	    prop != ZPOOL_PROP_NAME)
306 		return (-1);
307 
308 	switch (zpool_prop_get_type(prop)) {
309 	case PROP_TYPE_STRING:
310 		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
311 		    len);
312 		break;
313 
314 	case PROP_TYPE_NUMBER:
315 		intval = zpool_get_prop_int(zhp, prop, &src);
316 
317 		switch (prop) {
318 		case ZPOOL_PROP_SIZE:
319 		case ZPOOL_PROP_ALLOCATED:
320 		case ZPOOL_PROP_FREE:
321 			(void) zfs_nicenum(intval, buf, len);
322 			break;
323 
324 		case ZPOOL_PROP_CAPACITY:
325 			(void) snprintf(buf, len, "%ju%%",
326 			    (uintmax_t)intval);
327 			break;
328 
329 		case ZPOOL_PROP_DEDUPRATIO:
330 			(void) snprintf(buf, len, "%ju.%02jux",
331 			    (uintmax_t)(intval / 100),
332 			    (uintmax_t)(intval % 100));
333 			break;
334 
335 		case ZPOOL_PROP_HEALTH:
336 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
337 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
338 			verify(nvlist_lookup_uint64_array(nvroot,
339 			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
340 
341 			(void) strlcpy(buf, zpool_state_to_name(intval,
342 			    vs->vs_aux), len);
343 			break;
344 		default:
345 			(void) snprintf(buf, len, "%ju", (uintmax_t)intval);
346 		}
347 		break;
348 
349 	case PROP_TYPE_INDEX:
350 		intval = zpool_get_prop_int(zhp, prop, &src);
351 		if (zpool_prop_index_to_string(prop, intval, &strval)
352 		    != 0)
353 			return (-1);
354 		(void) strlcpy(buf, strval, len);
355 		break;
356 
357 	default:
358 		abort();
359 	}
360 
361 	if (srctype)
362 		*srctype = src;
363 
364 	return (0);
365 }
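
/*
 * Illustrative sketch (not part of the library): a consumer could fetch
 * a pool's capacity as a printable string with zpool_get_prop().  The
 * handle 'zhp' is assumed to come from a prior zpool_open().
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf,
 *	    sizeof (buf), &src) == 0)
 *		(void) printf("capacity: %s\n", buf);
 */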
366 
367 /*
368  * Check that the bootfs name lies within the pool it is being set on.
369  * Assumes bootfs is a valid dataset name.
370  */
371 static boolean_t
372 bootfs_name_valid(const char *pool, char *bootfs)
373 {
374 	int len = strlen(pool);
375 
376 	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
377 		return (B_FALSE);
378 
379 	if (strncmp(pool, bootfs, len) == 0 &&
380 	    (bootfs[len] == '/' || bootfs[len] == '\0'))
381 		return (B_TRUE);
382 
383 	return (B_FALSE);
384 }
385 
386 /*
387  * Inspect the configuration to determine if any of the devices contain
388  * an EFI label.
389  */
390 static boolean_t
391 pool_uses_efi(nvlist_t *config)
392 {
393 	nvlist_t **child;
394 	uint_t c, children;
395 
396 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
397 	    &child, &children) != 0)
398 		return (read_efi_label(config, NULL) >= 0);
399 
400 	for (c = 0; c < children; c++) {
401 		if (pool_uses_efi(child[c]))
402 			return (B_TRUE);
403 	}
404 	return (B_FALSE);
405 }
406 
407 static boolean_t
408 pool_is_bootable(zpool_handle_t *zhp)
409 {
410 	char bootfs[ZPOOL_MAXNAMELEN];
411 
412 	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
413 	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
414 	    sizeof (bootfs)) != 0);
415 }
416 
417 
418 /*
419  * Given an nvlist of zpool properties to be set, validate that they are
420  * correct, and parse any numeric properties (index, boolean, etc.) if they are
421  * specified as strings.
422  */
423 static nvlist_t *
424 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
425     nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
426 {
427 	nvpair_t *elem;
428 	nvlist_t *retprops;
429 	zpool_prop_t prop;
430 	char *strval;
431 	uint64_t intval;
432 	char *slash;
433 	struct stat64 statbuf;
434 	zpool_handle_t *zhp;
435 	nvlist_t *nvroot;
436 
437 	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
438 		(void) no_memory(hdl);
439 		return (NULL);
440 	}
441 
442 	elem = NULL;
443 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
444 		const char *propname = nvpair_name(elem);
445 
446 		/*
447 		 * Make sure this property is valid and applies to this type.
448 		 */
449 		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
450 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
451 			    "invalid property '%s'"), propname);
452 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
453 			goto error;
454 		}
455 
456 		if (zpool_prop_readonly(prop)) {
457 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
458 			    "is readonly"), propname);
459 			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
460 			goto error;
461 		}
462 
463 		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
464 		    &strval, &intval, errbuf) != 0)
465 			goto error;
466 
467 		/*
468 		 * Perform additional checking for specific properties.
469 		 */
470 		switch (prop) {
471 		case ZPOOL_PROP_VERSION:
472 			if (intval < version || intval > SPA_VERSION) {
473 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
474 				    "property '%s' number %ju is invalid."),
475 				    propname, (uintmax_t)intval);
476 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
477 				goto error;
478 			}
479 			break;
480 
481 		case ZPOOL_PROP_BOOTFS:
482 			if (create_or_import) {
483 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
484 				    "property '%s' cannot be set at creation "
485 				    "or import time"), propname);
486 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
487 				goto error;
488 			}
489 
490 			if (version < SPA_VERSION_BOOTFS) {
491 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
492 				    "pool must be upgraded to support "
493 				    "'%s' property"), propname);
494 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
495 				goto error;
496 			}
497 
498 			/*
499 			 * The bootfs property value has to be a dataset name, and
500 			 * the dataset has to be in the pool it is being set on.
501 			 */
502 			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
503 			    strval)) {
504 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
505 				    "is an invalid name"), strval);
506 				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
507 				goto error;
508 			}
509 
510 			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
511 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
512 				    "could not open pool '%s'"), poolname);
513 				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
514 				goto error;
515 			}
516 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
517 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
518 
519 			/*
520 			 * The bootfs property cannot be set if any disk in the
521 			 * pool carries an EFI label.
522 			 */
523 			if (pool_uses_efi(nvroot)) {
524 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
525 				    "property '%s' not supported on "
526 				    "EFI labeled devices"), propname);
527 				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
528 				zpool_close(zhp);
529 				goto error;
530 			}
531 			zpool_close(zhp);
532 			break;
533 
534 		case ZPOOL_PROP_ALTROOT:
535 			if (!create_or_import) {
536 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
537 				    "property '%s' can only be set during pool "
538 				    "creation or import"), propname);
539 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
540 				goto error;
541 			}
542 
543 			if (strval[0] != '/') {
544 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
545 				    "bad alternate root '%s'"), strval);
546 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
547 				goto error;
548 			}
549 			break;
550 
551 		case ZPOOL_PROP_CACHEFILE:
552 			if (strval[0] == '\0')
553 				break;
554 
555 			if (strcmp(strval, "none") == 0)
556 				break;
557 
558 			if (strval[0] != '/') {
559 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
560 				    "property '%s' must be empty, an "
561 				    "absolute path, or 'none'"), propname);
562 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
563 				goto error;
564 			}
565 
566 			slash = strrchr(strval, '/');
567 
568 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
569 			    strcmp(slash, "/..") == 0) {
570 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
571 				    "'%s' is not a valid file"), strval);
572 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
573 				goto error;
574 			}
575 
576 			*slash = '\0';
577 
578 			if (strval[0] != '\0' &&
579 			    (stat64(strval, &statbuf) != 0 ||
580 			    !S_ISDIR(statbuf.st_mode))) {
581 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
582 				    "'%s' is not a valid directory"),
583 				    strval);
584 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
585 				goto error;
586 			}
587 
588 			*slash = '/';
589 			break;
590 		case ZPOOL_PROP_FREE:
591 		case ZPOOL_PROP_ALLOCATED:
592 		case ZPOOL_NUM_PROPS:
593 		case ZPOOL_PROP_AUTOEXPAND:
594 		case ZPOOL_PROP_DEDUPDITTO:
595 		case ZPOOL_PROP_SIZE:
596 		case ZPOOL_PROP_CAPACITY:
597 		case ZPOOL_PROP_HEALTH:
598 		case ZPOOL_PROP_GUID:
599 		case ZPOOL_PROP_DELEGATION:
600 		case ZPOOL_PROP_AUTOREPLACE:
601 		case ZPOOL_PROP_FAILUREMODE:
602 		case ZPOOL_PROP_LISTSNAPS:
603 		case ZPOOL_PROP_DEDUPRATIO:
604 		case ZPOOL_PROP_NAME:
605 			break;
606 		}
607 	}
608 
609 	return (retprops);
610 error:
611 	nvlist_free(retprops);
612 	return (NULL);
613 }
614 
615 /*
616  * Set zpool property: propname=propval.
617  */
618 int
619 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
620 {
621 	zfs_cmd_t zc = { 0 };
622 	int ret = -1;
623 	char errbuf[1024];
624 	nvlist_t *nvl = NULL;
625 	nvlist_t *realprops;
626 	uint64_t version;
627 
628 	(void) snprintf(errbuf, sizeof (errbuf),
629 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
630 	    zhp->zpool_name);
631 
632 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
633 		return (no_memory(zhp->zpool_hdl));
634 
635 	if (nvlist_add_string(nvl, propname, propval) != 0) {
636 		nvlist_free(nvl);
637 		return (no_memory(zhp->zpool_hdl));
638 	}
639 
640 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
641 	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
642 	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
643 		nvlist_free(nvl);
644 		return (-1);
645 	}
646 
647 	nvlist_free(nvl);
648 	nvl = realprops;
649 
650 	/*
651 	 * Execute the corresponding ioctl() to set this property.
652 	 */
653 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
654 
655 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
656 		nvlist_free(nvl);
657 		return (-1);
658 	}
659 
660 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
661 
662 	zcmd_free_nvlists(&zc);
663 	nvlist_free(nvl);
664 
665 	if (ret)
666 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
667 	else
668 		(void) zpool_props_refresh(zhp);
669 
670 	return (ret);
671 }
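
/*
 * Illustrative sketch (the property name and value are examples only):
 *
 *	if (zpool_set_prop(zhp, "autoreplace", "on") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zpool_get_handle(zhp)));
 */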
672 
673 int
674 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
675 {
676 	libzfs_handle_t *hdl = zhp->zpool_hdl;
677 	zprop_list_t *entry;
678 	char buf[ZFS_MAXPROPLEN];
679 
680 	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
681 		return (-1);
682 
683 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
684 
685 		if (entry->pl_fixed)
686 			continue;
687 
688 		if (entry->pl_prop != ZPROP_INVAL &&
689 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
690 		    NULL) == 0) {
691 			if (strlen(buf) > entry->pl_width)
692 				entry->pl_width = strlen(buf);
693 		}
694 	}
695 
696 	return (0);
697 }
698 
699 
700 /*
701  * Don't start the slice at the default block of 34; many storage devices
702  * use a stripe width of 128k (i.e. 256 512-byte blocks), so start there instead.
703  */
704 #define	NEW_START_BLOCK	256
705 
706 /*
707  * Validate the given pool name, optionally reporting extended error
708  * information through 'hdl'.
709  */
710 boolean_t
711 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
712 {
713 	namecheck_err_t why;
714 	char what;
715 	int ret;
716 
717 	ret = pool_namecheck(pool, &why, &what);
718 
719 	/*
720 	 * The rules for reserved pool names were extended at a later point.
721 	 * But we need to support users with existing pools that may now be
722 	 * invalid.  So we only check for this expanded set of names during a
723 	 * create (or import), and only in userland.
724 	 */
725 	if (ret == 0 && !isopen &&
726 	    (strncmp(pool, "mirror", 6) == 0 ||
727 	    strncmp(pool, "raidz", 5) == 0 ||
728 	    strncmp(pool, "spare", 5) == 0 ||
729 	    strcmp(pool, "log") == 0)) {
730 		if (hdl != NULL)
731 			zfs_error_aux(hdl,
732 			    dgettext(TEXT_DOMAIN, "name is reserved"));
733 		return (B_FALSE);
734 	}
735 
736 
737 	if (ret != 0) {
738 		if (hdl != NULL) {
739 			switch (why) {
740 			case NAME_ERR_TOOLONG:
741 				zfs_error_aux(hdl,
742 				    dgettext(TEXT_DOMAIN, "name is too long"));
743 				break;
744 
745 			case NAME_ERR_INVALCHAR:
746 				zfs_error_aux(hdl,
747 				    dgettext(TEXT_DOMAIN, "invalid character "
748 				    "'%c' in pool name"), what);
749 				break;
750 
751 			case NAME_ERR_NOLETTER:
752 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
753 				    "name must begin with a letter"));
754 				break;
755 
756 			case NAME_ERR_RESERVED:
757 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
758 				    "name is reserved"));
759 				break;
760 
761 			case NAME_ERR_DISKLIKE:
762 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
763 				    "pool name is reserved"));
764 				break;
765 
766 			case NAME_ERR_LEADING_SLASH:
767 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
768 				    "leading slash in name"));
769 				break;
770 
771 			case NAME_ERR_EMPTY_COMPONENT:
772 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
773 				    "empty component in name"));
774 				break;
775 
776 			case NAME_ERR_TRAILING_SLASH:
777 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
778 				    "trailing slash in name"));
779 				break;
780 
781 			case NAME_ERR_MULTIPLE_AT:
782 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
783 				    "multiple '@' delimiters in name"));
784 				break;
785 			case NAME_ERR_NO_AT:
786 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
787 				    "no attribute in name"));
788 				break;
789 			}
790 		}
791 		return (B_FALSE);
792 	}
793 
794 	return (B_TRUE);
795 }
796 
797 /*
798  * Open a handle to the given pool, even if the pool is currently in the FAULTED
799  * state.
800  */
801 zpool_handle_t *
802 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
803 {
804 	zpool_handle_t *zhp;
805 	boolean_t missing;
806 
807 	/*
808 	 * Make sure the pool name is valid.
809 	 */
810 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
811 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
812 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
813 		    pool);
814 		return (NULL);
815 	}
816 
817 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
818 		return (NULL);
819 
820 	zhp->zpool_hdl = hdl;
821 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
822 
823 	if (zpool_refresh_stats(zhp, &missing) != 0) {
824 		zpool_close(zhp);
825 		return (NULL);
826 	}
827 
828 	if (missing) {
829 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
830 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
831 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
832 		zpool_close(zhp);
833 		return (NULL);
834 	}
835 
836 	return (zhp);
837 }
838 
839 /*
840  * Like the above, but silent on error.  Used when iterating over pools (because
841  * the configuration cache may be out of date).
842  */
843 int
844 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
845 {
846 	zpool_handle_t *zhp;
847 	boolean_t missing;
848 
849 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
850 		return (-1);
851 
852 	zhp->zpool_hdl = hdl;
853 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
854 
855 	if (zpool_refresh_stats(zhp, &missing) != 0) {
856 		zpool_close(zhp);
857 		return (-1);
858 	}
859 
860 	if (missing) {
861 		zpool_close(zhp);
862 		*ret = NULL;
863 		return (0);
864 	}
865 
866 	*ret = zhp;
867 	return (0);
868 }
869 
870 /*
871  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
872  * state.
873  */
874 zpool_handle_t *
875 zpool_open(libzfs_handle_t *hdl, const char *pool)
876 {
877 	zpool_handle_t *zhp;
878 
879 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
880 		return (NULL);
881 
882 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
883 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
884 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
885 		zpool_close(zhp);
886 		return (NULL);
887 	}
888 
889 	return (zhp);
890 }
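
/*
 * Illustrative open/close lifecycle (the pool name "tank" is a
 * hypothetical example):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		(void) printf("state: %d\n", zpool_get_state(zhp));
 *		zpool_close(zhp);
 *	}
 */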
891 
892 /*
893  * Close the handle.  Simply frees the memory associated with the handle.
894  */
895 void
896 zpool_close(zpool_handle_t *zhp)
897 {
898 	if (zhp->zpool_config)
899 		nvlist_free(zhp->zpool_config);
900 	if (zhp->zpool_old_config)
901 		nvlist_free(zhp->zpool_old_config);
902 	if (zhp->zpool_props)
903 		nvlist_free(zhp->zpool_props);
904 	free(zhp);
905 }
906 
907 /*
908  * Return the name of the pool.
909  */
910 const char *
911 zpool_get_name(zpool_handle_t *zhp)
912 {
913 	return (zhp->zpool_name);
914 }
915 
916 
917 /*
918  * Return the state of the pool (ACTIVE or UNAVAILABLE).
919  */
920 int
921 zpool_get_state(zpool_handle_t *zhp)
922 {
923 	return (zhp->zpool_state);
924 }
925 
926 /*
927  * Create the named pool, using the provided vdev list.  It is assumed
928  * that the consumer has already validated the contents of the nvlist, so we
929  * don't have to worry about error semantics.
930  */
931 int
932 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
933     nvlist_t *props, nvlist_t *fsprops)
934 {
935 	zfs_cmd_t zc = { 0 };
936 	nvlist_t *zc_fsprops = NULL;
937 	nvlist_t *zc_props = NULL;
938 	char msg[1024];
939 	char *altroot;
940 	int ret = -1;
941 
942 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
943 	    "cannot create '%s'"), pool);
944 
945 	if (!zpool_name_valid(hdl, B_FALSE, pool))
946 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
947 
948 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
949 		return (-1);
950 
951 	if (props) {
952 		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
953 		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
954 			goto create_failed;
955 		}
956 	}
957 
958 	if (fsprops) {
959 		uint64_t zoned;
960 		char *zonestr;
961 
962 		zoned = ((nvlist_lookup_string(fsprops,
963 		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
964 		    strcmp(zonestr, "on") == 0);
965 
966 		if ((zc_fsprops = zfs_valid_proplist(hdl,
967 		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
968 			goto create_failed;
969 		}
970 		if (!zc_props &&
971 		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
972 			goto create_failed;
973 		}
974 		if (nvlist_add_nvlist(zc_props,
975 		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
976 			goto create_failed;
977 		}
978 	}
979 
980 	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
981 		goto create_failed;
982 
983 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
984 
985 	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
986 
987 		zcmd_free_nvlists(&zc);
988 		nvlist_free(zc_props);
989 		nvlist_free(zc_fsprops);
990 
991 		switch (errno) {
992 		case EBUSY:
993 			/*
994 			 * This can happen if the user has specified the same
995 			 * device multiple times.  We can't reliably detect this
996 			 * until we try to add it and see we already have a
997 			 * label.
998 			 */
999 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1000 			    "one or more vdevs refer to the same device"));
1001 			return (zfs_error(hdl, EZFS_BADDEV, msg));
1002 
1003 		case EOVERFLOW:
1004 			/*
1005 			 * This occurs when one of the devices is below
1006 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1007 			 * device was the problem device since there's no
1008 			 * reliable way to determine device size from userland.
1009 			 */
1010 			{
1011 				char buf[64];
1012 
1013 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1014 
1015 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1016 				    "one or more devices is less than the "
1017 				    "minimum size (%s)"), buf);
1018 			}
1019 			return (zfs_error(hdl, EZFS_BADDEV, msg));
1020 
1021 		case ENOSPC:
1022 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1023 			    "one or more devices is out of space"));
1024 			return (zfs_error(hdl, EZFS_BADDEV, msg));
1025 
1026 		case ENOTBLK:
1027 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1028 			    "cache device must be a disk or disk slice"));
1029 			return (zfs_error(hdl, EZFS_BADDEV, msg));
1030 
1031 		default:
1032 			return (zpool_standard_error(hdl, errno, msg));
1033 		}
1034 	}
1035 
1036 	/*
1037 	 * If this is an alternate root pool, then we automatically set the
1038 	 * mountpoint of the root dataset to be '/'.
1039 	 */
1040 	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
1041 	    &altroot) == 0) {
1042 		zfs_handle_t *zhp;
1043 
1044 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
1045 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1046 		    "/") == 0);
1047 
1048 		zfs_close(zhp);
1049 	}
1050 
1051 create_failed:
1052 	zcmd_free_nvlists(&zc);
1053 	nvlist_free(zc_props);
1054 	nvlist_free(zc_fsprops);
1055 	return (ret);
1056 }
1057 
1058 /*
1059  * Destroy the given pool.  It is up to the caller to ensure that there are no
1060  * datasets left in the pool.
1061  */
1062 int
1063 zpool_destroy(zpool_handle_t *zhp)
1064 {
1065 	zfs_cmd_t zc = { 0 };
1066 	zfs_handle_t *zfp = NULL;
1067 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1068 	char msg[1024];
1069 
1070 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1071 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
1072 	    ZFS_TYPE_FILESYSTEM)) == NULL)
1073 		return (-1);
1074 
1075 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1076 
1077 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1078 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1079 		    "cannot destroy '%s'"), zhp->zpool_name);
1080 
1081 		if (errno == EROFS) {
1082 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1083 			    "one or more devices is read only"));
1084 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1085 		} else {
1086 			(void) zpool_standard_error(hdl, errno, msg);
1087 		}
1088 
1089 		if (zfp)
1090 			zfs_close(zfp);
1091 		return (-1);
1092 	}
1093 
1094 	if (zfp) {
1095 		remove_mountpoint(zfp);
1096 		zfs_close(zfp);
1097 	}
1098 
1099 	return (0);
1100 }
1101 
1102 /*
1103  * Add the given vdevs to the pool.  The caller must have already performed the
1104  * necessary verification to ensure that the vdev specification is well-formed.
1105  */
1106 int
1107 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1108 {
1109 	zfs_cmd_t zc = { 0 };
1110 	int ret;
1111 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1112 	char msg[1024];
1113 	nvlist_t **spares, **l2cache;
1114 	uint_t nspares, nl2cache;
1115 
1116 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1117 	    "cannot add to '%s'"), zhp->zpool_name);
1118 
1119 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1120 	    SPA_VERSION_SPARES &&
1121 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1122 	    &spares, &nspares) == 0) {
1123 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1124 		    "upgraded to add hot spares"));
1125 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1126 	}
1127 
1128 	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1129 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1130 		uint64_t s;
1131 
1132 		for (s = 0; s < nspares; s++) {
1133 			char *path;
1134 
1135 			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1136 			    &path) == 0 && pool_uses_efi(spares[s])) {
1137 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1138 				    "device '%s' contains an EFI label and "
1139 				    "cannot be used on root pools."),
1140 				    zpool_vdev_name(hdl, NULL, spares[s],
1141 				    B_FALSE));
1142 				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1143 			}
1144 		}
1145 	}
1146 
1147 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1148 	    SPA_VERSION_L2CACHE &&
1149 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1150 	    &l2cache, &nl2cache) == 0) {
1151 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1152 		    "upgraded to add cache devices"));
1153 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1154 	}
1155 
1156 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1157 		return (-1);
1158 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1159 
1160 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1161 		switch (errno) {
1162 		case EBUSY:
1163 			/*
1164 			 * This can happen if the user has specified the same
1165 			 * device multiple times.  We can't reliably detect this
1166 			 * until we try to add it and see we already have a
1167 			 * label.
1168 			 */
1169 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1170 			    "one or more vdevs refer to the same device"));
1171 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1172 			break;
1173 
1174 		case EOVERFLOW:
1175 			/*
1176 			 * This occurs when one of the devices is below
1177 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1178 			 * device was the problem device since there's no
1179 			 * reliable way to determine device size from userland.
1180 			 */
1181 			{
1182 				char buf[64];
1183 
1184 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1185 
1186 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1187 				    "device is less than the minimum "
1188 				    "size (%s)"), buf);
1189 			}
1190 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1191 			break;
1192 
1193 		case ENOTSUP:
1194 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1195 			    "pool must be upgraded to add these vdevs"));
1196 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
1197 			break;
1198 
1199 		case EDOM:
1200 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1201 			    "root pool can not have multiple vdevs"
1202 			    " or separate logs"));
1203 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1204 			break;
1205 
1206 		case ENOTBLK:
1207 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1208 			    "cache device must be a disk or disk slice"));
1209 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1210 			break;
1211 
1212 		default:
1213 			(void) zpool_standard_error(hdl, errno, msg);
1214 		}
1215 
1216 		ret = -1;
1217 	} else {
1218 		ret = 0;
1219 	}
1220 
1221 	zcmd_free_nvlists(&zc);
1222 
1223 	return (ret);
1224 }
1225 
1226 /*
1227  * Export the pool from the system.  The caller must ensure that there are no
1228  * mounted datasets in the pool.
1229  */
1230 static int
1231 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
1232 {
1233 	zfs_cmd_t zc = { 0 };
1234 	char msg[1024];
1235 
1236 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1237 	    "cannot export '%s'"), zhp->zpool_name);
1238 
1239 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1240 	zc.zc_cookie = force;
1241 	zc.zc_guid = hardforce;
1242 
1243 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1244 		switch (errno) {
1245 		case EXDEV:
1246 			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1247 			    "use '-f' to override the following errors:\n"
1248 			    "'%s' has an active shared spare which could be"
1249 			    " used by other pools once '%s' is exported."),
1250 			    zhp->zpool_name, zhp->zpool_name);
1251 			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1252 			    msg));
1253 		default:
1254 			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1255 			    msg));
1256 		}
1257 	}
1258 
1259 	return (0);
1260 }
1261 
1262 int
1263 zpool_export(zpool_handle_t *zhp, boolean_t force)
1264 {
1265 	return (zpool_export_common(zhp, force, B_FALSE));
1266 }
1267 
1268 int
1269 zpool_export_force(zpool_handle_t *zhp)
1270 {
1271 	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
1272 }
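
/*
 * Illustrative sketch: a plain export versus a hard-forced export:
 *
 *	(void) zpool_export(zhp, B_FALSE);	(normal)
 *	(void) zpool_export_force(zhp);		(force and hardforce)
 */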
1273 
1274 static void
1275 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1276     nvlist_t *rbi)
1277 {
1278 	uint64_t rewindto;
1279 	int64_t loss = -1;
1280 	struct tm t;
1281 	char timestr[128];
1282 
1283 	if (!hdl->libzfs_printerr || rbi == NULL)
1284 		return;
1285 
1286 	if (nvlist_lookup_uint64(rbi, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1287 		return;
1288 	(void) nvlist_lookup_int64(rbi, ZPOOL_CONFIG_REWIND_TIME, &loss);
1289 
1290 	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1291 	    strftime(timestr, 128, "%c", &t) != 0) {
1292 		if (dryrun) {
1293 			(void) printf(dgettext(TEXT_DOMAIN,
1294 			    "Would be able to return %s "
1295 			    "to its state as of %s.\n"),
1296 			    name, timestr);
1297 		} else {
1298 			(void) printf(dgettext(TEXT_DOMAIN,
1299 			    "Pool %s returned to its state as of %s.\n"),
1300 			    name, timestr);
1301 		}
1302 		if (loss > 120) {
1303 			(void) printf(dgettext(TEXT_DOMAIN,
1304 			    "%s approximately %jd "),
1305 			    dryrun ? "Would discard" : "Discarded",
1306 			    ((uintmax_t)(loss + 30) / 60));
1307 			(void) printf(dgettext(TEXT_DOMAIN,
1308 			    "minutes of transactions.\n"));
1309 		} else if (loss > 0) {
1310 			(void) printf(dgettext(TEXT_DOMAIN,
1311 			    "%s approximately %jd "),
1312 			    dryrun ? "Would discard" : "Discarded",
1313 			    (uintmax_t)loss);
1314 			(void) printf(dgettext(TEXT_DOMAIN,
1315 			    "seconds of transactions.\n"));
1316 		}
1317 	}
1318 }
1319 
1320 void
1321 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1322     nvlist_t *config)
1323 {
1324 	int64_t loss = -1;
1325 	uint64_t edata = UINT64_MAX;
1326 	uint64_t rewindto;
1327 	struct tm t;
1328 	char timestr[128];
1329 
1330 	if (!hdl->libzfs_printerr)
1331 		return;
1332 
1333 	if (reason >= 0)
1334 		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
1335 	else
1336 		(void) printf(dgettext(TEXT_DOMAIN, "\t"));
1337 
1338 	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME is missing */
1339 	if (nvlist_lookup_uint64(config,
1340 	    ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1341 		goto no_info;
1342 
1343 	(void) nvlist_lookup_int64(config, ZPOOL_CONFIG_REWIND_TIME, &loss);
1344 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1345 	    &edata);
1346 
1347 	(void) printf(dgettext(TEXT_DOMAIN,
1348 	    "Recovery is possible, but will result in some data loss.\n"));
1349 
1350 	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1351 	    strftime(timestr, 128, "%c", &t) != 0) {
1352 		(void) printf(dgettext(TEXT_DOMAIN,
1353 		    "\tReturning the pool to its state as of %s\n"
1354 		    "\tshould correct the problem.  "),
1355 		    timestr);
1356 	} else {
1357 		(void) printf(dgettext(TEXT_DOMAIN,
1358 		    "\tReverting the pool to an earlier state "
1359 		    "should correct the problem.\n\t"));
1360 	}
1361 
1362 	if (loss > 120) {
1363 		(void) printf(dgettext(TEXT_DOMAIN,
1364 		    "Approximately %jd minutes of data\n"
1365 		    "\tmust be discarded, irreversibly.  "),
1366 		    (uintmax_t)((loss + 30) / 60));
1367 	} else if (loss > 0) {
1368 		(void) printf(dgettext(TEXT_DOMAIN,
1369 		    "Approximately %jd seconds of data\n"
1370 		    "\tmust be discarded, irreversibly.  "), (uintmax_t)loss);
1371 	}
1372 	if (edata != 0 && edata != UINT64_MAX) {
1373 		if (edata == 1) {
1374 			(void) printf(dgettext(TEXT_DOMAIN,
1375 			    "After rewind, at least\n"
1376 			    "\tone persistent user-data error will remain.  "));
1377 		} else {
1378 			(void) printf(dgettext(TEXT_DOMAIN,
1379 			    "After rewind, several\n"
1380 			    "\tpersistent user-data errors will remain.  "));
1381 		}
1382 	}
1383 	(void) printf(dgettext(TEXT_DOMAIN,
1384 	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
1385 	    reason >= 0 ? "clear" : "import", name);
1386 
1387 	(void) printf(dgettext(TEXT_DOMAIN,
1388 	    "A scrub of the pool\n"
1389 	    "\tis strongly recommended after recovery.\n"));
1390 	return;
1391 
1392 no_info:
1393 	(void) printf(dgettext(TEXT_DOMAIN,
1394 	    "Destroy and re-create the pool from\n\ta backup source.\n"));
1395 }
1396 
1397 /*
1398  * zpool_import() is a contracted interface; it should be kept stable
1399  * if possible.
1400  *
1401  * Applications should use zpool_import_props() to import a pool with
1402  * new property values to be set.
1403  */
1404 int
1405 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1406     char *altroot)
1407 {
1408 	nvlist_t *props = NULL;
1409 	int ret;
1410 
1411 	if (altroot != NULL) {
1412 		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1413 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1414 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1415 			    newname));
1416 		}
1417 
1418 		if (nvlist_add_string(props,
1419 		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1420 		    nvlist_add_string(props,
1421 		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1422 			nvlist_free(props);
1423 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1424 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1425 			    newname));
1426 		}
1427 	}
1428 
1429 	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
1430 	if (props)
1431 		nvlist_free(props);
1432 	return (ret);
1433 }
1434 
1435 /*
1436  * Import the given pool using the known configuration and a list of
1437  * properties to be set. The configuration should have come from
1438  * zpool_find_import(). The 'newname' parameter controls whether the pool
1439  * is imported with a different name.
1440  */
1441 int
1442 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1443     nvlist_t *props, boolean_t importfaulted)
1444 {
1445 	zfs_cmd_t zc = { 0 };
1446 	zpool_rewind_policy_t policy;
1447 	nvlist_t *nvi = NULL;
1448 	char *thename;
1449 	char *origname;
1450 	uint64_t returned_size;
1451 	int ret;
1452 	char errbuf[1024];
1453 
1454 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1455 	    &origname) == 0);
1456 
1457 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1458 	    "cannot import pool '%s'"), origname);
1459 
1460 	if (newname != NULL) {
1461 		if (!zpool_name_valid(hdl, B_FALSE, newname))
1462 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1463 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1464 			    newname));
1465 		thename = __UNCONST(newname);
1466 	} else {
1467 		thename = origname;
1468 	}
1469 
1470 	if (props) {
1471 		uint64_t version;
1472 
1473 		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1474 		    &version) == 0);
1475 
1476 		if ((props = zpool_valid_proplist(hdl, origname,
1477 		    props, version, B_TRUE, errbuf)) == NULL) {
1478 			return (-1);
1479 		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1480 			nvlist_free(props);
1481 			return (-1);
1482 		}
1483 	}
1484 
1485 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1486 
1487 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1488 	    &zc.zc_guid) == 0);
1489 
1490 	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1491 		nvlist_free(props);
1492 		return (-1);
1493 	}
1494 	returned_size =  zc.zc_nvlist_conf_size + 512;
1495 	if (zcmd_alloc_dst_nvlist(hdl, &zc, returned_size) != 0) {
1496 		nvlist_free(props);
1497 		return (-1);
1498 	}
1499 
1500 	zc.zc_cookie = (uint64_t)importfaulted;
1501 	ret = 0;
1502 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1503 		char desc[1024];
1504 
1505 		(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
1506 		zpool_get_rewind_policy(config, &policy);
1507 		/*
1508 		 * The dry run failed, but we print out what success
1509 		 * would have looked like if we had found a best txg.
1510 		 */
1511 		if ((policy.zrp_request & ZPOOL_TRY_REWIND) && nvi) {
1512 			zpool_rewind_exclaim(hdl, newname ? origname : thename,
1513 			    B_TRUE, nvi);
1514 			nvlist_free(nvi);
1515 			return (-1);
1516 		}
1517 
1518 		if (newname == NULL)
1519 			(void) snprintf(desc, sizeof (desc),
1520 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1521 			    thename);
1522 		else
1523 			(void) snprintf(desc, sizeof (desc),
1524 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1525 			    origname, thename);
1526 
1527 		switch (errno) {
1528 		case ENOTSUP:
1529 			/*
1530 			 * Unsupported version.
1531 			 */
1532 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
1533 			break;
1534 
1535 		case EINVAL:
1536 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1537 			break;
1538 
1539 		default:
1540 			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
1541 			(void) zpool_standard_error(hdl, errno, desc);
1542 			zpool_explain_recover(hdl,
1543 			    newname ? origname : thename, -errno, nvi);
1544 			nvlist_free(nvi);
1545 			break;
1546 		}
1547 
1548 		ret = -1;
1549 	} else {
1550 		zpool_handle_t *zhp;
1551 
1552 		/*
1553 		 * This should never fail, but play it safe anyway.
1554 		 */
1555 		if (zpool_open_silent(hdl, thename, &zhp) != 0)
1556 			ret = -1;
1557 		else if (zhp != NULL)
1558 			zpool_close(zhp);
1559 		(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
1560 		zpool_get_rewind_policy(config, &policy);
1561 		if (policy.zrp_request &
1562 		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1563 			zpool_rewind_exclaim(hdl, newname ? origname : thename,
1564 			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
1565 			    nvi);
1566 		}
1567 		nvlist_free(nvi);
1568 		return (0);
1569 	}
1570 
1571 	zcmd_free_nvlists(&zc);
1572 	nvlist_free(props);
1573 
1574 	return (ret);
1575 }
1576 
1577 /*
1578  * Scrub the pool.
1579  */
1580 int
1581 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
1582 {
1583 	zfs_cmd_t zc = { 0 };
1584 	char msg[1024];
1585 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1586 
1587 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1588 	zc.zc_cookie = type;
1589 
1590 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
1591 		return (0);
1592 
1593 	(void) snprintf(msg, sizeof (msg),
1594 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1595 
1596 	if (errno == EBUSY)
1597 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
1598 	else
1599 		return (zpool_standard_error(hdl, errno, msg));
1600 }
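
/*
 * Illustrative sketch: POOL_SCRUB_EVERYTHING starts a scrub and
 * POOL_SCRUB_NONE stops one in progress:
 *
 *	if (zpool_scrub(zhp, POOL_SCRUB_EVERYTHING) != 0)
 *		(void) fprintf(stderr, "scrub could not be started\n");
 */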
1601 
1602 /*
1603  * Find a vdev that matches the search criteria specified.  We use the
1604  * nvpair name to determine how we should look for the device.
1605  * 'avail_spare' is set to B_TRUE if the provided guid refers to an AVAIL
1606  * spare; it is B_FALSE if it is an INUSE spare.
1607  */
1608 static nvlist_t *
1609 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1610     boolean_t *l2cache, boolean_t *log)
1611 {
1612 	uint_t c, children;
1613 	nvlist_t **child;
1614 	nvlist_t *ret;
1615 	uint64_t is_log;
1616 	char *srchkey;
1617 	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1618 
1619 	/* Nothing to look for */
1620 	if (search == NULL || pair == NULL)
1621 		return (NULL);
1622 
1623 	/* Obtain the key we will use to search */
1624 	srchkey = nvpair_name(pair);
1625 
1626 	switch (nvpair_type(pair)) {
1627 	case DATA_TYPE_UINT64: {
1628 		uint64_t srchval, theguid, present;
1629 
1630 		verify(nvpair_value_uint64(pair, &srchval) == 0);
1631 		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1632 			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1633 			    &present) == 0) {
1634 				/*
1635 				 * If the device has never been present since
1636 				 * import, the only reliable way to match the
1637 				 * vdev is by GUID.
1638 				 */
1639 				verify(nvlist_lookup_uint64(nv,
1640 				    ZPOOL_CONFIG_GUID, &theguid) == 0);
1641 				if (theguid == srchval)
1642 					return (nv);
1643 			}
1644 		}
1645 		break;
1646 	}
1647 
1648 	case DATA_TYPE_STRING: {
1649 		char *srchval, *val;
1650 
1651 		verify(nvpair_value_string(pair, &srchval) == 0);
1652 		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1653 			break;
1654 
1655 		/*
1656 		 * Search for the requested value. We special case the search
1657 		 * for ZPOOL_CONFIG_PATH when it's a wholedisk and when
1658 		 * looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1659 		 * Otherwise, all other searches are simple string compares.
1660 		 */
1661 		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && val) {
1662 			uint64_t wholedisk = 0;
1663 
1664 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1665 			    &wholedisk);
1666 			if (wholedisk) {
1667 				/*
1668 				 * For whole disks, the internal path has 's0',
1669 				 * but the path passed in by the user doesn't.
1670 				 */
1671 				if (strlen(srchval) == strlen(val) - 2 &&
1672 				    strncmp(srchval, val, strlen(srchval)) == 0)
1673 					return (nv);
1674 				break;
1675 			}
1676 		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
1677 			char *type, *idx, *end, *p;
1678 			uint64_t id, vdev_id;
1679 
1680 			/*
1681 			 * Determine our vdev type, keeping in mind
1682 			 * that the srchval is composed of a type and
1683 			 * vdev id pair (e.g. mirror-4).
1684 			 */
1685 			if ((type = strdup(srchval)) == NULL)
1686 				return (NULL);
1687 
1688 			if ((p = strrchr(type, '-')) == NULL) {
1689 				free(type);
1690 				break;
1691 			}
1692 			idx = p + 1;
1693 			*p = '\0';
1694 
1695 			/*
1696 			 * If the types don't match then keep looking.
1697 			 */
1698 			if (strncmp(val, type, strlen(val)) != 0) {
1699 				free(type);
1700 				break;
1701 			}
1702 
1703 			verify(strncmp(type, VDEV_TYPE_RAIDZ,
1704 			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1705 			    strncmp(type, VDEV_TYPE_MIRROR,
1706 			    strlen(VDEV_TYPE_MIRROR)) == 0);
1707 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
1708 			    &id) == 0);
1709 
1710 			errno = 0;
1711 			vdev_id = strtoull(idx, &end, 10);
1712 
1713 			free(type);
1714 			if (errno != 0)
1715 				return (NULL);
1716 
1717 			/*
1718 			 * Now verify that we have the correct vdev id.
1719 			 */
1720 			if (vdev_id == id)
1721 				return (nv);
1722 		}
1723 
1724 		/*
1725 		 * Common case
1726 		 */
1727 		if (strcmp(srchval, val) == 0)
1728 			return (nv);
1729 		break;
1730 	}
1731 
1732 	default:
1733 		break;
1734 	}
1735 
1736 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1737 	    &child, &children) != 0)
1738 		return (NULL);
1739 
1740 	for (c = 0; c < children; c++) {
1741 		if ((ret = vdev_to_nvlist_iter(child[c], search,
1742 		    avail_spare, l2cache, NULL)) != NULL) {
1743 			/*
1744 			 * The 'is_log' value is only set for the toplevel
1745 			 * vdev, not the leaf vdevs.  So we always look up the
1746 			 * log device from the root of the vdev tree (where
1747 			 * 'log' is non-NULL).
1748 			 */
1749 			if (log != NULL &&
1750 			    nvlist_lookup_uint64(child[c],
1751 			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
1752 			    is_log) {
1753 				*log = B_TRUE;
1754 			}
1755 			return (ret);
1756 		}
1757 	}
1758 
1759 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1760 	    &child, &children) == 0) {
1761 		for (c = 0; c < children; c++) {
1762 			if ((ret = vdev_to_nvlist_iter(child[c], search,
1763 			    avail_spare, l2cache, NULL)) != NULL) {
1764 				*avail_spare = B_TRUE;
1765 				return (ret);
1766 			}
1767 		}
1768 	}
1769 
1770 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1771 	    &child, &children) == 0) {
1772 		for (c = 0; c < children; c++) {
1773 			if ((ret = vdev_to_nvlist_iter(child[c], search,
1774 			    avail_spare, l2cache, NULL)) != NULL) {
1775 				*l2cache = B_TRUE;
1776 				return (ret);
1777 			}
1778 		}
1779 	}
1780 
1781 	return (NULL);
1782 }
1783 
1784 /*
1785  * Given a physical path (minus the "/devices" prefix), find the
1786  * associated vdev.
1787  */
1788 nvlist_t *
1789 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
1790     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
1791 {
1792 	nvlist_t *search, *nvroot, *ret;
1793 
1794 	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1795 	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
1796 
1797 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1798 	    &nvroot) == 0);
1799 
1800 	*avail_spare = B_FALSE;
1801 	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1802 	nvlist_free(search);
1803 
1804 	return (ret);
1805 }
1806 
1807 /*
1808  * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
1809  */
1810 static boolean_t
1811 zpool_vdev_is_interior(const char *name)
1812 {
1813 	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1814 	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
1815 		return (B_TRUE);
1816 	return (B_FALSE);
1817 }
1818 
1819 nvlist_t *
1820 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1821     boolean_t *l2cache, boolean_t *log)
1822 {
1823 	char buf[MAXPATHLEN];
1824 	char *end;
1825 	nvlist_t *nvroot, *search, *ret;
1826 	uint64_t guid;
1827 
1828 	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1829 
1830 	guid = strtoull(path, &end, 10);
1831 	if (guid != 0 && *end == '\0') {
1832 		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
1833 	} else if (zpool_vdev_is_interior(path)) {
1834 		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
1835 	} else if (path[0] != '/') {
1836 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
1837 		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
1838 	} else {
1839 		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
1840 	}
1841 
1842 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1843 	    &nvroot) == 0);
1844 
1845 	*avail_spare = B_FALSE;
1846 	*l2cache = B_FALSE;
1847 	if (log != NULL)
1848 		*log = B_FALSE;
1849 	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1850 	nvlist_free(search);
1851 
1852 	return (ret);
1853 }
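
/*
 * Illustrative sketch: looking up a leaf vdev by the same name a user
 * would pass on the command line ("c0t0d0" is a hypothetical device):
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *
 *	tgt = zpool_find_vdev(zhp, "c0t0d0", &spare, &l2cache, &log);
 *	if (tgt == NULL)
 *		(void) fprintf(stderr, "no such device in pool\n");
 */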
1854 
1855 static int
1856 vdev_online(nvlist_t *nv)
1857 {
1858 	uint64_t ival;
1859 
1860 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1861 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1862 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1863 		return (0);
1864 
1865 	return (1);
1866 }
1867 
1868 /*
1869  * Helper function for vdev_get_physpaths().
1870  */
1871 static int
1872 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
1873     size_t *bytes_written)
1874 {
1875 	size_t bytes_left, pos, rsz;
1876 	char *tmppath;
1877 	const char *format;
1878 
1879 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
1880 	    &tmppath) != 0)
1881 		return (EZFS_NODEVICE);
1882 
1883 	pos = *bytes_written;
1884 	bytes_left = physpath_size - pos;
1885 	format = (pos == 0) ? "%s" : " %s";
1886 
1887 	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
1888 	*bytes_written += rsz;
1889 
1890 	if (rsz >= bytes_left) {
1891 		/* if physpath was not copied properly, clear it */
1892 		if (bytes_left != 0) {
1893 			physpath[pos] = 0;
1894 		}
1895 		return (EZFS_NOSPC);
1896 	}
1897 	return (0);
1898 }
1899 
1900 static int
1901 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
1902     size_t *rsz, boolean_t is_spare)
1903 {
1904 	char *type;
1905 	int ret;
1906 
1907 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
1908 		return (EZFS_INVALCONFIG);
1909 
1910 	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1911 		/*
1912 		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
1913 		 * For a spare vdev, we only want to boot from the active
1914 		 * spare device.
1915 		 */
1916 		if (is_spare) {
1917 			uint64_t spare = 0;
1918 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
1919 			    &spare);
1920 			if (!spare)
1921 				return (EZFS_INVALCONFIG);
1922 		}
1923 
1924 		if (vdev_online(nv)) {
1925 			if ((ret = vdev_get_one_physpath(nv, physpath,
1926 			    phypath_size, rsz)) != 0)
1927 				return (ret);
1928 		}
1929 	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
1930 	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
1931 	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
1932 		nvlist_t **child;
1933 		uint_t count;
1934 		int i, rv;
1935 
1936 		if (nvlist_lookup_nvlist_array(nv,
1937 		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
1938 			return (EZFS_INVALCONFIG);
1939 
1940 		for (i = 0; i < count; i++) {
1941 			rv = vdev_get_physpaths(child[i], physpath,
1942 			    phypath_size, rsz, is_spare);
1943 			if (rv == EZFS_NOSPC)
1944 				return (rv);
1945 		}
1946 	}
1947 
1948 	return (EZFS_POOL_INVALARG);
1949 }
1950 
1951 /*
1952  * Get phys_path for a root pool config.
1953  * Return 0 on success; non-zero on failure.
1954  */
1955 static int
1956 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
1957 {
1958 	size_t rsz;
1959 	nvlist_t *vdev_root;
1960 	nvlist_t **child;
1961 	uint_t count;
1962 	char *type;
1963 
1964 	rsz = 0;
1965 
1966 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1967 	    &vdev_root) != 0)
1968 		return (EZFS_INVALCONFIG);
1969 
1970 	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
1971 	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
1972 	    &child, &count) != 0)
1973 		return (EZFS_INVALCONFIG);
1974 
1975 	/*
	 * A root pool cannot have EFI labeled disks and can only have
	 * a single top-level vdev.
1978 	 */
1979 	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
1980 	    pool_uses_efi(vdev_root))
1981 		return (EZFS_POOL_INVALARG);
1982 
1983 	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
1984 	    B_FALSE);
1985 
1986 	/* No online devices */
1987 	if (rsz == 0)
1988 		return (EZFS_NODEVICE);
1989 
1990 	return (0);
1991 }
1992 
1993 /*
 * Get phys_path for a root pool.
1995  * Return 0 on success; non-zero on failure.
1996  */
1997 int
1998 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
1999 {
2000 	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2001 	    phypath_size));
2002 }
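
/*
 * Illustrative sketch (not part of the library): how a boot-related
 * caller might fetch the physical path(s) of a root pool.  The handle
 * "zhp" is assumed to come from zpool_open() on the root pool.
 *
 *	char physpath[MAXPATHLEN];
 *
 *	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
 *		(void) printf("boot device(s): %s\n", physpath);
 */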
2003 
2004 /*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
2007  */
2008 static int
2009 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2010 {
2011 	char path[MAXPATHLEN];
2012 	char errbuf[1024];
2013 	int fd, error;
2014 	int (*_efi_use_whole_disk)(int);
2015 
2016 	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2017 	    "efi_use_whole_disk")) == NULL)
2018 		return (-1);
2019 
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot relabel '%s'"), name);
	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}
2027 
2028 	/*
2029 	 * It's possible that we might encounter an error if the device
2030 	 * does not have any unallocated space left. If so, we simply
2031 	 * ignore that error and continue on.
2032 	 */
2033 	error = _efi_use_whole_disk(fd);
2034 	(void) close(fd);
	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));
		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2039 	}
2040 	return (0);
2041 }
2042 
2043 /*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
2045  * ZFS_ONLINE_* flags.
2046  */
2047 int
2048 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2049     vdev_state_t *newstate)
2050 {
2051 	zfs_cmd_t zc = { 0 };
2052 	char msg[1024];
2053 	nvlist_t *tgt;
2054 	boolean_t avail_spare, l2cache, islog;
2055 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2056 
2057 	if (flags & ZFS_ONLINE_EXPAND) {
2058 		(void) snprintf(msg, sizeof (msg),
2059 		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2060 	} else {
2061 		(void) snprintf(msg, sizeof (msg),
2062 		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2063 	}
2064 
2065 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2066 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2067 	    &islog)) == NULL)
2068 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2069 
2070 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2071 
2072 	if (avail_spare)
2073 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2074 
2075 	if (flags & ZFS_ONLINE_EXPAND ||
2076 	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2077 		char *pathname = NULL;
2078 		uint64_t wholedisk = 0;
2079 
2080 		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2081 		    &wholedisk);
2082 		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2083 		    &pathname) == 0);
2084 
2085 		/*
2086 		 * XXX - L2ARC 1.0 devices can't support expansion.
2087 		 */
2088 		if (l2cache) {
2089 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2090 			    "cannot expand cache devices"));
2091 			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2092 		}
2093 
2094 		if (wholedisk) {
2095 			pathname += strlen(DISK_ROOT) + 1;
2096 			(void) zpool_relabel_disk(zhp->zpool_hdl, pathname);
2097 		}
2098 	}
2099 
2100 	zc.zc_cookie = VDEV_STATE_ONLINE;
2101 	zc.zc_obj = flags;
2102 
2103 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2104 		if (errno == EINVAL) {
2105 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2106 			    "from this pool into a new one.  Use '%s' "
2107 			    "instead"), "zpool detach");
2108 			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2109 		}
2110 		return (zpool_standard_error(hdl, errno, msg));
2111 	}
2112 
2113 	*newstate = zc.zc_cookie;
2114 	return (0);
2115 }
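
/*
 * Illustrative sketch (not part of the library): onlining a previously
 * offlined device and asking for expansion.  The handle "zhp" and the
 * device name "c1t0d0s0" are hypothetical.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "c1t0d0s0", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate != VDEV_STATE_HEALTHY)
 *		(void) printf("device online, but not healthy\n");
 */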
2116 
2117 /*
2118  * Take the specified vdev offline
2119  */
2120 int
2121 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2122 {
2123 	zfs_cmd_t zc = { 0 };
2124 	char msg[1024];
2125 	nvlist_t *tgt;
2126 	boolean_t avail_spare, l2cache;
2127 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2128 
2129 	(void) snprintf(msg, sizeof (msg),
2130 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2131 
2132 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2133 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2134 	    NULL)) == NULL)
2135 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2136 
2137 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2138 
2139 	if (avail_spare)
2140 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2141 
2142 	zc.zc_cookie = VDEV_STATE_OFFLINE;
2143 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2144 
2145 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2146 		return (0);
2147 
2148 	switch (errno) {
	case EBUSY:
		/*
2152 		 * There are no other replicas of this device.
2153 		 */
2154 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2155 
2156 	case EEXIST:
2157 		/*
		 * The log device has unplayed logs.
2159 		 */
2160 		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2161 
2162 	default:
2163 		return (zpool_standard_error(hdl, errno, msg));
2164 	}
2165 }
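
/*
 * Illustrative sketch (not part of the library): temporarily offlining
 * a device, e.g. before pulling it for service.  With 'istmp' set, the
 * offline state does not persist across export/import.
 *
 *	if (zpool_vdev_offline(zhp, "c1t0d0s0", B_TRUE) != 0)
 *		(void) fprintf(stderr, "offline failed\n");
 */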
2166 
2167 /*
2168  * Mark the given vdev faulted.
2169  */
2170 int
2171 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2172 {
2173 	zfs_cmd_t zc = { 0 };
2174 	char msg[1024];
2175 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2176 
2177 	(void) snprintf(msg, sizeof (msg),
2178 	    dgettext(TEXT_DOMAIN, "cannot fault %ju"), (uintmax_t)guid);
2179 
2180 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2181 	zc.zc_guid = guid;
2182 	zc.zc_cookie = VDEV_STATE_FAULTED;
2183 	zc.zc_obj = aux;
2184 
2185 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2186 		return (0);
2187 
2188 	switch (errno) {
	case EBUSY:
		/*
2192 		 * There are no other replicas of this device.
2193 		 */
2194 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2195 
2196 	default:
2197 		return (zpool_standard_error(hdl, errno, msg));
	}
}
2201 
2202 /*
2203  * Mark the given vdev degraded.
2204  */
2205 int
2206 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2207 {
2208 	zfs_cmd_t zc = { 0 };
2209 	char msg[1024];
2210 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2211 
2212 	(void) snprintf(msg, sizeof (msg),
2213 	    dgettext(TEXT_DOMAIN, "cannot degrade %ju"), (uintmax_t)guid);
2214 
2215 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2216 	zc.zc_guid = guid;
2217 	zc.zc_cookie = VDEV_STATE_DEGRADED;
2218 	zc.zc_obj = aux;
2219 
2220 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2221 		return (0);
2222 
2223 	return (zpool_standard_error(hdl, errno, msg));
2224 }
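
/*
 * Illustrative sketch (not part of the library): a diagnosis engine
 * such as fmd identifies devices by GUID rather than path, so the
 * fault/degrade entry points take the vdev GUID directly.  The GUID
 * below is hypothetical; VDEV_AUX_ERR_EXCEEDED marks the device as
 * having exceeded its error thresholds.
 *
 *	uint64_t guid = 0x1234567890abcdefULL;
 *
 *	if (zpool_vdev_degrade(zhp, guid, VDEV_AUX_ERR_EXCEEDED) != 0)
 *		(void) fprintf(stderr, "degrade failed\n");
 */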
2225 
2226 /*
2227  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2228  * a hot spare.
2229  */
2230 static boolean_t
2231 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2232 {
2233 	nvlist_t **child;
2234 	uint_t c, children;
2235 	char *type;
2236 
2237 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2238 	    &children) == 0) {
2239 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2240 		    &type) == 0);
2241 
2242 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2243 		    children == 2 && child[which] == tgt)
2244 			return (B_TRUE);
2245 
2246 		for (c = 0; c < children; c++)
2247 			if (is_replacing_spare(child[c], tgt, which))
2248 				return (B_TRUE);
2249 	}
2250 
2251 	return (B_FALSE);
2252 }
2253 
2254 /*
2255  * Attach new_disk (fully described by nvroot) to old_disk.
2256  * If 'replacing' is specified, the new disk will replace the old one.
2257  */
2258 int
2259 zpool_vdev_attach(zpool_handle_t *zhp,
2260     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2261 {
2262 	zfs_cmd_t zc = { 0 };
2263 	char msg[1024];
2264 	int ret;
2265 	nvlist_t *tgt;
2266 	boolean_t avail_spare, l2cache, islog;
2267 	uint64_t val;
2268 	char *path, *newname;
2269 	nvlist_t **child;
2270 	uint_t children;
2271 	nvlist_t *config_root;
2272 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2273 	boolean_t rootpool = pool_is_bootable(zhp);
2274 
2275 	if (replacing)
2276 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2277 		    "cannot replace %s with %s"), old_disk, new_disk);
2278 	else
2279 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2280 		    "cannot attach %s to %s"), new_disk, old_disk);
2281 
2282 	/*
2283 	 * If this is a root pool, make sure that we're not attaching an
2284 	 * EFI labeled device.
2285 	 */
2286 	if (rootpool && pool_uses_efi(nvroot)) {
2287 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2288 		    "EFI labeled devices are not supported on root pools."));
2289 		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2290 	}
2291 
2292 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2293 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == NULL)
2295 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2296 
2297 	if (avail_spare)
2298 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2299 
2300 	if (l2cache)
2301 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2302 
2303 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2304 	zc.zc_cookie = replacing;
2305 
2306 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2307 	    &child, &children) != 0 || children != 1) {
2308 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2309 		    "new device must be a single disk"));
2310 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2311 	}
2312 
2313 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2314 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2315 
2316 	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2317 		return (-1);
2318 
2319 	/*
2320 	 * If the target is a hot spare that has been swapped in, we can only
2321 	 * replace it with another hot spare.
2322 	 */
2323 	if (replacing &&
2324 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2325 	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2326 	    NULL) == NULL || !avail_spare) &&
2327 	    is_replacing_spare(config_root, tgt, 1)) {
2328 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2329 		    "can only be replaced by another hot spare"));
2330 		free(newname);
2331 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
2332 	}
2333 
2334 	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
2336 	 * already spared device.
2337 	 */
2338 	if (replacing &&
2339 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
2340 	    zpool_find_vdev(zhp, newname, &avail_spare,
2341 	    &l2cache, NULL) != NULL && avail_spare &&
2342 	    is_replacing_spare(config_root, tgt, 0)) {
2343 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2344 		    "device has already been replaced with a spare"));
2345 		free(newname);
2346 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
2347 	}
2348 
2349 	free(newname);
2350 
2351 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2352 		return (-1);
2353 
2354 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2355 
2356 	zcmd_free_nvlists(&zc);
2357 
2358 	if (ret == 0) {
2359 		if (rootpool) {
2360 			/*
2361 			 * XXX - This should be removed once we can
2362 			 * automatically install the bootblocks on the
2363 			 * newly attached disk.
2364 			 */
2365 			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
2366 			    "be sure to invoke %s to make '%s' bootable.\n"),
2367 			    BOOTCMD, new_disk);
2368 
2369 			/*
2370 			 * XXX need a better way to prevent user from
2371 			 * booting up a half-baked vdev.
2372 			 */
2373 			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2374 			    "sure to wait until resilver is done "
2375 			    "before rebooting.\n"));
2376 		}
2377 		return (0);
2378 	}
2379 
2380 	switch (errno) {
2381 	case ENOTSUP:
2382 		/*
2383 		 * Can't attach to or replace this type of vdev.
2384 		 */
2385 		if (replacing) {
2386 			if (islog)
2387 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2388 				    "cannot replace a log with a spare"));
2389 			else
2390 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2391 				    "cannot replace a replacing device"));
2392 		} else {
2393 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2394 			    "can only attach to mirrors and top-level "
2395 			    "disks"));
2396 		}
2397 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
2398 		break;
2399 
2400 	case EINVAL:
2401 		/*
2402 		 * The new device must be a single disk.
2403 		 */
2404 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2405 		    "new device must be a single disk"));
2406 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2407 		break;
2408 
2409 	case EBUSY:
2410 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2411 		    new_disk);
2412 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2413 		break;
2414 
2415 	case EOVERFLOW:
2416 		/*
2417 		 * The new device is too small.
2418 		 */
2419 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2420 		    "device is too small"));
2421 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2422 		break;
2423 
2424 	case EDOM:
2425 		/*
2426 		 * The new device has a different alignment requirement.
2427 		 */
2428 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2429 		    "devices have different sector alignment"));
2430 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2431 		break;
2432 
2433 	case ENAMETOOLONG:
2434 		/*
2435 		 * The resulting top-level vdev spec won't fit in the label.
2436 		 */
2437 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2438 		break;
2439 
2440 	default:
2441 		(void) zpool_standard_error(hdl, errno, msg);
2442 	}
2443 
2444 	return (-1);
2445 }
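
/*
 * Illustrative sketch (not part of the library): replacing one disk
 * with another.  The caller must first build "nvroot", an nvlist vdev
 * tree describing the single new disk (the zpool(1M) command does this
 * with its vdev-parsing helpers); the device names here are
 * hypothetical.
 *
 *	if (zpool_vdev_attach(zhp, "c1t0d0s0", "c2t0d0s0", nvroot,
 *	    B_TRUE) == 0)
 *		(void) printf("replacement started; resilver running\n");
 */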
2446 
2447 /*
2448  * Detach the specified device.
2449  */
2450 int
2451 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2452 {
2453 	zfs_cmd_t zc = { 0 };
2454 	char msg[1024];
2455 	nvlist_t *tgt;
2456 	boolean_t avail_spare, l2cache;
2457 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2458 
2459 	(void) snprintf(msg, sizeof (msg),
2460 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2461 
2462 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2463 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
2465 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2466 
2467 	if (avail_spare)
2468 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2469 
2470 	if (l2cache)
2471 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2472 
2473 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2474 
2475 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2476 		return (0);
2477 
2478 	switch (errno) {
2479 
2480 	case ENOTSUP:
2481 		/*
2482 		 * Can't detach from this type of vdev.
2483 		 */
2484 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2485 		    "applicable to mirror and replacing vdevs"));
2486 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
2487 		break;
2488 
2489 	case EBUSY:
2490 		/*
2491 		 * There are no other replicas of this device.
2492 		 */
2493 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2494 		break;
2495 
2496 	default:
2497 		(void) zpool_standard_error(hdl, errno, msg);
2498 	}
2499 
2500 	return (-1);
2501 }
2502 
2503 /*
2504  * Find a mirror vdev in the source nvlist.
2505  *
2506  * The mchild array contains a list of disks in one of the top-level mirrors
2507  * of the source pool.  The schild array contains a list of disks that the
2508  * user specified on the command line.  We loop over the mchild array to
2509  * see if any entry in the schild array matches.
2510  *
2511  * If a disk in the mchild array is found in the schild array, we return
2512  * the index of that entry.  Otherwise we return -1.
2513  */
2514 static int
2515 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2516     nvlist_t **schild, uint_t schildren)
2517 {
2518 	uint_t mc;
2519 
2520 	for (mc = 0; mc < mchildren; mc++) {
2521 		uint_t sc;
2522 		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2523 		    mchild[mc], B_FALSE);
2524 
2525 		for (sc = 0; sc < schildren; sc++) {
2526 			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2527 			    schild[sc], B_FALSE);
2528 			boolean_t result = (strcmp(mpath, spath) == 0);
2529 
2530 			free(spath);
2531 			if (result) {
2532 				free(mpath);
2533 				return (mc);
2534 			}
2535 		}
2536 
2537 		free(mpath);
2538 	}
2539 
2540 	return (-1);
2541 }
2542 
2543 /*
 * Split a mirror pool.  If '*newroot' is NULL, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
2546  */
2547 int
2548 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2549     nvlist_t *props, splitflags_t flags)
2550 {
2551 	zfs_cmd_t zc = { 0 };
2552 	char msg[1024];
2553 	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2554 	nvlist_t **varray = NULL, *zc_props = NULL;
2555 	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2556 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2557 	uint64_t vers;
2558 	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2559 	int retval = 0;
2560 
2561 	(void) snprintf(msg, sizeof (msg),
2562 	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2563 
2564 	if (!zpool_name_valid(hdl, B_FALSE, newname))
2565 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2566 
2567 	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2568 		(void) fprintf(stderr, gettext("Internal error: unable to "
2569 		    "retrieve pool configuration\n"));
2570 		return (-1);
2571 	}
2572 
2573 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2574 	    == 0);
2575 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2576 
2577 	if (props) {
2578 		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2579 		    props, vers, B_TRUE, msg)) == NULL)
2580 			return (-1);
2581 	}
2582 
2583 	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2584 	    &children) != 0) {
2585 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2586 		    "Source pool is missing vdev tree"));
2587 		if (zc_props)
2588 			nvlist_free(zc_props);
2589 		return (-1);
2590 	}
2591 
2592 	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2593 	vcount = 0;
2594 
2595 	if (*newroot == NULL ||
2596 	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2597 	    &newchild, &newchildren) != 0)
2598 		newchildren = 0;
2599 
2600 	for (c = 0; c < children; c++) {
2601 		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2602 		char *type;
2603 		nvlist_t **mchild, *vdev;
2604 		uint_t mchildren;
2605 		int entry;
2606 
2607 		/*
2608 		 * Unlike cache & spares, slogs are stored in the
2609 		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
2610 		 */
2611 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2612 		    &is_log);
2613 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2614 		    &is_hole);
2615 		if (is_log || is_hole) {
2616 			/*
2617 			 * Create a hole vdev and put it in the config.
2618 			 */
2619 			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2620 				goto out;
2621 			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2622 			    VDEV_TYPE_HOLE) != 0)
2623 				goto out;
2624 			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2625 			    1) != 0)
2626 				goto out;
2627 			if (lastlog == 0)
2628 				lastlog = vcount;
2629 			varray[vcount++] = vdev;
2630 			continue;
2631 		}
2632 		lastlog = 0;
2633 		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2634 		    == 0);
2635 		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2636 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2637 			    "Source pool must be composed only of mirrors\n"));
2638 			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2639 			goto out;
2640 		}
2641 
2642 		verify(nvlist_lookup_nvlist_array(child[c],
2643 		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2644 
2645 		/* find or add an entry for this top-level vdev */
2646 		if (newchildren > 0 &&
2647 		    (entry = find_vdev_entry(zhp, mchild, mchildren,
2648 		    newchild, newchildren)) >= 0) {
2649 			/* We found a disk that the user specified. */
2650 			vdev = mchild[entry];
2651 			++found;
2652 		} else {
2653 			/* User didn't specify a disk for this vdev. */
2654 			vdev = mchild[mchildren - 1];
2655 		}
2656 
2657 		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2658 			goto out;
2659 	}
2660 
2661 	/* did we find every disk the user specified? */
2662 	if (found != newchildren) {
2663 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2664 		    "include at most one disk from each mirror"));
2665 		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2666 		goto out;
2667 	}
2668 
2669 	/* Prepare the nvlist for populating. */
2670 	if (*newroot == NULL) {
2671 		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2672 			goto out;
2673 		freelist = B_TRUE;
2674 		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2675 		    VDEV_TYPE_ROOT) != 0)
2676 			goto out;
2677 	} else {
2678 		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2679 	}
2680 
2681 	/* Add all the children we found */
2682 	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2683 	    lastlog == 0 ? vcount : lastlog) != 0)
2684 		goto out;
2685 
2686 	/*
2687 	 * If we're just doing a dry run, exit now with success.
2688 	 */
2689 	if (flags.dryrun) {
2690 		memory_err = B_FALSE;
2691 		freelist = B_FALSE;
2692 		goto out;
2693 	}
2694 
2695 	/* now build up the config list & call the ioctl */
2696 	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2697 		goto out;
2698 
2699 	if (nvlist_add_nvlist(newconfig,
2700 	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
2701 	    nvlist_add_string(newconfig,
2702 	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
2703 	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
2704 		goto out;
2705 
2706 	/*
2707 	 * The new pool is automatically part of the namespace unless we
2708 	 * explicitly export it.
2709 	 */
2710 	if (!flags.import)
2711 		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
2712 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2713 	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
2714 	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
2715 		goto out;
2716 	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
2717 		goto out;
2718 
2719 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
2720 		retval = zpool_standard_error(hdl, errno, msg);
2721 		goto out;
2722 	}
2723 
2724 	freelist = B_FALSE;
2725 	memory_err = B_FALSE;
2726 
2727 out:
2728 	if (varray != NULL) {
2729 		int v;
2730 
2731 		for (v = 0; v < vcount; v++)
2732 			nvlist_free(varray[v]);
2733 		free(varray);
2734 	}
2735 	zcmd_free_nvlists(&zc);
2736 	if (zc_props)
2737 		nvlist_free(zc_props);
2738 	if (newconfig)
2739 		nvlist_free(newconfig);
2740 	if (freelist) {
2741 		nvlist_free(*newroot);
2742 		*newroot = NULL;
2743 	}
2744 
2745 	if (retval != 0)
2746 		return (retval);
2747 
2748 	if (memory_err)
2749 		return (no_memory(hdl));
2750 
2751 	return (0);
2752 }
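
/*
 * Illustrative sketch (not part of the library): performing a dry run
 * of a split.  With flags.dryrun set, only *newroot is computed and
 * nothing is sent to the kernel; the pool name is hypothetical.
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL,
 *	    flags) == 0 && newroot != NULL)
 *		nvlist_free(newroot);
 */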
2753 
2754 /*
2755  * Remove the given device.  Currently, this is supported only for hot spares
2756  * and level 2 cache devices.
2757  */
2758 int
2759 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2760 {
2761 	zfs_cmd_t zc = { 0 };
2762 	char msg[1024];
2763 	nvlist_t *tgt;
2764 	boolean_t avail_spare, l2cache, islog;
2765 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2766 	uint64_t version;
2767 
2768 	(void) snprintf(msg, sizeof (msg),
2769 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2770 
2771 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2772 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
2774 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2775 	/*
2776 	 * XXX - this should just go away.
2777 	 */
2778 	if (!avail_spare && !l2cache && !islog) {
2779 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2780 		    "only inactive hot spares, cache, top-level, "
2781 		    "or log devices can be removed"));
2782 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2783 	}
2784 
2785 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
2786 	if (islog && version < SPA_VERSION_HOLES) {
2787 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
2789 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
2790 	}
2791 
2792 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2793 
2794 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2795 		return (0);
2796 
2797 	return (zpool_standard_error(hdl, errno, msg));
2798 }
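
/*
 * Illustrative sketch (not part of the library): removing an inactive
 * hot spare or cache device by the same name used when it was added.
 * The device name is hypothetical.
 *
 *	if (zpool_vdev_remove(zhp, "c3t0d0s0") != 0)
 *		(void) fprintf(stderr, "remove failed\n");
 */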
2799 
2800 /*
2801  * Clear the errors for the pool, or the particular device if specified.
2802  */
2803 int
2804 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
2805 {
2806 	zfs_cmd_t zc = { 0 };
2807 	char msg[1024];
2808 	nvlist_t *tgt;
2809 	zpool_rewind_policy_t policy;
2810 	boolean_t avail_spare, l2cache;
2811 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2812 	nvlist_t *nvi = NULL;
2813 
2814 	if (path)
2815 		(void) snprintf(msg, sizeof (msg),
2816 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2817 		    path);
2818 	else
2819 		(void) snprintf(msg, sizeof (msg),
2820 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2821 		    zhp->zpool_name);
2822 
2823 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2824 	if (path) {
2825 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
2826 		    &l2cache, NULL)) == 0)
2827 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
2828 
2829 		/*
2830 		 * Don't allow error clearing for hot spares.  Do allow
2831 		 * error clearing for l2cache devices.
2832 		 */
2833 		if (avail_spare)
2834 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
2835 
2836 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2837 		    &zc.zc_guid) == 0);
2838 	}
2839 
2840 	zpool_get_rewind_policy(rewindnvl, &policy);
2841 	zc.zc_cookie = policy.zrp_request;
2842 
2843 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 8192) != 0)
2844 		return (-1);
2845 
	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, rewindnvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
2848 
2849 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0 ||
2850 	    ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
2851 	    errno != EPERM && errno != EACCES)) {
2852 		if (policy.zrp_request &
2853 		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2854 			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
2855 			zpool_rewind_exclaim(hdl, zc.zc_name,
2856 			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
2857 			    nvi);
2858 			nvlist_free(nvi);
2859 		}
2860 		zcmd_free_nvlists(&zc);
2861 		return (0);
2862 	}
2863 
2864 	zcmd_free_nvlists(&zc);
2865 	return (zpool_standard_error(hdl, errno, msg));
2866 }
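
/*
 * Illustrative sketch (not part of the library): clearing all error
 * counts for a pool, passing an empty rewind-policy nvlist so the
 * default (no rewind) policy applies.
 *
 *	nvlist_t *policy = NULL;
 *
 *	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) == 0) {
 *		if (zpool_clear(zhp, NULL, policy) != 0)
 *			(void) fprintf(stderr, "clear failed\n");
 *		nvlist_free(policy);
 *	}
 */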
2867 
2868 /*
2869  * Similar to zpool_clear(), but takes a GUID (used by fmd).
2870  */
2871 int
2872 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2873 {
2874 	zfs_cmd_t zc = { 0 };
2875 	char msg[1024];
2876 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2877 
2878 	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %ju"),
2880 	    (uintmax_t)guid);
2881 
2882 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2883 	zc.zc_guid = guid;
2884 
2885 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2886 		return (0);
2887 
2888 	return (zpool_standard_error(hdl, errno, msg));
2889 }
2890 
2891 /*
2892  * Convert from a devid string to a path.
2893  */
2894 static char *
2895 devid_to_path(char *devid_str)
2896 {
2897 	ddi_devid_t devid;
2898 	char *minor;
2899 	char *path;
2900 	devid_nmlist_t *list = NULL;
2901 	int ret;
2902 
2903 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
2904 		return (NULL);
2905 
2906 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2907 
2908 	devid_str_free(minor);
2909 	devid_free(devid);
2910 
2911 	if (ret != 0)
2912 		return (NULL);
2913 
	path = strdup(list[0].devname);
	devid_free_nmlist(list);

	return (path);
2920 }
2921 
2922 /*
2923  * Convert from a path to a devid string.
2924  */
2925 static char *
2926 path_to_devid(const char *path)
2927 {
2928 	int fd;
2929 	ddi_devid_t devid;
2930 	char *minor, *ret;
2931 
2932 	if ((fd = open(path, O_RDONLY)) < 0)
2933 		return (NULL);
2934 
2935 	minor = NULL;
2936 	ret = NULL;
2937 	if (devid_get(fd, &devid) == 0) {
2938 		if (devid_get_minor_name(fd, &minor) == 0)
2939 			ret = devid_str_encode(devid, minor);
2940 		if (minor != NULL)
2941 			devid_str_free(minor);
2942 		devid_free(devid);
2943 	}
2944 	(void) close(fd);
2945 
2946 	return (ret);
2947 }
2948 
2949 /*
2950  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2951  * ignore any failure here, since a common case is for an unprivileged user to
2952  * type 'zpool status', and we'll display the correct information anyway.
2953  */
2954 static void
2955 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2956 {
2957 	zfs_cmd_t zc = { 0 };
2958 
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
2961 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2962 	    &zc.zc_guid) == 0);
2963 
2964 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2965 }
2966 
2967 /*
2968  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2969  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2970  * We also check if this is a whole disk, in which case we strip off the
2971  * trailing 's0' slice name.
2972  *
2973  * This routine is also responsible for identifying when disks have been
2974  * reconfigured in a new location.  The kernel will have opened the device by
2975  * devid, but the path will still refer to the old location.  To catch this, we
2976  * first do a path -> devid translation (which is fast for the common case).  If
2977  * the devid matches, we're done.  If not, we do a reverse devid -> path
2978  * translation and issue the appropriate ioctl() to update the path of the vdev.
2979  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2980  * of these checks.
2981  */
2982 char *
2983 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
2984     boolean_t verbose)
2985 {
2986 	char *path, *devid;
2987 	uint64_t value;
2988 	char buf[64];
2989 	vdev_stat_t *vs;
2990 	uint_t vsc;
2991 
2992 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2993 	    &value) == 0) {
2994 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2995 		    &value) == 0);
2996 		(void) snprintf(buf, sizeof (buf), "%ju",
2997 		    (uintmax_t)value);
2998 		path = buf;
2999 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3000 
3001 		/*
3002 		 * If the device is dead (faulted, offline, etc) then don't
3003 		 * bother opening it.  Otherwise we may be forcing the user to
3004 		 * open a misbehaving device, which can have undesirable
3005 		 * effects.
3006 		 */
3007 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
3008 		    (uint64_t **)&vs, &vsc) != 0 ||
3009 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
3010 		    zhp != NULL &&
3011 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3012 			/*
3013 			 * Determine if the current path is correct.
3014 			 */
3015 			char *newdevid = path_to_devid(path);
3016 
3017 			if (newdevid == NULL ||
3018 			    strcmp(devid, newdevid) != 0) {
3019 				char *newpath;
3020 
3021 				if ((newpath = devid_to_path(devid)) != NULL) {
3022 					/*
3023 					 * Update the path appropriately.
3024 					 */
3025 					set_path(zhp, nv, newpath);
3026 					if (nvlist_add_string(nv,
3027 					    ZPOOL_CONFIG_PATH, newpath) == 0)
3028 						verify(nvlist_lookup_string(nv,
3029 						    ZPOOL_CONFIG_PATH,
3030 						    &path) == 0);
3031 					free(newpath);
3032 				}
3033 			}
3034 
3035 			if (newdevid)
3036 				devid_str_free(newdevid);
3037 		}
3038 
3039 		if (strncmp(path, "/dev/dsk/", 9) == 0)
3040 			path += 9;
3041 
3042 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3043 		    &value) == 0 && value) {
3044 			char *tmp = zfs_strdup(hdl, path);
3045 			if (tmp == NULL)
3046 				return (NULL);
3047 			tmp[strlen(path) - 2] = '\0';
3048 			return (tmp);
3049 		}
3050 	} else {
3051 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3052 
3053 		/*
3054 		 * If it's a raidz device, we need to stick in the parity level.
3055 		 */
3056 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3057 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3058 			    &value) == 0);
3059 			(void) snprintf(buf, sizeof (buf), "%s%ju", path,
3060 			    (uintmax_t)value);
3061 			path = buf;
3062 		}
3063 
3064 		/*
3065 		 * We identify each top-level vdev by using a <type-id>
3066 		 * naming convention.
3067 		 */
3068 		if (verbose) {
3069 			uint64_t id;
3070 
3071 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3072 			    &id) == 0);
3073 			(void) snprintf(buf, sizeof (buf), "%s-%ju", path,
3074 			    (uintmax_t)id);
3075 			path = buf;
3076 		}
3077 	}
3078 
3079 	return (zfs_strdup(hdl, path));
3080 }
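
/*
 * Illustrative sketch (not part of the library): status-style callers
 * walk the ZPOOL_CONFIG_VDEV_TREE nvlist from zpool_get_config() and
 * print each child's display name.  "child" and "c" below come from
 * such a walk and are hypothetical.
 *
 *	char *name = zpool_vdev_name(hdl, zhp, child[c], B_FALSE);
 *
 *	if (name != NULL) {
 *		(void) printf("\t%s\n", name);
 *		free(name);
 *	}
 */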
3081 
3082 static int
3083 zbookmark_compare(const void *a, const void *b)
3084 {
3085 	return (memcmp(a, b, sizeof (zbookmark_t)));
3086 }
3087 
3088 /*
3089  * Retrieve the persistent error log, uniquify the members, and return to the
3090  * caller.
3091  */
3092 int
3093 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3094 {
3095 	zfs_cmd_t zc = { 0 };
3096 	uint64_t count;
3097 	zbookmark_t *zb = NULL;
3098 	int i;
3099 
3100 	/*
3101 	 * Retrieve the raw error list from the kernel.  If the number of errors
3102 	 * has increased, allocate more space and continue until we get the
3103 	 * entire list.
3104 	 */
3105 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3106 	    &count) == 0);
3107 	if (count == 0)
3108 		return (0);
3109 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3110 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3111 		return (-1);
3112 	zc.zc_nvlist_dst_size = count;
3113 	(void) strcpy(zc.zc_name, zhp->zpool_name);
3114 	for (;;) {
3115 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3116 		    &zc) != 0) {
3117 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
3118 			if (errno == ENOMEM) {
3119 				count = zc.zc_nvlist_dst_size;
3120 				if ((zc.zc_nvlist_dst = (uintptr_t)
3121 				    zfs_alloc(zhp->zpool_hdl, count *
3122 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
3123 					return (-1);
3124 			} else {
3125 				return (-1);
3126 			}
3127 		} else {
3128 			break;
3129 		}
3130 	}
3131 
3132 	/*
3133 	 * Sort the resulting bookmarks.  This is a little confusing due to the
3134 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
3138 	 */
3139 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3140 	    zc.zc_nvlist_dst_size;
3141 	count -= zc.zc_nvlist_dst_size;
3142 
3143 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3144 
3145 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3146 
3147 	/*
3148 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3149 	 */
3150 	for (i = 0; i < count; i++) {
3151 		nvlist_t *nv;
3152 
3153 		/* ignoring zb_blkid and zb_level for now */
3154 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3155 		    zb[i-1].zb_object == zb[i].zb_object)
3156 			continue;
3157 
3158 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3159 			goto nomem;
3160 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3161 		    zb[i].zb_objset) != 0) {
3162 			nvlist_free(nv);
3163 			goto nomem;
3164 		}
3165 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3166 		    zb[i].zb_object) != 0) {
3167 			nvlist_free(nv);
3168 			goto nomem;
3169 		}
3170 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3171 			nvlist_free(nv);
3172 			goto nomem;
3173 		}
3174 		nvlist_free(nv);
3175 	}
3176 
3177 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
3178 	return (0);
3179 
3180 nomem:
3181 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
3182 	return (no_memory(zhp->zpool_hdl));
3183 }
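
/*
 * Illustrative sketch (not part of the library): walking the uniquified
 * error list.  Each member is an nvlist holding ZPOOL_ERR_DATASET and
 * ZPOOL_ERR_OBJECT, which zpool_obj_to_path() below can turn into a
 * printable path.
 *
 *	nvlist_t *nverrlist = NULL, *nv;
 *	nvpair_t *elem = NULL;
 *	uint64_t dsobj, obj;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
 *		while ((elem = nvlist_next_nvpair(nverrlist,
 *		    elem)) != NULL) {
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_DATASET, &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_OBJECT, &obj) == 0);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */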
3184 
3185 /*
3186  * Upgrade a ZFS pool to the latest on-disk version.
3187  */
3188 int
3189 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3190 {
3191 	zfs_cmd_t zc = { 0 };
3192 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3193 
3194 	(void) strcpy(zc.zc_name, zhp->zpool_name);
3195 	zc.zc_cookie = new_version;
3196 
3197 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3198 		return (zpool_standard_error_fmt(hdl, errno,
3199 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3200 		    zhp->zpool_name));
3201 	return (0);
3202 }
3203 
3204 void
3205 zpool_set_history_str(const char *subcommand, int argc, char **argv,
3206     char *history_str)
3207 {
3208 	int i;
3209 
3210 	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
3211 	for (i = 1; i < argc; i++) {
3212 		if (strlen(history_str) + 1 + strlen(argv[i]) >
3213 		    HIS_MAX_RECORD_LEN)
3214 			break;
3215 		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
3216 		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
3217 	}
3218 }
3219 
3220 /*
3221  * Stage command history for logging.
3222  */
3223 int
3224 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
3225 {
3226 	if (history_str == NULL)
3227 		return (EINVAL);
3228 
3229 	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
3230 		return (EINVAL);
3231 
3232 	if (hdl->libzfs_log_str != NULL)
3233 		free(hdl->libzfs_log_str);
3234 
3235 	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
3236 		return (no_memory(hdl));
3237 
3238 	return (0);
3239 }
3240 
3241 /*
3242  * Perform ioctl to get some command history of a pool.
3243  *
3244  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
3245  * logical offset of the history buffer to start reading from.
3246  *
3247  * Upon return, 'off' is the next logical offset to read from and
3248  * 'len' is the actual amount of bytes read into 'buf'.
3249  */
3250 static int
3251 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3252 {
3253 	zfs_cmd_t zc = { 0 };
3254 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3255 
3256 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3257 
3258 	zc.zc_history = (uint64_t)(uintptr_t)buf;
3259 	zc.zc_history_len = *len;
3260 	zc.zc_history_offset = *off;
3261 
3262 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3263 		switch (errno) {
3264 		case EPERM:
3265 			return (zfs_error_fmt(hdl, EZFS_PERM,
3266 			    dgettext(TEXT_DOMAIN,
3267 			    "cannot show history for pool '%s'"),
3268 			    zhp->zpool_name));
3269 		case ENOENT:
3270 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3271 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
3272 			    "'%s'"), zhp->zpool_name));
3273 		case ENOTSUP:
3274 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3275 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
3276 			    "'%s', pool must be upgraded"), zhp->zpool_name));
3277 		default:
3278 			return (zpool_standard_error_fmt(hdl, errno,
3279 			    dgettext(TEXT_DOMAIN,
3280 			    "cannot get history for '%s'"), zhp->zpool_name));
3281 		}
3282 	}
3283 
3284 	*len = zc.zc_history_len;
3285 	*off = zc.zc_history_offset;
3286 
3287 	return (0);
3288 }
3289 
3290 /*
3291  * Process the buffer of nvlists, unpacking and storing each nvlist record
3292  * into 'records'.  'leftover' is set to the number of bytes that weren't
3293  * processed as there wasn't a complete record.
3294  */
3295 int
3296 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3297     nvlist_t ***records, uint_t *numrecords)
3298 {
3299 	uint64_t reclen;
3300 	nvlist_t *nv;
3301 	int i;
3302 
3303 	while (bytes_read > sizeof (reclen)) {
3304 
3305 		/* get length of packed record (stored as little endian) */
3306 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3307 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3308 
3309 		if (bytes_read < sizeof (reclen) + reclen)
3310 			break;
3311 
3312 		/* unpack record */
3313 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3314 			return (ENOMEM);
3315 		bytes_read -= sizeof (reclen) + reclen;
3316 		buf += sizeof (reclen) + reclen;
3317 
3318 		/* add record to nvlist array */
3319 		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));

			if (tmp == NULL) {
				nvlist_free(nv);
				(*numrecords)--;
				return (ENOMEM);
			}
			*records = tmp;
		}
3324 		(*records)[*numrecords - 1] = nv;
3325 	}
3326 
3327 	*leftover = bytes_read;
3328 	return (0);
3329 }
3330 
3331 #define	HIS_BUF_LEN	(128*1024)
3332 
3333 /*
3334  * Retrieve the command history of a pool.
3335  */
3336 int
3337 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3338 {
3339 	char buf[HIS_BUF_LEN];
3340 	uint64_t off = 0;
3341 	nvlist_t **records = NULL;
3342 	uint_t numrecords = 0;
3343 	int err, i;
3344 
3345 	do {
3346 		uint64_t bytes_read = sizeof (buf);
3347 		uint64_t leftover;
3348 
3349 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3350 			break;
3351 
3352 		/* if nothing else was read in, we're at EOF, just return */
3353 		if (!bytes_read)
3354 			break;
3355 
3356 		if ((err = zpool_history_unpack(buf, bytes_read,
3357 		    &leftover, &records, &numrecords)) != 0)
3358 			break;
3359 		off -= leftover;
3360 
3361 		/* CONSTCOND */
3362 	} while (1);
3363 
3364 	if (!err) {
3365 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3366 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3367 		    records, numrecords) == 0);
3368 	}
3369 	for (i = 0; i < numrecords; i++)
3370 		nvlist_free(records[i]);
3371 	free(records);
3372 
3373 	return (err);
3374 }
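
/*
 * Illustrative sketch (not part of the library): printing the time and
 * command string of each history record, much as "zpool history" does.
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t numrecords, i;
 *	uint64_t tsec;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis,
 *		    ZPOOL_HIST_RECORD, &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++) {
 *			if (nvlist_lookup_uint64(records[i],
 *			    ZPOOL_HIST_TIME, &tsec) == 0 &&
 *			    nvlist_lookup_string(records[i],
 *			    ZPOOL_HIST_CMD, &cmd) == 0)
 *				(void) printf("%ju %s\n",
 *				    (uintmax_t)tsec, cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */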
3375 
3376 void
3377 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3378     char *pathname, size_t len)
3379 {
3380 	zfs_cmd_t zc = { 0 };
3381 	boolean_t mounted = B_FALSE;
3382 	char *mntpnt = NULL;
3383 	char dsname[MAXNAMELEN];
3384 
3385 	if (dsobj == 0) {
3386 		/* special case for the MOS */
3387 		(void) snprintf(pathname, len, "<metadata>:<%#jx>",
3388 		    (uintmax_t)obj);
3389 		return;
3390 	}
3391 
3392 	/* get the dataset's name */
3393 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3394 	zc.zc_obj = dsobj;
3395 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
3396 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3397 		/* just write out a path of two object numbers */
3398 		(void) snprintf(pathname, len, "<%#jx>:<%#jx>",
3399 		    (uintmax_t)dsobj, (uintmax_t)obj);
3400 		return;
3401 	}
3402 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3403 
3404 	/* find out if the dataset is mounted */
3405 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3406 
3407 	/* get the corrupted object's path */
3408 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3409 	zc.zc_obj = obj;
3410 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3411 	    &zc) == 0) {
3412 		if (mounted) {
3413 			(void) snprintf(pathname, len, "%s%s", mntpnt,
3414 			    zc.zc_value);
3415 		} else {
3416 			(void) snprintf(pathname, len, "%s:%s",
3417 			    dsname, zc.zc_value);
3418 		}
3419 	} else {
3420 		(void) snprintf(pathname, len, "%s:<%#jx>", dsname,
3421 		    (uintmax_t)obj);
3422 	}
3423 	free(mntpnt);
3424 }
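
/*
 * Illustrative sketch (not part of the library): resolving one entry
 * from the persistent error log (see zpool_get_errlog() above) to a
 * printable path.  "dsobj" and "obj" are the dataset and object
 * numbers taken from an error-list entry.
 *
 *	char pathname[MAXPATHLEN * 2];
 *
 *	zpool_obj_to_path(zhp, dsobj, obj, pathname, sizeof (pathname));
 *	(void) printf("errors in %s\n", pathname);
 */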
3425 
3426 /*
3427  * Read the EFI label from the config, if a label does not exist then
3428  * pass back the error to the caller. If the caller has passed a non-NULL
3429  * diskaddr argument then we set it to the starting address of the EFI
3430  * partition.
3431  */
3432 static int
3433 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3434 {
3435 	char *path;
3436 	int fd;
3437 	char diskname[MAXPATHLEN];
3438 	int err = -1;
3439 
3440 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3441 		return (err);
3442 
3443 	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3444 	    strrchr(path, '/'));
3445 	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3446 		struct dk_gpt *vtoc;
3447 
3448 		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3449 			if (sb != NULL)
3450 				*sb = vtoc->efi_parts[0].p_start;
3451 			efi_free(vtoc);
3452 		}
3453 		(void) close(fd);
3454 	}
3455 	return (err);
3456 }
3457 
3458 /*
3459  * determine where a partition starts on a disk in the current
 * Determine where a partition starts on a disk in the current
 * configuration.
3462 static diskaddr_t
3463 find_start_block(nvlist_t *config)
3464 {
3465 	nvlist_t **child;
3466 	uint_t c, children;
3467 	diskaddr_t sb = MAXOFFSET_T;
3468 	uint64_t wholedisk;
3469 
3470 	if (nvlist_lookup_nvlist_array(config,
3471 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3472 		if (nvlist_lookup_uint64(config,
3473 		    ZPOOL_CONFIG_WHOLE_DISK,
3474 		    &wholedisk) != 0 || !wholedisk) {
3475 			return (MAXOFFSET_T);
3476 		}
3477 		if (read_efi_label(config, &sb) < 0)
3478 			sb = MAXOFFSET_T;
3479 		return (sb);
3480 	}
3481 
3482 	for (c = 0; c < children; c++) {
3483 		sb = find_start_block(child[c]);
3484 		if (sb != MAXOFFSET_T) {
3485 			return (sb);
3486 		}
3487 	}
3488 	return (MAXOFFSET_T);
3489 }
3490 
3491 /*
3492  * Label an individual disk.  The name provided is the short name,
3493  * stripped of any leading /dev path.
3494  */
3495 int
3496 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3497 {
3498 	char path[MAXPATHLEN];
3499 	struct dk_gpt *vtoc;
3500 	int fd;
3501 	size_t resv = EFI_MIN_RESV_SIZE;
3502 	uint64_t slice_size;
3503 	diskaddr_t start_block;
3504 	char errbuf[1024];
3505 
3506 	/* prepare an error message just in case */
3507 	(void) snprintf(errbuf, sizeof (errbuf),
3508 	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3509 
3510 	if (zhp) {
3511 		nvlist_t *nvroot;
3512 
3513 		if (pool_is_bootable(zhp)) {
3514 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3515 			    "EFI labeled devices are not supported on root "
3516 			    "pools."));
3517 			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3518 		}
3519 
3520 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
3521 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3522 
3523 		if (zhp->zpool_start_block == 0)
3524 			start_block = find_start_block(nvroot);
3525 		else
3526 			start_block = zhp->zpool_start_block;
3527 		zhp->zpool_start_block = start_block;
3528 	} else {
3529 		/* new pool */
3530 		start_block = NEW_START_BLOCK;
3531 	}
3532 
3533 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3534 	    BACKUP_SLICE);
3535 
3536 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3537 		/*
3538 		 * This shouldn't happen.  We've long since verified that this
3539 		 * is a valid device.
3540 		 */
3541 		zfs_error_aux(hdl,
3542 		    dgettext(TEXT_DOMAIN, "unable to open device"));
3543 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3544 	}
3545 
3546 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3547 		/*
3548 		 * The only way this can fail is if we run out of memory, or we
3549 		 * were unable to read the disk's capacity
		 * were unable to read the disk's capacity.
3551 		if (errno == ENOMEM)
3552 			(void) no_memory(hdl);
3553 
3554 		(void) close(fd);
3555 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));
3557 
3558 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3559 	}
3560 
3561 	slice_size = vtoc->efi_last_u_lba + 1;
3562 	slice_size -= EFI_MIN_RESV_SIZE;
3563 	if (start_block == MAXOFFSET_T)
3564 		start_block = NEW_START_BLOCK;
3565 	slice_size -= start_block;
3566 
3567 	vtoc->efi_parts[0].p_start = start_block;
3568 	vtoc->efi_parts[0].p_size = slice_size;
3569 
3570 	/*
3571 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
3572 	 * disposable by some EFI utilities (since EFI doesn't have a backup
3573 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
3574 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
3575 	 * etc. were all pretty specific.  V_USR is as close to reality as we
3576 	 * can get, in the absence of V_OTHER.
3577 	 */
3578 	vtoc->efi_parts[0].p_tag = V_USR;
3579 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3580 
3581 	vtoc->efi_parts[8].p_start = slice_size + start_block;
3582 	vtoc->efi_parts[8].p_size = resv;
3583 	vtoc->efi_parts[8].p_tag = V_RESERVED;
3584 
3585 	if (efi_write(fd, vtoc) != 0) {
3586 		/*
3587 		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give
3590 		 * a specific slice.
3591 		 */
3592 		(void) close(fd);
3593 		efi_free(vtoc);
3594 
3595 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3596 		    "try using fdisk(1M) and then provide a specific slice"));
3597 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3598 	}
3599 
3600 	(void) close(fd);
3601 	efi_free(vtoc);
3602 	return (0);
3603 }
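
/*
 * Illustrative sketch (not part of the library): labeling a new disk
 * before adding it to a pool, using the short device name (no /dev
 * prefix, no slice), as zpool(1M) does when given a whole disk.  The
 * device name is hypothetical.
 *
 *	if (zpool_label_disk(hdl, zhp, "c4t0d0") != 0)
 *		(void) fprintf(stderr, "labeling failed\n");
 */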
3604 
3605 static boolean_t
3606 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3607 {
3608 	char *type;
3609 	nvlist_t **child;
3610 	uint_t children, c;
3611 
3612 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3613 	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
3614 	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
3615 	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
3616 	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3617 	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
3618 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3619 		    "vdev type '%s' is not supported"), type);
3620 		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3621 		return (B_FALSE);
3622 	}
3623 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3624 	    &child, &children) == 0) {
3625 		for (c = 0; c < children; c++) {
3626 			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3627 				return (B_FALSE);
3628 		}
3629 	}
3630 	return (B_TRUE);
3631 }
3632 
3633 /*
 * Check whether this zvol is allowable for use as a dump device; returns
 * zero if it is, > 0 if it isn't, and < 0 if it isn't a zvol.
3636  */
3637 int
3638 zvol_check_dump_config(char *arg)
3639 {
3640 	zpool_handle_t *zhp = NULL;
3641 	nvlist_t *config, *nvroot;
3642 	char *p, *volname;
3643 	nvlist_t **top;
3644 	uint_t toplevels;
3645 	libzfs_handle_t *hdl;
3646 	char errbuf[1024];
3647 	char poolname[ZPOOL_MAXNAMELEN];
3648 	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
3649 	int ret = 1;
3650 
3651 	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
3652 		return (-1);
3653 	}
3654 
3655 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3656 	    "dump is not supported on device '%s'"), arg);
3657 
3658 	if ((hdl = libzfs_init()) == NULL)
3659 		return (1);
3660 	libzfs_print_on_error(hdl, B_TRUE);
3661 
3662 	volname = arg + pathlen;
3663 
3664 	/* check the configuration of the pool */
3665 	if ((p = strchr(volname, '/')) == NULL) {
3666 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3667 		    "malformed dataset name"));
3668 		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;
3670 	} else if (p - volname >= ZFS_MAXNAMELEN) {
3671 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3672 		    "dataset name is too long"));
3673 		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
3675 	} else {
3676 		(void) strncpy(poolname, volname, p - volname);
3677 		poolname[p - volname] = '\0';
3678 	}
3679 
3680 	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
3681 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3682 		    "could not open pool '%s'"), poolname);
3683 		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
3684 		goto out;
3685 	}
3686 	config = zpool_get_config(zhp, NULL);
3687 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3688 	    &nvroot) != 0) {
3689 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
3691 		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3692 		goto out;
3693 	}
3694 
3695 	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3696 	    &top, &toplevels) == 0);
3697 	if (toplevels != 1) {
3698 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3699 		    "'%s' has multiple top level vdevs"), poolname);
3700 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3701 		goto out;
3702 	}
3703 
3704 	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
3705 		goto out;
3706 	}
3707 	ret = 0;
3708 
3709 out:
3710 	if (zhp)
3711 		zpool_close(zhp);
3712 	libzfs_fini(hdl);
3713 	return (ret);
3714 }
3715