xref: /netbsd-src/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_mirror.c (revision ba2539a9805a0544ff82c0003cc02fe1eee5603d)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/fs/zfs.h>

/*
 * Virtual device vector for mirroring.
 */

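/*
 * Per-child state tracked for the duration of a single mirror I/O:
 * the child vdev and offset to issue to, the computed load score, the
 * result of the most recent attempt, and flags recording whether the
 * child has been tried or skipped so retries do not revisit it.
 */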
typedef struct mirror_child {
	vdev_t		*mc_vd;
	uint64_t	mc_offset;
	int		mc_error;
	int		mc_load;
	uint8_t		mc_tried;
	uint8_t		mc_skipped;
	uint8_t		mc_speculative;
} mirror_child_t;

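/*
 * Per-I/O map of mirror children.  mm_preferred points into the same
 * allocation, at an array of child indices holding the children that
 * currently share the lowest load.  mm_root is set when the map was
 * built from a block pointer's DVAs rather than from a mirror vdev.
 */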
typedef struct mirror_map {
	int		*mm_preferred;
	int		mm_preferred_cnt;
	int		mm_children;
	boolean_t	mm_replacing;
	boolean_t	mm_root;
	mirror_child_t	mm_child[];
} mirror_map_t;

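/*
 * Shift applied to zio->io_offset when it is used as a pseudo-random
 * seed for picking among equally loaded children; see
 * vdev_mirror_preferred_child_randomize().
 */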
static int vdev_mirror_shift = 21;

#ifdef __FreeBSD__
#ifdef _KERNEL
SYSCTL_DECL(_vfs_zfs_vdev);
static SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, mirror, CTLFLAG_RD, 0,
    "ZFS VDEV Mirror");
#endif
#endif

/*
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * non_rotating_seek_inc to 0 may well provide better results, as it
 * will direct more reads to the non-rotating vdevs, which are likely
 * to offer higher performance.
 */

/* Rotating media load calculation configuration. */
static int rotating_inc = 0;
#ifdef _KERNEL
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_inc, CTLFLAG_RWTUN,
    &rotating_inc, 0, "Rotating media load increment for non-seeking I/O's");
#endif

static int rotating_seek_inc = 5;
#ifdef _KERNEL
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_inc, CTLFLAG_RWTUN,
    &rotating_seek_inc, 0, "Rotating media load increment for seeking I/O's");
#endif

static int rotating_seek_offset = 1 * 1024 * 1024;
#ifdef _KERNEL
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_offset, CTLFLAG_RWTUN,
    &rotating_seek_offset, 0, "Offset in bytes from the last I/O which "
    "triggers a reduced rotating media seek increment");
#endif

/* Non-rotating media load calculation configuration. */
static int non_rotating_inc = 0;
#ifdef _KERNEL
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_inc, CTLFLAG_RWTUN,
    &non_rotating_inc, 0,
    "Non-rotating media load increment for non-seeking I/O's");
#endif

static int non_rotating_seek_inc = 1;
#ifdef _KERNEL
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_seek_inc, CTLFLAG_RWTUN,
    &non_rotating_seek_inc, 0,
    "Non-rotating media load increment for seeking I/O's");
#endif

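/*
 * A mirror map is a single allocation: the mirror_map_t header, the
 * flexible mm_child[] array for 'children' entries, and the
 * mm_preferred index array packed immediately after it.
 */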
static inline size_t
vdev_mirror_map_size(int children)
{
	return (offsetof(mirror_map_t, mm_child[children]) +
	    sizeof(int) * children);
}

static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t replacing, boolean_t root)
{
	mirror_map_t *mm;

	mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
	mm->mm_children = children;
	mm->mm_replacing = replacing;
	mm->mm_root = root;
	mm->mm_preferred = (int *)((uintptr_t)mm +
	    offsetof(mirror_map_t, mm_child[children]));

	return (mm);
}

static void
vdev_mirror_map_free(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}

static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	vdev_mirror_map_free,
	zio_vsd_default_cksum_report
};

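/*
 * Compute a load score for reading from the given child; lower scores
 * are preferred.  The score is the child's pending queue length plus a
 * penalty based on its media type and the seek distance from the last
 * I/O issued to it.
 */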
static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
	uint64_t lastoffset;
	int load;

	/* All DVAs have equal weight at the root. */
	if (mm->mm_root)
		return (INT_MAX);

	/*
	 * We don't return INT_MAX if the device is resilvering (i.e.
	 * vdev_resilver_txg != 0) because, when tested, overall
	 * performance was slightly worse when resilvering devices were
	 * penalized compared to when they were not.
	 */

	/* Standard load based on pending queue length. */
	load = vdev_queue_length(vd);
	lastoffset = vdev_queue_lastoffset(vd);

	if (vd->vdev_rotation_rate == VDEV_RATE_NON_ROTATING) {
		/* Non-rotating media. */
		if (lastoffset == zio_offset)
			return (load + non_rotating_inc);

		/*
		 * Apply a seek penalty even for non-rotating devices as
		 * sequential I/O's can be aggregated into fewer operations
		 * on the device, thus avoiding unnecessary per-command
		 * overhead and boosting performance.
		 */
		return (load + non_rotating_seek_inc);
	}

	/* Rotating media I/O's which directly follow the last I/O. */
	if (lastoffset == zio_offset)
		return (load + rotating_inc);

	/*
	 * Apply half the seek increment to I/O's within seek offset
	 * of the last I/O queued to this vdev as they should incur less
	 * of a seek increment.
	 */
	if (ABS(lastoffset - zio_offset) < rotating_seek_offset)
		return (load + (rotating_seek_inc / 2));

	/* Apply the full seek increment to all other I/O's. */
	return (load + rotating_seek_inc);
}

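/*
 * Build the mirror map for this I/O.  For an I/O with no vdev (a root
 * I/O against a block pointer) there is one child per DVA; otherwise
 * there is one child per child vdev of the mirror.
 */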
static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;

		mm = vdev_mirror_map_alloc(BP_GET_NDVAS(zio->io_bp), B_FALSE,
		    B_TRUE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
		}
	} else {
		mm = vdev_mirror_map_alloc(vd->vdev_children,
		    (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops), B_FALSE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;
		}
	}

	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
	return (mm);
}

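/*
 * Open all children and derive the mirror's geometry: the usable size
 * is the smallest child size and the required alignment is the largest
 * child ashift.  Fails only if no child can be opened.
 */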
static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	int numerrors = 0;
	int lasterror = 0;

	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
		*physical_ashift = MAX(*physical_ashift,
		    cvd->vdev_physical_ashift);
	}

	if (numerrors == vd->vdev_children) {
		vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}

static void
vdev_mirror_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}

static void
vdev_mirror_child_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

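/*
 * Done callback for scrub reads: on success, copy the data read from
 * this child into each parent I/O's buffer, then free the per-child
 * buffer allocated in vdev_mirror_io_start().
 */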
static void
vdev_mirror_scrub_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	if (zio->io_error == 0) {
		zio_t *pio;
		zio_link_t *zl = NULL;

		mutex_enter(&zio->io_lock);
		while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
			mutex_enter(&pio->io_lock);
			ASSERT3U(zio->io_size, >=, pio->io_size);
			bcopy(zio->io_data, pio->io_data, pio->io_size);
			mutex_exit(&pio->io_lock);
		}
		mutex_exit(&zio->io_lock);
	}

	zio_buf_free(zio->io_data, zio->io_size);

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

/*
 * Check the other, lower-index DVAs to see if they're on the same
 * vdev as the child we picked.  If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
 */
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
	dva_t *dva = zio->io_bp->blk_dva;
	mirror_map_t *mm = zio->io_vsd;
	int preferred;
	int c;

	preferred = mm->mm_preferred[p];
	for (p--; p >= 0; p--) {
		c = mm->mm_preferred[p];
		if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
			preferred = c;
	}
	return (preferred);
}

static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	int p;

	if (mm->mm_root) {
		p = spa_get_random(mm->mm_preferred_cnt);
		return (vdev_mirror_dva_select(zio, p));
	}

	/*
	 * To ensure we don't always favour the first matching vdev,
	 * which could lead to wear leveling issues on SSD's, we
	 * use the I/O offset as a pseudo random seed into the vdevs
	 * which have the lowest load.
	 */
	p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
	return (mm->mm_preferred[p]);
}

/*
 * Try to find a vdev whose DTL doesn't contain the block we want to read,
 * preferring vdevs based on determined load.
 *
 * If we can't, try the read on any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	uint64_t txg = zio->io_txg;
	int c, lowest_load;

	ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

	lowest_load = INT_MAX;
	mm->mm_preferred_cnt = 0;
	for (c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc;

		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)
			continue;

		if (!vdev_readable(mc->mc_vd)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */
			mc->mc_skipped = 1;
			continue;
		}

		if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
			mc->mc_error = SET_ERROR(ESTALE);
			mc->mc_skipped = 1;
			mc->mc_speculative = 1;
			continue;
		}

		mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
		if (mc->mc_load > lowest_load)
			continue;

		if (mc->mc_load < lowest_load) {
			lowest_load = mc->mc_load;
			mm->mm_preferred_cnt = 0;
		}
		mm->mm_preferred[mm->mm_preferred_cnt] = c;
		mm->mm_preferred_cnt++;
	}

	if (mm->mm_preferred_cnt == 1) {
		vdev_queue_register_lastoffset(
		    mm->mm_child[mm->mm_preferred[0]].mc_vd, zio);
		return (mm->mm_preferred[0]);
	}

	if (mm->mm_preferred_cnt > 1) {
		int c = vdev_mirror_preferred_child_randomize(zio);

		vdev_queue_register_lastoffset(mm->mm_child[c].mc_vd, zio);
		return (c);
	}

	/*
	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	 */
	for (c = 0; c < mm->mm_children; c++) {
		if (!mm->mm_child[c].mc_tried) {
			vdev_queue_register_lastoffset(mm->mm_child[c].mc_vd,
			    zio);
			return (c);
		}
	}

	/*
	 * Every child failed.  There's no place left to look.
	 */
	return (-1);
}

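/*
 * Start a mirror I/O.  Scrub reads are issued to every child with a
 * private buffer; normal reads go to a single child chosen by
 * vdev_mirror_child_select(); writes and frees go to all children.
 */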
static void
vdev_mirror_io_start(zio_t *zio)
{
	mirror_map_t *mm;
	mirror_child_t *mc;
	int c, children;

	mm = vdev_mirror_map_init(zio);

	if (zio->io_type == ZIO_TYPE_READ) {
		if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing &&
		    mm->mm_children > 1) {
			/*
			 * For scrubbing reads we need to allocate a read
			 * buffer for each child and issue reads to all
			 * children.  If any child succeeds, it will copy its
			 * data into zio->io_data in vdev_mirror_scrub_done.
			 */
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset,
				    zio_buf_alloc(zio->io_size), zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_mirror_scrub_done, mc));
			}
			zio_execute(zio);
			return;
		}
		/*
		 * For normal reads just pick one child.
		 */
		c = vdev_mirror_child_select(zio);
		children = (c >= 0);
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE ||
		    zio->io_type == ZIO_TYPE_FREE);

		/*
		 * Writes and frees go to all children.
		 */
		c = 0;
		children = mm->mm_children;
	}

	while (children--) {
		mc = &mm->mm_child[c];
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		c++;
	}

	zio_execute(zio);
}

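/*
 * Return the most severe error among the children.  Non-speculative
 * errors (error[0]) take precedence over speculative ones (error[1]),
 * which came from children skipped because their DTL suggested the
 * data was missing.
 */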
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
	int error[2] = { 0, 0 };

	for (int c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);
	}

	return (error[0] ? error[0] : error[1]);
}

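/*
 * Completion for a mirror I/O: tally good copies and unexpected
 * errors, retry failed reads on untried children, and use good data
 * to repair any children that returned errors or are known to be
 * missing this block.
 */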
static void
vdev_mirror_io_done(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	int c;
	int good_copies = 0;
	int unexpected_errors = 0;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];

		if (mc->mc_error) {
			if (!mc->mc_skipped)
				unexpected_errors++;
		} else if (mc->mc_tried) {
			good_copies++;
		}
	}

	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		/* XXPOLICY */
		if (good_copies != mm->mm_children) {
			/*
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device.  But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			 */
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);
		}
		return;
	} else if (zio->io_type == ZIO_TYPE_FREE) {
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * If we don't have a good copy yet, keep trying other children.
	 */
	/* XXPOLICY */
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		return;
	}

	/* XXPOLICY */
	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);
	}

	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_replacing))) {
		/*
		 * Use the good data we have in hand to repair damaged children.
		 */
		for (c = 0; c < mm->mm_children; c++) {
			/*
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			 */
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				if (mc->mc_tried)
					continue;
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
				    zio->io_txg, 1))
					continue;
				mc->mc_error = SET_ERROR(ESTALE);
			}

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_data, zio->io_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}

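/*
 * Propagate child state to the mirror: can't open with no usable
 * children, degraded if any child is degraded or faulted, healthy
 * otherwise.
 */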
static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted == vd->vdev_children)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_NO_REPLICAS);
	else if (degraded + faulted != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	else
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

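/*
 * The positional initializers below fill the vdev_ops_t slots in
 * order: open, close, asize, io_start, io_done, state_change, then
 * the two NULL slots (the hold and rele callbacks, unused by these
 * vdev types), the vdev type name, and the leaf flag.
 */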
vdev_ops_t vdev_mirror_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_MIRROR,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_REPLACING,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_SPARE,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};
677