/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * NAME:	raid_resync.c
 * DESCRIPTION: RAID driver source file containing routines related to resync
 *		operation.
 * ROUTINES PROVIDED FOR EXTERNAL USE:
 *	resync_request() - get resync lock if available
 *	release_resync_request() - relinquish resync lock
 *	erred_check_line() - provide write instruction for erred column
 *	init_pw_area() - initialize pre-write area
 *	copy_pw_area() - copy pre-write area from one device to another
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/user.h>
#include <sys/uio.h>
#include <sys/t_lock.h>
#include <sys/buf.h>
#include <sys/dkio.h>
#include <sys/vtoc.h>
#include <sys/kmem.h>
#include <vm/page.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/mkdev.h>
#include <sys/stat.h>
#include <sys/open.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/lvm/md_raid.h>

#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/svm.h>

#define	NOCOLUMN	(-1)

extern md_set_t		md_set[];
extern kmem_cache_t	*raid_child_cache;
extern kmem_cache_t	*raid_parent_cache;
extern md_resync_t	md_cpr_resync;
extern major_t		md_major;
extern void		raid_parent_init(md_raidps_t *ps);
extern void		raid_child_init(md_raidcs_t *ps);

/*
 * NAME:	xor
 * DESCRIPTION: Xor two chunks of data together.  The data referenced by
 *		addr1 and addr2 is xor'd together for size bytes and written
 *		into addr1.
 * PARAMETERS:	caddr_t addr1 - address of first chunk of data and destination
 *		caddr_t addr2 - address of second chunk of data
 *		size_t size - number of bytes to xor
 */
static void
xor(caddr_t addr1, caddr_t addr2, size_t size)
{
	while (size--) {
		*addr1++ ^= *addr2++;
	}
}
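
/*
 * Note on how xor() is used for resync: a RAID line stores data columns
 * d0 .. dn-1 together with parity p = d0 ^ d1 ^ ... ^ dn-1, so any single
 * missing column dk can be regenerated by xor'ing the parity with the
 * surviving columns:
 *
 *	dk = p ^ d0 ^ ... ^ dk-1 ^ dk+1 ^ ... ^ dn-1
 *
 * raid_resync_region() below applies this by starting from a zeroed
 * buffer and folding in each surviving column with xor(), then writing
 * the result to the column being resynced.
 */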

/*
 * NAME:	release_resync_request
 *
 * DESCRIPTION: Release resync active flag and reset unit values accordingly.
 *
 * PARAMETERS:	minor_t mnum - minor number identity of metadevice
 *
 * LOCKS:	Expects Unit Writer Lock to be held across call.
 */
void
release_resync_request(
	minor_t		mnum
)
{
	mr_unit_t	*un;

	un = MD_UNIT(mnum);
	ASSERT(un != NULL);

	un->c.un_status &= ~MD_UN_RESYNC_ACTIVE;

	un->un_column[un->un_resync_index].un_devflags &= ~MD_RAID_RESYNC;
	un->un_column[un->un_resync_index].un_devflags &= ~MD_RAID_RESYNC_ERRED;
	un->un_column[un->un_resync_index].un_devflags &=
	    ~(MD_RAID_COPY_RESYNC | MD_RAID_REGEN_RESYNC);

	un->un_resync_line_index = 0;
	un->un_resync_index = NOCOLUMN;
}

/*
 * NAME:	resync_request
 *
 * DESCRIPTION: Request resync.  If resync is available (no current active
 *		resync), mark unit as resync active and initialize.
 *
 * PARAMETERS:	minor_t mnum - minor number identity of metadevice
 *		int column_index - index of column to resync
 *		int copysize - copysize of ioctl request
 *		md_error_t *ep - error output parameter
 *
 * RETURN:	0 if resync is available, 1 otherwise.
 *
 * LOCKS:	Expects Unit Writer Lock to be held across call.
 *
 * NOTE:	Sets un_resync_copysize to the input value in copysize, the
 *		existing value from an incomplete previous resync with an
 *		input value in copysize, or the lesser of the unit segment
 *		size or maxio.
 */
/* ARGSUSED */
int
resync_request(
	minor_t		mnum,
	int		column_index,
	size_t		copysize,
	md_error_t	*mde
)
{
	mr_unit_t	*un;

	un = MD_UNIT(mnum);
	ASSERT(un != NULL);

	/* if a resync or grow is already active, reject this request */
	if (! (un->un_column[column_index].un_devflags & MD_RAID_RESYNC) &&
	    ((un->c.un_status & MD_UN_RESYNC_ACTIVE) ||
	    (un->c.un_status & MD_UN_GROW_PENDING) ||
	    (un->un_column[column_index].un_devstate & RCS_RESYNC))) {
		if (mde)
			return (mdmderror(mde, MDE_GROW_DELAYED, mnum));
		return (1);
	}

	if (un->un_column[column_index].un_devstate &
	    (RCS_ERRED | RCS_LAST_ERRED))
		un->un_column[column_index].un_devflags |= MD_RAID_DEV_ERRED;
	else
		un->un_column[column_index].un_devflags &= ~MD_RAID_DEV_ERRED;
	un->c.un_status |= MD_UN_RESYNC_ACTIVE;
	un->un_resync_index = column_index;
	un->un_resync_line_index = 0;
	raid_set_state(un, column_index, RCS_RESYNC, 0);

	return (0);
}

/*
 * NAME:	alloc_bufs
 *
 * DESCRIPTION: Allocate the resync data and parity buffers held in the
 *		child save structure.
 *
 * PARAMETERS:	md_raidcs_t *cs - child save structure to hold the buffers
 *		size_t bsize - size of each buffer
 */
static void
alloc_bufs(md_raidcs_t *cs, size_t bsize)
{
	/* allocate buffers, write uses the read_buf1 buffer */
	cs->cs_dbuffer = kmem_zalloc(bsize, KM_SLEEP);
	cs->cs_pbuffer = kmem_zalloc(bsize, KM_SLEEP);
}

void
init_buf(buf_t *bp, int flags, size_t size)
{
	/* zero buf */
	bzero((caddr_t)bp, sizeof (buf_t));

	/* set b_back and b_forw to point back to buf */
	bp->b_back = bp;
	bp->b_forw = bp;

	/* set flags size */
	bp->b_flags = flags;
	bp->b_bufsize = size;
	bp->b_offset = -1;

	/* setup semaphores */
	sema_init(&bp->b_io, 0, NULL, SEMA_DEFAULT, NULL);
	sema_init(&bp->b_sem, 0, NULL, SEMA_DEFAULT, NULL);
}

void
destroy_buf(buf_t *bp)
{
	sema_destroy(&bp->b_io);
	sema_destroy(&bp->b_sem);
}

void
reset_buf(buf_t *bp, int flags, size_t size)
{
	destroy_buf(bp);
	init_buf(bp, flags, size);
}
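
/*
 * Typical lifecycle of these buf_t helpers as used by the resync code in
 * this file (see init_pw_area() and raid_resync_region() below): a single
 * private buf is set up once, reused for each transfer, and torn down at
 * the end, for example:
 *
 *	init_buf(&buf, B_BUSY | B_WRITE, size);
 *	for each chunk {
 *		fill in b_un.b_addr, b_edev, b_bcount and b_lblkno;
 *		(void) md_call_strategy(&buf, MD_STR_NOTTOP, NULL);
 *		if (biowait(&buf))
 *			handle the error;
 *		reset_buf(&buf, B_BUSY | B_WRITE, size);
 *	}
 *	destroy_buf(&buf);
 */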

/*
 * NAME:	free_bufs
 *
 * DESCRIPTION: Free up the resync buffers.
 *
 * PARAMETERS:	size_t bsize - size of each buffer
 *		md_raidcs_t *cs - child save structure holding the buffers
 */
static void
free_bufs(size_t bsize, md_raidcs_t *cs)
{
	kmem_free(cs->cs_dbuffer, bsize);
	kmem_free(cs->cs_pbuffer, bsize);
}

/*
 * NAME:	init_pw_area
 *
 * DESCRIPTION: Initialize pre-write area to all zeros.
 *
 * PARAMETERS:	mr_unit_t *un - raid unit structure
 *		md_dev64_t dev_to_write - device whose pre-write area is
 *				being initialized
 *		diskaddr_t pwstart - start block of the pre-write area
 *		uint_t col - index of column being resynced
 *
 * RETURN:	1 if write error on resync device, otherwise 0
 *
 * LOCKS:	Expects Unit Reader Lock to be held across call.
 */
int
init_pw_area(
	mr_unit_t	*un,
	md_dev64_t	dev_to_write,
	diskaddr_t	pwstart,
	uint_t		col
)
{
	buf_t		buf;
	caddr_t		databuffer;
	size_t		copysize;
	size_t		bsize;
	int		error = 0;
	int		i;

	ASSERT(un != NULL);
	ASSERT(un->un_column[col].un_devflags & MD_RAID_DEV_ISOPEN);

	bsize = un->un_iosize;
	copysize = dbtob(bsize);
	databuffer = kmem_zalloc(copysize, KM_SLEEP);
	init_buf(&buf, (B_BUSY | B_WRITE), copysize);

	for (i = 0; i < un->un_pwcnt; i++) {
		/* magic field is 0 for 4.0 compatibility */
		RAID_FILLIN_RPW(databuffer, un, 0, 0,
		    0, 0, 0,
		    0, col, 0);
		buf.b_un.b_addr = (caddr_t)databuffer;
		buf.b_edev = md_dev64_to_dev(dev_to_write);
		buf.b_bcount = dbtob(bsize);
		buf.b_lblkno = pwstart + (i * un->un_iosize);

		/* write buf */
		(void) md_call_strategy(&buf, MD_STR_NOTTOP, NULL);

		if (biowait(&buf)) {
			error = 1;
			break;
		}
		reset_buf(&buf, (B_BUSY | B_WRITE), copysize);
	} /* for */

	destroy_buf(&buf);
	kmem_free(databuffer, copysize);

	return (error);
}

/*
 * NAME:	raid_open_alt
 *
 * DESCRIPTION: opens the alt device used during resync.
 *
 * PARAMETERS:	un - raid unit structure
 *		index - raid column
 *
 * RETURN:	0 - successful
 *		1 - failed
 *
 * LOCKS:	requires unit writer lock
 */

static int
raid_open_alt(mr_unit_t *un, int index)
{
	mr_column_t	*column = &un->un_column[index];
	set_t		setno = MD_MIN2SET(MD_SID(un));
	side_t		side = mddb_getsidenum(setno);
	md_dev64_t	tmpdev = column->un_alt_dev;

	/* correct locks */
	ASSERT(UNIT_WRITER_HELD(un));
	/* not already writing to */
	ASSERT(! (column->un_devflags & MD_RAID_WRITE_ALT));
	/* not already open */
	ASSERT(! (column->un_devflags & MD_RAID_ALT_ISOPEN));

	if (tmpdev != NODEV64) {
		/*
		 * Open by device id.  We use orig_key since alt_dev
		 * has been set by the caller to be the same as orig_dev.
		 */
		if ((md_getmajor(tmpdev) != md_major) &&
		    md_devid_found(setno, side, column->un_orig_key) == 1) {
			tmpdev = md_resolve_bydevid(MD_SID(un), tmpdev,
			    column->un_orig_key);
		}
		if (md_layered_open(MD_SID(un), &tmpdev, MD_OFLG_NULL)) {
			/* open failed */
			column->un_alt_dev = tmpdev;
			return (1);
		} else {
			/* open succeeded */
			column->un_alt_dev = tmpdev;
			column->un_devflags |= MD_RAID_ALT_ISOPEN;
			return (0);
		}
	} else
		/* no alt device to open */
		return (1);
}


/*
 * NAME:	raid_close_alt
 *
 * DESCRIPTION: closes the alt device used during resync.
 *
 * PARAMETERS:	un - raid unit structure
 *		index - raid column
 *
 * RETURN:	none
 *
 * LOCKS:	requires unit writer lock
 */

static void
raid_close_alt(mr_unit_t *un, int index)
{
	mr_column_t	*column = &un->un_column[index];
	md_dev64_t	tmpdev = column->un_alt_dev;

	ASSERT(UNIT_WRITER_HELD(un));	/* correct locks */
	ASSERT(! (column->un_devflags & MD_RAID_WRITE_ALT)); /* not writing */
	ASSERT(column->un_devflags & MD_RAID_ALT_ISOPEN); /* already open */
	ASSERT(tmpdev != NODEV64); /* is a device */

	md_layered_close(column->un_alt_dev, MD_OFLG_NULL);
	column->un_devflags &= ~MD_RAID_ALT_ISOPEN;
	column->un_alt_dev = NODEV64;
}

static diskaddr_t
raid_resync_fillin_cs(diskaddr_t line, uint_t line_count, md_raidcs_t *cs)
{
	mr_unit_t	*un = cs->cs_un;

	ASSERT(line < un->un_segsincolumn);

	cs->cs_line = line;
	cs->cs_blkno = line * un->un_segsize;
	cs->cs_blkcnt = un->un_segsize * line_count;
	cs->cs_lastblk = cs->cs_blkno + cs->cs_blkcnt - 1;
	raid_line_reader_lock(cs, 1);

	return (line + line_count);
}

/* states returned by raid_resync_region */

#define	RAID_RESYNC_OKAY	0
#define	RAID_RESYNC_RDERROR	2
#define	RAID_RESYNC_WRERROR	3
#define	RAID_RESYNC_STATE	4
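
/*
 * These codes are returned by raid_resync_region() and interpreted by the
 * error-handling switch at the end of resync_comp() below.
 */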

int
raid_resync_region(
	md_raidcs_t	*cs,
	diskaddr_t	line,
	uint_t		line_count,
	int		*single_read,
	hs_cmds_t	*hs_state,
	int		*err_col,
	md_dev64_t	dev_to_write,
	diskaddr_t	write_dev_start)
{
	mr_unit_t	*un = cs->cs_un;
	buf_t		*readb1 = &cs->cs_pbuf;
	buf_t		*readb2 = &cs->cs_dbuf;
	buf_t		*writeb = &cs->cs_hbuf;
	diskaddr_t	off;
	size_t		tcopysize;
	size_t		copysize;
	int		resync;
	int		quit = 0;
	size_t		leftinseg;
	int		i;

	resync = un->un_resync_index;
	off = line * un->un_segsize;
	copysize = un->un_resync_copysize;

	/* find first column to read, skip resync column */

	leftinseg = un->un_segsize * line_count;
	while (leftinseg) {

		/* truncate last chunk to end if needed */
		if (copysize > leftinseg)
			tcopysize = leftinseg;
		else
			tcopysize = copysize;
		leftinseg -= tcopysize;

		/*
		 * One of two scenarios:
		 * 1) resync device with hotspare ok.  This implies that
		 * we are copying from a good hotspare to a new good original
		 * device.  In this case readb1 is used as the buf for
		 * the read from the hotspare device.
		 * 2) For all other cases, including when in case 1) and an
		 * error is detected on the (formerly good) hotspare device,
		 * readb1 is used for the initial read.  readb2 is used for
		 * all other reads.  Each readb2 buffer is xor'd into the
		 * readb1 buffer.
		 *
		 * In both cases, writeb is used for the write, using readb1's
		 * buffer.
		 *
		 * For case 2, we could alternatively perform the read for all
		 * devices concurrently to improve performance.  However,
		 * this could diminish performance for concurrent reads and
		 * writes if low on memory.
		 */

		/* read first buffer */

		/* switch to read from good columns if single_read */
		if (*single_read) {
			if (un->un_column[resync].un_dev == NODEV64)
				return (RAID_RESYNC_RDERROR);

			reset_buf(readb1, B_READ | B_BUSY,
			    dbtob(copysize));
			readb1->b_bcount = dbtob(tcopysize);
			readb1->b_un.b_addr = cs->cs_pbuffer;
			readb1->b_edev = md_dev64_to_dev(
			    un->un_column[resync].un_dev);
			readb1->b_lblkno =
			    un->un_column[resync].un_devstart + off;
			(void) md_call_strategy(readb1, MD_STR_NOTTOP, NULL);
			if (biowait(readb1)) {
				/*
				 * at this point just start rebuilding the
				 * data and go on since the other columns
				 * are okay.
				 */
				*single_read = 0;
				*hs_state = HS_BAD;
				un->un_column[resync].un_devflags &=
				    ~MD_RAID_COPY_RESYNC;
				un->un_column[resync].un_devflags |=
				    MD_RAID_REGEN_RESYNC;
			}
		}

		/* if reading from all non-resync columns */
		if (!*single_read) {
			/* for each column, read line and xor into write buf */
			bzero(cs->cs_pbuffer, dbtob(tcopysize));
			for (i = 0; i < un->un_totalcolumncnt; i++) {

				if (un->un_column[i].un_dev == NODEV64)
					return (RAID_RESYNC_RDERROR);

				/* skip column getting resync'ed */
				if (i == resync) {
					continue;
				}
				reset_buf(readb1, B_READ | B_BUSY,
				    dbtob(copysize));
				readb1->b_bcount = dbtob(tcopysize);
				readb1->b_un.b_addr = cs->cs_dbuffer;
				readb1->b_edev = md_dev64_to_dev(
				    un->un_column[i].un_dev);
				readb1->b_lblkno =
				    un->un_column[i].un_devstart + off;

				(void) md_call_strategy(readb1, MD_STR_NOTTOP,
				    NULL);
				if (biowait(readb1)) {
					*err_col = i;
					quit = RAID_RESYNC_RDERROR;
				}

				if (quit)
					return (quit);

				/* xor the data just read into the regen buf */
				xor(cs->cs_pbuffer, readb1->b_un.b_addr,
				    dbtob(tcopysize));
			} /* for */
		}

		reset_buf(writeb, B_WRITE | B_BUSY,
		    dbtob(copysize));
		writeb->b_bcount = dbtob(tcopysize);
		writeb->b_un.b_addr = cs->cs_pbuffer;
		writeb->b_lblkno = off + write_dev_start;
		writeb->b_edev = md_dev64_to_dev(dev_to_write);

		/* set write block number and perform the write */
		(void) md_call_strategy(writeb, MD_STR_NOTTOP, NULL);
		if (biowait(writeb)) {
			if (*single_read == 0) {
				*hs_state = HS_BAD;
			}
			return (RAID_RESYNC_WRERROR);
		}
		writeb->b_blkno += tcopysize;
		off += tcopysize;
	} /* while */
	sema_destroy(&readb1->b_io);
	sema_destroy(&readb1->b_sem);
	sema_destroy(&readb2->b_io);
	sema_destroy(&readb2->b_sem);
	sema_destroy(&writeb->b_io);
	sema_destroy(&writeb->b_sem);
	return (RAID_RESYNC_OKAY);
}

/*
 * NAME:	resync_comp
 *
 * DESCRIPTION: Resync the component.  Iterate through the raid unit a line at
 *		a time, read from the good device(s) and write the resync
 *		device.
 *
 * PARAMETERS:	minor_t mnum - minor number identity of metadevice
 *		md_raidcs_t *cs - child save struct
 *
 * LOCKS:	Expects Unit Reader Lock to be held across call.  Acquires and
 *		releases Line Reader Lock for per-line I/O.
 */
static void
resync_comp(
	minor_t		mnum,
	md_raidcs_t	*cs
)
{
	mdi_unit_t	*ui;
	mr_unit_t	*un;
	mddb_recid_t	recids[2];
	rcs_state_t	state;
	md_dev64_t	dev_to_write;
	diskaddr_t	write_pwstart;
	diskaddr_t	write_devstart;
	md_dev64_t	dev;
	int		resync;
	int		i;
	int		single_read = 0;
	int		err;
	int		err_cnt;
	int		last_err;
	diskaddr_t	line;
	diskaddr_t	segsincolumn;
	size_t		bsize;
	uint_t		line_count;

	/*
	 * hs_state is the state of the hotspare on the column being resynced
	 * dev_state is the state of the resync target
	 */
	hs_cmds_t	hs_state;
	int		err_col = -1;
	diskaddr_t	resync_end_pos;

	ui = MDI_UNIT(mnum);
	ASSERT(ui != NULL);

	un = cs->cs_un;

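	/*
	 * Exchange the caller's unit reader lock for the io writer and unit
	 * writer locks before examining and updating the resync state below.
	 */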
	md_unit_readerexit(ui);
	un = (mr_unit_t *)md_io_writerlock(ui);
	un = (mr_unit_t *)md_unit_writerlock(ui);
	resync = un->un_resync_index;
	state = un->un_column[resync].un_devstate;
	line_count = un->un_maxio / un->un_segsize;
	if (line_count == 0) { /* handle the case of segsize > maxio */
		line_count = 1;
		bsize = un->un_maxio;
	} else
		bsize = line_count * un->un_segsize;

	un->un_resync_copysize = (uint_t)bsize;

	ASSERT(un->c.un_status & MD_UN_RESYNC_ACTIVE);
	ASSERT(un->un_column[resync].un_devflags &
	    (MD_RAID_COPY_RESYNC | MD_RAID_REGEN_RESYNC));

	/*
	 * if the column is not in resync then just bail out.
	 */
	if (! (un->un_column[resync].un_devstate & RCS_RESYNC)) {
		md_unit_writerexit(ui);
		md_io_writerexit(ui);
		un = (mr_unit_t *)md_unit_readerlock(ui);
		return;
	}
	SE_NOTIFY(EC_SVM_STATE, ESC_SVM_RESYNC_START, SVM_TAG_METADEVICE,
	    MD_UN2SET(un), MD_SID(un));

	/* identify device to write and its start block */

	if (un->un_column[resync].un_alt_dev != NODEV64) {
		if (raid_open_alt(un, resync)) {
			raid_set_state(un, resync, state, 0);
			md_unit_writerexit(ui);
			md_io_writerexit(ui);
			un = (mr_unit_t *)md_unit_readerlock(ui);
			cmn_err(CE_WARN, "md: %s: %s open failed replace "
			    "terminated", md_shortname(MD_SID(un)),
			    md_devname(MD_UN2SET(un),
			    un->un_column[resync].un_alt_dev,
			    NULL, 0));
			SE_NOTIFY(EC_SVM_STATE, ESC_SVM_RESYNC_FAILED,
			    SVM_TAG_METADEVICE, MD_UN2SET(un), MD_SID(un));
			return;
		}
		ASSERT(un->un_column[resync].un_devflags & MD_RAID_COPY_RESYNC);
		dev_to_write = un->un_column[resync].un_alt_dev;
		write_devstart = un->un_column[resync].un_alt_devstart;
		write_pwstart = un->un_column[resync].un_alt_pwstart;
		if (un->un_column[resync].un_devflags & MD_RAID_DEV_ERRED) {
			single_read = 0;
			hs_state = HS_BAD;
		} else {
			hs_state = HS_FREE;
			single_read = 1;
		}
		un->un_column[resync].un_devflags |= MD_RAID_WRITE_ALT;
	} else {
		dev_to_write = un->un_column[resync].un_dev;
		write_devstart = un->un_column[resync].un_devstart;
		write_pwstart = un->un_column[resync].un_pwstart;
		single_read = 0;
		hs_state = HS_FREE;
		ASSERT(un->un_column[resync].un_devflags &
		    MD_RAID_REGEN_RESYNC);
	}

	alloc_bufs(cs, dbtob(bsize));
	/* initialize pre-write area */
	if (init_pw_area(un, dev_to_write, write_pwstart, resync)) {
		un->un_column[resync].un_devflags &= ~MD_RAID_WRITE_ALT;
		if (un->un_column[resync].un_alt_dev != NODEV64) {
			raid_close_alt(un, resync);
		}
		md_unit_writerexit(ui);
		md_io_writerexit(ui);
		if (dev_to_write == un->un_column[resync].un_dev)
			hs_state = HS_BAD;
		err = RAID_RESYNC_WRERROR;
		goto resync_comp_error;
	}

	un->c.un_status &= ~MD_UN_RESYNC_CANCEL;
	segsincolumn = un->un_segsincolumn;
	err_cnt = raid_state_cnt(un, RCS_ERRED | RCS_LAST_ERRED);

	/* commit the record */

	md_unit_writerexit(ui);
	md_io_writerexit(ui);


	/* resync each line of the unit */
	for (line = 0; line < segsincolumn; line += line_count) {
		/*
		 * Update address range in child struct and lock the line.
		 *
		 * The reader version of the line lock is used since only
		 * resync will use data beyond un_resync_line_index on the
		 * resync device.
		 */
		un = (mr_unit_t *)md_io_readerlock(ui);
		if (line + line_count > segsincolumn)
			line_count = segsincolumn - line;
		resync_end_pos = raid_resync_fillin_cs(line, line_count, cs);
		(void) md_unit_readerlock(ui);
		ASSERT(un->un_resync_line_index == resync_end_pos);
		err = raid_resync_region(cs, line, (int)line_count,
		    &single_read, &hs_state, &err_col, dev_to_write,
		    write_devstart);

		/*
		 * if the column failed to resync then stop writing directly
		 * to the column.
		 */
		if (err)
			un->un_resync_line_index = 0;

		md_unit_readerexit(ui);
		raid_line_exit(cs);
		md_io_readerexit(ui);

		if (err)
			break;

		un = (mr_unit_t *)md_unit_writerlock(ui);

		if (raid_state_cnt(un, RCS_ERRED | RCS_LAST_ERRED) != err_cnt) {
			err = RAID_RESYNC_STATE;
			md_unit_writerexit(ui);
			break;
		}
		md_unit_writerexit(ui);
	} /* for */

resync_comp_error:
	un = (mr_unit_t *)md_io_writerlock(ui);
	(void) md_unit_writerlock(ui);
	un->un_column[resync].un_devflags &= ~MD_RAID_WRITE_ALT;

	recids[0] = 0;
	recids[1] = 0;
	switch (err) {
	/*
	 * successful resync
	 */
	case RAID_RESYNC_OKAY:
		/* initialize pre-write area */
		if ((un->un_column[resync].un_orig_dev != NODEV64) &&
		    (un->un_column[resync].un_orig_dev ==
		    un->un_column[resync].un_alt_dev)) {
			/*
			 * replacing a hot spare:
			 * release the hot spare, which will close it and
			 * mark it closed.
			 */
			raid_hs_release(hs_state, un, &recids[0], resync);
			/*
			 * make the resync target the main device and
			 * mark open
			 */
			un->un_column[resync].un_hs_id = 0;
			un->un_column[resync].un_dev =
			    un->un_column[resync].un_orig_dev;
			un->un_column[resync].un_devstart =
			    un->un_column[resync].un_orig_devstart;
			un->un_column[resync].un_pwstart =
			    un->un_column[resync].un_orig_pwstart;
			un->un_column[resync].un_devflags |= MD_RAID_DEV_ISOPEN;
			/* alt becomes the device so don't close it */
			un->un_column[resync].un_devflags &= ~MD_RAID_WRITE_ALT;
			un->un_column[resync].un_devflags &=
			    ~MD_RAID_ALT_ISOPEN;
			un->un_column[resync].un_alt_dev = NODEV64;
		}
		raid_set_state(un, resync, RCS_OKAY, 0);
		break;

	case RAID_RESYNC_WRERROR:
		if (HOTSPARED(un, resync) && single_read &&
		    (un->un_column[resync].un_devflags & MD_RAID_COPY_RESYNC)) {
			/*
			 * this is the case where the resync target is
			 * bad but there is a good hotspare.  In this
			 * case keep the hotspare, and go back to okay.
			 */
			raid_set_state(un, resync, RCS_OKAY, 0);
			cmn_err(CE_WARN, "md: %s: %s write error, replace "
			    "terminated", md_shortname(MD_SID(un)),
			    md_devname(MD_UN2SET(un),
			    un->un_column[resync].un_orig_dev,
			    NULL, 0));
			break;
		}
		if (HOTSPARED(un, resync)) {
			raid_hs_release(hs_state, un, &recids[0], resync);
			un->un_column[resync].un_dev =
			    un->un_column[resync].un_orig_dev;
			un->un_column[resync].un_devstart =
			    un->un_column[resync].un_orig_devstart;
			un->un_column[resync].un_pwstart =
			    un->un_column[resync].un_orig_pwstart;
		}
		raid_set_state(un, resync, RCS_ERRED, 0);
		if (un->un_column[resync].un_devflags & MD_RAID_REGEN_RESYNC)
			dev = un->un_column[resync].un_dev;
		else
			dev = un->un_column[resync].un_alt_dev;
		cmn_err(CE_WARN, "md: %s: %s write error replace terminated",
		    md_shortname(MD_SID(un)), md_devname(MD_UN2SET(un), dev,
		    NULL, 0));
		break;

	case RAID_RESYNC_STATE:
		if (HOTSPARED(un, resync) && single_read &&
		    (un->un_column[resync].un_devflags & MD_RAID_COPY_RESYNC)) {
			/*
			 * this is the case where the resync target is
			 * bad but there is a good hotspare.  In this
			 * case keep the hotspare, and go back to okay.
			 */
			raid_set_state(un, resync, RCS_OKAY, 0);
			cmn_err(CE_WARN, "md: %s: needs maintenance, replace "
			    "terminated", md_shortname(MD_SID(un)));
			break;
		}
		if (HOTSPARED(un, resync)) {
			raid_hs_release(hs_state, un, &recids[0], resync);
			un->un_column[resync].un_dev =
			    un->un_column[resync].un_orig_dev;
			un->un_column[resync].un_devstart =
			    un->un_column[resync].un_orig_devstart;
			un->un_column[resync].un_pwstart =
			    un->un_column[resync].un_orig_pwstart;
		}
		break;
	case RAID_RESYNC_RDERROR:
		if (HOTSPARED(un, resync)) {
			raid_hs_release(hs_state, un, &recids[0], resync);
			un->un_column[resync].un_dev =
			    un->un_column[resync].un_orig_dev;
			un->un_column[resync].un_devstart =
			    un->un_column[resync].un_orig_devstart;
			un->un_column[resync].un_pwstart =
			    un->un_column[resync].un_orig_pwstart;
		}

		if ((resync != err_col) && (err_col != NOCOLUMN))
			raid_set_state(un, err_col, RCS_ERRED, 0);
		break;

	default:
		ASSERT(0);
	}
	if (un->un_column[resync].un_alt_dev != NODEV64) {
		raid_close_alt(un, resync);
	}

	/*
	 * An I/O operation may have gotten an error and placed a
	 * column in erred state.  This will abort the resync, which
	 * will end up in last erred.  This is ugly, so go through
	 * the columns and do cleanup.
	 */
	err_cnt = 0;
	last_err = 0;
	for (i = 0; i < un->un_totalcolumncnt; i++) {
		if (un->un_column[i].un_devstate & RCS_OKAY)
			continue;
		if (i == resync) {
			raid_set_state(un, i, RCS_ERRED, 1);
			err_cnt++;
		} else if (err == RAID_RESYNC_OKAY) {
			err_cnt++;
		} else {
			raid_set_state(un, i, RCS_LAST_ERRED, 1);
			last_err++;
		}
	}
	if ((err_cnt == 0) && (last_err == 0))
		un->un_state = RUS_OKAY;
	else if (last_err == 0) {
		un->un_state = RUS_ERRED;
		ASSERT(err_cnt == 1);
	} else if (last_err > 0) {
		un->un_state = RUS_LAST_ERRED;
	}

	uniqtime32(&un->un_column[resync].un_devtimestamp);
	un->un_resync_copysize = 0;
	un->un_column[resync].un_devflags &=
	    ~(MD_RAID_REGEN_RESYNC | MD_RAID_COPY_RESYNC);
	raid_commit(un, recids);
	/* release unit writer lock and acquire unit reader lock */
	md_unit_writerexit(ui);
	md_io_writerexit(ui);
	(void) md_unit_readerlock(ui);
	if (err == RAID_RESYNC_OKAY) {
		SE_NOTIFY(EC_SVM_STATE, ESC_SVM_RESYNC_DONE,
		    SVM_TAG_METADEVICE, MD_UN2SET(un), MD_SID(un));
	} else {
		SE_NOTIFY(EC_SVM_STATE, ESC_SVM_RESYNC_FAILED,
		    SVM_TAG_METADEVICE, MD_UN2SET(un), MD_SID(un));
		if (raid_state_cnt(un, RCS_ERRED |
		    RCS_LAST_ERRED) > 1) {
			SE_NOTIFY(EC_SVM_STATE, ESC_SVM_LASTERRED,
			    SVM_TAG_METADEVICE, MD_UN2SET(un), MD_SID(un));
		} else {
			SE_NOTIFY(EC_SVM_STATE, ESC_SVM_ERRED,
			    SVM_TAG_METADEVICE, MD_UN2SET(un), MD_SID(un));
		}
	}

	free_bufs(dbtob(bsize), cs);
}

/*
 * NAME:	resync_unit
 *
 * DESCRIPTION: Start of RAID resync thread.  Perform up front allocations,
 *		initializations and consistency checking, then call
 *		resync_comp to resync the component.
 *
 * PARAMETERS:	minor_t mnum - minor number identity of metadevice
 *
 * LOCKS:	Acquires and releases Unit Reader Lock to maintain unit
 *		existence during resync.
 *		Acquires and releases the resync count lock for cpr.
 */
static void
resync_unit(
	minor_t		mnum
)
{
	mdi_unit_t	*ui;
	mr_unit_t	*un;
	md_raidps_t	*ps = NULL;
	md_raidcs_t	*cs = NULL;
	int		resync;

	/*
	 * Increment the raid resync count for cpr
	 */
	mutex_enter(&md_cpr_resync.md_resync_mutex);
	md_cpr_resync.md_raid_resync++;
	mutex_exit(&md_cpr_resync.md_resync_mutex);

	ui = MDI_UNIT(mnum);
	ASSERT(ui != NULL);

	un = (mr_unit_t *)md_unit_readerlock(ui);

	/*
	 * Allocate parent and child memory pool structures.  These are
	 * only needed to lock raid lines, so only the minimal
	 * required fields for this purpose are initialized.
	 *
	 * Do not use the reserve pool for resync.
	 */
	ps = kmem_cache_alloc(raid_parent_cache, MD_ALLOCFLAGS);
	raid_parent_init(ps);
	cs = kmem_cache_alloc(raid_child_cache, MD_ALLOCFLAGS);
	raid_child_init(cs);
	resync = un->un_resync_index;
	ps->ps_un = un;
	ps->ps_ui = ui;
	ps->ps_flags = MD_RPS_INUSE;
	cs->cs_ps = ps;
	cs->cs_un = un;

	ASSERT(!(un->un_column[resync].un_devflags & MD_RAID_WRITE_ALT));

	resync_comp(mnum, cs);
	release_resync_request(mnum);

	kmem_cache_free(raid_child_cache, cs);
	kmem_cache_free(raid_parent_cache, ps);

	md_unit_readerexit(ui);

	/* close raid unit */
	(void) raid_internal_close(mnum, OTYP_LYR, 0, 0);

	/* poke hot spare daemon */
	(void) raid_hotspares();

	/*
	 * Decrement the raid resync count for cpr
	 */
	mutex_enter(&md_cpr_resync.md_resync_mutex);
	md_cpr_resync.md_raid_resync--;
	mutex_exit(&md_cpr_resync.md_resync_mutex);

	thread_exit();
}

/*
 * NAME:	raid_resync_unit
 *
 * DESCRIPTION: RAID metadevice specific resync routine.
 *		Open the unit and start resync_unit as a separate thread.
 *
 * PARAMETERS:	minor_t mnum - minor number identity of metadevice
 *		md_error_t *ep - output error parameter
 *
 * RETURN:	On error return 1 or set ep to nonzero, otherwise return 0.
 *
 * LOCKS:	Acquires and releases Unit Writer Lock.
 */
int
raid_resync_unit(
	minor_t		mnum,
	md_error_t	*ep
)
{
	mdi_unit_t	*ui;
	set_t		setno = MD_MIN2SET(mnum);
	mr_unit_t	*un;

	ui = MDI_UNIT(mnum);
	un = MD_UNIT(mnum);

	if (md_get_setstatus(setno) & MD_SET_STALE)
		return (mdmddberror(ep, MDE_DB_STALE, mnum, setno));

	ASSERT(un->un_column[un->un_resync_index].un_devflags &
	    (MD_RAID_COPY_RESYNC | MD_RAID_REGEN_RESYNC));

	/* Don't start a resync if the device is not available */
	if ((ui == NULL) || (ui->ui_tstate & MD_DEV_ERRORED)) {
		return (mdmderror(ep, MDE_RAID_OPEN_FAILURE, mnum));
	}

	if (raid_internal_open(mnum, FREAD | FWRITE, OTYP_LYR, 0)) {
		(void) md_unit_writerlock(ui);
		release_resync_request(mnum);
		md_unit_writerexit(ui);
		SE_NOTIFY(EC_SVM_STATE, ESC_SVM_OPEN_FAIL, SVM_TAG_METADEVICE,
		    setno, MD_SID(un));
		return (mdmderror(ep, MDE_RAID_OPEN_FAILURE, mnum));
	}

	/* start resync_unit thread */
	(void) thread_create(NULL, 0, resync_unit, (void *)(uintptr_t)mnum,
	    0, &p0, TS_RUN, minclsyspri);

	return (0);
}