/*	$NetBSD: rf_reconutil.c,v 1.4 2001/10/04 15:58:56 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/********************************************
 * rf_reconutil.c -- reconstruction utilities
 ********************************************/

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_desc.h"
#include "rf_reconutil.h"
#include "rf_reconbuffer.h"
#include "rf_general.h"
#include "rf_decluster.h"
#include "rf_raid5_rotatedspare.h"
#include "rf_interdecluster.h"
#include "rf_chaindecluster.h"

/*******************************************************************
 * allocates/frees the reconstruction control information structures
 *******************************************************************/
RF_ReconCtrl_t *
rf_MakeReconControl(reconDesc, frow, fcol, srow, scol)
	RF_RaidReconDesc_t *reconDesc;
	RF_RowCol_t frow;	/* failed row and column */
	RF_RowCol_t fcol;
	RF_RowCol_t srow;	/* identifies which spare we're using */
	RF_RowCol_t scol;
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ReconUnitCount_t RUsPerPU = layoutPtr->SUsPerPU / layoutPtr->SUsPerRU;
	RF_ReconUnitCount_t numSpareRUs;
	RF_ReconCtrl_t *reconCtrlPtr;
	RF_ReconBuffer_t *rbuf;
	RF_LayoutSW_t *lp;
	int     retcode, rc;
	RF_RowCol_t i;

	lp = raidPtr->Layout.map;

	/* make and zero the global reconstruction structure and the per-disk
	 * structure */
	RF_Calloc(reconCtrlPtr, 1, sizeof(RF_ReconCtrl_t), (RF_ReconCtrl_t *));
	RF_Calloc(reconCtrlPtr->perDiskInfo, raidPtr->numCol, sizeof(RF_PerDiskReconCtrl_t), (RF_PerDiskReconCtrl_t *));	/* this zeros it */
	reconCtrlPtr->reconDesc = reconDesc;
	reconCtrlPtr->fcol = fcol;
	reconCtrlPtr->spareRow = srow;
	reconCtrlPtr->spareCol = scol;
	reconCtrlPtr->lastPSID = layoutPtr->numStripe / layoutPtr->SUsPerPU;
	reconCtrlPtr->percentComplete = 0;

	/* initialize each per-disk recon information structure */
	for (i = 0; i < raidPtr->numCol; i++) {
		reconCtrlPtr->perDiskInfo[i].reconCtrl = reconCtrlPtr;
		reconCtrlPtr->perDiskInfo[i].row = frow;
		reconCtrlPtr->perDiskInfo[i].col = i;
		reconCtrlPtr->perDiskInfo[i].curPSID = -1;	/* make it appear as if
								 * we just finished an
								 * RU */
		reconCtrlPtr->perDiskInfo[i].ru_count = RUsPerPU - 1;
	}

	/* Get the number of spare units per disk and the sparemap in case
	 * sparing is distributed */

	if (lp->GetNumSpareRUs) {
		numSpareRUs = lp->GetNumSpareRUs(raidPtr);
	} else {
		numSpareRUs = 0;
	}

	/*
	 * Not all distributed sparing archs need dynamic mappings
	 */
	if (lp->InstallSpareTable) {
		retcode = rf_InstallSpareTable(raidPtr, frow, fcol);
		if (retcode) {
			RF_PANIC();	/* XXX fix this */
		}
	}
	/* make the reconstruction map */
	reconCtrlPtr->reconMap = rf_MakeReconMap(raidPtr, (int) (layoutPtr->SUsPerRU * layoutPtr->sectorsPerStripeUnit),
	    raidPtr->sectorsPerDisk, numSpareRUs);

	/* make the per-disk reconstruction buffers */
	for (i = 0; i < raidPtr->numCol; i++) {
		reconCtrlPtr->perDiskInfo[i].rbuf = (i == fcol) ? NULL : rf_MakeReconBuffer(raidPtr, frow, i, RF_RBUF_TYPE_EXCLUSIVE);
	}

	/* initialize the event queue */
	rc = rf_mutex_init(&reconCtrlPtr->eq_mutex);
	if (rc) {
		/* XXX deallocate, cleanup */
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (NULL);
	}
	rc = rf_cond_init(&reconCtrlPtr->eq_cond);
	if (rc) {
		/* XXX deallocate, cleanup */
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (NULL);
	}
	reconCtrlPtr->eventQueue = NULL;
	reconCtrlPtr->eq_count = 0;

	/* make the floating recon buffers and append them to the free list */
	rc = rf_mutex_init(&reconCtrlPtr->rb_mutex);
	if (rc) {
		/* XXX deallocate, cleanup */
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (NULL);
	}
	reconCtrlPtr->fullBufferList = NULL;
	reconCtrlPtr->priorityList = NULL;
	reconCtrlPtr->floatingRbufs = NULL;
	reconCtrlPtr->committedRbufs = NULL;
	for (i = 0; i < raidPtr->numFloatingReconBufs; i++) {
		rbuf = rf_MakeReconBuffer(raidPtr, frow, fcol, RF_RBUF_TYPE_FLOATING);
		rbuf->next = reconCtrlPtr->floatingRbufs;
		reconCtrlPtr->floatingRbufs = rbuf;
	}

	/* create the parity stripe status table */
	reconCtrlPtr->pssTable = rf_MakeParityStripeStatusTable(raidPtr);

	/* set the initial min head sep counter val */
	reconCtrlPtr->minHeadSepCounter = 0;

	return (reconCtrlPtr);
}
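
/*
 * Added commentary (not part of the original file): the function above
 * returns a fully initialized RF_ReconCtrl_t, or NULL if one of the
 * mutex/condition-variable initializations fails (note that those
 * early-return paths also leak what was already allocated; see the XXX
 * cleanup notes).  A caller on the reconstruction path is expected to
 * stash the result where rf_FreeReconControl() below will look for it,
 * along the lines of:
 *
 *	raidPtr->reconControl[frow] =
 *	    rf_MakeReconControl(reconDesc, frow, fcol, srow, scol);
 *	if (raidPtr->reconControl[frow] == NULL)
 *		goto fail;
 *
 * The snippet is a sketch only; actual error handling is caller-specific.
 */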

void
rf_FreeReconControl(raidPtr, row)
	RF_Raid_t *raidPtr;
	RF_RowCol_t row;
{
	RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl[row];
	RF_ReconBuffer_t *t;
	RF_ReconUnitNum_t i;

	RF_ASSERT(reconCtrlPtr);
	for (i = 0; i < raidPtr->numCol; i++)
		if (reconCtrlPtr->perDiskInfo[i].rbuf)
			rf_FreeReconBuffer(reconCtrlPtr->perDiskInfo[i].rbuf);
	for (i = 0; i < raidPtr->numFloatingReconBufs; i++) {
		t = reconCtrlPtr->floatingRbufs;
		RF_ASSERT(t);
		reconCtrlPtr->floatingRbufs = t->next;
		rf_FreeReconBuffer(t);
	}
	rf_mutex_destroy(&reconCtrlPtr->rb_mutex);
	rf_mutex_destroy(&reconCtrlPtr->eq_mutex);
	rf_cond_destroy(&reconCtrlPtr->eq_cond);
	rf_FreeReconMap(reconCtrlPtr->reconMap);
	rf_FreeParityStripeStatusTable(raidPtr, reconCtrlPtr->pssTable);
	RF_Free(reconCtrlPtr->perDiskInfo, raidPtr->numCol * sizeof(RF_PerDiskReconCtrl_t));
	RF_Free(reconCtrlPtr, sizeof(*reconCtrlPtr));
}
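
/*
 * Added commentary: rf_FreeReconControl() is the inverse of
 * rf_MakeReconControl().  It assumes raidPtr->reconControl[row] still
 * points at the structure built above and that the floating-buffer free
 * list again holds exactly raidPtr->numFloatingReconBufs entries (the
 * RF_ASSERT in the loop enforces this), i.e. reconstruction has quiesced
 * and every floating buffer has been returned before teardown.
 */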


/******************************************************************************
 * computes the default head separation limit
 *****************************************************************************/
RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimit(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_HeadSepLimit_t hsl;
	RF_LayoutSW_t *lp;

	lp = raidPtr->Layout.map;
	if (lp->GetDefaultHeadSepLimit == NULL)
		return (-1);
	hsl = lp->GetDefaultHeadSepLimit(raidPtr);
	return (hsl);
}
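
/*
 * Added commentary: a return of -1 simply means the layout provides no
 * GetDefaultHeadSepLimit hook, i.e. it declines to suggest a head
 * separation limit; otherwise the value is whatever the layout-specific
 * hook computes.
 */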


/******************************************************************************
 * computes the default number of floating recon buffers
 *****************************************************************************/
int
rf_GetDefaultNumFloatingReconBuffers(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_LayoutSW_t *lp;
	int     nrb;

	lp = raidPtr->Layout.map;
	if (lp->GetDefaultNumFloatingReconBuffers == NULL)
		return (3 * raidPtr->numCol);
	nrb = lp->GetDefaultNumFloatingReconBuffers(raidPtr);
	return (nrb);
}
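
/*
 * Added commentary: when the layout supplies no hook of its own, the
 * default is three floating reconstruction buffers per column, e.g. a
 * 5-column array gets 15.  Assuming the caller passes the value through
 * unchanged, this is what sizes the free pool built by the
 * raidPtr->numFloatingReconBufs loop in rf_MakeReconControl() above.
 */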


/******************************************************************************
 * creates and initializes a reconstruction buffer
 *****************************************************************************/
RF_ReconBuffer_t *
rf_MakeReconBuffer(
    RF_Raid_t * raidPtr,
    RF_RowCol_t row,
    RF_RowCol_t col,
    RF_RbufType_t type)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ReconBuffer_t *t;
	u_int   recon_buffer_size = rf_RaidAddressToByte(raidPtr, layoutPtr->SUsPerRU * layoutPtr->sectorsPerStripeUnit);

	RF_Malloc(t, sizeof(RF_ReconBuffer_t), (RF_ReconBuffer_t *));
	RF_Malloc(t->buffer, recon_buffer_size, (caddr_t));
	RF_Malloc(t->arrived, raidPtr->numCol * sizeof(char), (char *));
	t->raidPtr = raidPtr;
	t->row = row;
	t->col = col;
	t->priority = RF_IO_RECON_PRIORITY;
	t->type = type;
	t->pssPtr = NULL;
	t->next = NULL;
	return (t);
}
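
/*
 * Added commentary: each buffer is sized to hold one full reconstruction
 * unit (SUsPerRU * sectorsPerStripeUnit, converted to bytes), and the
 * arrived[] array carries one flag per column, which the recon buffer
 * code uses to track which disks have contributed data.  The type
 * argument distinguishes the per-disk RF_RBUF_TYPE_EXCLUSIVE buffers
 * created in rf_MakeReconControl() from the shared RF_RBUF_TYPE_FLOATING
 * pool.
 */
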
/******************************************************************************
 * frees a reconstruction buffer
 *****************************************************************************/
void
rf_FreeReconBuffer(rbuf)
	RF_ReconBuffer_t *rbuf;
{
	RF_Raid_t *raidPtr = rbuf->raidPtr;
	u_int   recon_buffer_size = rf_RaidAddressToByte(raidPtr, raidPtr->Layout.SUsPerRU * raidPtr->Layout.sectorsPerStripeUnit);

	RF_Free(rbuf->arrived, raidPtr->numCol * sizeof(char));
	RF_Free(rbuf->buffer, recon_buffer_size);
	RF_Free(rbuf, sizeof(*rbuf));
}
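
/*
 * Added commentary: note that the buffer size is recomputed here from the
 * current layout rather than stored in the RF_ReconBuffer_t, so the free
 * is only correct as long as SUsPerRU and sectorsPerStripeUnit have not
 * changed since rf_MakeReconBuffer() allocated the buffer.
 */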


/******************************************************************************
 * debug only:  sanity check the number of floating recon bufs in use
 *****************************************************************************/
void
rf_CheckFloatingRbufCount(raidPtr, dolock)
	RF_Raid_t *raidPtr;
	int     dolock;
{
	RF_ReconParityStripeStatus_t *p;
	RF_PSStatusHeader_t *pssTable;
	RF_ReconBuffer_t *rbuf;
	int     i, j, sum = 0;
	RF_RowCol_t frow = 0;

	for (i = 0; i < raidPtr->numRow; i++)
		if (raidPtr->reconControl[i]) {
			frow = i;
			break;
		}
	RF_ASSERT(frow >= 0);

	if (dolock)
		RF_LOCK_MUTEX(raidPtr->reconControl[frow]->rb_mutex);
	pssTable = raidPtr->reconControl[frow]->pssTable;

	for (i = 0; i < raidPtr->pssTableSize; i++) {
		RF_LOCK_MUTEX(pssTable[i].mutex);
		for (p = pssTable[i].chain; p; p = p->next) {
			rbuf = (RF_ReconBuffer_t *) p->rbuf;
			if (rbuf && rbuf->type == RF_RBUF_TYPE_FLOATING)
				sum++;

			rbuf = (RF_ReconBuffer_t *) p->writeRbuf;
			if (rbuf && rbuf->type == RF_RBUF_TYPE_FLOATING)
				sum++;

			for (j = 0; j < p->xorBufCount; j++) {
				rbuf = (RF_ReconBuffer_t *) p->rbufsForXor[j];
				RF_ASSERT(rbuf);
				if (rbuf->type == RF_RBUF_TYPE_FLOATING)
					sum++;
			}
		}
		RF_UNLOCK_MUTEX(pssTable[i].mutex);
	}

	for (rbuf = raidPtr->reconControl[frow]->floatingRbufs; rbuf; rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->committedRbufs; rbuf; rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->fullBufferList; rbuf; rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->priorityList; rbuf; rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}

	RF_ASSERT(sum == raidPtr->numFloatingReconBufs);

	if (dolock)
		RF_UNLOCK_MUTEX(raidPtr->reconControl[frow]->rb_mutex);
}
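
/*
 * Added commentary: the assertion above encodes the floating-buffer
 * accounting invariant: every RF_RBUF_TYPE_FLOATING buffer must be
 * reachable either through a parity stripe status entry (as its rbuf,
 * its writeRbuf, or one of its rbufsForXor[] entries) or on one of the
 * four lists hanging off the recon control structure (floatingRbufs,
 * committedRbufs, fullBufferList, priorityList), and the total must be
 * exactly raidPtr->numFloatingReconBufs.  The walk takes each pss
 * bucket's mutex and, when dolock is set, the rb_mutex as well.
 */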
337