/*	$NetBSD: rf_raid5.c,v 1.7 2002/09/23 02:40:09 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/******************************************************************************
 *
 * rf_raid5.c -- implements RAID Level 5
 *
 *****************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_raid5.c,v 1.7 2002/09/23 02:40:09 oster Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_raid5.h"
#include "rf_dag.h"
#include "rf_dagffrd.h"
#include "rf_dagffwr.h"
#include "rf_dagdegrd.h"
#include "rf_dagdegwr.h"
#include "rf_dagutils.h"
#include "rf_general.h"
#include "rf_map.h"
#include "rf_utils.h"

typedef struct RF_Raid5ConfigInfo_s {
	RF_RowCol_t **stripeIdentifier;	/* filled in at config time and used
					 * by IdentifyStripe */
}       RF_Raid5ConfigInfo_t;

int
rf_ConfigureRAID5(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_Raid5ConfigInfo_t *info;
	RF_RowCol_t i, j, startdisk;

	/* create a RAID level 5 configuration structure */
	RF_MallocAndAdd(info, sizeof(RF_Raid5ConfigInfo_t), (RF_Raid5ConfigInfo_t *), raidPtr->cleanupList);
	if (info == NULL)
		return (ENOMEM);
	layoutPtr->layoutSpecificInfo = (void *) info;

	RF_ASSERT(raidPtr->numRow == 1);

	/* the stripe identifier must identify the disks in each stripe, IN
	 * THE ORDER THAT THEY APPEAR IN THE STRIPE. */
	info->stripeIdentifier = rf_make_2d_array(raidPtr->numCol, raidPtr->numCol, raidPtr->cleanupList);
	if (info->stripeIdentifier == NULL)
		return (ENOMEM);
	startdisk = 0;
	for (i = 0; i < raidPtr->numCol; i++) {
		for (j = 0; j < raidPtr->numCol; j++) {
			info->stripeIdentifier[i][j] = (startdisk + j) % raidPtr->numCol;
		}
		if ((--startdisk) < 0)
			startdisk = raidPtr->numCol - 1;
	}
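	/*
	 * Illustrative example: assuming a hypothetical numCol of 4, the
	 * rotation above builds
	 *
	 *	stripeIdentifier[0] = { 0, 1, 2, 3 }
	 *	stripeIdentifier[1] = { 3, 0, 1, 2 }
	 *	stripeIdentifier[2] = { 2, 3, 0, 1 }
	 *	stripeIdentifier[3] = { 1, 2, 3, 0 }
	 *
	 * i.e. row i lists the physical columns of stripe i's units in
	 * stripe order, shifted by one column per stripe so that the parity
	 * unit rotates across all disks.
	 */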

	/* fill in the remaining layout parameters */
	layoutPtr->numStripe = layoutPtr->stripeUnitsPerDisk;
	layoutPtr->numDataCol = raidPtr->numCol - 1;
	layoutPtr->dataSectorsPerStripe = layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->numParityCol = 1;
	layoutPtr->dataStripeUnitsPerDisk = layoutPtr->stripeUnitsPerDisk;

	raidPtr->totalSectors = layoutPtr->stripeUnitsPerDisk * layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;
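	/*
	 * Illustrative example: assuming a hypothetical array with
	 * numCol == 5 (so numDataCol == 4), stripeUnitsPerDisk == 1000 and
	 * sectorsPerStripeUnit == 32, the exported capacity would be
	 * 1000 * 4 * 32 = 128000 sectors; the remaining fifth of the raw
	 * space holds parity.
	 */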

	return (0);
}

int
rf_GetDefaultNumFloatingReconBuffersRAID5(RF_Raid_t * raidPtr)
{
	return (20);
}

RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimitRAID5(RF_Raid_t * raidPtr)
{
	return (10);
}
#if !defined(__NetBSD__) && !defined(_KERNEL)
/* not currently used */
int
rf_ShutdownRAID5(RF_Raid_t * raidPtr)
{
	return (0);
}
#endif

void
rf_MapSectorRAID5(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t raidSector,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * diskSector,
    int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;
	*row = 0;
	*col = (SUID % raidPtr->numCol);
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}
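/*
 * Illustrative example: assuming a hypothetical numCol of 4
 * (numDataCol == 3) and sectorsPerStripeUnit == 32, raidSector 200 gives
 * SUID = 200 / 32 = 6, so *col = 6 % 4 = 2 and
 * *diskSector = (6 / 3) * 32 + (200 % 32) = 72.  Data stripe units simply
 * rotate through all columns; the parity unit for the same stripe is
 * located separately by rf_MapParityRAID5 below.
 */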

void
rf_MapParityRAID5(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t raidSector,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * diskSector,
    int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;

	*row = 0;
	*col = raidPtr->Layout.numDataCol - (SUID / raidPtr->Layout.numDataCol) % raidPtr->numCol;
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}
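/*
 * Illustrative example: continuing the hypothetical numCol == 4,
 * sectorsPerStripeUnit == 32 case above, raidSector 200 (SUID == 6) lies in
 * data stripe 6 / 3 == 2, so *col = 3 - (2 % 4) = 1; successive stripes
 * place parity on columns 3, 2, 1, 0, 3, ... so the parity column rotates
 * across the whole array.
 */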

void
rf_IdentifyStripeRAID5(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t addr,
    RF_RowCol_t ** diskids,
    RF_RowCol_t * outRow)
{
	RF_StripeNum_t stripeID = rf_RaidAddressToStripeID(&raidPtr->Layout, addr);
	RF_Raid5ConfigInfo_t *info = (RF_Raid5ConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;

	*outRow = 0;
	*diskids = info->stripeIdentifier[stripeID % raidPtr->numCol];
}
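/*
 * Note: the layout repeats every numCol stripes, so stripeID % numCol
 * selects the matching pre-computed row of the table built in
 * rf_ConfigureRAID5.
 */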

void
rf_MapSIDToPSIDRAID5(
    RF_RaidLayout_t * layoutPtr,
    RF_StripeNum_t stripeID,
    RF_StripeNum_t * psID,
    RF_ReconUnitNum_t * which_ru)
{
	*which_ru = 0;
	*psID = stripeID;
}
/* select an algorithm for performing an access.  Sets *createFunc to point
 * at the function that will create the DAG for the access, or to NULL if no
 * suitable DAG creation routine exists.
 */
void
rf_RaidFiveDagSelect(
    RF_Raid_t * raidPtr,
    RF_IoType_t type,
    RF_AccessStripeMap_t * asmap,
    RF_VoidFuncPtr * createFunc)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_PhysDiskAddr_t *failedPDA = NULL;
	RF_RowCol_t frow, fcol;
	RF_RowStatus_t rstat;
	int     prior_recon;

	RF_ASSERT(RF_IO_IS_R_OR_W(type));

	if (asmap->numDataFailed + asmap->numParityFailed > 1) {
		RF_ERRORMSG("Multiple disks failed in a single group!  Aborting I/O operation.\n");
		 /* *infoFunc = */ *createFunc = NULL;
		return;
	} else
		if (asmap->numDataFailed + asmap->numParityFailed == 1) {

			/* if under recon & already reconstructed, redirect
			 * the access to the spare drive and eliminate the
			 * failure indication */
			failedPDA = asmap->failedPDAs[0];
			frow = failedPDA->row;
			fcol = failedPDA->col;
			rstat = raidPtr->status[failedPDA->row];
			prior_recon = (rstat == rf_rs_reconfigured) || (
			    (rstat == rf_rs_reconstructing) ?
			    rf_CheckRUReconstructed(raidPtr->reconControl[frow]->reconMap, failedPDA->startSector) : 0
			    );
			if (prior_recon) {
				RF_RowCol_t or = failedPDA->row, oc = failedPDA->col;
				RF_SectorNum_t oo = failedPDA->startSector;

				if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {	/* redirect to dist
											 * spare space */

					if (failedPDA == asmap->parityInfo) {

						/* parity has failed */
						(layoutPtr->map->MapParity) (raidPtr, failedPDA->raidAddress, &failedPDA->row,
						    &failedPDA->col, &failedPDA->startSector, RF_REMAP);

						if (asmap->parityInfo->next) {	/* redir 2nd component,
										 * if any */
							RF_PhysDiskAddr_t *p = asmap->parityInfo->next;
							RF_SectorNum_t SUoffs = p->startSector % layoutPtr->sectorsPerStripeUnit;
							p->row = failedPDA->row;
							p->col = failedPDA->col;
							p->startSector = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, failedPDA->startSector) +
							    SUoffs;	/* cheating:
									 * startSector is not
									 * really a RAID address */
						}
					} else
						if (asmap->parityInfo->next && failedPDA == asmap->parityInfo->next) {
							RF_ASSERT(0);	/* should not ever
									 * happen */
						} else {

							/* data has failed */
							(layoutPtr->map->MapSector) (raidPtr, failedPDA->raidAddress, &failedPDA->row,
							    &failedPDA->col, &failedPDA->startSector, RF_REMAP);

						}

				} else {	/* redirect to dedicated spare
						 * space */

					failedPDA->row = raidPtr->Disks[frow][fcol].spareRow;
					failedPDA->col = raidPtr->Disks[frow][fcol].spareCol;

					/* the parity may have two distinct
					 * components, both of which may need
					 * to be redirected */
					if (asmap->parityInfo->next) {
						if (failedPDA == asmap->parityInfo) {
							failedPDA->next->row = failedPDA->row;
							failedPDA->next->col = failedPDA->col;
						} else
							if (failedPDA == asmap->parityInfo->next) {	/* paranoid:  should
													 * never occur */
								asmap->parityInfo->row = failedPDA->row;
								asmap->parityInfo->col = failedPDA->col;
							}
					}
				}

				RF_ASSERT(failedPDA->col != -1);

				if (rf_dagDebug || rf_mapDebug) {
					printf("raid%d: Redirected type '%c' r %d c %d o %ld -> r %d c %d o %ld\n",
					       raidPtr->raidid, type, or, oc,
					       (long) oo, failedPDA->row,
					       failedPDA->col,
					       (long) failedPDA->startSector);
				}
				asmap->numDataFailed = asmap->numParityFailed = 0;
			}
		}
	/* All dags begin/end with a block/unblock node; therefore, the hdrSucc
	 * and termAnt counts should always be 1.  Also, these counts should
	 * not be visible outside the dag creation routines, so manipulating
	 * the counts here should be removed. */
	if (type == RF_IO_TYPE_READ) {
		if (asmap->numDataFailed == 0)
			*createFunc = (RF_VoidFuncPtr) rf_CreateFaultFreeReadDAG;
		else
			*createFunc = (RF_VoidFuncPtr) rf_CreateRaidFiveDegradedReadDAG;
	} else {

		/* If the access requires two distinct parity updates, always
		 * do a small write.  If the stripe contains a failure but the
		 * access does not, do a small write.  The first conditional
		 * (numStripeUnitsAccessed <= numDataCol/2) uses
		 * less-than-or-equal rather than just less-than because when
		 * G is 3 or 4, numDataCol/2 is 1, and I want
		 * single-stripe-unit updates to use just one disk. */
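		/*
		 * Illustrative example: assuming a hypothetical numDataCol of
		 * 4, the threshold below is 4 / 2 == 2, so an access touching
		 * 1 or 2 stripe units takes the small-write
		 * (read-modify-write) path, while one touching 3 or 4 units
		 * takes the large-write (reconstruct-write) path, unless
		 * large writes are suppressed, the parity update spans two
		 * pieces, or the stripe already contains a failure.
		 */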
		if ((asmap->numDataFailed + asmap->numParityFailed) == 0) {
			if (rf_suppressLocksAndLargeWrites ||
			    (((asmap->numStripeUnitsAccessed <= (layoutPtr->numDataCol / 2)) && (layoutPtr->numDataCol != 1)) ||
				(asmap->parityInfo->next != NULL) || rf_CheckStripeForFailures(raidPtr, asmap))) {
				*createFunc = (RF_VoidFuncPtr) rf_CreateSmallWriteDAG;
			} else
				*createFunc = (RF_VoidFuncPtr) rf_CreateLargeWriteDAG;
		} else {
			if (asmap->numParityFailed == 1)
				*createFunc = (RF_VoidFuncPtr) rf_CreateNonRedundantWriteDAG;
			else
				if (asmap->numStripeUnitsAccessed != 1 && failedPDA->numSector != layoutPtr->sectorsPerStripeUnit)
					*createFunc = NULL;
				else
					*createFunc = (RF_VoidFuncPtr) rf_CreateDegradedWriteDAG;
		}
	}
}