/*	$NetBSD: rf_raid5.c,v 1.11 2004/01/02 21:41:08 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/******************************************************************************
 *
 * rf_raid5.c -- implements RAID Level 5
 *
 *****************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_raid5.c,v 1.11 2004/01/02 21:41:08 oster Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_raid5.h"
#include "rf_dag.h"
#include "rf_dagffrd.h"
#include "rf_dagffwr.h"
#include "rf_dagdegrd.h"
#include "rf_dagdegwr.h"
#include "rf_dagutils.h"
#include "rf_general.h"
#include "rf_map.h"
#include "rf_utils.h"

typedef struct RF_Raid5ConfigInfo_s {
	RF_RowCol_t **stripeIdentifier;	/* filled in at config time and used
					 * by IdentifyStripe */
}       RF_Raid5ConfigInfo_t;

int
rf_ConfigureRAID5(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
		  RF_Config_t *cfgPtr)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_Raid5ConfigInfo_t *info;
	RF_RowCol_t i, j, startdisk;

	/* create a RAID level 5 configuration structure */
	RF_MallocAndAdd(info, sizeof(RF_Raid5ConfigInfo_t), (RF_Raid5ConfigInfo_t *), raidPtr->cleanupList);
	if (info == NULL)
		return (ENOMEM);
	layoutPtr->layoutSpecificInfo = (void *) info;

	/* the stripe identifier must identify the disks in each stripe, IN
	 * THE ORDER THAT THEY APPEAR IN THE STRIPE. */
	info->stripeIdentifier = rf_make_2d_array(raidPtr->numCol, raidPtr->numCol, raidPtr->cleanupList);
	if (info->stripeIdentifier == NULL)
		return (ENOMEM);
	startdisk = 0;
	for (i = 0; i < raidPtr->numCol; i++) {
		for (j = 0; j < raidPtr->numCol; j++) {
			info->stripeIdentifier[i][j] = (startdisk + j) % raidPtr->numCol;
		}
		if ((--startdisk) < 0)
			startdisk = raidPtr->numCol - 1;
	}
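	/*
	 * For illustration (assumed example value, numCol = 4): the loop
	 * above builds the following stripeIdentifier table, where row i is
	 * used for every stripe with stripeID % numCol == i and lists that
	 * stripe's disks in the order they appear in the stripe:
	 *
	 *	row 0: 0 1 2 3
	 *	row 1: 3 0 1 2
	 *	row 2: 2 3 0 1
	 *	row 3: 1 2 3 0
	 *
	 * i.e. each successive row is rotated by one column.
	 */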

	/* fill in the remaining layout parameters */
	layoutPtr->numStripe = layoutPtr->stripeUnitsPerDisk;
	layoutPtr->numDataCol = raidPtr->numCol - 1;
	layoutPtr->dataSectorsPerStripe = layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->numParityCol = 1;
	layoutPtr->dataStripeUnitsPerDisk = layoutPtr->stripeUnitsPerDisk;

	raidPtr->totalSectors = layoutPtr->stripeUnitsPerDisk * layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;

	return (0);
}

int
rf_GetDefaultNumFloatingReconBuffersRAID5(RF_Raid_t *raidPtr)
{
	return (20);
}

RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimitRAID5(RF_Raid_t *raidPtr)
{
	return (10);
}
#if !defined(__NetBSD__) && !defined(_KERNEL)
/* not currently used */
int
rf_ShutdownRAID5(RF_Raid_t *raidPtr)
{
	return (0);
}
#endif

void
rf_MapSectorRAID5(RF_Raid_t *raidPtr, RF_RaidAddr_t raidSector,
		  RF_RowCol_t *col, RF_SectorNum_t *diskSector, int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;
	*col = (SUID % raidPtr->numCol);
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}
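/*
 * Worked example (illustrative values, not taken from the source): with
 * numCol = 4, numDataCol = 3 and sectorsPerStripeUnit = 32, a RAID address
 * of 100 gives SUID = 100 / 32 = 3, so *col = 3 % 4 = 3 and
 * *diskSector = (3 / 3) * 32 + (100 % 32) = 36.
 */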

void
rf_MapParityRAID5(RF_Raid_t *raidPtr, RF_RaidAddr_t raidSector,
		  RF_RowCol_t *col, RF_SectorNum_t *diskSector, int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;

	*col = raidPtr->Layout.numDataCol - (SUID / raidPtr->Layout.numDataCol) % raidPtr->numCol;
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}
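/*
 * Continuing the illustrative example above (numCol = 4, numDataCol = 3,
 * sectorsPerStripeUnit = 32, raidSector = 100): SUID = 3 lies in stripe
 * 3 / 3 = 1, so *col = 3 - (1 % 4) = 2, one column to the left of where
 * stripe 0 keeps its parity, i.e. the parity column rotates across the
 * array from stripe to stripe.
 */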

void
rf_IdentifyStripeRAID5(RF_Raid_t *raidPtr, RF_RaidAddr_t addr,
		       RF_RowCol_t **diskids)
{
	RF_StripeNum_t stripeID = rf_RaidAddressToStripeID(&raidPtr->Layout, addr);
	RF_Raid5ConfigInfo_t *info = (RF_Raid5ConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;

	*diskids = info->stripeIdentifier[stripeID % raidPtr->numCol];
}
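/*
 * Note: the lookup above simply indexes the stripeIdentifier table built in
 * rf_ConfigureRAID5(); since the rotation pattern repeats every numCol
 * stripes, stripeID % numCol selects the correct row.
 */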

void
rf_MapSIDToPSIDRAID5(RF_RaidLayout_t *layoutPtr, RF_StripeNum_t stripeID,
		     RF_StripeNum_t *psID, RF_ReconUnitNum_t *which_ru)
{
	*which_ru = 0;
	*psID = stripeID;
}
/* Select an algorithm for performing an access.  Returns (through
 * createFunc) a pointer to a function that will create the DAG for the
 * access, or NULL if no suitable DAG creation function exists.
 */
void
rf_RaidFiveDagSelect(RF_Raid_t *raidPtr, RF_IoType_t type,
		     RF_AccessStripeMap_t *asmap,
		     RF_VoidFuncPtr *createFunc)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_PhysDiskAddr_t *failedPDA = NULL;
	RF_RowCol_t fcol;
	RF_RowStatus_t rstat;
	int     prior_recon;

	RF_ASSERT(RF_IO_IS_R_OR_W(type));

	if ((asmap->numDataFailed + asmap->numParityFailed > 1) ||
	    (raidPtr->numFailures > 1)){
		if (rf_dagDebug)
			RF_ERRORMSG("Multiple disks failed in a single group!  Aborting I/O operation.\n");
		*createFunc = NULL;
		return;
	} else
		if (asmap->numDataFailed + asmap->numParityFailed == 1) {

			/* if under recon & already reconstructed, redirect
			 * the access to the spare drive and eliminate the
			 * failure indication */
			failedPDA = asmap->failedPDAs[0];
			fcol = failedPDA->col;
			rstat = raidPtr->status;
			prior_recon = (rstat == rf_rs_reconfigured) || (
			    (rstat == rf_rs_reconstructing) ?
			    rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, failedPDA->startSector) : 0
			    );
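			/* prior_recon is nonzero when the failed unit's data
			 * is already available elsewhere: either the array
			 * has been reconfigured around the failure
			 * (rf_rs_reconfigured), or reconstruction is in
			 * progress and this reconstruction unit has already
			 * been rebuilt. */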
			if (prior_recon) {
				RF_RowCol_t oc = failedPDA->col;
				RF_SectorNum_t oo = failedPDA->startSector;

				if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {	/* redirect to dist
											 * spare space */

					if (failedPDA == asmap->parityInfo) {

						/* parity has failed */
						(layoutPtr->map->MapParity) (raidPtr, failedPDA->raidAddress,
						    &failedPDA->col, &failedPDA->startSector, RF_REMAP);

						if (asmap->parityInfo->next) {	/* redir 2nd component,
										 * if any */
							RF_PhysDiskAddr_t *p = asmap->parityInfo->next;
							RF_SectorNum_t SUoffs = p->startSector % layoutPtr->sectorsPerStripeUnit;
							p->col = failedPDA->col;
							p->startSector = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, failedPDA->startSector) +
							    SUoffs;	/* cheating:
									 * startSector is not
									 * really a RAID address */
						}
					} else
						if (asmap->parityInfo->next && failedPDA == asmap->parityInfo->next) {
							RF_ASSERT(0);	/* should not ever
									 * happen */
						} else {

							/* data has failed */
							(layoutPtr->map->MapSector) (raidPtr, failedPDA->raidAddress,
							    &failedPDA->col, &failedPDA->startSector, RF_REMAP);

						}

				} else {	/* redirect to dedicated spare
						 * space */

					failedPDA->col = raidPtr->Disks[fcol].spareCol;

					/* the parity may have two distinct
					 * components, both of which may need
					 * to be redirected */
					if (asmap->parityInfo->next) {
						if (failedPDA == asmap->parityInfo) {
							failedPDA->next->col = failedPDA->col;
						} else
							if (failedPDA == asmap->parityInfo->next) {	/* paranoid:  should
													 * never occur */
								asmap->parityInfo->col = failedPDA->col;
							}
					}
				}

				RF_ASSERT(failedPDA->col != -1);

				if (rf_dagDebug || rf_mapDebug) {
					printf("raid%d: Redirected type '%c' c %d o %ld -> c %d o %ld\n",
					       raidPtr->raidid, type, oc,
					       (long) oo, failedPDA->col,
					       (long) failedPDA->startSector);
				}
				asmap->numDataFailed = asmap->numParityFailed = 0;
			}
		}
	/* All DAGs begin and end with a block/unblock node, so the hdrSucc
	 * and termAnt counts should always be 1.  Also, these counts should
	 * not be visible outside the DAG creation routines; manipulating the
	 * counts here should be removed. */
	if (type == RF_IO_TYPE_READ) {
		if (asmap->numDataFailed == 0)
			*createFunc = (RF_VoidFuncPtr) rf_CreateFaultFreeReadDAG;
		else
			*createFunc = (RF_VoidFuncPtr) rf_CreateRaidFiveDegradedReadDAG;
	} else {

		/* If mirroring, always use large writes.  If the access
		 * requires two distinct parity updates, always do a small
		 * write.  If the stripe contains a failure but the access
		 * does not, do a small write.  The first conditional
		 * (numStripeUnitsAccessed <= numDataCol/2) uses
		 * less-than-or-equal rather than less-than because when G
		 * (the group size) is 3 or 4, numDataCol/2 is 1, and we want
		 * single-stripe-unit updates to use just one disk. */
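		/* For example (illustrative number): with numDataCol = 4 the
		 * threshold numDataCol/2 is 2, so an access touching one or
		 * two stripe units takes the small-write path, while an
		 * access touching three or four stripe units (and meeting
		 * none of the other small-write conditions below) takes the
		 * large-write path. */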
		if ((asmap->numDataFailed + asmap->numParityFailed) == 0) {
			if (rf_suppressLocksAndLargeWrites ||
			    (((asmap->numStripeUnitsAccessed <= (layoutPtr->numDataCol / 2)) && (layoutPtr->numDataCol != 1)) ||
				(asmap->parityInfo->next != NULL) || rf_CheckStripeForFailures(raidPtr, asmap))) {
				*createFunc = (RF_VoidFuncPtr) rf_CreateSmallWriteDAG;
			} else
				*createFunc = (RF_VoidFuncPtr) rf_CreateLargeWriteDAG;
		} else {
			if (asmap->numParityFailed == 1)
				*createFunc = (RF_VoidFuncPtr) rf_CreateNonRedundantWriteDAG;
			else
				if (asmap->numStripeUnitsAccessed != 1 && failedPDA->numSector != layoutPtr->sectorsPerStripeUnit)
					*createFunc = NULL;
				else
					*createFunc = (RF_VoidFuncPtr) rf_CreateDegradedWriteDAG;
		}
	}
}
294