xref: /netbsd-src/sys/dev/raidframe/rf_raid5.c (revision 23c8222edbfb0f0932d88a8351d3a0cf817dfb9e)
1 /*	$NetBSD: rf_raid5.c,v 1.14 2004/03/05 03:58:21 oster Exp $	*/
2 /*
3  * Copyright (c) 1995 Carnegie-Mellon University.
4  * All rights reserved.
5  *
6  * Author: Mark Holland
7  *
8  * Permission to use, copy, modify and distribute this software and
9  * its documentation is hereby granted, provided that both the copyright
10  * notice and this permission notice appear in all copies of the
11  * software, derivative works or modified versions, and any portions
12  * thereof, and that both notices appear in supporting documentation.
13  *
14  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17  *
18  * Carnegie Mellon requests users of this software to return to
19  *
20  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
21  *  School of Computer Science
22  *  Carnegie Mellon University
23  *  Pittsburgh PA 15213-3890
24  *
25  * any improvements or extensions that they make and grant Carnegie the
26  * rights to redistribute these changes.
27  */
28 
29 /******************************************************************************
30  *
31  * rf_raid5.c -- implements RAID Level 5
32  *
33  *****************************************************************************/
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: rf_raid5.c,v 1.14 2004/03/05 03:58:21 oster Exp $");
37 
38 #include <dev/raidframe/raidframevar.h>
39 
40 #include "rf_raid.h"
41 #include "rf_raid5.h"
42 #include "rf_dag.h"
43 #include "rf_dagffrd.h"
44 #include "rf_dagffwr.h"
45 #include "rf_dagdegrd.h"
46 #include "rf_dagdegwr.h"
47 #include "rf_dagutils.h"
48 #include "rf_general.h"
49 #include "rf_map.h"
50 #include "rf_utils.h"
51 
typedef struct RF_Raid5ConfigInfo_s {
	RF_RowCol_t **stripeIdentifier;	/* 2-D table, one row per stripe
					 * position: row s lists the disks of
					 * stripe s in stripe order.  Filled
					 * in at config time and used by
					 * rf_IdentifyStripeRAID5. */
}       RF_Raid5ConfigInfo_t;
56 
57 int
58 rf_ConfigureRAID5(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
59 		  RF_Config_t *cfgPtr)
60 {
61 	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
62 	RF_Raid5ConfigInfo_t *info;
63 	RF_RowCol_t i, j, startdisk;
64 
65 	/* create a RAID level 5 configuration structure */
66 	RF_MallocAndAdd(info, sizeof(RF_Raid5ConfigInfo_t), (RF_Raid5ConfigInfo_t *), raidPtr->cleanupList);
67 	if (info == NULL)
68 		return (ENOMEM);
69 	layoutPtr->layoutSpecificInfo = (void *) info;
70 
71 	/* the stripe identifier must identify the disks in each stripe, IN
72 	 * THE ORDER THAT THEY APPEAR IN THE STRIPE. */
73 	info->stripeIdentifier = rf_make_2d_array(raidPtr->numCol, raidPtr->numCol, raidPtr->cleanupList);
74 	if (info->stripeIdentifier == NULL)
75 		return (ENOMEM);
76 	startdisk = 0;
77 	for (i = 0; i < raidPtr->numCol; i++) {
78 		for (j = 0; j < raidPtr->numCol; j++) {
79 			info->stripeIdentifier[i][j] = (startdisk + j) % raidPtr->numCol;
80 		}
81 		if ((--startdisk) < 0)
82 			startdisk = raidPtr->numCol - 1;
83 	}
84 
85 	/* fill in the remaining layout parameters */
86 	layoutPtr->numStripe = layoutPtr->stripeUnitsPerDisk;
87 	layoutPtr->numDataCol = raidPtr->numCol - 1;
88 	layoutPtr->dataSectorsPerStripe = layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;
89 	layoutPtr->numParityCol = 1;
90 	layoutPtr->dataStripeUnitsPerDisk = layoutPtr->stripeUnitsPerDisk;
91 
92 	raidPtr->totalSectors = layoutPtr->stripeUnitsPerDisk * layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;
93 
94 	return (0);
95 }
96 
97 int
98 rf_GetDefaultNumFloatingReconBuffersRAID5(RF_Raid_t *raidPtr)
99 {
100 	return (20);
101 }
102 
103 RF_HeadSepLimit_t
104 rf_GetDefaultHeadSepLimitRAID5(RF_Raid_t *raidPtr)
105 {
106 	return (10);
107 }
108 #if !defined(__NetBSD__) && !defined(_KERNEL)
109 /* not currently used */
110 int
111 rf_ShutdownRAID5(RF_Raid_t *raidPtr)
112 {
113 	return (0);
114 }
115 #endif
116 
117 void
118 rf_MapSectorRAID5(RF_Raid_t *raidPtr, RF_RaidAddr_t raidSector,
119 		  RF_RowCol_t *col, RF_SectorNum_t *diskSector, int remap)
120 {
121 	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;
122 	*col = (SUID % raidPtr->numCol);
123 	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
124 	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
125 }
126 
127 void
128 rf_MapParityRAID5(RF_Raid_t *raidPtr, RF_RaidAddr_t raidSector,
129 		  RF_RowCol_t *col, RF_SectorNum_t *diskSector, int remap)
130 {
131 	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;
132 
133 	*col = raidPtr->Layout.numDataCol - (SUID / raidPtr->Layout.numDataCol) % raidPtr->numCol;
134 	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
135 	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
136 }
137 
138 void
139 rf_IdentifyStripeRAID5(RF_Raid_t *raidPtr, RF_RaidAddr_t addr,
140 		       RF_RowCol_t **diskids)
141 {
142 	RF_StripeNum_t stripeID = rf_RaidAddressToStripeID(&raidPtr->Layout, addr);
143 	RF_Raid5ConfigInfo_t *info = (RF_Raid5ConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
144 
145 	*diskids = info->stripeIdentifier[stripeID % raidPtr->numCol];
146 }
147 
148 void
149 rf_MapSIDToPSIDRAID5(RF_RaidLayout_t *layoutPtr, RF_StripeNum_t stripeID,
150 		     RF_StripeNum_t *psID, RF_ReconUnitNum_t *which_ru)
151 {
152 	*which_ru = 0;
153 	*psID = stripeID;
154 }
/* Select an algorithm for performing an access.  Returns, via *createFunc,
 * a pointer to the function that will create the DAG for the access.
 * *createFunc is set to NULL when the access cannot be performed (multiple
 * failures in the stripe, or a degraded write the code cannot handle).
 *
 * If exactly one unit in the stripe has failed but it has already been
 * reconstructed (to a distributed or dedicated spare), the failed PDA is
 * redirected in place to the spare location and the failure counts in the
 * asmap are cleared before the DAG-creation function is chosen.
 */
void
rf_RaidFiveDagSelect(RF_Raid_t *raidPtr, RF_IoType_t type,
		     RF_AccessStripeMap_t *asmap,
		     RF_VoidFuncPtr *createFunc)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_PhysDiskAddr_t *failedPDA = NULL;
	RF_RowCol_t fcol;
	RF_RowStatus_t rstat;
	int     prior_recon;

	RF_ASSERT(RF_IO_IS_R_OR_W(type));

	/* RAID 5 tolerates a single failure; with two or more the access
	 * is aborted by returning a NULL creation function. */
	if ((asmap->numDataFailed + asmap->numParityFailed > 1) ||
	    (raidPtr->numFailures > 1)){
#if RF_DEBUG_DAG
		if (rf_dagDebug)
			RF_ERRORMSG("Multiple disks failed in a single group!  Aborting I/O operation.\n");
#endif
		*createFunc = NULL;
		return;
	}

	if (asmap->numDataFailed + asmap->numParityFailed == 1) {

		/* if under recon & already reconstructed, redirect
		 * the access to the spare drive and eliminate the
		 * failure indication */
		failedPDA = asmap->failedPDAs[0];
		fcol = failedPDA->col;
		rstat = raidPtr->status;
		prior_recon = (rstat == rf_rs_reconfigured) || (
			    (rstat == rf_rs_reconstructing) ?
			    rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, failedPDA->startSector) : 0
			    );
		if (prior_recon) {
			/* Remember the original location only for the debug
			 * printf below. */
#if RF_DEBUG_DAG > 0 || RF_DEBUG_MAP > 0
			RF_RowCol_t oc = failedPDA->col;
			RF_SectorNum_t oo = failedPDA->startSector;
#endif
#if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
			if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {	/* redirect to dist
										 * spare space */

				if (failedPDA == asmap->parityInfo) {

					/* parity has failed */
					(layoutPtr->map->MapParity) (raidPtr, failedPDA->raidAddress,
								     &failedPDA->col, &failedPDA->startSector, RF_REMAP);

					if (asmap->parityInfo->next) {	/* redir 2nd component,
									 * if any */
						RF_PhysDiskAddr_t *p = asmap->parityInfo->next;
						RF_SectorNum_t SUoffs = p->startSector % layoutPtr->sectorsPerStripeUnit;
						p->col = failedPDA->col;
						p->startSector = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, failedPDA->startSector) +
							SUoffs;	/* cheating:
								 * startSector is not
								 * really a RAID address */
					}
				} else
					if (asmap->parityInfo->next && failedPDA == asmap->parityInfo->next) {
						RF_ASSERT(0);	/* should not ever
								 * happen */
					} else {

						/* data has failed */
						(layoutPtr->map->MapSector) (raidPtr, failedPDA->raidAddress,
									     &failedPDA->col, &failedPDA->startSector, RF_REMAP);

					}

			} else {
#endif
				/* redirect to dedicated spare space */

				failedPDA->col = raidPtr->Disks[fcol].spareCol;

				/* the parity may have two distinct
				 * components, both of which may need
				 * to be redirected */
				if (asmap->parityInfo->next) {
					if (failedPDA == asmap->parityInfo) {
						failedPDA->next->col = failedPDA->col;
					} else
						if (failedPDA == asmap->parityInfo->next) {	/* paranoid:  should
												 * never occur */
							asmap->parityInfo->col = failedPDA->col;
						}
				}
#if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
			}
#endif
			RF_ASSERT(failedPDA->col != -1);

#if RF_DEBUG_DAG > 0 || RF_DEBUG_MAP > 0
			if (rf_dagDebug || rf_mapDebug) {
				printf("raid%d: Redirected type '%c' c %d o %ld -> c %d o %ld\n",
				       raidPtr->raidid, type, oc,
				       (long) oo, failedPDA->col,
				       (long) failedPDA->startSector);
			}
#endif
			/* The access now targets live (spare) space, so the
			 * stripe is treated as fault-free below. */
			asmap->numDataFailed = asmap->numParityFailed = 0;
		}
	}
	/* all dags begin/end with block/unblock node therefore, hdrSucc &
	 * termAnt counts should always be 1 also, these counts should not be
	 * visible outside dag creation routines - manipulating the counts
	 * here should be removed */
	if (type == RF_IO_TYPE_READ) {
		if (asmap->numDataFailed == 0)
			*createFunc = (RF_VoidFuncPtr) rf_CreateFaultFreeReadDAG;
		else
			*createFunc = (RF_VoidFuncPtr) rf_CreateRaidFiveDegradedReadDAG;
	} else {


		/* if mirroring, always use large writes.  If the access
		 * requires two distinct parity updates, always do a small
		 * write.  If the stripe contains a failure but the access
		 * does not, do a small write. The first conditional
		 * (numStripeUnitsAccessed <= numDataCol/2) uses a
		 * less-than-or-equal rather than just a less-than because
		 * when G is 3 or 4, numDataCol/2 is 1, and I want
		 * single-stripe-unit updates to use just one disk. */
		if ((asmap->numDataFailed + asmap->numParityFailed) == 0) {
			if (rf_suppressLocksAndLargeWrites ||
			    (((asmap->numStripeUnitsAccessed <= (layoutPtr->numDataCol / 2)) && (layoutPtr->numDataCol != 1)) ||
				(asmap->parityInfo->next != NULL) || rf_CheckStripeForFailures(raidPtr, asmap))) {
				*createFunc = (RF_VoidFuncPtr) rf_CreateSmallWriteDAG;
			} else
				*createFunc = (RF_VoidFuncPtr) rf_CreateLargeWriteDAG;
		} else {
			/* Degraded write: parity-only failure degenerates to
			 * a plain non-redundant write; a data failure can be
			 * handled only for single-unit or full-stripe-unit
			 * accesses. */
			if (asmap->numParityFailed == 1)
				*createFunc = (RF_VoidFuncPtr) rf_CreateNonRedundantWriteDAG;
			else
				if (asmap->numStripeUnitsAccessed != 1 && failedPDA->numSector != layoutPtr->sectorsPerStripeUnit)
					*createFunc = NULL;
				else
					*createFunc = (RF_VoidFuncPtr) rf_CreateDegradedWriteDAG;
		}
	}
}
303