xref: /netbsd-src/sys/dev/raidframe/rf_map.c (revision 2cf3739a9f3a821fe3c5b66ce46809392a3766a9)
1 /*	$NetBSD: rf_map.c,v 1.51 2021/07/23 00:54:45 oster Exp $	*/
2 /*
3  * Copyright (c) 1995 Carnegie-Mellon University.
4  * All rights reserved.
5  *
6  * Author: Mark Holland
7  *
8  * Permission to use, copy, modify and distribute this software and
9  * its documentation is hereby granted, provided that both the copyright
10  * notice and this permission notice appear in all copies of the
11  * software, derivative works or modified versions, and any portions
12  * thereof, and that both notices appear in supporting documentation.
13  *
14  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17  *
18  * Carnegie Mellon requests users of this software to return to
19  *
20  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
21  *  School of Computer Science
22  *  Carnegie Mellon University
23  *  Pittsburgh PA 15213-3890
24  *
25  * any improvements or extensions that they make and grant Carnegie the
26  * rights to redistribute these changes.
27  */
28 
29 /**************************************************************************
30  *
31  * map.c -- main code for mapping RAID addresses to physical disk addresses
32  *
33  **************************************************************************/
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: rf_map.c,v 1.51 2021/07/23 00:54:45 oster Exp $");
37 
38 #include <dev/raidframe/raidframevar.h>
39 
40 #include "rf_threadstuff.h"
41 #include "rf_raid.h"
42 #include "rf_general.h"
43 #include "rf_map.h"
44 #include "rf_shutdown.h"
45 
46 static void rf_FreePDAList(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *pda_list);
47 static void rf_FreeASMList(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asm_list);
48 
49 /***************************************************************************
50  *
51  * MapAccess -- main 1st order mapping routine.  Maps an access in the
52  * RAID address space to the corresponding set of physical disk
53  * addresses.  The result is returned as a list of AccessStripeMap
54  * structures, one per stripe accessed.  Each ASM structure contains a
55  * pointer to a list of PhysDiskAddr structures, which describe the
56  * physical locations touched by the user access.  Note that this
57  * routine returns only static mapping information, i.e. the list of
58  * physical addresses returned does not necessarily identify the set
59  * of physical locations that will actually be read or written.  The
60  * routine also maps the parity.  The physical disk location returned
61  * always indicates the entire parity unit, even when only a subset of
62  * it is being accessed.  This is because an access that is not stripe
63  * unit aligned but that spans a stripe unit boundary may require
64  * access to two distinct portions of the parity unit, and we can't yet
65  * tell which portion(s) we'll actually need.  We leave it up to the
66  * algorithm selection code to decide what subset of the parity unit
67  * to access.  Note that addresses in the RAID address space must
68  * always be maintained as longs, instead of ints.
69  *
70  * This routine returns NULL if numBlocks is 0
71  *
72  * raidAddress - starting address in RAID address space
73  * numBlocks   - number of blocks in RAID address space to access
74  * buffer      - buffer to supply/receive data
75  * remap       - 1 => remap address to spare space
76  ***************************************************************************/
77 
78 RF_AccessStripeMapHeader_t *
rf_MapAccess(RF_Raid_t * raidPtr,RF_RaidAddr_t raidAddress,RF_SectorCount_t numBlocks,void * buffer,int remap)79 rf_MapAccess(RF_Raid_t *raidPtr, RF_RaidAddr_t raidAddress,
80 	     RF_SectorCount_t numBlocks, void *buffer, int remap)
81 {
82 	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
83 	RF_AccessStripeMapHeader_t *asm_hdr = NULL;
84 	RF_AccessStripeMap_t *asm_list = NULL, *asm_p = NULL;
85 	int     faultsTolerated = layoutPtr->map->faultsTolerated;
86 	/* we'll change raidAddress along the way */
87 	RF_RaidAddr_t startAddress = raidAddress;
88 	RF_RaidAddr_t endAddress = raidAddress + numBlocks;
89 	RF_RaidDisk_t *disks = raidPtr->Disks;
90 	RF_PhysDiskAddr_t *pda_p;
91 #if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0)
92 	RF_PhysDiskAddr_t *pda_q;
93 #endif
94 	RF_StripeCount_t numStripes = 0;
95 	RF_RaidAddr_t stripeRealEndAddress, stripeEndAddress,
96 		nextStripeUnitAddress;
97 	RF_RaidAddr_t startAddrWithinStripe, lastRaidAddr;
98 	RF_StripeCount_t totStripes;
99 	RF_StripeNum_t stripeID, lastSID, SUID, lastSUID;
100 	RF_AccessStripeMap_t *asmList, *t_asm;
101 	RF_PhysDiskAddr_t *pdaList, *t_pda;
102 
103 	/* allocate all the ASMs and PDAs up front */
104 	lastRaidAddr = raidAddress + numBlocks - 1;
105 	stripeID = rf_RaidAddressToStripeID(layoutPtr, raidAddress);
106 	lastSID = rf_RaidAddressToStripeID(layoutPtr, lastRaidAddr);
107 	totStripes = lastSID - stripeID + 1;
108 	SUID = rf_RaidAddressToStripeUnitID(layoutPtr, raidAddress);
109 	lastSUID = rf_RaidAddressToStripeUnitID(layoutPtr, lastRaidAddr);
110 
111 	asmList = rf_AllocASMList(raidPtr, totStripes);
112 
113 	/* may also need pda(s) per stripe for parity */
114 	pdaList = rf_AllocPDAList(raidPtr, lastSUID - SUID + 1 +
115 				  faultsTolerated * totStripes);
116 
117 
118 	if (raidAddress + numBlocks > raidPtr->totalSectors) {
119 		RF_ERRORMSG1("Unable to map access because offset (%d) was invalid\n",
120 		    (int) raidAddress);
121 		return (NULL);
122 	}
123 #if RF_DEBUG_MAP
124 	if (rf_mapDebug)
125 		rf_PrintRaidAddressInfo(raidPtr, raidAddress, numBlocks);
126 #endif
127 	for (; raidAddress < endAddress;) {
128 		/* make the next stripe structure */
129 		RF_ASSERT(asmList);
130 		t_asm = asmList;
131 		asmList = asmList->next;
132 		memset(t_asm, 0, sizeof(*t_asm));
133 		if (!asm_p)
134 			asm_list = asm_p = t_asm;
135 		else {
136 			asm_p->next = t_asm;
137 			asm_p = asm_p->next;
138 		}
139 		numStripes++;
140 
141 		/* map SUs from current location to the end of the stripe */
142 		asm_p->stripeID =	/* rf_RaidAddressToStripeID(layoutPtr,
143 		        raidAddress) */ stripeID++;
144 		stripeRealEndAddress = rf_RaidAddressOfNextStripeBoundary(layoutPtr, raidAddress);
145 		stripeEndAddress = RF_MIN(endAddress, stripeRealEndAddress);
146 		asm_p->raidAddress = raidAddress;
147 		asm_p->endRaidAddress = stripeEndAddress;
148 
149 		/* map each stripe unit in the stripe */
150 		pda_p = NULL;
151 
152 		/* Raid addr of start of portion of access that is
153                    within this stripe */
154 		startAddrWithinStripe = raidAddress;
155 
156 		for (; raidAddress < stripeEndAddress;) {
157 			RF_ASSERT(pdaList);
158 			t_pda = pdaList;
159 			pdaList = pdaList->next;
160 			memset(t_pda, 0, sizeof(*t_pda));
161 			if (!pda_p)
162 				asm_p->physInfo = pda_p = t_pda;
163 			else {
164 				pda_p->next = t_pda;
165 				pda_p = pda_p->next;
166 			}
167 
168 			pda_p->type = RF_PDA_TYPE_DATA;
169 			(layoutPtr->map->MapSector) (raidPtr, raidAddress,
170 						     &(pda_p->col),
171 						     &(pda_p->startSector),
172 						     remap);
173 
174 			/* mark any failures we find.  failedPDA is
175 			 * don't-care if there is more than one
176 			 * failure */
177 
178 			/* the RAID address corresponding to this
179                            physical diskaddress */
180 			pda_p->raidAddress = raidAddress;
181 			nextStripeUnitAddress = rf_RaidAddressOfNextStripeUnitBoundary(layoutPtr, raidAddress);
182 			pda_p->numSector = RF_MIN(endAddress, nextStripeUnitAddress) - raidAddress;
183 			RF_ASSERT(pda_p->numSector != 0);
184 			rf_ASMCheckStatus(raidPtr, pda_p, asm_p, disks, 0);
185 			pda_p->bufPtr = (char *)buffer + rf_RaidAddressToByte(raidPtr, (raidAddress - startAddress));
186 			asm_p->totalSectorsAccessed += pda_p->numSector;
187 			asm_p->numStripeUnitsAccessed++;
188 
189 			raidAddress = RF_MIN(endAddress, nextStripeUnitAddress);
190 		}
191 
192 		/* Map the parity. At this stage, the startSector and
193 		 * numSector fields for the parity unit are always set
194 		 * to indicate the entire parity unit. We may modify
195 		 * this after mapping the data portion. */
196 		switch (faultsTolerated) {
197 		case 0:
198 			break;
199 		case 1:	/* single fault tolerant */
200 			RF_ASSERT(pdaList);
201 			t_pda = pdaList;
202 			pdaList = pdaList->next;
203 			memset(t_pda, 0, sizeof(*t_pda));
204 			pda_p = asm_p->parityInfo = t_pda;
205 			pda_p->type = RF_PDA_TYPE_PARITY;
206 			(layoutPtr->map->MapParity) (raidPtr, rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, startAddrWithinStripe),
207 			    &(pda_p->col), &(pda_p->startSector), remap);
208 			pda_p->numSector = layoutPtr->sectorsPerStripeUnit;
209 			/* raidAddr may be needed to find unit to redirect to */
210 			pda_p->raidAddress = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, startAddrWithinStripe);
211 			rf_ASMCheckStatus(raidPtr, pda_p, asm_p, disks, 1);
212 			rf_ASMParityAdjust(raidPtr, asm_p->parityInfo, startAddrWithinStripe, endAddress, layoutPtr, asm_p);
213 
214 			break;
215 #if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0)
216 		case 2:	/* two fault tolerant */
217 			RF_ASSERT(pdaList && pdaList->next);
218 			t_pda = pdaList;
219 			pdaList = pdaList->next;
220 			memset(t_pda, 0, sizeof(*t_pda));
221 			pda_p = asm_p->parityInfo = t_pda;
222 			pda_p->type = RF_PDA_TYPE_PARITY;
223 			t_pda = pdaList;
224 			pdaList = pdaList->next;
225 			memset(t_pda, 0, sizeof(*t_pda));
226 			pda_q = asm_p->qInfo = t_pda;
227 			pda_q->type = RF_PDA_TYPE_Q;
228 			(layoutPtr->map->MapParity) (raidPtr, rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, startAddrWithinStripe),
229 			    &(pda_p->col), &(pda_p->startSector), remap);
230 			(layoutPtr->map->MapQ) (raidPtr, rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, startAddrWithinStripe),
231 			    &(pda_q->col), &(pda_q->startSector), remap);
232 			pda_q->numSector = pda_p->numSector = layoutPtr->sectorsPerStripeUnit;
233 			/* raidAddr may be needed to find unit to redirect to */
234 			pda_p->raidAddress = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, startAddrWithinStripe);
235 			pda_q->raidAddress = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, startAddrWithinStripe);
236 			/* failure mode stuff */
237 			rf_ASMCheckStatus(raidPtr, pda_p, asm_p, disks, 1);
238 			rf_ASMCheckStatus(raidPtr, pda_q, asm_p, disks, 1);
239 			rf_ASMParityAdjust(raidPtr, asm_p->parityInfo, startAddrWithinStripe, endAddress, layoutPtr, asm_p);
240 			rf_ASMParityAdjust(raidPtr, asm_p->qInfo, startAddrWithinStripe, endAddress, layoutPtr, asm_p);
241 			break;
242 #endif
243 		}
244 	}
245 	RF_ASSERT(asmList == NULL && pdaList == NULL);
246 	/* make the header structure */
247 	asm_hdr = rf_AllocAccessStripeMapHeader(raidPtr);
248 	RF_ASSERT(numStripes == totStripes);
249 	asm_hdr->numStripes = numStripes;
250 	asm_hdr->stripeMap = asm_list;
251 
252 #if RF_DEBUG_MAP
253 	if (rf_mapDebug)
254 		rf_PrintAccessStripeMap(asm_hdr);
255 #endif
256 	return (asm_hdr);
257 }
258 
259 /***************************************************************************
260  * This routine walks through an ASM list and marks the PDAs that have
261  * failed.  It's called only when a disk failure causes an in-flight
262  * DAG to fail.  The parity may consist of two components, but we want
263  * to use only one failedPDA pointer.  Thus we set failedPDA to point
264  * to the first parity component, and rely on the rest of the code to
265  * do the right thing with this.
266  ***************************************************************************/
267 
268 void
rf_MarkFailuresInASMList(RF_Raid_t * raidPtr,RF_AccessStripeMapHeader_t * asm_h)269 rf_MarkFailuresInASMList(RF_Raid_t *raidPtr,
270 			 RF_AccessStripeMapHeader_t *asm_h)
271 {
272 	RF_RaidDisk_t *disks = raidPtr->Disks;
273 	RF_AccessStripeMap_t *asmap;
274 	RF_PhysDiskAddr_t *pda;
275 
276 	for (asmap = asm_h->stripeMap; asmap; asmap = asmap->next) {
277 		asmap->numDataFailed = 0;
278 		asmap->numParityFailed = 0;
279 		asmap->numQFailed = 0;
280 		asmap->numFailedPDAs = 0;
281 		memset(asmap->failedPDAs, 0,
282 		    RF_MAX_FAILED_PDA * sizeof(*asmap->failedPDAs));
283 		for (pda = asmap->physInfo; pda; pda = pda->next) {
284 			if (RF_DEAD_DISK(disks[pda->col].status)) {
285 				asmap->numDataFailed++;
286 				asmap->failedPDAs[asmap->numFailedPDAs] = pda;
287 				asmap->numFailedPDAs++;
288 			}
289 		}
290 		pda = asmap->parityInfo;
291 		if (pda && RF_DEAD_DISK(disks[pda->col].status)) {
292 			asmap->numParityFailed++;
293 			asmap->failedPDAs[asmap->numFailedPDAs] = pda;
294 			asmap->numFailedPDAs++;
295 		}
296 		pda = asmap->qInfo;
297 		if (pda && RF_DEAD_DISK(disks[pda->col].status)) {
298 			asmap->numQFailed++;
299 			asmap->failedPDAs[asmap->numFailedPDAs] = pda;
300 			asmap->numFailedPDAs++;
301 		}
302 	}
303 }
304 
305 /***************************************************************************
306  *
307  * routines to allocate and free list elements.  All allocation
308  * routines zero the structure before returning it.
309  *
310  * FreePhysDiskAddr is static.  It should never be called directly,
311  * because FreeAccessStripeMap takes care of freeing the PhysDiskAddr
312  * list.
313  *
314  ***************************************************************************/
315 
316 #define RF_MAX_FREE_ASMHDR 128
317 #define RF_MIN_FREE_ASMHDR  32
318 
319 #define RF_MAX_FREE_ASM 192
320 #define RF_MIN_FREE_ASM  64
321 
322 #define RF_MAX_FREE_PDA 192
323 #define RF_MIN_FREE_PDA  64
324 
325 #define RF_MAX_FREE_ASMHLE 64
326 #define RF_MIN_FREE_ASMHLE 16
327 
328 #define RF_MAX_FREE_FSS 128
329 #define RF_MIN_FREE_FSS  32
330 
331 #define RF_MAX_FREE_VFPLE 128
332 #define RF_MIN_FREE_VFPLE  32
333 
334 #define RF_MAX_FREE_VPLE 128
335 #define RF_MIN_FREE_VPLE  32
336 
337 
338 /* called at shutdown time.  So far, all that is necessary is to
339    release all the free lists */
340 static void rf_ShutdownMapModule(void *);
341 static void
rf_ShutdownMapModule(void * arg)342 rf_ShutdownMapModule(void *arg)
343 {
344 	RF_Raid_t *raidPtr;
345 
346 	raidPtr = (RF_Raid_t *) arg;
347 
348 	pool_destroy(&raidPtr->pools.asm_hdr);
349 	pool_destroy(&raidPtr->pools.asmap);
350 	pool_destroy(&raidPtr->pools.asmhle);
351 	pool_destroy(&raidPtr->pools.pda);
352 	pool_destroy(&raidPtr->pools.fss);
353 	pool_destroy(&raidPtr->pools.vfple);
354 	pool_destroy(&raidPtr->pools.vple);
355 }
356 
/*
 * Set up the per-RAID-set pools used by the mapping code and register
 * rf_ShutdownMapModule() on the shutdown list so they are destroyed at
 * unconfigure time.  Always returns 0.
 */
int
rf_ConfigureMapModule(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
		      RF_Config_t *cfgPtr)
{

	/* one pool per structure type; min/max sizes come from the
	 * RF_MIN_FREE_* / RF_MAX_FREE_* constants above */
	rf_pool_init(raidPtr, raidPtr->poolNames.asm_hdr, &raidPtr->pools.asm_hdr, sizeof(RF_AccessStripeMapHeader_t),
		     "asmhdr", RF_MIN_FREE_ASMHDR, RF_MAX_FREE_ASMHDR);
	rf_pool_init(raidPtr, raidPtr->poolNames.asmap, &raidPtr->pools.asmap, sizeof(RF_AccessStripeMap_t),
		     "asmap", RF_MIN_FREE_ASM, RF_MAX_FREE_ASM);
	rf_pool_init(raidPtr, raidPtr->poolNames.asmhle, &raidPtr->pools.asmhle, sizeof(RF_ASMHeaderListElem_t),
		     "asmhle", RF_MIN_FREE_ASMHLE, RF_MAX_FREE_ASMHLE);
	rf_pool_init(raidPtr, raidPtr->poolNames.pda, &raidPtr->pools.pda, sizeof(RF_PhysDiskAddr_t),
		     "pda", RF_MIN_FREE_PDA, RF_MAX_FREE_PDA);
	rf_pool_init(raidPtr, raidPtr->poolNames.fss, &raidPtr->pools.fss, sizeof(RF_FailedStripe_t),
		     "fss", RF_MIN_FREE_FSS, RF_MAX_FREE_FSS);
	rf_pool_init(raidPtr, raidPtr->poolNames.vfple, &raidPtr->pools.vfple, sizeof(RF_VoidFunctionPointerListElem_t),
		     "vfple", RF_MIN_FREE_VFPLE, RF_MAX_FREE_VFPLE);
	rf_pool_init(raidPtr, raidPtr->poolNames.vple, &raidPtr->pools.vple, sizeof(RF_VoidPointerListElem_t),
		     "vple", RF_MIN_FREE_VPLE, RF_MAX_FREE_VPLE);
	rf_ShutdownCreate(listp, rf_ShutdownMapModule, raidPtr);

	return (0);
}
380 
381 RF_AccessStripeMapHeader_t *
rf_AllocAccessStripeMapHeader(RF_Raid_t * raidPtr)382 rf_AllocAccessStripeMapHeader(RF_Raid_t *raidPtr)
383 {
384 	return pool_get(&raidPtr->pools.asm_hdr, PR_WAITOK | PR_ZERO);
385 }
386 
387 void
rf_FreeAccessStripeMapHeader(RF_Raid_t * raidPtr,RF_AccessStripeMapHeader_t * p)388 rf_FreeAccessStripeMapHeader(RF_Raid_t *raidPtr, RF_AccessStripeMapHeader_t *p)
389 {
390 	pool_put(&raidPtr->pools.asm_hdr, p);
391 }
392 
393 
394 RF_VoidFunctionPointerListElem_t *
rf_AllocVFPListElem(RF_Raid_t * raidPtr)395 rf_AllocVFPListElem(RF_Raid_t *raidPtr)
396 {
397 	return pool_get(&raidPtr->pools.vfple, PR_WAITOK | PR_ZERO);
398 }
399 
400 void
rf_FreeVFPListElem(RF_Raid_t * raidPtr,RF_VoidFunctionPointerListElem_t * p)401 rf_FreeVFPListElem(RF_Raid_t *raidPtr, RF_VoidFunctionPointerListElem_t *p)
402 {
403 
404 	pool_put(&raidPtr->pools.vfple, p);
405 }
406 
407 
408 RF_VoidPointerListElem_t *
rf_AllocVPListElem(RF_Raid_t * raidPtr)409 rf_AllocVPListElem(RF_Raid_t *raidPtr)
410 {
411 	return pool_get(&raidPtr->pools.vple, PR_WAITOK | PR_ZERO);
412 }
413 
414 void
rf_FreeVPListElem(RF_Raid_t * raidPtr,RF_VoidPointerListElem_t * p)415 rf_FreeVPListElem(RF_Raid_t *raidPtr, RF_VoidPointerListElem_t *p)
416 {
417 
418 	pool_put(&raidPtr->pools.vple, p);
419 }
420 
421 RF_ASMHeaderListElem_t *
rf_AllocASMHeaderListElem(RF_Raid_t * raidPtr)422 rf_AllocASMHeaderListElem(RF_Raid_t *raidPtr)
423 {
424 	return pool_get(&raidPtr->pools.asmhle, PR_WAITOK | PR_ZERO);
425 }
426 
427 void
rf_FreeASMHeaderListElem(RF_Raid_t * raidPtr,RF_ASMHeaderListElem_t * p)428 rf_FreeASMHeaderListElem(RF_Raid_t *raidPtr, RF_ASMHeaderListElem_t *p)
429 {
430 
431 	pool_put(&raidPtr->pools.asmhle, p);
432 }
433 
434 RF_FailedStripe_t *
rf_AllocFailedStripeStruct(RF_Raid_t * raidPtr)435 rf_AllocFailedStripeStruct(RF_Raid_t *raidPtr)
436 {
437 	return pool_get(&raidPtr->pools.fss, PR_WAITOK | PR_ZERO);
438 }
439 
440 void
rf_FreeFailedStripeStruct(RF_Raid_t * raidPtr,RF_FailedStripe_t * p)441 rf_FreeFailedStripeStruct(RF_Raid_t *raidPtr, RF_FailedStripe_t *p)
442 {
443 	pool_put(&raidPtr->pools.fss, p);
444 }
445 
446 
447 
448 
449 
450 RF_PhysDiskAddr_t *
rf_AllocPhysDiskAddr(RF_Raid_t * raidPtr)451 rf_AllocPhysDiskAddr(RF_Raid_t *raidPtr)
452 {
453 	return pool_get(&raidPtr->pools.pda, PR_WAITOK | PR_ZERO);
454 }
455 /* allocates a list of PDAs, locking the free list only once when we
456  * have to call calloc, we do it one component at a time to simplify
457  * the process of freeing the list at program shutdown.  This should
458  * not be much of a performance hit, because it should be very
459  * infrequently executed.  */
460 RF_PhysDiskAddr_t *
rf_AllocPDAList(RF_Raid_t * raidPtr,int count)461 rf_AllocPDAList(RF_Raid_t *raidPtr, int count)
462 {
463 	RF_PhysDiskAddr_t *p, *prev;
464 	int i;
465 
466 	p = NULL;
467 	prev = NULL;
468 	for (i = 0; i < count; i++) {
469 		p = pool_get(&raidPtr->pools.pda, PR_WAITOK);
470 		p->next = prev;
471 		prev = p;
472 	}
473 
474 	return (p);
475 }
476 
477 void
rf_FreePhysDiskAddr(RF_Raid_t * raidPtr,RF_PhysDiskAddr_t * p)478 rf_FreePhysDiskAddr(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *p)
479 {
480 	pool_put(&raidPtr->pools.pda, p);
481 }
482 
483 static void
rf_FreePDAList(RF_Raid_t * raidPtr,RF_PhysDiskAddr_t * pda_list)484 rf_FreePDAList(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *pda_list)
485 {
486 	RF_PhysDiskAddr_t *p, *tmp;
487 
488 	p=pda_list;
489 	while (p) {
490 		tmp = p->next;
491 		pool_put(&raidPtr->pools.pda, p);
492 		p = tmp;
493 	}
494 }
495 
496 /* this is essentially identical to AllocPDAList.  I should combine
497  * the two.  when we have to call calloc, we do it one component at a
498  * time to simplify the process of freeing the list at program
499  * shutdown.  This should not be much of a performance hit, because it
500  * should be very infrequently executed.  */
501 RF_AccessStripeMap_t *
rf_AllocASMList(RF_Raid_t * raidPtr,int count)502 rf_AllocASMList(RF_Raid_t *raidPtr, int count)
503 {
504 	RF_AccessStripeMap_t *p, *prev;
505 	int i;
506 
507 	p = NULL;
508 	prev = NULL;
509 	for (i = 0; i < count; i++) {
510 		p = pool_get(&raidPtr->pools.asmap, PR_WAITOK);
511 		p->next = prev;
512 		prev = p;
513 	}
514 	return (p);
515 }
516 
517 static void
rf_FreeASMList(RF_Raid_t * raidPtr,RF_AccessStripeMap_t * asm_list)518 rf_FreeASMList(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asm_list)
519 {
520 	RF_AccessStripeMap_t *p, *tmp;
521 
522 	p=asm_list;
523 	while (p) {
524 		tmp = p->next;
525 		pool_put(&raidPtr->pools.asmap, p);
526 		p = tmp;
527 	}
528 }
529 
530 void
rf_FreeAccessStripeMap(RF_Raid_t * raidPtr,RF_AccessStripeMapHeader_t * hdr)531 rf_FreeAccessStripeMap(RF_Raid_t *raidPtr, RF_AccessStripeMapHeader_t *hdr)
532 {
533 	RF_AccessStripeMap_t *p;
534 	RF_PhysDiskAddr_t *pdp, *trailer, *pdaList = NULL, *pdaEnd = NULL;
535 	int     count = 0, t;
536 
537 	for (p = hdr->stripeMap; p; p = p->next) {
538 
539 		/* link the 3 pda lists into the accumulating pda list */
540 
541 		if (!pdaList)
542 			pdaList = p->qInfo;
543 		else
544 			pdaEnd->next = p->qInfo;
545 		for (trailer = NULL, pdp = p->qInfo; pdp;) {
546 			trailer = pdp;
547 			pdp = pdp->next;
548 			count++;
549 		}
550 		if (trailer)
551 			pdaEnd = trailer;
552 
553 		if (!pdaList)
554 			pdaList = p->parityInfo;
555 		else
556 			pdaEnd->next = p->parityInfo;
557 		for (trailer = NULL, pdp = p->parityInfo; pdp;) {
558 			trailer = pdp;
559 			pdp = pdp->next;
560 			count++;
561 		}
562 		if (trailer)
563 			pdaEnd = trailer;
564 
565 		if (!pdaList)
566 			pdaList = p->physInfo;
567 		else
568 			pdaEnd->next = p->physInfo;
569 		for (trailer = NULL, pdp = p->physInfo; pdp;) {
570 			trailer = pdp;
571 			pdp = pdp->next;
572 			count++;
573 		}
574 		if (trailer)
575 			pdaEnd = trailer;
576 	}
577 
578 	/* debug only */
579 	for (t = 0, pdp = pdaList; pdp; pdp = pdp->next)
580 		t++;
581 	RF_ASSERT(t == count);
582 
583 	if (pdaList)
584 		rf_FreePDAList(raidPtr, pdaList);
585 	rf_FreeASMList(raidPtr, hdr->stripeMap);
586 	rf_FreeAccessStripeMapHeader(raidPtr, hdr);
587 }
588 /* We can't use the large write optimization if there are any failures
589  * in the stripe.  In the declustered layout, there is no way to
590  * immediately determine what disks constitute a stripe, so we
591  * actually have to hunt through the stripe looking for failures.  The
592  * reason we map the parity instead of just using asm->parityInfo->col
593  * is because the latter may have been already redirected to a spare
594  * drive, which would mess up the computation of the stripe offset.
595  *
596  * ASSUMES AT MOST ONE FAILURE IN THE STRIPE.  */
/*
 * Returns nonzero if the stripe containing asmap->raidAddress has a
 * failure that rules out the large-write optimization; returns 0 when
 * the stripe is usable (possibly after setting
 * RF_ASM_REDIR_LARGE_WRITE when the failed unit has already been
 * reconstructed).  Assumes at most one failure in the stripe (see the
 * block comment above).
 */
int
rf_CheckStripeForFailures(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap)
{
	RF_RowCol_t tcol, pcol, *diskids, i;
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_StripeCount_t stripeOffset;
	int     numFailures;
	RF_RaidAddr_t sosAddr;
	RF_SectorNum_t diskOffset, poffset;

	/* quick out in the fault-free case.  */
	rf_lock_mutex2(raidPtr->mutex);
	numFailures = raidPtr->numFailures;
	rf_unlock_mutex2(raidPtr->mutex);
	if (numFailures == 0)
		return (0);

	/* start-of-stripe address, used below to map each data SU */
	sosAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr,
						     asmap->raidAddress);
	(layoutPtr->map->IdentifyStripe) (raidPtr, asmap->raidAddress,
					  &diskids);
	(layoutPtr->map->MapParity) (raidPtr, asmap->raidAddress,
				     &pcol, &poffset, 0);	/* get pcol */

	/* this need not be true if we've redirected the access to a
	 * spare in another row RF_ASSERT(row == testrow); */
	stripeOffset = 0;
	/* walk every column of the stripe, hunting for a dead disk;
	 * stripeOffset counts data columns only (the parity column is
	 * skipped), so it indexes the data SU to map below */
	for (i = 0; i < layoutPtr->numDataCol + layoutPtr->numParityCol; i++) {
		if (diskids[i] != pcol) {
			if (RF_DEAD_DISK(raidPtr->Disks[diskids[i]].status)) {
				/* dead disk and no reconstruction in
				 * progress: large write is out */
				if (raidPtr->status != rf_rs_reconstructing)
					return (1);
				RF_ASSERT(raidPtr->reconControl->fcol == diskids[i]);
				layoutPtr->map->MapSector(raidPtr,
				    sosAddr + stripeOffset * layoutPtr->sectorsPerStripeUnit,
				    &tcol, &diskOffset, 0);
				RF_ASSERT(tcol == diskids[i]);
				/* if the unit hasn't been rebuilt yet,
				 * the large write still can't be used */
				if (!rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, diskOffset))
					return (1);
				/* already reconstructed: allow the large
				 * write, but flag it for redirection */
				asmap->flags |= RF_ASM_REDIR_LARGE_WRITE;
				return (0);
			}
			stripeOffset++;
		}
	}
	return (0);
}
644 #if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0) || (RF_INCLUDE_EVENODD >0)
645 /*
646    return the number of failed data units in the stripe.
647 */
648 
/*
 * Count the failed DATA stripe units in the stripe containing
 * asmap->raidAddress by mapping each data SU and checking its disk's
 * status.  Returns 0 immediately when the array has no failures at
 * all.
 */
int
rf_NumFailedDataUnitsInStripe(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_RowCol_t tcol, i;
	RF_SectorNum_t diskOffset;
	RF_RaidAddr_t sosAddr;
	int     numFailures;

	/* quick out in the fault-free case.  */
	rf_lock_mutex2(raidPtr->mutex);
	numFailures = raidPtr->numFailures;
	rf_unlock_mutex2(raidPtr->mutex);
	if (numFailures == 0)
		return (0);
	/* reuse numFailures as the per-stripe counter from here on */
	numFailures = 0;

	sosAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr,
						     asmap->raidAddress);
	/* map each data SU in the stripe and test its disk */
	for (i = 0; i < layoutPtr->numDataCol; i++) {
		(layoutPtr->map->MapSector) (raidPtr, sosAddr + i * layoutPtr->sectorsPerStripeUnit,
		    &tcol, &diskOffset, 0);
		if (RF_DEAD_DISK(raidPtr->Disks[tcol].status))
			numFailures++;
	}

	return numFailures;
}
677 #endif
678 
679 /****************************************************************************
680  *
681  * debug routines
682  *
683  ***************************************************************************/
684 #if RF_DEBUG_MAP
/* Print an access stripe map without buffer pointers (prbuf == 0). */
void
rf_PrintAccessStripeMap(RF_AccessStripeMapHeader_t *asm_h)
{
	rf_PrintFullAccessStripeMap(asm_h, 0);
}
690 #endif
691 
/*
 * Print every stripe of an access stripe map: stripe ID, sector and
 * failure counts, the parity PDA(s), and each data stripe unit.
 *
 * prbuf - flag to print buffer pointers
 */
void
rf_PrintFullAccessStripeMap(RF_AccessStripeMapHeader_t *asm_h, int prbuf)
{
	int     i;
	RF_AccessStripeMap_t *asmap = asm_h->stripeMap;
	RF_PhysDiskAddr_t *p;
	printf("%d stripes total\n", (int) asm_h->numStripes);
	for (; asmap; asmap = asmap->next) {
		/* printf("Num failures: %d\n",asmap->numDataFailed); */
		/* printf("Num sectors:
		 * %d\n",(int)asmap->totalSectorsAccessed); */
		printf("Stripe %d (%d sectors), failures: %d data, %d parity: ",
		    (int) asmap->stripeID,
		    (int) asmap->totalSectorsAccessed,
		    (int) asmap->numDataFailed,
		    (int) asmap->numParityFailed);
		/* parity unit, possibly split into two PDAs (see
		 * rf_ASMParityAdjust()); at most two are expected */
		if (asmap->parityInfo) {
			printf("Parity [c%d s%d-%d", asmap->parityInfo->col,
			    (int) asmap->parityInfo->startSector,
			    (int) (asmap->parityInfo->startSector +
				asmap->parityInfo->numSector - 1));
			if (prbuf)
				printf(" b0x%lx", (unsigned long) asmap->parityInfo->bufPtr);
			if (asmap->parityInfo->next) {
				printf(", c%d s%d-%d", asmap->parityInfo->next->col,
				    (int) asmap->parityInfo->next->startSector,
				    (int) (asmap->parityInfo->next->startSector +
					asmap->parityInfo->next->numSector - 1));
				if (prbuf)
					printf(" b0x%lx", (unsigned long) asmap->parityInfo->next->bufPtr);
				RF_ASSERT(asmap->parityInfo->next->next == NULL);
			}
			printf("]\n\t");
		}
		/* data stripe units, two per output line */
		for (i = 0, p = asmap->physInfo; p; p = p->next, i++) {
			printf("SU c%d s%d-%d ", p->col, (int) p->startSector,
			    (int) (p->startSector + p->numSector - 1));
			if (prbuf)
				printf("b0x%lx ", (unsigned long) p->bufPtr);
			if (i && !(i & 1))
				printf("\n\t");
		}
		printf("\n");
		/* NOTE(review): the failure summary below reads from
		 * asm_h->stripeMap (the FIRST stripe) on every loop
		 * iteration rather than from asmap — presumably
		 * intentional for single-stripe use; verify if
		 * per-stripe output is wanted */
		p = asm_h->stripeMap->failedPDAs[0];
		if (asm_h->stripeMap->numDataFailed + asm_h->stripeMap->numParityFailed > 1)
			printf("[multiple failures]\n");
		else
			if (asm_h->stripeMap->numDataFailed + asm_h->stripeMap->numParityFailed > 0)
				printf("\t[Failed PDA: c%d s%d-%d]\n", p->col,
				    (int) p->startSector, (int) (p->startSector + p->numSector - 1));
	}
}
745 
#if RF_DEBUG_MAP
/*
 * Debug: print the RAID addresses of the stripe-unit boundaries from
 * the start of the stripe through the end of the access, plus the
 * offset of raidAddr into its stripe unit.
 *
 * Guard fixed from RF_MAP_DEBUG to RF_DEBUG_MAP: every other debug
 * reference in this file — including the only call site, in
 * rf_MapAccess() — is compiled under RF_DEBUG_MAP, so the transposed
 * macro name left the definition compiled out of debug builds while
 * the call was compiled in.
 */
void
rf_PrintRaidAddressInfo(RF_Raid_t *raidPtr, RF_RaidAddr_t raidAddr,
			RF_SectorCount_t numBlocks)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_RaidAddr_t ra, sosAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr, raidAddr);

	printf("Raid addrs of SU boundaries from start of stripe to end of access:\n\t");
	for (ra = sosAddr; ra <= raidAddr + numBlocks; ra += layoutPtr->sectorsPerStripeUnit) {
		printf("%d (0x%x), ", (int) ra, (int) ra);
	}
	printf("\n");
	printf("Offset into stripe unit: %d (0x%x)\n",
	    (int) (raidAddr % layoutPtr->sectorsPerStripeUnit),
	    (int) (raidAddr % layoutPtr->sectorsPerStripeUnit));
}
#endif
764 /* given a parity descriptor and the starting address within a stripe,
765  * range restrict the parity descriptor to touch only the correct
766  * stuff.  */
/*
 * Range-restrict the parity PDA "toAdjust" so that it covers only the
 * parity actually associated with the data that was accessed (the
 * mapping code initially sets it to the whole parity unit).  May
 * allocate and link a second PDA onto toAdjust->next when the access
 * uses two disjoint regions of the parity unit.
 */
void
rf_ASMParityAdjust(RF_Raid_t *raidPtr,
		   RF_PhysDiskAddr_t *toAdjust,
		   RF_StripeNum_t startAddrWithinStripe,
		   RF_SectorNum_t endAddress,
		   RF_RaidLayout_t *layoutPtr,
		   RF_AccessStripeMap_t *asm_p)
{
	RF_PhysDiskAddr_t *new_pda;

	/* when we're accessing only a portion of one stripe unit, we
	 * want the parity descriptor to identify only the chunk of
	 * parity associated with the data.  When the access spans
	 * exactly one stripe unit boundary and is less than a stripe
	 * unit in size, it uses two disjoint regions of the parity
	 * unit.  When an access spans more than one stripe unit
	 * boundary, it uses all of the parity unit.
	 *
	 * To better handle the case where stripe units are small, we
	 * may eventually want to change the 2nd case so that if the
	 * SU size is below some threshold, we just read/write the
	 * whole thing instead of breaking it up into two accesses. */
	if (asm_p->numStripeUnitsAccessed == 1) {
		/* single SU: shift the parity region to the same
		 * offset within the SU as the data, same length */
		int     x = (startAddrWithinStripe % layoutPtr->sectorsPerStripeUnit);
		toAdjust->startSector += x;
		toAdjust->raidAddress += x;
		toAdjust->numSector = asm_p->physInfo->numSector;
		RF_ASSERT(toAdjust->numSector != 0);
	} else
		if (asm_p->numStripeUnitsAccessed == 2 && asm_p->totalSectorsAccessed < layoutPtr->sectorsPerStripeUnit) {
			int     x = (startAddrWithinStripe % layoutPtr->sectorsPerStripeUnit);

			/* create a second pda and copy the parity map info
			 * into it */
			RF_ASSERT(toAdjust->next == NULL);
			/* the following will get freed in rf_FreeAccessStripeMap() via
			   rf_FreePDAList() */
			new_pda = toAdjust->next = rf_AllocPhysDiskAddr(raidPtr);
			*new_pda = *toAdjust;	/* structure assignment */
			new_pda->next = NULL;

			/* adjust the start sector & number of blocks for the
			 * first parity pda */
			toAdjust->startSector += x;
			toAdjust->raidAddress += x;
			toAdjust->numSector = rf_RaidAddressOfNextStripeUnitBoundary(layoutPtr, startAddrWithinStripe) - startAddrWithinStripe;
			RF_ASSERT(toAdjust->numSector != 0);

			/* adjust the second pda */
			new_pda->numSector = endAddress - rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, endAddress);
			/* new_pda->raidAddress =
			 * rf_RaidAddressOfNextStripeUnitBoundary(layoutPtr,
			 * toAdjust->raidAddress); */
			RF_ASSERT(new_pda->numSector != 0);
		}
}
823 
824 /* Check if a disk has been spared or failed. If spared, redirect the
825  * I/O.  If it has been failed, record it in the asm pointer.  Fifth
826  * arg is whether data or parity.  */
void
rf_ASMCheckStatus(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *pda_p,
		  RF_AccessStripeMap_t *asm_p, RF_RaidDisk_t *disks,
		  int parity)
{
	RF_DiskStatus_t dstatus;
	RF_RowCol_t fcol;

	dstatus = disks[pda_p->col].status;

	if (dstatus == rf_ds_spared) {
		/* if the disk has been spared, redirect access to the spare */
		fcol = pda_p->col;
		pda_p->col = disks[fcol].spareCol;
	} else
		if (dstatus == rf_ds_dist_spared) {
			/* ditto if disk has been spared to dist spare space */
#if RF_DEBUG_MAP
			RF_RowCol_t oc = pda_p->col;
			RF_SectorNum_t oo = pda_p->startSector;
#endif
			/* remap in place: the layout's map functions
			 * rewrite col/startSector with RF_REMAP set */
			if (pda_p->type == RF_PDA_TYPE_DATA)
				raidPtr->Layout.map->MapSector(raidPtr, pda_p->raidAddress, &pda_p->col, &pda_p->startSector, RF_REMAP);
			else
				raidPtr->Layout.map->MapParity(raidPtr, pda_p->raidAddress, &pda_p->col, &pda_p->startSector, RF_REMAP);

#if RF_DEBUG_MAP
			if (rf_mapDebug) {
				printf("Redirected c %d o %d -> c %d o %d\n", oc, (int) oo,
				    pda_p->col, (int) pda_p->startSector);
			}
#endif
		} else
			if (RF_DEAD_DISK(dstatus)) {
				/* if the disk is inaccessible, mark the
				 * failure */
				if (parity)
					asm_p->numParityFailed++;
				else {
					asm_p->numDataFailed++;
				}
				asm_p->failedPDAs[asm_p->numFailedPDAs] = pda_p;
				asm_p->numFailedPDAs++;
				/* dead code kept for reference: an
				 * earlier scheme for recording the
				 * first two failed PDAs */
#if 0
				switch (asm_p->numParityFailed + asm_p->numDataFailed) {
				case 1:
					asm_p->failedPDAs[0] = pda_p;
					break;
				case 2:
					asm_p->failedPDAs[1] = pda_p;
				default:
					break;
				}
#endif
			}
	/* the redirected access should never span a stripe unit boundary */
	RF_ASSERT(rf_RaidAddressToStripeUnitID(&raidPtr->Layout, pda_p->raidAddress) ==
	    rf_RaidAddressToStripeUnitID(&raidPtr->Layout, pda_p->raidAddress + pda_p->numSector - 1));
	RF_ASSERT(pda_p->col != -1);
}
887