/*	$NetBSD: rf_reconmap.c,v 1.36 2017/11/14 14:27:54 christos Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*************************************************************************
 * rf_reconmap.c
 *
 * code to maintain a map of what sectors have/have not been reconstructed
 *
 *************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_reconmap.c,v 1.36 2017/11/14 14:27:54 christos Exp $");

#include "rf_raid.h"
#include <sys/time.h>
#include "rf_general.h"
#include "rf_utils.h"

/* special pointer values indicating that a reconstruction unit
 * has been either totally reconstructed or not at all.  Both
 * are illegal pointer values, so you have to be careful not to
 * dereference through them.  RU_NOTHING must be zero, since
 * MakeReconMap uses memset to initialize the structure.  These are used
 * only at the head of the list.
 */
#define RU_ALL      ((RF_ReconMapListElem_t *) -1)
#define RU_NOTHING  ((RF_ReconMapListElem_t *) 0)

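/*
 * A minimal sketch (illustrative only, not part of the driver) of how a
 * status[] head entry is interpreted given the sentinels above; the
 * function name is hypothetical.
 */
#if 0
static int
ru_state_example(RF_ReconMapListElem_t *head)
{
	if (head == RU_ALL)
		return 2;	/* RU fully reconstructed; never dereference */
	if (head == RU_NOTHING)
		return 0;	/* nothing in this RU reconstructed yet */
	return 1;		/* partial: head starts a sorted element list */
}
#endif
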
/* For most reconstructions we need at most 3 RF_ReconMapListElem_t's.
 * Bounding the number we need is quite difficult, as it depends on how
 * badly the sectors to be reconstructed get divided up.  In the current
 * code, the reconstructed sectors appear aligned on stripe boundaries,
 * and are always presented in stripe width units, so we're probably
 * allocating quite a bit more than we'll ever need.
 */
#define RF_NUM_RECON_POOL_ELEM 100

static void
compact_stat_entry(RF_Raid_t *, RF_ReconMap_t *, int, int);
static void crunch_list(RF_ReconMap_t *, RF_ReconMapListElem_t *);
static RF_ReconMapListElem_t *
MakeReconMapListElem(RF_ReconMap_t *, RF_SectorNum_t, RF_SectorNum_t,
		     RF_ReconMapListElem_t *);
static void
FreeReconMapListElem(RF_ReconMap_t *mapPtr, RF_ReconMapListElem_t *p);

/*---------------------------------------------------------------------------
 *
 * Creates and initializes a new reconstruction map
 *
 * ru_sectors   - size of a reconstruction unit in sectors
 * disk_sectors - size of the disk in sectors
 * spareUnitsPerDisk - zero unless distributed sparing is in use
 *-------------------------------------------------------------------------*/

RF_ReconMap_t *
rf_MakeReconMap(RF_Raid_t *raidPtr, RF_SectorCount_t ru_sectors,
		RF_SectorCount_t disk_sectors,
		RF_ReconUnitCount_t spareUnitsPerDisk)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ReconUnitCount_t num_rus = layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerRU;
	RF_ReconMap_t *p;
	int error;

	RF_Malloc(p, sizeof(RF_ReconMap_t), (RF_ReconMap_t *));
	p->sectorsPerReconUnit = ru_sectors;
	p->sectorsInDisk = disk_sectors;

	p->totalRUs = num_rus;
	p->spareRUs = spareUnitsPerDisk;
	p->unitsLeft = num_rus - spareUnitsPerDisk;
	p->low_ru = 0;
	p->status_size = RF_RECONMAP_SIZE;
	p->high_ru = p->status_size - 1;
	p->head = 0;

	RF_Malloc(p->status, p->status_size * sizeof(RF_ReconMapListElem_t *), (RF_ReconMapListElem_t **));
	RF_ASSERT(p->status != NULL);

	(void) memset((char *) p->status, 0,
	    p->status_size * sizeof(RF_ReconMapListElem_t *));

	pool_init(&p->elem_pool, sizeof(RF_ReconMapListElem_t), 0,
	    0, 0, "raidreconpl", NULL, IPL_BIO);
	if ((error = pool_prime(&p->elem_pool, RF_NUM_RECON_POOL_ELEM)) != 0)
		panic("%s: failed to prime pool: %d", __func__, error);

	rf_init_mutex2(p->mutex, IPL_VM);
	rf_init_cond2(p->cv, "reconupdate");

	return (p);
}
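
/*
 * A hedged usage sketch (illustrative only, not driver code): creating a
 * map sized from the layout and later releasing it.  The surrounding
 * caller and the "map" variable are assumptions for the example; the
 * RF_Raid_t fields used (Layout.sectorsPerStripeUnit, Layout.SUsPerRU,
 * sectorsPerDisk) are the standard RAIDframe ones.
 */
#if 0
	RF_ReconMap_t *map;

	map = rf_MakeReconMap(raidPtr,
	    raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU,
	    raidPtr->sectorsPerDisk, 0 /* no distributed sparing */);
	/* ... mark sectors with rf_ReconMapUpdate() as they complete ... */
	rf_FreeReconMap(map);
#endif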


/*---------------------------------------------------------------------------
 *
 * Marks a new set of sectors as reconstructed.  Handling all the
 * possible mergings directly gets complicated.  To simplify matters, the
 * approach I take is to just dump something into the list, and then
 * clean it up (i.e. merge elements and eliminate redundant ones) in a
 * second pass over the list (compact_stat_entry()).  Not 100% efficient,
 * since a structure can be allocated and then immediately freed, but it
 * keeps this code from becoming (more of) a nightmare of special cases.
 * The only thing that compact_stat_entry() assumes is that the list
 * is sorted by startSector, and so this is the only condition I
 * maintain here.  (MCH)
 *
 * This code now uses a pool instead of the previous malloc/free
 * scheme.
 *-------------------------------------------------------------------------*/

void
rf_ReconMapUpdate(RF_Raid_t *raidPtr, RF_ReconMap_t *mapPtr,
		  RF_SectorNum_t startSector, RF_SectorNum_t stopSector)
{
	RF_SectorCount_t sectorsPerReconUnit = mapPtr->sectorsPerReconUnit;
	RF_SectorNum_t i, first_in_RU, last_in_RU, ru;
	RF_ReconMapListElem_t *p, *pt;

	rf_lock_mutex2(mapPtr->mutex);
	while (mapPtr->lock) {
		rf_wait_cond2(mapPtr->cv, mapPtr->mutex);
	}
	mapPtr->lock = 1;
	rf_unlock_mutex2(mapPtr->mutex);
	RF_ASSERT(startSector >= 0 && stopSector < mapPtr->sectorsInDisk &&
		  stopSector >= startSector);

	while (startSector <= stopSector) {
		i = startSector / sectorsPerReconUnit;
		first_in_RU = i * sectorsPerReconUnit;
		last_in_RU = first_in_RU + sectorsPerReconUnit - 1;

		/* do we need to move the queue? */
		while (i > mapPtr->high_ru) {
#if 0
#ifdef DIAGNOSTIC
			/* XXX: The check below is not valid for
			 * RAID5_RS.  It is valid for RAID 1 and RAID 5.
			 * The issue is that we can easily have
			 * RU_NOTHING entries here too, and those are
			 * quite correct.
			 */
			if (mapPtr->status[mapPtr->head] != RU_ALL) {
				printf("\nraid%d: reconmap incorrect -- working on i %" PRIu64 "\n",
				       raidPtr->raidid, i);
				printf("raid%d: ru %" PRIu64 " not completed!!!\n",
				       raidPtr->raidid, mapPtr->head);

				printf("raid%d: low: %" PRIu64 " high: %" PRIu64 "\n",
				       raidPtr->raidid, mapPtr->low_ru, mapPtr->high_ru);

				panic("reconmap incorrect");
			}
#endif
#endif
			mapPtr->low_ru++;
			mapPtr->high_ru++;
			/* initialize "highest" RU status entry, which
			   will take over the current head position */
			mapPtr->status[mapPtr->head] = RU_NOTHING;

			/* move head too */
			mapPtr->head++;
			if (mapPtr->head >= mapPtr->status_size)
				mapPtr->head = 0;

		}

		ru = i - mapPtr->low_ru + mapPtr->head;
		if (ru >= mapPtr->status_size)
			ru = ru - mapPtr->status_size;

		/* RF_SectorNum_t is unsigned, so ru cannot be negative;
		   only the upper bound needs checking here. */
		if (ru >= mapPtr->status_size) {
			printf("raid%d: ru is bogus: i %" PRIu64 " ru %" PRIu64
			       " head %" PRIu64 " low %" PRIu64 " high %" PRIu64 "\n",
			       raidPtr->raidid, i, ru, mapPtr->head,
			       mapPtr->low_ru, mapPtr->high_ru);
			panic("bogus ru in reconmap");
		}

		p = mapPtr->status[ru];
		if (p != RU_ALL) {
			if (p == RU_NOTHING || p->startSector > startSector) {
				/* insert at front of list */
				mapPtr->status[ru] = MakeReconMapListElem(mapPtr,
				    startSector, RF_MIN(stopSector, last_in_RU),
				    (p == RU_NOTHING) ? NULL : p);
			} else {	/* general case */
				do {	/* search for place to insert */
					pt = p;
					p = p->next;
				} while (p && (p->startSector < startSector));
				pt->next = MakeReconMapListElem(mapPtr,
				    startSector, RF_MIN(stopSector, last_in_RU), p);
			}
			compact_stat_entry(raidPtr, mapPtr, i, ru);
		}
		startSector = RF_MIN(stopSector, last_in_RU) + 1;
	}
	rf_lock_mutex2(mapPtr->mutex);
	mapPtr->lock = 0;
	rf_broadcast_cond2(mapPtr->cv);
	rf_unlock_mutex2(mapPtr->mutex);
}
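
/*
 * A worked example (illustrative only) of the dump-then-compact approach:
 * with a 32-sector RU starting at sector 0,
 *
 *	rf_ReconMapUpdate(raidPtr, map,  0,  9);   list: [0,9]
 *	rf_ReconMapUpdate(raidPtr, map, 20, 31);   list: [0,9] [20,31]
 *	rf_ReconMapUpdate(raidPtr, map, 10, 19);   crunch_list() merges the
 *	    now-adjacent elements into [0,31]; compact_stat_entry() then sees
 *	    the whole RU covered, frees the list, and records RU_ALL.
 */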



/*---------------------------------------------------------------------------
 *
 * Performs whatever list compactions can be done, and frees any space
 * that is no longer necessary.  Assumes only that the list is sorted
 * by startSector.  crunch_list() compacts a single list as much as
 * possible, and the second block of code deletes the entire list if
 * possible.  crunch_list() is also called from
 * MakeReconMapAccessList().
 *
 * When a recon unit is detected to be fully reconstructed, we set the
 * corresponding bit in the parity stripe map so that the head follow
 * code will not select this parity stripe again.  This is redundant
 * (but harmless) when compact_stat_entry is called from the
 * reconstruction code, but necessary when called from the user-write
 * code.
 *
 *-------------------------------------------------------------------------*/


static void
compact_stat_entry(RF_Raid_t *raidPtr, RF_ReconMap_t *mapPtr, int i, int j)
{
	RF_SectorCount_t sectorsPerReconUnit = mapPtr->sectorsPerReconUnit;
	RF_ReconMapListElem_t *p = mapPtr->status[j];

	crunch_list(mapPtr, p);

	if ((p->startSector == i * sectorsPerReconUnit) &&
	    (p->stopSector == i * sectorsPerReconUnit +
			      sectorsPerReconUnit - 1)) {
		mapPtr->status[j] = RU_ALL;
		mapPtr->unitsLeft--;
		FreeReconMapListElem(mapPtr, p);
	}
}


static void
crunch_list(RF_ReconMap_t *mapPtr, RF_ReconMapListElem_t *listPtr)
{
	RF_ReconMapListElem_t *pt, *p = listPtr;

	if (!p)
		return;
	pt = p;
	p = p->next;
	while (p) {
		if (pt->stopSector >= p->startSector - 1) {
			/* adjacent or overlapping: merge p into pt */
			pt->stopSector = RF_MAX(pt->stopSector, p->stopSector);
			pt->next = p->next;
			FreeReconMapListElem(mapPtr, p);
			p = pt->next;
		} else {
			pt = p;
			p = p->next;
		}
	}
}
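
/*
 * Illustrative trace (not driver code): crunch_list() turns the sorted
 * list [0,4] [5,9] [12,15] into [0,9] [12,15] -- [0,4] and [5,9] are
 * adjacent and merge, while the gap at sectors 10-11 keeps [12,15]
 * separate.
 */
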
/*---------------------------------------------------------------------------
 *
 * Allocate and fill a new list element
 *
 *-------------------------------------------------------------------------*/

static RF_ReconMapListElem_t *
MakeReconMapListElem(RF_ReconMap_t *mapPtr, RF_SectorNum_t startSector,
		     RF_SectorNum_t stopSector, RF_ReconMapListElem_t *next)
{
	RF_ReconMapListElem_t *p;

	p = pool_get(&mapPtr->elem_pool, PR_WAITOK);
	p->startSector = startSector;
	p->stopSector = stopSector;
	p->next = next;
	return (p);
}
/*---------------------------------------------------------------------------
 *
 * Free a list element
 *
 *-------------------------------------------------------------------------*/

static void
FreeReconMapListElem(RF_ReconMap_t *mapPtr, RF_ReconMapListElem_t *p)
{
	pool_put(&mapPtr->elem_pool, p);
}
/*---------------------------------------------------------------------------
 *
 * Free an entire status structure.  Inefficient, but can be called at
 * any time.
 *
 *-------------------------------------------------------------------------*/
void
rf_FreeReconMap(RF_ReconMap_t *mapPtr)
{
	RF_ReconMapListElem_t *p, *q;
	RF_ReconUnitNum_t i;

	for (i = 0; i < mapPtr->status_size; i++) {
		p = mapPtr->status[i];
		while (p != RU_NOTHING && p != RU_ALL) {
			q = p;
			p = p->next;
			/* list elements come from elem_pool, so return
			   them there rather than RF_Free()ing them */
			FreeReconMapListElem(mapPtr, q);
		}
	}

	rf_destroy_mutex2(mapPtr->mutex);
	rf_destroy_cond2(mapPtr->cv);

	pool_destroy(&mapPtr->elem_pool);
	RF_Free(mapPtr->status, mapPtr->status_size *
		sizeof(RF_ReconMapListElem_t *));
	RF_Free(mapPtr, sizeof(RF_ReconMap_t));
}
/*---------------------------------------------------------------------------
 *
 * returns nonzero if the indicated RU has been reconstructed already
 *
 *-------------------------------------------------------------------------*/

int
rf_CheckRUReconstructed(RF_ReconMap_t *mapPtr, RF_SectorNum_t startSector)
{
	RF_ReconUnitNum_t i;
	int rv;

	i = startSector / mapPtr->sectorsPerReconUnit;

	if (i < mapPtr->low_ru)
		rv = 1;
	else if (i > mapPtr->high_ru)
		rv = 0;
	else {
		i = i - mapPtr->low_ru + mapPtr->head;
		if (i >= mapPtr->status_size)
			i = i - mapPtr->status_size;
		if (mapPtr->status[i] == RU_ALL)
			rv = 1;
		else
			rv = 0;
	}

	return rv;
}

RF_ReconUnitCount_t
rf_UnitsLeftToReconstruct(RF_ReconMap_t *mapPtr)
{
	RF_ASSERT(mapPtr != NULL);
	return (mapPtr->unitsLeft);
}
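
/*
 * A hedged usage sketch (illustrative only): checking whether a given
 * sector's RU is safe to read and reporting overall progress.  "map" and
 * "sectorNum" are assumptions for the example.
 */
#if 0
	if (rf_CheckRUReconstructed(map, sectorNum)) {
		/* this RU has been fully reconstructed */
	}
	printf("RUs left to reconstruct: %" PRIu64 "\n",
	       (uint64_t)rf_UnitsLeftToReconstruct(map));
#endif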

#if RF_DEBUG_RECON
void
rf_PrintReconSchedule(RF_ReconMap_t *mapPtr, struct timeval *starttime)
{
	static int old_pctg = -1;
	struct timeval tv, diff;
	int     new_pctg;

	new_pctg = 100 - (rf_UnitsLeftToReconstruct(mapPtr) *
			  100 / mapPtr->totalRUs);
	if (new_pctg != old_pctg) {
		RF_GETTIME(tv);
		RF_TIMEVAL_DIFF(starttime, &tv, &diff);
		printf("%d %d.%06d\n", new_pctg, (int) diff.tv_sec,
		       (int) diff.tv_usec);
		old_pctg = new_pctg;
	}
}
#endif
406