/*	$NetBSD: rf_reconmap.c,v 1.39 2022/04/08 10:27:04 andvar Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*************************************************************************
 * rf_reconmap.c
 *
 * code to maintain a map of what sectors have/have not been reconstructed
 *
 *************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_reconmap.c,v 1.39 2022/04/08 10:27:04 andvar Exp $");

#include "rf_raid.h"
#include <sys/time.h>
#include "rf_general.h"
#include "rf_utils.h"

/* special pointer values indicating that a reconstruction unit
 * has been either totally reconstructed or not at all.  Both
 * are illegal pointer values, so you have to be careful not to
 * dereference through them.  RU_NOTHING must be zero, since
 * MakeReconMap uses memset to initialize the structure.  These are used
 * only at the head of the list.
 */
#define RU_ALL ((RF_ReconMapListElem_t *) -1)
#define RU_NOTHING ((RF_ReconMapListElem_t *) 0)

/* For most reconstructs we need at most 3 RF_ReconMapListElem_t's.
 * Bounding the number we need is quite difficult, as it depends on how
 * badly the sectors to be reconstructed get divided up.  In the current
 * code, the reconstructed sectors appear aligned on stripe boundaries,
 * and are always presented in stripe width units, so we're probably
 * allocating quite a bit more than we'll ever need.
 */
#define RF_NUM_RECON_POOL_ELEM 100

static void
compact_stat_entry(RF_Raid_t *, RF_ReconMap_t *, int, int);
static void crunch_list(RF_ReconMap_t *, RF_ReconMapListElem_t *);
static RF_ReconMapListElem_t *
MakeReconMapListElem(RF_ReconMap_t *, RF_SectorNum_t, RF_SectorNum_t,
		     RF_ReconMapListElem_t *);
static void
FreeReconMapListElem(RF_ReconMap_t *mapPtr, RF_ReconMapListElem_t *p);

/*---------------------------------------------------------------------------
 *
 * Creates and initializes a new reconstruction map
 *
 * ru_sectors - size of a reconstruction unit in sectors
 * disk_sectors - size of the disk in sectors
 * spareUnitsPerDisk - zero unless distributed sparing is in use
 *-------------------------------------------------------------------------*/

RF_ReconMap_t *
rf_MakeReconMap(RF_Raid_t *raidPtr, RF_SectorCount_t ru_sectors,
		RF_SectorCount_t disk_sectors,
		RF_ReconUnitCount_t spareUnitsPerDisk)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ReconUnitCount_t num_rus = layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerRU;
	RF_ReconMap_t *p;

	p = RF_Malloc(sizeof(*p));
	p->sectorsPerReconUnit = ru_sectors;
	p->sectorsInDisk = disk_sectors;

	p->totalRUs = num_rus;
	p->spareRUs = spareUnitsPerDisk;
	p->unitsLeft = num_rus - spareUnitsPerDisk;
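	/*
	 * status[] only tracks a sliding window of RF_RECONMAP_SIZE
	 * reconstruction units at a time: low_ru..high_ru is the range of
	 * RUs currently covered, and head is the slot in the circular
	 * status[] array that corresponds to low_ru.
	 */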
	p->low_ru = 0;
	p->status_size = RF_RECONMAP_SIZE;
	p->high_ru = p->status_size - 1;
	p->head = 0;

	p->status = RF_Malloc(p->status_size * sizeof(*p->status));
	RF_ASSERT(p->status != NULL);

	pool_init(&p->elem_pool, sizeof(RF_ReconMapListElem_t), 0,
	    0, 0, "raidreconpl", NULL, IPL_BIO);
	pool_prime(&p->elem_pool, RF_NUM_RECON_POOL_ELEM);

	rf_init_mutex2(p->mutex, IPL_VM);
	rf_init_cond2(p->cv, "reconupdate");

	return (p);
}


/*---------------------------------------------------------------------------
 *
 * marks a new set of sectors as reconstructed.  All the possible
 * mergings get complicated.  To simplify matters, the approach I take
 * is to just dump something into the list, and then clean it up
 * (i.e. merge elements and eliminate redundant ones) in a second pass
 * over the list (compact_stat_entry()).  Not 100% efficient, since a
 * structure can be allocated and then immediately freed, but it keeps
 * this code from becoming (more of) a nightmare of special cases.
 * The only thing that compact_stat_entry() assumes is that the list
 * is sorted by startSector, and so this is the only condition I
 * maintain here.  (MCH)
 *
 * This code now uses a pool instead of the previous malloc/free
 * stuff.
 *-------------------------------------------------------------------------*/

void
rf_ReconMapUpdate(RF_Raid_t *raidPtr, RF_ReconMap_t *mapPtr,
		  RF_SectorNum_t startSector, RF_SectorNum_t stopSector)
{
	RF_SectorCount_t sectorsPerReconUnit = mapPtr->sectorsPerReconUnit;
	RF_SectorNum_t i, first_in_RU, last_in_RU, ru;
	RF_ReconMapListElem_t *p, *pt;

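	/*
	 * Take the recon map's busy flag; mapPtr->mutex only protects the
	 * flag and the condition variable.  The map itself is modified
	 * while holding the busy flag, with the mutex released.
	 */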
	rf_lock_mutex2(mapPtr->mutex);
	while (mapPtr->lock) {
		rf_wait_cond2(mapPtr->cv, mapPtr->mutex);
	}
	mapPtr->lock = 1;
	rf_unlock_mutex2(mapPtr->mutex);
	RF_ASSERT(startSector >= 0 && stopSector < mapPtr->sectorsInDisk &&
		  stopSector >= startSector);

	while (startSector <= stopSector) {
		i = startSector / mapPtr->sectorsPerReconUnit;
		first_in_RU = i * sectorsPerReconUnit;
		last_in_RU = first_in_RU + sectorsPerReconUnit - 1;

		/* do we need to move the queue? */
		while (i > mapPtr->high_ru) {
#if 0
#ifdef DIAGNOSTIC
			/* XXX: The check below is not valid for
			 * RAID5_RS.  It is valid for RAID 1 and RAID 5.
			 * The issue is that we can easily have
			 * RU_NOTHING entries here too, and those are
			 * quite correct.
			 */
			if (mapPtr->status[mapPtr->head] != RU_ALL) {
				printf("\nraid%d: reconmap incorrect -- working on i %" PRIu64 "\n",
				       raidPtr->raidid, i);
				printf("raid%d: ru %" PRIu64 " not completed!!!\n",
				       raidPtr->raidid, mapPtr->head);

				printf("raid%d: low: %" PRIu64 " high: %" PRIu64 "\n",
				       raidPtr->raidid, mapPtr->low_ru, mapPtr->high_ru);

				panic("reconmap incorrect");
			}
#endif
#endif
			mapPtr->low_ru++;
			mapPtr->high_ru++;
			/* initialize "highest" RU status entry, which
			   will take over the current head position */
			mapPtr->status[mapPtr->head] = RU_NOTHING;

			/* move head too */
			mapPtr->head++;
			if (mapPtr->head >= mapPtr->status_size)
				mapPtr->head = 0;

		}

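		/*
		 * Translate the absolute RU number i into its slot in the
		 * circular status[] array: head corresponds to low_ru, and
		 * the index wraps around at status_size.
		 */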
		ru = i - mapPtr->low_ru + mapPtr->head;
		if (ru >= mapPtr->status_size)
			ru = ru - mapPtr->status_size;

		if ((ru < 0) || (ru >= mapPtr->status_size)) {
			printf("raid%d: ru is bogus %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			       raidPtr->raidid, i, ru, mapPtr->head, mapPtr->low_ru, mapPtr->high_ru);
			panic("bogus ru in reconmap");
		}

200
201 p = mapPtr->status[ru];
202 if (p != RU_ALL) {
203 if (p == RU_NOTHING || p->startSector > startSector) {
204 /* insert at front of list */
205
206 mapPtr->status[ru] = MakeReconMapListElem(mapPtr,startSector, RF_MIN(stopSector, last_in_RU), (p == RU_NOTHING) ? NULL : p);
207
208 } else {/* general case */
209 do { /* search for place to insert */
210 pt = p;
211 p = p->next;
212 } while (p && (p->startSector < startSector));
213 pt->next = MakeReconMapListElem(mapPtr,startSector, RF_MIN(stopSector, last_in_RU), p);
214
215 }
216 compact_stat_entry(raidPtr, mapPtr, i, ru);
217 }
218 startSector = RF_MIN(stopSector, last_in_RU) + 1;
219 }
220 rf_lock_mutex2(mapPtr->mutex);
221 mapPtr->lock = 0;
222 rf_broadcast_cond2(mapPtr->cv);
223 rf_unlock_mutex2(mapPtr->mutex);
224 }


/*---------------------------------------------------------------------------
 *
 * performs whatever list compactions can be done, and frees any space
 * that is no longer necessary.  Assumes only that the list is sorted
 * by startSector.  crunch_list() compacts a single list as much as
 * possible, and the second block of code deletes the entire list if
 * possible.  crunch_list() is also called from
 * MakeReconMapAccessList().
 *
 * When a recon unit is detected to be fully reconstructed, we set the
 * corresponding bit in the parity stripe map so that the head follow
 * code will not select this parity stripe again.  This is redundant
 * (but harmless) when compact_stat_entry is called from the
 * reconstruction code, but necessary when called from the user-write
 * code.
 *
 *-------------------------------------------------------------------------*/

static void
compact_stat_entry(RF_Raid_t *raidPtr, RF_ReconMap_t *mapPtr, int i, int j)
{
	RF_SectorCount_t sectorsPerReconUnit = mapPtr->sectorsPerReconUnit;
	RF_ReconMapListElem_t *p = mapPtr->status[j];

	crunch_list(mapPtr, p);

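	/*
	 * If the merged list has collapsed to a single element covering the
	 * whole reconstruction unit, the unit is done: mark it RU_ALL and
	 * release the element.
	 */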
	if ((p->startSector == i * sectorsPerReconUnit) &&
	    (p->stopSector == i * sectorsPerReconUnit +
	     sectorsPerReconUnit - 1)) {
		mapPtr->status[j] = RU_ALL;
		mapPtr->unitsLeft--;
		FreeReconMapListElem(mapPtr, p);
	}
}


static void
crunch_list(RF_ReconMap_t *mapPtr, RF_ReconMapListElem_t *listPtr)
{
	RF_ReconMapListElem_t *pt, *p = listPtr;

	if (!p)
		return;
	pt = p;
	p = p->next;
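	/*
	 * Walk the sorted list and fold any element that overlaps or
	 * directly abuts its predecessor into that predecessor.
	 */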
	while (p) {
		if (pt->stopSector >= p->startSector - 1) {
			pt->stopSector = RF_MAX(pt->stopSector, p->stopSector);
			pt->next = p->next;
			FreeReconMapListElem(mapPtr, p);
			p = pt->next;
		} else {
			pt = p;
			p = p->next;
		}
	}
}
/*---------------------------------------------------------------------------
 *
 * Allocate and fill a new list element
 *
 *-------------------------------------------------------------------------*/

static RF_ReconMapListElem_t *
MakeReconMapListElem(RF_ReconMap_t *mapPtr, RF_SectorNum_t startSector,
		     RF_SectorNum_t stopSector, RF_ReconMapListElem_t *next)
{
	RF_ReconMapListElem_t *p;

	p = pool_get(&mapPtr->elem_pool, PR_WAITOK);
	p->startSector = startSector;
	p->stopSector = stopSector;
	p->next = next;
	return (p);
}
/*---------------------------------------------------------------------------
 *
 * Free a list element
 *
 *-------------------------------------------------------------------------*/

static void
FreeReconMapListElem(RF_ReconMap_t *mapPtr, RF_ReconMapListElem_t *p)
{
	pool_put(&mapPtr->elem_pool, p);
}
/*---------------------------------------------------------------------------
 *
 * Free an entire status structure.  Inefficient, but can be called at
 * any time.
 *
 *-------------------------------------------------------------------------*/
void
rf_FreeReconMap(RF_ReconMap_t *mapPtr)
{
	RF_ReconMapListElem_t *p, *q;
	RF_ReconUnitNum_t i;

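	/*
	 * Return any list elements still hanging off the status slots to
	 * the element pool before the pool itself is destroyed.
	 */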
	for (i = 0; i < mapPtr->status_size; i++) {
		p = mapPtr->status[i];
		while (p != RU_NOTHING && p != RU_ALL) {
			q = p;
			p = p->next;
			FreeReconMapListElem(mapPtr, q);
		}
	}

	rf_destroy_mutex2(mapPtr->mutex);
	rf_destroy_cond2(mapPtr->cv);

	pool_destroy(&mapPtr->elem_pool);
	RF_Free(mapPtr->status, mapPtr->status_size *
	    sizeof(RF_ReconMapListElem_t *));
	RF_Free(mapPtr, sizeof(RF_ReconMap_t));
}
/*---------------------------------------------------------------------------
 *
 * returns nonzero if the indicated RU has been reconstructed already
 *
 *-------------------------------------------------------------------------*/

int
rf_CheckRUReconstructed(RF_ReconMap_t *mapPtr, RF_SectorNum_t startSector)
{
	RF_ReconUnitNum_t i;
	int rv;

	i = startSector / mapPtr->sectorsPerReconUnit;

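	/*
	 * RUs below the sliding window are treated as already
	 * reconstructed, and RUs above it as not yet started.  Inside the
	 * window, consult the circular status[] array.
	 */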
	if (i < mapPtr->low_ru)
		rv = 1;
	else if (i > mapPtr->high_ru)
		rv = 0;
	else {
		i = i - mapPtr->low_ru + mapPtr->head;
		if (i >= mapPtr->status_size)
			i = i - mapPtr->status_size;
		if (mapPtr->status[i] == RU_ALL)
			rv = 1;
		else
			rv = 0;
	}

	return rv;
}

RF_ReconUnitCount_t
rf_UnitsLeftToReconstruct(RF_ReconMap_t *mapPtr)
{
	RF_ASSERT(mapPtr != NULL);
	return (mapPtr->unitsLeft);
}

#if RF_DEBUG_RECON
void
rf_PrintReconSchedule(RF_ReconMap_t *mapPtr, struct timeval *starttime)
{
	static int old_pctg = -1;
	struct timeval tv, diff;
	int new_pctg;

	new_pctg = 100 - (rf_UnitsLeftToReconstruct(mapPtr) *
			  100 / mapPtr->totalRUs);
	if (new_pctg != old_pctg) {
		RF_GETTIME(tv);
		RF_TIMEVAL_DIFF(starttime, &tv, &diff);
		printf("%d %d.%06d\n", (int) new_pctg, (int) diff.tv_sec,
		       (int) diff.tv_usec);
		old_pctg = new_pctg;
	}
}
#endif