xref: /netbsd-src/sys/dev/raidframe/rf_driver.c (revision 404fbe5fb94ca1e054339640cabb2801ce52dd30)
1 /*	$NetBSD: rf_driver.c,v 1.120 2008/12/20 17:04:51 oster Exp $	*/
2 /*-
3  * Copyright (c) 1999 The NetBSD Foundation, Inc.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to The NetBSD Foundation
7  * by Greg Oster
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * Copyright (c) 1995 Carnegie-Mellon University.
33  * All rights reserved.
34  *
35  * Author: Mark Holland, Khalil Amiri, Claudson Bornstein, William V. Courtright II,
36  *         Robby Findler, Daniel Stodolsky, Rachad Youssef, Jim Zelenka
37  *
38  * Permission to use, copy, modify and distribute this software and
39  * its documentation is hereby granted, provided that both the copyright
40  * notice and this permission notice appear in all copies of the
41  * software, derivative works or modified versions, and any portions
42  * thereof, and that both notices appear in supporting documentation.
43  *
44  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
46  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47  *
48  * Carnegie Mellon requests users of this software to return to
49  *
50  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
51  *  School of Computer Science
52  *  Carnegie Mellon University
53  *  Pittsburgh PA 15213-3890
54  *
55  * any improvements or extensions that they make and grant Carnegie the
56  * rights to redistribute these changes.
57  */
58 
59 /******************************************************************************
60  *
61  * rf_driver.c -- main setup, teardown, and access routines for the RAID driver
62  *
63  * all routines are prefixed with rf_ (raidframe), to avoid conflicts.
64  *
65  ******************************************************************************/
66 
67 
68 #include <sys/cdefs.h>
69 __KERNEL_RCSID(0, "$NetBSD: rf_driver.c,v 1.120 2008/12/20 17:04:51 oster Exp $");
70 
71 #ifdef _KERNEL_OPT
72 #include "opt_raid_diagnostic.h"
73 #endif
74 
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/ioctl.h>
78 #include <sys/fcntl.h>
79 #include <sys/vnode.h>
80 
81 
82 #include "rf_archs.h"
83 #include "rf_threadstuff.h"
84 
85 #include <sys/errno.h>
86 
87 #include "rf_raid.h"
88 #include "rf_dag.h"
89 #include "rf_aselect.h"
90 #include "rf_diskqueue.h"
91 #include "rf_parityscan.h"
92 #include "rf_alloclist.h"
93 #include "rf_dagutils.h"
94 #include "rf_utils.h"
95 #include "rf_etimer.h"
96 #include "rf_acctrace.h"
97 #include "rf_general.h"
98 #include "rf_desc.h"
99 #include "rf_states.h"
100 #include "rf_decluster.h"
101 #include "rf_map.h"
102 #include "rf_revent.h"
103 #include "rf_callback.h"
104 #include "rf_engine.h"
105 #include "rf_mcpair.h"
106 #include "rf_nwayxor.h"
107 #include "rf_copyback.h"
108 #include "rf_driver.h"
109 #include "rf_options.h"
110 #include "rf_shutdown.h"
111 #include "rf_kintf.h"
112 
113 #include <sys/buf.h>
114 
115 #ifndef RF_ACCESS_DEBUG
116 #define RF_ACCESS_DEBUG 0
117 #endif
118 
119 /* rad == RF_RaidAccessDesc_t */
120 RF_DECLARE_MUTEX(rf_rad_lock)
121 #define RF_MAX_FREE_RAD 128
122 #define RF_MIN_FREE_RAD  32
123 
124 /* debug variables */
125 char    rf_panicbuf[2048];	/* a buffer to hold an error msg when we panic */
126 
127 /* main configuration routines */
128 static int raidframe_booted = 0;
129 
130 static void rf_ConfigureDebug(RF_Config_t * cfgPtr);
131 static void set_debug_option(char *name, long val);
132 static void rf_UnconfigureArray(void);
133 static void rf_ShutdownRDFreeList(void *);
134 static int rf_ConfigureRDFreeList(RF_ShutdownList_t **);
135 
136 RF_DECLARE_MUTEX(rf_printf_mutex)	/* debug only:  avoids interleaved
137 					 * printfs by different stripes */
138 
139 #define SIGNAL_QUIESCENT_COND(_raid_)  wakeup(&((_raid_)->accesses_suspended))
140 #define WAIT_FOR_QUIESCENCE(_raid_) \
141 	ltsleep(&((_raid_)->accesses_suspended), PRIBIO, \
142 		"raidframe quiesce", 0, &((_raid_)->access_suspend_mutex))
143 
144 static int configureCount = 0;	/* number of active configurations */
145 static int isconfigged = 0;	/* is basic raidframe (non per-array)
146 				 * stuff configured */
147 RF_DECLARE_LKMGR_STATIC_MUTEX(configureMutex)	/* used to lock the configuration
148 					 * stuff */
149 static RF_ShutdownList_t *globalShutdown;	/* non array-specific
150 						 * stuff */
151 
152 static int rf_ConfigureRDFreeList(RF_ShutdownList_t ** listp);
153 static int rf_AllocEmergBuffers(RF_Raid_t *);
154 static void rf_FreeEmergBuffers(RF_Raid_t *);
155 
156 /* called at system boot time */
157 int
158 rf_BootRaidframe(void)
159 {
160 
161 	if (raidframe_booted)
162 		return (EBUSY);
163 	raidframe_booted = 1;
164 	mutex_init(&configureMutex, MUTEX_DEFAULT, IPL_NONE);
165  	configureCount = 0;
166 	isconfigged = 0;
167 	globalShutdown = NULL;
168 	return (0);
169 }
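
/*
 * Sketch of the expected call pattern (the real call site lives in the
 * kernel interface code, e.g. raidattach() in rf_netbsdkintf.c; shown
 * here only as an illustration, not verbatim):
 *
 *	if (rf_BootRaidframe() == 0)
 *		printf("Kernelized RAIDframe activated\n");
 *
 * Any later call returns EBUSY because raidframe_booted is already
 * set; per-array setup then proceeds via rf_Configure().
 */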
170 
171 /*
172  * Called whenever an array is shutdown
173  */
174 static void
175 rf_UnconfigureArray(void)
176 {
177 
178 	RF_LOCK_LKMGR_MUTEX(configureMutex);
179 	if (--configureCount == 0) {	/* if no active configurations, shut
180 					 * everything down */
181 		isconfigged = 0;
182 		rf_ShutdownList(&globalShutdown);
183 
184 		/*
185 	         * We must wait until now, because the AllocList module
186 	         * uses the DebugMem module.
187 	         */
188 #if RF_DEBUG_MEM
189 		if (rf_memDebug)
190 			rf_print_unfreed();
191 #endif
192 	}
193 	RF_UNLOCK_LKMGR_MUTEX(configureMutex);
194 }
195 
196 /*
197  * Called to shut down an array.
198  */
199 int
200 rf_Shutdown(RF_Raid_t *raidPtr)
201 {
202 
203 	if (!raidPtr->valid) {
204 		RF_ERRORMSG("Attempt to shut down unconfigured RAIDframe driver.  Aborting shutdown\n");
205 		return (EINVAL);
206 	}
207 	/*
208          * wait for outstanding IOs to land
209          * As described in rf_raid.h, we use the rad_freelist lock
210          * to protect the per-array info about outstanding descs
211          * since we need to do freelist locking anyway, and this
212          * cuts down on the amount of serialization we've got going
213          * on.
214          */
215 	RF_LOCK_MUTEX(rf_rad_lock);
216 	if (raidPtr->waitShutdown) {
217 		RF_UNLOCK_MUTEX(rf_rad_lock);
218 		return (EBUSY);
219 	}
220 	raidPtr->waitShutdown = 1;
221 	while (raidPtr->nAccOutstanding) {
222 		RF_WAIT_COND(raidPtr->outstandingCond, rf_rad_lock);
223 	}
224 	RF_UNLOCK_MUTEX(rf_rad_lock);
225 
226 	/* Wait for any parity re-writes to stop... */
227 	while (raidPtr->parity_rewrite_in_progress) {
228 		printf("raid%d: Waiting for parity re-write to exit...\n",
229 		       raidPtr->raidid);
230 		tsleep(&raidPtr->parity_rewrite_in_progress, PRIBIO,
231 		       "rfprwshutdown", 0);
232 	}
233 
234 	/* Wait for any reconstruction to stop... */
235 	while (raidPtr->reconInProgress) {
236 		printf("raid%d: Waiting for reconstruction to stop...\n",
237 		       raidPtr->raidid);
238 		tsleep(&raidPtr->waitForReconCond, PRIBIO,
239 		       "rfreshutdown",0);
240 	}
241 
242 	raidPtr->valid = 0;
243 
244 	rf_update_component_labels(raidPtr, RF_FINAL_COMPONENT_UPDATE);
245 
246 	rf_UnconfigureVnodes(raidPtr);
247 
248 	rf_FreeEmergBuffers(raidPtr);
249 
250 	rf_ShutdownList(&raidPtr->shutdownList);
251 
252 	rf_UnconfigureArray();
253 
254 	return (0);
255 }
256 
257 
258 #define DO_INIT_CONFIGURE(f) { \
259 	rc = f (&globalShutdown); \
260 	if (rc) { \
261 		RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
262 		rf_ShutdownList(&globalShutdown); \
263 		configureCount--; \
264 		RF_UNLOCK_LKMGR_MUTEX(configureMutex); \
265 		return(rc); \
266 	} \
267 }
268 
269 #define DO_RAID_FAIL() { \
270 	rf_UnconfigureVnodes(raidPtr); \
271 	rf_FreeEmergBuffers(raidPtr); \
272 	rf_ShutdownList(&raidPtr->shutdownList); \
273 	rf_UnconfigureArray(); \
274 }
275 
276 #define DO_RAID_INIT_CONFIGURE(f) { \
277 	rc = f (&raidPtr->shutdownList, raidPtr, cfgPtr); \
278 	if (rc) { \
279 		RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
280 		DO_RAID_FAIL(); \
281 		return(rc); \
282 	} \
283 }
284 
285 #define DO_RAID_MUTEX(_m_) { \
286 	rf_mutex_init((_m_)); \
287 }
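
/*
 * The DO_INIT_CONFIGURE macro above assumes each module-configuration
 * routine has the shape sketched below (rf_ConfigureRDFreeList(),
 * later in this file, is a real instance; rf_ConfigureFoo and
 * rf_ShutdownFoo are hypothetical names used only for illustration):
 *
 *	static int
 *	rf_ConfigureFoo(RF_ShutdownList_t **listp)
 *	{
 *		... set up module-global state ...
 *		rf_ShutdownCreate(listp, rf_ShutdownFoo, NULL);
 *		return (0);
 *	}
 *
 * A non-zero return aborts rf_Configure(), which then runs the
 * accumulated shutdown list to undo whatever was already configured.
 */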
288 
289 int
290 rf_Configure(RF_Raid_t *raidPtr, RF_Config_t *cfgPtr, RF_AutoConfig_t *ac)
291 {
292 	RF_RowCol_t col;
293 	int rc;
294 
295 	RF_LOCK_LKMGR_MUTEX(configureMutex);
296 	configureCount++;
297 	if (isconfigged == 0) {
298 		rf_mutex_init(&rf_printf_mutex);
299 
300 		/* initialize globals */
301 
302 		DO_INIT_CONFIGURE(rf_ConfigureAllocList);
303 
304 		/*
305 	         * Yes, this does make debugging general to the whole
306 	         * system instead of being array specific. Bummer, drag.
307 		 */
308 		rf_ConfigureDebug(cfgPtr);
309 		DO_INIT_CONFIGURE(rf_ConfigureDebugMem);
310 #if RF_ACC_TRACE > 0
311 		DO_INIT_CONFIGURE(rf_ConfigureAccessTrace);
312 #endif
313 		DO_INIT_CONFIGURE(rf_ConfigureMapModule);
314 		DO_INIT_CONFIGURE(rf_ConfigureReconEvent);
315 		DO_INIT_CONFIGURE(rf_ConfigureCallback);
316 		DO_INIT_CONFIGURE(rf_ConfigureRDFreeList);
317 		DO_INIT_CONFIGURE(rf_ConfigureNWayXor);
318 		DO_INIT_CONFIGURE(rf_ConfigureStripeLockFreeList);
319 		DO_INIT_CONFIGURE(rf_ConfigureMCPair);
320 		DO_INIT_CONFIGURE(rf_ConfigureDAGs);
321 		DO_INIT_CONFIGURE(rf_ConfigureDAGFuncs);
322 		DO_INIT_CONFIGURE(rf_ConfigureReconstruction);
323 		DO_INIT_CONFIGURE(rf_ConfigureCopyback);
324 		DO_INIT_CONFIGURE(rf_ConfigureDiskQueueSystem);
325 		DO_INIT_CONFIGURE(rf_ConfigurePSStatus);
326 		isconfigged = 1;
327 	}
328 	RF_UNLOCK_LKMGR_MUTEX(configureMutex);
329 
330 	DO_RAID_MUTEX(&raidPtr->mutex);
331 	/* set up the cleanup list.  Do this after ConfigureDebug so that
332 	 * the value of memDebug will be set. */
333 
334 	rf_MakeAllocList(raidPtr->cleanupList);
335 	if (raidPtr->cleanupList == NULL) {
336 		DO_RAID_FAIL();
337 		return (ENOMEM);
338 	}
339 	rf_ShutdownCreate(&raidPtr->shutdownList,
340 			  (void (*) (void *)) rf_FreeAllocList,
341 			  raidPtr->cleanupList);
342 
343 	raidPtr->numCol = cfgPtr->numCol;
344 	raidPtr->numSpare = cfgPtr->numSpare;
345 
346 	raidPtr->status = rf_rs_optimal;
347 	raidPtr->reconControl = NULL;
348 
349 	TAILQ_INIT(&(raidPtr->iodone));
350 	simple_lock_init(&(raidPtr->iodone_lock));
351 
352 	DO_RAID_INIT_CONFIGURE(rf_ConfigureEngine);
353 	DO_RAID_INIT_CONFIGURE(rf_ConfigureStripeLocks);
354 
355 	raidPtr->outstandingCond = 0;
356 
357 	raidPtr->nAccOutstanding = 0;
358 	raidPtr->waitShutdown = 0;
359 
360 	DO_RAID_MUTEX(&raidPtr->access_suspend_mutex);
361 
362 	raidPtr->waitForReconCond = 0;
363 
364 	if (ac!=NULL) {
365 		/* We have an AutoConfig structure..  Don't do the
366 		   normal disk configuration... call the auto config
367 		   stuff */
368 		rf_AutoConfigureDisks(raidPtr, cfgPtr, ac);
369 	} else {
370 		DO_RAID_INIT_CONFIGURE(rf_ConfigureDisks);
371 		DO_RAID_INIT_CONFIGURE(rf_ConfigureSpareDisks);
372 	}
373 	/* do this after ConfigureDisks & ConfigureSpareDisks to be sure dev
374 	 * no. is set */
375 	DO_RAID_INIT_CONFIGURE(rf_ConfigureDiskQueues);
376 
377 	DO_RAID_INIT_CONFIGURE(rf_ConfigureLayout);
378 
379 	/* Initialize per-RAID PSS bits */
380 	rf_InitPSStatus(raidPtr);
381 
382 #if RF_INCLUDE_CHAINDECLUSTER > 0
383 	for (col = 0; col < raidPtr->numCol; col++) {
384 		/*
385 		 * XXX better distribution
386 		 */
387 		raidPtr->hist_diskreq[col] = 0;
388 	}
389 #endif
390 	raidPtr->numNewFailures = 0;
391 	raidPtr->copyback_in_progress = 0;
392 	raidPtr->parity_rewrite_in_progress = 0;
393 	raidPtr->adding_hot_spare = 0;
394 	raidPtr->recon_in_progress = 0;
395 	raidPtr->maxOutstanding = cfgPtr->maxOutstandingDiskReqs;
396 
397 	/* autoconfigure and root_partition will actually get filled in
398 	   after the config is done */
399 	raidPtr->autoconfigure = 0;
400 	raidPtr->root_partition = 0;
401 	raidPtr->last_unit = raidPtr->raidid;
402 	raidPtr->config_order = 0;
403 
404 	if (rf_keepAccTotals) {
405 		raidPtr->keep_acc_totals = 1;
406 	}
407 
408 	/* Allocate a bunch of buffers to be used in low-memory conditions */
409 	raidPtr->iobuf = NULL;
410 
411 	rc = rf_AllocEmergBuffers(raidPtr);
412 	if (rc) {
413 		printf("raid%d: Unable to allocate emergency buffers.\n",
414 		       raidPtr->raidid);
415 		DO_RAID_FAIL();
416 		return(rc);
417 	}
418 
419 	raidPtr->valid = 1;
420 
421 	printf("raid%d: %s\n", raidPtr->raidid,
422 	       raidPtr->Layout.map->configName);
423 	printf("raid%d: Components:", raidPtr->raidid);
424 
425 	for (col = 0; col < raidPtr->numCol; col++) {
426 		printf(" %s", raidPtr->Disks[col].devname);
427 		if (RF_DEAD_DISK(raidPtr->Disks[col].status)) {
428 			printf("[**FAILED**]");
429 		}
430 	}
431 	printf("\n");
432 	printf("raid%d: Total Sectors: %" PRIu64 " (%" PRIu64 " MB)\n",
433 	       raidPtr->raidid,
434 	       raidPtr->totalSectors,
435 	       (raidPtr->totalSectors / 1024 *
436 				(1 << raidPtr->logBytesPerSector) / 1024));
437 
438 	return (0);
439 }
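
/*
 * Illustrative call sequence (a sketch only; the real callers live in
 * rf_netbsdkintf.c).  A manual configuration arrives via ioctl and
 * ends up roughly as:
 *
 *	rc = rf_Configure(raidPtr, k_cfg, NULL);
 *
 * while an auto-configured set also passes the RF_AutoConfig_t
 * gathered from the component labels:
 *
 *	rc = rf_Configure(raidPtr, config, ac);
 *
 * In both cases a zero return means raidPtr->valid is set and the
 * array is ready to accept rf_DoAccess() calls.
 */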
440 
441 
442 /*
443 
444   Routines to allocate and free the "emergency buffers" for a given
445   RAID set.  These emergency buffers will be used when the kernel runs
446   out of kernel memory.
447 
448  */
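
/*
 * Sizing example (numbers are illustrative only): with numCol = 5,
 * sectorsPerStripeUnit = 32 and logBytesPerSector = 9 (512-byte
 * sectors), rf_AllocEmergBuffers() below reserves
 *
 *	10 * numCol = 50 I/O buffers of 32 << 9 = 16 KB each    (800 KB)
 *	         10   full-stripe buffers of 5 * 16 KB = 80 KB  (800 KB)
 *
 * i.e. roughly 1.6 MB held back per array for low-memory operation.
 */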
449 
450 static int
451 rf_AllocEmergBuffers(RF_Raid_t *raidPtr)
452 {
453 	void *tmpbuf;
454 	RF_VoidPointerListElem_t *vple;
455 	int i;
456 
457 	/* XXX next line needs tuning... */
458 	raidPtr->numEmergencyBuffers = 10 * raidPtr->numCol;
459 #if DEBUG
460 	printf("raid%d: allocating %d buffers of %d bytes.\n",
461 	       raidPtr->raidid,
462 	       raidPtr->numEmergencyBuffers,
463 	       (int)(raidPtr->Layout.sectorsPerStripeUnit <<
464 	       raidPtr->logBytesPerSector));
465 #endif
466 	for (i = 0; i < raidPtr->numEmergencyBuffers; i++) {
467 		tmpbuf = malloc( raidPtr->Layout.sectorsPerStripeUnit <<
468 				 raidPtr->logBytesPerSector,
469 				 M_RAIDFRAME, M_WAITOK);
470 		if (tmpbuf) {
471 			vple = rf_AllocVPListElem();
472 			vple->p = tmpbuf;
473 			vple->next = raidPtr->iobuf;
474 			raidPtr->iobuf = vple;
475 			raidPtr->iobuf_count++;
476 		} else {
477 			printf("raid%d: failed to allocate emergency buffer!\n",
478 			       raidPtr->raidid);
479 			return 1;
480 		}
481 	}
482 
483 	/* XXX next line needs tuning too... */
484 	raidPtr->numEmergencyStripeBuffers = 10;
485 	for (i = 0; i < raidPtr->numEmergencyStripeBuffers; i++) {
486 		tmpbuf = malloc( raidPtr->numCol * (raidPtr->Layout.sectorsPerStripeUnit <<
487 				 raidPtr->logBytesPerSector),
488 				 M_RAIDFRAME, M_WAITOK);
489 		if (tmpbuf) {
490 			vple = rf_AllocVPListElem();
491 			vple->p = tmpbuf;
492 			vple->next = raidPtr->stripebuf;
493 			raidPtr->stripebuf = vple;
494 			raidPtr->stripebuf_count++;
495 		} else {
496 			printf("raid%d: failed to allocate emergency stripe buffer!\n",
497 			       raidPtr->raidid);
498 			return 1;
499 		}
500 	}
501 
502 	return (0);
503 }
504 
505 static void
506 rf_FreeEmergBuffers(RF_Raid_t *raidPtr)
507 {
508 	RF_VoidPointerListElem_t *tmp;
509 
510 	/* Free the emergency IO buffers */
511 	while (raidPtr->iobuf != NULL) {
512 		tmp = raidPtr->iobuf;
513 		raidPtr->iobuf = raidPtr->iobuf->next;
514 		free(tmp->p, M_RAIDFRAME);
515 		rf_FreeVPListElem(tmp);
516 	}
517 
518 	/* Free the emergency stripe buffers */
519 	while (raidPtr->stripebuf != NULL) {
520 		tmp = raidPtr->stripebuf;
521 		raidPtr->stripebuf = raidPtr->stripebuf->next;
522 		free(tmp->p, M_RAIDFRAME);
523 		rf_FreeVPListElem(tmp);
524 	}
525 }
526 
527 
528 static void
529 rf_ShutdownRDFreeList(void *ignored)
530 {
531 	pool_destroy(&rf_pools.rad);
532 }
533 
534 static int
535 rf_ConfigureRDFreeList(RF_ShutdownList_t **listp)
536 {
537 
538 	rf_pool_init(&rf_pools.rad, sizeof(RF_RaidAccessDesc_t),
539 		     "rf_rad_pl", RF_MIN_FREE_RAD, RF_MAX_FREE_RAD);
540 	rf_ShutdownCreate(listp, rf_ShutdownRDFreeList, NULL);
541 	simple_lock_init(&rf_rad_lock);
542 	return (0);
543 }
544 
545 RF_RaidAccessDesc_t *
546 rf_AllocRaidAccDesc(RF_Raid_t *raidPtr, RF_IoType_t type,
547 		    RF_RaidAddr_t raidAddress, RF_SectorCount_t numBlocks,
548 		    void *bufPtr, void *bp, RF_RaidAccessFlags_t flags,
549 		    const RF_AccessState_t *states)
550 {
551 	RF_RaidAccessDesc_t *desc;
552 
553 	desc = pool_get(&rf_pools.rad, PR_WAITOK);
554 
555 	RF_LOCK_MUTEX(rf_rad_lock);
556 	if (raidPtr->waitShutdown) {
557 		/*
558 	         * Actually, we're shutting the array down. Free the desc
559 	         * and return NULL.
560 	         */
561 
562 		RF_UNLOCK_MUTEX(rf_rad_lock);
563 		pool_put(&rf_pools.rad, desc);
564 		return (NULL);
565 	}
566 	raidPtr->nAccOutstanding++;
567 
568 	RF_UNLOCK_MUTEX(rf_rad_lock);
569 
570 	desc->raidPtr = (void *) raidPtr;
571 	desc->type = type;
572 	desc->raidAddress = raidAddress;
573 	desc->numBlocks = numBlocks;
574 	desc->bufPtr = bufPtr;
575 	desc->bp = bp;
576 	desc->flags = flags;
577 	desc->states = states;
578 	desc->state = 0;
579 	desc->dagList = NULL;
580 
581 	desc->status = 0;
582 	desc->numRetries = 0;
583 #if RF_ACC_TRACE > 0
584 	memset((char *) &desc->tracerec, 0, sizeof(RF_AccTraceEntry_t));
585 #endif
586 	desc->callbackFunc = NULL;
587 	desc->callbackArg = NULL;
588 	desc->next = NULL;
589 	desc->iobufs = NULL;
590 	desc->stripebufs = NULL;
591 
592 	return (desc);
593 }
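
/*
 * Note on usage: every descriptor obtained here must eventually be
 * released with rf_FreeRaidAccDesc() below; that is what decrements
 * nAccOutstanding and, when the array is being shut down, wakes the
 * thread sleeping on outstandingCond in rf_Shutdown().  A typical
 * (sketched) sequence is:
 *
 *	desc = rf_AllocRaidAccDesc(raidPtr, type, addr, nblk,
 *				   buf, bp, flags, states);
 *	if (desc == NULL)
 *		return (ENOMEM);	 (array is shutting down)
 *	rf_ContinueRaidAccess(desc);	 (completion path frees desc)
 */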
594 
595 void
596 rf_FreeRaidAccDesc(RF_RaidAccessDesc_t *desc)
597 {
598 	RF_Raid_t *raidPtr = desc->raidPtr;
599 	RF_DagList_t *dagList, *temp;
600 	RF_VoidPointerListElem_t *tmp;
601 
602 	RF_ASSERT(desc);
603 
604 	/* Cleanup the dagList(s) */
605 	dagList = desc->dagList;
606 	while(dagList != NULL) {
607 		temp = dagList;
608 		dagList = dagList->next;
609 		rf_FreeDAGList(temp);
610 	}
611 
612 	while (desc->iobufs) {
613 		tmp = desc->iobufs;
614 		desc->iobufs = desc->iobufs->next;
615 		rf_FreeIOBuffer(raidPtr, tmp);
616 	}
617 
618 	while (desc->stripebufs) {
619 		tmp = desc->stripebufs;
620 		desc->stripebufs = desc->stripebufs->next;
621 		rf_FreeStripeBuffer(raidPtr, tmp);
622 	}
623 
624 	pool_put(&rf_pools.rad, desc);
625 	RF_LOCK_MUTEX(rf_rad_lock);
626 	raidPtr->nAccOutstanding--;
627 	if (raidPtr->waitShutdown) {
628 		RF_SIGNAL_COND(raidPtr->outstandingCond);
629 	}
630 	RF_UNLOCK_MUTEX(rf_rad_lock);
631 }
632 /*********************************************************************
633  * Main routine for performing an access.
634  * Accesses are retried until a DAG cannot be selected.  This occurs
635  * when either the DAG library is incomplete or there are too many
636  * failures in a parity group.
637  *
638  * type should be read or write.  async_flag should be RF_TRUE or
639  * RF_FALSE.  bp is a buf pointer (void * to facilitate ignoring it
640  * outside the kernel).
641  ********************************************************************/
642 int
643 rf_DoAccess(RF_Raid_t * raidPtr, RF_IoType_t type, int async_flag,
644 	    RF_RaidAddr_t raidAddress, RF_SectorCount_t numBlocks,
645 	    void *bufPtr, struct buf *bp, RF_RaidAccessFlags_t flags)
646 {
647 	RF_RaidAccessDesc_t *desc;
648 	void *lbufPtr = bufPtr;
649 
650 	raidAddress += rf_raidSectorOffset;
651 
652 #if RF_ACCESS_DEBUG
653 	if (rf_accessDebug) {
654 
655 		printf("logBytes is: %d %d %d\n", raidPtr->raidid,
656 		    raidPtr->logBytesPerSector,
657 		    (int) rf_RaidAddressToByte(raidPtr, numBlocks));
658 		printf("raid%d: %s raidAddr %d (stripeid %d-%d) numBlocks %d (%d bytes) buf 0x%lx\n", raidPtr->raidid,
659 		    (type == RF_IO_TYPE_READ) ? "READ" : "WRITE", (int) raidAddress,
660 		    (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress),
661 		    (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress + numBlocks - 1),
662 		    (int) numBlocks,
663 		    (int) rf_RaidAddressToByte(raidPtr, numBlocks),
664 		    (long) bufPtr);
665 	}
666 #endif
667 
668 	desc = rf_AllocRaidAccDesc(raidPtr, type, raidAddress,
669 	    numBlocks, lbufPtr, bp, flags, raidPtr->Layout.map->states);
670 
671 	if (desc == NULL) {
672 		return (ENOMEM);
673 	}
674 #if RF_ACC_TRACE > 0
675 	RF_ETIMER_START(desc->tracerec.tot_timer);
676 #endif
677 	desc->async_flag = async_flag;
678 
679 	rf_ContinueRaidAccess(desc);
680 
681 	return (0);
682 }
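
/*
 * Sketch of how the block-device strategy code drives this entry
 * point (not verbatim; see raidstart() in rf_netbsdkintf.c):
 *
 *	rf_DoAccess(raidPtr,
 *		    (bp->b_flags & B_READ) ? RF_IO_TYPE_READ
 *					   : RF_IO_TYPE_WRITE,
 *		    0, raid_addr, num_blocks,
 *		    bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
 *
 * Note that raidAddress is given in sectors and is offset by
 * rf_raidSectorOffset above before being mapped to stripes.
 */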
683 #if 0
684 /* force the array into reconfigured mode without doing reconstruction */
685 int
686 rf_SetReconfiguredMode(RF_Raid_t *raidPtr, int col)
687 {
688 	if (!(raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
689 		printf("Can't set reconfigured mode in dedicated-spare array\n");
690 		RF_PANIC();
691 	}
692 	RF_LOCK_MUTEX(raidPtr->mutex);
693 	raidPtr->numFailures++;
694 	raidPtr->Disks[col].status = rf_ds_dist_spared;
695 	raidPtr->status = rf_rs_reconfigured;
696 	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
697 	/* install spare table only if declustering + distributed sparing
698 	 * architecture. */
699 	if (raidPtr->Layout.map->flags & RF_BD_DECLUSTERED)
700 		rf_InstallSpareTable(raidPtr, col);
701 	RF_UNLOCK_MUTEX(raidPtr->mutex);
702 	return (0);
703 }
704 #endif
705 
706 int
707 rf_FailDisk(RF_Raid_t *raidPtr, int fcol, int initRecon)
708 {
709 
710 	/* need to suspend IO's here -- if there are DAGs in flight
711 	   and we pull the rug out from under ci_vp, Bad Things
712 	   can happen.  */
713 
714 	rf_SuspendNewRequestsAndWait(raidPtr);
715 
716 	RF_LOCK_MUTEX(raidPtr->mutex);
717 	if (raidPtr->Disks[fcol].status != rf_ds_failed) {
718 		/* must be failing something that is valid, or else it's
719 		   already marked as failed (in which case we don't
720 		   want to mark it failed again!) */
721 		raidPtr->numFailures++;
722 		raidPtr->Disks[fcol].status = rf_ds_failed;
723 		raidPtr->status = rf_rs_degraded;
724 	}
725 	RF_UNLOCK_MUTEX(raidPtr->mutex);
726 
727 	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
728 
729 	/* Close the component, so that it's not "locked" if someone
730 	   else wants to use it! */
731 
732 	rf_close_component(raidPtr, raidPtr->raid_cinfo[fcol].ci_vp,
733 			   raidPtr->Disks[fcol].auto_configured);
734 
735 	RF_LOCK_MUTEX(raidPtr->mutex);
736 	raidPtr->raid_cinfo[fcol].ci_vp = NULL;
737 
738 	/* Need to mark the component as not being auto_configured
739 	   (in case it was previously). */
740 
741 	raidPtr->Disks[fcol].auto_configured = 0;
742 	RF_UNLOCK_MUTEX(raidPtr->mutex);
743 	/* now we can allow IO to continue -- we'll be suspending it
744 	   again in rf_ReconstructFailedDisk() if we have to.. */
745 
746 	rf_ResumeNewRequests(raidPtr);
747 
748 	if (initRecon)
749 		rf_ReconstructFailedDisk(raidPtr, fcol);
750 	return (0);
751 }
752 /* releases a thread that is waiting for the array to become quiesced.
753  * access_suspend_mutex should be locked upon calling this
754  */
755 void
756 rf_SignalQuiescenceLock(RF_Raid_t *raidPtr)
757 {
758 #if RF_DEBUG_QUIESCE
759 	if (rf_quiesceDebug) {
760 		printf("raid%d: Signalling quiescence lock\n",
761 		       raidPtr->raidid);
762 	}
763 #endif
764 	raidPtr->access_suspend_release = 1;
765 
766 	if (raidPtr->waiting_for_quiescence) {
767 		SIGNAL_QUIESCENT_COND(raidPtr);
768 	}
769 }
770 /* suspends all new requests to the array.  No effect on accesses that are in flight.  */
771 int
772 rf_SuspendNewRequestsAndWait(RF_Raid_t *raidPtr)
773 {
774 #if RF_DEBUG_QUIESCE
775 	if (rf_quiesceDebug)
776 		printf("raid%d: Suspending new reqs\n", raidPtr->raidid);
777 #endif
778 	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
779 	raidPtr->accesses_suspended++;
780 	raidPtr->waiting_for_quiescence = (raidPtr->accs_in_flight == 0) ? 0 : 1;
781 
782 	if (raidPtr->waiting_for_quiescence) {
783 		raidPtr->access_suspend_release = 0;
784 		while (!raidPtr->access_suspend_release) {
785 #if RF_DEBUG_QUIESCE
786 			printf("raid%d: Suspending: Waiting for Quiescence\n",
787 			       raidPtr->raidid);
788 #endif
789 			WAIT_FOR_QUIESCENCE(raidPtr);
790 			raidPtr->waiting_for_quiescence = 0;
791 		}
792 	}
793 #if RF_DEBUG_QUIESCE
794 	printf("raid%d: Quiescence reached..\n", raidPtr->raidid);
795 #endif
796 
797 	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);
798 	return (raidPtr->waiting_for_quiescence);
799 }
800 /* wake up everyone waiting for quiescence to be released */
801 void
802 rf_ResumeNewRequests(RF_Raid_t *raidPtr)
803 {
804 	RF_CallbackDesc_t *t, *cb;
805 
806 #if RF_DEBUG_QUIESCE
807 	if (rf_quiesceDebug)
808 		printf("raid%d: Resuming new requests\n", raidPtr->raidid);
809 #endif
810 
811 	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
812 	raidPtr->accesses_suspended--;
813 	if (raidPtr->accesses_suspended == 0)
814 		cb = raidPtr->quiesce_wait_list;
815 	else
816 		cb = NULL;
817 	raidPtr->quiesce_wait_list = NULL;
818 	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);
819 
820 	while (cb) {
821 		t = cb;
822 		cb = cb->next;
823 		(t->callbackFunc) (t->callbackArg);
824 		rf_FreeCallbackDesc(t);
825 	}
826 }
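
/*
 * The two routines above are used as a bracket around operations that
 * must not race with new I/O; rf_FailDisk() earlier in this file is a
 * concrete example.  In outline:
 *
 *	rf_SuspendNewRequestsAndWait(raidPtr);
 *	... modify per-array state that in-flight DAGs must not see ...
 *	rf_ResumeNewRequests(raidPtr);
 */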
827 /*****************************************************************************************
828  *
829  * debug routines
830  *
831  ****************************************************************************************/
832 
833 static void
834 set_debug_option(char *name, long val)
835 {
836 	RF_DebugName_t *p;
837 
838 	for (p = rf_debugNames; p->name; p++) {
839 		if (!strcmp(p->name, name)) {
840 			*(p->ptr) = val;
841 			printf("[Set debug variable %s to %ld]\n", name, val);
842 			return;
843 		}
844 	}
845 	RF_ERRORMSG1("Unknown debug string \"%s\"\n", name);
846 }
847 
848 
849 /* would like to use sscanf here, but apparently not available in kernel */
850 /*ARGSUSED*/
851 static void
852 rf_ConfigureDebug(RF_Config_t *cfgPtr)
853 {
854 	char   *val_p, *name_p, *white_p;
855 	long    val;
856 	int     i;
857 
858 	rf_ResetDebugOptions();
859 	for (i = 0; i < RF_MAXDBGV && cfgPtr->debugVars[i][0]; i++) {
860 		name_p = rf_find_non_white(&cfgPtr->debugVars[i][0]);
861 		white_p = rf_find_white(name_p);	/* skip to start of 2nd
862 							 * word */
863 		val_p = rf_find_non_white(white_p);
864 		if (*val_p == '0' && *(val_p + 1) == 'x')
865 			val = rf_htoi(val_p + 2);
866 		else
867 			val = rf_atoi(val_p);
868 		*white_p = '\0';
869 		set_debug_option(name_p, val);
870 	}
871 }
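
/*
 * Each cfgPtr->debugVars[] entry parsed above is a whitespace-separated
 * "name value" pair; the value may be decimal or 0x-prefixed hex.
 * For example (names taken from rf_optnames.h; shown only as an
 * illustration):
 *
 *	"accessDebug 1"
 *	"memDebug 0x1"
 */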
872 
873 void
874 rf_print_panic_message(int line, const char *file)
875 {
876 	snprintf(rf_panicbuf, sizeof(rf_panicbuf),
877 	    "raidframe error at line %d file %s", line, file);
878 }
879 
880 #ifdef RAID_DIAGNOSTIC
881 void
882 rf_print_assert_panic_message(int line,	const char *file, const char *condition)
883 {
884 	snprintf(rf_panicbuf, sizeof(rf_panicbuf),
885 		"raidframe error at line %d file %s (failed asserting %s)\n",
886 		line, file, condition);
887 }
888 #endif
889 
890 void
891 rf_print_unable_to_init_mutex(const char *file, int line, int rc)
892 {
893 	RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
894 		     file, line, rc);
895 }
896 
897 void
898 rf_print_unable_to_add_shutdown(const char *file, int line, int rc)
899 {
900 	RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n",
901 		     file, line, rc);
902 }
903