/*	$NetBSD: rf_revent.c,v 1.18 2004/03/07 22:15:19 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author:
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * rf_revent.c -- reconstruction event handling code
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_revent.c,v 1.18 2004/03/07 22:15:19 oster Exp $");

#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/kernel.h>

#include "rf_raid.h"
#include "rf_revent.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_desc.h"
#include "rf_shutdown.h"

#define RF_MAX_FREE_REVENT 128
#define RF_MIN_FREE_REVENT  32

static void rf_ShutdownReconEvent(void *);

static RF_ReconEvent_t *
GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type);

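/* shutdown hook: releases the recon event descriptor pool */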
static void
rf_ShutdownReconEvent(void *ignored)
{
	pool_destroy(&rf_pools.revent);
}

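/* creates the pool of recon event descriptors and registers the shutdown
 * hook that tears it down */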
int
rf_ConfigureReconEvent(RF_ShutdownList_t **listp)
{

	rf_pool_init(&rf_pools.revent, sizeof(RF_ReconEvent_t),
		     "rf_revent_pl", RF_MIN_FREE_REVENT, RF_MAX_FREE_REVENT);
	rf_ShutdownCreate(listp, rf_ShutdownReconEvent, NULL);

	return (0);
}

/* returns the next reconstruction event, blocking the calling thread
 * until one becomes available.  The continueFunc/continueArg pair is
 * recorded in the recon control structure before the thread sleeps. */

RF_ReconEvent_t *
rf_GetNextReconEvent(RF_RaidReconDesc_t *reconDesc,
		     void (*continueFunc)(void *), void *continueArg)
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
	RF_ReconEvent_t *event;

	RF_LOCK_MUTEX(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));

	rctrl->continueFunc = continueFunc;
	rctrl->continueArg = continueArg;

	/* ltsleep timeout value: secs = timo_val/hz.  The 'ticks'
	   accumulated in reconExecTicks below are microseconds of
	   cycle-counter time (via RF_ETIMER_VAL_US), not softclock ticks */

#define MAX_RECON_EXEC_USECS (100 * 1000)  /* 100 ms */
#define RECON_DELAY_MS 25
#define RECON_TIMO     ((RECON_DELAY_MS * hz) / 1000)
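
	/* For example, with hz = 100 this gives RECON_TIMO =
	 * (25 * 100) / 1000 = 2 softclock ticks (about 20 ms); with
	 * hz = 1000 it is 25 ticks (25 ms).  The exact value depends on
	 * the kernel's HZ configuration. */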

	/* we are not pre-emptible in the kernel, but we don't want to run
	 * forever.  If we run w/o blocking for more than MAX_RECON_EXEC_USECS
	 * microseconds of cycle-counter time, delay for RECON_DELAY_MS before
	 * continuing.  This may murder us with context switches, so we may
	 * need to increase both MAX_RECON_EXEC_USECS and RECON_DELAY_MS. */
	if (reconDesc->reconExecTimerRunning) {
		int     status;

		RF_ETIMER_STOP(reconDesc->recon_exec_timer);
		RF_ETIMER_EVAL(reconDesc->recon_exec_timer);
		reconDesc->reconExecTicks +=
			RF_ETIMER_VAL_US(reconDesc->recon_exec_timer);
		if (reconDesc->reconExecTicks > reconDesc->maxReconExecTicks)
			reconDesc->maxReconExecTicks =
				reconDesc->reconExecTicks;
		if (reconDesc->reconExecTicks >= MAX_RECON_EXEC_USECS) {
			/* we've been running too long.  delay for
			 * RECON_DELAY_MS */
#if RF_RECON_STATS > 0
			reconDesc->numReconExecDelays++;
#endif				/* RF_RECON_STATS > 0 */

			status = ltsleep(&reconDesc->reconExecTicks, PRIBIO,
					 "recon delay", RECON_TIMO,
					 &rctrl->eq_mutex);
			RF_ASSERT(status == EWOULDBLOCK);
			reconDesc->reconExecTicks = 0;
		}
	}
	while (!rctrl->eventQueue) {
#if RF_RECON_STATS > 0
		reconDesc->numReconEventWaits++;
#endif				/* RF_RECON_STATS > 0 */

		ltsleep(&rctrl->eventQueue, PRIBIO, "raidframe eventq",
			0, &rctrl->eq_mutex);

		reconDesc->reconExecTicks = 0;	/* we've just waited */
	}

	reconDesc->reconExecTimerRunning = 1;
	if (RF_ETIMER_VAL_US(reconDesc->recon_exec_timer) != 0) {
		/* it moved!!  reset the timer. */
		RF_ETIMER_START(reconDesc->recon_exec_timer);
	}
	event = rctrl->eventQueue;
	rctrl->eventQueue = event->next;
	event->next = NULL;
	rctrl->eq_count--;

	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);
	return (event);
}
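
/* A rough sketch of the consumer side, for orientation only -- the real
 * event loop lives in rf_reconstruct.c and dispatches on many more event
 * types than are shown here:
 *
 *	RF_ReconEvent_t *ev;
 *
 *	for (;;) {
 *		ev = rf_GetNextReconEvent(reconDesc, continueFunc, continueArg);
 *		... dispatch on ev->type, using ev->col and ev->arg ...
 *		rf_FreeReconEventDesc(ev);
 *	}
 */
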
/* enqueues a reconstruction event of the given type on the indicated
 * array's event queue and wakes up anyone waiting for one */
void
rf_CauseReconEvent(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg,
		   RF_Revent_t type)
{
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
	RF_ReconEvent_t *event = GetReconEventDesc(col, arg, type);

	if (type == RF_REVENT_BUFCLEAR) {
		RF_ASSERT(col != rctrl->fcol);
	}
	RF_ASSERT(col >= 0 && col <= raidPtr->numCol);
	RF_LOCK_MUTEX(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	event->next = rctrl->eventQueue;
	rctrl->eventQueue = event;
	rctrl->eq_count++;
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);

	wakeup(&rctrl->eventQueue);
}
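
/* Producer side, for reference: I/O completion and buffer-management code
 * elsewhere in RAIDframe posts events with a call along the lines of
 *
 *	rf_CauseReconEvent(raidPtr, col, (void *) rbuf, RF_REVENT_BUFCLEAR);
 *
 * (the 'rbuf' argument here is only illustrative), which wakes the
 * reconstruction thread sleeping in rf_GetNextReconEvent() above.
 */
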
/* allocates and initializes a recon event descriptor */
static RF_ReconEvent_t *
GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type)
{
	RF_ReconEvent_t *t;

	t = pool_get(&rf_pools.revent, PR_WAITOK);
	t->col = col;
	t->arg = arg;
	t->type = type;
	t->next = NULL;
	return (t);
}

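/* returns a recon event descriptor to the pool */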
void
rf_FreeReconEventDesc(RF_ReconEvent_t *event)
{
	pool_put(&rf_pools.revent, event);
}