/*	$NetBSD: rf_revent.c,v 1.9 2000/09/21 01:45:46 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author:
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * revent.c -- reconstruction event handling code
 */

#include <sys/errno.h>

#include "rf_raid.h"
#include "rf_revent.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_freelist.h"
#include "rf_desc.h"
#include "rf_shutdown.h"

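/*
 * Reconstruction event descriptors are drawn from a private freelist.
 * The constants below bound it: RF_MAX_FREE_REVENT caps the number of
 * descriptors kept free, RF_REVENT_INC is the growth increment, and
 * RF_REVENT_INITIAL is the number primed at configuration time.
 */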
static RF_FreeList_t *rf_revent_freelist;
#define RF_MAX_FREE_REVENT 128
#define RF_REVENT_INC        8
#define RF_REVENT_INITIAL    8



#include <sys/proc.h>
#include <sys/kernel.h>

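/*
 * DO_WAIT() blocks until rf_CauseReconEvent() posts something to the
 * per-row event queue; DO_SIGNAL() is the matching wakeup.  The caller
 * must hold eq_mutex: ltsleep() drops that interlock while asleep and
 * reacquires it before returning.
 */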
#define DO_WAIT(_rc)  \
	ltsleep(&(_rc)->eventQueue, PRIBIO,  "raidframe eventq", \
		0, &((_rc)->eq_mutex))

#define DO_SIGNAL(_rc)     wakeup(&(_rc)->eventQueue)


static void rf_ShutdownReconEvent(void *);

static RF_ReconEvent_t *
GetReconEventDesc(RF_RowCol_t row, RF_RowCol_t col,
    void *arg, RF_Revent_t type);

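/*
 * Shutdown hook: release the event descriptor freelist.
 */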
static void rf_ShutdownReconEvent(ignored)
	void   *ignored;
{
	RF_FREELIST_DESTROY(rf_revent_freelist, next, (RF_ReconEvent_t *));
}

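/*
 * Create and prime the event descriptor freelist and register the
 * shutdown hook above.  Returns 0 on success, ENOMEM if the freelist
 * cannot be created, or the error from rf_ShutdownCreate().
 */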
int
rf_ConfigureReconEvent(listp)
	RF_ShutdownList_t **listp;
{
	int     rc;

	RF_FREELIST_CREATE(rf_revent_freelist, RF_MAX_FREE_REVENT,
	    RF_REVENT_INC, sizeof(RF_ReconEvent_t));
	if (rf_revent_freelist == NULL)
		return (ENOMEM);
	rc = rf_ShutdownCreate(listp, rf_ShutdownReconEvent, NULL);
	if (rc) {
		RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_ShutdownReconEvent(NULL);
		return (rc);
	}
	RF_FREELIST_PRIME(rf_revent_freelist, RF_REVENT_INITIAL, next,
	    (RF_ReconEvent_t *));
	return (0);
}

/* returns the next reconstruction event for the indicated row, blocking
 * the calling thread until one becomes available.  An event is always
 * returned; the caller never sees NULL.  continueFunc and continueArg
 * are simply recorded in the recon control structure. */
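
/*
 * Illustrative sketch only (the real callers live elsewhere in the
 * reconstruction code, e.g. rf_reconstruct.c): a reconstruction thread
 * typically sits in a loop of the form
 *
 *	for (;;) {
 *		event = rf_GetNextReconEvent(reconDesc, row, func, arg);
 *		... dispatch on event->type ...
 *		rf_FreeReconEventDesc(event);
 *	}
 *
 * while other threads post work to it with rf_CauseReconEvent().
 */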

RF_ReconEvent_t *
rf_GetNextReconEvent(reconDesc, row, continueFunc, continueArg)
	RF_RaidReconDesc_t *reconDesc;
	RF_RowCol_t row;
	void    (*continueFunc) (void *);
	void   *continueArg;
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl[row];
	RF_ReconEvent_t *event;

	RF_ASSERT(row >= 0 && row <= raidPtr->numRow);
	RF_LOCK_MUTEX(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));

	rctrl->continueFunc = continueFunc;
	rctrl->continueArg = continueArg;


	/* ltsleep() timeout: the timo argument is in softclock ticks
	 * (secs = timo/hz); RECON_TIMO below converts RECON_DELAY_MS to
	 * softclock ticks.  The reconExecTicks accounting, by contrast,
	 * is kept in microseconds of measured execution time. */

#define MAX_RECON_EXEC_USECS (100 * 1000)  /* 100 ms */
#define RECON_DELAY_MS 25
#define RECON_TIMO     ((RECON_DELAY_MS * hz) / 1000)

	/* We are not pre-emptible in the kernel, but we don't want to run
	 * forever.  If we run without blocking for more than
	 * MAX_RECON_EXEC_USECS microseconds of measured execution time,
	 * sleep for RECON_DELAY_MS before continuing.  This may murder us
	 * with context switches, so we may need to increase both
	 * MAX_RECON_EXEC_USECS and RECON_DELAY_MS. */
	if (reconDesc->reconExecTimerRunning) {
		int     status;

		RF_ETIMER_STOP(reconDesc->recon_exec_timer);
		RF_ETIMER_EVAL(reconDesc->recon_exec_timer);
		reconDesc->reconExecTicks +=
			RF_ETIMER_VAL_US(reconDesc->recon_exec_timer);
		if (reconDesc->reconExecTicks > reconDesc->maxReconExecTicks)
			reconDesc->maxReconExecTicks =
				reconDesc->reconExecTicks;
		if (reconDesc->reconExecTicks >= MAX_RECON_EXEC_USECS) {
			/* we've been running too long.  delay for
			 * RECON_DELAY_MS */
#if RF_RECON_STATS > 0
			reconDesc->numReconExecDelays++;
#endif				/* RF_RECON_STATS > 0 */

			status = ltsleep(&reconDesc->reconExecTicks, PRIBIO,
					 "recon delay", RECON_TIMO,
					 &rctrl->eq_mutex);
			RF_ASSERT(status == EWOULDBLOCK);
			reconDesc->reconExecTicks = 0;
		}
	}
	while (!rctrl->eventQueue) {
#if RF_RECON_STATS > 0
		reconDesc->numReconEventWaits++;
#endif				/* RF_RECON_STATS > 0 */
		DO_WAIT(rctrl);
		reconDesc->reconExecTicks = 0;	/* we've just waited */
	}

	reconDesc->reconExecTimerRunning = 1;
	if (RF_ETIMER_VAL_US(reconDesc->recon_exec_timer) != 0) {
		/* the timer has accumulated time; restart it so that
		 * the next interval we measure begins here */
		RF_ETIMER_START(reconDesc->recon_exec_timer);
	}
	event = rctrl->eventQueue;
	rctrl->eventQueue = event->next;
	event->next = NULL;
	rctrl->eq_count--;

	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);
	return (event);
}
/* enqueues a reconstruction event on the indicated queue and wakes up
 * any thread sleeping in rf_GetNextReconEvent() */
void
rf_CauseReconEvent(raidPtr, row, col, arg, type)
	RF_Raid_t *raidPtr;
	RF_RowCol_t row;
	RF_RowCol_t col;
	void   *arg;
	RF_Revent_t type;
{
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl[row];
	RF_ReconEvent_t *event = GetReconEventDesc(row, col, arg, type);

	if (type == RF_REVENT_BUFCLEAR) {
		RF_ASSERT(col != rctrl->fcol);
	}
	RF_ASSERT(row >= 0 && row <= raidPtr->numRow && col >= 0 && col <= raidPtr->numCol);
	RF_LOCK_MUTEX(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	event->next = rctrl->eventQueue;
	rctrl->eventQueue = event;
	rctrl->eq_count++;
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);

	DO_SIGNAL(rctrl);
}
/* allocates and initializes a recon event descriptor from the freelist;
 * returns NULL if no descriptor can be obtained.  The row argument is
 * accepted but not stored. */
static RF_ReconEvent_t *
GetReconEventDesc(row, col, arg, type)
	RF_RowCol_t row;
	RF_RowCol_t col;
	void   *arg;
	RF_Revent_t type;
{
	RF_ReconEvent_t *t;

	RF_FREELIST_GET(rf_revent_freelist, t, next, (RF_ReconEvent_t *));
	if (t == NULL)
		return (NULL);
	t->col = col;
	t->arg = arg;
	t->type = type;
	return (t);
}

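/*
 * Return an event descriptor to the freelist.
 */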
void
rf_FreeReconEventDesc(event)
	RF_ReconEvent_t *event;
{
	RF_FREELIST_FREE(rf_revent_freelist, event, next);
}