xref: /netbsd-src/sys/dev/raidframe/rf_revent.c (revision d20841bb642898112fe68f0ad3f7b26dddf56f07)
1 /*	$NetBSD: rf_revent.c,v 1.14 2003/12/30 21:59:03 oster Exp $	*/
2 /*
3  * Copyright (c) 1995 Carnegie-Mellon University.
4  * All rights reserved.
5  *
6  * Author:
7  *
8  * Permission to use, copy, modify and distribute this software and
9  * its documentation is hereby granted, provided that both the copyright
10  * notice and this permission notice appear in all copies of the
11  * software, derivative works or modified versions, and any portions
12  * thereof, and that both notices appear in supporting documentation.
13  *
14  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17  *
18  * Carnegie Mellon requests users of this software to return to
19  *
20  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
21  *  School of Computer Science
22  *  Carnegie Mellon University
23  *  Pittsburgh PA 15213-3890
24  *
25  * any improvements or extensions that they make and grant Carnegie the
26  * rights to redistribute these changes.
27  */
28 /*
29  * revent.c -- reconstruction event handling code
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: rf_revent.c,v 1.14 2003/12/30 21:59:03 oster Exp $");
34 
35 #include <sys/errno.h>
36 
37 #include "rf_raid.h"
38 #include "rf_revent.h"
39 #include "rf_etimer.h"
40 #include "rf_general.h"
41 #include "rf_desc.h"
42 #include "rf_shutdown.h"
43 
44 static struct pool rf_revent_pool;
45 #define RF_MAX_FREE_REVENT 128
46 #define RF_REVENT_INC        8
47 #define RF_REVENT_INITIAL    8
48 
49 
50 
51 #include <sys/proc.h>
52 #include <sys/kernel.h>
53 
54 #define DO_WAIT(_rc)  \
55 	ltsleep(&(_rc)->eventQueue, PRIBIO,  "raidframe eventq", \
56 		0, &((_rc)->eq_mutex))
57 
58 #define DO_SIGNAL(_rc)     wakeup(&(_rc)->eventQueue)
59 
60 
61 static void rf_ShutdownReconEvent(void *);
62 
63 static RF_ReconEvent_t *
64 GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type);
65 
/* Shutdown hook (registered in rf_ConfigureReconEvent): destroy the
 * event-descriptor pool.  The argument is unused. */
static void rf_ShutdownReconEvent(void *ignored)
{
	pool_destroy(&rf_revent_pool);
}
70 
71 int
72 rf_ConfigureReconEvent(RF_ShutdownList_t **listp)
73 {
74 	int     rc;
75 
76 	pool_init(&rf_revent_pool, sizeof(RF_ReconEvent_t),
77 		  0, 0, 0, "rf_revent_pl", NULL);
78 	pool_sethiwat(&rf_revent_pool, RF_MAX_FREE_REVENT);
79 	pool_prime(&rf_revent_pool, RF_REVENT_INITIAL);
80 
81 	rc = rf_ShutdownCreate(listp, rf_ShutdownReconEvent, NULL);
82 	if (rc) {
83 		rf_print_unable_to_add_shutdown(__FILE__, __LINE__, rc);
84 		rf_ShutdownReconEvent(NULL);
85 		return (rc);
86 	}
87 
88 	return (0);
89 }
90 
/*
 * Return the next reconstruction event, blocking the calling thread
 * until one becomes available.  continueFunc/continueArg are only
 * recorded in the recon control block here, not invoked -- presumably
 * they are used by the code that restarts a stalled reconstruction
 * (defined elsewhere; confirm against the callers of continueFunc).
 */

RF_ReconEvent_t *
rf_GetNextReconEvent(RF_RaidReconDesc_t *reconDesc,
		     void (*continueFunc)(void *), void *continueArg)
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
	RF_ReconEvent_t *event;

	RF_LOCK_MUTEX(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));

	rctrl->continueFunc = continueFunc;
	rctrl->continueArg = continueArg;


	/* RECON_TIMO converts RECON_DELAY_MS into softclock ticks for
	   ltsleep().  Note that despite the "Ticks" spelling in the
	   field names below, reconExecTicks accumulates *microseconds*
	   as reported by RF_ETIMER_VAL_US(). */

#define MAX_RECON_EXEC_USECS (100 * 1000)  /* 100 ms */
#define RECON_DELAY_MS 25
#define RECON_TIMO     ((RECON_DELAY_MS * hz) / 1000)

	/* we are not pre-emptible in the kernel, but we don't want to run
	 * forever.  If we have executed without blocking for more than
	 * MAX_RECON_EXEC_USECS microseconds of measured time, sleep for
	 * RECON_DELAY_MS before continuing.  this may murder us with
	 * context switches, so we may need to increase both
	 * MAX_RECON_EXEC_USECS and RECON_DELAY_MS. */
	if (reconDesc->reconExecTimerRunning) {
		int     status;

		/* fold in the time spent executing since we last blocked */
		RF_ETIMER_STOP(reconDesc->recon_exec_timer);
		RF_ETIMER_EVAL(reconDesc->recon_exec_timer);
		reconDesc->reconExecTicks +=
			RF_ETIMER_VAL_US(reconDesc->recon_exec_timer);
		if (reconDesc->reconExecTicks > reconDesc->maxReconExecTicks)
			reconDesc->maxReconExecTicks =
				reconDesc->reconExecTicks;
		if (reconDesc->reconExecTicks >= MAX_RECON_EXEC_USECS) {
			/* we've been running too long.  delay for
			 * RECON_DELAY_MS */
#if RF_RECON_STATS > 0
			reconDesc->numReconExecDelays++;
#endif				/* RF_RECON_STATS > 0 */

			/* nothing ever wakes this channel, so the sleep
			 * always runs to its timeout -- hence the
			 * EWOULDBLOCK assertion.  eq_mutex is dropped and
			 * re-taken by ltsleep(). */
			status = ltsleep(&reconDesc->reconExecTicks, PRIBIO,
					 "recon delay", RECON_TIMO,
					 &rctrl->eq_mutex);
			RF_ASSERT(status == EWOULDBLOCK);
			reconDesc->reconExecTicks = 0;
		}
	}
	/* sleep (atomically dropping eq_mutex inside DO_WAIT/ltsleep)
	 * until rf_CauseReconEvent() queues an event and signals us */
	while (!rctrl->eventQueue) {
#if RF_RECON_STATS > 0
		reconDesc->numReconEventWaits++;
#endif				/* RF_RECON_STATS > 0 */
		DO_WAIT(rctrl);
		reconDesc->reconExecTicks = 0;	/* we've just waited */
	}

	reconDesc->reconExecTimerRunning = 1;
	if (RF_ETIMER_VAL_US(reconDesc->recon_exec_timer)!=0) {
		/* it moved!!  reset the timer. */
		RF_ETIMER_START(reconDesc->recon_exec_timer);
	}
	/* pop the head of the event queue (events are stacked LIFO by
	 * rf_CauseReconEvent()) */
	event = rctrl->eventQueue;
	rctrl->eventQueue = event->next;
	event->next = NULL;
	rctrl->eq_count--;

	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);
	return (event);
}
/*
 * Enqueue a reconstruction event of the given type on raidPtr's recon
 * control block and wake any thread blocked in rf_GetNextReconEvent().
 * May sleep: the descriptor is allocated with PR_WAITOK.
 */
void
rf_CauseReconEvent(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg,
		   RF_Revent_t type)
{
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
	RF_ReconEvent_t *event = GetReconEventDesc(col, arg, type);

	if (type == RF_REVENT_BUFCLEAR) {
		/* a buffer-clear event can't come from the failed column */
		RF_ASSERT(col != rctrl->fcol);
	}
	RF_ASSERT(col >= 0 && col <= raidPtr->numCol);
	RF_LOCK_MUTEX(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	/* push onto the head of the queue: events are delivered LIFO */
	event->next = rctrl->eventQueue;
	rctrl->eventQueue = event;
	rctrl->eq_count++;
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);

	/* signalling after dropping eq_mutex is safe: the consumer
	 * re-checks eventQueue while holding the mutex, and ltsleep()
	 * releases the mutex atomically with going to sleep, so this
	 * enqueue cannot be missed */
	DO_SIGNAL(rctrl);
}
192 /* allocates and initializes a recon event descriptor */
193 static RF_ReconEvent_t *
194 GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type)
195 {
196 	RF_ReconEvent_t *t;
197 
198 	t = pool_get(&rf_revent_pool, PR_WAITOK);
199 	if (t == NULL)
200 		return (NULL);
201 	t->col = col;
202 	t->arg = arg;
203 	t->type = type;
204 	t->next = NULL;
205 	return (t);
206 }
207 
/* Return an event descriptor (obtained via GetReconEventDesc()) to
 * the pool. */
void
rf_FreeReconEventDesc(RF_ReconEvent_t *event)
{
	pool_put(&rf_revent_pool, event);
}
213