xref: /netbsd-src/sys/rump/librump/rumpkern/intr.c (revision 10ad5ffa714ce1a679dcc9dd8159648df2d67b5a)
1 /*	$NetBSD: intr.c,v 1.17 2009/04/26 20:44:50 pooka Exp $	*/
2 
3 /*
4  * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.17 2009/04/26 20:44:50 pooka Exp $");
30 
31 #include <sys/param.h>
32 #include <sys/cpu.h>
33 #include <sys/kmem.h>
34 #include <sys/kthread.h>
35 #include <sys/intr.h>
36 
37 #include <rump/rumpuser.h>
38 
39 #include "rump_private.h"
40 
41 /*
42  * Interrupt simulator.  It executes hardclock() and softintrs.
43  */
44 
/* Seconds since boot; incremented once per hz ticks by the clock thread. */
time_t time_uptime = 0;

/*
 * One established soft interrupt handler.  Created by
 * softint_establish() and linked onto si_pending while scheduled.
 */
struct softint {
	void (*si_func)(void *);	/* handler function */
	void *si_arg;			/* opaque handler argument */
	bool si_onlist;			/* currently queued on si_pending? */
	bool si_mpsafe;			/* run without the big kernel lock */

	LIST_ENTRY(softint) si_entries;	/* linkage on si_pending */
};
/* Pending softints and the lock/cv protecting the list and counters. */
static LIST_HEAD(, softint) si_pending = LIST_HEAD_INITIALIZER(si_pending);
static kmutex_t si_mtx;
static kcondvar_t si_cv;

#define INTRTHREAD_DEFAULT	2
#define INTRTHREAD_MAX		20
/* Worker thread accounting; both protected by si_mtx. */
static int wrkidle, wrktotal;

static void sithread(void *);
64 
65 static void
66 makeworker(bool bootstrap)
67 {
68 	int rv;
69 
70 	if (wrktotal > INTRTHREAD_MAX) {
71 		/* XXX: ratecheck */
72 		printf("maximum interrupt threads (%d) reached\n",
73 		    INTRTHREAD_MAX);
74 		return;
75 	}
76 	rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_INTR, NULL,
77 	    sithread, NULL, NULL, "rumpsi");
78 	if (rv) {
79 		if (bootstrap)
80 			panic("intr thread creation failed %d", rv);
81 		else
82 			printf("intr thread creation failed %d\n", rv);
83 	} else {
84 		wrkidle++;
85 		wrktotal++;
86 	}
87 }
88 
/* rumpuser structures since we call rumpuser interfaces directly */
static struct rumpuser_cv *clockcv;
static struct rumpuser_mtx *clockmtx;
/* clockbase: host wall clock at startup; clockup: uptime since then. */
static struct timespec clockbase, clockup;
/*
 * Seqlock-style generation counter for clockup: the clock thread
 * increments it before and after each update, so an odd value means
 * a write is in progress (see doclock() and rump_getuptime()).
 */
static unsigned clkgen;
94 
/*
 * Return a consistent snapshot of the uptime in *ts.
 *
 * Lockless seqlock-style read: the clock thread bumps clkgen both
 * before and after updating clockup, so a snapshot is valid only if
 * the generation was even and unchanged across the copy.  Retry
 * otherwise, yielding periodically so the writer can make progress.
 */
void
rump_getuptime(struct timespec *ts)
{
	int startgen, i = 0;

	do {
		startgen = clkgen;
		/* After ~10 failed attempts, yield to let the writer run. */
		if (__predict_false(i++ > 10)) {
			yield();
			i = 0;
		}
		*ts = clockup;
	} while (startgen != clkgen || clkgen % 2 != 0);
}
109 
110 void
111 rump_gettime(struct timespec *ts)
112 {
113 	struct timespec ts_up;
114 
115 	rump_getuptime(&ts_up);
116 	timespecadd(&clockbase, &ts_up, ts);
117 }
118 
119 /*
120  * clock "interrupt"
121  */
/*
 * Body of the clock thread: the rump stand-in for hardclock().
 * Drives callout processing once per tick and maintains time_uptime
 * and clockup (and thereby rump_getuptime()/rump_gettime()).
 * Never returns.
 */
static void
doclock(void *noarg)
{
	struct timespec tick, curtime;
	uint64_t sec, nsec;
	int ticks = 0, error;
	extern int hz;

	/* Record the host time at startup as the wall-clock base. */
	rumpuser_gettime(&sec, &nsec, &error);
	clockbase.tv_sec = sec;
	clockbase.tv_nsec = nsec;
	curtime = clockbase;
	tick.tv_sec = 0;
	tick.tv_nsec = 1000000000/hz;

	/* Notify softint_init() that the clock is up (it holds clockmtx). */
	rumpuser_mutex_enter(clockmtx);
	rumpuser_cv_signal(clockcv);

	for (;;) {
		callout_hardclock();

		/* Bump time_uptime once every hz ticks, i.e. once a second. */
		if (++ticks == hz) {
			time_uptime++;
			ticks = 0;
		}

		/* wait until the next tick. XXX: what if the clock changes? */
		while (rumpuser_cv_timedwait(clockcv, clockmtx,
		    &curtime) != EWOULDBLOCK)
			continue;

		/*
		 * Advance the uptime.  clkgen is odd while the update
		 * is in flight so rump_getuptime() can detect and
		 * retry a torn read of clockup.
		 */
		clkgen++;
		timespecadd(&clockup, &tick, &clockup);
		clkgen++;
		/* Absolute deadline of the next tick for the timedwait. */
		timespecadd(&clockup, &clockbase, &curtime);
	}
}
159 
160 /*
161  * run a scheduled soft interrupt
162  */
/*
 * Body of a softint worker thread: repeatedly take the first entry
 * off si_pending and execute its handler.  Never returns.
 */
static void
sithread(void *arg)
{
	struct softint *si;
	void (*func)(void *) = NULL;
	void *funarg;
	bool mpsafe;

	mutex_enter(&si_mtx);
	for (;;) {
		if (!LIST_EMPTY(&si_pending)) {
			/* Snapshot the handler while holding si_mtx. */
			si = LIST_FIRST(&si_pending);
			func = si->si_func;
			funarg = si->si_arg;
			mpsafe = si->si_mpsafe;

			/* Dequeue; the softint may now be rescheduled. */
			si->si_onlist = false;
			LIST_REMOVE(si, si_entries);
		} else {
			/* Nothing pending; sleep until softint_schedule(). */
			cv_wait(&si_cv, &si_mtx);
			continue;
		}
		/*
		 * Going busy.  If we were the last idle worker, spawn
		 * a replacement so at least one idle thread remains to
		 * service softints scheduled while we run the handler.
		 */
		wrkidle--;
		if (__predict_false(wrkidle == 0))
			makeworker(false);
		mutex_exit(&si_mtx);

		/* Non-MPSAFE handlers run under the big kernel lock. */
		if (!mpsafe)
			KERNEL_LOCK(1, curlwp);
		func(funarg);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(curlwp);

		/* Back to idle. */
		mutex_enter(&si_mtx);
		wrkidle++;
	}
}
200 
/*
 * Initialize the interrupt simulator: locks, the clock thread and
 * the initial pool of softint worker threads.  The threads are only
 * created when rump_threads is set; otherwise softints run in place
 * (see softint_schedule()).
 */
void
softint_init(struct cpu_info *ci)
{
	int rv;

	mutex_init(&si_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&si_cv, "intrw8"); /* cv of temporary w8ness */

	rumpuser_cv_init(&clockcv);
	rumpuser_mutex_init(&clockmtx);

	/* XXX: should have separate "wanttimer" control */
	if (rump_threads) {
		/*
		 * Take clockmtx *before* creating the clock thread:
		 * doclock() signals clockcv with clockmtx held, so this
		 * guarantees the signal cannot fire before we reach the
		 * rumpuser_cv_wait() below (no lost wakeup).
		 */
		rumpuser_mutex_enter(clockmtx);
		rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, doclock,
		    NULL, NULL, "rumpclk");
		if (rv)
			panic("clock thread creation failed: %d", rv);
		/* makeworker() expects si_mtx to be held. */
		mutex_enter(&si_mtx);
		while (wrktotal < INTRTHREAD_DEFAULT) {
			makeworker(true);
		}
		mutex_exit(&si_mtx);

		/* make sure we have a clocktime before returning */
		rumpuser_cv_wait(clockcv, clockmtx);
		rumpuser_mutex_exit(clockmtx);
	}
}
230 
231 /*
232  * Soft interrupts bring two choices.  If we are running with thread
233  * support enabled, defer execution, otherwise execute in place.
234  * See softint_schedule().
235  *
236  * As there is currently no clear concept of when a thread finishes
237  * work (although rump_clear_curlwp() is close), simply execute all
238  * softints in the timer thread.  This is probably not the most
239  * efficient method, but good enough for now.
240  */
241 void *
242 softint_establish(u_int flags, void (*func)(void *), void *arg)
243 {
244 	struct softint *si;
245 
246 	si = kmem_alloc(sizeof(*si), KM_SLEEP);
247 	si->si_func = func;
248 	si->si_arg = arg;
249 	si->si_onlist = false;
250 	si->si_mpsafe = flags & SOFTINT_MPSAFE;
251 
252 	return si;
253 }
254 
255 void
256 softint_schedule(void *arg)
257 {
258 	struct softint *si = arg;
259 
260 	if (!rump_threads) {
261 		si->si_func(si->si_arg);
262 	} else {
263 		mutex_enter(&si_mtx);
264 		if (!si->si_onlist) {
265 			LIST_INSERT_HEAD(&si_pending, si, si_entries);
266 			si->si_onlist = true;
267 		}
268 		cv_signal(&si_cv);
269 		mutex_exit(&si_mtx);
270 	}
271 }
272 
/*
 * Are we in hard interrupt context?  Never, in this simulator:
 * "interrupts" are ordinary threads.
 */
bool
cpu_intr_p(void)
{
	return false;
}
279