1*e71b1b1eSchristos /* $NetBSD: intr.c,v 1.56 2020/11/01 20:58:38 christos Exp $ */
24a780c9aSad
3f88be143Spooka /*
4547f1a31Spooka * Copyright (c) 2008-2010, 2015 Antti Kantee. All Rights Reserved.
54a780c9aSad *
64a780c9aSad * Redistribution and use in source and binary forms, with or without
74a780c9aSad * modification, are permitted provided that the following conditions
84a780c9aSad * are met:
94a780c9aSad * 1. Redistributions of source code must retain the above copyright
104a780c9aSad * notice, this list of conditions and the following disclaimer.
114a780c9aSad * 2. Redistributions in binary form must reproduce the above copyright
124a780c9aSad * notice, this list of conditions and the following disclaimer in the
134a780c9aSad * documentation and/or other materials provided with the distribution.
144a780c9aSad *
15f88be143Spooka * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16f88be143Spooka * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17f88be143Spooka * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18f88be143Spooka * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19f88be143Spooka * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20f88be143Spooka * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21f88be143Spooka * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22f88be143Spooka * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23f88be143Spooka * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24f88be143Spooka * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25f88be143Spooka * SUCH DAMAGE.
264a780c9aSad */
274a780c9aSad
28a768afd6Spooka #include <sys/cdefs.h>
29*e71b1b1eSchristos __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.56 2020/11/01 20:58:38 christos Exp $");
30a768afd6Spooka
314a780c9aSad #include <sys/param.h>
32d806a53fSmartin #include <sys/atomic.h>
334a780c9aSad #include <sys/cpu.h>
34d3b3912fSpooka #include <sys/kernel.h>
353e5a8e03Spooka #include <sys/kmem.h>
36f88be143Spooka #include <sys/kthread.h>
37b1b78627Spooka #include <sys/malloc.h>
38f88be143Spooka #include <sys/intr.h>
39d3b3912fSpooka #include <sys/timetc.h>
404a780c9aSad
416bb51422Spooka #include <rump-sys/kern.h>
42f88be143Spooka
436bb51422Spooka #include <rump/rumpuser.h>
44f88be143Spooka
45f88be143Spooka /*
46f88be143Spooka * Interrupt simulator. It executes hardclock() and softintrs.
47f88be143Spooka */
48f88be143Spooka
/* si_flags */
#define SI_MPSAFE 0x01		/* handler runs without the big kernel lock */
#define SI_KILLME 0x02		/* disestablished while queued; reap in sithread() */

struct softint_percpu;

/*
 * One softint handler, as returned by softint_establish().  The handler
 * itself is shared; per-CPU scheduling state lives in the si_entry array.
 */
struct softint {
	void (*si_func)(void *);	/* handler function */
	void *si_arg;			/* opaque handler argument */
	int si_flags;			/* SI_* flags above */
	int si_level;			/* SOFTINT_* level, < SOFTINT_COUNT */

	struct softint_percpu *si_entry; /* [0,ncpu-1] */
};

/*
 * Per-CPU scheduling state of a softint.  An entry can sit on at most
 * one per-CPU si_pending list (sip_onlist) and at most once on the
 * global cross-CPU sicpupending list (sip_onlist_cpu).
 */
struct softint_percpu {
	struct softint *sip_parent;	/* backpointer to the softint */
	bool sip_onlist;		/* on a per-CPU si_pending list? */
	bool sip_onlist_cpu;		/* on the global sicpupending list? */

	TAILQ_ENTRY(softint_percpu) sip_entries; /* scheduled */
	TAILQ_ENTRY(softint_percpu) sip_entries_cpu; /* to be scheduled */
};

/*
 * Per-(cpu,level) pending queue plus the condvar its handler
 * thread sleeps on.
 */
struct softint_lev {
	struct rumpuser_cv *si_cv;
	TAILQ_HEAD(, softint_percpu) si_pending;
};

/* cross-CPU scheduling queue, consumed by sithread_cpu_bouncer() */
static TAILQ_HEAD(, softint_percpu) sicpupending \
    = TAILQ_HEAD_INITIALIZER(sicpupending);
static struct rumpuser_mtx *sicpumtx;
static struct rumpuser_cv *sicpucv;

kcondvar_t lbolt; /* Oh Kath Ra */

static int ncpu_final;	/* number of vcpus, set once by rump_intr_init() */
84d3b3912fSpooka
/*
 * The rump kernel has no real clock hardware, so stub out the clock
 * hooks the generic kernel clock code expects.  doclock() below drives
 * hardclock() directly instead.
 */
void noclock(void); void noclock(void) {return;}
__strong_alias(sched_schedclock,noclock);
__strong_alias(cpu_initclocks,noclock);
__strong_alias(addupc_intr,noclock);
__strong_alias(sched_tick,noclock);
__strong_alias(setstatclockrate,noclock);
919188159bSpooka
92f88be143Spooka /*
93df8e80beSpooka * clock "interrupt"
94f88be143Spooka */
95df8e80beSpooka static void
doclock(void * noarg)96df8e80beSpooka doclock(void *noarg)
97df8e80beSpooka {
98a53a2a53Spooka struct timespec thetick, curclock;
992c5012faSpooka struct clockframe *clkframe;
1005606f9bfSpooka int64_t sec;
1015606f9bfSpooka long nsec;
102d3b3912fSpooka int error;
10387792ce2Spooka struct cpu_info *ci = curcpu();
1049188159bSpooka
105152588e3Spooka error = rumpuser_clock_gettime(RUMPUSER_CLOCK_ABSMONO, &sec, &nsec);
106a53a2a53Spooka if (error)
107a53a2a53Spooka panic("clock: cannot get monotonic time");
108a53a2a53Spooka
109a53a2a53Spooka curclock.tv_sec = sec;
110a53a2a53Spooka curclock.tv_nsec = nsec;
111d3b3912fSpooka thetick.tv_sec = 0;
112d3b3912fSpooka thetick.tv_nsec = 1000000000/hz;
1139188159bSpooka
1142c5012faSpooka /* generate dummy clockframe for hardclock to consume */
1152c5012faSpooka clkframe = rump_cpu_makeclockframe();
11687792ce2Spooka
117df8e80beSpooka for (;;) {
11887792ce2Spooka int lbolt_ticks = 0;
11987792ce2Spooka
1202c5012faSpooka hardclock(clkframe);
12187792ce2Spooka if (CPU_IS_PRIMARY(ci)) {
12287792ce2Spooka if (++lbolt_ticks >= hz) {
12387792ce2Spooka lbolt_ticks = 0;
12487792ce2Spooka cv_broadcast(&lbolt);
12587792ce2Spooka }
12687792ce2Spooka }
127f88be143Spooka
128152588e3Spooka error = rumpuser_clock_sleep(RUMPUSER_CLOCK_ABSMONO,
129152588e3Spooka curclock.tv_sec, curclock.tv_nsec);
130015ffe5bSpooka if (error) {
131015ffe5bSpooka panic("rumpuser_clock_sleep failed with error %d",
132015ffe5bSpooka error);
133015ffe5bSpooka }
134a53a2a53Spooka timespecadd(&curclock, &thetick, &curclock);
135df8e80beSpooka }
136df8e80beSpooka }
137df8e80beSpooka
/*
 * Soft interrupt execution thread, one per (cpu, softint level) pair.
 * This thread is pinned to the same CPU that scheduled the interrupt,
 * so we don't need to lock against si_lvl: the rump CPU scheduler
 * serializes access to the per-CPU pending list.
 */
static void
sithread(void *arg)
{
	struct softint_percpu *sip;
	struct softint *si;
	void (*func)(void *) = NULL;
	void *funarg;
	bool mpsafe;
	int mylevel = (uintptr_t)arg;	/* level this thread services */
	struct softint_lev *si_lvlp, *si_lvl;
	struct cpu_data *cd = &curcpu()->ci_data;

	si_lvlp = cd->cpu_softcpu;
	si_lvl = &si_lvlp[mylevel];

	for (;;) {
		if (!TAILQ_EMPTY(&si_lvl->si_pending)) {
			sip = TAILQ_FIRST(&si_lvl->si_pending);
			si = sip->sip_parent;

			/* snapshot handler info before dequeueing */
			func = si->si_func;
			funarg = si->si_arg;
			mpsafe = si->si_flags & SI_MPSAFE;

			sip->sip_onlist = false;
			TAILQ_REMOVE(&si_lvl->si_pending, sip, sip_entries);
			if (si->si_flags & SI_KILLME) {
				/* deferred disestablish: reap, don't run */
				softint_disestablish(si);
				continue;
			}
		} else {
			/* queue empty: sleep until softint_schedule() */
			rump_schedlock_cv_wait(si_lvl->si_cv);
			continue;
		}

		/* non-MPSAFE handlers run under the big kernel lock */
		if (!mpsafe)
			KERNEL_LOCK(1, curlwp);
		func(funarg);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(curlwp);
	}

	panic("sithread unreachable");
}
1874a780c9aSad
/*
 * Helper for softint_schedule_cpu(): a single bound thread that drains
 * the global sicpupending queue, migrates itself to each softint's
 * target CPU, and schedules the softint there via softint_schedule().
 */
static void
sithread_cpu_bouncer(void *arg)
{
	struct lwp *me;

	me = curlwp;
	me->l_pflag |= LP_BOUND;	/* honor l_target_cpu on reschedule */

	rump_unschedule();
	for (;;) {
		struct softint_percpu *sip;
		struct softint *si;
		struct cpu_info *ci;
		unsigned int cidx;

		/* wait for cross-CPU work; sicpumtx protects the queue */
		rumpuser_mutex_enter_nowrap(sicpumtx);
		while (TAILQ_EMPTY(&sicpupending)) {
			rumpuser_cv_wait_nowrap(sicpucv, sicpumtx);
		}
		sip = TAILQ_FIRST(&sicpupending);
		TAILQ_REMOVE(&sicpupending, sip, sip_entries_cpu);
		sip->sip_onlist_cpu = false;
		rumpuser_mutex_exit(sicpumtx);

		/*
		 * ok, now figure out which cpu we need the softint to
		 * be handled on
		 */
		si = sip->sip_parent;
		cidx = sip - si->si_entry;	/* entry index == cpu index */
		ci = cpu_lookup(cidx);
		me->l_target_cpu = ci;

		/* schedule ourselves there, and then schedule the softint */
		rump_schedule();
		KASSERT(curcpu() == ci);
		softint_schedule(si);
		rump_unschedule();
	}
	panic("sithread_cpu_bouncer unreasonable");
}
232e82a50e4Spooka
static kmutex_t sithr_emtx;		/* protects sithr_est/sithr_canest */
static unsigned int sithr_est;		/* bitmask of established levels */
static int sithr_canest;		/* is kthread_create() usable yet? */

/*
 * Create softint handler threads when the softint for each respective
 * level is established for the first time.  Most rump kernels don't
 * need at least half of the softint levels, so on-demand saves bootstrap
 * time and memory resources.  Note, though, that this routine may be
 * called before it's possible to call kthread_create().  Creation of
 * those softints (SOFTINT_CLOCK, as of writing this) will be deferred
 * to until softint_init() is called for the main CPU.
 */
static void
sithread_establish(int level)
{
	int docreate, rv;
	int lvlbit = 1<<level;
	int i;

	KASSERT((level & ~SOFTINT_LVLMASK) == 0);
	/* fast path: threads exist or creation is already pending */
	if (__predict_true(sithr_est & lvlbit))
		return;

	mutex_enter(&sithr_emtx);
	docreate = (sithr_est & lvlbit) == 0 && sithr_canest;
	sithr_est |= lvlbit;
	mutex_exit(&sithr_emtx);

	if (docreate) {
		/* one handler thread per vcpu, pinned to that cpu */
		for (i = 0; i < ncpu_final; i++) {
			if ((rv = kthread_create(PRI_NONE,
			    KTHREAD_MPSAFE | KTHREAD_INTR,
			    cpu_lookup(i), sithread, (void *)(uintptr_t)level,
			    NULL, "rsi%d/%d", i, level)) != 0)
				panic("softint thread create failed: %d", rv);
		}
	}
}
272d64fcb8dSpooka
2734a780c9aSad void
rump_intr_init(int numcpu)2748fff110dSpooka rump_intr_init(int numcpu)
2754a780c9aSad {
2764a780c9aSad
27709aa8689Spooka cv_init(&lbolt, "oh kath ra");
278d64fcb8dSpooka mutex_init(&sithr_emtx, MUTEX_DEFAULT, IPL_NONE);
2798fff110dSpooka ncpu_final = numcpu;
28062e84772Spooka }
2819188159bSpooka
/*
 * Per-CPU softint initialization: allocate this CPU's per-level pending
 * queues and start its clock thread.  For the boot CPU (ci_index == 0)
 * also initialize clocks and create any softint threads whose
 * establishment was deferred from before kthread_create() worked.
 */
void
softint_init(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *slev;
	int rv, i;

	if (!rump_threads)
		return;

	/* one pending queue + wakeup cv per softint level */
	slev = kmem_alloc(sizeof(struct softint_lev) * SOFTINT_COUNT, KM_SLEEP);
	for (i = 0; i < SOFTINT_COUNT; i++) {
		rumpuser_cv_init(&slev[i].si_cv);
		TAILQ_INIT(&slev[i].si_pending);
	}
	cd->cpu_softcpu = slev;

	/* overloaded global init ... */
	/* XXX: should be done the last time we are called */
	if (ci->ci_index == 0) {
		int sithr_swap;

		/* pretend that we have our own for these */
		stathz = 1;
		schedhz = 1;
		profhz = 1;

		initclocks();

		/* create deferred softint threads */
		mutex_enter(&sithr_emtx);
		sithr_swap = sithr_est;
		sithr_est = 0;
		sithr_canest = 1;
		mutex_exit(&sithr_emtx);
		for (i = 0; i < SOFTINT_COUNT; i++) {
			if (sithr_swap & (1<<i))
				sithread_establish(i);
		}
	}

	/* well, not really a "soft" interrupt ... */
	if ((rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
	    ci, doclock, NULL, NULL, "rumpclk%d", ci->ci_index)) != 0)
		panic("clock thread creation failed: %d", rv);

	/*
	 * not one either, but at least a softint helper
	 *
	 * NOTE(review): this block is outside the ci_index == 0 guard, so
	 * if softint_init() runs once per CPU it re-initializes sicpumtx/
	 * sicpucv and creates an additional bouncer thread each time --
	 * confirm whether it should be under the boot-CPU guard above.
	 */
	rumpuser_mutex_init(&sicpumtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&sicpucv);
	if ((rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
	    NULL, sithread_cpu_bouncer, NULL, NULL, "sipbnc")) != 0)
		panic("softint cpu bouncer creation failed: %d", rv);
}
3359188159bSpooka
336f88be143Spooka void *
softint_establish(u_int flags,void (* func)(void *),void * arg)337f88be143Spooka softint_establish(u_int flags, void (*func)(void *), void *arg)
338f88be143Spooka {
339f88be143Spooka struct softint *si;
34057245467Spooka struct softint_percpu *sip;
341d64fcb8dSpooka int level = flags & SOFTINT_LVLMASK;
34257245467Spooka int i;
343f88be143Spooka
344b1b78627Spooka si = malloc(sizeof(*si), M_TEMP, M_WAITOK);
345f88be143Spooka si->si_func = func;
346f88be143Spooka si->si_arg = arg;
34764d32972Spooka si->si_flags = flags & SOFTINT_MPSAFE ? SI_MPSAFE : 0;
348d64fcb8dSpooka si->si_level = level;
349644009f9Spooka KASSERT(si->si_level < SOFTINT_COUNT);
3508fff110dSpooka si->si_entry = malloc(sizeof(*si->si_entry) * ncpu_final,
35157245467Spooka M_TEMP, M_WAITOK | M_ZERO);
3528fff110dSpooka for (i = 0; i < ncpu_final; i++) {
35357245467Spooka sip = &si->si_entry[i];
35457245467Spooka sip->sip_parent = si;
35557245467Spooka }
356d64fcb8dSpooka sithread_establish(level);
357f88be143Spooka
358f88be143Spooka return si;
3594a780c9aSad }
3604a780c9aSad
361e82a50e4Spooka static struct softint_percpu *
sitosip(struct softint * si,struct cpu_info * ci)362e82a50e4Spooka sitosip(struct softint *si, struct cpu_info *ci)
363e82a50e4Spooka {
364e82a50e4Spooka
365e82a50e4Spooka return &si->si_entry[ci->ci_index];
366e82a50e4Spooka }
367e82a50e4Spooka
368d0016412Spooka /*
369d0016412Spooka * Soft interrupts bring two choices. If we are running with thread
370d0016412Spooka * support enabled, defer execution, otherwise execute in place.
371d0016412Spooka */
372d0016412Spooka
3734a780c9aSad void
softint_schedule(void * arg)3744a780c9aSad softint_schedule(void *arg)
3754a780c9aSad {
376f88be143Spooka struct softint *si = arg;
37763714deaSpooka struct cpu_info *ci = curcpu();
378e82a50e4Spooka struct softint_percpu *sip = sitosip(si, ci);
37963714deaSpooka struct cpu_data *cd = &ci->ci_data;
38062e84772Spooka struct softint_lev *si_lvl = cd->cpu_softcpu;
3814a780c9aSad
382f88be143Spooka if (!rump_threads) {
383f88be143Spooka si->si_func(si->si_arg);
384f88be143Spooka } else {
38557245467Spooka if (!sip->sip_onlist) {
386547f1a31Spooka TAILQ_INSERT_TAIL(&si_lvl[si->si_level].si_pending,
38757245467Spooka sip, sip_entries);
38857245467Spooka sip->sip_onlist = true;
389f88be143Spooka }
390f88be143Spooka }
3914a780c9aSad }
3924a780c9aSad
/*
 * Like softint_schedule(), except schedule softint to be handled on
 * the core designated by ci_tgt instead of the core the call is made on.
 *
 * Unlike softint_schedule(), the performance is not important
 * (unless ci_tgt == curcpu): high-performance rump kernel I/O stacks
 * should arrange data to already be on the right core at the driver
 * layer.
 */
void
softint_schedule_cpu(void *arg, struct cpu_info *ci_tgt)
{
	struct softint *si = arg;
	struct cpu_info *ci_cur = curcpu();
	struct softint_percpu *sip;

	KASSERT(rump_threads);

	/* preferred case (which can be optimized some day) */
	if (ci_cur == ci_tgt) {
		softint_schedule(si);
		return;
	}

	/*
	 * no? then it's softint turtles all the way down: hand the
	 * per-CPU entry to the bouncer thread, which will migrate to
	 * ci_tgt and call softint_schedule() there.
	 */

	sip = sitosip(si, ci_tgt);
	rumpuser_mutex_enter_nowrap(sicpumtx);
	if (sip->sip_onlist_cpu) {
		/* already queued for the bouncer; nothing to do */
		rumpuser_mutex_exit(sicpumtx);
		return;
	}
	TAILQ_INSERT_TAIL(&sicpupending, sip, sip_entries_cpu);
	sip->sip_onlist_cpu = true;
	rumpuser_cv_signal(sicpucv);
	rumpuser_mutex_exit(sicpumtx);
}
43206dd4db0Srmind
43357245467Spooka /*
43457245467Spooka * flimsy disestablish: should wait for softints to finish.
43557245467Spooka */
43664d32972Spooka void
softint_disestablish(void * cook)43764d32972Spooka softint_disestablish(void *cook)
43864d32972Spooka {
43964d32972Spooka struct softint *si = cook;
44057245467Spooka int i;
44164d32972Spooka
4428fff110dSpooka for (i = 0; i < ncpu_final; i++) {
44357245467Spooka struct softint_percpu *sip;
44457245467Spooka
44557245467Spooka sip = &si->si_entry[i];
44657245467Spooka if (sip->sip_onlist) {
44764d32972Spooka si->si_flags |= SI_KILLME;
44864d32972Spooka return;
44964d32972Spooka }
45057245467Spooka }
45157245467Spooka free(si->si_entry, M_TEMP);
452b1b78627Spooka free(si, M_TEMP);
45364d32972Spooka }
45464d32972Spooka
455644009f9Spooka void
rump_softint_run(struct cpu_info * ci)456644009f9Spooka rump_softint_run(struct cpu_info *ci)
457644009f9Spooka {
45862e84772Spooka struct cpu_data *cd = &ci->ci_data;
45962e84772Spooka struct softint_lev *si_lvl = cd->cpu_softcpu;
460644009f9Spooka int i;
461644009f9Spooka
46262e84772Spooka if (!rump_threads)
46362e84772Spooka return;
46462e84772Spooka
465644009f9Spooka for (i = 0; i < SOFTINT_COUNT; i++) {
466547f1a31Spooka if (!TAILQ_EMPTY(&si_lvl[i].si_pending))
467*e71b1b1eSchristos rump_schedlock_cv_signal(ci, si_lvl[i].si_cv);
468644009f9Spooka }
469644009f9Spooka }
470644009f9Spooka
/*
 * The rump kernel runs everything -- including the clock and
 * softints -- in thread context, so code is never executing in a
 * hardware interrupt context.
 */
bool
cpu_intr_p(void)
{

	return false;
}
477