/*	$NetBSD: kern_softint.c,v 1.64 2020/03/27 00:13:52 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Generic software interrupt framework.
 *
 * Overview
 *
 *	The soft interrupt framework provides a mechanism to schedule a
 *	low priority callback that runs with thread context.  It allows
 *	for dynamic registration of software interrupts, and for fair
 *	queueing and prioritization of those interrupts.  The callbacks
 *	can be scheduled to run from nearly any point in the kernel: by
 *	code running with thread context, by code running from a
 *	hardware interrupt handler, and at any interrupt priority
 *	level.
 *
 * Priority levels
 *
 *	Since soft interrupt dispatch can be tied to the underlying
 *	architecture's interrupt dispatch code, it can be limited
 *	both by the capabilities of the hardware and the capabilities
 *	of the interrupt dispatch code itself.  The number of priority
 *	levels is restricted to four.  In order of priority (lowest to
 *	highest) the levels are: clock, bio, net, serial.
 *
 *	The names are symbolic and in isolation do not have any direct
 *	connection with a particular kind of device activity: they are
 *	only meant as a guide.
 *
 *	The four priority levels map directly to scheduler priority
 *	levels, and where the architecture implements 'fast' software
 *	interrupts, they also map onto interrupt priorities.  The
 *	interrupt priorities are intended to be hidden from machine
 *	independent code, which should use thread-safe mechanisms to
 *	synchronize with software interrupts (for example: mutexes).
 *
 * Capabilities
 *
 *	Software interrupts run with limited machine context.  In
 *	particular, they do not possess any address space context.  They
 *	should not try to operate on user space addresses, or to use
 *	virtual memory facilities other than those noted as interrupt
 *	safe.
 *
 *	Unlike hardware interrupts, software interrupts do have thread
 *	context.  They may block on synchronization objects, sleep, and
 *	resume execution at a later time.
 *
 *	Since software interrupts are a limited resource and run with
 *	higher priority than most other LWPs in the system, all
 *	block-and-resume activity by a software interrupt must be kept
 *	short to allow further processing at that level to continue.  By
 *	extension, code running with process context must take care to
 *	ensure that any lock that may be taken from a software interrupt
 *	can not be held for more than a short period of time.
 *
 *	The kernel does not allow software interrupts to use facilities
 *	or perform actions that may block for a significant amount of
 *	time.  This means that it's not valid for a software interrupt
 *	to sleep on condition variables or wait for resources to become
 *	available (for example, memory).
 *
 * Per-CPU operation
 *
 *	If a soft interrupt is triggered on a CPU, it can only be
 *	dispatched on the same CPU.  Each LWP dedicated to handling a
 *	soft interrupt is bound to its home CPU, so if the LWP blocks
 *	and needs to run again, it can only run there.  Nearly all data
 *	structures used to manage software interrupts are per-CPU.
 *
 *	The per-CPU requirement is intended to reduce "ping-pong" of
 *	cache lines between CPUs: lines occupied by data structures
 *	used to manage the soft interrupts, and lines occupied by data
 *	items being passed down to the soft interrupt.  As a positive
 *	side effect, this also means that the soft interrupt dispatch
 *	code does not need to use spinlocks to synchronize.
 *
 * Generic implementation
 *
 *	A generic, low performance implementation is provided that
 *	works across all architectures, with no machine-dependent
 *	modifications needed.  This implementation uses the scheduler,
 *	and so has a number of restrictions:
 *
 *	1) The software interrupts are not currently preemptive, so
 *	must wait for the currently executing LWP to yield the CPU.
 *	This can introduce latency.
 *
 *	2) An expensive context switch is required for a software
 *	interrupt to be handled.
 *
 * 'Fast' software interrupts
 *
 *	If an architecture defines __HAVE_FAST_SOFTINTS, it implements
 *	the fast mechanism.  Threads running either in the kernel or in
 *	userspace will be interrupted, but will not be preempted.  When
 *	the soft interrupt completes execution, the interrupted LWP
 *	is resumed.  Interrupt dispatch code must provide the minimum
 *	level of context necessary for the soft interrupt to block and
 *	be resumed at a later time.  The machine-dependent dispatch
 *	path looks something like the following:
 *
 *	softintr()
 *	{
 *		go to IPL_HIGH if necessary for switch;
 *		save any necessary registers in a format that can be
 *		    restored by cpu_switchto if the softint blocks;
 *		arrange for cpu_switchto() to restore into the
 *		    trampoline function;
 *		identify LWP to handle this interrupt;
 *		switch to the LWP's stack;
 *		switch register stacks, if necessary;
 *		assign new value of curlwp;
 *		call MI softint_dispatch, passing old curlwp and IPL
 *		    to execute interrupt at;
 *		switch back to old stack;
 *		switch back to old register stack, if necessary;
 *		restore curlwp;
 *		return to interrupted LWP;
 *	}
 *
 *	If the soft interrupt blocks, a trampoline function is returned
 *	to in the context of the interrupted LWP, as arranged for by
 *	softintr():
 *
 *	softint_ret()
 *	{
 *		unlock soft interrupt LWP;
 *		resume interrupt processing, likely returning to
 *		    interrupted LWP or dispatching another, different
 *		    interrupt;
 *	}
 *
 *	Once the soft interrupt has fired (and even if it has blocked),
 *	no further soft interrupts at that level will be triggered by
 *	MI code until the soft interrupt handler has ceased execution.
 *	If a soft interrupt handler blocks and is resumed, it resumes
 *	execution as a normal LWP (kthread) and gains VM context.  Only
 *	when it has completed and is ready to fire again will it
 *	interrupt other threads.
 */
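
/*
 * Example usage
 *
 *	A minimal sketch of how a driver typically consumes this
 *	framework (illustrative only; the mydrv names are hypothetical
 *	and not part of this file):
 *
 *	static void *mydrv_sih;
 *
 *	static void
 *	mydrv_softintr(void *arg)
 *	{
 *		struct mydrv_softc *sc = arg;
 *
 *		... runs in thread context at the softnet priority;
 *		    may take mutexes and block briefly, but must
 *		    remain short ...
 *	}
 *
 *	Establishment, typically during attach:
 *
 *	mydrv_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *	    mydrv_softintr, sc);
 *
 *	Triggering, from the hardware interrupt handler:
 *
 *	softint_schedule(mydrv_sih);
 */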

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.64 2020/03/27 00:13:52 ad Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/intr.h>
#include <sys/ipi.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/evcnt.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <net/netisr.h>

#include <uvm/uvm_extern.h>

/* This could overlap with signal info in struct lwp. */
typedef struct softint {
	SIMPLEQ_HEAD(, softhand) si_q;
	struct lwp		*si_lwp;
	struct cpu_info		*si_cpu;
	uintptr_t		si_machdep;
	struct evcnt		si_evcnt;
	struct evcnt		si_evcnt_block;
	volatile int		si_active;
	char			si_name[8];
	char			si_name_block[8+6];
} softint_t;

typedef struct softhand {
	SIMPLEQ_ENTRY(softhand)	sh_q;
	void			(*sh_func)(void *);
	void			*sh_arg;
	softint_t		*sh_isr;
	u_int			sh_flags;
	u_int			sh_ipi_id;
} softhand_t;

typedef struct softcpu {
	struct cpu_info		*sc_cpu;
	softint_t		sc_int[SOFTINT_COUNT];
	softhand_t		sc_hand[1];
} softcpu_t;

static void	softint_thread(void *);

u_int		softint_bytes = 32768;
u_int		softint_timing;
static u_int	softint_max;
static kmutex_t	softint_lock;
static void	*softint_netisrs[NETISR_MAX];

/*
 * softint_init_isr:
 *
 *	Initialize a single interrupt level for a single CPU.
 */
static void
softint_init_isr(softcpu_t *sc, const char *desc, pri_t pri, u_int level)
{
	struct cpu_info *ci;
	softint_t *si;
	int error;

	si = &sc->sc_int[level];
	ci = sc->sc_cpu;
	si->si_cpu = ci;

	SIMPLEQ_INIT(&si->si_q);

	error = kthread_create(pri, KTHREAD_MPSAFE | KTHREAD_INTR |
	    KTHREAD_IDLE, ci, softint_thread, si, &si->si_lwp,
	    "soft%s/%u", desc, ci->ci_index);
	if (error != 0)
		panic("softint_init_isr: error %d", error);

	snprintf(si->si_name, sizeof(si->si_name), "%s/%u", desc,
	    ci->ci_index);
	evcnt_attach_dynamic(&si->si_evcnt, EVCNT_TYPE_MISC, NULL,
	   "softint", si->si_name);
	snprintf(si->si_name_block, sizeof(si->si_name_block), "%s block/%u",
	    desc, ci->ci_index);
	evcnt_attach_dynamic(&si->si_evcnt_block, EVCNT_TYPE_MISC, NULL,
	   "softint", si->si_name_block);

	si->si_lwp->l_private = si;
	softint_init_md(si->si_lwp, level, &si->si_machdep);
}

/*
 * softint_init:
 *
 *	Initialize per-CPU data structures.  Called from mi_cpu_attach().
 */
void
softint_init(struct cpu_info *ci)
{
	static struct cpu_info *first;
	softcpu_t *sc, *scfirst;
	softhand_t *sh, *shmax;

	if (first == NULL) {
		/* Boot CPU. */
		first = ci;
		mutex_init(&softint_lock, MUTEX_DEFAULT, IPL_NONE);
		softint_bytes = round_page(softint_bytes);
		softint_max = (softint_bytes - sizeof(softcpu_t)) /
		    sizeof(softhand_t);
	}

	/* Use uvm_km(9) for persistent, page-aligned allocation. */
	sc = (softcpu_t *)uvm_km_alloc(kernel_map, softint_bytes, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (sc == NULL)
		panic("softint_init_cpu: cannot allocate memory");

	ci->ci_data.cpu_softcpu = sc;
	ci->ci_data.cpu_softints = 0;
	sc->sc_cpu = ci;

	softint_init_isr(sc, "net", PRI_SOFTNET, SOFTINT_NET);
	softint_init_isr(sc, "bio", PRI_SOFTBIO, SOFTINT_BIO);
	softint_init_isr(sc, "clk", PRI_SOFTCLOCK, SOFTINT_CLOCK);
	softint_init_isr(sc, "ser", PRI_SOFTSERIAL, SOFTINT_SERIAL);

	if (first != ci) {
		mutex_enter(&softint_lock);
		scfirst = first->ci_data.cpu_softcpu;
		sh = sc->sc_hand;
		memcpy(sh, scfirst->sc_hand, sizeof(*sh) * softint_max);
		/* Update pointers for this CPU. */
		for (shmax = sh + softint_max; sh < shmax; sh++) {
			if (sh->sh_func == NULL)
				continue;
			sh->sh_isr =
			    &sc->sc_int[sh->sh_flags & SOFTINT_LVLMASK];
		}
		mutex_exit(&softint_lock);
	} else {
		/*
		 * Establish handlers for legacy net interrupts.
		 * XXX Needs to go away.
		 */
#define DONETISR(n, f)							\
    softint_netisrs[(n)] = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,\
        (void (*)(void *))(f), NULL)
#include <net/netisr_dispatch.h>
	}
}

/*
 * softint_establish:
 *
 *	Register a software interrupt handler.
 */
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	u_int level, index;
	u_int ipi_id = 0;
	void *sih;

	level = (flags & SOFTINT_LVLMASK);
	KASSERT(level < SOFTINT_COUNT);
	KASSERT((flags & SOFTINT_IMPMASK) == 0);

	mutex_enter(&softint_lock);

	/* Find a free slot. */
	sc = curcpu()->ci_data.cpu_softcpu;
	for (index = 1; index < softint_max; index++) {
		if (sc->sc_hand[index].sh_func == NULL)
			break;
	}
	if (index == softint_max) {
		mutex_exit(&softint_lock);
		printf("WARNING: softint_establish: table full, "
		    "increase softint_bytes\n");
		return NULL;
	}
	sih = (void *)((uint8_t *)&sc->sc_hand[index] - (uint8_t *)sc);

	if (flags & SOFTINT_RCPU) {
		if ((ipi_id = ipi_register(softint_schedule, sih)) == 0) {
			mutex_exit(&softint_lock);
			return NULL;
		}
	}

	/* Set up the handler on each CPU. */
	if (ncpu < 2) {
		/* XXX hack for machines with no CPU_INFO_FOREACH() early on */
		sc = curcpu()->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];
		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
		sh->sh_ipi_id = ipi_id;
	} else for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];
		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
		sh->sh_ipi_id = ipi_id;
	}
	mutex_exit(&softint_lock);

	return sih;
}
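
/*
 * A sketch of the failure case (illustrative; the mydrv names are
 * hypothetical): the return value must be checked, since establishment
 * fails once the per-CPU handler table is full, or if IPI registration
 * for SOFTINT_RCPU fails:
 *
 *	sc->sc_sih = softint_establish(SOFTINT_SERIAL | SOFTINT_MPSAFE,
 *	    mydrv_softintr, sc);
 *	if (sc->sc_sih == NULL)
 *		aprint_error_dev(sc->sc_dev,
 *		    "unable to establish soft interrupt\n");
 */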

/*
 * softint_disestablish:
 *
 *	Unregister a software interrupt handler.  The soft interrupt could
 *	still be active at this point, but the caller commits not to
 *	trigger it again once this call is made.  The caller must not
 *	hold any locks that could be taken from soft interrupt context,
 *	because we will wait for the softint to complete if it's still
 *	running.
 */
void
softint_disestablish(void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	uintptr_t offset;
	u_int flags;

	offset = (uintptr_t)arg;
	KASSERTMSG(offset != 0 && offset < softint_bytes, "%"PRIuPTR" %u",
	    offset, softint_bytes);

	/*
	 * Unregister an IPI handler if there is any.  Note: there is
	 * no need to disable preemption here - ID is stable.
	 */
	sc = curcpu()->ci_data.cpu_softcpu;
	sh = (softhand_t *)((uint8_t *)sc + offset);
	if (sh->sh_ipi_id) {
		ipi_unregister(sh->sh_ipi_id);
	}

	/*
	 * Run a cross call so we see up to date values of sh_flags from
	 * all CPUs.  Once softint_disestablish() is called, the caller
	 * commits not to trigger the interrupt (and so not to set
	 * SOFTINT_ACTIVE on it) again.  So, we are only looking for
	 * handler records with SOFTINT_ACTIVE already set.
	 */
	if (__predict_true(mp_online)) {
		xc_barrier(0);
	}

	for (;;) {
		/* Collect flag values from each CPU. */
		flags = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			sc = ci->ci_data.cpu_softcpu;
			sh = (softhand_t *)((uint8_t *)sc + offset);
			KASSERT(sh->sh_func != NULL);
			flags |= sh->sh_flags;
		}
		/* Inactive on all CPUs? */
		if ((flags & SOFTINT_ACTIVE) == 0) {
			break;
		}
		/* Oops, still active.  Wait for it to clear. */
		(void)kpause("softdis", false, 1, NULL);
	}

	/* Clear the handler on each CPU. */
	mutex_enter(&softint_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = (softhand_t *)((uint8_t *)sc + offset);
		KASSERT(sh->sh_func != NULL);
		sh->sh_func = NULL;
	}
	mutex_exit(&softint_lock);
}
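
/*
 * A sketch of the detach ordering this implies (illustrative; the
 * mydrv names are hypothetical).  New triggers must be prevented,
 * e.g. by disabling the device interrupt, before disestablishing;
 * the call then waits out any handler still marked SOFTINT_ACTIVE:
 *
 *	mydrv_disable_hw_intr(sc);
 *	softint_disestablish(sc->sc_sih);
 *	sc->sc_sih = NULL;
 */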

/*
 * softint_schedule:
 *
 *	Trigger a software interrupt.  Must be called from a hardware
 *	interrupt handler, or with preemption disabled (since we are
 *	using the value of curcpu()).
 */
void
softint_schedule(void *arg)
{
	softhand_t *sh;
	softint_t *si;
	uintptr_t offset;
	int s;

	/*
	 * If this assert fires, rather than disabling preemption explicitly
	 * to make it stop, consider that you are probably using a softint
	 * when you don't need to.
	 */
	KASSERT(kpreempt_disabled());

	/* Find the handler record for this CPU. */
	offset = (uintptr_t)arg;
	KASSERTMSG(offset != 0 && offset < softint_bytes, "%"PRIuPTR" %u",
	    offset, softint_bytes);
	sh = (softhand_t *)((uint8_t *)curcpu()->ci_data.cpu_softcpu + offset);

	/* If it's already pending there's nothing to do. */
	if ((sh->sh_flags & SOFTINT_PENDING) != 0) {
		return;
	}

	/*
	 * Enqueue the handler into the LWP's pending list.
	 * If the LWP is completely idle, then make it run.
	 */
	s = splhigh();
	if ((sh->sh_flags & SOFTINT_PENDING) == 0) {
		si = sh->sh_isr;
		sh->sh_flags |= SOFTINT_PENDING;
		SIMPLEQ_INSERT_TAIL(&si->si_q, sh, sh_q);
		if (si->si_active == 0) {
			si->si_active = 1;
			softint_trigger(si->si_machdep);
		}
	}
	splx(s);
}
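
/*
 * A sketch of triggering from thread context (illustrative): since the
 * handler record is found via curcpu(), preemption must be disabled
 * across the call when not already in a hardware interrupt handler:
 *
 *	kpreempt_disable();
 *	softint_schedule(sc->sc_sih);
 *	kpreempt_enable();
 */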

/*
 * softint_schedule_cpu:
 *
 *	Trigger a software interrupt on a target CPU.  This invokes
 *	softint_schedule() for the local CPU, or sends an IPI to invoke
 *	this routine on the remote CPU.  Preemption must be disabled.
 */
void
softint_schedule_cpu(void *arg, struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());

	if (curcpu() != ci) {
		const softcpu_t *sc = ci->ci_data.cpu_softcpu;
		const uintptr_t offset = (uintptr_t)arg;
		const softhand_t *sh;

		sh = (const softhand_t *)((const uint8_t *)sc + offset);
		KASSERT((sh->sh_flags & SOFTINT_RCPU) != 0);
		ipi_trigger(sh->sh_ipi_id, ci);
		return;
	}

	/* Just a local CPU. */
	softint_schedule(arg);
}
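
/*
 * A sketch of cross-CPU triggering (illustrative; target_ci is a
 * hypothetical remote cpu_info pointer).  The handler must have been
 * established with SOFTINT_RCPU so that an IPI slot was registered:
 *
 *	sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE |
 *	    SOFTINT_RCPU, mydrv_softintr, sc);
 *	...
 *	kpreempt_disable();
 *	softint_schedule_cpu(sih, target_ci);
 *	kpreempt_enable();
 */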

/*
 * softint_execute:
 *
 *	Invoke handlers for the specified soft interrupt.
 *	Must be entered at splhigh.  Will drop the priority
 *	to the level specified, but returns at splhigh.
 */
static inline void
softint_execute(softint_t *si, lwp_t *l, int s)
{
	softhand_t *sh;

	KASSERT(si->si_lwp == curlwp);
	KASSERT(si->si_cpu == curcpu());
	KASSERT(si->si_lwp->l_wchan == NULL);
	KASSERT(si->si_active);

	/*
	 * Note: due to priority inheritance we may have interrupted a
	 * higher priority LWP.  Since the soft interrupt must be quick
	 * and is non-preemptable, we don't bother yielding.
	 */

	while (!SIMPLEQ_EMPTY(&si->si_q)) {
		/*
		 * Pick the longest waiting handler to run.  We block
		 * interrupts but do not lock in order to do this, as
		 * we are protecting against the local CPU only.
		 */
		sh = SIMPLEQ_FIRST(&si->si_q);
		SIMPLEQ_REMOVE_HEAD(&si->si_q, sh_q);
		KASSERT((sh->sh_flags & SOFTINT_PENDING) != 0);
		KASSERT((sh->sh_flags & SOFTINT_ACTIVE) == 0);
		sh->sh_flags ^= (SOFTINT_PENDING | SOFTINT_ACTIVE);
		splx(s);

		/* Run the handler. */
		if (__predict_true((sh->sh_flags & SOFTINT_MPSAFE) != 0)) {
			(*sh->sh_func)(sh->sh_arg);
		} else {
			KERNEL_LOCK(1, l);
			(*sh->sh_func)(sh->sh_arg);
			KERNEL_UNLOCK_ONE(l);
		}

		/* Diagnostic: check that spin-locks have not leaked. */
		KASSERTMSG(curcpu()->ci_mtx_count == 0,
		    "%s: ci_mtx_count (%d) != 0, sh_func %p\n",
		    __func__, curcpu()->ci_mtx_count, sh->sh_func);
		/* Diagnostic: check that psrefs have not leaked. */
		KASSERTMSG(l->l_psrefs == 0, "%s: l_psrefs=%d, sh_func=%p\n",
		    __func__, l->l_psrefs, sh->sh_func);

		(void)splhigh();
		KASSERT((sh->sh_flags & SOFTINT_ACTIVE) != 0);
		sh->sh_flags ^= SOFTINT_ACTIVE;
	}

	PSREF_DEBUG_BARRIER();

	CPU_COUNT(CPU_COUNT_NSOFT, 1);

	KASSERT(si->si_cpu == curcpu());
	KASSERT(si->si_lwp->l_wchan == NULL);
	KASSERT(si->si_active);
	si->si_evcnt.ev_count++;
	si->si_active = 0;
}
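
/*
 * A sketch of how thread context synchronizes with an MPSAFE handler
 * (illustrative; the mydrv names are hypothetical): a mutex initialized
 * at the matching IPL_SOFT* level both serializes access to the shared
 * data and prevents the soft interrupt from being dispatched on the
 * local CPU while the thread side holds the lock:
 *
 *	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_SOFTNET);
 *
 *	static void
 *	mydrv_softintr(void *arg)
 *	{
 *		struct mydrv_softc *sc = arg;
 *
 *		mutex_enter(&sc->sc_intr_lock);
 *		... drain queued work ...
 *		mutex_exit(&sc->sc_intr_lock);
 *	}
 */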

/*
 * softint_block:
 *
 *	Update statistics when the soft interrupt blocks.
 */
void
softint_block(lwp_t *l)
{
	softint_t *si = l->l_private;

	KASSERT((l->l_pflag & LP_INTR) != 0);
	si->si_evcnt_block.ev_count++;
}

/*
 * schednetisr:
 *
 *	Trigger a legacy network interrupt.  XXX Needs to go away.
 */
void
schednetisr(int isr)
{

	softint_schedule(softint_netisrs[isr]);
}

#ifndef __HAVE_FAST_SOFTINTS

#ifdef __HAVE_PREEMPTION
#error __HAVE_PREEMPTION requires __HAVE_FAST_SOFTINTS
#endif

/*
 * softint_init_md:
 *
 *	Slow path: perform machine-dependent initialization.
 */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	struct proc *p;
	softint_t *si;

	*machdep = (1 << level);
	si = l->l_private;
	p = l->l_proc;

	mutex_enter(p->p_lock);
	lwp_lock(l);
	/* Cheat and make the KASSERT in softint_thread() happy. */
	si->si_active = 1;
	setrunnable(l);
	/* LWP now unlocked */
	mutex_exit(p->p_lock);
}

/*
 * softint_trigger:
 *
 *	Slow path: cause a soft interrupt handler to begin executing.
 *	Called at IPL_HIGH.
 */
void
softint_trigger(uintptr_t machdep)
{
	struct cpu_info *ci;
	lwp_t *l;

	ci = curcpu();
	ci->ci_data.cpu_softints |= machdep;
	l = ci->ci_onproc;

	/*
	 * Arrange for mi_switch() to be called.  If called from interrupt
	 * mode, we don't know if curlwp is executing in kernel or user, so
	 * post an AST and have it take a trip through userret().  If not in
	 * interrupt mode, curlwp is running in kernel and will notice the
	 * resched soon enough; avoid the AST.
	 */
	if (l == ci->ci_data.cpu_idlelwp) {
		atomic_or_uint(&ci->ci_want_resched,
		    RESCHED_IDLE | RESCHED_UPREEMPT);
	} else {
		atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
		if (cpu_intr_p()) {
			cpu_signotify(l);
		}
	}
}

/*
 * softint_thread:
 *
 *	Slow path: MI software interrupt dispatch.
 */
void
softint_thread(void *cookie)
{
	softint_t *si;
	lwp_t *l;
	int s;

	l = curlwp;
	si = l->l_private;

	for (;;) {
		/* Clear pending status and run it. */
		s = splhigh();
		l->l_cpu->ci_data.cpu_softints &= ~si->si_machdep;
		softint_execute(si, l, s);
		splx(s);

		/* Interrupts allowed to run again before switching. */
		lwp_lock(l);
		l->l_stat = LSIDL;
		spc_lock(l->l_cpu);
		mi_switch(l);
	}
}

/*
 * softint_picklwp:
 *
 *	Slow path: called from mi_switch() to pick the highest priority
 *	soft interrupt LWP that needs to run.
 */
lwp_t *
softint_picklwp(void)
{
	struct cpu_info *ci;
	u_int mask;
	softint_t *si;
	lwp_t *l;

	ci = curcpu();
	si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;
	mask = ci->ci_data.cpu_softints;

	if ((mask & (1 << SOFTINT_SERIAL)) != 0) {
		l = si[SOFTINT_SERIAL].si_lwp;
	} else if ((mask & (1 << SOFTINT_NET)) != 0) {
		l = si[SOFTINT_NET].si_lwp;
	} else if ((mask & (1 << SOFTINT_BIO)) != 0) {
		l = si[SOFTINT_BIO].si_lwp;
	} else if ((mask & (1 << SOFTINT_CLOCK)) != 0) {
		l = si[SOFTINT_CLOCK].si_lwp;
	} else {
		panic("softint_picklwp");
	}

	return l;
}

#else	/* __HAVE_FAST_SOFTINTS */

/*
 * softint_thread:
 *
 *	Fast path: the LWP is switched to without restoring any state,
 *	so we should not arrive here - there is a direct handoff between
 *	the interrupt stub and softint_dispatch().
 */
void
softint_thread(void *cookie)
{

	panic("softint_thread");
}

/*
 * softint_dispatch:
 *
 *	Fast path: entry point from machine-dependent code.
 */
void
softint_dispatch(lwp_t *pinned, int s)
{
	struct bintime now;
	softint_t *si;
	u_int timing;
	lwp_t *l;

#ifdef DIAGNOSTIC
	if ((pinned->l_pflag & LP_RUNNING) == 0 || curlwp->l_stat != LSIDL) {
		struct lwp *onproc = curcpu()->ci_onproc;
		int s2 = splhigh();
		printf("curcpu=%d, spl=%d curspl=%d\n"
			"onproc=%p => l_stat=%d l_flag=%08x l_cpu=%d\n"
			"curlwp=%p => l_stat=%d l_flag=%08x l_cpu=%d\n"
			"pinned=%p => l_stat=%d l_flag=%08x l_cpu=%d\n",
			cpu_index(curcpu()), s, s2, onproc, onproc->l_stat,
			onproc->l_flag, cpu_index(onproc->l_cpu), curlwp,
			curlwp->l_stat, curlwp->l_flag,
			cpu_index(curlwp->l_cpu), pinned, pinned->l_stat,
			pinned->l_flag, cpu_index(pinned->l_cpu));
		splx(s2);
		panic("softint screwup");
	}
#endif

	l = curlwp;
	si = l->l_private;

	/*
	 * Note the interrupted LWP, and mark the current LWP as running
	 * before proceeding.  Although this must as a rule be done with
	 * the LWP locked, at this point no external agents will want to
	 * modify the interrupt LWP's state.
	 */
	timing = softint_timing;
	l->l_switchto = pinned;
	l->l_stat = LSONPROC;

	/*
	 * Dispatch the interrupt.  If softints are being timed, charge
	 * for it.
	 */
	if (timing) {
		binuptime(&l->l_stime);
		membar_producer();	/* for calcru */
		l->l_pflag |= LP_TIMEINTR;
	}
	l->l_pflag |= LP_RUNNING;
	softint_execute(si, l, s);
	if (timing) {
		binuptime(&now);
		updatertime(l, &now);
		l->l_pflag &= ~LP_TIMEINTR;
	}
	/*
	 * If we blocked while handling the interrupt, the pinned LWP is
	 * gone, so call mi_switch() to select another LWP to run.
	 * softint_dispatch() won't be reentered until the priority is
	 * finally dropped to IPL_NONE on entry to the LWP chosen by
	 * mi_switch().
	 */
	l->l_stat = LSIDL;
	if (l->l_switchto == NULL) {
		lwp_lock(l);
		spc_lock(l->l_cpu);
		mi_switch(l);
		/* NOTREACHED */
	}
	l->l_switchto = NULL;
	l->l_pflag &= ~LP_RUNNING;
}

#endif	/* !__HAVE_FAST_SOFTINTS */