/*	$NetBSD: intr.c,v 1.27 2020/02/20 05:10:01 rin Exp $ */

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.27 2020/02/20 05:10:01 rin Exp $");

#include "opt_interrupt.h"
#include "opt_multiprocessor.h"
#include "opt_pic.h"

#define __INTR_PRIVATE

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/interrupt.h>

#include <powerpc/psl.h>
#include <powerpc/pic/picvar.h>

#if defined(PIC_I8259) || defined (PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <powerpc/pic/ipivar.h>
#endif

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define	PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)

#if defined(PPC_IBM4XX) && !defined(PPC_IBM440)
/* eieio is implemented as sync */
#define REORDER_PROTECT() __asm volatile("sync")
#else
#define REORDER_PROTECT() __asm volatile("sync; eieio")
#endif

struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t	virq_map[NIRQ];
imask_t virq_mask = HWIRQ_MASK;
imask_t	imask[NIPL];
int	primary_pic = 0;

static int	fakeintr(void *);
static int	mapirq(int);
static void	intr_calculatemasks(void);
static struct pic_ops *find_pic_by_hwirq(int);

static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
	/* everything is in bss, no reason to zero it. */
}

int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}
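
/*
 * Typical usage (an illustrative sketch, not taken from any particular
 * port): platform interrupt setup initializes its PIC driver(s) and
 * registers them in cascade order, so each one is handed the next free
 * range of hwirq numbers via pic_intrbase:
 *
 *	pic_init();
 *	(void)pic_add(openpic);		openpic gets hwirqs 0..numintrs-1
 *	(void)pic_add(i8259);		stacked above the openpic's range
 *	pic_finish_setup();
 *
 * pic_add() returns the pic_intrbase assigned to the new PIC, or -1 if
 * MAX_PICS would be exceeded.
 */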

void
pic_finish_setup(void)
{
	for (size_t i = 0; i < num_pics; i++) {
		struct pic_ops * const pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

static struct pic_ops *
find_pic_by_hwirq(int hwirq)
{
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		if (pic->pic_intrbase <= hwirq
		    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
			return pic;
		}
	}
	return NULL;
}

static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg)
{
	return intr_establish_xname(hwirq, type, ipl, ih_fun, ih_arg, NULL);
}

void *
intr_establish_xname(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg, const char *xname)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = kmem_intr_alloc(sizeof(*ih), cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL)
		panic("intr_establish: can't allocate handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE_FALLING:
	case IST_EDGE_RISING:
	case IST_LEVEL_LOW:
	case IST_LEVEL_HIGH:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = uimax(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	strlcpy(ih->ih_xname, xname != NULL ? xname : "unknown",
	    sizeof(ih->ih_xname));
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by any handler on this source.
	 */
	is->is_ipl = maxipl;

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();

	return ih;
}
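
/*
 * Example usage (an illustrative sketch only; the "foo" driver, its
 * softc members and the hwirq value are hypothetical and not part of
 * this file):
 */
#if 0
static int
foo_intr(void *arg)
{
	struct foo_softc * const sc = arg;

	/* ... service the device ... */
	return 1;			/* the interrupt was ours */
}

static void
foo_hook_intr(struct foo_softc *sc, int hwirq)
{
	/* register at IPL_VM for a low-level-triggered line */
	sc->sc_ih = intr_establish_xname(hwirq, IST_LEVEL_LOW, IPL_VM,
	    foo_intr, sc, device_xname(sc->sc_dev));
}

static void
foo_unhook_intr(struct foo_softc *sc)
{
	/* remove the handler registered above */
	intr_disestablish(sc->sc_ih);
	sc->sc_ih = NULL;
}
#endif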

void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;
		} else {
			maxipl = uimax(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	kmem_intr_free((void *)ih, sizeof(*ih));

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}

/*
 * Map max_base irqs into 32 (bits).
 */
static int
mapirq(int hwirq)
{
	struct pic_ops *pic;

	if (hwirq >= max_base)
		panic("invalid irq %d", hwirq);

	if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
		panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

	if (virq_map[hwirq])
		return virq_map[hwirq];

	if (virq_mask == 0)
		panic("virq overflow");

	const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
	struct intr_source * const is = intrsources + virq;

	virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

	is->is_hwirq = hwirq;
	is->is_pic = pic;
	virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
	printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
	return virq;
}
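
/*
 * Illustrative example (hypothetical numbers): with an openpic owning
 * hwirqs 0-63 and a cascaded i8259 owning hwirqs 64-79, the first call
 * to establish a handler on, say, hwirq 68 allocates a free virtual IRQ
 * bit for it and records it in virq_map[68]; later establishes on the
 * same hwirq simply reuse that virq.
 */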

static const char * const intr_typenames[] = {
   [IST_NONE]  = "none",
   [IST_PULSE] = "pulsed",
   [IST_EDGE_FALLING]  = "falling edge triggered",
   [IST_EDGE_RISING]  = "rising edge triggered",
   [IST_LEVEL_LOW] = "low level triggered",
   [IST_LEVEL_HIGH] = "high level triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	imask_t newmask[NIPL];
	struct intr_source *is;
	struct intrhand *ih;
	int irq;

	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
		newmask[ipl] = 0;
	}

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		for (ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
			newmask[ih->ih_ipl] |= PIC_VIRQ_TO_MASK(irq);
		}
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}
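
	/*
	 * Worked example (hypothetical numbers): if virq 3 has a handler
	 * at IPL_VM and virq 5 has one at IPL_SCHED, the first pass set
	 * only PIC_VIRQ_TO_MASK(3) in newmask[IPL_VM] and
	 * PIC_VIRQ_TO_MASK(5) in newmask[IPL_SCHED]; after the loop above
	 * newmask[IPL_SCHED] (and every higher IPL) also contains
	 * PIC_VIRQ_TO_MASK(3), so raising to IPL_SCHED blocks both
	 * sources.
	 */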

#ifdef PIC_DEBUG
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}

void
pic_enable_irq(int hwirq)
{
	struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL)
		panic("%s: bogus IRQ %d", __func__, hwirq);
	const int type = intrsources[virq_map[hwirq]].is_type;
	(*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
}

void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}

static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    "%s: irq %d, hwirq %d, is %p ih %p: "
		     "NULL interrupt handler!\n", __func__,
		     virq, is->is_hwirq, is, ih);
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}

void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Deliver pending interrupts no longer masked at the current IPL. */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get most significant pending bit */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		splraise(is->is_ipl);
		mtmsr(emsr);
		intr_deliver(is, virq);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl; /* Don't use splx... we are here already! */

		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	const u_int softints = ci->ci_data.cpu_softints &
				 (IPL_SOFTMASK << pcpl);

	/* make sure there are no bits to screw with the line above */
	KASSERT((ci->ci_data.cpu_softints & ~IPL_SOFTMASK) == 0);

	if (__predict_false(softints != 0)) {
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}

int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255)
		return 0;

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	do {
		const int virq = virq_map[picirq + pic->pic_intrbase];

		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			ci->ci_ipending |= v_imen; /* Masked! Mark this as pending */
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			splraise(is->is_ipl);
			mtmsr(msr | PSL_EE);
			intr_deliver(is, virq);
			mtmsr(msr);
			ci->ci_cpl = pcpl;

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}

void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);

	return;

}

int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	if (ncpl == ci->ci_cpl) return ncpl;
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = uimax(ncpl, ocpl);
	REORDER_PROTECT();
	__insn_barrier();
	return ocpl;
}

static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
	if (ci->ci_ipending & ~imask[ncpl])
		return true;
#ifdef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints & (IPL_SOFTMASK << ncpl))
		return true;
#endif
	return false;
}

void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	REORDER_PROTECT();
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	REORDER_PROTECT();
}

int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	REORDER_PROTECT();
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	REORDER_PROTECT();
	return ocpl;
}
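
/*
 * Typical usage (an illustrative sketch, not taken from this file):
 * code that shares state with a handler established at IPL_VM brackets
 * its critical section by raising and later restoring the priority
 * level, e.g.
 *
 *	s = splraise(IPL_VM);	(often via an splvm()-style wrapper)
 *	... touch the state also used by the IPL_VM handler ...
 *	splx(s);
 *
 * splx() not only restores ci_cpl but also delivers, via
 * pic_do_pending_int(), any interrupts that became pending while the
 * level was raised.
 */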

void
genppc_cpu_configure(void)
{
	aprint_normal("vmmask %x schedmask %x highmask %x\n",
	    (u_int)imask[IPL_VM] & 0x7fffffff,
	    (u_int)imask[IPL_SCHED] & 0x7fffffff,
	    (u_int)imask[IPL_HIGH] & 0x7fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq_map[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			if (maybe_irq == -1) {
				maybe_irq = irq;
				continue;
			}
			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			if (depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif

static struct intr_source *
intr_get_source(const char *intrid)
{
	struct intr_source *is;
	int irq;

	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (strcmp(intrid, is->is_source) == 0)
			return is;
	}
	return NULL;
}

static struct intrhand *
intr_get_handler(const char *intrid)
{
	struct intr_source *is;

	is = intr_get_source(intrid);
	if (is != NULL)
		return is->is_hand;
	return NULL;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
	struct intr_source *is;

	/* XXX interrupt is always generated by CPU 0 */
	if (cpu_idx != 0)
		return 0;

	is = intr_get_source(intrid);
	if (is != NULL)
		return is->is_ev.ev_count;
	return 0;
}

void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intr_source *is;

	kcpuset_zero(cpuset);

	is = intr_get_source(intrid);
	if (is != NULL)
		kcpuset_set(cpuset, 0);	/* XXX */
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
	struct intrhand *ih;

	if (len == 0)
		return;

	buf[0] = '\0';

	for (ih = intr_get_handler(intrid); ih != NULL; ih = ih->ih_next) {
		if (buf[0] != '\0')
			strlcat(buf, ", ", len);
		strlcat(buf, ih->ih_xname, len);
	}
}

struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intr_source *is;
	struct intrids_handler *ii_handler;
	intrid_t *ids;
	int i, irq, count;

	if (kcpuset_iszero(cpuset))
		return NULL;
	if (!kcpuset_isset(cpuset, 0))	/* XXX */
		return NULL;

	count = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand != NULL)
			count++;
	}

	ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
	    KM_SLEEP);
	if (ii_handler == NULL)
		return NULL;
	ii_handler->iih_nids = count;
	if (count == 0)
		return ii_handler;

	ids = ii_handler->iih_intrids;
	i = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		/*
		 * Ignore sources whose handlers were established after
		 * "count" was computed above.
		 */
		if (i >= count)
			break;

		if (is->is_hand == NULL)
			continue;

		strncpy(ids[i], is->is_source, sizeof(intrid_t));
		i++;
	}

	return ii_handler;
}

void
interrupt_destruct_intrids(struct intrids_handler *ii_handler)
{
	size_t iih_size;

	if (ii_handler == NULL)
		return;

	iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
	kmem_free(ii_handler, iih_size);
}
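
/*
 * Example usage of the MI interrupt query interface above (an
 * illustrative sketch; the enumeration code below is hypothetical and
 * only mirrors what an intrctl-style consumer might do):
 */
#if 0
	struct intrids_handler *iih;
	kcpuset_t *cpuset;
	char devs[64];

	kcpuset_create(&cpuset, true);
	kcpuset_set(cpuset, 0);		/* this implementation only uses CPU 0 */

	iih = interrupt_construct_intrids(cpuset);
	for (int i = 0; iih != NULL && i < iih->iih_nids; i++) {
		interrupt_get_devname(iih->iih_intrids[i], devs, sizeof(devs));
		printf("%s: %s, %llu events\n", iih->iih_intrids[i], devs,
		    (unsigned long long)interrupt_get_count(iih->iih_intrids[i], 0));
	}
	interrupt_destruct_intrids(iih);
	kcpuset_destroy(cpuset);
#endif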

int
interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	return EOPNOTSUPP;
}

#undef REORDER_PROTECT