xref: /netbsd-src/sys/arch/powerpc/booke/e500_intr.c (revision d16b7486a53dcb8072b60ec6fcb4373a2d0c27b7)
1 /*	$NetBSD: e500_intr.c,v 1.47 2022/07/22 19:54:14 thorpej Exp $	*/
2 /*-
3  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to The NetBSD Foundation
7  * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
8  * Agency and which was developed by Matt Thomas of 3am Software Foundry.
9  *
10  * This material is based upon work supported by the Defense Advanced Research
11  * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
12  * Contract No. N66001-09-C-2073.
13  * Approved for Public Release, Distribution Unlimited
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #define __INTR_PRIVATE
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: e500_intr.c,v 1.47 2022/07/22 19:54:14 thorpej Exp $");
41 
42 #ifdef _KERNEL_OPT
43 #include "opt_mpc85xx.h"
44 #include "opt_multiprocessor.h"
45 #endif
46 
47 #include <sys/param.h>
48 #include <sys/proc.h>
49 #include <sys/intr.h>
50 #include <sys/cpu.h>
51 #include <sys/kmem.h>
52 #include <sys/atomic.h>
53 #include <sys/bus.h>
54 #include <sys/xcall.h>
55 #include <sys/ipi.h>
56 #include <sys/bitops.h>
57 #include <sys/interrupt.h>
58 
59 #include <uvm/uvm_extern.h>
60 
61 #ifdef __HAVE_FAST_SOFTINTS
62 #include <powerpc/softint.h>
63 #endif
64 
65 #include <powerpc/spr.h>
66 #include <powerpc/booke/spr.h>
67 
68 #include <powerpc/booke/cpuvar.h>
69 #include <powerpc/booke/e500reg.h>
70 #include <powerpc/booke/e500var.h>
71 #include <powerpc/booke/openpicreg.h>
72 
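/*
 * The OpenPIC current task priority register (CTPR) runs from 0 to 15 and
 * masks every source whose priority is less than or equal to it.  These
 * macros bias NetBSD IPLs onto that range so that IPL_HIGH maps to CTPR 15
 * (everything masked) and IPL_NONE maps to CTPR (15 - IPL_HIGH);
 * e500_splset() below simply writes IPL2CTPR(ipl) into the CTPR.
 */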
73 #define	IPL2CTPR(ipl)		((ipl) + 15 - IPL_HIGH)
74 #define CTPR2IPL(ctpr)		((ctpr) - (15 - IPL_HIGH))
75 
76 #define	IST_PERCPU_P(ist)	((ist) >= IST_TIMER)
77 
78 struct e500_intr_irq_info {
79 	bus_addr_t irq_vpr;
80 	bus_addr_t irq_dr;
81 	u_int irq_vector;
82 };
83 
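/*
 * One intr_source exists per OpenPIC vector (the e500_intr_sources array
 * below): the established handler and its argument, the IPL/IST/IRQ it was
 * registered with, a reference count for shared establishment, the cached
 * offsets of the source's vector/priority (VPR) and destination (DR)
 * registers, and the names reported through the MI interrupt(9) interface.
 */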
84 struct intr_source {
85 	int (*is_func)(void *);
86 	void *is_arg;
87 	int8_t is_ipl;
88 	uint8_t is_ist;
89 	uint8_t is_irq;
90 	uint8_t is_refcnt;
91 	bus_size_t is_vpr;
92 	bus_size_t is_dr;
93 	char is_source[INTRIDBUF];
94 	char is_xname[INTRDEVNAMEBUF];
95 };
96 
97 #define	INTR_SOURCE_INITIALIZER \
98 	{ .is_func = e500_intr_spurious, .is_arg = NULL, \
99 	.is_irq = -1, .is_ipl = IPL_NONE, .is_ist = IST_NONE, \
100 	.is_source = "", .is_xname = "", }
101 
102 struct e500_intr_name {
103 	uint8_t in_irq;
104 	const char in_name[16];
105 };
106 
107 static const struct e500_intr_name e500_onchip_intr_names[] = {
108 	{ ISOURCE_L2, "l2" },
109 	{ ISOURCE_ECM, "ecm" },
110 	{ ISOURCE_DDR, "ddr" },
111 	{ ISOURCE_LBC, "lbc" },
112 	{ ISOURCE_DMA_CHAN1, "dma-chan1" },
113 	{ ISOURCE_DMA_CHAN2, "dma-chan2" },
114 	{ ISOURCE_DMA_CHAN3, "dma-chan3" },
115 	{ ISOURCE_DMA_CHAN4, "dma-chan4" },
116 	{ ISOURCE_PCI1, "pci1" },
117 	{ ISOURCE_PCIEX2, "pcie2" },
118 	{ ISOURCE_PCIEX, "pcie1" },
119 	{ ISOURCE_PCIEX3, "pcie3" },
120 	{ ISOURCE_USB1, "usb1" },
121 	{ ISOURCE_ETSEC1_TX, "etsec1-tx" },
122 	{ ISOURCE_ETSEC1_RX, "etsec1-rx" },
123 	{ ISOURCE_ETSEC3_TX, "etsec3-tx" },
124 	{ ISOURCE_ETSEC3_RX, "etsec3-rx" },
125 	{ ISOURCE_ETSEC3_ERR, "etsec3-err" },
126 	{ ISOURCE_ETSEC1_ERR, "etsec1-err" },
127 	{ ISOURCE_ETSEC2_TX, "etsec2-tx" },
128 	{ ISOURCE_ETSEC2_RX, "etsec2-rx" },
129 	{ ISOURCE_ETSEC4_TX, "etsec4-tx" },
130 	{ ISOURCE_ETSEC4_RX, "etsec4-rx" },
131 	{ ISOURCE_ETSEC4_ERR, "etsec4-err" },
132 	{ ISOURCE_ETSEC2_ERR, "etsec2-err" },
133 	{ ISOURCE_DUART, "duart" },
134 	{ ISOURCE_I2C, "i2c" },
135 	{ ISOURCE_PERFMON, "perfmon" },
136 	{ ISOURCE_SECURITY1, "sec1" },
137 	{ ISOURCE_GPIO, "gpio" },
138 	{ ISOURCE_SRIO_EWPU, "srio-ewpu" },
139 	{ ISOURCE_SRIO_ODBELL, "srio-odbell" },
140 	{ ISOURCE_SRIO_IDBELL, "srio-idbell" },
141 	{ ISOURCE_SRIO_OMU1, "srio-omu1" },
142 	{ ISOURCE_SRIO_IMU1, "srio-imu1" },
143 	{ ISOURCE_SRIO_OMU2, "srio-omu2" },
144 	{ ISOURCE_SRIO_IMU2, "srio-imu2" },
145 	{ ISOURCE_SECURITY2, "sec2" },
146 	{ ISOURCE_SPI, "spi" },
147 	{ ISOURCE_ETSEC1_PTP, "etsec1-ptp" },
148 	{ ISOURCE_ETSEC2_PTP, "etsec2-ptp" },
149 	{ ISOURCE_ETSEC3_PTP, "etsec3-ptp" },
150 	{ ISOURCE_ETSEC4_PTP, "etsec4-ptp" },
151 	{ ISOURCE_ESDHC, "esdhc" },
152 	{ 0, "" },
153 };
154 
155 const struct e500_intr_name default_external_intr_names[] = {
156 	{ 0, "" },
157 };
158 
159 static const struct e500_intr_name e500_msigroup_intr_names[] = {
160 	{ 0, "msigroup0" },
161 	{ 1, "msigroup1" },
162 	{ 2, "msigroup2" },
163 	{ 3, "msigroup3" },
164 	{ 4, "msigroup4" },
165 	{ 5, "msigroup5" },
166 	{ 6, "msigroup6" },
167 	{ 7, "msigroup7" },
168 	{ 0, "" },
169 };
170 
171 static const struct e500_intr_name e500_timer_intr_names[] = {
172 	{ 0, "timer0" },
173 	{ 1, "timer1" },
174 	{ 2, "timer2" },
175 	{ 3, "timer3" },
176 	{ 0, "" },
177 };
178 
179 static const struct e500_intr_name e500_ipi_intr_names[] = {
180 	{ 0, "ipi0" },
181 	{ 1, "ipi1" },
182 	{ 2, "ipi2" },
183 	{ 3, "ipi3" },
184 	{ 0, "" },
185 };
186 
187 static const struct e500_intr_name e500_mi_intr_names[] = {
188 	{ 0, "mi0" },
189 	{ 1, "mi1" },
190 	{ 2, "mi2" },
191 	{ 3, "mi3" },
192 	{ 0, "" },
193 };
194 
195 struct e500_intr_info {
196 	u_int ii_external_sources;
197 	uint32_t ii_onchip_bitmap[2];
198 	u_int ii_onchip_sources;
199 	u_int ii_msigroup_sources;
200 	u_int ii_ipi_sources;			/* per-cpu */
201 	u_int ii_timer_sources;			/* per-cpu */
202 	u_int ii_mi_sources;			/* per-cpu */
203 	u_int ii_percpu_sources;
204 	const struct e500_intr_name *ii_external_intr_names;
205 	const struct e500_intr_name *ii_onchip_intr_names;
206 	uint8_t ii_ist_vectors[IST_MAX+1];
207 };
208 
209 static kmutex_t e500_intr_lock __cacheline_aligned;
210 static struct e500_intr_info e500_intr_info;
211 
212 #define	INTR_INFO_DECL(lc_chip, UC_CHIP)				\
213 static const struct e500_intr_info lc_chip##_intr_info = {		\
214 	.ii_external_sources = UC_CHIP ## _EXTERNALSOURCES,		\
215 	.ii_onchip_bitmap = UC_CHIP ## _ONCHIPBITMAP,			\
216 	.ii_onchip_sources = UC_CHIP ## _ONCHIPSOURCES,			\
217 	.ii_msigroup_sources = UC_CHIP ## _MSIGROUPSOURCES,		\
218 	.ii_timer_sources = UC_CHIP ## _TIMERSOURCES,			\
219 	.ii_ipi_sources = UC_CHIP ## _IPISOURCES,			\
220 	.ii_mi_sources = UC_CHIP ## _MISOURCES,				\
221 	.ii_percpu_sources = UC_CHIP ## _TIMERSOURCES			\
222 	    + UC_CHIP ## _IPISOURCES + UC_CHIP ## _MISOURCES, 		\
223 	.ii_external_intr_names = lc_chip ## _external_intr_names,	\
224 	.ii_onchip_intr_names = lc_chip ## _onchip_intr_names,		\
225 	.ii_ist_vectors = {						\
226 		[IST_NONE]		= ~0,				\
227 		[IST_EDGE]		= 0,				\
228 		[IST_LEVEL_LOW]		= 0,				\
229 		[IST_LEVEL_HIGH]	= 0,				\
230 		[IST_PULSE]		= 0,				\
231 		[IST_ONCHIP]		= UC_CHIP ## _EXTERNALSOURCES,	\
232 		[IST_MSIGROUP]		= UC_CHIP ## _EXTERNALSOURCES	\
233 					    + UC_CHIP ## _ONCHIPSOURCES, \
234 		[IST_TIMER]		= UC_CHIP ## _EXTERNALSOURCES	\
235 					    + UC_CHIP ## _ONCHIPSOURCES	\
236 					    + UC_CHIP ## _MSIGROUPSOURCES, \
237 		[IST_IPI]		= UC_CHIP ## _EXTERNALSOURCES	\
238 					    + UC_CHIP ## _ONCHIPSOURCES	\
239 					    + UC_CHIP ## _MSIGROUPSOURCES \
240 					    + UC_CHIP ## _TIMERSOURCES,	\
241 		[IST_MI]		= UC_CHIP ## _EXTERNALSOURCES	\
242 					    + UC_CHIP ## _ONCHIPSOURCES	\
243 					    + UC_CHIP ## _MSIGROUPSOURCES \
244 					    + UC_CHIP ## _TIMERSOURCES	\
245 					    + UC_CHIP ## _IPISOURCES,	\
246 		[IST_MAX]		= UC_CHIP ## _EXTERNALSOURCES	\
247 					    + UC_CHIP ## _ONCHIPSOURCES	\
248 					    + UC_CHIP ## _MSIGROUPSOURCES \
249 					    + UC_CHIP ## _TIMERSOURCES	\
250 					    + UC_CHIP ## _IPISOURCES	\
251 					    + UC_CHIP ## _MISOURCES,	\
252 	},								\
253 }
254 
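/*
 * Vector-number layout implied by ii_ist_vectors above: external sources
 * start at vector 0, followed by the on-chip sources, the MSI groups, and
 * then the per-cpu sources (timers, IPIs, message interrupts).  For the
 * timer and message-interrupt vectors, e500_intr_irq_info_get() adds a
 * per-CPU offset of ii_percpu_sources; the IPI vectors are shared.
 */
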
255 #ifdef MPC8536
256 #define	mpc8536_external_intr_names	default_external_intr_names
257 const struct e500_intr_name mpc8536_onchip_intr_names[] = {
258 	{ ISOURCE_SATA2, "sata2" },
259 	{ ISOURCE_USB2, "usb2" },
260 	{ ISOURCE_USB3, "usb3" },
261 	{ ISOURCE_SATA1, "sata1" },
262 	{ 0, "" },
263 };
264 
265 INTR_INFO_DECL(mpc8536, MPC8536);
266 #endif
267 
268 #ifdef MPC8544
269 #define	mpc8544_external_intr_names	default_external_intr_names
270 const struct e500_intr_name mpc8544_onchip_intr_names[] = {
271 	{ 0, "" },
272 };
273 
274 INTR_INFO_DECL(mpc8544, MPC8544);
275 #endif
276 #ifdef MPC8548
277 #define	mpc8548_external_intr_names	default_external_intr_names
278 const struct e500_intr_name mpc8548_onchip_intr_names[] = {
279 	{ ISOURCE_PCI1, "pci1" },
280 	{ ISOURCE_PCI2, "pci2" },
281 	{ 0, "" },
282 };
283 
284 INTR_INFO_DECL(mpc8548, MPC8548);
285 #endif
286 #ifdef MPC8555
287 #define	mpc8555_external_intr_names	default_external_intr_names
288 const struct e500_intr_name mpc8555_onchip_intr_names[] = {
289 	{ ISOURCE_PCI2, "pci2" },
290 	{ ISOURCE_CPM, "CPM" },
291 	{ 0, "" },
292 };
293 
294 INTR_INFO_DECL(mpc8555, MPC8555);
295 #endif
296 #ifdef MPC8568
297 #define	mpc8568_external_intr_names	default_external_intr_names
298 const struct e500_intr_name mpc8568_onchip_intr_names[] = {
299 	{ ISOURCE_QEB_LOW, "QEB low" },
300 	{ ISOURCE_QEB_PORT, "QEB port" },
301 	{ ISOURCE_QEB_IECC, "QEB iram ecc" },
302 	{ ISOURCE_QEB_MUECC, "QEB ram ecc" },
303 	{ ISOURCE_TLU1, "tlu1" },
304 	{ ISOURCE_QEB_HIGH, "QEB high" },
305 	{ 0, "" },
306 };
307 
308 INTR_INFO_DECL(mpc8568, MPC8568);
309 #endif
310 #ifdef MPC8572
311 #define	mpc8572_external_intr_names	default_external_intr_names
312 const struct e500_intr_name mpc8572_onchip_intr_names[] = {
313 	{ ISOURCE_PCIEX3_MPC8572, "pcie3" },
314 	{ ISOURCE_FEC, "fec" },
315 	{ ISOURCE_PME_GENERAL, "pme" },
316 	{ ISOURCE_TLU1, "tlu1" },
317 	{ ISOURCE_TLU2, "tlu2" },
318 	{ ISOURCE_PME_CHAN1, "pme-chan1" },
319 	{ ISOURCE_PME_CHAN2, "pme-chan2" },
320 	{ ISOURCE_PME_CHAN3, "pme-chan3" },
321 	{ ISOURCE_PME_CHAN4, "pme-chan4" },
322 	{ ISOURCE_DMA2_CHAN1, "dma2-chan1" },
323 	{ ISOURCE_DMA2_CHAN2, "dma2-chan2" },
324 	{ ISOURCE_DMA2_CHAN3, "dma2-chan3" },
325 	{ ISOURCE_DMA2_CHAN4, "dma2-chan4" },
326 	{ 0, "" },
327 };
328 
329 INTR_INFO_DECL(mpc8572, MPC8572);
330 #endif
331 
332 #ifdef P1025
333 #define	p1025_external_intr_names	default_external_intr_names
334 const struct e500_intr_name p1025_onchip_intr_names[] = {
335 	{ ISOURCE_PCIEX3_MPC8572, "pcie3" },
336 	{ ISOURCE_ETSEC1_G1_TX, "etsec1-g1-tx" },
337 	{ ISOURCE_ETSEC1_G1_RX, "etsec1-g1-rx" },
338 	{ ISOURCE_ETSEC1_G1_ERR, "etsec1-g1-error" },
339 	{ ISOURCE_ETSEC2_G1_TX, "etsec2-g1-tx" },
340 	{ ISOURCE_ETSEC2_G1_RX, "etsec2-g1-rx" },
341 	{ ISOURCE_ETSEC2_G1_ERR, "etsec2-g1-error" },
342 	{ ISOURCE_ETSEC3_G1_TX, "etsec3-g1-tx" },
343 	{ ISOURCE_ETSEC3_G1_RX, "etsec3-g1-rx" },
344 	{ ISOURCE_ETSEC3_G1_ERR, "etsec3-g1-error" },
345 	{ ISOURCE_QEB_MUECC, "qeb-low" },
346 	{ ISOURCE_QEB_HIGH, "qeb-crit" },
347 	{ ISOURCE_DMA2_CHAN1, "dma2-chan1" },
348 	{ ISOURCE_DMA2_CHAN2, "dma2-chan2" },
349 	{ ISOURCE_DMA2_CHAN3, "dma2-chan3" },
350 	{ ISOURCE_DMA2_CHAN4, "dma2-chan4" },
351 	{ 0, "" },
352 };
353 
354 INTR_INFO_DECL(p1025, P1025);
355 #endif
356 
357 #ifdef P2020
358 #define	p20x0_external_intr_names	default_external_intr_names
359 const struct e500_intr_name p20x0_onchip_intr_names[] = {
360 	{ ISOURCE_PCIEX3_MPC8572, "pcie3" },
361 	{ ISOURCE_DMA2_CHAN1, "dma2-chan1" },
362 	{ ISOURCE_DMA2_CHAN2, "dma2-chan2" },
363 	{ ISOURCE_DMA2_CHAN3, "dma2-chan3" },
364 	{ ISOURCE_DMA2_CHAN4, "dma2-chan4" },
365 	{ 0, "" },
366 };
367 
368 INTR_INFO_DECL(p20x0, P20x0);
369 #endif
370 
371 #ifdef P1023
372 #define	p1023_external_intr_names	default_external_intr_names
373 const struct e500_intr_name p1023_onchip_intr_names[] = {
374 	{ ISOURCE_FMAN,            "fman" },
375 	{ ISOURCE_MDIO,            "mdio" },
376 	{ ISOURCE_QMAN0,           "qman0" },
377 	{ ISOURCE_BMAN0,           "bman0" },
378 	{ ISOURCE_QMAN1,           "qman1" },
379 	{ ISOURCE_BMAN1,           "bman1" },
380 	{ ISOURCE_QMAN2,           "qman2" },
381 	{ ISOURCE_BMAN2,           "bman2" },
382 	{ ISOURCE_SECURITY2_P1023, "sec2" },
383 	{ ISOURCE_SEC_GENERAL,     "sec-general" },
384 	{ ISOURCE_DMA2_CHAN1,      "dma2-chan1" },
385 	{ ISOURCE_DMA2_CHAN2,      "dma2-chan2" },
386 	{ ISOURCE_DMA2_CHAN3,      "dma2-chan3" },
387 	{ ISOURCE_DMA2_CHAN4,      "dma2-chan4" },
388 	{ 0, "" },
389 };
390 
391 INTR_INFO_DECL(p1023, P1023);
392 #endif
393 
394 static const char ist_names[][12] = {
395 	[IST_NONE] = "none",
396 	[IST_EDGE] = "edge",
397 	[IST_LEVEL_LOW] = "level-",
398 	[IST_LEVEL_HIGH] = "level+",
399 	[IST_PULSE] = "pulse",
400 	[IST_MSI] = "msi",
401 	[IST_ONCHIP] = "onchip",
402 	[IST_MSIGROUP] = "msigroup",
403 	[IST_TIMER] = "timer",
404 	[IST_IPI] = "ipi",
405 	[IST_MI] = "msgint",
406 };
407 
408 static struct intr_source *e500_intr_sources;
409 static const struct intr_source *e500_intr_last_source;
410 
411 static void 	*e500_intr_establish(int, int, int, int (*)(void *), void *,
412 		    const char *);
413 static void 	e500_intr_disestablish(void *);
414 static void 	e500_intr_cpu_attach(struct cpu_info *ci);
415 static void 	e500_intr_cpu_hatch(struct cpu_info *ci);
416 static void	e500_intr_cpu_send_ipi(cpuid_t, uint32_t);
417 static void 	e500_intr_init(void);
418 static void 	e500_intr_init_precpu(void);
419 static const char *e500_intr_string(int, int, char *, size_t);
420 static const char *e500_intr_typename(int);
421 static void 	e500_critintr(struct trapframe *tf);
422 static void 	e500_decrintr(struct trapframe *tf);
423 static void 	e500_extintr(struct trapframe *tf);
424 static void 	e500_fitintr(struct trapframe *tf);
425 static void 	e500_wdogintr(struct trapframe *tf);
426 static void	e500_spl0(void);
427 static int 	e500_splraise(int);
428 static void 	e500_splx(int);
429 static const char *e500_intr_all_name_lookup(int, int);
430 
431 const struct intrsw e500_intrsw = {
432 	.intrsw_establish = e500_intr_establish,
433 	.intrsw_disestablish = e500_intr_disestablish,
434 	.intrsw_init = e500_intr_init,
435 	.intrsw_cpu_attach = e500_intr_cpu_attach,
436 	.intrsw_cpu_hatch = e500_intr_cpu_hatch,
437 	.intrsw_cpu_send_ipi = e500_intr_cpu_send_ipi,
438 	.intrsw_string = e500_intr_string,
439 	.intrsw_typename = e500_intr_typename,
440 
441 	.intrsw_critintr = e500_critintr,
442 	.intrsw_decrintr = e500_decrintr,
443 	.intrsw_extintr = e500_extintr,
444 	.intrsw_fitintr = e500_fitintr,
445 	.intrsw_wdogintr = e500_wdogintr,
446 
447 	.intrsw_splraise = e500_splraise,
448 	.intrsw_splx = e500_splx,
449 	.intrsw_spl0 = e500_spl0,
450 
451 #ifdef __HAVE_FAST_SOFTINTS
452 	.intrsw_softint_init_md = powerpc_softint_init_md,
453 	.intrsw_softint_trigger = powerpc_softint_trigger,
454 #endif
455 };
456 
457 static bool wdog_barked;
458 
459 static inline uint32_t
460 openpic_read(struct cpu_softc *cpu, bus_size_t offset)
461 {
462 
463 	return bus_space_read_4(cpu->cpu_bst, cpu->cpu_bsh,
464 	    OPENPIC_BASE + offset);
465 }
466 
467 static inline void
468 openpic_write(struct cpu_softc *cpu, bus_size_t offset, uint32_t val)
469 {
470 
471 	return bus_space_write_4(cpu->cpu_bst, cpu->cpu_bsh,
472 	    OPENPIC_BASE + offset, val);
473 }
474 
475 static const char *
476 e500_intr_external_name_lookup(int irq)
477 {
478 	prop_array_t extirqs = board_info_get_object("external-irqs");
479 	prop_string_t irqname = prop_array_get(extirqs, irq);
480 	KASSERT(irqname != NULL);
481 	KASSERT(prop_object_type(irqname) == PROP_TYPE_STRING);
482 
483 	return prop_string_value(irqname);
484 }
485 
486 static const char *
487 e500_intr_name_lookup(const struct e500_intr_name *names, int irq)
488 {
489 	for (; names->in_name[0] != '\0'; names++) {
490 		if (names->in_irq == irq)
491 			return names->in_name;
492 	}
493 
494 	return NULL;
495 }
496 
497 static const char *
498 e500_intr_onchip_name_lookup(int irq)
499 {
500 	const char *name;
501 
502 	name = e500_intr_name_lookup(e500_intr_info.ii_onchip_intr_names, irq);
503 	if (name == NULL)
504 	       name = e500_intr_name_lookup(e500_onchip_intr_names, irq);
505 
506 	return name;
507 }
508 
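/*
 * Change the current IPL: write the corresponding CTPR value into the
 * OpenPIC and record the new level in ci_cpl.  The callers in this file
 * disable MSR[EE] around the write (see e500_spl0(), e500_splx() and
 * e500_splraise()).
 */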
509 static inline void
510 e500_splset(struct cpu_info *ci, int ipl)
511 {
512 	struct cpu_softc * const cpu = ci->ci_softc;
513 
514 #ifdef __HAVE_FAST_SOFTINTS /* XXX */
515 	KASSERT((curlwp->l_pflag & LP_INTR) == 0 || ipl != IPL_NONE);
516 #endif
517 	const u_int ctpr = IPL2CTPR(ipl);
518 	KASSERT(openpic_read(cpu, OPENPIC_CTPR) == IPL2CTPR(ci->ci_cpl));
519 	openpic_write(cpu, OPENPIC_CTPR, ctpr);
520 	KASSERT(openpic_read(cpu, OPENPIC_CTPR) == ctpr);
521 #ifdef DIAGNOSTIC
522 	cpu->cpu_spl_tb[ipl][ci->ci_cpl] = mftb();
523 #endif
524 	ci->ci_cpl = ipl;
525 }
526 
527 static void
528 e500_spl0(void)
529 {
530 	wrtee(0);
531 
532 	struct cpu_info * const ci = curcpu();
533 
534 #ifdef __HAVE_FAST_SOFTINTS
535 	if (__predict_false(ci->ci_data.cpu_softints != 0)) {
536 		e500_splset(ci, IPL_HIGH);
537 		wrtee(PSL_EE);
538 		powerpc_softint(ci, IPL_NONE,
539 		    (vaddr_t)__builtin_return_address(0));
540 		wrtee(0);
541 	}
542 #endif /* __HAVE_FAST_SOFTINTS */
543 	e500_splset(ci, IPL_NONE);
544 
545 	wrtee(PSL_EE);
546 }
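
/*
 * Illustrative use of the spl interface exported through e500_intrsw (a
 * hedged sketch, not code from this file; the protected data and the
 * surrounding driver are hypothetical):
 *
 *	const int s = splvm();		(ends up in e500_splraise(IPL_VM))
 *	... touch state shared with an IPL_VM interrupt handler ...
 *	splx(s);			(ends up in e500_splx(s))
 */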
547 
548 static void
549 e500_splx(int ipl)
550 {
551 	struct cpu_info * const ci = curcpu();
552 	const int old_ipl = ci->ci_cpl;
553 
554 	/* If we panicked because of the watchdog, PSL_CE will be clear. */
555 	KASSERT(wdog_barked || (mfmsr() & PSL_CE));
556 
557 	if (ipl == old_ipl)
558 		return;
559 
560 	if (__predict_false(ipl > old_ipl)) {
561 		printf("%s: %p: cpl=%u: ignoring splx(%u) to raise ipl\n",
562 		    __func__, __builtin_return_address(0), old_ipl, ipl);
563 		if (old_ipl == IPL_NONE)
564 			Debugger();
565 	}
566 
567 	// const
568 	register_t msr = wrtee(0);
569 #ifdef __HAVE_FAST_SOFTINTS
570 	const u_int softints = ci->ci_data.cpu_softints & (IPL_SOFTMASK << ipl);
571 	if (__predict_false(softints != 0)) {
572 		e500_splset(ci, IPL_HIGH);
573 		wrtee(msr);
574 		powerpc_softint(ci, ipl,
575 		    (vaddr_t)__builtin_return_address(0));
576 		wrtee(0);
577 	}
578 #endif /* __HAVE_FAST_SOFTINTS */
579 	e500_splset(ci, ipl);
580 #if 1
581 	if (ipl < IPL_VM && old_ipl >= IPL_VM)
582 		msr = PSL_EE;
583 #endif
584 	wrtee(msr);
585 }
586 
587 static int
588 e500_splraise(int ipl)
589 {
590 	struct cpu_info * const ci = curcpu();
591 	const int old_ipl = ci->ci_cpl;
592 
593 	/* if we panicked because of watchdog, PSL_CE will be clear.  */
594 	KASSERT(wdog_barked || (mfmsr() & PSL_CE));
595 
596 	if (old_ipl < ipl) {
597 		//const
598 		register_t msr = wrtee(0);
599 		e500_splset(ci, ipl);
600 #if 0
601 		if (old_ipl < IPL_VM && ipl >= IPL_VM)
602 			msr = 0;
603 #endif
604 		wrtee(msr);
605 	}
606 #if 0
607 	else if (ipl == IPL_NONE) {
608 		panic("%s: %p: cpl=%u: attempt to splraise(IPL_NONE)",
609 		    __func__, __builtin_return_address(0), old_ipl);
610 	} else if (old_ipl > ipl) {
611 		printf("%s: %p: cpl=%u: ignoring splraise(%u) to lower ipl\n",
612 		    __func__, __builtin_return_address(0), old_ipl, ipl);
613 	}
614 #endif
615 
616 	return old_ipl;
617 }
618 
619 static int
620 e500_intr_spurious(void *arg)
621 {
622 	return 0;
623 }
624 
625 static bool
626 e500_intr_irq_info_get(struct cpu_info *ci, u_int irq, int ipl, int ist,
627 	struct e500_intr_irq_info *ii)
628 {
629 	const struct e500_intr_info * const info = &e500_intr_info;
630 	bool ok;
631 
632 #if DEBUG > 2
633 	printf("%s(%p,irq=%u,ipl=%u,ist=%u,%p)\n", __func__, ci, irq, ipl, ist, ii);
634 #endif
635 
636 	if (ipl < IPL_VM || ipl > IPL_HIGH) {
637 #if DEBUG > 2
638 		printf("%s:%d ipl=%u\n", __func__, __LINE__, ipl);
639 #endif
640 		return false;
641 	}
642 
643 	if (ist <= IST_NONE || ist >= IST_MAX) {
644 #if DEBUG > 2
645 		printf("%s:%d ist=%u\n", __func__, __LINE__, ist);
646 #endif
647 		return false;
648 	}
649 
650 	ii->irq_vector = irq + info->ii_ist_vectors[ist];
651 	if (IST_PERCPU_P(ist) && ist != IST_IPI)
652 		ii->irq_vector += ci->ci_cpuid * info->ii_percpu_sources;
653 
654 	switch (ist) {
655 	default:
656 		ii->irq_vpr = OPENPIC_EIVPR(irq);
657 		ii->irq_dr  = OPENPIC_EIDR(irq);
658 		ok = irq < info->ii_external_sources
659 		    && (ist == IST_EDGE
660 			|| ist == IST_LEVEL_LOW
661 			|| ist == IST_LEVEL_HIGH);
662 		break;
663 	case IST_PULSE:
664 		ok = false;
665 		break;
666 	case IST_ONCHIP:
667 		ii->irq_vpr = OPENPIC_IIVPR(irq);
668 		ii->irq_dr  = OPENPIC_IIDR(irq);
669 		ok = irq < 32 * __arraycount(info->ii_onchip_bitmap);
670 #if DEBUG > 2
671 		printf("%s: irq=%u: ok=%u\n", __func__, irq, ok);
672 #endif
673 		ok = ok && (info->ii_onchip_bitmap[irq/32] & (1 << (irq & 31)));
674 #if DEBUG > 2
675 		printf("%s: %08x%08x -> %08x%08x: ok=%u\n", __func__,
676 		    irq < 32 ? 0 : (1 << irq), irq < 32 ? (1 << irq) : 0,
677 		    info->ii_onchip_bitmap[1], info->ii_onchip_bitmap[0],
678 		    ok);
679 #endif
680 		break;
681 	case IST_MSIGROUP:
682 		ii->irq_vpr = OPENPIC_MSIVPR(irq);
683 		ii->irq_dr  = OPENPIC_MSIDR(irq);
684 		ok = irq < info->ii_msigroup_sources
685 		    && ipl == IPL_VM;
686 		break;
687 	case IST_TIMER:
688 		ii->irq_vpr = OPENPIC_GTVPR(ci->ci_cpuid, irq);
689 		ii->irq_dr  = OPENPIC_GTDR(ci->ci_cpuid, irq);
690 		ok = irq < info->ii_timer_sources;
691 #if DEBUG > 2
692 		printf("%s: IST_TIMER irq=%u: ok=%u\n", __func__, irq, ok);
693 #endif
694 		break;
695 	case IST_IPI:
696 		ii->irq_vpr = OPENPIC_IPIVPR(irq);
697 		ii->irq_dr  = OPENPIC_IPIDR(irq);
698 		ok = irq < info->ii_ipi_sources;
699 		break;
700 	case IST_MI:
701 		ii->irq_vpr = OPENPIC_MIVPR(irq);
702 		ii->irq_dr  = OPENPIC_MIDR(irq);
703 		ok = irq < info->ii_mi_sources;
704 		break;
705 	}
706 
707 	return ok;
708 }
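
/*
 * Worked example (values hypothetical): on a chip with 12 external sources,
 * on-chip irq 16 at IST_ONCHIP maps to vector 12 + 16 = 28, and its VPR/DR
 * live at OPENPIC_IIVPR(16)/OPENPIC_IIDR(16).
 */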
709 
710 static const char *
711 e500_intr_string(int irq, int ist, char *buf, size_t len)
712 {
713 	struct cpu_info * const ci = curcpu();
714 	struct cpu_softc * const cpu = ci->ci_softc;
715 	struct e500_intr_irq_info ii;
716 
717 	if (!e500_intr_irq_info_get(ci, irq, IPL_VM, ist, &ii))
718 		return NULL;
719 
720 	strlcpy(buf, cpu->cpu_evcnt_intrs[ii.irq_vector].ev_name, len);
721 	return buf;
722 }
723 
724 __CTASSERT(__arraycount(ist_names) == IST_MAX);
725 
726 static const char *
727 e500_intr_typename(int ist)
728 {
729 	if (IST_NONE <= ist && ist < IST_MAX)
730 		return ist_names[ist];
731 
732 	return NULL;
733 }
734 
735 static void *
736 e500_intr_cpu_establish(struct cpu_info *ci, int irq, int ipl, int ist,
737 	int (*handler)(void *), void *arg, const char *xname)
738 {
739 	struct cpu_softc * const cpu = ci->ci_softc;
740 	struct e500_intr_irq_info ii;
741 
742 	KASSERT(ipl >= IPL_VM && ipl <= IPL_HIGH);
743 	KASSERT(ist > IST_NONE && ist < IST_MAX && ist != IST_MSI);
744 
745 	if (!e500_intr_irq_info_get(ci, irq, ipl, ist, &ii)) {
746 		printf("%s: e500_intr_irq_info_get(%p,%u,%u,%u,%p) failed\n",
747 		    __func__, ci, irq, ipl, ist, &ii);
748 		return NULL;
749 	}
750 
751 	if (xname == NULL) {
752 		xname = e500_intr_all_name_lookup(irq, ist);
753 		if (xname == NULL)
754 			xname = "unknown";
755 	}
756 
757 	struct intr_source * const is = &e500_intr_sources[ii.irq_vector];
758 	mutex_enter(&e500_intr_lock);
759 	if (is->is_ipl != IPL_NONE) {
760 		/* XXX IPI0 is shared by all CPUs. */
761 		if (is->is_ist != IST_IPI ||
762 		    is->is_irq != irq ||
763 		    is->is_ipl != ipl ||
764 		    is->is_ist != ist ||
765 		    is->is_func != handler ||
766 		    is->is_arg != arg) {
767 			mutex_exit(&e500_intr_lock);
768 			return NULL;
769 		}
770 	}
771 
772 	is->is_func = handler;
773 	is->is_arg = arg;
774 	is->is_ipl = ipl;
775 	is->is_ist = ist;
776 	is->is_irq = irq;
777 	is->is_refcnt++;
778 	is->is_vpr = ii.irq_vpr;
779 	is->is_dr = ii.irq_dr;
780 	switch (ist) {
781 	case IST_EDGE:
782 	case IST_LEVEL_LOW:
783 	case IST_LEVEL_HIGH:
784 		snprintf(is->is_source, sizeof(is->is_source), "extirq %d",
785 		    irq);
786 		break;
787 	case IST_ONCHIP:
788 		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);
789 		break;
790 	case IST_MSIGROUP:
791 		snprintf(is->is_source, sizeof(is->is_source), "msigroup %d",
792 		    irq);
793 		break;
794 	case IST_TIMER:
795 		snprintf(is->is_source, sizeof(is->is_source), "timer %d", irq);
796 		break;
797 	case IST_IPI:
798 		snprintf(is->is_source, sizeof(is->is_source), "ipi %d", irq);
799 		break;
800 	case IST_MI:
801 		snprintf(is->is_source, sizeof(is->is_source), "mi %d", irq);
802 		break;
803 	case IST_PULSE:
804 	default:
805 		panic("%s: invalid ist (%d)\n", __func__, ist);
806 	}
807 	strlcpy(is->is_xname, xname, sizeof(is->is_xname));
808 
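	/*
	 * Encode the vector so that e500_extintr() can decode the IACK value
	 * without a table lookup: the low 4 bits carry the IPL and the upper
	 * bits carry (irq_vector + 1), i.e. irq = (iack >> 4) - 1 and
	 * ipl = iack & 0xf.
	 */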
809 	uint32_t vpr = VPR_PRIORITY_MAKE(IPL2CTPR(ipl))
810 	    | VPR_VECTOR_MAKE(((ii.irq_vector + 1) << 4) | ipl)
811 	    | (ist == IST_LEVEL_LOW
812 		? VPR_LEVEL_LOW
813 		: (ist == IST_LEVEL_HIGH
814 		    ? VPR_LEVEL_HIGH
815 		    : (ist == IST_ONCHIP
816 		      ? VPR_P_HIGH
817 		      : 0)));
818 
819 	/*
820 	 * All interrupts go to the primary CPU, except per-cpu interrupts,
821 	 * which are routed to the appropriate CPU.
822 	 */
823 	uint32_t dr = openpic_read(cpu, ii.irq_dr);
824 
825 	dr |= 1 << (IST_PERCPU_P(ist) ? ci->ci_cpuid : 0);
826 
827 	/*
828 	 * Update the vector/priority and destination registers keeping the
829 	 * interrupt masked.
830 	 */
831 	const register_t msr = wrtee(0);	/* disable interrupts */
832 	openpic_write(cpu, ii.irq_vpr, vpr | VPR_MSK);
833 	openpic_write(cpu, ii.irq_dr, dr);
834 
835 	/*
836 	 * Now unmask the interrupt.
837 	 */
838 	openpic_write(cpu, ii.irq_vpr, vpr);
839 
840 	wrtee(msr);				/* re-enable interrupts */
841 
842 	mutex_exit(&e500_intr_lock);
843 
844 	return is;
845 }
846 
847 static void *
848 e500_intr_establish(int irq, int ipl, int ist, int (*handler)(void *),
849     void *arg, const char *xname)
850 {
851 	return e500_intr_cpu_establish(curcpu(), irq, ipl, ist, handler, arg,
852 	    xname);
853 }
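
/*
 * Illustrative only (a hedged, hypothetical driver fragment, not part of
 * this file): a driver would typically reach e500_intr_establish() through
 * the MI intr_establish_xname() interface, e.g.
 *
 *	void *ih = intr_establish_xname(sc->sc_irq, IPL_VM, IST_ONCHIP,
 *	    foo_intr, sc, device_xname(self));
 *	if (ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");
 *
 * where sc, sc_irq and foo_intr are made-up names.
 */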
854 
855 static void
856 e500_intr_disestablish(void *vis)
857 {
858 	struct cpu_softc * const cpu = curcpu()->ci_softc;
859 	struct intr_source * const is = vis;
860 	struct e500_intr_irq_info ii;
861 
862 	KASSERT(e500_intr_sources <= is);
863 	KASSERT(is < e500_intr_last_source);
864 	KASSERT(!cpu_intr_p());
865 
866 	bool ok = e500_intr_irq_info_get(curcpu(), is->is_irq, is->is_ipl,
867 	    is->is_ist, &ii);
868 	(void)ok;	/* appease gcc */
869 	KASSERT(ok);
870 	KASSERT(is - e500_intr_sources == ii.irq_vector);
871 
872 	mutex_enter(&e500_intr_lock);
873 
874 	if (is->is_refcnt-- > 1) {
875 		mutex_exit(&e500_intr_lock);
876 		return;
877 	}
878 
879 	/*
880 	 * Mask the source using the mask (MSK) bit in the vector/priority reg.
881 	 */
882 	uint32_t vpr = openpic_read(cpu, ii.irq_vpr);
883 	openpic_write(cpu, ii.irq_vpr, VPR_MSK | vpr);
884 
885 	/*
886 	 * Wait for the Activity (A) bit for the source to be cleared.
887 	 */
888 	while (openpic_read(cpu, ii.irq_vpr) & VPR_A)
889 		;
890 
891 	/*
892 	 * Now the source can be modified.
893 	 */
894 	openpic_write(cpu, ii.irq_dr, 0);		/* stop delivery */
895 	openpic_write(cpu, ii.irq_vpr, VPR_MSK);	/* mask/reset it */
896 
897 	*is = (struct intr_source)INTR_SOURCE_INITIALIZER;
898 
899 	mutex_exit(&e500_intr_lock);
900 }
901 
902 static void
903 e500_critintr(struct trapframe *tf)
904 {
905 	panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
906 }
907 
908 static void
909 e500_decrintr(struct trapframe *tf)
910 {
911 	panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
912 }
913 
914 static void
915 e500_fitintr(struct trapframe *tf)
916 {
917 	panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
918 }
919 
920 static void
921 e500_wdogintr(struct trapframe *tf)
922 {
923 	struct cpu_info * const ci = curcpu();
924 	mtspr(SPR_TSR, TSR_ENW|TSR_WIS);
925 	wdog_barked = true;
926 	dump_splhist(ci, NULL);
927 	dump_trapframe(tf, NULL);
928 	panic("%s: tf=%p tb=%"PRId64" srr0/srr1=%#lx/%#lx"
929 	    " cpl=%d idepth=%d, mtxcount=%d",
930 	    __func__, tf, mftb(), tf->tf_srr0, tf->tf_srr1,
931 	    ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count);
932 }
933 
934 static void
935 e500_extintr(struct trapframe *tf)
936 {
937 	struct cpu_info * const ci = curcpu();
938 	struct cpu_softc * const cpu = ci->ci_softc;
939 	const int old_ipl = ci->ci_cpl;
940 
941 	/* if we panicked because of watchdog, PSL_CE will be clear.  */
942 	KASSERT(wdog_barked || (mfmsr() & PSL_CE));
943 
944 #if 0
945 //	printf("%s(%p): idepth=%d enter\n", __func__, tf, ci->ci_idepth);
946 	if ((register_t)tf >= (register_t)curlwp->l_addr + USPACE
947 	    || (register_t)tf < (register_t)curlwp->l_addr + NBPG) {
948 		printf("%s(entry): pid %d.%d (%s): srr0/srr1=%#lx/%#lx: invalid tf addr %p\n",
949 		    __func__, curlwp->l_proc->p_pid, curlwp->l_lid,
950 		    curlwp->l_proc->p_comm, tf->tf_srr0, tf->tf_srr1, tf);
951 	}
952 #endif
953 
954 
955 	ci->ci_data.cpu_nintr++;
956 	tf->tf_cf.cf_idepth = ci->ci_idepth++;
957 	cpu->cpu_pcpls[ci->ci_idepth] = old_ipl;
958 #if 1
959 	if (mfmsr() & PSL_EE)
960 		panic("%s(%p): MSR[EE] is on (%#lx)!", __func__, tf, mfmsr());
961 	if (old_ipl == IPL_HIGH
962 	    || IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
963 		panic("%s(%p): old_ipl(%u) == IPL_HIGH(%u) "
964 		    "|| old_ipl + %u != OPENPIC_CTPR (%u)",
965 		    __func__, tf, old_ipl, IPL_HIGH,
966 		    15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
967 #else
968 	if (old_ipl >= IPL_VM)
969 		panic("%s(%p): old_ipl(%u) >= IPL_VM(%u) CTPR=%u",
970 		    __func__, tf, old_ipl, IPL_VM, openpic_read(cpu, OPENPIC_CTPR));
971 #endif
972 
973 	for (;;) {
974 		/*
975 		 * Find out the pending interrupt.
976 		 */
977 		KASSERTMSG((mfmsr() & PSL_EE) == 0,
978 		    "%s(%p): MSR[EE] left on (%#lx)!", __func__, tf, mfmsr());
979 		if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
980 			panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
981 			    __func__, tf, __LINE__, old_ipl,
982 			    15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
983 		const uint32_t iack = openpic_read(cpu, OPENPIC_IACK);
984 #ifdef DIAGNOSTIC
985 		const int ipl = iack & 0xf;
986 #endif
987 		const int irq = (iack >> 4) - 1;
988 #if 0
989 		printf("%s: iack=%d ipl=%d irq=%d <%s>\n",
990 		    __func__, iack, ipl, irq,
991 		    (iack != IRQ_SPURIOUS ?
992 			cpu->cpu_evcnt_intrs[irq].ev_name : "spurious"));
993 #endif
994 		if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
995 			panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
996 			    __func__, tf, __LINE__, old_ipl,
997 			    15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
998 		if (iack == IRQ_SPURIOUS)
999 			break;
1000 
1001 		struct intr_source * const is = &e500_intr_sources[irq];
1002 		if (__predict_true(is < e500_intr_last_source)) {
1003 			/*
1004 			 * Timer interrupts get their argument overridden with
1005 			 * the pointer to the trapframe.
1006 			 */
1007 			KASSERTMSG(is->is_ipl == ipl,
1008 			    "iack %#x: is %p: irq %d ipl %d != iack ipl %d",
1009 			    iack, is, irq, is->is_ipl, ipl);
1010 			void *arg = (is->is_ist == IST_TIMER ? tf : is->is_arg);
1011 			if (is->is_ipl <= old_ipl)
1012 				panic("%s(%p): %s (%u): is->is_ipl (%u) <= old_ipl (%u)\n",
1013 				    __func__, tf,
1014 				    cpu->cpu_evcnt_intrs[irq].ev_name, irq,
1015 				    is->is_ipl, old_ipl);
1016 			KASSERT(is->is_ipl > old_ipl);
1017 			e500_splset(ci, is->is_ipl);	/* change IPL */
1018 			if (__predict_false(is->is_func == NULL)) {
1019 				aprint_error_dev(ci->ci_dev,
1020 				    "interrupt from unestablished irq %d\n",
1021 				    irq);
1022 			} else {
1023 				int (*func)(void *) = is->is_func;
1024 				wrtee(PSL_EE);
1025 				int rv = (*func)(arg);
1026 				wrtee(0);
1027 #if DEBUG > 2
1028 				printf("%s: %s handler %p(%p) returned %d\n",
1029 				    __func__,
1030 				    cpu->cpu_evcnt_intrs[irq].ev_name,
1031 				    func, arg, rv);
1032 #endif
1033 				if (rv == 0)
1034 					cpu->cpu_evcnt_spurious_intr.ev_count++;
1035 			}
1036 			e500_splset(ci, old_ipl);	/* restore IPL */
1037 			cpu->cpu_evcnt_intrs[irq].ev_count++;
1038 		} else {
1039 			aprint_error_dev(ci->ci_dev,
1040 			    "interrupt from illegal irq %d\n", irq);
1041 			cpu->cpu_evcnt_spurious_intr.ev_count++;
1042 		}
1043 		/*
1044 		 * If this is a nested interrupt, simply ack it and exit
1045 		 * because the loop we interrupted will complete looking
1046 		 * for interrupts.
1047 		 */
1048 		KASSERTMSG((mfmsr() & PSL_EE) == 0,
1049 		    "%s(%p): MSR[EE] left on (%#lx)!", __func__, tf, mfmsr());
1050 		if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
1051 			panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
1052 			    __func__, tf, __LINE__, old_ipl,
1053 			    15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
1054 
1055 		openpic_write(cpu, OPENPIC_EOI, 0);
1056 		if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
1057 			panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
1058 			    __func__, tf, __LINE__, old_ipl,
1059 			    15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
1060 		if (ci->ci_idepth > 0)
1061 			break;
1062 	}
1063 
1064 	ci->ci_idepth--;
1065 
1066 #ifdef __HAVE_FAST_SOFTINTS
1067 	/*
1068 	 * Before exiting, deal with any softints that need to be dealt with.
1069 	 */
1070 	const u_int softints = ci->ci_data.cpu_softints & (IPL_SOFTMASK << old_ipl);
1071 	if (__predict_false(softints != 0)) {
1072 		KASSERT(old_ipl < IPL_VM);
1073 		e500_splset(ci, IPL_HIGH);	/* pop to high */
1074 		wrtee(PSL_EE);			/* reenable interrupts */
1075 		powerpc_softint(ci, old_ipl,	/* deal with them */
1076 		    tf->tf_srr0);
1077 		wrtee(0);			/* disable interrupts */
1078 		e500_splset(ci, old_ipl);	/* and drop back */
1079 	}
1080 #endif /* __HAVE_FAST_SOFTINTS */
1081 	KASSERT(ci->ci_cpl == old_ipl);
1082 
1083 	/*
1084 	 * If we interrupted while power-saving and we need to exit idle,
1085 	 * we need to clear PSL_POW so we won't go back into power-saving.
1086 	 */
1087 	if (__predict_false(tf->tf_srr1 & PSL_POW) && ci->ci_want_resched)
1088 		tf->tf_srr1 &= ~PSL_POW;
1089 
1090 //	printf("%s(%p): idepth=%d exit\n", __func__, tf, ci->ci_idepth);
1091 }
1092 
1093 static void
1094 e500_intr_init(void)
1095 {
1096 	struct cpu_info * const ci = curcpu();
1097 	struct cpu_softc * const cpu = ci->ci_softc;
1098 	const uint32_t frr = openpic_read(cpu, OPENPIC_FRR);
1099 	const u_int nirq = FRR_NIRQ_GET(frr) + 1;
1100 //	const u_int ncpu = FRR_NCPU_GET(frr) + 1;
1101 	struct intr_source *is;
1102 	struct e500_intr_info * const ii = &e500_intr_info;
1103 
1104 	const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16;
1105 	switch (svr) {
1106 #ifdef MPC8536
1107 	case SVR_MPC8536v1 >> 16:
1108 		*ii = mpc8536_intr_info;
1109 		break;
1110 #endif
1111 #ifdef MPC8544
1112 	case SVR_MPC8544v1 >> 16:
1113 		*ii = mpc8544_intr_info;
1114 		break;
1115 #endif
1116 #ifdef MPC8548
1117 	case SVR_MPC8543v1 >> 16:
1118 	case SVR_MPC8548v1 >> 16:
1119 		*ii = mpc8548_intr_info;
1120 		break;
1121 #endif
1122 #ifdef MPC8555
1123 	case SVR_MPC8541v1 >> 16:
1124 	case SVR_MPC8555v1 >> 16:
1125 		*ii = mpc8555_intr_info;
1126 		break;
1127 #endif
1128 #ifdef MPC8568
1129 	case SVR_MPC8568v1 >> 16:
1130 		*ii = mpc8568_intr_info;
1131 		break;
1132 #endif
1133 #ifdef MPC8572
1134 	case SVR_MPC8572v1 >> 16:
1135 		*ii = mpc8572_intr_info;
1136 		break;
1137 #endif
1138 #ifdef P1023
1139 	case SVR_P1017v1 >> 16:
1140 	case SVR_P1023v1 >> 16:
1141 		*ii = p1023_intr_info;
1142 		break;
1143 #endif
1144 #ifdef P1025
1145 	case SVR_P1016v1 >> 16:
1146 	case SVR_P1025v1 >> 16:
1147 		*ii = p1025_intr_info;
1148 		break;
1149 #endif
1150 #ifdef P2020
1151 	case SVR_P2010v2 >> 16:
1152 	case SVR_P2020v2 >> 16:
1153 		*ii = p20x0_intr_info;
1154 		break;
1155 #endif
1156 	default:
1157 		panic("%s: don't know how to deal with SVR %#jx",
1158 		    __func__, (uintmax_t)mfspr(SPR_SVR));
1159 	}
1160 
1161 	/*
1162 	 * Initialize interrupt handler lock
1163 	 */
1164 	mutex_init(&e500_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
1165 
1166 	/*
1167 	 * We need to be in mixed mode.
1168 	 */
1169 	openpic_write(cpu, OPENPIC_GCR, GCR_M);
1170 
1171 	/*
1172 	 * Make sure we and the openpic both agree about the current SPL level.
1173 	 */
1174 	e500_splset(ci, ci->ci_cpl);
1175 
1176 	/*
1177 	 * Allocate the required number of interrupt sources.
1178 	 */
1179 	is = kmem_zalloc(nirq * sizeof(*is), KM_SLEEP);
1180 	e500_intr_sources = is;
1181 	e500_intr_last_source = is + nirq;
1182 
1183 	/*
1184 	 * Initialize all the external interrupts as active low.
1185 	 */
1186 	for (u_int irq = 0; irq < e500_intr_info.ii_external_sources; irq++) {
1187 		openpic_write(cpu, OPENPIC_EIVPR(irq),
1188 		    VPR_VECTOR_MAKE(irq) | VPR_LEVEL_LOW);
1189 	}
1190 }
1191 
1192 static void
1193 e500_intr_init_precpu(void)
1194 {
1195 	struct cpu_info const *ci = curcpu();
1196 	struct cpu_softc * const cpu = ci->ci_softc;
1197 	bus_addr_t dr;
1198 
1199 	/*
1200 	 * The timers' DRs initially deliver to cpu0; stop that delivery here.
1201 	 */
1202 	for (u_int irq = 0; irq < e500_intr_info.ii_timer_sources; irq++) {
1203 		dr = OPENPIC_GTDR(ci->ci_cpuid, irq);
1204 		openpic_write(cpu, dr, 0);	/* stop delivery */
1205 	}
1206 }
1207 
1208 static void
1209 e500_idlespin(void)
1210 {
1211 	KASSERTMSG(curcpu()->ci_cpl == IPL_NONE,
1212 	    "%s: cpu%u: ci_cpl (%d) != 0", __func__, cpu_number(),
1213 	     curcpu()->ci_cpl);
1214 	KASSERTMSG(CTPR2IPL(openpic_read(curcpu()->ci_softc, OPENPIC_CTPR)) == IPL_NONE,
1215 	    "%s: cpu%u: CTPR (%d) != IPL_NONE", __func__, cpu_number(),
1216 	     CTPR2IPL(openpic_read(curcpu()->ci_softc, OPENPIC_CTPR)));
1217 	KASSERT(mfmsr() & PSL_EE);
1218 
1219 	if (powersave > 0)
1220 		mtmsr(mfmsr() | PSL_POW);
1221 }
1222 
1223 static void
1224 e500_intr_cpu_attach(struct cpu_info *ci)
1225 {
1226 	struct cpu_softc * const cpu = ci->ci_softc;
1227 	const char * const xname = device_xname(ci->ci_dev);
1228 
1229 	const uint32_t frr = openpic_read(cpu, OPENPIC_FRR);
1230 	const u_int nirq = FRR_NIRQ_GET(frr) + 1;
1231 //	const u_int ncpu = FRR_NCPU_GET(frr) + 1;
1232 
1233 	const struct e500_intr_info * const info = &e500_intr_info;
1234 
1235 	cpu->cpu_clock_gtbcr = OPENPIC_GTBCR(ci->ci_cpuid, E500_CLOCK_TIMER);
1236 
1237 	cpu->cpu_evcnt_intrs =
1238 	    kmem_zalloc(nirq * sizeof(cpu->cpu_evcnt_intrs[0]), KM_SLEEP);
1239 
1240 	struct evcnt *evcnt = cpu->cpu_evcnt_intrs;
1241 	for (size_t j = 0; j < info->ii_external_sources; j++, evcnt++) {
1242 		const char *name = e500_intr_external_name_lookup(j);
1243 		evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, NULL, xname, name);
1244 	}
1245 	KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_ONCHIP]);
1246 	for (size_t j = 0; j < info->ii_onchip_sources; j++, evcnt++) {
1247 		if (info->ii_onchip_bitmap[j / 32] & __BIT(j & 31)) {
1248 			const char *name = e500_intr_onchip_name_lookup(j);
1249 			if (name != NULL) {
1250 				evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1251 				    NULL, xname, name);
1252 #ifdef DIAGNOSTIC
1253 			} else {
1254 				printf("%s: missing evcnt for onchip irq %zu\n",
1255 				    __func__, j);
1256 #endif
1257 			}
1258 		}
1259 	}
1260 
1261 	KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_MSIGROUP]);
1262 	for (size_t j = 0; j < info->ii_msigroup_sources; j++, evcnt++) {
1263 		evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1264 		    NULL, xname, e500_msigroup_intr_names[j].in_name);
1265 	}
1266 
1267 	KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_TIMER]);
1268 	evcnt += ci->ci_cpuid * info->ii_percpu_sources;
1269 	for (size_t j = 0; j < info->ii_timer_sources; j++, evcnt++) {
1270 		evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1271 		    NULL, xname, e500_timer_intr_names[j].in_name);
1272 	}
1273 
1274 	for (size_t j = 0; j < info->ii_ipi_sources; j++, evcnt++) {
1275 		evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1276 		    NULL, xname, e500_ipi_intr_names[j].in_name);
1277 	}
1278 
1279 	for (size_t j = 0; j < info->ii_mi_sources; j++, evcnt++) {
1280 		evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
1281 		    NULL, xname, e500_mi_intr_names[j].in_name);
1282 	}
1283 
1284 	ci->ci_idlespin = e500_idlespin;
1285 }
1286 
1287 static void
1288 e500_intr_cpu_send_ipi(cpuid_t target, uint32_t ipimsg)
1289 {
1290 	struct cpu_info * const ci = curcpu();
1291 	struct cpu_softc * const cpu = ci->ci_softc;
1292 	uint32_t dstmask;
1293 
1294 	if (target >= CPU_MAXNUM) {
1295 		CPU_INFO_ITERATOR cii;
1296 		struct cpu_info *dst_ci;
1297 
1298 		KASSERT(target == IPI_DST_NOTME || target == IPI_DST_ALL);
1299 
1300 		dstmask = 0;
1301 		for (CPU_INFO_FOREACH(cii, dst_ci)) {
1302 			if (target == IPI_DST_ALL || ci != dst_ci) {
1303 				dstmask |= 1 << cpu_index(dst_ci);
1304 				if (ipimsg)
1305 					atomic_or_32(&dst_ci->ci_pending_ipis,
1306 					    ipimsg);
1307 			}
1308 		}
1309 	} else {
1310 		struct cpu_info * const dst_ci = cpu_lookup(target);
1311 		KASSERT(dst_ci != NULL);
1312 		KASSERTMSG(target == cpu_index(dst_ci),
1313 		    "%s: target (%lu) != cpu_index(cpu%u)",
1314 		     __func__, target, cpu_index(dst_ci));
1315 		dstmask = (1 << target);
1316 		if (ipimsg)
1317 			atomic_or_32(&dst_ci->ci_pending_ipis, ipimsg);
1318 	}
1319 
1320 	openpic_write(cpu, OPENPIC_IPIDR(0), dstmask);
1321 }
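
/*
 * Writing the destination mask into the IPI 0 dispatch register raises
 * IPI 0 on every CPU whose bit is set; the actual message travels out of
 * band in each target's ci_pending_ipis and is decoded by e500_ipi_intr().
 */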
1322 
1323 typedef void (*ipifunc_t)(void);
1324 
1325 #ifdef __HAVE_PREEMPTION
1326 static void
1327 e500_ipi_kpreempt(void)
1328 {
1329 	powerpc_softint_trigger(1 << IPL_NONE);
1330 }
1331 #endif
1332 
1333 static void
1334 e500_ipi_suspend(void)
1335 {
1336 
1337 #ifdef MULTIPROCESSOR
1338 	cpu_pause(NULL);
1339 #endif	/* MULTIPROCESSOR */
1340 }
1341 
1342 static void
1343 e500_ipi_ast(void)
1344 {
1345 	curcpu()->ci_onproc->l_md.md_astpending = 1;
1346 }
1347 
1348 static const ipifunc_t e500_ipifuncs[] = {
1349 	[ilog2(IPI_XCALL)] =	xc_ipi_handler,
1350 	[ilog2(IPI_GENERIC)] =	ipi_cpu_handler,
1351 	[ilog2(IPI_HALT)] =	e500_ipi_halt,
1352 #ifdef __HAVE_PREEMPTION
1353 	[ilog2(IPI_KPREEMPT)] =	e500_ipi_kpreempt,
1354 #endif
1355 	[ilog2(IPI_TLB1SYNC)] =	e500_tlb1_sync,
1356 	[ilog2(IPI_SUSPEND)] =	e500_ipi_suspend,
1357 	[ilog2(IPI_AST)] =	e500_ipi_ast,
1358 };
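
/*
 * e500_ipi_intr() below drains ci_pending_ipis from the most significant
 * set bit downward and calls the matching entry in this table for each
 * bit; an unexpected bit trips the KASSERT on the NULL slot.
 */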
1359 
1360 static int
1361 e500_ipi_intr(void *v)
1362 {
1363 	struct cpu_info * const ci = curcpu();
1364 
1365 	ci->ci_ev_ipi.ev_count++;
1366 
1367 	uint32_t pending_ipis = atomic_swap_32(&ci->ci_pending_ipis, 0);
1368 	for (u_int ipi = 31; pending_ipis != 0; ipi--, pending_ipis <<= 1) {
1369 		const u_int bits = __builtin_clz(pending_ipis);
1370 		ipi -= bits;
1371 		pending_ipis <<= bits;
1372 		KASSERT(e500_ipifuncs[ipi] != NULL);
1373 		(*e500_ipifuncs[ipi])();
1374 	}
1375 
1376 	return 1;
1377 }
1378 
1379 static void
1380 e500_intr_cpu_hatch(struct cpu_info *ci)
1381 {
1382 	char iname[INTRIDBUF];
1383 
1384 	/* Initialize percpu interrupts. */
1385 	e500_intr_init_precpu();
1386 
1387 	/*
1388 	 * Establish clock interrupt for this CPU.
1389 	 */
1390 	snprintf(iname, sizeof(iname), "%s clock", device_xname(ci->ci_dev));
1391 	if (e500_intr_cpu_establish(ci, E500_CLOCK_TIMER, IPL_CLOCK, IST_TIMER,
1392 	    e500_clock_intr, NULL, iname) == NULL)
1393 		panic("%s: failed to establish clock interrupt!", __func__);
1394 
1395 	/*
1396 	 * Establish the IPI interrupts for this CPU.
1397 	 */
1398 	if (e500_intr_cpu_establish(ci, 0, IPL_VM, IST_IPI, e500_ipi_intr,
1399 	    NULL, "ipi") == NULL)
1400 		panic("%s: failed to establish ipi interrupt!", __func__);
1401 
1402 	/*
1403 	 * Enable watchdog interrupts.
1404 	 */
1405 	uint32_t tcr = mfspr(SPR_TCR);
1406 	tcr |= TCR_WIE;
1407 	mtspr(SPR_TCR, tcr);
1408 }
1409 
1410 static const char *
1411 e500_intr_all_name_lookup(int irq, int ist)
1412 {
1413 	const struct e500_intr_info * const info = &e500_intr_info;
1414 
1415 	switch (ist) {
1416 	default:
1417 		if (irq < info->ii_external_sources &&
1418 		    (ist == IST_EDGE ||
1419 		     ist == IST_LEVEL_LOW ||
1420 		     ist == IST_LEVEL_HIGH))
1421 			return e500_intr_name_lookup(
1422 			    info->ii_external_intr_names, irq);
1423 		break;
1424 
1425 	case IST_PULSE:
1426 		break;
1427 
1428 	case IST_ONCHIP:
1429 		if (irq < info->ii_onchip_sources)
1430 			return e500_intr_onchip_name_lookup(irq);
1431 		break;
1432 
1433 	case IST_MSIGROUP:
1434 		if (irq < info->ii_msigroup_sources)
1435 			return e500_intr_name_lookup(e500_msigroup_intr_names,
1436 			    irq);
1437 		break;
1438 
1439 	case IST_TIMER:
1440 		if (irq < info->ii_timer_sources)
1441 			return e500_intr_name_lookup(e500_timer_intr_names,
1442 			    irq);
1443 		break;
1444 
1445 	case IST_IPI:
1446 		if (irq < info->ii_ipi_sources)
1447 			return e500_intr_name_lookup(e500_ipi_intr_names, irq);
1448 		break;
1449 
1450 	case IST_MI:
1451 		if (irq < info->ii_mi_sources)
1452 			return e500_intr_name_lookup(e500_mi_intr_names, irq);
1453 		break;
1454 	}
1455 
1456 	return NULL;
1457 }
1458 
1459 static void
1460 e500_intr_get_affinity(struct intr_source *is, kcpuset_t *cpuset)
1461 {
1462 	struct cpu_info * const ci = curcpu();
1463 	struct cpu_softc * const cpu = ci->ci_softc;
1464 	struct e500_intr_irq_info ii;
1465 
1466 	kcpuset_zero(cpuset);
1467 
1468 	if (is->is_ipl != IPL_NONE && !IST_PERCPU_P(is->is_ist)) {
1469 		if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl,
1470 		    is->is_ist, &ii)) {
1471 			uint32_t dr = openpic_read(cpu, ii.irq_dr);
1472 			while (dr != 0) {
1473 				u_int n = ffs(dr);
1474 				if (n-- == 0)
1475 					break;
1476 				dr &= ~(1 << n);
1477 				kcpuset_set(cpuset, n);
1478 			}
1479 		}
1480 	}
1481 }
1482 
1483 static int
1484 e500_intr_set_affinity(struct intr_source *is, const kcpuset_t *cpuset)
1485 {
1486 	struct cpu_info * const ci = curcpu();
1487 	struct cpu_softc * const cpu = ci->ci_softc;
1488 	struct e500_intr_irq_info ii;
1489 	uint32_t ecpuset, tcpuset;
1490 
1491 	KASSERT(mutex_owned(&cpu_lock));
1492 	KASSERT(mutex_owned(&e500_intr_lock));
1493 	KASSERT(!kcpuset_iszero(cpuset));
1494 
1495 	kcpuset_export_u32(cpuset, &ecpuset, sizeof(ecpuset));
1496 	tcpuset = ecpuset;
1497 	while (tcpuset != 0) {
1498 		u_int cpu_idx = ffs(tcpuset);
1499 		if (cpu_idx-- == 0)
1500 			break;
1501 
1502 		tcpuset &= ~(1 << cpu_idx);
1503 		struct cpu_info * const newci = cpu_lookup(cpu_idx);
1504 		if (newci == NULL)
1505 			return EINVAL;
1506 		if ((newci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
1507 			return EINVAL;
1508 	}
1509 
1510 	if (!e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, is->is_ist,
1511 	    &ii))
1512 		return ENXIO;
1513 
1514 	/*
1515 	 * Update the vector/priority and destination registers keeping the
1516 	 * interrupt masked.
1517 	 */
1518 	const register_t msr = wrtee(0);	/* disable interrupts */
1519 
1520 	uint32_t vpr = openpic_read(cpu, ii.irq_vpr);
1521 	openpic_write(cpu, ii.irq_vpr, vpr | VPR_MSK);
1522 
1523 	/*
1524 	 * Wait for the Activity (A) bit for the source to be cleared.
1525 	 */
1526 	while (openpic_read(cpu, ii.irq_vpr) & VPR_A)
1527 		continue;
1528 
1529 	/*
1530 	 * Update destination register
1531 	 */
1532 	openpic_write(cpu, ii.irq_dr, ecpuset);
1533 
1534 	/*
1535 	 * Now unmask the interrupt.
1536 	 */
1537 	openpic_write(cpu, ii.irq_vpr, vpr);
1538 
1539 	wrtee(msr);				/* re-enable interrupts */
1540 
1541 	return 0;
1542 }
1543 
1544 static bool
1545 e500_intr_is_affinity_intrsource(struct intr_source *is,
1546     const kcpuset_t *cpuset)
1547 {
1548 	struct cpu_info * const ci = curcpu();
1549 	struct cpu_softc * const cpu = ci->ci_softc;
1550 	struct e500_intr_irq_info ii;
1551 	bool result = false;
1552 
1553 	if (is->is_ipl != IPL_NONE && !IST_PERCPU_P(is->is_ist)) {
1554 		if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl,
1555 		    is->is_ist, &ii)) {
1556 			uint32_t dr = openpic_read(cpu, ii.irq_dr);
1557 			while (dr != 0 && !result) {
1558 				u_int n = ffs(dr);
1559 				if (n-- == 0)
1560 					break;
1561 				dr &= ~(1 << n);
1562 				result = kcpuset_isset(cpuset, n);
1563 			}
1564 		}
1565 	}
1566 	return result;
1567 }
1568 
1569 static struct intr_source *
1570 e500_intr_get_source(const char *intrid)
1571 {
1572 	struct intr_source *is;
1573 
1574 	mutex_enter(&e500_intr_lock);
1575 	for (is = e500_intr_sources; is < e500_intr_last_source; ++is) {
1576 		if (is->is_source[0] == '\0')
1577 			continue;
1578 
1579 		if (!strncmp(intrid, is->is_source, sizeof(is->is_source) - 1))
1580 			break;
1581 	}
1582 	if (is == e500_intr_last_source)
1583 		is = NULL;
1584 	mutex_exit(&e500_intr_lock);
1585 	return is;
1586 }
1587 
1588 uint64_t
1589 interrupt_get_count(const char *intrid, u_int cpu_idx)
1590 {
1591 	struct cpu_info * const ci = cpu_lookup(cpu_idx);
1592 	struct cpu_softc * const cpu = ci->ci_softc;
1593 	struct intr_source *is;
1594 	struct e500_intr_irq_info ii;
1595 
1596 	is = e500_intr_get_source(intrid);
1597 	if (is == NULL)
1598 		return 0;
1599 
1600 	if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, is->is_ist, &ii))
1601 		return cpu->cpu_evcnt_intrs[ii.irq_vector].ev_count;
1602 	return 0;
1603 }
1604 
1605 void
1606 interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
1607 {
1608 	struct intr_source *is;
1609 
1610 	kcpuset_zero(cpuset);
1611 
1612 	is = e500_intr_get_source(intrid);
1613 	if (is == NULL)
1614 		return;
1615 
1616 	mutex_enter(&e500_intr_lock);
1617 	e500_intr_get_affinity(is, cpuset);
1618 	mutex_exit(&e500_intr_lock);
1619 }
1620 
1621 void
1622 interrupt_get_available(kcpuset_t *cpuset)
1623 {
1624 	CPU_INFO_ITERATOR cii;
1625 	struct cpu_info *ci;
1626 
1627 	kcpuset_zero(cpuset);
1628 
1629 	mutex_enter(&cpu_lock);
1630 	for (CPU_INFO_FOREACH(cii, ci)) {
1631 		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
1632 			kcpuset_set(cpuset, cpu_index(ci));
1633 	}
1634 	mutex_exit(&cpu_lock);
1635 }
1636 
1637 void
1638 interrupt_get_devname(const char *intrid, char *buf, size_t len)
1639 {
1640 	struct intr_source *is;
1641 
1642 	if (len == 0)
1643 		return;
1644 
1645 	buf[0] = '\0';
1646 
1647 	is = e500_intr_get_source(intrid);
1648 	if (is != NULL)
1649 		strlcpy(buf, is->is_xname, len);
1650 }
1651 
1652 struct intrids_handler *
1653 interrupt_construct_intrids(const kcpuset_t *cpuset)
1654 {
1655 	struct intr_source *is;
1656 	struct intrids_handler *ii_handler;
1657 	intrid_t *ids;
1658 	int i, n;
1659 
1660 	if (kcpuset_iszero(cpuset))
1661 		return NULL;
1662 
1663 	n = 0;
1664 	mutex_enter(&e500_intr_lock);
1665 	for (is = e500_intr_sources; is < e500_intr_last_source; ++is) {
1666 		if (e500_intr_is_affinity_intrsource(is, cpuset))
1667 			++n;
1668 	}
1669 	mutex_exit(&e500_intr_lock);
1670 
1671 	const size_t alloc_size = sizeof(int) + sizeof(intrid_t) * n;
1672 	ii_handler = kmem_zalloc(alloc_size, KM_SLEEP);
1673 	ii_handler->iih_nids = n;
1674 	if (n == 0)
1675 		return ii_handler;
1676 
1677 	ids = ii_handler->iih_intrids;
1678 	mutex_enter(&e500_intr_lock);
1679 	for (i = 0, is = e500_intr_sources;
1680 	     i < n && is < e500_intr_last_source;
1681 	     ++is) {
1682 		if (!e500_intr_is_affinity_intrsource(is, cpuset))
1683 			continue;
1684 
1685 		if (is->is_source[0] != '\0') {
1686 			strlcpy(ids[i], is->is_source, sizeof(ids[0]));
1687 			++i;
1688 		}
1689 	}
1690 	mutex_exit(&e500_intr_lock);
1691 
1692 	return ii_handler;
1693 }
1694 
1695 void
1696 interrupt_destruct_intrids(struct intrids_handler *ii_handler)
1697 {
1698 	size_t iih_size;
1699 
1700 	if (ii_handler == NULL)
1701 		return;
1702 
1703 	iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
1704 	kmem_free(ii_handler, iih_size);
1705 }
1706 
1707 static int
1708 interrupt_distribute_locked(struct intr_source *is, const kcpuset_t *newset,
1709     kcpuset_t *oldset)
1710 {
1711 	int error;
1712 
1713 	KASSERT(mutex_owned(&cpu_lock));
1714 
1715 	if (is->is_ipl == IPL_NONE || IST_PERCPU_P(is->is_ist))
1716 		return EINVAL;
1717 
1718 	mutex_enter(&e500_intr_lock);
1719 	if (oldset != NULL)
1720 		e500_intr_get_affinity(is, oldset);
1721 	error = e500_intr_set_affinity(is, newset);
1722 	mutex_exit(&e500_intr_lock);
1723 
1724 	return error;
1725 }
1726 
1727 int
1728 interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset)
1729 {
1730 	int error;
1731 
1732 	mutex_enter(&cpu_lock);
1733 	error = interrupt_distribute_locked(ich, newset, oldset);
1734 	mutex_exit(&cpu_lock);
1735 
1736 	return error;
1737 }
1738 
1739 int
1740 interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
1741     kcpuset_t *oldset)
1742 {
1743 	struct intr_source *is;
1744 	int error;
1745 
1746 	is = e500_intr_get_source(intrid);
1747 	if (is != NULL) {
1748 		mutex_enter(&cpu_lock);
1749 		error = interrupt_distribute_locked(is, newset, oldset);
1750 		mutex_exit(&cpu_lock);
1751 	} else
1752 		error = ENOENT;
1753 
1754 	return error;
1755 }
1756