/*	$OpenBSD: aplintc.c,v 1.12 2022/07/13 09:28:18 kettenis Exp $	*/
/*
 * Copyright (c) 2021 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/evcount.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/intr.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/fdt.h>

#include <ddb/db_output.h>

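/*
 * The registers below are implementation-defined Apple system
 * registers, referred to by their encoded s3_... names since generic
 * toolchains provide no mnemonics for them.  APL_IRQ_CR_EL1 gates IRQ
 * delivery to a core; the APL_IPI_* registers implement fast IPIs,
 * which are delivered as FIQs.
 */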
#define APL_IRQ_CR_EL1		s3_4_c15_c10_4
#define  APL_IRQ_CR_EL1_DISABLE	(3 << 0)

#define APL_IPI_LOCAL_RR_EL1	s3_5_c15_c0_0
#define APL_IPI_GLOBAL_RR_EL1	s3_5_c15_c0_1
#define APL_IPI_SR_EL1		s3_5_c15_c1_1
#define  APL_IPI_SR_EL1_PENDING	(1 << 0)

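/* Architectural control bits of the virtual timer (cntv_ctl_el0). */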
#define CNTV_CTL_ENABLE		(1 << 0)
#define CNTV_CTL_IMASK		(1 << 1)
#define CNTV_CTL_ISTATUS	(1 << 2)

#define AIC_INFO		0x0004
#define  AIC_INFO_NDIE(val)	(((val) >> 24) & 0xf)
#define  AIC_INFO_NIRQ(val)	((val) & 0xffff)
#define AIC_WHOAMI		0x2000
#define AIC_EVENT		0x2004
#define  AIC_EVENT_DIE(val)	(((val) >> 24) & 0xff)
#define  AIC_EVENT_TYPE(val)	(((val) >> 16) & 0xff)
#define  AIC_EVENT_TYPE_NONE	0
#define  AIC_EVENT_TYPE_IRQ	1
#define  AIC_EVENT_TYPE_IPI	4
#define  AIC_EVENT_IRQ(val)	((val) & 0xffff)
#define  AIC_EVENT_IPI_OTHER	1
#define  AIC_EVENT_IPI_SELF	2
#define AIC_IPI_SEND		0x2008
#define AIC_IPI_ACK		0x200c
#define AIC_IPI_MASK_SET	0x2024
#define AIC_IPI_MASK_CLR	0x2028
#define  AIC_IPI_OTHER		(1U << 0)
#define  AIC_IPI_SELF		(1U << 31)
#define AIC_TARGET_CPU(irq)	(0x3000 + ((irq) << 2))
#define AIC_SW_SET(irq)		(0x4000 + (((irq) >> 5) << 2))
#define AIC_SW_CLR(irq)		(0x4080 + (((irq) >> 5) << 2))
#define  AIC_SW_BIT(irq)	(1U << ((irq) & 0x1f))
#define AIC_MASK_SET(irq)	(0x4100 + (((irq) >> 5) << 2))
#define AIC_MASK_CLR(irq)	(0x4180 + (((irq) >> 5) << 2))
#define  AIC_MASK_BIT(irq)	(1U << ((irq) & 0x1f))

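/*
 * AIC2 replicates the software-set/clear and mask registers once per
 * die; the banks for the individual dies are 0x4a00 bytes apart.
 */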
#define AIC2_CONFIG		0x0014
#define  AIC2_CONFIG_ENABLE	(1 << 0)
#define AIC2_SW_SET(die, irq)	(0x6000 + (die) * 0x4a00 + (((irq) >> 5) << 2))
#define AIC2_SW_CLR(die, irq)	(0x6200 + (die) * 0x4a00 + (((irq) >> 5) << 2))
#define AIC2_MASK_SET(die, irq)	(0x6400 + (die) * 0x4a00 + (((irq) >> 5) << 2))
#define AIC2_MASK_CLR(die, irq)	(0x6600 + (die) * 0x4a00 + (((irq) >> 5) << 2))
#define AIC2_EVENT		0xc000

#define AIC_MAXCPUS		32
#define AIC_MAXDIES		4

#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define HSET4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
#define HCLR4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))

struct intrhand {
	TAILQ_ENTRY(intrhand) ih_list;
	int		(*ih_func)(void *);
	void		*ih_arg;
	int		ih_ipl;
	int		ih_flags;
	int		ih_die;
	int		ih_irq;
	struct evcount	ih_count;
	const char	*ih_name;
	struct cpu_info *ih_ci;
};

struct aplintc_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_space_handle_t	sc_event_ioh;

	int			sc_version;

	struct interrupt_controller sc_ic;

	struct intrhand		*sc_fiq_handler;
	int			sc_fiq_pending[AIC_MAXCPUS];
	struct intrhand		**sc_irq_handler[AIC_MAXDIES];
	int			sc_nirq;
	int			sc_ndie;
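	/* Handlers queued as pending per IPL, replayed by aplintc_splx(). */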
	TAILQ_HEAD(, intrhand)	sc_irq_list[NIPL];

	uint32_t		sc_cpuremap[AIC_MAXCPUS];
	u_int			sc_ipi_reason[AIC_MAXCPUS];
	struct evcount		sc_ipi_count;
};

struct aplintc_softc *aplintc_sc;

int	aplintc_match(struct device *, void *, void *);
void	aplintc_attach(struct device *, struct device *, void *);

const struct cfattach	aplintc_ca = {
	sizeof (struct aplintc_softc), aplintc_match, aplintc_attach
};

struct cfdriver aplintc_cd = {
	NULL, "aplintc", DV_DULL
};

void	aplintc_cpuinit(void);
void	aplintc_irq_handler(void *);
void	aplintc_fiq_handler(void *);
void	aplintc_intr_barrier(void *);
int	aplintc_splraise(int);
int	aplintc_spllower(int);
void	aplintc_splx(int);
void	aplintc_setipl(int);

void	*aplintc_intr_establish(void *, int *, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	aplintc_intr_disestablish(void *);

void	aplintc_send_ipi(struct cpu_info *, int);
void	aplintc_handle_ipi(struct aplintc_softc *);

int
aplintc_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "apple,aic") ||
	    OF_is_compatible(faa->fa_node, "apple,aic2");
}

void
aplintc_attach(struct device *parent, struct device *self, void *aux)
{
	struct aplintc_softc *sc = (struct aplintc_softc *)self;
	struct fdt_attach_args *faa = aux;
	uint32_t info;
	int die, ipl;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf(": can't map registers\n");
		return;
	}

	if (OF_is_compatible(faa->fa_node, "apple,aic2"))
		sc->sc_version = 2;
	else
		sc->sc_version = 1;

	/*
	 * AIC2 has the event register specified separately.  However,
	 * a preliminary device tree binding for AIC2 had it included
	 * in the main register area, like with AIC1.  Support both
	 * for now.
	 */
	if (faa->fa_nreg > 1) {
		if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
		    faa->fa_reg[1].size, 0, &sc->sc_event_ioh)) {
			printf(": can't map event register\n");
			return;
		}
	} else {
		if (sc->sc_version == 1) {
			bus_space_subregion(sc->sc_iot, sc->sc_ioh,
			    AIC_EVENT, 4, &sc->sc_event_ioh);
		} else {
			bus_space_subregion(sc->sc_iot, sc->sc_ioh,
			    AIC2_EVENT, 4, &sc->sc_event_ioh);
		}
	}

	info = HREAD4(sc, AIC_INFO);
	sc->sc_nirq = AIC_INFO_NIRQ(info);
	sc->sc_ndie = AIC_INFO_NDIE(info) + 1;
	for (die = 0; die < sc->sc_ndie; die++) {
		sc->sc_irq_handler[die] = mallocarray(sc->sc_nirq,
		    sizeof(struct intrhand *), M_DEVBUF, M_WAITOK | M_ZERO);
	}
	for (ipl = 0; ipl < NIPL; ipl++)
		TAILQ_INIT(&sc->sc_irq_list[ipl]);

	printf(" nirq %d ndie %d\n", sc->sc_nirq, sc->sc_ndie);

	arm_init_smask();

	aplintc_sc = sc;
	aplintc_cpuinit();

	evcount_attach(&sc->sc_ipi_count, "ipi", NULL);
	arm_set_intr_handler(aplintc_splraise, aplintc_spllower, aplintc_splx,
	    aplintc_setipl, aplintc_irq_handler, aplintc_fiq_handler);

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = self;
	sc->sc_ic.ic_establish = aplintc_intr_establish;
	sc->sc_ic.ic_disestablish = aplintc_intr_disestablish;
	sc->sc_ic.ic_cpu_enable = aplintc_cpuinit;
	sc->sc_ic.ic_barrier = aplintc_intr_barrier;
	arm_intr_register_fdt(&sc->sc_ic);

	intr_send_ipi_func = aplintc_send_ipi;

	if (sc->sc_version == 2)
		HSET4(sc, AIC2_CONFIG, AIC2_CONFIG_ENABLE);
}

void
aplintc_cpuinit(void)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct cpu_info *ci = curcpu();
	uint32_t hwid;

	KASSERT(ci->ci_cpuid < AIC_MAXCPUS);

	/*
	 * AIC2 does not provide us with a way to target external
	 * interrupts at a particular core.  Therefore, disable IRQ
	 * delivery to the secondary CPUs, which ensures that all
	 * external interrupts are delivered to the primary CPU.
	 */
	if (!CPU_IS_PRIMARY(ci))
		WRITE_SPECIALREG(APL_IRQ_CR_EL1, APL_IRQ_CR_EL1_DISABLE);

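	/* AIC1 has its own CPU numbering; record how it maps to our cpuid. */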
	if (sc->sc_version == 1) {
		hwid = HREAD4(sc, AIC_WHOAMI);
		KASSERT(hwid < AIC_MAXCPUS);
		sc->sc_cpuremap[ci->ci_cpuid] = hwid;
	}
}

static inline void
aplintc_sw_clr(struct aplintc_softc *sc, int die, int irq)
{
	if (sc->sc_version == 1)
		HWRITE4(sc, AIC_SW_CLR(irq), AIC_SW_BIT(irq));
	else
		HWRITE4(sc, AIC2_SW_CLR(die, irq), AIC_SW_BIT(irq));
}

static inline void
aplintc_sw_set(struct aplintc_softc *sc, int die, int irq)
{
	if (sc->sc_version == 1)
		HWRITE4(sc, AIC_SW_SET(irq), AIC_SW_BIT(irq));
	else
		HWRITE4(sc, AIC2_SW_SET(die, irq), AIC_SW_BIT(irq));
}

static inline void
aplintc_mask_clr(struct aplintc_softc *sc, int die, int irq)
{
	if (sc->sc_version == 1)
		HWRITE4(sc, AIC_MASK_CLR(irq), AIC_MASK_BIT(irq));
	else
		HWRITE4(sc, AIC2_MASK_CLR(die, irq), AIC_MASK_BIT(irq));
}

static inline void
aplintc_mask_set(struct aplintc_softc *sc, int die, int irq)
{
	if (sc->sc_version == 1)
		HWRITE4(sc, AIC_MASK_SET(irq), AIC_MASK_BIT(irq));
	else
		HWRITE4(sc, AIC2_MASK_SET(die, irq), AIC_MASK_BIT(irq));
}

void
aplintc_run_handler(struct intrhand *ih, void *frame, int s)
{
	void *arg;
	int handled;

#ifdef MULTIPROCESSOR
	int need_lock;

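	/*
	 * Non-MPSAFE handlers run under the kernel lock, except at
	 * IPL_SCHED and above where taking the lock is not allowed.
	 */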
	if (ih->ih_flags & IPL_MPSAFE)
		need_lock = 0;
	else
		need_lock = s < IPL_SCHED;

	if (need_lock)
		KERNEL_LOCK();
#endif

	if (ih->ih_arg)
		arg = ih->ih_arg;
	else
		arg = frame;

	handled = ih->ih_func(arg);
	if (handled)
		ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
	if (need_lock)
		KERNEL_UNLOCK();
#endif
}

void
aplintc_irq_handler(void *frame)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct cpu_info *ci = curcpu();
	struct intrhand *ih;
	uint32_t event;
	uint32_t die, irq, type;
	int s;

	event = bus_space_read_4(sc->sc_iot, sc->sc_event_ioh, 0);
	die = AIC_EVENT_DIE(event);
	irq = AIC_EVENT_IRQ(event);
	type = AIC_EVENT_TYPE(event);

	if (type != AIC_EVENT_TYPE_IRQ) {
		if (type != AIC_EVENT_TYPE_NONE) {
			printf("%s: unexpected event type %d\n",
			    __func__, type);
		}
		return;
	}

	if (die >= sc->sc_ndie)
		panic("%s: unexpected die %d", __func__, die);
	if (irq >= sc->sc_nirq)
		panic("%s: unexpected irq %d", __func__, irq);

	if (sc->sc_irq_handler[die][irq] == NULL)
		return;

	aplintc_sw_clr(sc, die, irq);
	ih = sc->sc_irq_handler[die][irq];

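	/*
	 * Reading the event register masked the interrupt.  If the
	 * current IPL blocks it, leave it masked and queue it; it is
	 * retriggered and unmasked once splx() lowers the IPL far
	 * enough.  Otherwise run the handler and unmask afterwards.
	 */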
	if (ci->ci_cpl >= ih->ih_ipl) {
		/* Queue interrupt as pending. */
		TAILQ_INSERT_TAIL(&sc->sc_irq_list[ih->ih_ipl], ih, ih_list);
	} else {
		s = aplintc_splraise(ih->ih_ipl);
		intr_enable();
		aplintc_run_handler(ih, frame, s);
		intr_disable();
		aplintc_splx(s);

		aplintc_mask_clr(sc, die, irq);
	}
}

void
aplintc_fiq_handler(void *frame)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct cpu_info *ci = curcpu();
	uint64_t reg;
	int s;

	/* Handle IPIs. */
	reg = READ_SPECIALREG(APL_IPI_SR_EL1);
	if (reg & APL_IPI_SR_EL1_PENDING) {
		WRITE_SPECIALREG(APL_IPI_SR_EL1, APL_IPI_SR_EL1_PENDING);
		aplintc_handle_ipi(sc);
	}

	/* Handle timer interrupts. */
	reg = READ_SPECIALREG(cntv_ctl_el0);
	if ((reg & (CNTV_CTL_ENABLE | CNTV_CTL_IMASK | CNTV_CTL_ISTATUS)) ==
	    (CNTV_CTL_ENABLE | CNTV_CTL_ISTATUS)) {
		if (ci->ci_cpl >= IPL_CLOCK) {
			/* Mask timer interrupt and mark as pending. */
			WRITE_SPECIALREG(cntv_ctl_el0, reg | CNTV_CTL_IMASK);
			sc->sc_fiq_pending[ci->ci_cpuid] = 1;
		} else {
			s = aplintc_splraise(IPL_CLOCK);
			sc->sc_fiq_handler->ih_func(frame);
			sc->sc_fiq_handler->ih_count.ec_count++;
			aplintc_splx(s);
		}
	}
}

void
aplintc_intr_barrier(void *cookie)
{
	struct intrhand	*ih = cookie;

	sched_barrier(ih->ih_ci);
}

int
aplintc_splraise(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;

	if (old > new)
		new = old;

	aplintc_setipl(new);
	return old;
}

int
aplintc_spllower(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;

	aplintc_splx(new);
	return old;
}

void
aplintc_splx(int new)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct cpu_info *ci = curcpu();
	struct intrhand *ih;
	uint64_t reg;
	u_long daif;
	int ipl;

	daif = intr_disable();

	/* Process pending FIQs. */
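	/*
	 * Clearing CNTV_CTL_IMASK while CNTV_CTL_ISTATUS is still set
	 * re-asserts the timer FIQ, so the deferred clock interrupt
	 * fires as soon as FIQs are enabled again.
	 */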
	if (sc->sc_fiq_pending[ci->ci_cpuid] && new < IPL_CLOCK) {
		sc->sc_fiq_pending[ci->ci_cpuid] = 0;
		reg = READ_SPECIALREG(cntv_ctl_el0);
		WRITE_SPECIALREG(cntv_ctl_el0, reg & ~CNTV_CTL_IMASK);
	}

	/* Process pending IRQs. */
	if (CPU_IS_PRIMARY(ci)) {
		for (ipl = ci->ci_cpl; ipl > new; ipl--) {
			while (!TAILQ_EMPTY(&sc->sc_irq_list[ipl])) {
				ih = TAILQ_FIRST(&sc->sc_irq_list[ipl]);
				TAILQ_REMOVE(&sc->sc_irq_list[ipl],
				    ih, ih_list);

				aplintc_sw_set(sc, ih->ih_die, ih->ih_irq);
				aplintc_mask_clr(sc, ih->ih_die, ih->ih_irq);
			}
		}
	}

	aplintc_setipl(new);
	intr_restore(daif);

	if (ci->ci_ipending & arm_smask[new])
		arm_do_pending_intr(new);
}

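/*
 * The AIC provides no hardware priority masking, so the IPL is purely
 * a software construct: raising it merely records the new level, and
 * interrupts that arrive while blocked are queued by the handlers
 * above and replayed from aplintc_splx().
 */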
void
aplintc_setipl(int ipl)
{
	struct cpu_info *ci = curcpu();

	ci->ci_cpl = ipl;
}

void *
aplintc_intr_establish(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct aplintc_softc *sc = cookie;
	struct intrhand *ih;
	uint32_t type = cell[0];
	uint32_t die, irq;

	if (sc->sc_version == 1) {
		die = 0;
		irq = cell[1];
	} else {
		die = cell[1];
		irq = cell[2];
	}

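	/*
	 * Type 0 cells describe ordinary IRQs; type 1 cells describe
	 * FIQs, to which only the clock (IPL_CLOCK) attaches.
	 */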
	if (type == 0) {
		KASSERT(level != (IPL_CLOCK | IPL_MPSAFE));
		if (die >= sc->sc_ndie) {
			panic("%s: bogus die number %d",
			    sc->sc_dev.dv_xname, die);
		}
		if (irq >= sc->sc_nirq) {
			panic("%s: bogus irq number %d",
			    sc->sc_dev.dv_xname, irq);
		}
	} else if (type == 1) {
		KASSERT(level == (IPL_CLOCK | IPL_MPSAFE));
		if (irq >= 4)
			panic("%s: bogus fiq number %d",
			    sc->sc_dev.dv_xname, irq);
	} else {
		panic("%s: bogus irq type %d",
		    sc->sc_dev.dv_xname, cell[0]);
	}

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_die = die;
	ih->ih_irq = irq;
	ih->ih_name = name;
	ih->ih_ci = ci;

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

	if (type == 0) {
		sc->sc_irq_handler[die][irq] = ih;
		if (sc->sc_version == 1)
			HWRITE4(sc, AIC_TARGET_CPU(irq), 1);
		aplintc_mask_clr(sc, die, irq);
	} else
		sc->sc_fiq_handler = ih;

	return ih;
}

void
aplintc_intr_disestablish(void *cookie)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct intrhand *ih = cookie;
	struct intrhand *tmp;
	u_long daif;

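	/* Only regular IRQ handlers can be removed; the FIQ handler stays. */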
	KASSERT(ih->ih_ipl < IPL_CLOCK);

	daif = intr_disable();

	aplintc_sw_clr(sc, ih->ih_die, ih->ih_irq);
	aplintc_mask_set(sc, ih->ih_die, ih->ih_irq);

	/* Remove ourselves from the list of pending IRQs. */
	TAILQ_FOREACH(tmp, &sc->sc_irq_list[ih->ih_ipl], ih_list) {
		if (tmp == ih) {
			TAILQ_REMOVE(&sc->sc_irq_list[ih->ih_ipl],
			    ih, ih_list);
			break;
		}
	}

	sc->sc_irq_handler[ih->ih_die][ih->ih_irq] = NULL;
	if (ih->ih_name)
		evcount_detach(&ih->ih_count);

	intr_restore(daif);

	free(ih, M_DEVBUF, sizeof(*ih));
}

void
aplintc_send_ipi(struct cpu_info *ci, int reason)
{
	struct aplintc_softc *sc = aplintc_sc;
	uint64_t sendmask;

	if (ci == curcpu() && reason == ARM_IPI_NOP)
		return;

	/* never overwrite IPI_DDB or IPI_HALT with IPI_NOP */
	if (reason == ARM_IPI_DDB || reason == ARM_IPI_HALT)
		sc->sc_ipi_reason[ci->ci_cpuid] = reason;
	membar_producer();

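	/*
	 * Aff0 of the target's MPIDR selects the core within a
	 * cluster; for a remote cluster its Aff1 value is placed in
	 * bits 16-23 of the global request register.
	 */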
	sendmask = (ci->ci_mpidr & MPIDR_AFF0);
	if ((curcpu()->ci_mpidr & MPIDR_AFF1) == (ci->ci_mpidr & MPIDR_AFF1)) {
		/* Same cluster, so request local delivery. */
		WRITE_SPECIALREG(APL_IPI_LOCAL_RR_EL1, sendmask);
	} else {
		/* Different cluster, so request global delivery. */
		sendmask |= (ci->ci_mpidr & MPIDR_AFF1) << 8;
		WRITE_SPECIALREG(APL_IPI_GLOBAL_RR_EL1, sendmask);
	}
}

void
aplintc_handle_ipi(struct aplintc_softc *sc)
{
	struct cpu_info *ci = curcpu();

	membar_consumer();
	if (sc->sc_ipi_reason[ci->ci_cpuid] == ARM_IPI_DDB) {
		sc->sc_ipi_reason[ci->ci_cpuid] = ARM_IPI_NOP;
#ifdef DDB
		db_enter();
#endif
	}

	sc->sc_ipi_count.ec_count++;
}