/*	$OpenBSD: aplintc.c,v 1.18 2022/12/21 22:30:42 kettenis Exp $	*/
/*
 * Copyright (c) 2021 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/evcount.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <machine/armreg.h>
#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/intr.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/fdt.h>

#include <ddb/db_output.h>

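/*
 * Implementation-defined Apple system registers: a per-core control
 * register used to turn off external IRQ delivery, and the registers
 * used to send and acknowledge the fast IPIs delivered as FIQs.
 */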
#define APL_IRQ_CR_EL1		s3_4_c15_c10_4
#define  APL_IRQ_CR_EL1_DISABLE	(3 << 0)

#define APL_IPI_LOCAL_RR_EL1	s3_5_c15_c0_0
#define APL_IPI_GLOBAL_RR_EL1	s3_5_c15_c0_1
#define APL_IPI_SR_EL1		s3_5_c15_c1_1
#define  APL_IPI_SR_EL1_PENDING	(1 << 0)

#define AIC_INFO		0x0004
#define  AIC_INFO_NDIE(val)	(((val) >> 24) & 0xf)
#define  AIC_INFO_NIRQ(val)	((val) & 0xffff)
#define AIC_WHOAMI		0x2000
#define AIC_EVENT		0x2004
#define  AIC_EVENT_DIE(val)	(((val) >> 24) & 0xff)
#define  AIC_EVENT_TYPE(val)	(((val) >> 16) & 0xff)
#define  AIC_EVENT_TYPE_NONE	0
#define  AIC_EVENT_TYPE_IRQ	1
#define  AIC_EVENT_TYPE_IPI	4
#define  AIC_EVENT_IRQ(val)	((val) & 0xffff)
#define  AIC_EVENT_IPI_OTHER	1
#define  AIC_EVENT_IPI_SELF	2
#define AIC_IPI_SEND		0x2008
#define AIC_IPI_ACK		0x200c
#define AIC_IPI_MASK_SET	0x2024
#define AIC_IPI_MASK_CLR	0x2028
#define  AIC_IPI_OTHER		(1U << 0)
#define  AIC_IPI_SELF		(1U << 31)
#define AIC_TARGET_CPU(irq)	(0x3000 + ((irq) << 2))
#define AIC_SW_SET(irq)		(0x4000 + (((irq) >> 5) << 2))
#define AIC_SW_CLR(irq)		(0x4080 + (((irq) >> 5) << 2))
#define  AIC_SW_BIT(irq)	(1U << ((irq) & 0x1f))
#define AIC_MASK_SET(irq)	(0x4100 + (((irq) >> 5) << 2))
#define AIC_MASK_CLR(irq)	(0x4180 + (((irq) >> 5) << 2))
#define  AIC_MASK_BIT(irq)	(1U << ((irq) & 0x1f))

#define AIC2_CONFIG		0x0014
#define  AIC2_CONFIG_ENABLE	(1 << 0)
#define AIC2_SW_SET(die, irq)	(0x6000 + (die) * 0x4a00 + (((irq) >> 5) << 2))
#define AIC2_SW_CLR(die, irq)	(0x6200 + (die) * 0x4a00 + (((irq) >> 5) << 2))
#define AIC2_MASK_SET(die, irq)	(0x6400 + (die) * 0x4a00 + (((irq) >> 5) << 2))
#define AIC2_MASK_CLR(die, irq)	(0x6600 + (die) * 0x4a00 + (((irq) >> 5) << 2))
#define AIC2_EVENT		0xc000

#define AIC_MAXCPUS		32
#define AIC_MAXDIES		4

#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define HSET4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
#define HCLR4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))

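/* State for a single established interrupt handler. */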
struct intrhand {
	TAILQ_ENTRY(intrhand) ih_list;
	int		(*ih_func)(void *);
	void		*ih_arg;
	int		ih_ipl;
	int		ih_flags;
	int		ih_die;
	int		ih_irq;
	struct evcount	ih_count;
	const char	*ih_name;
	struct cpu_info *ih_ci;
};

struct aplintc_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_space_handle_t	sc_event_ioh;

	int			sc_version;

	struct interrupt_controller sc_ic;

	struct intrhand		*sc_fiq_handler;
	int			sc_fiq_pending[AIC_MAXCPUS];
	struct intrhand		**sc_irq_handler[AIC_MAXDIES];
	int			sc_nirq;
	int			sc_ndie;
	int			sc_ncells;
	TAILQ_HEAD(, intrhand)	sc_irq_list[NIPL];

	uint32_t		sc_cpuremap[AIC_MAXCPUS];
	u_int			sc_ipi_reason[AIC_MAXCPUS];
	struct evcount		sc_ipi_count;
};

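/*
 * AIC1 and AIC2 lay out their software-set/clear and mask registers
 * differently, and AIC2 additionally banks them per die.  These
 * helpers hide the difference from the rest of the driver.
 */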
static inline void
aplintc_sw_clr(struct aplintc_softc *sc, int die, int irq)
{
	if (sc->sc_version == 1)
		HWRITE4(sc, AIC_SW_CLR(irq), AIC_SW_BIT(irq));
	else
		HWRITE4(sc, AIC2_SW_CLR(die, irq), AIC_SW_BIT(irq));
}

static inline void
aplintc_sw_set(struct aplintc_softc *sc, int die, int irq)
{
	if (sc->sc_version == 1)
		HWRITE4(sc, AIC_SW_SET(irq), AIC_SW_BIT(irq));
	else
		HWRITE4(sc, AIC2_SW_SET(die, irq), AIC_SW_BIT(irq));
}

static inline void
aplintc_mask_clr(struct aplintc_softc *sc, int die, int irq)
{
	if (sc->sc_version == 1)
		HWRITE4(sc, AIC_MASK_CLR(irq), AIC_MASK_BIT(irq));
	else
		HWRITE4(sc, AIC2_MASK_CLR(die, irq), AIC_MASK_BIT(irq));
}

static inline void
aplintc_mask_set(struct aplintc_softc *sc, int die, int irq)
{
	if (sc->sc_version == 1)
		HWRITE4(sc, AIC_MASK_SET(irq), AIC_MASK_BIT(irq));
	else
		HWRITE4(sc, AIC2_MASK_SET(die, irq), AIC_MASK_BIT(irq));
}

struct aplintc_softc *aplintc_sc;

int	aplintc_match(struct device *, void *, void *);
void	aplintc_attach(struct device *, struct device *, void *);

const struct cfattach aplintc_ca = {
	sizeof (struct aplintc_softc), aplintc_match, aplintc_attach
};

struct cfdriver aplintc_cd = {
	NULL, "aplintc", DV_DULL
};

void	aplintc_cpuinit(void);
void	aplintc_irq_handler(void *);
void	aplintc_fiq_handler(void *);
void	aplintc_intr_barrier(void *);
int	aplintc_splraise(int);
int	aplintc_spllower(int);
void	aplintc_splx(int);
void	aplintc_setipl(int);
void	aplintc_enable_wakeup(void);
void	aplintc_disable_wakeup(void);

void	*aplintc_intr_establish(void *, int *, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	aplintc_intr_disestablish(void *);
void	aplintc_intr_set_wakeup(void *);

void	aplintc_send_ipi(struct cpu_info *, int);
void	aplintc_handle_ipi(struct aplintc_softc *);

int
aplintc_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "apple,aic") ||
	    OF_is_compatible(faa->fa_node, "apple,aic2");
}

void
aplintc_attach(struct device *parent, struct device *self, void *aux)
{
	struct aplintc_softc *sc = (struct aplintc_softc *)self;
	struct fdt_attach_args *faa = aux;
	uint32_t info;
	int die, ipl;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf(": can't map registers\n");
		return;
	}

	if (OF_is_compatible(faa->fa_node, "apple,aic2"))
		sc->sc_version = 2;
	else
		sc->sc_version = 1;

	sc->sc_ncells = OF_getpropint(faa->fa_node, "#interrupt-cells", 3);
	if (sc->sc_ncells < 3 || sc->sc_ncells > 4) {
		printf(": invalid number of cells\n");
		return;
	}

	/*
	 * AIC2 has the event register specified separately.  However,
	 * a preliminary device tree binding for AIC2 had it included
	 * in the main register area, like with AIC1.  Support both
	 * for now.
	 */
	if (faa->fa_nreg > 1) {
		if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
		    faa->fa_reg[1].size, 0, &sc->sc_event_ioh)) {
			printf(": can't map event register\n");
			return;
		}
	} else {
		if (sc->sc_version == 1) {
			bus_space_subregion(sc->sc_iot, sc->sc_ioh,
			    AIC_EVENT, 4, &sc->sc_event_ioh);
		} else {
			bus_space_subregion(sc->sc_iot, sc->sc_ioh,
			    AIC2_EVENT, 4, &sc->sc_event_ioh);
		}
	}

	info = HREAD4(sc, AIC_INFO);
	sc->sc_nirq = AIC_INFO_NIRQ(info);
	sc->sc_ndie = AIC_INFO_NDIE(info) + 1;
	for (die = 0; die < sc->sc_ndie; die++) {
		sc->sc_irq_handler[die] = mallocarray(sc->sc_nirq,
		    sizeof(struct intrhand *), M_DEVBUF, M_WAITOK | M_ZERO);
	}
	for (ipl = 0; ipl < NIPL; ipl++)
		TAILQ_INIT(&sc->sc_irq_list[ipl]);

	printf(" nirq %d ndie %d\n", sc->sc_nirq, sc->sc_ndie);

	arm_init_smask();

	aplintc_sc = sc;
	aplintc_cpuinit();

	evcount_attach(&sc->sc_ipi_count, "ipi", NULL);
	arm_set_intr_handler(aplintc_splraise, aplintc_spllower, aplintc_splx,
	    aplintc_setipl, aplintc_irq_handler, aplintc_fiq_handler,
	    aplintc_enable_wakeup, aplintc_disable_wakeup);

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = self;
	sc->sc_ic.ic_establish = aplintc_intr_establish;
	sc->sc_ic.ic_disestablish = aplintc_intr_disestablish;
	sc->sc_ic.ic_cpu_enable = aplintc_cpuinit;
	sc->sc_ic.ic_barrier = aplintc_intr_barrier;
	sc->sc_ic.ic_set_wakeup = aplintc_intr_set_wakeup;
	arm_intr_register_fdt(&sc->sc_ic);

#ifdef MULTIPROCESSOR
	intr_send_ipi_func = aplintc_send_ipi;
#endif

	if (sc->sc_version == 2)
		HSET4(sc, AIC2_CONFIG, AIC2_CONFIG_ENABLE);
}

void
aplintc_cpuinit(void)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct cpu_info *ci = curcpu();
	uint32_t hwid;

	KASSERT(ci->ci_cpuid < AIC_MAXCPUS);

	/*
	 * AIC2 does not provide us with a way to target external
	 * interrupts at a particular core.  Therefore, disable IRQ
	 * delivery to the secondary CPUs, which makes sure all
	 * external interrupts are delivered to the primary CPU.
	 */
	if (!CPU_IS_PRIMARY(ci))
		WRITE_SPECIALREG(APL_IRQ_CR_EL1, APL_IRQ_CR_EL1_DISABLE);

	if (sc->sc_version == 1) {
		hwid = HREAD4(sc, AIC_WHOAMI);
		KASSERT(hwid < AIC_MAXCPUS);
		sc->sc_cpuremap[ci->ci_cpuid] = hwid;
	}
}

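/*
 * Run an interrupt handler, taking the kernel lock if the handler
 * was not established as IPL_MPSAFE and runs below IPL_SCHED.
 */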
void
aplintc_run_handler(struct intrhand *ih, void *frame, int s)
{
	void *arg;
	int handled;

#ifdef MULTIPROCESSOR
	int need_lock;

	if (ih->ih_flags & IPL_MPSAFE)
		need_lock = 0;
	else
		need_lock = s < IPL_SCHED;

	if (need_lock)
		KERNEL_LOCK();
#endif

	if (ih->ih_arg)
		arg = ih->ih_arg;
	else
		arg = frame;

	handled = ih->ih_func(arg);
	if (handled)
		ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
	if (need_lock)
		KERNEL_UNLOCK();
#endif
}

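/*
 * Handle external interrupts.  Reading the event register
 * acknowledges the event and leaves the interrupt masked.  If the
 * current IPL blocks the handler, queue it and leave it masked;
 * aplintc_splx() retriggers and unmasks it once the IPL drops.
 * Otherwise run the handler at its IPL and unmask the interrupt
 * again afterwards.
 */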
void
aplintc_irq_handler(void *frame)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct cpu_info *ci = curcpu();
	struct intrhand *ih;
	uint32_t event;
	uint32_t die, irq, type;
	int s;

	event = bus_space_read_4(sc->sc_iot, sc->sc_event_ioh, 0);
	die = AIC_EVENT_DIE(event);
	irq = AIC_EVENT_IRQ(event);
	type = AIC_EVENT_TYPE(event);

	if (type != AIC_EVENT_TYPE_IRQ) {
		if (type != AIC_EVENT_TYPE_NONE) {
			printf("%s: unexpected event type %d\n",
			    __func__, type);
		}
		return;
	}

	if (die >= sc->sc_ndie)
		panic("%s: unexpected die %d", __func__, die);
	if (irq >= sc->sc_nirq)
		panic("%s: unexpected irq %d", __func__, irq);

	if (sc->sc_irq_handler[die][irq] == NULL)
		return;

	aplintc_sw_clr(sc, die, irq);
	ih = sc->sc_irq_handler[die][irq];

	if (ci->ci_cpl >= ih->ih_ipl) {
		/* Queue interrupt as pending. */
		TAILQ_INSERT_TAIL(&sc->sc_irq_list[ih->ih_ipl], ih, ih_list);
	} else {
		s = aplintc_splraise(ih->ih_ipl);
		intr_enable();
		aplintc_run_handler(ih, frame, s);
		intr_disable();
		aplintc_splx(s);

		aplintc_mask_clr(sc, die, irq);
	}
}

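/*
 * FIQs deliver the fast IPIs and the virtual timer interrupt.  A
 * timer interrupt that fires while the IPL is at or above IPL_CLOCK
 * is masked in the timer itself and marked as pending;
 * aplintc_splx() unmasks it again when the IPL drops.
 */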
void
aplintc_fiq_handler(void *frame)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct cpu_info *ci = curcpu();
	uint64_t reg;
	int s;

#ifdef MULTIPROCESSOR
	/* Handle IPIs. */
	reg = READ_SPECIALREG(APL_IPI_SR_EL1);
	if (reg & APL_IPI_SR_EL1_PENDING) {
		WRITE_SPECIALREG(APL_IPI_SR_EL1, APL_IPI_SR_EL1_PENDING);
		aplintc_handle_ipi(sc);
	}
#endif

	/* Handle timer interrupts. */
	reg = READ_SPECIALREG(cntv_ctl_el0);
	if ((reg & (CNTV_CTL_ENABLE | CNTV_CTL_IMASK | CNTV_CTL_ISTATUS)) ==
	    (CNTV_CTL_ENABLE | CNTV_CTL_ISTATUS)) {
		if (ci->ci_cpl >= IPL_CLOCK) {
			/* Mask timer interrupt and mark as pending. */
			WRITE_SPECIALREG(cntv_ctl_el0, reg | CNTV_CTL_IMASK);
			sc->sc_fiq_pending[ci->ci_cpuid] = 1;
		} else {
			s = aplintc_splraise(IPL_CLOCK);
			sc->sc_fiq_handler->ih_func(frame);
			sc->sc_fiq_handler->ih_count.ec_count++;
			aplintc_splx(s);
		}
	}
}

void
aplintc_intr_barrier(void *cookie)
{
	struct intrhand	*ih = cookie;

	sched_barrier(ih->ih_ci);
}

int
aplintc_splraise(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;

	if (old > new)
		new = old;

	aplintc_setipl(new);
	return old;
}

int
aplintc_spllower(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;

	aplintc_splx(new);
	return old;
}

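/*
 * On the way down, replay the deferred timer FIQ and any queued IRQs
 * that the new IPL no longer blocks.  Queued IRQs are retriggered
 * through the software-set register and unmasked.  Since external
 * interrupts are only delivered to the primary CPU, only it needs to
 * walk the pending lists.
 */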
void
aplintc_splx(int new)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct cpu_info *ci = curcpu();
	struct intrhand *ih;
	uint64_t reg;
	u_long daif;
	int ipl;

	daif = intr_disable();

	/* Process pending FIQs. */
	if (sc->sc_fiq_pending[ci->ci_cpuid] && new < IPL_CLOCK) {
		sc->sc_fiq_pending[ci->ci_cpuid] = 0;
		reg = READ_SPECIALREG(cntv_ctl_el0);
		WRITE_SPECIALREG(cntv_ctl_el0, reg & ~CNTV_CTL_IMASK);
	}

	/* Process pending IRQs. */
	if (CPU_IS_PRIMARY(ci)) {
		for (ipl = ci->ci_cpl; ipl > new; ipl--) {
			while (!TAILQ_EMPTY(&sc->sc_irq_list[ipl])) {
				ih = TAILQ_FIRST(&sc->sc_irq_list[ipl]);
				TAILQ_REMOVE(&sc->sc_irq_list[ipl],
				    ih, ih_list);

				aplintc_sw_set(sc, ih->ih_die, ih->ih_irq);
				aplintc_mask_clr(sc, ih->ih_die, ih->ih_irq);
			}
		}
	}

	aplintc_setipl(new);
	intr_restore(daif);

	if (ci->ci_ipending & arm_smask[new])
		arm_do_pending_intr(new);
}

void
aplintc_setipl(int ipl)
{
	struct cpu_info *ci = curcpu();

	ci->ci_cpl = ipl;
}

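/*
 * Suspend/resume support: mask every established interrupt that is
 * not flagged IPL_WAKEUP before going to sleep, and unmask them
 * again on the way back up.
 */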
void
aplintc_enable_wakeup(void)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct intrhand *ih;
	int die, irq;

	for (die = 0; die < sc->sc_ndie; die++) {
		for (irq = 0; irq < sc->sc_nirq; irq++) {
			ih = sc->sc_irq_handler[die][irq];
			if (ih == NULL || (ih->ih_flags & IPL_WAKEUP))
				continue;
			aplintc_mask_set(sc, die, irq);
		}
	}
}

void
aplintc_disable_wakeup(void)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct intrhand *ih;
	int die, irq;

	for (die = 0; die < sc->sc_ndie; die++) {
		for (irq = 0; irq < sc->sc_nirq; irq++) {
			ih = sc->sc_irq_handler[die][irq];
			if (ih == NULL || (ih->ih_flags & IPL_WAKEUP))
				continue;
			aplintc_mask_clr(sc, die, irq);
		}
	}
}

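/*
 * Establish an interrupt handler.  With the 3-cell binding the cells
 * are <type irq ...>; the 4-cell binding inserts a die number:
 * <type die irq ...>.  Type 0 is a regular IRQ, type 1 an FIQ.  The
 * driver keeps a single FIQ handler (the timer), which must be
 * established at IPL_CLOCK | IPL_MPSAFE.
 */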
void *
aplintc_intr_establish(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct aplintc_softc *sc = cookie;
	struct intrhand *ih;
	uint32_t type = cell[0];
	uint32_t die, irq;

	if (sc->sc_ncells == 3) {
		die = 0;
		irq = cell[1];
	} else {
		die = cell[1];
		irq = cell[2];
	}

	if (type == 0) {
		KASSERT(level != (IPL_CLOCK | IPL_MPSAFE));
		if (die >= sc->sc_ndie) {
			panic("%s: bogus die number %d",
			    sc->sc_dev.dv_xname, die);
		}
		if (irq >= sc->sc_nirq) {
			panic("%s: bogus irq number %d",
			    sc->sc_dev.dv_xname, irq);
		}
	} else if (type == 1) {
		KASSERT(level == (IPL_CLOCK | IPL_MPSAFE));
		if (irq >= 4)
			panic("%s: bogus fiq number %d",
			    sc->sc_dev.dv_xname, irq);
	} else {
		panic("%s: bogus irq type %d",
		    sc->sc_dev.dv_xname, cell[0]);
	}

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_die = die;
	ih->ih_irq = irq;
	ih->ih_name = name;
	ih->ih_ci = ci;

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

	if (type == 0) {
		sc->sc_irq_handler[die][irq] = ih;
		if (sc->sc_version == 1)
			HWRITE4(sc, AIC_TARGET_CPU(irq), 1);
		aplintc_mask_clr(sc, die, irq);
	} else
		sc->sc_fiq_handler = ih;

	return ih;
}

void
aplintc_intr_disestablish(void *cookie)
{
	struct aplintc_softc *sc = aplintc_sc;
	struct intrhand *ih = cookie;
	struct intrhand *tmp;
	u_long daif;

	KASSERT(ih->ih_ipl < IPL_CLOCK);

	daif = intr_disable();

	aplintc_sw_clr(sc, ih->ih_die, ih->ih_irq);
	aplintc_mask_set(sc, ih->ih_die, ih->ih_irq);

	/* Remove ourselves from the list of pending IRQs. */
	TAILQ_FOREACH(tmp, &sc->sc_irq_list[ih->ih_ipl], ih_list) {
		if (tmp == ih) {
			TAILQ_REMOVE(&sc->sc_irq_list[ih->ih_ipl],
			    ih, ih_list);
			break;
		}
	}

	sc->sc_irq_handler[ih->ih_die][ih->ih_irq] = NULL;
	if (ih->ih_name)
		evcount_detach(&ih->ih_count);

	intr_restore(daif);

	free(ih, M_DEVBUF, sizeof(*ih));
}

void
aplintc_intr_set_wakeup(void *cookie)
{
	struct intrhand *ih = cookie;

	ih->ih_flags |= IPL_WAKEUP;
}

#ifdef MULTIPROCESSOR

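/*
 * Fast IPIs are sent through implementation-defined system
 * registers: a "local" request register for targets in the same
 * cluster, and a "global" one that also encodes the target cluster.
 */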
void
aplintc_send_ipi(struct cpu_info *ci, int reason)
{
	struct aplintc_softc *sc = aplintc_sc;
	uint64_t sendmask;

	if (ci == curcpu() && reason == ARM_IPI_NOP)
		return;

	/* never overwrite IPI_DDB or IPI_HALT with IPI_NOP */
	if (reason == ARM_IPI_DDB || reason == ARM_IPI_HALT)
		sc->sc_ipi_reason[ci->ci_cpuid] = reason;
	membar_producer();

	sendmask = (ci->ci_mpidr & MPIDR_AFF0);
	if ((curcpu()->ci_mpidr & MPIDR_AFF1) == (ci->ci_mpidr & MPIDR_AFF1)) {
		/* Same cluster, so request local delivery. */
		WRITE_SPECIALREG(APL_IPI_LOCAL_RR_EL1, sendmask);
	} else {
		/* Different cluster, so request global delivery. */
		sendmask |= (ci->ci_mpidr & MPIDR_AFF1) << 8;
		WRITE_SPECIALREG(APL_IPI_GLOBAL_RR_EL1, sendmask);
	}
}

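/*
 * An IPI arrives as an FIQ.  The reason word is written before the
 * IPI is sent, so the membar_consumer() here pairs with the
 * membar_producer() in aplintc_send_ipi().
 */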
void
aplintc_handle_ipi(struct aplintc_softc *sc)
{
	struct cpu_info *ci = curcpu();

	membar_consumer();
	if (sc->sc_ipi_reason[ci->ci_cpuid] == ARM_IPI_DDB) {
		sc->sc_ipi_reason[ci->ci_cpuid] = ARM_IPI_NOP;
#ifdef DDB
		db_enter();
#endif
	} else if (sc->sc_ipi_reason[ci->ci_cpuid] == ARM_IPI_HALT) {
		sc->sc_ipi_reason[ci->ci_cpuid] = ARM_IPI_NOP;
		cpu_halt();
	}

	sc->sc_ipi_count.ec_count++;
}

#endif