/* $OpenBSD: bcm2836_intr.c,v 1.15 2022/12/21 22:30:42 kettenis Exp $ */
/*
 * Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
 * Copyright (c) 2015 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/evcount.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/fdt.h>

/* registers */
#define	INTC_PENDING_BANK0	0x00
#define	INTC_PENDING_BANK1	0x04
#define	INTC_PENDING_BANK2	0x08
#define	INTC_FIQ_CONTROL	0x0C
#define	INTC_ENABLE_BANK1	0x10
#define	INTC_ENABLE_BANK2	0x14
#define	INTC_ENABLE_BANK0	0x18
#define	INTC_DISABLE_BANK1	0x1C
#define	INTC_DISABLE_BANK2	0x20
#define	INTC_DISABLE_BANK0	0x24

/* arm local */
#define	ARM_LOCAL_CONTROL		0x00
#define	ARM_LOCAL_PRESCALER		0x08
#define	 PRESCALER_19_2			0x80000000 /* 19.2 MHz */
#define	ARM_LOCAL_INT_TIMER(n)		(0x40 + (n) * 4)
#define	ARM_LOCAL_INT_MAILBOX(n)	(0x50 + (n) * 4)
#define	ARM_LOCAL_INT_PENDING(n)	(0x60 + (n) * 4)
#define	 ARM_LOCAL_INT_PENDING_MASK	0x0f
#define	ARM_LOCAL_INT_MAILBOX_SET(n)	(0x80 + (n) * 16)
#define	ARM_LOCAL_INT_MAILBOX_CLR(n)	(0xc0 + (n) * 16)

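/*
 * Interrupt sources are flattened into a single 0-127 number space:
 * bank 1 occupies 0-31, bank 2 occupies 32-63, bank 0 occupies 64-95
 * and the per-core ARM-local interrupts occupy 96-127.
 */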
#define	BANK0_START	64
#define	BANK0_END	(BANK0_START + 32 - 1)
#define	BANK1_START	0
#define	BANK1_END	(BANK1_START + 32 - 1)
#define	BANK2_START	32
#define	BANK2_END	(BANK2_START + 32 - 1)
#define	LOCAL_START	96
#define	LOCAL_END	(LOCAL_START + 32 - 1)

#define	IS_IRQ_BANK0(n)	(((n) >= BANK0_START) && ((n) <= BANK0_END))
#define	IS_IRQ_BANK1(n)	(((n) >= BANK1_START) && ((n) <= BANK1_END))
#define	IS_IRQ_BANK2(n)	(((n) >= BANK2_START) && ((n) <= BANK2_END))
#define	IS_IRQ_LOCAL(n)	(((n) >= LOCAL_START) && ((n) <= LOCAL_END))
#define	IRQ_BANK0(n)	((n) - BANK0_START)
#define	IRQ_BANK1(n)	((n) - BANK1_START)
#define	IRQ_BANK2(n)	((n) - BANK2_START)
#define	IRQ_LOCAL(n)	((n) - LOCAL_START)

#define ARM_LOCAL_IRQ_MAILBOX(n) (4 + (n))

#define	INTC_NIRQ	128
#define	INTC_NBANK	4

#define INTC_IRQ_TO_REG(i)	(((i) >> 5) & 0x3)
#define INTC_IRQ_TO_REGi(i)	((i) & 0x1f)

struct intrhand {
	TAILQ_ENTRY(intrhand) ih_list;	/* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_flags;
	int ih_irq;			/* IRQ number */
	struct evcount ih_count;	/* interrupt counter */
	char *ih_name;			/* device name */
};

struct intrsource {
	TAILQ_HEAD(, intrhand) is_list;	/* handler list */
	int is_irq;			/* IPL to raise to while handling */
};

struct bcm_intc_softc {
	struct device		 sc_dev;
	struct intrsource	 sc_handler[INTC_NIRQ];
	uint32_t		 sc_imask[INTC_NBANK][NIPL];
	int32_t			 sc_localcoremask[MAXCPUS];
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;
	bus_space_handle_t	 sc_lioh;
	struct interrupt_controller sc_intc;
	struct interrupt_controller sc_l1_intc;
};
struct bcm_intc_softc *bcm_intc;

int	 bcm_intc_match(struct device *, void *, void *);
void	 bcm_intc_attach(struct device *, struct device *, void *);
void	 bcm_intc_splx(int new);
int	 bcm_intc_spllower(int new);
int	 bcm_intc_splraise(int new);
void	 bcm_intc_setipl(int new);
void	 bcm_intc_calc_mask(void);
void	*bcm_intc_intr_establish(int, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	*bcm_intc_intr_establish_fdt(void *, int *, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	*l1_intc_intr_establish_fdt(void *, int *, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	 bcm_intc_intr_disestablish(void *);
void	 bcm_intc_irq_handler(void *);
void	 bcm_intc_intr_route(void *, int, struct cpu_info *);
void	 bcm_intc_handle_ipi(void);
void	 bcm_intc_send_ipi(struct cpu_info *, int);

const struct cfattach	bcmintc_ca = {
	sizeof (struct bcm_intc_softc), bcm_intc_match, bcm_intc_attach
};

struct cfdriver bcmintc_cd = {
	NULL, "bcmintc", DV_DULL
};

int
bcm_intc_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;

	if (OF_is_compatible(faa->fa_node, "brcm,bcm2836-armctrl-ic"))
		return 1;

	return 0;
}

void
bcm_intc_attach(struct device *parent, struct device *self, void *aux)
{
	struct bcm_intc_softc *sc = (struct bcm_intc_softc *)self;
	struct fdt_attach_args *faa = aux;
	uint32_t reg[2];
	int node;
	int i;

	if (faa->fa_nreg < 1)
		return;

	bcm_intc = sc;

	sc->sc_iot = faa->fa_iot;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("%s: bus_space_map failed!", __func__);

	/*
	 * ARM control logic.
	 *
	 * XXX Should really be implemented as a separate interrupt
	 * controller, but for now it is easier to handle it together
	 * with its BCM2835 partner.
	 */
	node = OF_finddevice("/soc/local_intc");
	if (node == -1)
		panic("%s: can't find ARM control logic", __func__);

	if (OF_getpropintarray(node, "reg", reg, sizeof(reg)) != sizeof(reg))
		panic("%s: can't map ARM control logic", __func__);

	if (bus_space_map(sc->sc_iot, reg[0], reg[1], 0, &sc->sc_lioh))
		panic("%s: bus_space_map failed!", __func__);

	printf("\n");

	/* mask all interrupts */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK0,
	    0xffffffff);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK1,
	    0xffffffff);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK2,
	    0xffffffff);

	/* ARM control specific */
	bus_space_write_4(sc->sc_iot, sc->sc_lioh, ARM_LOCAL_CONTROL, 0);
	bus_space_write_4(sc->sc_iot, sc->sc_lioh, ARM_LOCAL_PRESCALER,
	    PRESCALER_19_2);
	for (i = 0; i < 4; i++)
		bus_space_write_4(sc->sc_iot, sc->sc_lioh,
		    ARM_LOCAL_INT_TIMER(i), 0);
	for (i = 0; i < 4; i++)
		bus_space_write_4(sc->sc_iot, sc->sc_lioh,
		    ARM_LOCAL_INT_MAILBOX(i), 0);

	for (i = 0; i < INTC_NIRQ; i++) {
		TAILQ_INIT(&sc->sc_handler[i].is_list);
	}

	bcm_intc_calc_mask();

	/* insert self as interrupt handler */
	arm_set_intr_handler(bcm_intc_splraise, bcm_intc_spllower,
	    bcm_intc_splx, bcm_intc_setipl, bcm_intc_irq_handler, NULL,
	    NULL, NULL);

	sc->sc_intc.ic_node = faa->fa_node;
	sc->sc_intc.ic_cookie = sc;
	sc->sc_intc.ic_establish = bcm_intc_intr_establish_fdt;
	sc->sc_intc.ic_disestablish = bcm_intc_intr_disestablish;
	sc->sc_intc.ic_route = bcm_intc_intr_route;
	arm_intr_register_fdt(&sc->sc_intc);

	sc->sc_l1_intc.ic_node = node;
	sc->sc_l1_intc.ic_cookie = sc;
	sc->sc_l1_intc.ic_establish = l1_intc_intr_establish_fdt;
	sc->sc_l1_intc.ic_disestablish = bcm_intc_intr_disestablish;
	sc->sc_l1_intc.ic_route = bcm_intc_intr_route;
	arm_intr_register_fdt(&sc->sc_l1_intc);

	intr_send_ipi_func = bcm_intc_send_ipi;

	bcm_intc_setipl(IPL_HIGH);  /* XXX ??? */
	intr_enable();
}

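/*
 * Set (bcm_intc_intr_enable) or clear (bcm_intc_intr_disable) the bit
 * for "irq" in the per-bank enable mask for the given IPL.  The masks
 * are only pushed to the hardware by bcm_intc_setipl().
 */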
void
bcm_intc_intr_enable(int irq, int ipl)
{
	struct bcm_intc_softc	*sc = bcm_intc;

	if (IS_IRQ_BANK0(irq))
		sc->sc_imask[0][ipl] |= (1 << IRQ_BANK0(irq));
	else if (IS_IRQ_BANK1(irq))
		sc->sc_imask[1][ipl] |= (1 << IRQ_BANK1(irq));
	else if (IS_IRQ_BANK2(irq))
		sc->sc_imask[2][ipl] |= (1 << IRQ_BANK2(irq));
	else if (IS_IRQ_LOCAL(irq))
		sc->sc_imask[3][ipl] |= (1 << IRQ_LOCAL(irq));
	else
		printf("%s: invalid irq number: %d\n", __func__, irq);
}

void
bcm_intc_intr_disable(int irq, int ipl)
{
	struct bcm_intc_softc	*sc = bcm_intc;

	if (IS_IRQ_BANK0(irq))
		sc->sc_imask[0][ipl] &= ~(1 << IRQ_BANK0(irq));
	else if (IS_IRQ_BANK1(irq))
		sc->sc_imask[1][ipl] &= ~(1 << IRQ_BANK1(irq));
	else if (IS_IRQ_BANK2(irq))
		sc->sc_imask[2][ipl] &= ~(1 << IRQ_BANK2(irq));
	else if (IS_IRQ_LOCAL(irq))
		sc->sc_imask[3][ipl] &= ~(1 << IRQ_LOCAL(irq));
	else
		printf("%s: invalid irq number: %d\n", __func__, irq);
}

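/*
 * Recompute the per-IPL enable masks from the list of established
 * handlers and reprogram the hardware for the current IPL.  An IRQ is
 * left enabled at every IPL below the lowest IPL of its handlers.
 */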
void
bcm_intc_calc_mask(void)
{
	struct cpu_info *ci = curcpu();
	struct bcm_intc_softc *sc = bcm_intc;
	int irq;
	struct intrhand *ih;
	int i;

	for (irq = 0; irq < INTC_NIRQ; irq++) {
		int max = IPL_NONE;
		int min = IPL_HIGH;
		TAILQ_FOREACH(ih, &sc->sc_handler[irq].is_list, ih_list) {
			if (ih->ih_ipl > max)
				max = ih->ih_ipl;

			if (ih->ih_ipl < min)
				min = ih->ih_ipl;
		}

		sc->sc_handler[irq].is_irq = max;

		if (max == IPL_NONE)
			min = IPL_NONE;

#ifdef DEBUG_INTC
		if (min != IPL_NONE) {
			printf("irq %d to block at %d %d reg %d bit %d\n",
			    irq, max, min, INTC_IRQ_TO_REG(irq),
			    INTC_IRQ_TO_REGi(irq));
		}
#endif
		/* Enable interrupts at lower levels, clear -> enable */
		for (i = 0; i < min; i++)
			bcm_intc_intr_enable(irq, i);
		for (; i <= IPL_HIGH; i++)
			bcm_intc_intr_disable(irq, i);
	}
	arm_init_smask();
	bcm_intc_setipl(ci->ci_cpl);
}

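/*
 * Drop the IPL to "new", first running any pending soft interrupts
 * that the new level unmasks.
 */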
void
bcm_intc_splx(int new)
{
	struct cpu_info *ci = curcpu();

	if (ci->ci_ipending & arm_smask[new])
		arm_do_pending_intr(new);

	bcm_intc_setipl(new);
}

int
bcm_intc_spllower(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;
	bcm_intc_splx(new);
	return (old);
}

int
bcm_intc_splraise(int new)
{
	struct cpu_info *ci = curcpu();
	int old;
	old = ci->ci_cpl;

	/*
	 * setipl must always be called because there is a race window
	 * where ci_cpl is updated before the hardware mask is set.
	 * If an interrupt occurs in that window and the mask is not
	 * rewritten here, the hardware might not get updated on the
	 * next splraise, completely messing up spl protection.
	 */
	if (old > new)
		new = old;

	bcm_intc_setipl(new);

	return (old);
}

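/*
 * Switch the current CPU to IPL "new" and push the corresponding
 * enable masks to the hardware: the three peripheral banks are only
 * reprogrammed from CPU 0, the ARM-local timer mask is reprogrammed
 * for the calling core.
 */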
void
bcm_intc_setipl(int new)
{
	struct cpu_info *ci = curcpu();
	struct bcm_intc_softc *sc = bcm_intc;
	u_long psw;

	psw = intr_disable();
	ci->ci_cpl = new;
	if (cpu_number() == 0) {
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK0,
		    0xffffffff);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK1,
		    0xffffffff);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK2,
		    0xffffffff);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_ENABLE_BANK0,
		    sc->sc_imask[0][new]);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_ENABLE_BANK1,
		    sc->sc_imask[1][new]);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_ENABLE_BANK2,
		    sc->sc_imask[2][new]);
	}
	/* timer for current core */
	bus_space_write_4(sc->sc_iot, sc->sc_lioh,
	    ARM_LOCAL_INT_TIMER(cpu_number()),
	    sc->sc_imask[3][ci->ci_cpl] &
	    sc->sc_localcoremask[cpu_number()]);
	intr_restore(psw);
}

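/*
 * Find the next pending interrupt after "last_irq", scanning the
 * pending registers in the fixed order bank 1, bank 2, bank 0 and
 * finally this core's ARM-local pending bits.  Returns the flat
 * interrupt number, or -1 if nothing is pending.
 */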
int
bcm_intc_get_next_irq(int last_irq)
{
	struct bcm_intc_softc *sc = bcm_intc;
	uint32_t pending;
	int32_t irq = last_irq + 1;

	/* Sanity check */
	if (irq < 0)
		irq = 0;

	/* We need to keep this order. */
	/* TODO: should we mask last_irq? */
	if (IS_IRQ_BANK1(irq)) {
		pending = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    INTC_PENDING_BANK1);
		if (pending == 0) {
			irq = BANK2_START;	/* skip to next bank */
		} else do {
			if (pending & (1 << IRQ_BANK1(irq)))
				return irq;
			irq++;
		} while (IS_IRQ_BANK1(irq));
	}
	if (IS_IRQ_BANK2(irq)) {
		pending = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    INTC_PENDING_BANK2);
		if (pending == 0) {
			irq = BANK0_START;	/* skip to next bank */
		} else do {
			if (pending & (1 << IRQ_BANK2(irq)))
				return irq;
			irq++;
		} while (IS_IRQ_BANK2(irq));
	}
	if (IS_IRQ_BANK0(irq)) {
		pending = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    INTC_PENDING_BANK0);
		if (pending == 0) {
			irq = LOCAL_START;	/* skip to next bank */
		} else do {
			if (pending & (1 << IRQ_BANK0(irq)))
				return irq;
			irq++;
		} while (IS_IRQ_BANK0(irq));
	}
	if (IS_IRQ_LOCAL(irq)) {
		pending = bus_space_read_4(sc->sc_iot, sc->sc_lioh,
		    ARM_LOCAL_INT_PENDING(cpu_number()));
		pending &= ARM_LOCAL_INT_PENDING_MASK;
		if (pending != 0) do {
			if (pending & (1 << IRQ_LOCAL(irq)))
				return irq;
			irq++;
		} while (IS_IRQ_LOCAL(irq));
	}
	return (-1);
}

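/*
 * Invoke a single handler, taking the kernel lock around handlers that
 * are not marked IPL_MPSAFE and run below IPL_SCHED, and credit its
 * event counter if it claims the interrupt.
 */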
void
bcm_intc_run_handler(struct intrhand *ih, void *frame, int s)
{
	int handled;
	void *arg;

#ifdef MULTIPROCESSOR
	int need_lock;

	if (ih->ih_flags & IPL_MPSAFE)
		need_lock = 0;
	else
		need_lock = s < IPL_SCHED;

	if (need_lock)
		KERNEL_LOCK();
#endif

	if (ih->ih_arg)
		arg = ih->ih_arg;
	else
		arg = frame;

	handled = ih->ih_func(arg);
	if (handled)
		ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
	if (need_lock)
		KERNEL_UNLOCK();
#endif
}

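/*
 * Main interrupt dispatch loop.  The primary CPU scans all banks;
 * secondary CPUs start at LOCAL_START and only see their ARM-local
 * interrupts.  The IPL is raised to the highest level registered on
 * the source and interrupts are re-enabled around each handler.
 */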
void
bcm_intc_irq_handler(void *frame)
{
	struct bcm_intc_softc *sc = bcm_intc;
	struct intrhand *ih;
	int irq, pri, s;

	irq = (cpu_number() == 0 ? 0 : LOCAL_START) - 1;
	while ((irq = bcm_intc_get_next_irq(irq)) != -1) {
#ifdef MULTIPROCESSOR
		if (irq == ARM_LOCAL_IRQ_MAILBOX(cpu_number())) {
			bcm_intc_handle_ipi();
			continue;
		}
#endif

		pri = sc->sc_handler[irq].is_irq;
		s = bcm_intc_splraise(pri);
		TAILQ_FOREACH(ih, &sc->sc_handler[irq].is_list, ih_list) {
			intr_enable();
			bcm_intc_run_handler(ih, frame, s);
			intr_disable();
		}
		bcm_intc_splx(s);
	}
}

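/*
 * FDT interrupt cells for this controller are <bank, irq-within-bank>;
 * translate them to a flat interrupt number before establishing.
 */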
void *
bcm_intc_intr_establish_fdt(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct bcm_intc_softc	*sc = (struct bcm_intc_softc *)cookie;
	int irq;

	irq = cell[1];
	if (cell[0] == 0)
		irq += BANK0_START;
	else if (cell[0] == 1)
		irq += BANK1_START;
	else if (cell[0] == 2)
		irq += BANK2_START;
	else if (cell[0] == 3)
		irq += LOCAL_START;
	else
		panic("%s: bogus interrupt type", sc->sc_dev.dv_xname);

	return bcm_intc_intr_establish(irq, level, ci, func, arg, name);
}

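/*
 * The ARM-local ("l1") controller uses a single cell that maps
 * directly onto the LOCAL_START range.
 */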
void *
l1_intc_intr_establish_fdt(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	int irq;

	irq = cell[0] + LOCAL_START;
	return bcm_intc_intr_establish(irq, level, ci, func, arg, name);
}

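/*
 * Register a handler for a flat interrupt number.  Requests to bind a
 * handler to a non-primary CPU are rejected; ARM-local interrupts are
 * initially routed to core 0.
 */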
void *
bcm_intc_intr_establish(int irqno, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, char *name)
{
	struct bcm_intc_softc *sc = bcm_intc;
	struct intrhand *ih;
	u_long psw;

	if (irqno < 0 || irqno >= INTC_NIRQ)
		panic("bcm_intc_intr_establish: bogus irqnumber %d: %s",
		     irqno, name);

	if (ci != NULL && !CPU_IS_PRIMARY(ci))
		return NULL;

	psw = intr_disable();

	ih = malloc(sizeof *ih, M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irqno;
	ih->ih_name = name;

	if (IS_IRQ_LOCAL(irqno))
		sc->sc_localcoremask[0] |= (1 << IRQ_LOCAL(irqno));

	TAILQ_INSERT_TAIL(&sc->sc_handler[irqno].is_list, ih, ih_list);

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

#ifdef DEBUG_INTC
	printf("%s irq %d level %d [%s]\n", __func__, irqno, level,
	    name);
#endif
	bcm_intc_calc_mask();

	intr_restore(psw);
	return (ih);
}

void
bcm_intc_intr_disestablish(void *cookie)
{
	struct bcm_intc_softc *sc = bcm_intc;
	struct intrhand *ih = cookie;
	int irqno = ih->ih_irq;
	u_long psw;

	psw = intr_disable();
	TAILQ_REMOVE(&sc->sc_handler[irqno].is_list, ih, ih_list);
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);
	intr_restore(psw);

	free(ih, M_DEVBUF, 0);
}

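/*
 * Route an established ARM-local interrupt to or away from a core and,
 * if that core is the current one, reprogram its timer and mailbox
 * enable registers immediately.
 */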
void
bcm_intc_intr_route(void *cookie, int enable, struct cpu_info *ci)
{
	struct bcm_intc_softc *sc = bcm_intc;
	struct intrhand *ih = cookie;
	int lirq = IRQ_LOCAL(ih->ih_irq);

	if (enable)
		sc->sc_localcoremask[ci->ci_cpuid] |= (1 << lirq);
	else
		sc->sc_localcoremask[ci->ci_cpuid] &= ~(1 << lirq);

	if (ci == curcpu()) {
		bus_space_write_4(sc->sc_iot, sc->sc_lioh,
		    ARM_LOCAL_INT_TIMER(cpu_number()),
		    sc->sc_imask[3][ci->ci_cpl] &
		    sc->sc_localcoremask[cpu_number()]);
#ifdef MULTIPROCESSOR
		bus_space_write_4(sc->sc_iot, sc->sc_lioh,
		    ARM_LOCAL_INT_MAILBOX(cpu_number()),
		    sc->sc_imask[3][ci->ci_cpl] &
		    sc->sc_localcoremask[cpu_number()]);
#endif
	}
}

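/*
 * Acknowledge the lowest pending IPI bit in this core's mailbox 0 and
 * dispatch it.
 */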
void
bcm_intc_handle_ipi(void)
{
	struct bcm_intc_softc *sc = bcm_intc;
	int cpuno = cpu_number();
	uint32_t mbox_val;
	int ipi;

	mbox_val = bus_space_read_4(sc->sc_iot, sc->sc_lioh,
		ARM_LOCAL_INT_MAILBOX_CLR(cpuno));
	ipi = ffs(mbox_val) - 1;
	bus_space_write_4(sc->sc_iot, sc->sc_lioh,
	    ARM_LOCAL_INT_MAILBOX_CLR(cpuno), 1 << ipi);
	switch (ipi) {
	case ARM_IPI_DDB:
		/* XXX */
#ifdef DDB
		db_enter();
#endif
		break;
	case ARM_IPI_NOP:
		break;
	}
}

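/*
 * Post an IPI to "ci" by setting the corresponding bit in its
 * mailbox 0; the barrier orders prior stores before the mailbox write.
 */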
void
bcm_intc_send_ipi(struct cpu_info *ci, int id)
{
	struct bcm_intc_softc *sc = bcm_intc;

	__asm volatile("dsb sy"); /* XXX */

	bus_space_write_4(sc->sc_iot, sc->sc_lioh,
	    ARM_LOCAL_INT_MAILBOX_SET(ci->ci_cpuid), 1 << id);
}