xref: /netbsd-src/sys/arch/arm/cortex/gic.c (revision 66e19b5cbfb62068c8d9de31b4dd8a5010c405c9)
1 /*	$NetBSD: gic.c,v 1.57 2023/10/05 12:30:59 riastradh Exp $	*/
2 /*-
3  * Copyright (c) 2012 The NetBSD Foundation, Inc.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to The NetBSD Foundation
7  * by Matt Thomas of 3am Software Foundry.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include "opt_ddb.h"
32 #include "opt_multiprocessor.h"
33 
34 #define _INTR_PRIVATE
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: gic.c,v 1.57 2023/10/05 12:30:59 riastradh Exp $");
38 
39 #include <sys/param.h>
40 #include <sys/bus.h>
41 #include <sys/cpu.h>
42 #include <sys/device.h>
43 #include <sys/evcnt.h>
44 #include <sys/intr.h>
45 #include <sys/proc.h>
46 #include <sys/atomic.h>
47 
48 #include <arm/armreg.h>
49 #include <arm/cpufunc.h>
50 #include <arm/locore.h>
51 
52 #include <arm/cortex/gic_reg.h>
53 #include <arm/cortex/mpcore_var.h>
54 
55 void armgic_irq_handler(void *);
56 
57 #define	ARMGIC_SGI_IPIBASE	0
58 
59 /*
60  * SGIs 8-16 are reserved for use by ARM Trusted Firmware.
61  */
62 __CTASSERT(ARMGIC_SGI_IPIBASE + NIPI <= 8);
63 
64 static int armgic_match(device_t, cfdata_t, void *);
65 static void armgic_attach(device_t, device_t, void *);
66 
67 static void armgic_set_priority(struct pic_softc *, int);
68 static void armgic_unblock_irqs(struct pic_softc *, size_t, uint32_t);
69 static void armgic_block_irqs(struct pic_softc *, size_t, uint32_t);
70 static void armgic_establish_irq(struct pic_softc *, struct intrsource *);
71 #if 0
72 static void armgic_source_name(struct pic_softc *, int, char *, size_t);
73 #endif
74 
75 #ifdef MULTIPROCESSOR
76 static void armgic_cpu_init(struct pic_softc *, struct cpu_info *);
77 static void armgic_ipi_send(struct pic_softc *, const kcpuset_t *, u_long);
78 static void armgic_get_affinity(struct pic_softc *, size_t, kcpuset_t *);
79 static int armgic_set_affinity(struct pic_softc *, size_t, const kcpuset_t *);
80 #endif
81 
/*
 * Hooks through which the generic ARM PIC framework drives this GIC.
 */
static const struct pic_ops armgic_picops = {
	.pic_unblock_irqs = armgic_unblock_irqs,
	.pic_block_irqs = armgic_block_irqs,
	.pic_establish_irq = armgic_establish_irq,
#if 0
	.pic_source_name = armgic_source_name,
#endif
	.pic_set_priority = armgic_set_priority,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = armgic_cpu_init,
	.pic_ipi_send = armgic_ipi_send,
	.pic_get_affinity = armgic_get_affinity,
	.pic_set_affinity = armgic_set_affinity,
#endif
};
97 
98 #define	PICTOSOFTC(pic)		((struct armgic_softc *)(pic))
99 
static struct armgic_softc {
	struct pic_softc sc_pic;	/* must be first: PICTOSOFTC() casts */
	device_t sc_dev;
	bus_space_tag_t sc_memt;
	bus_space_handle_t sc_gicch;	/* CPU interface (GICC) registers */
	bus_space_handle_t sc_gicdh;	/* distributor (GICD) registers */
	size_t sc_gic_lines;		/* count of implemented (valid) lines */
	uint32_t sc_gic_type;		/* cached GICD_TYPER */
	uint32_t sc_gic_valid_lines[1024/32];	/* bitmap of real sources */
	uint32_t sc_enabled_local;	/* group-0 (banked SGI/PPI) enables */
#ifdef MULTIPROCESSOR
	uint32_t sc_target[MAXCPUS];	/* per-CPU interface target mask */
	uint32_t sc_mptargets;		/* union of all discovered targets */
#endif
	uint32_t sc_bptargets;		/* boot processor's target mask */
} armgic_softc = {
	.sc_pic = {
		.pic_ops = &armgic_picops,
		.pic_name = "armgic",
	},
};
121 
122 static struct intrsource armgic_dummy_source;
123 
124 __CTASSERT(NIPL == 8);
125 
126 /*
127  * GIC register are always in little-endian.  It is assumed the bus_space
128  * will do any endian conversion required.
129  */
130 static inline uint32_t
gicc_read(struct armgic_softc * sc,bus_size_t o)131 gicc_read(struct armgic_softc *sc, bus_size_t o)
132 {
133 	return bus_space_read_4(sc->sc_memt, sc->sc_gicch, o);
134 }
135 
136 static inline void
gicc_write(struct armgic_softc * sc,bus_size_t o,uint32_t v)137 gicc_write(struct armgic_softc *sc, bus_size_t o, uint32_t v)
138 {
139 	bus_space_write_4(sc->sc_memt, sc->sc_gicch, o, v);
140 }
141 
142 static inline uint32_t
gicd_read(struct armgic_softc * sc,bus_size_t o)143 gicd_read(struct armgic_softc *sc, bus_size_t o)
144 {
145 	return bus_space_read_4(sc->sc_memt, sc->sc_gicdh, o);
146 }
147 
148 static inline void
gicd_write(struct armgic_softc * sc,bus_size_t o,uint32_t v)149 gicd_write(struct armgic_softc *sc, bus_size_t o, uint32_t v)
150 {
151 	bus_space_write_4(sc->sc_memt, sc->sc_gicdh, o, v);
152 }
153 
154 static uint32_t
gicd_find_targets(struct armgic_softc * sc)155 gicd_find_targets(struct armgic_softc *sc)
156 {
157 	uint32_t targets = 0;
158 
159 	/*
160 	 * GICD_ITARGETSR0 through 7 are read-only, and each field returns
161 	 * a value that corresponds only to the processor reading the
162 	 * register. Use this to determine the current processor's
163 	 * CPU interface number.
164 	 */
165 	for (int i = 0; i < 8; i++) {
166 		targets = gicd_read(sc, GICD_ITARGETSRn(i));
167 		if (targets != 0)
168 			break;
169 	}
170 	targets |= (targets >> 16);
171 	targets |= (targets >> 8);
172 	targets &= 0xff;
173 
174 	return targets ? targets : 1;
175 }
176 
177 /*
178  * In the GIC prioritization scheme, lower numbers have higher priority.
179  * Only write priorities that could be non-secure.
180  */
181 static inline uint32_t
armgic_ipl_to_priority(int ipl)182 armgic_ipl_to_priority(int ipl)
183 {
184 	return GICC_PMR_NONSECURE
185 	    | ((IPL_HIGH - ipl) * GICC_PMR_NS_PRIORITIES / NIPL);
186 }
187 
#if 0
/*
 * Inverse of armgic_ipl_to_priority(); currently unused and kept under
 * "#if 0" for reference only.
 */
static inline int
armgic_priority_to_ipl(uint32_t priority)
{
	return IPL_HIGH
	    - (priority & ~GICC_PMR_NONSECURE) * NIPL / GICC_PMR_NS_PRIORITIES;
}
#endif
196 
197 static void
armgic_unblock_irqs(struct pic_softc * pic,size_t irq_base,uint32_t irq_mask)198 armgic_unblock_irqs(struct pic_softc *pic, size_t irq_base, uint32_t irq_mask)
199 {
200 	struct armgic_softc * const sc = PICTOSOFTC(pic);
201 	const size_t group = irq_base / 32;
202 
203 	if (group == 0)
204 		sc->sc_enabled_local |= irq_mask;
205 
206 	gicd_write(sc, GICD_ISENABLERn(group), irq_mask);
207 }
208 
209 static void
armgic_block_irqs(struct pic_softc * pic,size_t irq_base,uint32_t irq_mask)210 armgic_block_irqs(struct pic_softc *pic, size_t irq_base, uint32_t irq_mask)
211 {
212 	struct armgic_softc * const sc = PICTOSOFTC(pic);
213 	const size_t group = irq_base / 32;
214 
215 	if (group == 0)
216 		sc->sc_enabled_local &= ~irq_mask;
217 
218 	gicd_write(sc, GICD_ICENABLERn(group), irq_mask);
219 }
220 
/*
 * Change the current CPU's interrupt priority level.  Lowering the
 * level also lowers the hardware priority mask (PMR); raising it only
 * records the new ci_cpl, leaving PMR to be raised lazily on the next
 * interrupt (see armgic_irq_handler()).
 */
static void
armgic_set_priority(struct pic_softc *pic, int ipl)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);
	struct cpu_info * const ci = curcpu();

	while (ipl < ci->ci_hwpl) {
		/* Lowering priority mask */
		ci->ci_hwpl = ipl;
		__insn_barrier();	/* order ci_hwpl store before PMR write */
		gicc_write(sc, GICC_PMR, armgic_ipl_to_priority(ipl));
	}
	__insn_barrier();
	ci->ci_cpl = ipl;
}
236 
237 #ifdef MULTIPROCESSOR
238 static void
armgic_get_affinity(struct pic_softc * pic,size_t irq,kcpuset_t * affinity)239 armgic_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
240 {
241 	struct armgic_softc * const sc = PICTOSOFTC(pic);
242 	const size_t group = irq / 32;
243 	int n;
244 
245 	kcpuset_zero(affinity);
246 	if (group == 0) {
247 		/* All CPUs are targets for group 0 (SGI/PPI) */
248 		for (n = 0; n < MAXCPUS; n++) {
249 			if (sc->sc_target[n] != 0)
250 				kcpuset_set(affinity, n);
251 		}
252 	} else {
253 		/* Find distributor targets (SPI) */
254 		const u_int byte_shift = 8 * (irq & 3);
255 		const bus_size_t targets_reg = GICD_ITARGETSRn(irq / 4);
256 		const uint32_t targets = gicd_read(sc, targets_reg);
257 		const uint32_t targets_val = (targets >> byte_shift) & 0xff;
258 
259 		for (n = 0; n < MAXCPUS; n++) {
260 			if (sc->sc_target[n] & targets_val)
261 				kcpuset_set(affinity, n);
262 		}
263 	}
264 }
265 
266 static int
armgic_set_affinity(struct pic_softc * pic,size_t irq,const kcpuset_t * affinity)267 armgic_set_affinity(struct pic_softc *pic, size_t irq,
268     const kcpuset_t *affinity)
269 {
270 	struct armgic_softc * const sc = PICTOSOFTC(pic);
271 	const size_t group = irq / 32;
272 	if (group == 0)
273 		return EINVAL;
274 
275 	const u_int byte_shift = 8 * (irq & 3);
276 	const bus_size_t targets_reg = GICD_ITARGETSRn(irq / 4);
277 	uint32_t targets_val = 0;
278 	int n;
279 
280 	for (n = 0; n < MAXCPUS; n++) {
281 		if (kcpuset_isset(affinity, n))
282 			targets_val |= sc->sc_target[n];
283 	}
284 
285 	uint32_t targets = gicd_read(sc, targets_reg);
286 	targets &= ~(0xff << byte_shift);
287 	targets |= (targets_val << byte_shift);
288 	gicd_write(sc, targets_reg, targets);
289 
290 	return 0;
291 }
292 #endif
293 
#ifdef __HAVE_PIC_FAST_SOFTINTS
/*
 * Record the softint lwp for this level and precompute the GICD_SGIR
 * value that raises the matching SGI on the local CPU only
 * (TargetListFilter "me"); softint_trigger() just writes it.
 */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep_p)
{
	lwp_t **lp = &l->l_cpu->ci_softlwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;
	/*
	 * Really easy.  Just tell it to trigger the local CPU.
	 */
	*machdep_p = GICD_SGIR_TargetListFilter_Me
	    | __SHIFTIN(level, GICD_SGIR_SGIINTID);
}

/*
 * Trigger a software interrupt by writing the value precomputed in
 * softint_init_md() to the distributor's SGI register.
 */
void
softint_trigger(uintptr_t machdep)
{

	gicd_write(&armgic_softc, GICD_SGIR, machdep);
}
#endif
315 
/*
 * IRQ exception entry point: acknowledge and dispatch pending GIC
 * interrupts in a loop, then run any interrupts that were deferred
 * while the IPL was raised, restoring the entry IPL on return.
 */
void
armgic_irq_handler(void *tf)
{
	struct cpu_info * const ci = curcpu();
	struct armgic_softc * const sc = &armgic_softc;
	const int old_ipl = ci->ci_cpl;
	/* Saved for the exit-time sanity KASSERTs below. */
	const int old_mtx_count = ci->ci_mtx_count;
	const int old_l_blcnt = ci->ci_curlwp->l_blcnt;
#ifdef DEBUG
	size_t n = 0;
#endif

	ci->ci_data.cpu_nintr++;

	/*
	 * Raise ci_hwpl (and PMR) to ci_cpl and IAR will tell us if the
	 * interrupt that got us here can have its handler run or not.
	 */
	if (ci->ci_hwpl <= old_ipl) {
		ci->ci_hwpl = old_ipl;
		gicc_write(sc, GICC_PMR, armgic_ipl_to_priority(old_ipl));
		/*
		 * we'll get no interrupts when PMR is IPL_HIGH, so bail
		 * early.
		 */
		if (old_ipl == IPL_HIGH) {
			return;
		}
	}

	for (;;) {
		uint32_t iar = gicc_read(sc, GICC_IAR);
		uint32_t irq = __SHIFTOUT(iar, GICC_IAR_IRQ);

		/*
		 * A spurious ID means nothing deliverable is pending;
		 * re-read IAR once before concluding that and leaving
		 * the dispatch loop.
		 */
		if (irq == GICC_IAR_IRQ_SPURIOUS ||
		    irq == GICC_IAR_IRQ_SSPURIOUS) {
			iar = gicc_read(sc, GICC_IAR);
			irq = __SHIFTOUT(iar, GICC_IAR_IRQ);
			if (irq == GICC_IAR_IRQ_SPURIOUS)
				break;
			if (irq == GICC_IAR_IRQ_SSPURIOUS) {
				break;
			}
		}

		KASSERTMSG(old_ipl != IPL_HIGH, "old_ipl %d pmr %#x hppir %#x",
		    old_ipl, gicc_read(sc, GICC_PMR), gicc_read(sc, GICC_HPPIR));

		//const uint32_t cpuid = __SHIFTOUT(iar, GICC_IAR_CPUID_MASK);
		struct intrsource * const is = sc->sc_pic.pic_sources[irq];
		KASSERT(is != &armgic_dummy_source);

		/*
		 * GIC has asserted IPL for us so we can just update ci_cpl.
		 *
		 * But it's not that simple.  We may have already bumped ci_cpl
		 * due to a high priority interrupt and now we are about to
		 * dispatch one lower than the previous.  It's possible for
		 * that previous interrupt to have deferred some interrupts
		 * so we need deal with those when lowering to the current
		 * interrupt's ipl.
		 *
		 * However, if are just raising ipl, we can just update ci_cpl.
		 */

		/* Surely we can KASSERT(ipl < ci->ci_cpl); */
		const int ipl = is->is_ipl;
		if (__predict_false(ipl < ci->ci_cpl)) {
			/* Lowering: run anything deferred above this ipl. */
			pic_do_pending_ints(I32_bit, ipl, tf);
			KASSERT(ci->ci_cpl == ipl);
		} else if (ci->ci_cpl != ipl) {
			/* Raising: just track it in PMR and ci_cpl/ci_hwpl. */
			KASSERTMSG(ipl > ci->ci_cpl, "ipl %d cpl %d hw-ipl %#x",
			    ipl, ci->ci_cpl,
			    gicc_read(sc, GICC_PMR));
			gicc_write(sc, GICC_PMR, armgic_ipl_to_priority(ipl));
			ci->ci_hwpl = ci->ci_cpl = ipl;
		}
		/* Run the handler with IRQs enabled, then signal EOI. */
		ENABLE_INTERRUPT();
		pic_dispatch(is, tf);
		DISABLE_INTERRUPT();
		gicc_write(sc, GICC_EOIR, iar);
#ifdef DEBUG
		n++;
		KDASSERTMSG(n < 5, "%s: processed too many (%zu)",
		    ci->ci_data.cpu_name, n);
#endif
	}

	/*
	 * Now handle any pending ints.
	 */
	pic_do_pending_ints(I32_bit, old_ipl, tf);
	KASSERTMSG(ci->ci_cpl == old_ipl, "ci_cpl %d old_ipl %d", ci->ci_cpl, old_ipl);
	KASSERT(old_mtx_count == ci->ci_mtx_count);
	KASSERT(old_l_blcnt == ci->ci_curlwp->l_blcnt);
}
412 
/*
 * pic_ops hook: program the distributor for a newly-established
 * interrupt source — routing (SPIs only), trigger type, and priority.
 */
void
armgic_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);
	const size_t group = is->is_irq / 32;
	const u_int irq = is->is_irq & 31;
	/* byte slot within the 4-IRQ ITARGETSR/IPRIORITYR registers */
	const u_int byte_shift = 8 * (irq & 3);
	/* 2-bit slot within the 16-IRQ ICFGR register */
	const u_int twopair_shift = 2 * (irq & 15);

	KASSERTMSG(sc->sc_gic_valid_lines[group] & __BIT(irq),
	    "irq %u: not valid (group[%zu]=0x%08x [0x%08x])",
	    is->is_irq, group, sc->sc_gic_valid_lines[group],
	    (uint32_t)__BIT(irq));

	KASSERTMSG(is->is_type == IST_LEVEL || is->is_type == IST_EDGE,
	    "irq %u: type %u unsupported", is->is_irq, is->is_type);

	const bus_size_t targets_reg = GICD_ITARGETSRn(is->is_irq / 4);
	const bus_size_t cfg_reg = GICD_ICFGRn(is->is_irq / 16);
	uint32_t targets = gicd_read(sc, targets_reg);
	uint32_t cfg = gicd_read(sc, cfg_reg);

	if (group > 0) {
		/*
		 * There are 4 irqs per TARGETS register.  For now bind
		 * to the primary cpu.
		 */
		targets &= ~(0xffU << byte_shift);
#if 0
#ifdef MULTIPROCESSOR
		if (is->is_mpsafe) {
			targets |= sc->sc_mptargets << byte_shift;
		} else
#endif
#endif
		targets |= sc->sc_bptargets << byte_shift;
		gicd_write(sc, targets_reg, targets);

		/*
		 * There are 16 irqs per CFG register.  10=EDGE 00=LEVEL
		 * Only rewrite ICFGR when the trigger type actually
		 * changes.
		 */
		uint32_t new_cfg = cfg;
		uint32_t old_cfg = (cfg >> twopair_shift) & __BITS(1, 0);
		if (is->is_type == IST_LEVEL && (old_cfg & __BIT(1)) != 0) {
			new_cfg &= ~(__BITS(1, 0) << twopair_shift);
		} else if (is->is_type == IST_EDGE && (old_cfg & 2) == 0) {
			new_cfg |= __BIT(1) << twopair_shift;
		}
		if (new_cfg != cfg) {
			gicd_write(sc, cfg_reg, new_cfg);
		}
#ifdef MULTIPROCESSOR
	} else {
		/*
		 * All group 0 interrupts are per processor and MPSAFE by
		 * default.
		 */
		is->is_mpsafe = true;
		is->is_percpu = true;
#endif
	}

	/*
	 * There are 4 irqs per PRIORITY register.  Map the IPL
	 * to GIC priority.
	 */
	const bus_size_t priority_reg = GICD_IPRIORITYRn(is->is_irq / 4);
	uint32_t priority = gicd_read(sc, priority_reg);
	priority &= ~(0xffU << byte_shift);
	priority |= armgic_ipl_to_priority(is->is_ipl) << byte_shift;
	gicd_write(sc, priority_reg, priority);
}
485 
486 #ifdef MULTIPROCESSOR
487 static void
armgic_cpu_init_priorities(struct armgic_softc * sc)488 armgic_cpu_init_priorities(struct armgic_softc *sc)
489 {
490 	/* Set lowest priority, i.e. disable interrupts */
491 	for (size_t i = 0; i < sc->sc_pic.pic_maxsources; i += 4) {
492 		const bus_size_t priority_reg = GICD_IPRIORITYRn(i / 4);
493 		gicd_write(sc, priority_reg, ~0);
494 	}
495 }
496 
497 static void
armgic_cpu_update_priorities(struct armgic_softc * sc)498 armgic_cpu_update_priorities(struct armgic_softc *sc)
499 {
500 	uint32_t enabled = sc->sc_enabled_local;
501 	for (size_t i = 0; i < sc->sc_pic.pic_maxsources; i += 4, enabled >>= 4) {
502 		const bus_size_t priority_reg = GICD_IPRIORITYRn(i / 4);
503 		uint32_t priority = gicd_read(sc, priority_reg);
504 		uint32_t byte_mask = 0xff;
505 		size_t byte_shift = 0;
506 		for (size_t j = 0; j < 4; j++, byte_mask <<= 8, byte_shift += 8) {
507 			struct intrsource * const is = sc->sc_pic.pic_sources[i+j];
508 			priority |= byte_mask;
509 			if (is == NULL || is == &armgic_dummy_source)
510 				continue;
511 			priority &= ~byte_mask;
512 			priority |= armgic_ipl_to_priority(is->is_ipl) << byte_shift;
513 		}
514 		gicd_write(sc, priority_reg, priority);
515 	}
516 }
517 
/*
 * Re-route every established MP-safe SPI's target byte.  With the
 * "#if 0" alternative disabled, targets are ORed with the boot
 * processor's mask only (not the full set of discovered CPUs).
 */
static void
armgic_cpu_init_targets(struct armgic_softc *sc)
{
	/*
	 * Update the mpsafe targets
	 */
	for (size_t irq = 32; irq < sc->sc_pic.pic_maxsources; irq++) {
		struct intrsource * const is = sc->sc_pic.pic_sources[irq];
		const bus_size_t targets_reg = GICD_ITARGETSRn(irq / 4);
		if (is != NULL && is->is_mpsafe) {
			const u_int byte_shift = 8 * (irq & 3);
			uint32_t targets = gicd_read(sc, targets_reg);
#if 0
			targets |= sc->sc_mptargets << byte_shift;
#else
			targets |= sc->sc_bptargets << byte_shift;
#endif
			gicd_write(sc, targets_reg, targets);
		}
	}
}
539 
/*
 * Per-CPU GIC initialization (pic_cpu_init hook; also called for the
 * boot CPU from armgic_attach()).  Records this CPU's interface
 * number, programs banked priorities/enables, then enables the CPU
 * interface and IRQ exceptions.
 */
void
armgic_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);
	sc->sc_target[cpu_index(ci)] = gicd_find_targets(sc);
	atomic_or_32(&sc->sc_mptargets, sc->sc_target[cpu_index(ci)]);
	KASSERTMSG(ci->ci_cpl == IPL_HIGH, "ipl %d not IPL_HIGH", ci->ci_cpl);
	armgic_cpu_init_priorities(sc);
	if (!CPU_IS_PRIMARY(ci)) {
		/* Re-route MP-safe SPIs once >1 CPU interface is known. */
		if (popcount(sc->sc_mptargets) != 1) {
			armgic_cpu_init_targets(sc);
		}
		/* Replay the banked (group 0) enables recorded so far. */
		if (sc->sc_enabled_local) {
			armgic_cpu_update_priorities(sc);
			gicd_write(sc, GICD_ISENABLERn(0),
			    sc->sc_enabled_local);
		}
	}
	ci->ci_hwpl = ci->ci_cpl;
	gicc_write(sc, GICC_PMR, armgic_ipl_to_priority(ci->ci_cpl));	// set PMR
	gicc_write(sc, GICC_CTRL, GICC_CTRL_V1_Enable);	// enable interrupt
	ENABLE_INTERRUPT();				// allow IRQ exceptions
}
563 
564 void
armgic_ipi_send(struct pic_softc * pic,const kcpuset_t * kcp,u_long ipi)565 armgic_ipi_send(struct pic_softc *pic, const kcpuset_t *kcp, u_long ipi)
566 {
567 	struct armgic_softc * const sc = PICTOSOFTC(pic);
568 
569 #if 0
570 	if (ipi == IPI_NOP) {
571 		sev();
572 		return;
573 	}
574 #endif
575 
576 	uint32_t sgir = __SHIFTIN(ARMGIC_SGI_IPIBASE + ipi, GICD_SGIR_SGIINTID);
577 	if (kcp != NULL) {
578 		uint32_t targets_val = 0;
579 		for (int n = 0; n < MAXCPUS; n++) {
580 			if (kcpuset_isset(kcp, n))
581 				targets_val |= sc->sc_target[n];
582 		}
583 		sgir |= __SHIFTIN(targets_val, GICD_SGIR_TargetList);
584 		sgir |= GICD_SGIR_TargetListFilter_List;
585 	} else {
586 		if (ncpu == 1)
587 			return;
588 		sgir |= GICD_SGIR_TargetListFilter_NotMe;
589 	}
590 
591 	gicd_write(sc, GICD_SGIR, sgir);
592 }
593 #endif
594 
595 int
armgic_match(device_t parent,cfdata_t cf,void * aux)596 armgic_match(device_t parent, cfdata_t cf, void *aux)
597 {
598 	struct mpcore_attach_args * const mpcaa = aux;
599 
600 	if (strcmp(cf->cf_name, mpcaa->mpcaa_name) != 0)
601 		return 0;
602 
603 	return 1;
604 }
605 
/*
 * Attach the GIC: map the distributor (GICD) and CPU-interface (GICC)
 * register windows, probe which interrupt lines are implemented,
 * register the PIC with the framework, and establish the softint and
 * IPI sources.
 */
void
armgic_attach(device_t parent, device_t self, void *aux)
{
	struct armgic_softc * const sc = &armgic_softc;
	struct mpcore_attach_args * const mpcaa = aux;

	sc->sc_dev = self;
	device_set_private(self, sc);

	sc->sc_memt = mpcaa->mpcaa_memt;	/* provided for us */
	/* off1 = distributor window, off2 = CPU-interface window */
	bus_space_subregion(sc->sc_memt, mpcaa->mpcaa_memh, mpcaa->mpcaa_off1,
	    4096, &sc->sc_gicdh);
	bus_space_subregion(sc->sc_memt, mpcaa->mpcaa_memh, mpcaa->mpcaa_off2,
	    4096, &sc->sc_gicch);

	sc->sc_gic_type = gicd_read(sc, GICD_TYPER);
	sc->sc_pic.pic_maxsources = GICD_TYPER_LINES(sc->sc_gic_type);

	gicc_write(sc, GICC_CTRL, 0);	/* disable all interrupts */
	gicd_write(sc, GICD_CTRL, 0);	/* disable all interrupts */

	/*
	 * Write all-ones to PMR and read it back; the number of bits
	 * that stick determines how many priority levels exist.
	 */
	gicc_write(sc, GICC_PMR, 0xff);
	uint32_t pmr = gicc_read(sc, GICC_PMR);
	u_int priorities = 1 << popcount32(pmr);

	const uint32_t iidr = gicc_read(sc, GICC_IIDR);
	const int iidr_prod = __SHIFTOUT(iidr, GICC_IIDR_ProductID);
	const int iidr_arch = __SHIFTOUT(iidr, GICC_IIDR_ArchVersion);
	const int iidr_rev = __SHIFTOUT(iidr, GICC_IIDR_Revision);
	const int iidr_imp = __SHIFTOUT(iidr, GICC_IIDR_Implementer);

	/*
	 * Find the boot processor's CPU interface number.
	 */
	sc->sc_bptargets = gicd_find_targets(sc);

	/*
	 * Let's find out how many real sources we have.
	 */
	for (size_t i = 0, group = 0;
	     i < sc->sc_pic.pic_maxsources;
	     i += 32, group++) {
		/*
		 * To figure what sources are real, one enables all interrupts
		 * and then reads back the enable mask so which ones really
		 * got enabled.
		 */
		gicd_write(sc, GICD_ISENABLERn(group), 0xffffffff);
		uint32_t valid = gicd_read(sc, GICD_ISENABLERn(group));

		/*
		 * Now disable (clear enable) them again.
		 */
		gicd_write(sc, GICD_ICENABLERn(group), valid);

		/*
		 * Count how many are valid.
		 */
		sc->sc_gic_lines += popcount32(valid);
		sc->sc_gic_valid_lines[group] = valid;
	}

	aprint_normal(": Generic Interrupt Controller, "
	    "%zu sources (%zu valid)\n",
	    sc->sc_pic.pic_maxsources, sc->sc_gic_lines);
	aprint_debug_dev(sc->sc_dev, "Architecture version %d"
	    " (0x%x:%d rev %d)\n", iidr_arch, iidr_imp, iidr_prod,
	    iidr_rev);

#ifdef MULTIPROCESSOR
	sc->sc_pic.pic_cpus = kcpuset_running;
#endif
	pic_add(&sc->sc_pic, 0);

	/*
	 * Force the GICD to IPL_HIGH and then enable interrupts.
	 */
	struct cpu_info * const ci = curcpu();
	KASSERTMSG(ci->ci_cpl == IPL_HIGH, "ipl %d not IPL_HIGH", ci->ci_cpl);
	armgic_set_priority(&sc->sc_pic, ci->ci_cpl);	// set PMR
	gicd_write(sc, GICD_CTRL, GICD_CTRL_Enable);	// enable Distributer
	gicc_write(sc, GICC_CTRL, GICC_CTRL_V1_Enable);	// enable CPU interrupts
	ENABLE_INTERRUPT();				// allow interrupt exceptions

	/*
	 * For each line that isn't valid, we set the intrsource for it to
	 * point at a dummy source so that pic_intr_establish will fail for it.
	 */
	for (size_t i = 0, group = 0;
	     i < sc->sc_pic.pic_maxsources;
	     i += 32, group++) {
		uint32_t invalid = ~sc->sc_gic_valid_lines[group];
		for (size_t j = 0; invalid && j < 32; j++, invalid >>= 1) {
			if (invalid & 1) {
				sc->sc_pic.pic_sources[i + j] =
				     &armgic_dummy_source;
			}
		}
	}
#ifdef __HAVE_PIC_FAST_SOFTINTS
	intr_establish_xname(SOFTINT_BIO, IPL_SOFTBIO, IST_MPSAFE | IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_BIO, "softint bio");
	intr_establish_xname(SOFTINT_CLOCK, IPL_SOFTCLOCK, IST_MPSAFE | IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_CLOCK, "softint clock");
	intr_establish_xname(SOFTINT_NET, IPL_SOFTNET, IST_MPSAFE | IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_NET, "softint net");
	intr_establish_xname(SOFTINT_SERIAL, IPL_SOFTSERIAL, IST_MPSAFE | IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_SERIAL, "softint serial");
#endif
#ifdef MULTIPROCESSOR
	armgic_cpu_init(&sc->sc_pic, curcpu());

	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_AST, IPL_VM,
	    IST_MPSAFE | IST_EDGE, pic_ipi_ast, (void *)-1, "IPI ast");
	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_XCALL, IPL_HIGH,
	    IST_MPSAFE | IST_EDGE, pic_ipi_xcall, (void *)-1, "IPI xcall");
	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_GENERIC, IPL_HIGH,
	    IST_MPSAFE | IST_EDGE, pic_ipi_generic, (void *)-1, "IPI generic");
	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_NOP, IPL_VM,
	    IST_MPSAFE | IST_EDGE, pic_ipi_nop, (void *)-1, "IPI nop");
	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_SHOOTDOWN, IPL_SCHED,
	    IST_MPSAFE | IST_EDGE, pic_ipi_shootdown, (void *)-1, "IPI shootdown");
#ifdef DDB
	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_DDB, IPL_HIGH,
	    IST_MPSAFE | IST_EDGE, pic_ipi_ddb, NULL, "IPI ddb");
#endif
#ifdef __HAVE_PREEMPTION
	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_KPREEMPT, IPL_VM,
	    IST_MPSAFE | IST_EDGE, pic_ipi_kpreempt, (void *)-1, "IPI kpreempt");
#endif
#endif

	/* Summarize: lines 0-15 of group 0 are SGIs, 16-31 are PPIs. */
	const u_int ppis = popcount32(sc->sc_gic_valid_lines[0] >> 16);
	const u_int sgis = popcount32(sc->sc_gic_valid_lines[0] & 0xffff);
	aprint_normal_dev(sc->sc_dev, "%u Priorities, %zu SPIs, %u PPIs, "
	    "%u SGIs\n",  priorities, sc->sc_gic_lines - ppis - sgis, ppis,
	    sgis);
}
744 
/* Register the driver with autoconf (no detach/activate support). */
CFATTACH_DECL_NEW(armgic, 0,
    armgic_match, armgic_attach, NULL, NULL);
747