/*	$OpenBSD: ampintc.c,v 1.32 2024/07/06 10:39:50 jsg Exp $	*/
/*
 * Copyright (c) 2007,2009,2011 Dale Rahn <drahn@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This driver implements the interrupt controller described in the
 * Cortex-A9 MPCore TRM (DDI0407E), which builds on the GIC
 * architecture specification v1.0 (IHI0048A).
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/evcount.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <dev/ofw/fdt.h>
#include <dev/ofw/openfirm.h>

#include <machine/simplebusvar.h>

/* registers */
#define ICD_DCR			0x000
#define ICD_DCR_ES		0x00000001
#define ICD_DCR_ENS		0x00000002

#define ICD_ICTR		0x004
#define ICD_ICTR_LSPI_SH	11
#define ICD_ICTR_LSPI_M		0x1f
#define ICD_ICTR_CPU_SH		5
#define ICD_ICTR_CPU_M		0x07
#define ICD_ICTR_ITL_SH		0
#define ICD_ICTR_ITL_M		0x1f
#define ICD_IDIR		0x008
#define ICD_DIR_PROD_SH		24
#define ICD_DIR_PROD_M		0xff
#define ICD_DIR_REV_SH		12
#define ICD_DIR_REV_M		0xfff
#define ICD_DIR_IMP_SH		0
#define ICD_DIR_IMP_M		0xfff

#define IRQ_TO_REG32(i)		(((i) >> 5) & 0x1f)
#define IRQ_TO_REG32BIT(i)	((i) & 0x1f)
#define IRQ_TO_REG4(i)		(((i) >> 2) & 0xff)
#define IRQ_TO_REG4BIT(i)	((i) & 0x3)
#define IRQ_TO_REG16(i)		(((i) >> 4) & 0x3f)
#define IRQ_TO_REG16BIT(i)	((i) & 0xf)
#define IRQ_TO_REGBIT_S(i)	8
#define IRQ_TO_REG4BIT_M(i)	8

#define ICD_ISRn(i)		(0x080 + (IRQ_TO_REG32(i) * 4))
#define ICD_ISERn(i)		(0x100 + (IRQ_TO_REG32(i) * 4))
#define ICD_ICERn(i)		(0x180 + (IRQ_TO_REG32(i) * 4))
#define ICD_ISPRn(i)		(0x200 + (IRQ_TO_REG32(i) * 4))
#define ICD_ICPRn(i)		(0x280 + (IRQ_TO_REG32(i) * 4))
#define ICD_ABRn(i)		(0x300 + (IRQ_TO_REG32(i) * 4))
#define ICD_IPRn(i)		(0x400 + (i))
#define ICD_IPTRn(i)		(0x800 + (i))
#define ICD_ICRn(i)		(0xC00 + (IRQ_TO_REG16(i) * 4))
#define ICD_ICR_TRIG_LEVEL(i)	(0x0 << (IRQ_TO_REG16BIT(i) * 2))
#define ICD_ICR_TRIG_EDGE(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))
#define ICD_ICR_TRIG_MASK(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))
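
/*
 * Example of the register math above: for SPI 45, IRQ_TO_REG32(45)
 * is 1 and IRQ_TO_REG32BIT(45) is 13, so ICD_ISERn(45) is the 32-bit
 * set-enable register at 0x104 and the interrupt is bit 13 within it.
 * The byte-wide priority and target registers index directly by
 * interrupt number, e.g. ICD_IPRn(45) is 0x42d.
 */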

/*
 * what about (ppi|spi)_status
 */
#define ICD_PPI			0xD00
#define ICD_PPI_GTIMER		(1 << 11)
#define ICD_PPI_FIQ		(1 << 12)
#define ICD_PPI_PTIMER		(1 << 13)
#define ICD_PPI_PWDOG		(1 << 14)
#define ICD_PPI_IRQ		(1 << 15)
#define ICD_SPI_BASE		0xD04
#define ICD_SPIn(i)		(ICD_SPI_BASE + ((i) * 4))


#define ICD_SGIR		0xF00
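/*
 * GICv1/v2 SGI register layout: bits [25:24] are the target list
 * filter (0 selects the CPU target list), bits [23:16] the CPU target
 * list, and bits [3:0] the SGI interrupt ID.  ampintc_send_ipi()
 * below builds its value this way.
 */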

#define ICD_PERIPH_ID_0		0xFD0
#define ICD_PERIPH_ID_1		0xFD4
#define ICD_PERIPH_ID_2		0xFD8
#define ICD_PERIPH_ID_3		0xFDC
#define ICD_PERIPH_ID_4		0xFE0
#define ICD_PERIPH_ID_5		0xFE4
#define ICD_PERIPH_ID_6		0xFE8
#define ICD_PERIPH_ID_7		0xFEC

#define ICD_COMP_ID_0		0xFF0
#define ICD_COMP_ID_1		0xFF4
#define ICD_COMP_ID_2		0xFF8
#define ICD_COMP_ID_3		0xFFC


#define ICPICR			0x00
#define ICPIPMR			0x04
/* XXX - must left justify bits to 0 - 7 */
#define ICMIPMR_SH		4
#define ICPBPR			0x08
#define ICPIAR			0x0C
#define ICPIAR_IRQ_SH		0
#define ICPIAR_IRQ_M		0x3ff
#define ICPIAR_CPUID_SH		10
#define ICPIAR_CPUID_M		0x7
#define ICPIAR_NO_PENDING_IRQ	ICPIAR_IRQ_M
#define ICPEOIR			0x10
#define ICPPRP			0x14
#define ICPHPIR			0x18
#define ICPIIR			0xFC

/*
 * what about periph_id and component_id
 */

#define IRQ_ENABLE	1
#define IRQ_DISABLE	0

struct ampintc_softc {
	struct simplebus_softc	 sc_sbus;
	struct intrq		*sc_handler;
	int			 sc_nintr;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_d_ioh, sc_p_ioh;
	uint8_t			 sc_cpu_mask[ICD_ICTR_CPU_M + 1];
	struct evcount		 sc_spur;
	struct interrupt_controller sc_ic;
	int			 sc_ipi_reason[ICD_ICTR_CPU_M + 1];
	int			 sc_ipi_num[3];
};
struct ampintc_softc *ampintc;


struct intrhand {
	TAILQ_ENTRY(intrhand) ih_list;	/* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_flags;
	int ih_irq;			/* IRQ number */
	struct evcount	ih_count;
	char *ih_name;
	struct cpu_info *ih_ci;		/* CPU the IRQ runs on */
};

struct intrq {
	TAILQ_HEAD(, intrhand) iq_list;	/* handler list */
	struct cpu_info *iq_ci;		/* CPU the IRQ runs on */
	int iq_irq_max;			/* IRQ to mask while handling */
	int iq_irq_min;			/* lowest IRQ when shared */
	int iq_ist;			/* share type */
};


int		 ampintc_match(struct device *, void *, void *);
void		 ampintc_attach(struct device *, struct device *, void *);
int		 ampintc_activate(struct device *, int);
void		 ampintc_init(struct ampintc_softc *);
void		 ampintc_cpuinit(void);
int		 ampintc_spllower(int);
void		 ampintc_splx(int);
int		 ampintc_splraise(int);
void		 ampintc_setipl(int);
void		 ampintc_calc_mask(void);
void		 ampintc_calc_irq(struct ampintc_softc *, int);
void		*ampintc_intr_establish(int, int, int, struct cpu_info *,
		    int (*)(void *), void *, char *);
void		*ampintc_intr_establish_fdt(void *, int *, int,
		    struct cpu_info *, int (*)(void *), void *, char *);
void		 ampintc_intr_disestablish(void *);
void		 ampintc_irq_handler(void *);
uint32_t	 ampintc_iack(void);
void		 ampintc_eoi(uint32_t);
void		 ampintc_set_priority(int, int);
void		 ampintc_intr_enable(int);
void		 ampintc_intr_disable(int);
void		 ampintc_intr_config(int, int);
void		 ampintc_route(int, int, struct cpu_info *);
void		 ampintc_route_irq(void *, int, struct cpu_info *);
void		 ampintc_intr_barrier(void *);

int		 ampintc_ipi_combined(void *);
int		 ampintc_ipi_nop(void *);
int		 ampintc_ipi_ddb(void *);
int		 ampintc_ipi_halt(void *);
void		 ampintc_send_ipi(struct cpu_info *, int);

const struct cfattach ampintc_ca = {
	sizeof (struct ampintc_softc), ampintc_match, ampintc_attach,
	NULL, ampintc_activate
};

struct cfdriver ampintc_cd = {
	NULL, "ampintc", DV_DULL
};

static char *ampintc_compatibles[] = {
	"arm,cortex-a7-gic",
	"arm,cortex-a9-gic",
	"arm,cortex-a15-gic",
	"arm,gic-400",
	NULL
};

int
ampintc_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;
	int i;

	for (i = 0; ampintc_compatibles[i]; i++)
		if (OF_is_compatible(faa->fa_node, ampintc_compatibles[i]))
			return (1);

	return (0);
}

void
ampintc_attach(struct device *parent, struct device *self, void *aux)
{
	struct ampintc_softc *sc = (struct ampintc_softc *)self;
	struct fdt_attach_args *faa = aux;
	int i, nintr, ncpu;
	uint32_t ictr;
#ifdef MULTIPROCESSOR
	int nipi, ipiirq[3];
#endif

	ampintc = sc;

	arm_init_smask();

	sc->sc_iot = faa->fa_iot;

	/* First row: ICD */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_d_ioh))
		panic("%s: ICD bus_space_map failed!", __func__);

	/* Second row: ICP */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_p_ioh))
		panic("%s: ICP bus_space_map failed!", __func__);

	evcount_attach(&sc->sc_spur, "irq1023/spur", NULL);

	ictr = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICTR);
	nintr = 32 * ((ictr >> ICD_ICTR_ITL_SH) & ICD_ICTR_ITL_M);
	nintr += 32; /* ITLinesNumber + 1; irq 0-15 SGI, 16-31 PPI, 32+ SPI */
	sc->sc_nintr = nintr;
	ncpu = ((ictr >> ICD_ICTR_CPU_SH) & ICD_ICTR_CPU_M) + 1;
	printf(" nirq %d, ncpu %d", nintr, ncpu);

	KASSERT(curcpu()->ci_cpuid <= ICD_ICTR_CPU_M);
	sc->sc_cpu_mask[curcpu()->ci_cpuid] =
	    bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(0));

	ampintc_init(sc);

	/* software reset of the part? */
	/* set protection bit (kernel only)? */

	/* XXX - check power saving bit */

	sc->sc_handler = mallocarray(nintr, sizeof(*sc->sc_handler), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	for (i = 0; i < nintr; i++) {
		TAILQ_INIT(&sc->sc_handler[i].iq_list);
	}

	ampintc_setipl(IPL_HIGH);	/* XXX ??? */
	ampintc_calc_mask();

	/* insert self as interrupt handler */
	arm_set_intr_handler(ampintc_splraise, ampintc_spllower, ampintc_splx,
	    ampintc_setipl, ampintc_irq_handler, NULL, NULL, NULL);

#ifdef MULTIPROCESSOR
	/* setup IPI interrupts */

	/*
	 * Ideally we want three IPI interrupts, one for NOP, one for
	 * DDB and one for HALT.  However we can survive if only one
	 * is available; it is possible that most are not available to
	 * the non-secure OS.
	 */
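	/*
	 * Probe for usable SGIs by toggling a bit in each priority
	 * field and reading it back: non-secure writes to interrupts
	 * reserved by secure firmware do not stick, so an unchanged
	 * read-back means that SGI is not available to us.
	 */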
	nipi = 0;
	for (i = 0; i < 16; i++) {
		int reg, oldreg;

		oldreg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
		    ICD_IPRn(i));
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
		    oldreg ^ 0x20);

		/* if this interrupt is not usable, the write won't stick */
		reg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i));
		if (reg == oldreg)
			continue;

		/* return to original value, will be set when used */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
		    oldreg);

		if (nipi == 0)
			printf(" ipi: %d", i);
		else
			printf(", %d", i);
		ipiirq[nipi++] = i;
		if (nipi == 3)
			break;
	}

	if (nipi == 0)
		panic("no irq available for IPI");

	switch (nipi) {
	case 1:
		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_combined, sc, "ipi");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[0];
		sc->sc_ipi_num[ARM_IPI_HALT] = ipiirq[0];
		break;
	case 2:
		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_nop, sc, "ipinop");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		ampintc_intr_establish(ipiirq[1], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_combined, sc, "ipi");
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[1];
		sc->sc_ipi_num[ARM_IPI_HALT] = ipiirq[1];
		break;
	case 3:
		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_nop, sc, "ipinop");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		ampintc_intr_establish(ipiirq[1], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_ddb, sc, "ipiddb");
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[1];
		ampintc_intr_establish(ipiirq[2], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_halt, sc, "ipihalt");
		sc->sc_ipi_num[ARM_IPI_HALT] = ipiirq[2];
		break;
	default:
		panic("nipi unexpected number %d", nipi);
	}

	intr_send_ipi_func = ampintc_send_ipi;
#endif

	/* enable interrupts */
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_DCR, 3);
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
	intr_enable();

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = self;
	sc->sc_ic.ic_establish = ampintc_intr_establish_fdt;
	sc->sc_ic.ic_disestablish = ampintc_intr_disestablish;
	sc->sc_ic.ic_route = ampintc_route_irq;
	sc->sc_ic.ic_cpu_enable = ampintc_cpuinit;
	sc->sc_ic.ic_barrier = ampintc_intr_barrier;
	arm_intr_register_fdt(&sc->sc_ic);

	/* attach GICv2M frame controller */
	simplebus_attach(parent, &sc->sc_sbus.sc_dev, faa);
}

int
ampintc_activate(struct device *self, int act)
{
	struct ampintc_softc *sc = (struct ampintc_softc *)self;
	struct cpu_info *ci;
	int irq, min;

	switch (act) {
	case DVACT_RESUME:
		for (irq = 0; irq < sc->sc_nintr; irq++) {
			ci = sc->sc_handler[irq].iq_ci;
			min = sc->sc_handler[irq].iq_irq_min;
			if (min != IPL_NONE) {
				ampintc_set_priority(irq, min);
				ampintc_intr_enable(irq);
				ampintc_route(irq, IRQ_ENABLE, ci);
			} else {
				ampintc_intr_disable(irq);
			}
		}

		/* enable interrupts */
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_DCR, 3);
		bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
		break;
	}

	return 0;
}

void
ampintc_init(struct ampintc_softc *sc)
{
	int i;

	/* Disable all interrupts, clear all pending */
	for (i = 0; i < sc->sc_nintr / 32; i++) {
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICERn(i * 32), ~0);
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICPRn(i * 32), ~0);
	}
	for (i = 0; i < sc->sc_nintr; i++) {
		/* 0xff is the lowest priority */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i), 0xff);
		/* target no cpus */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(i), 0);
	}
	for (i = 2; i < sc->sc_nintr / 16; i++) {
		/* irq 32 - N */
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICRn(i * 16), 0);
	}
}

void
ampintc_set_priority(int irq, int pri)
{
	struct ampintc_softc *sc = ampintc;
	uint32_t prival;

	/*
	 * We only use 16 (13 really) interrupt priorities, and a CPU
	 * is only required to implement bits 4-7 of each priority
	 * field, so shift the value into the top bits.  Lower values
	 * are higher priority, hence IPL_HIGH - pri.
	 */
	prival = (IPL_HIGH - pri) << ICMIPMR_SH;
	bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(irq), prival);
}
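
/*
 * Worked example (assuming IPL_HIGH == 12, consistent with the
 * "16 (13 really)" note above): pri == IPL_HIGH yields prival 0x00,
 * the highest hardware priority, while pri == IPL_NONE yields prival
 * 0xc0, the lowest we use.  ICPIPMR in ampintc_setipl() uses the same
 * encoding, so an interrupt is delivered only while its prival is
 * numerically lower than the current mask.
 */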

void
ampintc_setipl(int new)
{
	struct cpu_info *ci = curcpu();
	struct ampintc_softc *sc = ampintc;
	u_long psw;

	/* disable here is only to keep hardware in sync with ci->ci_cpl */
	psw = intr_disable();
	ci->ci_cpl = new;

	/* low values are higher priority thus IPL_HIGH - pri */
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPIPMR,
	    (IPL_HIGH - new) << ICMIPMR_SH);
	intr_restore(psw);
}

void
ampintc_intr_enable(int irq)
{
	struct ampintc_softc *sc = ampintc;

#ifdef DEBUG
	printf("enable irq %d register %x bitmask %08x\n",
	    irq, ICD_ISERn(irq), 1 << IRQ_TO_REG32BIT(irq));
#endif

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ISERn(irq),
	    1 << IRQ_TO_REG32BIT(irq));
}

void
ampintc_intr_disable(int irq)
{
	struct ampintc_softc *sc = ampintc;

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICERn(irq),
	    1 << IRQ_TO_REG32BIT(irq));
}

void
ampintc_intr_config(int irqno, int type)
{
	struct ampintc_softc *sc = ampintc;
	uint32_t ctrl;

	ctrl = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(irqno));

	ctrl &= ~ICD_ICR_TRIG_MASK(irqno);
	if (type == IST_EDGE_RISING)
		ctrl |= ICD_ICR_TRIG_EDGE(irqno);
	else
		ctrl |= ICD_ICR_TRIG_LEVEL(irqno);

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(irqno), ctrl);
}

void
ampintc_calc_mask(void)
{
	struct ampintc_softc *sc = ampintc;
	int irq;

	for (irq = 0; irq < sc->sc_nintr; irq++)
		ampintc_calc_irq(sc, irq);
}

void
ampintc_calc_irq(struct ampintc_softc *sc, int irq)
{
	struct cpu_info *ci = sc->sc_handler[irq].iq_ci;
	struct intrhand *ih;
	int max = IPL_NONE;
	int min = IPL_HIGH;

	TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
		if (ih->ih_ipl > max)
			max = ih->ih_ipl;

		if (ih->ih_ipl < min)
			min = ih->ih_ipl;
	}

	if (max == IPL_NONE)
		min = IPL_NONE;

	if (sc->sc_handler[irq].iq_irq_max == max &&
	    sc->sc_handler[irq].iq_irq_min == min)
		return;

	sc->sc_handler[irq].iq_irq_max = max;
	sc->sc_handler[irq].iq_irq_min = min;

	/* Enable interrupts at lower levels, clear -> enable */
	/* Set interrupt priority/enable */
	if (min != IPL_NONE) {
		ampintc_set_priority(irq, min);
		ampintc_intr_enable(irq);
		ampintc_route(irq, IRQ_ENABLE, ci);
	} else {
		ampintc_intr_disable(irq);
		ampintc_route(irq, IRQ_DISABLE, ci);
	}
}

void
ampintc_splx(int new)
{
	struct cpu_info *ci = curcpu();

	if (ci->ci_ipending & arm_smask[new])
		arm_do_pending_intr(new);

	ampintc_setipl(new);
}

int
ampintc_spllower(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;
	ampintc_splx(new);
	return (old);
}

int
ampintc_splraise(int new)
{
	struct cpu_info *ci = curcpu();
	int old;
	old = ci->ci_cpl;

	/*
	 * setipl must always be called because there is a race window
	 * where ci_cpl is updated before the hardware mask is set.  If
	 * an interrupt occurs in that window and the mask is not
	 * rewritten here, the hardware might not be updated on the
	 * next splraise, completely breaking spl protection.
	 */
	if (old > new)
		new = old;

	ampintc_setipl(new);

	return (old);
}


uint32_t
ampintc_iack(void)
{
	uint32_t intid;
	struct ampintc_softc *sc = ampintc;

	intid = bus_space_read_4(sc->sc_iot, sc->sc_p_ioh, ICPIAR);

	return (intid);
}

void
ampintc_eoi(uint32_t eoi)
{
	struct ampintc_softc *sc = ampintc;

	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPEOIR, eoi);
}
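
/*
 * Reading ICPIAR both returns and acknowledges the highest priority
 * pending interrupt, raising the running priority so it is not
 * redelivered.  Every successful ampintc_iack() must therefore be
 * paired with an ampintc_eoi() of the same value to drop the priority
 * again; ampintc_irq_handler() does this around its handler walk.
 */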

void
ampintc_route(int irq, int enable, struct cpu_info *ci)
{
	struct ampintc_softc *sc = ampintc;
	uint8_t mask, val;

	KASSERT(ci->ci_cpuid <= ICD_ICTR_CPU_M);
	mask = sc->sc_cpu_mask[ci->ci_cpuid];

	val = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(irq));
	if (enable == IRQ_ENABLE)
		val |= mask;
	else
		val &= ~mask;
	bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(irq), val);
}

void
ampintc_cpuinit(void)
{
	struct ampintc_softc *sc = ampintc;
	int i, irq;

	/* XXX - this is the only cpu specific call to set this */
	if (sc->sc_cpu_mask[cpu_number()] == 0) {
		for (i = 0; i < 32; i++) {
			int cpumask =
			    bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
			    ICD_IPTRn(i));

			if (cpumask != 0) {
				sc->sc_cpu_mask[cpu_number()] = cpumask;
				break;
			}
		}
	}

	if (sc->sc_cpu_mask[cpu_number()] == 0)
		panic("could not determine cpu target mask");

	for (irq = 0; irq < sc->sc_nintr; irq++) {
		if (sc->sc_handler[irq].iq_ci != curcpu())
			continue;
		if (sc->sc_handler[irq].iq_irq_min != IPL_NONE)
			ampintc_route(irq, IRQ_ENABLE, curcpu());
		else
			ampintc_route(irq, IRQ_DISABLE, curcpu());
	}

	/*
	 * If a secondary CPU is turned off from an IPI handler and
	 * the GIC did not go through a full reset (for example when
	 * we fail to suspend) the IPI might still be active.  So
	 * signal EOI here to make sure new interrupts will be
	 * serviced.
	 */
	ampintc_eoi(sc->sc_ipi_num[ARM_IPI_HALT]);
}

void
ampintc_route_irq(void *v, int enable, struct cpu_info *ci)
{
	struct ampintc_softc *sc = ampintc;
	struct intrhand *ih = v;

	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(ih->ih_irq), 0);
	if (enable) {
		ampintc_set_priority(ih->ih_irq,
		    sc->sc_handler[ih->ih_irq].iq_irq_min);
		ampintc_intr_enable(ih->ih_irq);
	}

	ampintc_route(ih->ih_irq, enable, ci);
}

void
ampintc_intr_barrier(void *cookie)
{
	struct intrhand *ih = cookie;

	sched_barrier(ih->ih_ci);
}

void
ampintc_run_handler(struct intrhand *ih, void *frame, int s)
{
	void *arg;
	int handled;

#ifdef MULTIPROCESSOR
	int need_lock;

	if (ih->ih_flags & IPL_MPSAFE)
		need_lock = 0;
	else
		need_lock = s < IPL_SCHED;

	if (need_lock)
		KERNEL_LOCK();
#endif

	if (ih->ih_arg)
		arg = ih->ih_arg;
	else
		arg = frame;

	handled = ih->ih_func(arg);
	if (handled)
		ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
	if (need_lock)
		KERNEL_UNLOCK();
#endif
}

void
ampintc_irq_handler(void *frame)
{
	struct ampintc_softc *sc = ampintc;
	struct intrhand *ih;
	uint32_t iack_val;
	int irq, pri, s;

	iack_val = ampintc_iack();
#ifdef DEBUG_INTC
	if (iack_val != 27)
		printf("irq %d fired\n", iack_val);
	else {
		static int cnt = 0;
		if ((cnt++ % 100) == 0) {
			printf("irq %d fired * _100\n", iack_val);
#ifdef DDB
			db_enter();
#endif
		}

	}
#endif

	irq = iack_val & ICPIAR_IRQ_M;

	if (irq == 1023) {
		sc->sc_spur.ec_count++;
		return;
	}

	if (irq >= sc->sc_nintr)
		return;

	pri = sc->sc_handler[irq].iq_irq_max;
	s = ampintc_splraise(pri);
	intr_enable();
	TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
		ampintc_run_handler(ih, frame, s);
	}
	intr_disable();
	ampintc_eoi(iack_val);

	ampintc_splx(s);
}

void *
ampintc_intr_establish_fdt(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct ampintc_softc *sc = (struct ampintc_softc *)cookie;
	int irq;
	int type;

	/* 2nd cell contains the interrupt number */
	irq = cell[1];

	/* 1st cell contains type: 0 SPI (32+), 1 PPI (16-31) */
	if (cell[0] == 0)
		irq += 32;
	else if (cell[0] == 1)
		irq += 16;
	else
		panic("%s: bogus interrupt type", sc->sc_sbus.sc_dev.dv_xname);

	/* SPIs are only active-high level or low-to-high edge */
	if (cell[2] & 0x3)
		type = IST_EDGE_RISING;
	else
		type = IST_LEVEL_HIGH;

	return ampintc_intr_establish(irq, type, level, ci, func, arg, name);
}
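
/*
 * Example, following the standard GIC device tree binding: an
 * "interrupts = <0 49 4>" property describes SPI 49, active-high
 * level (third cell 4), which maps to irq 32 + 49 == 81 above, while
 * "<1 13 1>" would be PPI 13, rising edge, irq 16 + 13 == 29.
 */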

void *
ampintc_intr_establish(int irqno, int type, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, char *name)
{
	struct ampintc_softc *sc = ampintc;
	struct intrhand *ih;
	u_long psw;

	if (irqno < 0 || irqno >= sc->sc_nintr)
		panic("ampintc_intr_establish: bogus irqnumber %d: %s",
		    irqno, name);

	if (ci == NULL)
		ci = &cpu_info_primary;

	if (irqno < 16) {
		/* SGI are only EDGE */
		type = IST_EDGE_RISING;
	} else if (irqno < 32) {
		/* PPI are only LEVEL */
		type = IST_LEVEL_HIGH;
	}

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irqno;
	ih->ih_name = name;
	ih->ih_ci = ci;

	psw = intr_disable();

	if (!TAILQ_EMPTY(&sc->sc_handler[irqno].iq_list) &&
	    sc->sc_handler[irqno].iq_ci != ci) {
		free(ih, M_DEVBUF, sizeof(*ih));
		intr_restore(psw);
		return NULL;
	}

	TAILQ_INSERT_TAIL(&sc->sc_handler[irqno].iq_list, ih, ih_list);
	sc->sc_handler[irqno].iq_ci = ci;

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

#ifdef DEBUG_INTC
	printf("ampintc_intr_establish irq %d level %d [%s]\n", irqno, level,
	    name);
#endif

	ampintc_intr_config(irqno, type);
	ampintc_calc_mask();

	intr_restore(psw);
	return (ih);
}

void
ampintc_intr_disestablish(void *cookie)
{
	struct ampintc_softc *sc = ampintc;
	struct intrhand *ih = cookie;
	u_long psw;

#ifdef DEBUG_INTC
	printf("ampintc_intr_disestablish irq %d level %d [%s]\n",
	    ih->ih_irq, ih->ih_ipl, ih->ih_name);
#endif

	psw = intr_disable();

	TAILQ_REMOVE(&sc->sc_handler[ih->ih_irq].iq_list, ih, ih_list);
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);

	ampintc_calc_mask();

	intr_restore(psw);

	free(ih, M_DEVBUF, sizeof(*ih));
}

/*
 * GICv2m frame controller for MSI interrupts.
 */
#define GICV2M_TYPER			0x008
#define GICV2M_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)
#define GICV2M_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)
#define GICV2M_SETSPI_NS		0x040
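
/*
 * A GICv2m frame turns MSI memory writes into SPIs: a device write of
 * the value n to the frame's GICV2M_SETSPI_NS doorbell register raises
 * SPI n.  ampintc_intr_establish_msi() below hands a device the
 * doorbell address and an SPI number picked from the frame's range.
 */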

int	 ampintc_msi_match(struct device *, void *, void *);
void	 ampintc_msi_attach(struct device *, struct device *, void *);
void	*ampintc_intr_establish_msi(void *, uint64_t *, uint64_t *,
	    int, struct cpu_info *, int (*)(void *), void *, char *);
void	 ampintc_intr_disestablish_msi(void *);
void	 ampintc_intr_barrier_msi(void *);

struct ampintc_msi_softc {
	struct device			 sc_dev;
	bus_space_tag_t			 sc_iot;
	bus_space_handle_t		 sc_ioh;
	int				 sc_node;
	paddr_t				 sc_addr;
	int				 sc_bspi;
	int				 sc_nspi;
	void				**sc_spi;
	struct interrupt_controller	 sc_ic;
};

const struct cfattach ampintcmsi_ca = {
	sizeof (struct ampintc_msi_softc), ampintc_msi_match, ampintc_msi_attach
};

struct cfdriver ampintcmsi_cd = {
	NULL, "ampintcmsi", DV_DULL
};

int
ampintc_msi_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "arm,gic-v2m-frame");
}

void
ampintc_msi_attach(struct device *parent, struct device *self, void *aux)
{
	struct ampintc_msi_softc *sc = (struct ampintc_msi_softc *)self;
	struct fdt_attach_args *faa = aux;
	uint32_t typer;

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("%s: bus_space_map failed!", __func__);

	/* XXX: Hack to retrieve the physical address (from a CPU PoV). */
	if (!pmap_extract(pmap_kernel(), sc->sc_ioh, &sc->sc_addr)) {
		printf(": cannot retrieve msi addr\n");
		return;
	}

	typer = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GICV2M_TYPER);
	sc->sc_bspi = GICV2M_TYPER_SPI_BASE(typer);
	sc->sc_nspi = GICV2M_TYPER_SPI_COUNT(typer);

	sc->sc_bspi = OF_getpropint(faa->fa_node,
	    "arm,msi-base-spi", sc->sc_bspi);
	sc->sc_nspi = OF_getpropint(faa->fa_node,
	    "arm,msi-num-spis", sc->sc_nspi);

	printf(": nspi %d\n", sc->sc_nspi);

	sc->sc_spi = mallocarray(sc->sc_nspi, sizeof(void *), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_establish_msi = ampintc_intr_establish_msi;
	sc->sc_ic.ic_disestablish = ampintc_intr_disestablish_msi;
	sc->sc_ic.ic_barrier = ampintc_intr_barrier_msi;
	arm_intr_register_fdt(&sc->sc_ic);
}

void *
ampintc_intr_establish_msi(void *self, uint64_t *addr, uint64_t *data,
    int level, struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct ampintc_msi_softc *sc = (struct ampintc_msi_softc *)self;
	extern LIST_HEAD(, interrupt_controller) interrupt_controllers;
	struct interrupt_controller *ic;
	struct machine_intr_handle *ih;
	void *cookie;
	int cells[3];
	int i;

	LIST_FOREACH(ic, &interrupt_controllers, ic_list) {
		if (ic->ic_node == OF_parent(sc->sc_node))
			break;
	}
	if (ic == NULL)
		return NULL;

	cells[0] = 0; /* SPI */
	cells[2] = 1; /* Edge-Rising */

	for (i = 0; i < sc->sc_nspi; i++) {
		if (sc->sc_spi[i] != NULL)
			continue;

		cells[1] = sc->sc_bspi + i - 32;
		cookie = ic->ic_establish(ic->ic_cookie, cells,
		    level, ci, func, arg, name);
		if (cookie == NULL)
			return NULL;

		ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
		ih->ih_ic = ic;
		ih->ih_ih = cookie;

		*addr = sc->sc_addr + GICV2M_SETSPI_NS;
		*data = sc->sc_bspi + i;
		sc->sc_spi[i] = ih;
		return &sc->sc_spi[i];
	}

	return NULL;
}

void
ampintc_intr_disestablish_msi(void *cookie)
{
	fdt_intr_disestablish(*(void **)cookie);
	*(void **)cookie = NULL;
}

void
ampintc_intr_barrier_msi(void *cookie)
{
	intr_barrier(*(void **)cookie);
}

#ifdef MULTIPROCESSOR
int
ampintc_ipi_ddb(void *v)
{
	/* XXX */
#ifdef DDB
	db_enter();
#endif
	return 1;
}

int
ampintc_ipi_halt(void *v)
{
	cpu_halt();
	return 1;
}

int
ampintc_ipi_nop(void *v)
{
	/* Nothing to do here, just enough to wake up from WFI */
	return 1;
}

int
ampintc_ipi_combined(void *v)
{
	struct ampintc_softc *sc = (struct ampintc_softc *)v;

	if (sc->sc_ipi_reason[cpu_number()] == ARM_IPI_DDB) {
		sc->sc_ipi_reason[cpu_number()] = ARM_IPI_NOP;
		return ampintc_ipi_ddb(v);
	} else if (sc->sc_ipi_reason[cpu_number()] == ARM_IPI_HALT) {
		sc->sc_ipi_reason[cpu_number()] = ARM_IPI_NOP;
		return ampintc_ipi_halt(v);
	} else {
		return ampintc_ipi_nop(v);
	}
}

void
ampintc_send_ipi(struct cpu_info *ci, int id)
{
	struct ampintc_softc *sc = ampintc;
	int sendmask;

	if (ci == curcpu() && id == ARM_IPI_NOP)
		return;

	/* never overwrite IPI_DDB or IPI_HALT with IPI_NOP */
	if (id == ARM_IPI_DDB || id == ARM_IPI_HALT)
		sc->sc_ipi_reason[ci->ci_cpuid] = id;

	/* currently will only send to one cpu */
	sendmask = sc->sc_cpu_mask[ci->ci_cpuid] << 16;
	sendmask |= sc->sc_ipi_num[id];

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_SGIR, sendmask);
}
#endif