/* $OpenBSD: ampintc.c,v 1.27 2019/09/29 10:36:52 kettenis Exp $ */
/*
 * Copyright (c) 2007,2009,2011 Dale Rahn <drahn@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This driver implements the interrupt controller as specified in
 * DDI0407E_cortex_a9_mpcore_r2p0_trm with the
 * IHI0048A_gic_architecture_spec_v1_0 underlying specification
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/evcount.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <arm/cpufunc.h>
#include <arm/cortex/cortex.h>

#include <dev/ofw/fdt.h>
#include <dev/ofw/openfirm.h>

#include <arm/simplebus/simplebusvar.h>

/* registers */

/*
 * Distributor (ICD) register offsets, relative to the first "reg" row
 * of the FDT node.  ICD_DCR is the distributor control register; bit 0
 * enables the secure (or only) interrupt group, bit 1 the non-secure one.
 */
#define ICD_DCR			0x000
#define  ICD_DCR_ES		0x00000001
#define  ICD_DCR_ENS		0x00000002

/*
 * ICD_ICTR (interrupt controller type register): ITL encodes the number
 * of supported interrupt lines in units of 32, CPU the number of CPU
 * interfaces minus one.
 */
#define ICD_ICTR		0x004
#define  ICD_ICTR_LSPI_SH	11
#define  ICD_ICTR_LSPI_M	0x1f
#define  ICD_ICTR_CPU_SH	5
#define  ICD_ICTR_CPU_M		0x07
#define  ICD_ICTR_ITL_SH	0
#define  ICD_ICTR_ITL_M		0x1f
/* Distributor implementer identification register. */
#define ICD_IDIR		0x008
#define  ICD_DIR_PROD_SH	24
#define  ICD_DIR_PROD_M		0xff
#define  ICD_DIR_REV_SH		12
#define  ICD_DIR_REV_M		0xfff
#define  ICD_DIR_IMP_SH		0
#define  ICD_DIR_IMP_M		0xfff

/*
 * Helpers mapping an IRQ number to the register index / bit position of
 * the banked ICD register arrays: 32 irqs per 32-bit register (1 bit
 * each), 4 per register (8 bits each), or 16 per register (2 bits each).
 */
#define IRQ_TO_REG32(i)		(((i) >> 5) & 0x1f)
#define IRQ_TO_REG32BIT(i)	((i) & 0x1f)
#define IRQ_TO_REG4(i)		(((i) >> 2) & 0xff)
#define IRQ_TO_REG4BIT(i)	((i) & 0x3)
#define IRQ_TO_REG16(i)		(((i) >> 4) & 0x3f)
#define IRQ_TO_REG16BIT(i)	((i) & 0xf)
#define IRQ_TO_REGBIT_S(i)	8
#define IRQ_TO_REG4BIT_M(i)	8

#define ICD_ISRn(i)		(0x080 + (IRQ_TO_REG32(i) * 4))	/* security */
#define ICD_ISERn(i)		(0x100 + (IRQ_TO_REG32(i) * 4))	/* set-enable */
#define ICD_ICERn(i)		(0x180 + (IRQ_TO_REG32(i) * 4))	/* clear-enable */
#define ICD_ISPRn(i)		(0x200 + (IRQ_TO_REG32(i) * 4))	/* set-pending */
#define ICD_ICPRn(i)		(0x280 + (IRQ_TO_REG32(i) * 4))	/* clear-pending */
#define ICD_ABRn(i)		(0x300 + (IRQ_TO_REG32(i) * 4))	/* active bit */
#define ICD_IPRn(i)		(0x400 + (i))			/* priority, byte-wide */
#define ICD_IPTRn(i)		(0x800 + (i))			/* CPU targets, byte-wide */
/* Interrupt configuration: 2 bits per irq; bit 1 selects edge trigger. */
#define ICD_ICRn(i)		(0xC00 + (IRQ_TO_REG16(i) * 4))
#define  ICD_ICR_TRIG_LEVEL(i)	(0x0 << (IRQ_TO_REG16BIT(i) * 2))
#define  ICD_ICR_TRIG_EDGE(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))
#define  ICD_ICR_TRIG_MASK(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))

/*
 * what about (ppi|spi)_status
 */
#define ICD_PPI			0xD00
#define  ICD_PPI_GTIMER		(1 << 11)
#define  ICD_PPI_FIQ		(1 << 12)
#define  ICD_PPI_PTIMER		(1 << 13)
#define  ICD_PPI_PWDOG		(1 << 14)
#define  ICD_PPI_IRQ		(1 << 15)
#define ICD_SPI_BASE		0xD04
#define  ICD_SPIn(i)		(ICD_SPI_BASE + ((i) * 4))

/* Software generated interrupt register, used to raise IPIs. */
#define ICD_SGIR		0xF00

#define ICD_PERIPH_ID_0		0xFD0
#define ICD_PERIPH_ID_1		0xFD4
#define ICD_PERIPH_ID_2		0xFD8
#define ICD_PERIPH_ID_3		0xFDC
#define ICD_PERIPH_ID_4		0xFE0
#define ICD_PERIPH_ID_5		0xFE4
#define ICD_PERIPH_ID_6		0xFE8
#define ICD_PERIPH_ID_7		0xFEC

/*
 * NOTE(review): ID_0..3 all expand to 0xFEC (same as PERIPH_ID_7); this
 * looks like a copy-paste slip -- component ID registers normally live at
 * consecutive offsets.  These macros are unused in this file; verify
 * against the TRM before relying on them.
 */
#define ICD_COMP_ID_0		0xFEC
#define ICD_COMP_ID_1		0xFEC
#define ICD_COMP_ID_2		0xFEC
#define ICD_COMP_ID_3		0xFEC

/*
 * CPU interface (ICP) register offsets, relative to the second "reg"
 * row of the FDT node.  These are banked per CPU.
 */
#define ICPICR			0x00	/* CPU interface control (enable) */
#define ICPIPMR			0x04	/* priority mask */
/* XXX - must left justify bits to 0 - 7 */
#define  ICMIPMR_SH		4
#define ICPBPR			0x08	/* binary point */
#define ICPIAR			0x0C	/* interrupt acknowledge */
/* ICPIAR fields: low 10 bits are the interrupt ID, next 3 the source CPU. */
#define  ICPIAR_IRQ_SH		0
#define  ICPIAR_IRQ_M		0x3ff
#define  ICPIAR_CPUID_SH	10
#define  ICPIAR_CPUID_M		0x7
#define  ICPIAR_NO_PENDING_IRQ	ICPIAR_IRQ_M
#define ICPEOIR			0x10	/* end of interrupt */
#define ICPPRP			0x14	/* running priority */
#define ICPHPIR			0x18	/* highest pending interrupt */
#define ICPIIR			0xFC	/* CPU interface identification */

/*
 * what about periph_id and component_id
 */

/* Argument for ampintc_route(): add/remove this CPU from an irq's targets. */
#define IRQ_ENABLE	1
#define IRQ_DISABLE	0

struct ampintc_softc {
	struct simplebus_softc	 sc_sbus;
	struct intrq		*sc_handler;	/* per-irq handler queues, sc_nintr entries */
	int			 sc_nintr;	/* number of interrupt lines supported */
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_d_ioh, sc_p_ioh;	/* distributor / CPU interface */
	/* per-CPU target bitmask used in ICD_IPTRn, indexed by cpuid */
	uint8_t			 sc_cpu_mask[ICD_ICTR_CPU_M + 1];
	struct evcount		 sc_spur;	/* spurious interrupt (irq 1023) counter */
	struct interrupt_controller sc_ic;
	/* per-CPU pending IPI reason when only one IPI irq is available */
	int			 sc_ipi_reason[ICD_ICTR_CPU_M + 1];
	int			 sc_ipi_num[2];	/* irq number per ARM_IPI_* id */
};
/* Single global instance; this driver only supports one GIC. */
struct ampintc_softc *ampintc;


struct intrhand {
	TAILQ_ENTRY(intrhand) ih_list;	/* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_flags;
	int ih_irq;			/* IRQ number */
	struct evcount ih_count;
	char *ih_name;
};

struct intrq {
	TAILQ_HEAD(, intrhand) iq_list;	/* handler list */
	int iq_irq_max;			/* IRQ to mask while handling */
	int iq_irq_min;			/* lowest IRQ when shared */
	int iq_ist;			/* share type */
};


int		 ampintc_match(struct device *, void *, void *);
void		 ampintc_attach(struct device *, struct device *, void *);
void		 ampintc_cpuinit(void);
int		 ampintc_spllower(int);
void		 ampintc_splx(int);
int		 ampintc_splraise(int);
void		 ampintc_setipl(int);
void		 ampintc_calc_mask(void);
void		*ampintc_intr_establish(int, int, int, int (*)(void *),
		    void *, char *);
void		*ampintc_intr_establish_ext(int, int, int (*)(void *),
		    void *, char *);
void		*ampintc_intr_establish_fdt(void *, int *, int,
		    int (*)(void *), void *, char *);
void		 ampintc_intr_disestablish(void *);
void		 ampintc_irq_handler(void *);
const char	*ampintc_intr_string(void *);
uint32_t	 ampintc_iack(void);
void		 ampintc_eoi(uint32_t);
void		 ampintc_set_priority(int, int);
void		 ampintc_intr_enable(int);
void		 ampintc_intr_disable(int);
void		 ampintc_intr_config(int, int);
void		 ampintc_route(int, int, struct cpu_info *);
void		 ampintc_route_irq(void *, int, struct cpu_info *);

int		 ampintc_ipi_combined(void *);
int		 ampintc_ipi_nop(void *);
int		 ampintc_ipi_ddb(void *);
void		 ampintc_send_ipi(struct cpu_info *, int);

struct cfattach	ampintc_ca = {
	sizeof (struct ampintc_softc), ampintc_match, ampintc_attach
};

struct cfdriver ampintc_cd = {
	NULL, "ampintc", DV_DULL
};

static char *ampintc_compatibles[] = {
	"arm,cortex-a7-gic",
	"arm,cortex-a9-gic",
	"arm,cortex-a15-gic",
	"arm,gic-400",
	NULL
};

/*
 * Autoconf match: accept any FDT node compatible with one of the
 * supported GICv1/v2 distributor bindings.
 */
int
ampintc_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;
	int i;

	for (i = 0; ampintc_compatibles[i]; i++)
		if (OF_is_compatible(faa->fa_node, ampintc_compatibles[i]))
			return (1);

	return (0);
}

/*
 * Attach: map the distributor and CPU interface register banks, mask
 * and clear every interrupt, install this driver as the platform's
 * spl/interrupt backend, probe and establish IPI interrupts (MP), then
 * enable the controller and register with the FDT interrupt framework.
 */
void
ampintc_attach(struct device *parent, struct device *self, void *aux)
{
	struct ampintc_softc *sc = (struct ampintc_softc *)self;
	struct fdt_attach_args *faa = aux;
	int i, nintr, ncpu;
	uint32_t ictr;
#ifdef MULTIPROCESSOR
	int nipi, ipiirq[2];
#endif

	ampintc = sc;

	arm_init_smask();

	sc->sc_iot = faa->fa_iot;

	/* First row: ICD */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_d_ioh))
		panic("%s: ICD bus_space_map failed!", __func__);

	/* Second row: ICP */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_p_ioh))
		panic("%s: ICP bus_space_map failed!", __func__);

	evcount_attach(&sc->sc_spur, "irq1023/spur", NULL);

	/* Discover line and CPU counts from the type register. */
	ictr = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICTR);
	nintr = 32 * ((ictr >> ICD_ICTR_ITL_SH) & ICD_ICTR_ITL_M);
	nintr += 32; /* ICD_ICTR + 1, irq 0-31 is SGI, 32+ is PPI */
	sc->sc_nintr = nintr;
	ncpu = ((ictr >> ICD_ICTR_CPU_SH) & ICD_ICTR_CPU_M) + 1;
	printf(" nirq %d, ncpu %d", nintr, ncpu);

	/*
	 * IPTRn(0) is banked per CPU; reading it on the boot CPU yields
	 * the target bitmask that addresses this CPU.
	 */
	KASSERT(curcpu()->ci_cpuid <= ICD_ICTR_CPU_M);
	sc->sc_cpu_mask[curcpu()->ci_cpuid] =
	    bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(0));

	/* Disable all interrupts, clear all pending */
	for (i = 0; i < nintr/32; i++) {
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICERn(i*32), ~0);
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICPRn(i*32), ~0);
	}
	for (i = 0; i < nintr; i++) {
		/* lowest priority ?? */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i), 0xff);
		/* target no cpus */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(i), 0);
	}
	/* Default all SPIs to level trigger; i starts at 2 to skip SGI/PPI. */
	for (i = 2; i < nintr/16; i++) {
		/* irq 32 - N */
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(i*16), 0);
	}

	/* software reset of the part? */
	/* set protection bit (kernel only)? */

	/* XXX - check power saving bit */

	/* NOTE(review): M_NOWAIT result is not checked before use below. */
	sc->sc_handler = mallocarray(nintr, sizeof(*sc->sc_handler), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	for (i = 0; i < nintr; i++) {
		TAILQ_INIT(&sc->sc_handler[i].iq_list);
	}

	ampintc_setipl(IPL_HIGH);  /* XXX ??? */
	ampintc_calc_mask();

	/* insert self as interrupt handler */
	arm_set_intr_handler(ampintc_splraise, ampintc_spllower, ampintc_splx,
	    ampintc_setipl, ampintc_intr_establish_ext,
	    ampintc_intr_disestablish, ampintc_intr_string, ampintc_irq_handler);

#ifdef MULTIPROCESSOR
	/* setup IPI interrupts */

	/*
	 * Ideally we want two IPI interrupts, one for NOP and one for
	 * DDB, however we can survive if only one is available it is
	 * possible that most are not available to the non-secure OS.
	 */
	nipi = 0;
	for (i = 0; i < 16; i++) {
		int reg, oldreg;

		/*
		 * Probe SGI usability: flip a priority bit and read it
		 * back; registers for irqs reserved by secure mode read
		 * back unchanged (as zero).
		 */
		oldreg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
		    ICD_IPRn(i));
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
		    oldreg ^ 0x20);

		/* if this interrupt is not usable, route will be zero */
		reg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i));
		if (reg == oldreg)
			continue;

		/* return to original value, will be set when used */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
		    oldreg);

		if (nipi == 0)
			printf(" ipi: %d", i);
		else
			printf(", %d", i);
		ipiirq[nipi++] = i;
		if (nipi == 2)
			break;
	}

	if (nipi == 0)
		panic ("no irq available for IPI");

	switch (nipi) {
	case 1:
		/* One usable SGI: multiplex NOP and DDB over it. */
		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, ampintc_ipi_combined, sc, "ipi");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[0];
		break;
	case 2:
		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, ampintc_ipi_nop, sc, "ipinop");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		ampintc_intr_establish(ipiirq[1], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, ampintc_ipi_ddb, sc, "ipiddb");
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[1];
		break;
	default:
		panic("nipi unexpected number %d", nipi);
	}

	intr_send_ipi_func = ampintc_send_ipi;
#endif

	/* enable interrupts */
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_DCR, 3);
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
	enable_interrupts(PSR_I);

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = self;
	sc->sc_ic.ic_establish = ampintc_intr_establish_fdt;
	sc->sc_ic.ic_disestablish = ampintc_intr_disestablish;
	sc->sc_ic.ic_route = ampintc_route_irq;
	sc->sc_ic.ic_cpu_enable = ampintc_cpuinit;
	arm_intr_register_fdt(&sc->sc_ic);

	/* attach GICv2M frame controller */
	simplebus_attach(parent, &sc->sc_sbus.sc_dev, faa);
}

/*
 * Program the distributor priority byte for one irq.  Converts an IPL_*
 * level to hardware priority; see comment below for the mapping.
 */
void
ampintc_set_priority(int irq, int pri)
{
	struct ampintc_softc *sc = ampintc;
	uint32_t prival;

	/*
	 * We only use 16 (13 really) interrupt priorities,
	 * and a CPU is only required to implement bit 4-7 of each field
	 * so shift into the top bits.
	 * also low values are higher priority thus IPL_HIGH - pri
	 */
	prival = (IPL_HIGH - pri) << ICMIPMR_SH;
	bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(irq), prival);
}

/*
 * Set the current interrupt priority level for this CPU: record it in
 * ci_cpl and program the CPU interface priority mask to match.
 */
void
ampintc_setipl(int new)
{
	struct cpu_info *ci = curcpu();
	struct ampintc_softc *sc = ampintc;
	int psw;

	/* disable here is only to keep hardware in sync with ci->ci_cpl */
	psw = disable_interrupts(PSR_I);
	ci->ci_cpl = new;

	/* low values are higher priority thus IPL_HIGH - pri */
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPIPMR,
	    (IPL_HIGH - new) << ICMIPMR_SH);
	restore_interrupts(psw);
}

/* Unmask one irq in the distributor (write-one-to-set register). */
void
ampintc_intr_enable(int irq)
{
	struct ampintc_softc *sc = ampintc;

#ifdef DEBUG
	printf("enable irq %d register %x bitmask %08x\n",
	    irq, ICD_ISERn(irq), 1 << IRQ_TO_REG32BIT(irq));
#endif

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ISERn(irq),
	    1 << IRQ_TO_REG32BIT(irq));
}

/* Mask one irq in the distributor (write-one-to-clear register). */
void
ampintc_intr_disable(int irq)
{
	struct ampintc_softc *sc = ampintc;

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICERn(irq),
	    1 << IRQ_TO_REG32BIT(irq));
}

/*
 * Configure the trigger mode (edge vs. level) for one irq via the
 * 2-bit field in ICD_ICRn; only the edge bit is modified.
 */
void
ampintc_intr_config(int irqno, int type)
{
	struct ampintc_softc *sc = ampintc;
	uint32_t ctrl;

	ctrl = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(irqno));

	ctrl &= ~ICD_ICR_TRIG_MASK(irqno);
	if (type == IST_EDGE_RISING)
		ctrl |= ICD_ICR_TRIG_EDGE(irqno);
	else
		ctrl |= ICD_ICR_TRIG_LEVEL(irqno);

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(irqno), ctrl);
}

/*
 * Recompute the min/max IPL for every irq from its handler list and
 * reprogram hardware accordingly: an irq with at least one handler gets
 * priority iq_irq_min, is enabled, and is routed to this CPU; an irq
 * with no handlers is disabled and de-routed.  Finally resync the CPU
 * priority mask with the current ci_cpl.
 */
void
ampintc_calc_mask(void)
{
	struct cpu_info *ci = curcpu();
	struct ampintc_softc *sc = ampintc;
	struct intrhand *ih;
	int irq;

	for (irq = 0; irq < sc->sc_nintr; irq++) {
		int max = IPL_NONE;
		int min = IPL_HIGH;
		TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
			if (ih->ih_ipl > max)
				max = ih->ih_ipl;

			if (ih->ih_ipl < min)
				min = ih->ih_ipl;
		}

		/* empty handler list: max stays IPL_NONE, force min too */
		if (max == IPL_NONE)
			min = IPL_NONE;

		if (sc->sc_handler[irq].iq_irq_max == max &&
		    sc->sc_handler[irq].iq_irq_min == min)
			continue;

		sc->sc_handler[irq].iq_irq_max = max;
		sc->sc_handler[irq].iq_irq_min = min;

		/* Enable interrupts at lower levels, clear -> enable */
		/* Set interrupt priority/enable */
		if (min != IPL_NONE) {
			ampintc_set_priority(irq, min);
			ampintc_intr_enable(irq);
			ampintc_route(irq, IRQ_ENABLE, ci);
		} else {
			ampintc_intr_disable(irq);
			ampintc_route(irq, IRQ_DISABLE, ci);
		}
	}
	ampintc_setipl(ci->ci_cpl);
}

/*
 * Lower the interrupt priority level, first running any soft
 * interrupts that became runnable at the new level.
 */
void
ampintc_splx(int new)
{
	struct cpu_info *ci = curcpu();

	if (ci->ci_ipending & arm_smask[new])
		arm_do_pending_intr(new);

	ampintc_setipl(new);
}

/* Lower the IPL and return the previous level. */
int
ampintc_spllower(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;
	ampintc_splx(new);
	return (old);
}

/*
 * Raise the IPL (never lowers) and return the previous level.
 */
int
ampintc_splraise(int new)
{
	struct cpu_info *ci = curcpu();
	int old;
	old = ci->ci_cpl;

	/*
	 * setipl must always be called because there is a race window
	 * where the variable is updated before the mask is set
	 * an interrupt occurs in that window without the mask always
	 * being set, the hardware might not get updated on the next
	 * splraise completely messing up spl protection.
	 */
	if (old > new)
		new = old;

	ampintc_setipl(new);

	return (old);
}


/*
 * Acknowledge the highest-priority pending interrupt by reading the
 * CPU interface acknowledge register; the raw value (irq id + source
 * CPU) must later be passed unmodified to ampintc_eoi().
 */
uint32_t
ampintc_iack(void)
{
	uint32_t intid;
	struct ampintc_softc *sc = ampintc;

	intid = bus_space_read_4(sc->sc_iot, sc->sc_p_ioh, ICPIAR);

	return (intid);
}

/* Signal end-of-interrupt with the value previously read from ICPIAR. */
void
ampintc_eoi(uint32_t eoi)
{
	struct ampintc_softc *sc = ampintc;

	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPEOIR, eoi);
}

/*
 * Add (IRQ_ENABLE) or remove (IRQ_DISABLE) a CPU's bit in the irq's
 * 8-bit target register, using the mask discovered for that CPU.
 */
void
ampintc_route(int irq, int enable, struct cpu_info *ci)
{
	struct ampintc_softc *sc = ampintc;
	uint8_t mask, val;

	KASSERT(ci->ci_cpuid <= ICD_ICTR_CPU_M);
	mask = sc->sc_cpu_mask[ci->ci_cpuid];

	val = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(irq));
	if (enable == IRQ_ENABLE)
		val |= mask;
	else
		val &= ~mask;
	bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(irq), val);
}

/*
 * Per-CPU init for secondary CPUs: discover this CPU's target bitmask
 * by scanning the banked target registers of irqs 0-31, which the
 * hardware reports as targeting (only) the reading CPU.
 */
void
ampintc_cpuinit(void)
{
	struct ampintc_softc *sc = ampintc;
	int i;

	/* XXX - this is the only cpu specific call to set this */
	if (sc->sc_cpu_mask[cpu_number()] == 0) {
		for (i = 0; i < 32; i++) {
			int cpumask =
			    bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
			        ICD_IPTRn(i));

			if (cpumask != 0) {
				sc->sc_cpu_mask[cpu_number()] = cpumask;
				break;
			}
		}
	}

	if (sc->sc_cpu_mask[cpu_number()] == 0)
		panic("could not determine cpu target mask");
}

/*
 * interrupt_controller route hook: (re)enable the CPU interface,
 * reset the irq's trigger config, reprogram priority/enable when
 * enabling, then route the irq to the given CPU.
 */
void
ampintc_route_irq(void *v, int enable, struct cpu_info *ci)
{
	struct ampintc_softc *sc = ampintc;
	struct intrhand *ih = v;

	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(ih->ih_irq), 0);
	if (enable) {
		ampintc_set_priority(ih->ih_irq,
		    sc->sc_handler[ih->ih_irq].iq_irq_min);
		ampintc_intr_enable(ih->ih_irq);
	}

	ampintc_route(ih->ih_irq, enable, ci);
}

/*
 * Main interrupt entry: acknowledge the pending irq, raise the IPL to
 * the irq's maximum handler level, run every handler on its queue with
 * CPU interrupts re-enabled (taking the kernel lock for non-MPSAFE
 * handlers below IPL_SCHED), then EOI and restore the previous IPL.
 */
void
ampintc_irq_handler(void *frame)
{
	struct ampintc_softc *sc = ampintc;
	struct intrhand *ih;
	void *arg;
	uint32_t iack_val;
	int irq, pri, s, handled;

	iack_val = ampintc_iack();
#ifdef DEBUG_INTC
	if (iack_val != 27)
		printf("irq  %d fired\n", iack_val);
	else {
		static int cnt = 0;
		if ((cnt++ % 100) == 0) {
			printf("irq  %d fired * _100\n", iack_val);
#ifdef DDB
			db_enter();
#endif
		}

	}
#endif

	irq = iack_val & ICPIAR_IRQ_M;

	/* 1023 is the GIC's "no pending interrupt" / spurious id */
	if (irq == 1023) {
		sc->sc_spur.ec_count++;
		return;
	}

	if (irq >= sc->sc_nintr)
		return;

	pri = sc->sc_handler[irq].iq_irq_max;
	s = ampintc_splraise(pri);
	TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
#ifdef MULTIPROCESSOR
		int need_lock;

		if (ih->ih_flags & IPL_MPSAFE)
			need_lock = 0;
		else
			need_lock = s < IPL_SCHED;

		if (need_lock)
			KERNEL_LOCK();
#endif

		/* handlers established without an arg get the trap frame */
		if (ih->ih_arg != 0)
			arg = ih->ih_arg;
		else
			arg = frame;

		enable_interrupts(PSR_I);
		handled = ih->ih_func(arg);
		disable_interrupts(PSR_I);
		if (handled)
			ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
		if (need_lock)
			KERNEL_UNLOCK();
#endif
	}
	ampintc_eoi(iack_val);

	ampintc_splx(s);
}

/*
 * Legacy establish API: irq numbers are relative to the SPI base
 * (hardware irq 32), always level triggered.
 */
void *
ampintc_intr_establish_ext(int irqno, int level, int (*func)(void *),
    void *arg, char *name)
{
	return ampintc_intr_establish(irqno+32, IST_LEVEL_HIGH, level,
	    func, arg, name);
}

/*
 * FDT establish hook: decode a standard 3-cell GIC interrupt
 * specifier (type, number, flags) into a hardware irq and trigger
 * mode, then establish the handler.
 */
void *
ampintc_intr_establish_fdt(void *cookie, int *cell, int level,
    int (*func)(void *), void *arg, char *name)
{
	struct ampintc_softc *sc = (struct ampintc_softc *)cookie;
	int irq;
	int type;

	/* 2nd cell contains the interrupt number */
	irq = cell[1];

	/* 1st cell contains type: 0 SPI (32-X), 1 PPI (16-31) */
	if (cell[0] == 0)
		irq += 32;
	else if (cell[0] == 1)
		irq += 16;
	else
		panic("%s: bogus interrupt type", sc->sc_sbus.sc_dev.dv_xname);

	/* SPIs are only active-high level or low-to-high edge */
	if (cell[2] & 0x3)
		type = IST_EDGE_RISING;
	else
		type = IST_LEVEL_HIGH;

	return ampintc_intr_establish(irq, type, level, func, arg, name);
}

/*
 * Core establish: validate the irq, force the trigger type mandated by
 * the irq class (SGI edge, PPI level), link the handler onto the irq's
 * queue with interrupts blocked, attach its event counter, and
 * reprogram trigger config and masks.  Returns an opaque cookie for
 * ampintc_intr_disestablish().
 */
void *
ampintc_intr_establish(int irqno, int type, int level, int (*func)(void *),
    void *arg, char *name)
{
	struct ampintc_softc *sc = ampintc;
	struct intrhand *ih;
	int psw;

	if (irqno < 0 || irqno >= sc->sc_nintr)
		panic("ampintc_intr_establish: bogus irqnumber %d: %s",
		    irqno, name);

	if (irqno < 16) {
		/* SGI are only EDGE */
		type = IST_EDGE_RISING;
	} else if (irqno < 32) {
		/* PPI are only LEVEL */
		type = IST_LEVEL_HIGH;
	}

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irqno;
	ih->ih_name = name;

	psw = disable_interrupts(PSR_I);

	TAILQ_INSERT_TAIL(&sc->sc_handler[irqno].iq_list, ih, ih_list);

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

#ifdef DEBUG_INTC
	printf("ampintc_intr_establish irq %d level %d [%s]\n", irqno, level,
	    name);
#endif

	ampintc_intr_config(irqno, type);
	ampintc_calc_mask();

	restore_interrupts(psw);
	return (ih);
}

/*
 * Remove a handler previously established; unlinks it, detaches the
 * event counter, frees it, and recomputes the irq masks.
 */
void
ampintc_intr_disestablish(void *cookie)
{
	struct ampintc_softc *sc = ampintc;
	struct intrhand *ih = cookie;
	int psw;

#ifdef DEBUG_INTC
	printf("ampintc_intr_disestablish irq %d level %d [%s]\n",
	    ih->ih_irq, ih->ih_ipl, ih->ih_name);
#endif

	psw = disable_interrupts(PSR_I);

	TAILQ_REMOVE(&sc->sc_handler[ih->ih_irq].iq_list, ih, ih_list);
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF, sizeof(*ih));

	ampintc_calc_mask();

	restore_interrupts(psw);
}

/*
 * Format a human-readable name for an established irq.  Note: returns
 * a static buffer, so the result is only valid until the next call.
 */
const char *
ampintc_intr_string(void *cookie)
{
	struct intrhand *ih = (struct intrhand *)cookie;
	static char irqstr[1 + sizeof("ampintc irq ") + 4];

	snprintf(irqstr, sizeof irqstr, "ampintc irq %d", ih->ih_irq);
	return irqstr;
}

/*
 * GICv2m frame controller for MSI interrupts.
 */
/* GICV2M_TYPER encodes the first SPI and the number of SPIs this frame owns. */
#define GICV2M_TYPER		0x008
#define  GICV2M_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)
#define  GICV2M_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)
/* Doorbell register: a peripheral writes an SPI number here to raise it. */
#define GICV2M_SETSPI_NS	0x040

int	 ampintc_msi_match(struct device *, void *, void *);
void	 ampintc_msi_attach(struct device *, struct device *, void *);
void	*ampintc_intr_establish_msi(void *, uint64_t *, uint64_t *,
	    int , int (*)(void *), void *, char *);
void	 ampintc_intr_disestablish_msi(void *);

struct ampintc_msi_softc {
	struct device			 sc_dev;
	bus_space_tag_t			 sc_iot;
	bus_space_handle_t		 sc_ioh;
	paddr_t				 sc_addr;	/* frame PA for MSI doorbell writes */
	int				 sc_bspi;	/* first SPI owned by this frame */
	int				 sc_nspi;	/* number of SPIs owned */
	void				**sc_spi;	/* per-SPI establish cookie, NULL = free */
	struct interrupt_controller	 sc_ic;
};

struct cfattach	ampintcmsi_ca = {
	sizeof (struct ampintc_msi_softc), ampintc_msi_match, ampintc_msi_attach
};

struct cfdriver ampintcmsi_cd = {
	NULL, "ampintcmsi", DV_DULL
};

int
ampintc_msi_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "arm,gic-v2m-frame");
}

/*
 * Attach the GICv2M frame: map its registers, recover the frame's
 * physical address (needed as the MSI doorbell target), and read the
 * SPI range from GICV2M_TYPER, allowing FDT property overrides.
 */
void
ampintc_msi_attach(struct device *parent, struct device *self, void *aux)
{
	struct ampintc_msi_softc *sc = (struct ampintc_msi_softc *)self;
	struct fdt_attach_args *faa = aux;
	uint32_t typer;

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("%s: bus_space_map failed!", __func__);

	/* XXX: Hack to retrieve the physical address (from a CPU PoV). */
	if (!pmap_extract(pmap_kernel(), sc->sc_ioh, &sc->sc_addr)) {
		printf(": cannot retrieve msi addr\n");
		return;
	}

	typer = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GICV2M_TYPER);
	sc->sc_bspi = GICV2M_TYPER_SPI_BASE(typer);
	sc->sc_nspi = GICV2M_TYPER_SPI_COUNT(typer);

	sc->sc_bspi = OF_getpropint(faa->fa_node,
	    "arm,msi-base-spi", sc->sc_bspi);
	sc->sc_nspi = OF_getpropint(faa->fa_node,
	    "arm,msi-num-spis", sc->sc_nspi);

	printf(": nspi %d\n", sc->sc_nspi);

	sc->sc_spi = mallocarray(sc->sc_nspi, sizeof(void *), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_establish_msi = ampintc_intr_establish_msi;
	sc->sc_ic.ic_disestablish = ampintc_intr_disestablish_msi;
	arm_intr_register_fdt(&sc->sc_ic);
}

/*
 * Allocate a free SPI from the frame for an MSI: establish it as
 * edge-triggered and hand back the doorbell address/data pair the
 * device must write to trigger it.  Returns NULL when no SPI is free
 * or establish fails.
 */
void *
ampintc_intr_establish_msi(void *self, uint64_t *addr, uint64_t *data,
    int level, int (*func)(void *), void *arg, char *name)
{
	struct ampintc_msi_softc *sc = (struct ampintc_msi_softc *)self;
	void *cookie;
	int i;

	for (i = 0; i < sc->sc_nspi; i++) {
		if (sc->sc_spi[i] != NULL)
			continue;

		cookie = ampintc_intr_establish(sc->sc_bspi + i,
		    IST_EDGE_RISING, level, func, arg, name);
		if (cookie == NULL)
			return NULL;

		*addr = sc->sc_addr + GICV2M_SETSPI_NS;
		*data = sc->sc_bspi + i;
		sc->sc_spi[i] = cookie;
		/* cookie is the slot's address so disestablish can clear it */
		return &sc->sc_spi[i];
	}

	return NULL;
}

/* Tear down an MSI and release its SPI slot back to the frame. */
void
ampintc_intr_disestablish_msi(void *cookie)
{
	ampintc_intr_disestablish(*(void **)cookie);
	*(void **)cookie = NULL;
}

#ifdef MULTIPROCESSOR
/* IPI handler: drop into the kernel debugger on this CPU. */
int
ampintc_ipi_ddb(void *v)
{
	/* XXX */
	db_enter();
	return 1;
}

int
ampintc_ipi_nop(void *v)
{
	/* Nothing to do here, just enough to wake up from WFI */
	return 1;
}

/*
 * Shared IPI handler used when only one SGI is available: demultiplex
 * via the per-CPU sc_ipi_reason set by ampintc_send_ipi().
 */
int
ampintc_ipi_combined(void *v)
{
	struct ampintc_softc *sc = (struct ampintc_softc *)v;

	if (sc->sc_ipi_reason[cpu_number()] == ARM_IPI_DDB) {
		sc->sc_ipi_reason[cpu_number()] = ARM_IPI_NOP;
		return ampintc_ipi_ddb(v);
	} else {
		return ampintc_ipi_nop(v);
	}
}

/*
 * Raise an IPI on the target CPU by writing its target mask and the
 * SGI number for `id' to the software generated interrupt register.
 */
void
ampintc_send_ipi(struct cpu_info *ci, int id)
{
	struct ampintc_softc	*sc = ampintc;
	int sendmask;

	if (ci == curcpu() && id == ARM_IPI_NOP)
		return;

	/* never overwrite IPI_DDB with IPI_NOP */
	if (id == ARM_IPI_DDB)
		sc->sc_ipi_reason[ci->ci_cpuid] = id;

	/* currently will only send to one cpu */
	sendmask = 1 << (16 + ci->ci_cpuid);
	sendmask |= sc->sc_ipi_num[id];

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_SGIR, sendmask);
}
#endif