/* $NetBSD: gicv3.c,v 1.36 2020/12/04 21:39:26 jmcneill Exp $ */

/*-
 * Copyright (c) 2018 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_multiprocessor.h"

#define	_INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gicv3.c,v 1.36 2020/12/04 21:39:26 jmcneill Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/vmem.h>
#include <sys/atomic.h>

#include <machine/cpufunc.h>

#include <arm/locore.h>
#include <arm/armreg.h>

#include <arm/cortex/gicv3.h>
#include <arm/cortex/gic_reg.h>

#define	PICTOSOFTC(pic) \
	((void *)((uintptr_t)(pic) - offsetof(struct gicv3_softc, sc_pic)))
#define	LPITOSOFTC(lpi) \
	((void *)((uintptr_t)(lpi) - offsetof(struct gicv3_softc, sc_lpi)))

#define	IPL_TO_PRIORITY(sc, ipl)	(((0xff - (ipl)) << (sc)->sc_priority_shift) & 0xff)
#define	IPL_TO_PMR(sc, ipl)		(((0xff - (ipl)) << (sc)->sc_pmr_shift) & 0xff)

#define	GIC_SUPPORTS_1OFN(sc)		(((sc)->sc_gicd_typer & GICD_TYPER_No1N) == 0)

#define	GIC_PRIO_SHIFT_NS	4
#define	GIC_PRIO_SHIFT_S	3

static struct gicv3_softc *gicv3_softc;

static inline uint32_t
gicd_read_4(struct gicv3_softc *sc, bus_size_t reg)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh_d, reg);
}

static inline void
gicd_write_4(struct gicv3_softc *sc, bus_size_t reg, uint32_t val)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_d, reg, val);
}

static inline uint64_t
gicd_read_8(struct gicv3_softc *sc, bus_size_t reg)
{
	return bus_space_read_8(sc->sc_bst, sc->sc_bsh_d, reg);
}

static inline void
gicd_write_8(struct gicv3_softc *sc, bus_size_t reg, uint64_t val)
{
	bus_space_write_8(sc->sc_bst, sc->sc_bsh_d, reg, val);
}

static inline uint32_t
gicr_read_4(struct gicv3_softc *sc, u_int index, bus_size_t reg)
{
	KASSERT(index < sc->sc_bsh_r_count);
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh_r[index], reg);
}

static inline void
gicr_write_4(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint32_t val)
{
	KASSERT(index < sc->sc_bsh_r_count);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
}

static inline uint64_t
gicr_read_8(struct gicv3_softc *sc, u_int index, bus_size_t reg)
{
	KASSERT(index < sc->sc_bsh_r_count);
	return bus_space_read_8(sc->sc_bst, sc->sc_bsh_r[index], reg);
}

static inline void
gicr_write_8(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint64_t val)
{
	KASSERT(index < sc->sc_bsh_r_count);
	bus_space_write_8(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
}
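
/*
 * Note: INTIDs 0-31 (SGIs and PPIs) are banked per-PE and are programmed
 * through that PE's redistributor (GICR_*) registers, while INTIDs 32 and
 * up (SPIs) are programmed through the shared distributor (GICD_*)
 * registers; the "group == 0" tests below select between the two. The
 * IPL_TO_PRIORITY/IPL_TO_PMR macros invert the IPL because the GIC treats
 * numerically lower priority values as more important, and the per-softc
 * shift accounts for whether we have a secure or non-secure view of the
 * priority fields (see gicv3_init).
 */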

static void
gicv3_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	struct cpu_info * const ci = curcpu();
	const u_int group = irqbase / 32;

	if (group == 0) {
		atomic_or_32(&sc->sc_enabled_sgippi, mask);
		gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, mask);
		while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
			;
	} else {
		gicd_write_4(sc, GICD_ISENABLERn(group), mask);
		while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
			;
	}
}

static void
gicv3_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	struct cpu_info * const ci = curcpu();
	const u_int group = irqbase / 32;

	if (group == 0) {
		atomic_and_32(&sc->sc_enabled_sgippi, ~mask);
		gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, mask);
		while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
			;
	} else {
		gicd_write_4(sc, GICD_ICENABLERn(group), mask);
		while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
			;
	}
}

static void
gicv3_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	const u_int group = is->is_irq / 32;
	uint32_t ipriority, icfg;
	uint64_t irouter;
	u_int n;

	const u_int ipriority_val = IPL_TO_PRIORITY(sc, is->is_ipl);
	const u_int ipriority_shift = (is->is_irq & 0x3) * 8;
	const u_int icfg_shift = (is->is_irq & 0xf) * 2;

	if (group == 0) {
		/* SGIs and PPIs are always MP-safe */
		is->is_mpsafe = true;

		/* Update interrupt configuration and priority on all redistributors */
		for (n = 0; n < sc->sc_bsh_r_count; n++) {
			icfg = gicr_read_4(sc, n, GICR_ICFGRn(is->is_irq / 16));
			if (is->is_type == IST_LEVEL)
				icfg &= ~(0x2 << icfg_shift);
			if (is->is_type == IST_EDGE)
				icfg |= (0x2 << icfg_shift);
			gicr_write_4(sc, n, GICR_ICFGRn(is->is_irq / 16), icfg);

			ipriority = gicr_read_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4));
			ipriority &= ~(0xffU << ipriority_shift);
			ipriority |= (ipriority_val << ipriority_shift);
			gicr_write_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4), ipriority);
		}
	} else {
		/*
		 * If 1 of N SPI routing is supported, route MP-safe interrupts to all
		 * participating PEs. Otherwise, just route to the primary PE.
		 */
		if (is->is_mpsafe && GIC_SUPPORTS_1OFN(sc)) {
			irouter = GICD_IROUTER_Interrupt_Routing_mode;
		} else {
			irouter = sc->sc_irouter[0];
		}
		gicd_write_8(sc, GICD_IROUTER(is->is_irq), irouter);

		/* Update interrupt configuration */
		icfg = gicd_read_4(sc, GICD_ICFGRn(is->is_irq / 16));
		if (is->is_type == IST_LEVEL)
			icfg &= ~(0x2 << icfg_shift);
		if (is->is_type == IST_EDGE)
			icfg |= (0x2 << icfg_shift);
		gicd_write_4(sc, GICD_ICFGRn(is->is_irq / 16), icfg);

		/* Update interrupt priority */
		ipriority = gicd_read_4(sc, GICD_IPRIORITYRn(is->is_irq / 4));
		ipriority &= ~(0xffU << ipriority_shift);
		ipriority |= (ipriority_val << ipriority_shift);
		gicd_write_4(sc, GICD_IPRIORITYRn(is->is_irq / 4), ipriority);
	}
}
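
/*
 * Note: ICC_PMR_EL1 (written by gicv3_set_priority below) is the per-PE
 * priority mask; the GIC only signals interrupts whose priority value is
 * numerically lower than the mask. Raising the IPL therefore lowers the
 * mask value written to the register.
 */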

static void
gicv3_set_priority(struct pic_softc *pic, int ipl)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);

	icc_pmr_write(IPL_TO_PMR(sc, ipl));
}

static void
gicv3_dist_enable(struct gicv3_softc *sc)
{
	uint32_t gicd_ctrl;
	u_int n;

	/* Disable the distributor */
	gicd_ctrl = gicd_read_4(sc, GICD_CTRL);
	gicd_ctrl &= ~(GICD_CTRL_EnableGrp1A | GICD_CTRL_ARE_NS);
	gicd_write_4(sc, GICD_CTRL, gicd_ctrl);

	/* Wait for register write to complete */
	while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
		;

	/* Clear all INTID enable bits */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32)
		gicd_write_4(sc, GICD_ICENABLERn(n / 32), ~0);

	/* Set default priorities to lowest */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 4)
		gicd_write_4(sc, GICD_IPRIORITYRn(n / 4), ~0);

	/* Set all interrupts to G1NS */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32) {
		gicd_write_4(sc, GICD_IGROUPRn(n / 32), ~0);
		gicd_write_4(sc, GICD_IGRPMODRn(n / 32), 0);
	}

	/* Set all interrupts level-sensitive by default */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 16)
		gicd_write_4(sc, GICD_ICFGRn(n / 16), 0);

	/* Wait for register writes to complete */
	while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
		;

	/* Enable Affinity routing and G1NS interrupts */
	gicd_ctrl = GICD_CTRL_EnableGrp1A | GICD_CTRL_ARE_NS;
	gicd_write_4(sc, GICD_CTRL, gicd_ctrl);
}

static void
gicv3_redist_enable(struct gicv3_softc *sc, struct cpu_info *ci)
{
	uint32_t icfg;
	u_int n, o;

	/* Clear INTID enable bits */
	gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, ~0);

	/* Wait for register write to complete */
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
		;

	/* Set default priorities */
	for (n = 0; n < 32; n += 4) {
		uint32_t priority = 0;
		size_t byte_shift = 0;
		for (o = 0; o < 4; o++, byte_shift += 8) {
			struct intrsource * const is = sc->sc_pic.pic_sources[n + o];
			if (is == NULL)
				priority |= (0xffU << byte_shift);
			else {
				const u_int ipriority_val = IPL_TO_PRIORITY(sc, is->is_ipl);
				priority |= ipriority_val << byte_shift;
			}
		}
		gicr_write_4(sc, ci->ci_gic_redist, GICR_IPRIORITYRn(n / 4), priority);
	}

	/* Set all interrupts to G1NS */
	gicr_write_4(sc, ci->ci_gic_redist, GICR_IGROUPR0, ~0);
	gicr_write_4(sc, ci->ci_gic_redist, GICR_IGRPMODR0, 0);

	/* Restore PPI configs */
	for (n = 0, icfg = 0; n < 16; n++) {
		struct intrsource * const is = sc->sc_pic.pic_sources[16 + n];
		if (is != NULL && is->is_type == IST_EDGE)
			icfg |= (0x2 << (n * 2));
	}
	gicr_write_4(sc, ci->ci_gic_redist, GICR_ICFGRn(1), icfg);

	/* Restore current enable bits */
	gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, sc->sc_enabled_sgippi);

	/* Wait for register write to complete */
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
		;
}

static uint64_t
gicv3_cpu_identity(void)
{
	u_int aff3, aff2, aff1, aff0;

	const register_t mpidr = cpu_mpidr_aff_read();
	aff0 = __SHIFTOUT(mpidr, MPIDR_AFF0);
	aff1 = __SHIFTOUT(mpidr, MPIDR_AFF1);
	aff2 = __SHIFTOUT(mpidr, MPIDR_AFF2);
	aff3 = __SHIFTOUT(mpidr, MPIDR_AFF3);

	return __SHIFTIN(aff0, GICR_TYPER_Affinity_Value_Aff0) |
	    __SHIFTIN(aff1, GICR_TYPER_Affinity_Value_Aff1) |
	    __SHIFTIN(aff2, GICR_TYPER_Affinity_Value_Aff2) |
	    __SHIFTIN(aff3, GICR_TYPER_Affinity_Value_Aff3);
}
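
/*
 * Note: with affinity routing enabled, each PE owns exactly one
 * redistributor and the mapping is fixed by the hardware/firmware, so
 * gicv3_find_redist() locates ours by matching this PE's MPIDR affinity
 * fields against the affinity value advertised in each redistributor's
 * GICR_TYPER register.
 */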

static u_int
gicv3_find_redist(struct gicv3_softc *sc)
{
	uint64_t gicr_typer;
	u_int n;

	const uint64_t cpu_identity = gicv3_cpu_identity();

	for (n = 0; n < sc->sc_bsh_r_count; n++) {
		gicr_typer = gicr_read_8(sc, n, GICR_TYPER);
		if ((gicr_typer & GICR_TYPER_Affinity_Value) == cpu_identity)
			return n;
	}

	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);

	panic("%s: could not find GICv3 redistributor for cpu %d.%d.%d.%d",
	    cpu_name(curcpu()), aff3, aff2, aff1, aff0);
}

static uint64_t
gicv3_sgir(struct gicv3_softc *sc)
{
	const uint64_t cpu_identity = gicv3_cpu_identity();

	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);

	return __SHIFTIN(__BIT(aff0), ICC_SGIR_EL1_TargetList) |
	    __SHIFTIN(aff1, ICC_SGIR_EL1_Aff1) |
	    __SHIFTIN(aff2, ICC_SGIR_EL1_Aff2) |
	    __SHIFTIN(aff3, ICC_SGIR_EL1_Aff3);
}

static void
gicv3_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	uint32_t icc_sre, icc_ctlr, gicr_waker;

	evcnt_attach_dynamic(&ci->ci_intr_preempt, EVCNT_TYPE_MISC, NULL,
	    ci->ci_cpuname, "intr preempt");

	ci->ci_gic_redist = gicv3_find_redist(sc);
	ci->ci_gic_sgir = gicv3_sgir(sc);

	/* Store route to CPU for SPIs */
	const uint64_t cpu_identity = gicv3_cpu_identity();
	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);
	sc->sc_irouter[cpu_index(ci)] =
	    __SHIFTIN(aff0, GICD_IROUTER_Aff0) |
	    __SHIFTIN(aff1, GICD_IROUTER_Aff1) |
	    __SHIFTIN(aff2, GICD_IROUTER_Aff2) |
	    __SHIFTIN(aff3, GICD_IROUTER_Aff3);

	/* Enable System register access and disable IRQ/FIQ bypass */
	icc_sre = ICC_SRE_EL1_SRE | ICC_SRE_EL1_DFB | ICC_SRE_EL1_DIB;
	icc_sre_write(icc_sre);

	/* Mark the connected PE as being awake */
	gicr_waker = gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER);
	gicr_waker &= ~GICR_WAKER_ProcessorSleep;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_WAKER, gicr_waker);
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER) & GICR_WAKER_ChildrenAsleep)
		;

	/* Set initial priority mask */
	gicv3_set_priority(pic, IPL_HIGH);

	/* Set the binary point field to the minimum value */
	icc_bpr1_write(0);

	/* Enable group 1 interrupt signaling */
	icc_igrpen1_write(ICC_IGRPEN_EL1_Enable);

	/* Set EOI mode: EOIR writes both drop priority and deactivate */
	icc_ctlr = icc_ctlr_read();
	icc_ctlr &= ~ICC_CTLR_EL1_EOImode;
	icc_ctlr_write(icc_ctlr);

	/* Enable redistributor */
	gicv3_redist_enable(sc, ci);

	/* Allow IRQ exceptions */
	cpsie(I32_bit);
}
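
/*
 * Note: ICC_SGI1R_EL1 addresses its targets as a cluster (Aff3.Aff2.Aff1)
 * plus a 16-bit TargetList bitmap of Aff0 values within that cluster,
 * which is why gicv3_sgir() precomputes __BIT(aff0) rather than the raw
 * affinity. Setting the IRM bit instead broadcasts the SGI to every PE
 * other than the sender.
 */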

#ifdef MULTIPROCESSOR
static void
gicv3_ipi_send(struct pic_softc *pic, const kcpuset_t *kcp, u_long ipi)
{
	struct cpu_info *ci;
	uint64_t sgir;

	sgir = __SHIFTIN(ipi, ICC_SGIR_EL1_INTID);
	if (kcp == NULL) {
		/* Interrupts routed to all PEs, excluding "self" */
		if (ncpu == 1)
			return;
		sgir |= ICC_SGIR_EL1_IRM;
	} else {
		/* Interrupt to exactly one PE */
		ci = cpu_lookup(kcpuset_ffs(kcp) - 1);
		if (ci == curcpu())
			return;
		sgir |= ci->ci_gic_sgir;
	}
	icc_sgi1r_write(sgir);
	isb();
}

static void
gicv3_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	const size_t group = irq / 32;
	int n;

	kcpuset_zero(affinity);
	if (group == 0) {
		/* All CPUs are targets for group 0 (SGI/PPI) */
		for (n = 0; n < ncpu; n++) {
			if (sc->sc_irouter[n] != UINT64_MAX)
				kcpuset_set(affinity, n);
		}
	} else {
		/* Find distributor targets (SPI) */
		const uint64_t irouter = gicd_read_8(sc, GICD_IROUTER(irq));
		for (n = 0; n < ncpu; n++) {
			if (irouter == GICD_IROUTER_Interrupt_Routing_mode ||
			    irouter == sc->sc_irouter[n])
				kcpuset_set(affinity, n);
		}
	}
}

static int
gicv3_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	const size_t group = irq / 32;
	uint64_t irouter;

	if (group == 0)
		return EINVAL;

	const int set = kcpuset_countset(affinity);
	if (set == 1) {
		irouter = sc->sc_irouter[kcpuset_ffs(affinity) - 1];
	} else if (set == ncpu && GIC_SUPPORTS_1OFN(sc)) {
		irouter = GICD_IROUTER_Interrupt_Routing_mode;
	} else {
		return EINVAL;
	}

	gicd_write_8(sc, GICD_IROUTER(irq), irouter);

	return 0;
}
#endif

static const struct pic_ops gicv3_picops = {
	.pic_unblock_irqs = gicv3_unblock_irqs,
	.pic_block_irqs = gicv3_block_irqs,
	.pic_establish_irq = gicv3_establish_irq,
	.pic_set_priority = gicv3_set_priority,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = gicv3_cpu_init,
	.pic_ipi_send = gicv3_ipi_send,
	.pic_get_affinity = gicv3_get_affinity,
	.pic_set_affinity = gicv3_set_affinity,
#endif
};
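
/*
 * Note: LPI state lives in memory rather than in GIC registers. The
 * configuration table holds one byte per LPI (priority in the upper bits
 * plus an enable bit), so enabling, disabling, or reprioritizing an LPI
 * is a byte update followed by either an explicit cache clean (when the
 * redistributor does not snoop our caches: sc_lpiconf_flush) or a store
 * barrier.
 */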

static void
gicv3_lpi_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	int bit;

	while ((bit = ffs(mask)) != 0) {
		sc->sc_lpiconf.base[irqbase + bit - 1] |= GIC_LPICONF_Enable;
		if (sc->sc_lpiconf_flush)
			cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[irqbase + bit - 1], 1);
		mask &= ~__BIT(bit - 1);
	}

	if (!sc->sc_lpiconf_flush)
		dsb(ishst);
}

static void
gicv3_lpi_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	int bit;

	while ((bit = ffs(mask)) != 0) {
		sc->sc_lpiconf.base[irqbase + bit - 1] &= ~GIC_LPICONF_Enable;
		if (sc->sc_lpiconf_flush)
			cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[irqbase + bit - 1], 1);
		mask &= ~__BIT(bit - 1);
	}

	if (!sc->sc_lpiconf_flush)
		dsb(ishst);
}

static void
gicv3_lpi_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);

	sc->sc_lpiconf.base[is->is_irq] = IPL_TO_PRIORITY(sc, is->is_ipl) | GIC_LPICONF_Res1;

	if (sc->sc_lpiconf_flush)
		cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[is->is_irq], 1);
	else
		dsb(ishst);
}
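
/*
 * Note: the GICR_PROPBASER/GICR_PENDBASER writes below are read back to
 * see which memory attributes the redistributor actually accepted. If it
 * downgrades the requested inner-shareable attribute to non-shareable,
 * it will not snoop our caches, so the tables are remapped non-cacheable
 * and sc_lpiconf_flush is set to force a cache clean on every LPI
 * configuration change.
 */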

static void
gicv3_lpi_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;
	uint64_t propbase, pendbase;
	uint32_t ctlr;

	/* If physical LPIs are not supported on this redistributor, just return. */
	const uint64_t typer = gicr_read_8(sc, ci->ci_gic_redist, GICR_TYPER);
	if ((typer & GICR_TYPER_PLPIS) == 0)
		return;

	/* Interrupt target address for this CPU, used by ITS when GITS_TYPER.PTA == 0 */
	sc->sc_processor_id[cpu_index(ci)] = __SHIFTOUT(typer, GICR_TYPER_Processor_Number);

	/* Disable LPIs before making changes */
	ctlr = gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR);
	ctlr &= ~GICR_CTLR_Enable_LPIs;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_CTLR, ctlr);
	dsb(sy);

	/* Setup the LPI configuration table */
	propbase = sc->sc_lpiconf.segs[0].ds_addr |
	    __SHIFTIN(ffs(pic->pic_maxsources) - 1, GICR_PROPBASER_IDbits) |
	    __SHIFTIN(GICR_Shareability_IS, GICR_PROPBASER_Shareability) |
	    __SHIFTIN(GICR_Cache_NORMAL_RA_WA_WB, GICR_PROPBASER_InnerCache);
	gicr_write_8(sc, ci->ci_gic_redist, GICR_PROPBASER, propbase);
	propbase = gicr_read_8(sc, ci->ci_gic_redist, GICR_PROPBASER);
	if (__SHIFTOUT(propbase, GICR_PROPBASER_Shareability) != GICR_Shareability_IS) {
		if (__SHIFTOUT(propbase, GICR_PROPBASER_Shareability) == GICR_Shareability_NS) {
			propbase &= ~GICR_PROPBASER_Shareability;
			propbase |= __SHIFTIN(GICR_Shareability_NS, GICR_PROPBASER_Shareability);
			propbase &= ~GICR_PROPBASER_InnerCache;
			propbase |= __SHIFTIN(GICR_Cache_NORMAL_NC, GICR_PROPBASER_InnerCache);
			gicr_write_8(sc, ci->ci_gic_redist, GICR_PROPBASER, propbase);
		}
		sc->sc_lpiconf_flush = true;
	}

	/* Setup the LPI pending table */
	pendbase = sc->sc_lpipend[cpu_index(ci)].segs[0].ds_addr |
	    __SHIFTIN(GICR_Shareability_IS, GICR_PENDBASER_Shareability) |
	    __SHIFTIN(GICR_Cache_NORMAL_RA_WA_WB, GICR_PENDBASER_InnerCache);
	gicr_write_8(sc, ci->ci_gic_redist, GICR_PENDBASER, pendbase);
	pendbase = gicr_read_8(sc, ci->ci_gic_redist, GICR_PENDBASER);
	if (__SHIFTOUT(pendbase, GICR_PENDBASER_Shareability) == GICR_Shareability_NS) {
		pendbase &= ~GICR_PENDBASER_Shareability;
		pendbase |= __SHIFTIN(GICR_Shareability_NS, GICR_PENDBASER_Shareability);
		pendbase &= ~GICR_PENDBASER_InnerCache;
		pendbase |= __SHIFTIN(GICR_Cache_NORMAL_NC, GICR_PENDBASER_InnerCache);
		gicr_write_8(sc, ci->ci_gic_redist, GICR_PENDBASER, pendbase);
	}

	/* Enable LPIs */
	ctlr = gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR);
	ctlr |= GICR_CTLR_Enable_LPIs;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_CTLR, ctlr);
	dsb(sy);

	/* Setup ITS if present */
	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list)
		cb->cpu_init(cb->priv, ci);
}

#ifdef MULTIPROCESSOR
static void
gicv3_lpi_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;

	kcpuset_zero(affinity);
	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list)
		cb->get_affinity(cb->priv, irq, affinity);
}

static int
gicv3_lpi_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;
	int error = EINVAL;

	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list) {
		error = cb->set_affinity(cb->priv, irq, affinity);
		if (error != EPASSTHROUGH)
			return error;
	}

	return EINVAL;
}
#endif

static const struct pic_ops gicv3_lpiops = {
	.pic_unblock_irqs = gicv3_lpi_unblock_irqs,
	.pic_block_irqs = gicv3_lpi_block_irqs,
	.pic_establish_irq = gicv3_lpi_establish_irq,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = gicv3_lpi_cpu_init,
	.pic_get_affinity = gicv3_lpi_get_affinity,
	.pic_set_affinity = gicv3_lpi_set_affinity,
#endif
};

void
gicv3_dma_alloc(struct gicv3_softc *sc, struct gicv3_dma *dma, bus_size_t len, bus_size_t align)
{
	int nsegs, error;

	dma->len = len;
	error = bus_dmamem_alloc(sc->sc_dmat, dma->len, align, 0, dma->segs, 1, &nsegs, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_alloc failed: %d", error);
	error = bus_dmamem_map(sc->sc_dmat, dma->segs, nsegs, len, (void **)&dma->base, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_map failed: %d", error);
	error = bus_dmamap_create(sc->sc_dmat, len, 1, len, 0, BUS_DMA_WAITOK, &dma->map);
	if (error)
		panic("bus_dmamap_create failed: %d", error);
	error = bus_dmamap_load(sc->sc_dmat, dma->map, dma->base, dma->len, NULL, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamap_load failed: %d", error);

	memset(dma->base, 0, dma->len);
	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, dma->len, BUS_DMASYNC_PREWRITE);
}

static void
gicv3_lpi_init(struct gicv3_softc *sc)
{
	/*
	 * Allocate LPI configuration table
	 */
	gicv3_dma_alloc(sc, &sc->sc_lpiconf, sc->sc_lpi.pic_maxsources, 0x1000);
	KASSERT((sc->sc_lpiconf.segs[0].ds_addr & ~GICR_PROPBASER_Physical_Address) == 0);

	/*
	 * Allocate LPI pending tables: one bit per interrupt ID, covering
	 * the first 8192 non-LPI IDs as well, hence the (8192 + maxsources)
	 * sizing, aligned to 64KB as required by GICR_PENDBASER.
	 */
	const bus_size_t lpipend_sz = (8192 + sc->sc_lpi.pic_maxsources) / NBBY;
	for (int cpuindex = 0; cpuindex < ncpu; cpuindex++) {
		gicv3_dma_alloc(sc, &sc->sc_lpipend[cpuindex], lpipend_sz, 0x10000);
		KASSERT((sc->sc_lpipend[cpuindex].segs[0].ds_addr & ~GICR_PENDBASER_Physical_Address) == 0);
	}
}
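
/*
 * Note: the handler below loops on ICC_IAR1_EL1 until it reads the
 * spurious INTID (1023). Level-triggered sources must not be EOI'd until
 * the handler has cleared the level, or (with EOImode clear) deactivation
 * would immediately re-pend them; edge-triggered sources are safe to EOI
 * up front so a re-asserted edge can pend again during dispatch. IRQs are
 * re-enabled around pic_dispatch() with ICC_PMR_EL1 set to the source's
 * IPL, so only higher-priority interrupts preempt (counted by
 * ci_intr_preempt).
 */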

void
gicv3_irq_handler(void *frame)
{
	struct cpu_info * const ci = curcpu();
	struct gicv3_softc * const sc = gicv3_softc;
	struct pic_softc *pic;
	const int oldipl = ci->ci_cpl;

	ci->ci_data.cpu_nintr++;

	for (;;) {
		const uint32_t iar = icc_iar1_read();
		dsb(sy);
		const uint32_t irq = __SHIFTOUT(iar, ICC_IAR_INTID);
		if (irq == ICC_IAR_INTID_SPURIOUS)
			break;

		pic = irq >= GIC_LPI_BASE ? &sc->sc_lpi : &sc->sc_pic;
		if (irq - pic->pic_irqbase >= pic->pic_maxsources)
			continue;

		struct intrsource * const is = pic->pic_sources[irq - pic->pic_irqbase];
		KASSERT(is != NULL);

		const bool early_eoi = irq < GIC_LPI_BASE && is->is_type == IST_EDGE;

		const int ipl = is->is_ipl;
		if (__predict_false(ipl < ci->ci_cpl)) {
			pic_do_pending_ints(I32_bit, ipl, frame);
		} else if (ci->ci_cpl != ipl) {
			gicv3_set_priority(pic, ipl);
			ci->ci_cpl = ipl;
		}

		if (early_eoi) {
			icc_eoi1r_write(iar);
			isb();
		}

		const int64_t nintr = ci->ci_data.cpu_nintr;

		cpsie(I32_bit);
		pic_dispatch(is, frame);
		cpsid(I32_bit);

		if (nintr != ci->ci_data.cpu_nintr)
			ci->ci_intr_preempt.ev_count++;

		if (!early_eoi) {
			icc_eoi1r_write(iar);
			isb();
		}
	}

	pic_do_pending_ints(I32_bit, oldipl, frame);
}

static bool
gicv3_cpuif_is_nonsecure(struct gicv3_softc *sc)
{
	/*
	 * Write 0 to bit7 and see if it sticks. This is only possible if
	 * we have a non-secure view of the PMR register.
	 */
	const uint32_t opmr = icc_pmr_read();
	icc_pmr_write(0);
	const uint32_t npmr = icc_pmr_read();
	icc_pmr_write(opmr);

	return (npmr & GICC_PMR_NONSECURE) == 0;
}

static bool
gicv3_dist_is_nonsecure(struct gicv3_softc *sc)
{
	const uint32_t gicd_ctrl = gicd_read_4(sc, GICD_CTRL);

	/*
	 * If security is enabled, we have a non-secure view of the IPRIORITYRn
	 * registers and LPI configuration priority fields.
	 */
	return (gicd_ctrl & GICD_CTRL_DS) == 0;
}

/*
 * Rockchip RK3399 provides a different view of the interrupt priority
 * registers depending on which firmware is in use. This is hard to detect
 * in a way that could possibly break other boards, so only do this
 * detection if we know we are on an RK3399 SoC.
 */
static void
gicv3_quirk_rockchip_rk3399(struct gicv3_softc *sc)
{
	/* Detect the number of supported PMR bits */
	icc_pmr_write(0xff);
	const uint8_t pmrbits = icc_pmr_read();

	/* Detect the number of supported IPRIORITYRn bits */
	const uint32_t oiprio = gicd_read_4(sc, GICD_IPRIORITYRn(8));
	gicd_write_4(sc, GICD_IPRIORITYRn(8), oiprio | 0xff);
	const uint8_t pribits = gicd_read_4(sc, GICD_IPRIORITYRn(8)) & 0xff;
	gicd_write_4(sc, GICD_IPRIORITYRn(8), oiprio);

	/*
	 * If we see fewer PMR bits than IPRIORITYRn bits here, it means
	 * we have a secure view of IPRIORITYRn (this is not supposed to
	 * happen!).
	 */
	if (pmrbits < pribits) {
		aprint_verbose_dev(sc->sc_dev,
		    "buggy RK3399 firmware detected; applying workaround\n");
		sc->sc_priority_shift = GIC_PRIO_SHIFT_S;
	}
}
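
/*
 * Note: when the GIC implements two security states, a non-secure access
 * to a priority field sees one fewer implemented priority bit than a
 * secure access (the value is effectively shifted by one), hence
 * GIC_PRIO_SHIFT_NS (4) versus GIC_PRIO_SHIFT_S (3) in the selection
 * below, sized for the architectural minimum of five priority bits.
 */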

int
gicv3_init(struct gicv3_softc *sc)
{
	int n;

	KASSERT(CPU_IS_PRIMARY(curcpu()));

	LIST_INIT(&sc->sc_lpi_callbacks);

	for (n = 0; n < MAXCPUS; n++)
		sc->sc_irouter[n] = UINT64_MAX;

	sc->sc_gicd_typer = gicd_read_4(sc, GICD_TYPER);

	/*
	 * We don't always have a consistent view of priorities between the
	 * CPU interface (ICC_PMR_EL1) and the GICD/GICR registers. Detect
	 * if we are making secure or non-secure accesses to each, and adjust
	 * the values that we write to each accordingly.
	 */
	const bool dist_ns = gicv3_dist_is_nonsecure(sc);
	sc->sc_priority_shift = dist_ns ? GIC_PRIO_SHIFT_NS : GIC_PRIO_SHIFT_S;
	const bool cpuif_ns = gicv3_cpuif_is_nonsecure(sc);
	sc->sc_pmr_shift = cpuif_ns ? GIC_PRIO_SHIFT_NS : GIC_PRIO_SHIFT_S;

	if ((sc->sc_quirks & GICV3_QUIRK_RK3399) != 0)
		gicv3_quirk_rockchip_rk3399(sc);

	aprint_verbose_dev(sc->sc_dev,
	    "iidr 0x%08x, cpuif %ssecure, dist %ssecure, "
	    "priority shift %d, pmr shift %d, quirks %#x\n",
	    gicd_read_4(sc, GICD_IIDR),
	    cpuif_ns ? "non-" : "",
	    dist_ns ? "non-" : "",
	    sc->sc_priority_shift,
	    sc->sc_pmr_shift,
	    sc->sc_quirks);

	sc->sc_pic.pic_ops = &gicv3_picops;
	sc->sc_pic.pic_maxsources = GICD_TYPER_LINES(sc->sc_gicd_typer);
	snprintf(sc->sc_pic.pic_name, sizeof(sc->sc_pic.pic_name), "gicv3");
#ifdef MULTIPROCESSOR
	sc->sc_pic.pic_cpus = kcpuset_running;
#endif
	pic_add(&sc->sc_pic, 0);

	if ((sc->sc_gicd_typer & GICD_TYPER_LPIS) != 0) {
		sc->sc_lpi.pic_ops = &gicv3_lpiops;
		sc->sc_lpi.pic_maxsources = 8192;	/* Min. required by GICv3 spec */
		snprintf(sc->sc_lpi.pic_name, sizeof(sc->sc_lpi.pic_name), "gicv3-lpi");
		pic_add(&sc->sc_lpi, GIC_LPI_BASE);

		sc->sc_lpi_pool = vmem_create("gicv3-lpi", 0, sc->sc_lpi.pic_maxsources,
		    1, NULL, NULL, NULL, 0, VM_SLEEP, IPL_HIGH);
		if (sc->sc_lpi_pool == NULL)
			panic("failed to create gicv3 lpi pool\n");

		gicv3_lpi_init(sc);
	}

	KASSERT(gicv3_softc == NULL);
	gicv3_softc = sc;

	for (int i = 0; i < sc->sc_bsh_r_count; i++) {
		const uint64_t gicr_typer = gicr_read_8(sc, i, GICR_TYPER);
		const u_int aff0 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff0);
		const u_int aff1 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff1);
		const u_int aff2 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff2);
		const u_int aff3 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff3);

		aprint_debug_dev(sc->sc_dev, "redist %d: cpu %d.%d.%d.%d\n",
		    i, aff3, aff2, aff1, aff0);
	}

	gicv3_dist_enable(sc);

	gicv3_cpu_init(&sc->sc_pic, curcpu());
	if ((sc->sc_gicd_typer & GICD_TYPER_LPIS) != 0)
		gicv3_lpi_cpu_init(&sc->sc_lpi, curcpu());

#ifdef MULTIPROCESSOR
	intr_establish_xname(IPI_AST, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_ast, (void *)-1, "IPI ast");
	intr_establish_xname(IPI_XCALL, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_xcall, (void *)-1, "IPI xcall");
	intr_establish_xname(IPI_GENERIC, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_generic, (void *)-1, "IPI generic");
	intr_establish_xname(IPI_NOP, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_nop, (void *)-1, "IPI nop");
	intr_establish_xname(IPI_SHOOTDOWN, IPL_SCHED, IST_MPSAFE | IST_EDGE, pic_ipi_shootdown, (void *)-1, "IPI shootdown");
#ifdef DDB
	intr_establish_xname(IPI_DDB, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_ddb, NULL, "IPI ddb");
#endif
#ifdef __HAVE_PREEMPTION
	intr_establish_xname(IPI_KPREEMPT, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_kpreempt, (void *)-1, "IPI kpreempt");
#endif
#endif

	return 0;
}