/* $NetBSD: gicv3.c,v 1.13 2018/11/23 11:49:04 jmcneill Exp $ */

/*-
 * Copyright (c) 2018 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_multiprocessor.h"

#define	_INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gicv3.c,v 1.13 2018/11/23 11:49:04 jmcneill Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/cpu.h>

#include <arm/locore.h>
#include <arm/armreg.h>

#include <arm/cortex/gicv3.h>
#include <arm/cortex/gic_reg.h>

/* Recover the containing gicv3_softc from an embedded pic_softc */
#define	PICTOSOFTC(pic) \
	((void *)((uintptr_t)(pic) - offsetof(struct gicv3_softc, sc_pic)))
#define	LPITOSOFTC(lpi) \
	((void *)((uintptr_t)(lpi) - offsetof(struct gicv3_softc, sc_lpi)))

/* Map a kernel IPL to a GIC priority value (higher IPL == lower number) */
#define	IPL_TO_PRIORITY(ipl)	((IPL_HIGH - (ipl)) << 4)

static struct gicv3_softc *gicv3_softc;

static inline uint32_t
gicd_read_4(struct gicv3_softc *sc, bus_size_t reg)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh_d, reg);
}

static inline void
gicd_write_4(struct gicv3_softc *sc, bus_size_t reg, uint32_t val)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_d, reg, val);
}

static inline uint64_t
gicd_read_8(struct gicv3_softc *sc, bus_size_t reg)
{
	return bus_space_read_8(sc->sc_bst, sc->sc_bsh_d, reg);
}

static inline void
gicd_write_8(struct gicv3_softc *sc, bus_size_t reg, uint64_t val)
{
	bus_space_write_8(sc->sc_bst, sc->sc_bsh_d, reg, val);
}

static inline uint32_t
gicr_read_4(struct gicv3_softc *sc, u_int index, bus_size_t reg)
{
	KASSERT(index < sc->sc_bsh_r_count);
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh_r[index], reg);
}

static inline void
gicr_write_4(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint32_t val)
{
	KASSERT(index < sc->sc_bsh_r_count);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
}

static inline uint64_t
gicr_read_8(struct gicv3_softc *sc, u_int index, bus_size_t reg)
{
	KASSERT(index < sc->sc_bsh_r_count);
	return bus_space_read_8(sc->sc_bst, sc->sc_bsh_r[index], reg);
}

static inline void
gicr_write_8(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint64_t val)
{
	KASSERT(index < sc->sc_bsh_r_count);
	bus_space_write_8(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
}

static void
gicv3_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	struct cpu_info * const ci = curcpu();
	const u_int group = irqbase / 32;

	if (group == 0) {
		sc->sc_enabled_sgippi |= mask;
		gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, mask);
		while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
			;
	} else {
		gicd_write_4(sc, GICD_ISENABLERn(group), mask);
		while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
			;
	}
}

static void
gicv3_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	struct cpu_info * const ci = curcpu();
	const u_int group = irqbase / 32;

	if (group == 0) {
		sc->sc_enabled_sgippi &= ~mask;
		gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, mask);
		while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
			;
	} else {
		gicd_write_4(sc, GICD_ICENABLERn(group), mask);
		while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
			;
	}
}

static void
gicv3_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	const u_int group = is->is_irq / 32;
	uint32_t ipriority, icfg;
	uint64_t irouter;
	u_int n;

	const u_int ipriority_val = 0x80 | IPL_TO_PRIORITY(is->is_ipl);
	const u_int ipriority_shift = (is->is_irq & 0x3) * 8;
	const u_int icfg_shift = (is->is_irq & 0xf) * 2;

	if (group == 0) {
		/* SGIs and PPIs are always MP-safe */
		is->is_mpsafe = true;

		/* Update interrupt configuration and priority on all redistributors */
		for (n = 0; n < sc->sc_bsh_r_count; n++) {
			icfg = gicr_read_4(sc, n, GICR_ICFGRn(is->is_irq / 16));
			if (is->is_type == IST_LEVEL)
				icfg &= ~(0x2 << icfg_shift);
			if (is->is_type == IST_EDGE)
				icfg |= (0x2 << icfg_shift);
			gicr_write_4(sc, n, GICR_ICFGRn(is->is_irq / 16), icfg);

			ipriority = gicr_read_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4));
			ipriority &= ~(0xff << ipriority_shift);
			ipriority |= (ipriority_val << ipriority_shift);
			gicr_write_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4), ipriority);
		}
	} else {
		if (is->is_mpsafe) {
			/* Route MP-safe interrupts to all participating PEs */
			irouter = GICD_IROUTER_Interrupt_Routing_mode;
		} else {
			/* Route non-MP-safe interrupts to the primary PE only */
			irouter = sc->sc_irouter[0];
		}
		gicd_write_8(sc, GICD_IROUTER(is->is_irq), irouter);

		/* Update interrupt configuration */
		icfg = gicd_read_4(sc, GICD_ICFGRn(is->is_irq / 16));
		if (is->is_type == IST_LEVEL)
			icfg &= ~(0x2 << icfg_shift);
		if (is->is_type == IST_EDGE)
			icfg |= (0x2 << icfg_shift);
		gicd_write_4(sc, GICD_ICFGRn(is->is_irq / 16), icfg);

		/* Update interrupt priority */
		ipriority = gicd_read_4(sc, GICD_IPRIORITYRn(is->is_irq / 4));
		ipriority &= ~(0xff << ipriority_shift);
		ipriority |= (ipriority_val << ipriority_shift);
		gicd_write_4(sc, GICD_IPRIORITYRn(is->is_irq / 4), ipriority);
	}
}

static void
gicv3_set_priority(struct pic_softc *pic, int ipl)
{
	icc_pmr_write(IPL_TO_PRIORITY(ipl) << 1);
}

static void
gicv3_dist_enable(struct gicv3_softc *sc)
{
	uint32_t gicd_ctrl;
	u_int n;

	/* Disable the distributor */
	gicd_write_4(sc, GICD_CTRL, 0);

	/* Wait for register write to complete */
	while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
		;

	/* Clear all INTID enable bits */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32)
		gicd_write_4(sc, GICD_ICENABLERn(n / 32), ~0);

	/* Set default priorities to lowest */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 4)
		gicd_write_4(sc, GICD_IPRIORITYRn(n / 4), ~0);

	/* Set all interrupts to G1NS */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32) {
		gicd_write_4(sc, GICD_IGROUPRn(n / 32), ~0);
		gicd_write_4(sc, GICD_IGRPMODRn(n / 32), 0);
	}

	/* Set all interrupts level-sensitive by default */
	for (n = 32; n < sc->sc_pic.pic_maxsources; n += 16)
		gicd_write_4(sc, GICD_ICFGRn(n / 16), 0);

	/* Wait for register writes to complete */
	while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
		;

	/* Enable Affinity routing and G1NS interrupts */
	gicd_ctrl = GICD_CTRL_EnableGrp1A | GICD_CTRL_Enable | GICD_CTRL_ARE_NS;
	gicd_write_4(sc, GICD_CTRL, gicd_ctrl);
}

static void
gicv3_redist_enable(struct gicv3_softc *sc, struct cpu_info *ci)
{
	uint32_t icfg;
	u_int n, o;

	/* Clear INTID enable bits */
	gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, ~0);

	/* Wait for register write to complete */
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
		;

	/* Set default priorities */
	for (n = 0; n < 32; n += 4) {
		uint32_t priority = 0;
		size_t byte_shift = 0;
		for (o = 0; o < 4; o++, byte_shift += 8) {
			struct intrsource * const is = sc->sc_pic.pic_sources[n + o];
			if (is == NULL)
				priority |= 0xff << byte_shift;
			else {
				const u_int ipriority_val = 0x80 | IPL_TO_PRIORITY(is->is_ipl);
				priority |= ipriority_val << byte_shift;
			}
		}
		gicr_write_4(sc, ci->ci_gic_redist, GICR_IPRIORITYRn(n / 4), priority);
	}

	/* Set all interrupts to G1NS */
	gicr_write_4(sc, ci->ci_gic_redist, GICR_IGROUPR0, ~0);
	gicr_write_4(sc, ci->ci_gic_redist, GICR_IGRPMODR0, 0);

	/* Restore PPI configs */
	for (n = 0, icfg = 0; n < 16; n++) {
		struct intrsource * const is = sc->sc_pic.pic_sources[16 + n];
		if (is != NULL && is->is_type == IST_EDGE)
			icfg |= (0x2 << (n * 2));
	}
	gicr_write_4(sc, ci->ci_gic_redist, GICR_ICFGRn(1), icfg);

	/* Restore current enable bits */
	gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, sc->sc_enabled_sgippi);

	/* Wait for register write to complete */
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
		;
}

/* Return this CPU's MPIDR affinity, encoded in GICR_TYPER.Affinity_Value format */
static uint64_t
gicv3_cpu_identity(void)
{
	u_int aff3, aff2, aff1, aff0;

#ifdef __aarch64__
	const register_t mpidr = reg_mpidr_el1_read();
	aff0 = __SHIFTOUT(mpidr, MPIDR_AFF0);
	aff1 = __SHIFTOUT(mpidr, MPIDR_AFF1);
	aff2 = __SHIFTOUT(mpidr, MPIDR_AFF2);
	aff3 = __SHIFTOUT(mpidr, MPIDR_AFF3);
#else
	const register_t mpidr = armreg_mpidr_read();
	aff0 = __SHIFTOUT(mpidr, MPIDR_AFF0);
	aff1 = __SHIFTOUT(mpidr, MPIDR_AFF1);
	aff2 = __SHIFTOUT(mpidr, MPIDR_AFF2);
	aff3 = 0;
#endif

	return __SHIFTIN(aff0, GICR_TYPER_Affinity_Value_Aff0) |
	    __SHIFTIN(aff1, GICR_TYPER_Affinity_Value_Aff1) |
	    __SHIFTIN(aff2, GICR_TYPER_Affinity_Value_Aff2) |
	    __SHIFTIN(aff3, GICR_TYPER_Affinity_Value_Aff3);
}

/* Find the index of this CPU's redistributor by matching affinity values */
static u_int
gicv3_find_redist(struct gicv3_softc *sc)
{
	uint64_t gicr_typer;
	u_int n;

	const uint64_t cpu_identity = gicv3_cpu_identity();

	for (n = 0; n < sc->sc_bsh_r_count; n++) {
		gicr_typer = gicr_read_8(sc, n, GICR_TYPER);
		if ((gicr_typer & GICR_TYPER_Affinity_Value) == cpu_identity)
			return n;
	}

	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);

	panic("%s: could not find GICv3 redistributor for cpu %d.%d.%d.%d",
	    cpu_name(curcpu()), aff3, aff2, aff1, aff0);
}

/* Pre-compute this CPU's target fields for ICC_SGI1R_EL1 writes */
static uint64_t
gicv3_sgir(struct gicv3_softc *sc)
{
	const uint64_t cpu_identity = gicv3_cpu_identity();

	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);

	return __SHIFTIN(__BIT(aff0), ICC_SGIR_EL1_TargetList) |
	    __SHIFTIN(aff1, ICC_SGIR_EL1_Aff1) |
	    __SHIFTIN(aff2, ICC_SGIR_EL1_Aff2) |
	    __SHIFTIN(aff3, ICC_SGIR_EL1_Aff3);
}

static void
gicv3_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	uint32_t icc_sre, icc_ctlr, gicr_waker;

	ci->ci_gic_redist = gicv3_find_redist(sc);
	ci->ci_gic_sgir = gicv3_sgir(sc);

	/* Store route to CPU for SPIs */
	const uint64_t cpu_identity = gicv3_cpu_identity();
	const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
	const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
	const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
	const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);
	sc->sc_irouter[cpu_index(ci)] =
	    __SHIFTIN(aff0, GICD_IROUTER_Aff0) |
	    __SHIFTIN(aff1, GICD_IROUTER_Aff1) |
	    __SHIFTIN(aff2, GICD_IROUTER_Aff2) |
	    __SHIFTIN(aff3, GICD_IROUTER_Aff3);

	/* Enable System register access and disable IRQ/FIQ bypass */
	icc_sre = ICC_SRE_EL1_SRE | ICC_SRE_EL1_DFB | ICC_SRE_EL1_DIB;
	icc_sre_write(icc_sre);

	/* Mark the connected PE as being awake */
	gicr_waker = gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER);
	gicr_waker &= ~GICR_WAKER_ProcessorSleep;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_WAKER, gicr_waker);
	while (gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER) & GICR_WAKER_ChildrenAsleep)
		;

	/* Set initial priority mask */
	gicv3_set_priority(pic, IPL_HIGH);

	/* Set the binary point field to the minimum value */
	icc_bpr1_write(0);

	/* Enable group 1 interrupt signaling */
	icc_igrpen1_write(ICC_IGRPEN_EL1_Enable);

	/* Set EOI mode */
	icc_ctlr = icc_ctlr_read();
	icc_ctlr &= ~ICC_CTLR_EL1_EOImode;
	icc_ctlr_write(icc_ctlr);

	/* Enable redistributor */
	gicv3_redist_enable(sc, ci);

	/* Allow IRQ exceptions */
	cpsie(I32_bit);
}

#ifdef MULTIPROCESSOR
static void
gicv3_ipi_send(struct pic_softc *pic, const kcpuset_t *kcp, u_long ipi)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	uint64_t intid, aff, targets;

	intid = __SHIFTIN(ipi, ICC_SGIR_EL1_INTID);
	if (kcp == NULL) {
		/* Interrupts routed to all PEs, excluding "self" */
		if (ncpu == 1)
			return;
		icc_sgi1r_write(intid | ICC_SGIR_EL1_IRM);
	} else {
		/* Interrupts routed to specific PEs */
		aff = 0;
		targets = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (!kcpuset_isset(kcp, cpu_index(ci)))
				continue;
			if ((ci->ci_gic_sgir & ICC_SGIR_EL1_Aff) != aff) {
				if (targets != 0) {
					icc_sgi1r_write(intid | aff | targets);
					targets = 0;
				}
				aff = (ci->ci_gic_sgir & ICC_SGIR_EL1_Aff);
			}
			targets |= (ci->ci_gic_sgir & ICC_SGIR_EL1_TargetList);
		}
		if (targets != 0)
			icc_sgi1r_write(intid | aff | targets);
	}
}

static void
gicv3_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	const size_t group = irq / 32;
	int n;

	kcpuset_zero(affinity);
	if (group == 0) {
		/* All CPUs are targets for group 0 (SGI/PPI) */
		for (n = 0; n < ncpu; n++) {
			if (sc->sc_irouter[n] != UINT64_MAX)
				kcpuset_set(affinity, n);
		}
	} else {
		/* Find distributor targets (SPI) */
		const uint64_t irouter = gicd_read_8(sc, GICD_IROUTER(irq));
		for (n = 0; n < ncpu; n++) {
			if (irouter == GICD_IROUTER_Interrupt_Routing_mode ||
			    irouter == sc->sc_irouter[n])
				kcpuset_set(affinity, n);
		}
	}
}

static int
gicv3_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = PICTOSOFTC(pic);
	const size_t group = irq / 32;
	uint64_t irouter;

	if (group == 0)
		return EINVAL;

	const int set = kcpuset_countset(affinity);
	if (set == ncpu)
		irouter = GICD_IROUTER_Interrupt_Routing_mode;
	else if (set == 1)
		irouter = sc->sc_irouter[kcpuset_ffs(affinity) - 1];
	else
		return EINVAL;

	gicd_write_8(sc, GICD_IROUTER(irq), irouter);

	return 0;
}
#endif

static const struct pic_ops gicv3_picops = {
	.pic_unblock_irqs = gicv3_unblock_irqs,
	.pic_block_irqs = gicv3_block_irqs,
	.pic_establish_irq = gicv3_establish_irq,
	.pic_set_priority = gicv3_set_priority,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = gicv3_cpu_init,
	.pic_ipi_send = gicv3_ipi_send,
	.pic_get_affinity = gicv3_get_affinity,
	.pic_set_affinity = gicv3_set_affinity,
#endif
};

static void
gicv3_lpi_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	int bit;

	while ((bit = ffs(mask)) != 0) {
		sc->sc_lpiconf.base[irqbase + bit - 1] |= GIC_LPICONF_Enable;
		mask &= ~__BIT(bit - 1);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_lpiconf.map, irqbase, 32, BUS_DMASYNC_PREWRITE);
}

static void
gicv3_lpi_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	int bit;

	while ((bit = ffs(mask)) != 0) {
		sc->sc_lpiconf.base[irqbase + bit - 1] &= ~GIC_LPICONF_Enable;
		mask &= ~__BIT(bit - 1);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_lpiconf.map, irqbase, 32, BUS_DMASYNC_PREWRITE);
}

static void
gicv3_lpi_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);

	sc->sc_lpiconf.base[is->is_irq] = 0x80 | IPL_TO_PRIORITY(is->is_ipl) | GIC_LPICONF_Res1;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_lpiconf.map, is->is_irq, 1, BUS_DMASYNC_PREWRITE);
}

static void
gicv3_lpi_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;
	uint32_t ctlr;

	/* If physical LPIs are not supported on this redistributor, just return. */
	const uint64_t typer = gicr_read_8(sc, ci->ci_gic_redist, GICR_TYPER);
	if ((typer & GICR_TYPER_PLPIS) == 0)
		return;

	/* Interrupt target address for this CPU, used by ITS when GITS_TYPER.PTA == 0 */
	sc->sc_processor_id[cpu_index(ci)] = __SHIFTOUT(typer, GICR_TYPER_Processor_Number);

	/* Disable LPIs before making changes */
	ctlr = gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR);
	ctlr &= ~GICR_CTLR_Enable_LPIs;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_CTLR, ctlr);
	arm_dsb();

	/* Setup the LPI configuration table */
	const uint64_t propbase = sc->sc_lpiconf.segs[0].ds_addr |
	    __SHIFTIN(ffs(pic->pic_maxsources) - 1, GICR_PROPBASER_IDbits) |
	    __SHIFTIN(GICR_Shareability_NS, GICR_PROPBASER_Shareability) |
	    __SHIFTIN(GICR_Cache_NORMAL_NC, GICR_PROPBASER_InnerCache);
	gicr_write_8(sc, ci->ci_gic_redist, GICR_PROPBASER, propbase);

	/* Setup the LPI pending table */
	const uint64_t pendbase = sc->sc_lpipend[cpu_index(ci)].segs[0].ds_addr |
	    __SHIFTIN(GICR_Shareability_NS, GICR_PENDBASER_Shareability) |
	    __SHIFTIN(GICR_Cache_NORMAL_NC, GICR_PENDBASER_InnerCache) |
	    GICR_PENDBASER_PTZ;
	gicr_write_8(sc, ci->ci_gic_redist, GICR_PENDBASER, pendbase);

	/* Enable LPIs */
	ctlr = gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR);
	ctlr |= GICR_CTLR_Enable_LPIs;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_CTLR, ctlr);
	arm_dsb();

	/* Setup ITS if present */
	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list)
		cb->cpu_init(cb->priv, ci);
}

#ifdef MULTIPROCESSOR
static void
gicv3_lpi_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;

	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list)
		cb->get_affinity(cb->priv, irq, affinity);
}

static int
gicv3_lpi_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;
	int error = EINVAL;

	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list) {
		error = cb->set_affinity(cb->priv, irq, affinity);
		if (error)
			return error;
	}

	return error;
}
#endif

static const struct pic_ops gicv3_lpiops = {
	.pic_unblock_irqs = gicv3_lpi_unblock_irqs,
	.pic_block_irqs = gicv3_lpi_block_irqs,
	.pic_establish_irq = gicv3_lpi_establish_irq,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = gicv3_lpi_cpu_init,
	.pic_get_affinity = gicv3_lpi_get_affinity,
	.pic_set_affinity = gicv3_lpi_set_affinity,
#endif
};

/* Allocate, map, and zero a DMA-coherent buffer for GIC tables */
void
gicv3_dma_alloc(struct gicv3_softc *sc, struct gicv3_dma *dma, bus_size_t len, bus_size_t align)
{
	int nsegs, error;

	dma->len = len;
	error = bus_dmamem_alloc(sc->sc_dmat, dma->len, align, 0, dma->segs, 1, &nsegs, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_alloc failed: %d", error);
	error = bus_dmamem_map(sc->sc_dmat, dma->segs, nsegs, len, (void **)&dma->base, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_map failed: %d", error);
	error = bus_dmamap_create(sc->sc_dmat, len, 1, len, 0, BUS_DMA_WAITOK, &dma->map);
	if (error)
		panic("bus_dmamap_create failed: %d", error);
	error = bus_dmamap_load(sc->sc_dmat, dma->map, dma->base, dma->len, NULL, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamap_load failed: %d", error);

	memset(dma->base, 0, dma->len);
	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, dma->len, BUS_DMASYNC_PREWRITE);
}

static void
gicv3_lpi_init(struct gicv3_softc *sc)
{
	/*
	 * Allocate LPI configuration table
	 */
	gicv3_dma_alloc(sc, &sc->sc_lpiconf, sc->sc_lpi.pic_maxsources, 0x1000);
	KASSERT((sc->sc_lpiconf.segs[0].ds_addr & ~GICR_PROPBASER_Physical_Address) == 0);

	/*
	 * Allocate LPI pending tables
	 */
	const bus_size_t lpipend_sz = sc->sc_lpi.pic_maxsources / NBBY;
	for (int cpuindex = 0; cpuindex < ncpu; cpuindex++) {
		gicv3_dma_alloc(sc, &sc->sc_lpipend[cpuindex], lpipend_sz, 0x10000);
		KASSERT((sc->sc_lpipend[cpuindex].segs[0].ds_addr & ~GICR_PENDBASER_Physical_Address) == 0);
	}
}

/* IRQ exception handler: acknowledge, dispatch, and EOI group 1 interrupts */
void
gicv3_irq_handler(void *frame)
{
	struct cpu_info * const ci = curcpu();
	struct gicv3_softc * const sc = gicv3_softc;
	struct pic_softc *pic;
	const int oldipl = ci->ci_cpl;

	ci->ci_data.cpu_nintr++;

	for (;;) {
		const uint32_t iar = icc_iar1_read();
		const uint32_t irq = __SHIFTOUT(iar, ICC_IAR_INTID);
		if (irq == ICC_IAR_INTID_SPURIOUS)
			break;

		pic = irq >= GIC_LPI_BASE ? &sc->sc_lpi : &sc->sc_pic;
		if (irq - pic->pic_irqbase >= pic->pic_maxsources)
			continue;

		struct intrsource * const is = pic->pic_sources[irq - pic->pic_irqbase];
		KASSERT(is != NULL);

		const int ipl = is->is_ipl;
		if (ci->ci_cpl < ipl)
			pic_set_priority(ci, ipl);

		cpsie(I32_bit);
		pic_dispatch(is, frame);
		cpsid(I32_bit);

		icc_eoi1r_write(iar);
	}

	if (ci->ci_cpl != oldipl)
		pic_set_priority(ci, oldipl);
}

int
gicv3_init(struct gicv3_softc *sc)
{
	const uint32_t gicd_typer = gicd_read_4(sc, GICD_TYPER);
	int n;

	KASSERT(CPU_IS_PRIMARY(curcpu()));

	LIST_INIT(&sc->sc_lpi_callbacks);

	for (n = 0; n < MAXCPUS; n++)
		sc->sc_irouter[n] = UINT64_MAX;

	sc->sc_pic.pic_ops = &gicv3_picops;
	sc->sc_pic.pic_maxsources = GICD_TYPER_LINES(gicd_typer);
	snprintf(sc->sc_pic.pic_name, sizeof(sc->sc_pic.pic_name), "gicv3");
#ifdef MULTIPROCESSOR
	sc->sc_pic.pic_cpus = kcpuset_running;
#endif
	pic_add(&sc->sc_pic, 0);

	if ((gicd_typer & GICD_TYPER_LPIS) != 0) {
		sc->sc_lpi.pic_ops = &gicv3_lpiops;
		sc->sc_lpi.pic_maxsources = 8192;	/* Min. required by GICv3 spec */
		snprintf(sc->sc_lpi.pic_name, sizeof(sc->sc_lpi.pic_name), "gicv3-lpi");
		pic_add(&sc->sc_lpi, GIC_LPI_BASE);

		gicv3_lpi_init(sc);
	}

	KASSERT(gicv3_softc == NULL);
	gicv3_softc = sc;

	for (int i = 0; i < sc->sc_bsh_r_count; i++) {
		const uint64_t gicr_typer = gicr_read_8(sc, i, GICR_TYPER);
		const u_int aff0 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff0);
		const u_int aff1 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff1);
		const u_int aff2 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff2);
		const u_int aff3 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff3);

		aprint_debug_dev(sc->sc_dev, "redist %d: cpu %d.%d.%d.%d\n",
		    i, aff3, aff2, aff1, aff0);
	}

	gicv3_dist_enable(sc);

	gicv3_cpu_init(&sc->sc_pic, curcpu());
	if ((gicd_typer & GICD_TYPER_LPIS) != 0)
		gicv3_lpi_cpu_init(&sc->sc_lpi, curcpu());

#ifdef __HAVE_PIC_FAST_SOFTINTS
	intr_establish_xname(SOFTINT_BIO, IPL_SOFTBIO, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_BIO, "softint bio");
	intr_establish_xname(SOFTINT_CLOCK, IPL_SOFTCLOCK, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_CLOCK, "softint clock");
	intr_establish_xname(SOFTINT_NET, IPL_SOFTNET, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_NET, "softint net");
	intr_establish_xname(SOFTINT_SERIAL, IPL_SOFTSERIAL, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_SERIAL, "softint serial");
#endif

#ifdef MULTIPROCESSOR
	intr_establish_xname(IPI_AST, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_ast, (void *)-1, "IPI ast");
	intr_establish_xname(IPI_XCALL, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_xcall, (void *)-1, "IPI xcall");
	intr_establish_xname(IPI_GENERIC, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_generic, (void *)-1, "IPI generic");
	intr_establish_xname(IPI_NOP, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_nop, (void *)-1, "IPI nop");
	intr_establish_xname(IPI_SHOOTDOWN, IPL_SCHED, IST_MPSAFE | IST_EDGE, pic_ipi_shootdown, (void *)-1, "IPI shootdown");
#ifdef DDB
	intr_establish_xname(IPI_DDB, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_ddb, NULL, "IPI ddb");
#endif
#ifdef __HAVE_PREEMPTION
	intr_establish_xname(IPI_KPREEMPT, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_kpreempt, (void *)-1, "IPI kpreempt");
#endif
#endif

	return 0;
}