/*	$NetBSD: gic.c,v 1.10 2014/05/19 22:47:53 rmind Exp $	*/
/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ddb.h"

#define _INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gic.c,v 1.10 2014/05/19 22:47:53 rmind Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/cpu.h>
#include <sys/proc.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>
#include <arm/atomic.h>

#include <arm/cortex/gic_reg.h>
#include <arm/cortex/mpcore_var.h>

#define	ARMGIC_SGI_IPIBASE	(16 - NIPI)

static int armgic_match(device_t, cfdata_t, void *);
static void armgic_attach(device_t, device_t, void *);

static void armgic_set_priority(struct pic_softc *, int);
static void armgic_unblock_irqs(struct pic_softc *, size_t, uint32_t);
static void armgic_block_irqs(struct pic_softc *, size_t, uint32_t);
static void armgic_establish_irq(struct pic_softc *, struct intrsource *);
#if 0
static void armgic_source_name(struct pic_softc *, int, char *, size_t);
#endif

#ifdef MULTIPROCESSOR
static void armgic_cpu_init(struct pic_softc *, struct cpu_info *);
static void armgic_ipi_send(struct pic_softc *, const kcpuset_t *, u_long);
#endif

static const struct pic_ops armgic_picops = {
	.pic_unblock_irqs = armgic_unblock_irqs,
	.pic_block_irqs = armgic_block_irqs,
	.pic_establish_irq = armgic_establish_irq,
#if 0
	.pic_source_name = armgic_source_name,
#endif
	.pic_set_priority = armgic_set_priority,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = armgic_cpu_init,
	.pic_ipi_send = armgic_ipi_send,
#endif
};

#define	PICTOSOFTC(pic)		((struct armgic_softc *)(pic))
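
/*
 * Driver-private state.  sc_gic_valid_lines is a bitmap, one word per
 * group of 32 lines, of the interrupt lines the hardware actually
 * implements (probed in armgic_attach).  sc_enabled_local records the
 * enables for lines 0-31 (SGIs/PPIs), whose distributor registers are
 * banked per-CPU, so armgic_cpu_init can replicate them on secondary
 * CPUs.  sc_mptargets accumulates the CPU target mask used for MP-safe
 * SPIs.
 */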
static struct armgic_softc {
	struct pic_softc sc_pic;
	device_t sc_dev;
	bus_space_tag_t sc_memt;
	bus_space_handle_t sc_gicch;
	bus_space_handle_t sc_gicdh;
	size_t sc_gic_lines;
	uint32_t sc_gic_type;
	uint32_t sc_gic_valid_lines[1024/32];
	uint32_t sc_enabled_local;
#ifdef MULTIPROCESSOR
	uint32_t sc_mptargets;
#endif
} armgic_softc = {
	.sc_pic = {
		.pic_ops = &armgic_picops,
		.pic_name = "armgic",
	},
};

static struct intrsource armgic_dummy_source;

__CTASSERT(NIPL == 8);

/*
 * GIC registers are always little-endian.  It is assumed the bus_space
 * will do any endian conversion required.
 */
static inline uint32_t
gicc_read(struct armgic_softc *sc, bus_size_t o)
{
	return bus_space_read_4(sc->sc_memt, sc->sc_gicch, o);
}

static inline void
gicc_write(struct armgic_softc *sc, bus_size_t o, uint32_t v)
{
	bus_space_write_4(sc->sc_memt, sc->sc_gicch, o, v);
}

static inline uint32_t
gicd_read(struct armgic_softc *sc, bus_size_t o)
{
	return bus_space_read_4(sc->sc_memt, sc->sc_gicdh, o);
}

static inline void
gicd_write(struct armgic_softc *sc, bus_size_t o, uint32_t v)
{
	bus_space_write_4(sc->sc_memt, sc->sc_gicdh, o, v);
}

/*
 * In the GIC prioritization scheme, lower numbers have higher priority.
 * Only write priorities that could be non-secure.
 */
static inline uint32_t
armgic_ipl_to_priority(int ipl)
{
	return GICC_PMR_NONSECURE
	    | ((IPL_HIGH - ipl) * GICC_PMR_NS_PRIORITIES / NIPL);
}

#if 0
static inline int
armgic_priority_to_ipl(uint32_t priority)
{
	return IPL_HIGH
	    - (priority & ~GICC_PMR_NONSECURE) * NIPL / GICC_PMR_NS_PRIORITIES;
}
#endif
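
/*
 * Worked example (illustrative only): with NIPL == 8, successive IPLs
 * map to GIC priorities GICC_PMR_NS_PRIORITIES/8 apart, so IPL_HIGH
 * becomes the numerically lowest (most urgent) non-secure priority and
 * IPL_NONE the numerically highest, matching the inverted GIC scheme
 * noted above.
 */
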
static void
armgic_unblock_irqs(struct pic_softc *pic, size_t irq_base, uint32_t irq_mask)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);
	const size_t group = irq_base / 32;

	if (group == 0)
		sc->sc_enabled_local |= irq_mask;

	gicd_write(sc, GICD_ISENABLERn(group), irq_mask);
}

static void
armgic_block_irqs(struct pic_softc *pic, size_t irq_base, uint32_t irq_mask)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);
	const size_t group = irq_base / 32;

	if (group == 0)
		sc->sc_enabled_local &= ~irq_mask;

	gicd_write(sc, GICD_ICENABLERn(group), irq_mask);
}

static uint32_t armgic_last_priority;

static void
armgic_set_priority(struct pic_softc *pic, int ipl)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);

	const uint32_t priority = armgic_ipl_to_priority(ipl);
	gicc_write(sc, GICC_PMR, priority);
	armgic_last_priority = priority;
}

#ifdef __HAVE_PIC_FAST_SOFTINTS
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep_p)
{
	lwp_t **lp = &l->l_cpu->ci_softlwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;
	/*
	 * Really easy.  Just tell it to trigger the local CPU.
	 */
	*machdep_p = GICD_SGIR_TargetListFilter_Me
	    | __SHIFTIN(level, GICD_SGIR_SGIINTID);
}

void
softint_trigger(uintptr_t machdep)
{

	gicd_write(&armgic_softc, GICD_SGIR, machdep);
}
#endif

void
armgic_irq_handler(void *tf)
{
	struct cpu_info * const ci = curcpu();
	struct armgic_softc * const sc = &armgic_softc;
	const int old_ipl = ci->ci_cpl;
#ifdef DIAGNOSTIC
	const int old_mtx_count = ci->ci_mtx_count;
	const int old_l_biglocks = ci->ci_curlwp->l_biglocks;
#endif
#ifdef DEBUG
	size_t n = 0;
#endif

	ci->ci_data.cpu_nintr++;

	KASSERTMSG(old_ipl != IPL_HIGH, "old_ipl %d pmr %#x hppir %#x",
	    old_ipl, gicc_read(sc, GICC_PMR), gicc_read(sc, GICC_HPPIR));
#if 0
	printf("%s(enter): %s: pmr=%u hppir=%u\n",
	    __func__, ci->ci_data.cpu_name,
	    gicc_read(sc, GICC_PMR),
	    gicc_read(sc, GICC_HPPIR));
#elif 0
	printf("(%u:%d", ci->ci_index, old_ipl);
#endif

	for (;;) {
		uint32_t iar = gicc_read(sc, GICC_IAR);
		uint32_t irq = __SHIFTOUT(iar, GICC_IAR_IRQ);
		//printf(".%u", irq);
		if (irq == GICC_IAR_IRQ_SPURIOUS) {
			iar = gicc_read(sc, GICC_IAR);
			irq = __SHIFTOUT(iar, GICC_IAR_IRQ);
			if (irq == GICC_IAR_IRQ_SPURIOUS)
				break;
			//printf(".%u", irq);
		}

		//const uint32_t cpuid = __SHIFTOUT(iar, GICC_IAR_CPUID_MASK);
		struct intrsource * const is = sc->sc_pic.pic_sources[irq];
		KASSERT(is != &armgic_dummy_source);

		/*
		 * The GIC has asserted the IPL for us so we could just
		 * update ci_cpl.
		 *
		 * But it's not that simple.  We may have already bumped
		 * ci_cpl due to a high priority interrupt and now we are
		 * about to dispatch one lower than the previous.  It's
		 * possible for that previous interrupt to have deferred
		 * some interrupts so we need to deal with those when
		 * lowering to the current interrupt's ipl.
		 *
		 * However, if we are just raising the ipl, we can simply
		 * update ci_cpl.
		 */
#if 0
		const int ipl = armgic_priority_to_ipl(gicc_read(sc, GICC_RPR));
		KASSERTMSG(panicstr != NULL || ipl == is->is_ipl,
		    "%s: irq %d: running ipl %d != source ipl %u",
		    ci->ci_data.cpu_name, irq, ipl, is->is_ipl);
#else
		const int ipl = is->is_ipl;
#endif
		if (__predict_false(ipl < ci->ci_cpl)) {
			//printf("<");
			pic_do_pending_ints(I32_bit, ipl, tf);
			KASSERT(ci->ci_cpl == ipl);
		} else {
			KASSERTMSG(ipl > ci->ci_cpl, "ipl %d cpl %d hw-ipl %#x",
			    ipl, ci->ci_cpl,
			    gicc_read(sc, GICC_PMR));
			//printf(">");
			gicc_write(sc, GICC_PMR, armgic_ipl_to_priority(ipl));
			ci->ci_cpl = ipl;
		}
		//printf("$");
		cpsie(I32_bit);
		pic_dispatch(is, tf);
		cpsid(I32_bit);
		gicc_write(sc, GICC_EOIR, iar);
#ifdef DEBUG
		n++;
		KDASSERTMSG(n < 5, "%s: processed too many (%zu)",
		    ci->ci_data.cpu_name, n);
#endif
	}

	// printf("%s(%p): exit (%zu dispatched)\n", __func__, tf, n);
	/*
	 * Now handle any pending ints.
	 */
	//printf("!");
	KASSERT(old_ipl != IPL_HIGH);
	pic_do_pending_ints(I32_bit, old_ipl, tf);
	KASSERTMSG(ci->ci_cpl == old_ipl, "ci_cpl %d old_ipl %d",
	    ci->ci_cpl, old_ipl);
	KASSERT(old_mtx_count == ci->ci_mtx_count);
	KASSERT(old_l_biglocks == ci->ci_curlwp->l_biglocks);
#if 0
	printf("%s(exit): %s(%d): pmr=%u hppir=%u\n",
	    __func__, ci->ci_data.cpu_name, ci->ci_cpl,
	    gicc_read(sc, GICC_PMR),
	    gicc_read(sc, GICC_HPPIR));
#elif 0
	printf("->%#x)", ((struct trapframe *)tf)->tf_pc);
#endif
}
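
/*
 * GICD configuration registers pack multiple lines per word: enables are
 * 1 bit per line (ISENABLERn/ICENABLERn), targets and priorities are
 * 8 bits per line (ITARGETSRn/IPRIORITYRn, 4 lines per register), and
 * trigger configuration is 2 bits per line (ICFGRn, 16 lines per
 * register).  The shifts computed below index into those fields.
 */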
void
armgic_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);
	const size_t group = is->is_irq / 32;
	const u_int irq = is->is_irq & 31;
	const u_int byte_shift = 8 * (irq & 3);
	const u_int twopair_shift = 2 * (irq & 15);

	KASSERTMSG(sc->sc_gic_valid_lines[group] & __BIT(irq),
	    "irq %u: not valid (group[%zu]=0x%08x [0x%08x])",
	    is->is_irq, group, sc->sc_gic_valid_lines[group],
	    (uint32_t)__BIT(irq));

	KASSERTMSG(is->is_type == IST_LEVEL || is->is_type == IST_EDGE,
	    "irq %u: type %u unsupported", is->is_irq, is->is_type);

	const bus_size_t targets_reg = GICD_ITARGETSRn(is->is_irq / 4);
	const bus_size_t cfg_reg = GICD_ICFGRn(is->is_irq / 16);
	uint32_t targets = gicd_read(sc, targets_reg);
	uint32_t cfg = gicd_read(sc, cfg_reg);

	if (group > 0) {
		/*
		 * There are 4 irqs per TARGETS register.  For now bind
		 * to the primary cpu.
		 */
		targets &= ~(0xff << byte_shift);
#ifdef MULTIPROCESSOR
		if (is->is_mpsafe) {
			/* Shift the target mask into this line's byte. */
			targets |= sc->sc_mptargets << byte_shift;
		} else
#endif
		targets |= 1 << byte_shift;
		gicd_write(sc, targets_reg, targets);

		/*
		 * There are 16 irqs per CFG register.  10=EDGE 00=LEVEL
		 */
		uint32_t new_cfg = cfg;
		uint32_t old_cfg = (cfg >> twopair_shift) & 3;
		if (is->is_type == IST_LEVEL && (old_cfg & 2) != 0) {
			new_cfg &= ~(3 << twopair_shift);
		} else if (is->is_type == IST_EDGE && (old_cfg & 2) == 0) {
			new_cfg |= 2 << twopair_shift;
		}
		if (new_cfg != cfg) {
			/* Write the updated value, not the stale one. */
			gicd_write(sc, cfg_reg, new_cfg);
#if 0
			printf("%s: irq %u: cfg changed from %#x to %#x\n",
			    pic->pic_name, is->is_irq, cfg, new_cfg);
#endif
		}
#ifdef MULTIPROCESSOR
	} else {
		/*
		 * All group 0 interrupts are per processor and MPSAFE by
		 * default.
		 */
		is->is_mpsafe = true;
#endif
	}

	/*
	 * There are 4 irqs per PRIORITY register.  Map the IPL
	 * to GIC priority.
	 */
	const bus_size_t priority_reg = GICD_IPRIORITYRn(is->is_irq / 4);
	uint32_t priority = gicd_read(sc, priority_reg);
	priority &= ~(0xff << byte_shift);
	priority |= armgic_ipl_to_priority(is->is_ipl) << byte_shift;
	gicd_write(sc, priority_reg, priority);

#if 0
	printf("%s: irq %u: target %#x cfg %u priority %#x (%u)\n",
	    pic->pic_name, is->is_irq, (targets >> byte_shift) & 0xff,
	    (cfg >> twopair_shift) & 3, (priority >> byte_shift) & 0xff,
	    is->is_ipl);
#endif
}
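
/*
 * The distributor registers covering lines 0-31 (SGIs and PPIs) are
 * banked: each CPU sees its own copy.  The enables and priorities the
 * primary CPU set up are therefore invisible to the secondaries, so
 * armgic_cpu_init below must replay them on each CPU as it comes up.
 */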
#ifdef MULTIPROCESSOR
static void
armgic_cpu_init_priorities(struct armgic_softc *sc)
{
	uint32_t enabled = sc->sc_enabled_local;
	for (size_t i = 0; i < 32; i += 4, enabled >>= 4) {
		/*
		 * If there are no enabled interrupts for the priority
		 * register, don't bother changing it.
		 */
		if ((enabled & 0x0f) == 0)
			continue;
		/*
		 * Each priority register covers four interrupts (bytes in
		 * 3-2-1-0 order), so rebuild all four bytes from the
		 * established sources in one read-modify-write.
		 */
		const bus_size_t priority_reg = GICD_IPRIORITYRn(i / 4);
		uint32_t priority = gicd_read(sc, priority_reg);
		uint32_t byte_mask = 0xff;
		size_t byte_shift = 0;
		for (size_t j = 0; j < 4; j++, byte_mask <<= 8, byte_shift += 8) {
			struct intrsource * const is =
			    sc->sc_pic.pic_sources[i + j];
			if (is == NULL || is == &armgic_dummy_source)
				continue;
			priority &= ~byte_mask;
			priority |= armgic_ipl_to_priority(is->is_ipl)
			    << byte_shift;
		}
		gicd_write(sc, priority_reg, priority);
	}
}

static void
armgic_cpu_init_targets(struct armgic_softc *sc)
{
	/*
	 * Update the mpsafe targets.
	 */
	for (size_t irq = 32; irq < sc->sc_gic_lines; irq++) {
		struct intrsource * const is = sc->sc_pic.pic_sources[irq];
		const bus_size_t targets_reg = GICD_ITARGETSRn(irq / 4);
		if (is != NULL && is->is_mpsafe) {
			/* byte_shift is a shift count, not a byte mask. */
			const u_int byte_shift = 8 * (irq & 3);
			uint32_t targets = gicd_read(sc, targets_reg);
			targets |= sc->sc_mptargets << byte_shift;
			gicd_write(sc, targets_reg, targets);
		}
	}
}

void
armgic_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);
	sc->sc_mptargets |= 1 << cpu_index(ci);
	KASSERTMSG(ci->ci_cpl == IPL_HIGH, "ipl %d not IPL_HIGH", ci->ci_cpl);
	if (!CPU_IS_PRIMARY(ci)) {
		if (sc->sc_mptargets != 1) {
			armgic_cpu_init_targets(sc);
		}
		if (sc->sc_enabled_local) {
			armgic_cpu_init_priorities(sc);
			gicd_write(sc, GICD_ISENABLERn(0),
			    sc->sc_enabled_local);
		}
	}
	gicc_write(sc, GICC_PMR, armgic_ipl_to_priority(ci->ci_cpl)); // set PMR
	gicc_write(sc, GICC_CTRL, GICC_CTRL_V1_Enable);	// enable interrupt
	cpsie(I32_bit);					// allow IRQ exceptions
}
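
/*
 * IPIs are delivered as SGIs.  ARMGIC_SGI_IPIBASE places the NIPI IPIs
 * in the top of the 16 SGI numbers (IDs 16-NIPI .. 15), leaving the low
 * SGIs free for other uses (e.g. the fast soft interrupts above).  The
 * SGIR write encodes the SGI number plus either an explicit CPU target
 * list or a "not me" broadcast filter.
 */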
void
armgic_ipi_send(struct pic_softc *pic, const kcpuset_t *kcp, u_long ipi)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);

#if 0
	if (ipi == IPI_NOP) {
		__asm __volatile("sev");
		return;
	}
#endif

	uint32_t sgir = __SHIFTIN(ARMGIC_SGI_IPIBASE + ipi, GICD_SGIR_SGIINTID);
	if (kcp != NULL) {
		uint32_t targets;
		kcpuset_export_u32(kcp, &targets, sizeof(targets));
		sgir |= __SHIFTIN(targets, GICD_SGIR_TargetList);
		sgir |= GICD_SGIR_TargetListFilter_List;
	} else {
		if (ncpu == 1)
			return;
		sgir |= GICD_SGIR_TargetListFilter_NotMe;
	}

	//printf("%s: %s: %#x", __func__, curcpu()->ci_data.cpu_name, sgir);
	gicd_write(sc, GICD_SGIR, sgir);
	//printf("\n");
}
#endif

int
armgic_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;

	if (strcmp(cf->cf_name, mpcaa->mpcaa_name) != 0)
		return 0;
	if (!CPU_ID_CORTEX_P(cputype) || CPU_ID_CORTEX_A8_P(cputype))
		return 0;

	return 1;
}

void
armgic_attach(device_t parent, device_t self, void *aux)
{
	struct armgic_softc * const sc = &armgic_softc;
	struct mpcore_attach_args * const mpcaa = aux;

	sc->sc_dev = self;
	self->dv_private = sc;

	sc->sc_memt = mpcaa->mpcaa_memt;	/* provided for us */
	bus_space_subregion(sc->sc_memt, mpcaa->mpcaa_memh, mpcaa->mpcaa_off1,
	    4096, &sc->sc_gicdh);
	bus_space_subregion(sc->sc_memt, mpcaa->mpcaa_memh, mpcaa->mpcaa_off2,
	    4096, &sc->sc_gicch);

	sc->sc_gic_type = gicd_read(sc, GICD_TYPER);
	sc->sc_pic.pic_maxsources = GICD_TYPER_LINES(sc->sc_gic_type);

	gicc_write(sc, GICC_CTRL, 0);	/* disable all interrupts */
	gicd_write(sc, GICD_CTRL, 0);	/* disable all interrupts */
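
	/*
	 * Priority probe: unimplemented low-order priority bits read back
	 * as zero, so writing 0xff and counting the bits that stick gives
	 * the number of priority levels.  E.g. (an illustrative value) a
	 * GIC implementing 5 priority bits reads back 0xf8, yielding
	 * 1 << 5 == 32 levels.
	 */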
	gicc_write(sc, GICC_PMR, 0xff);
	uint32_t pmr = gicc_read(sc, GICC_PMR);
	u_int priorities = 1 << popcount32(pmr);

	/*
	 * Let's find out how many real sources we have.
	 */
	for (size_t i = 0, group = 0;
	     i < sc->sc_pic.pic_maxsources;
	     i += 32, group++) {
		/*
		 * To figure out which sources are real, enable all
		 * interrupts and then read back the enable mask to see
		 * which ones really got enabled.
		 */
		gicd_write(sc, GICD_ISENABLERn(group), 0xffffffff);
		uint32_t valid = gicd_read(sc, GICD_ISENABLERn(group));

		/*
		 * Now disable (clear enable) them again.
		 */
		gicd_write(sc, GICD_ICENABLERn(group), valid);

		/*
		 * Count how many are valid.
		 */
		sc->sc_gic_lines += popcount32(valid);
		sc->sc_gic_valid_lines[group] = valid;
	}

	aprint_normal(": Generic Interrupt Controller, "
	    "%zu sources (%zu valid)\n",
	    sc->sc_pic.pic_maxsources, sc->sc_gic_lines);

	pic_add(&sc->sc_pic, 0);

	/*
	 * Force the GIC to IPL_HIGH and then enable interrupts.
	 */
	struct cpu_info * const ci = curcpu();
	KASSERTMSG(ci->ci_cpl == IPL_HIGH, "ipl %d not IPL_HIGH", ci->ci_cpl);
	armgic_set_priority(&sc->sc_pic, ci->ci_cpl);	// set PMR
	gicd_write(sc, GICD_CTRL, GICD_CTRL_Enable);	// enable Distributor
	gicc_write(sc, GICC_CTRL, GICC_CTRL_V1_Enable);	// enable CPU interrupts
	cpsie(I32_bit);					// allow interrupt exceptions

	/*
	 * For each line that isn't valid, we set the intrsource for it to
	 * point at a dummy source so that pic_intr_establish will fail for
	 * it.
	 */
	for (size_t i = 0, group = 0;
	     i < sc->sc_pic.pic_maxsources;
	     i += 32, group++) {
		uint32_t invalid = ~sc->sc_gic_valid_lines[group];
		for (size_t j = 0; invalid && j < 32; j++, invalid >>= 1) {
			if (invalid & 1) {
				sc->sc_pic.pic_sources[i + j] =
				    &armgic_dummy_source;
			}
		}
	}
#ifdef __HAVE_PIC_FAST_SOFTINTS
	intr_establish(SOFTINT_BIO, IPL_SOFTBIO, IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_BIO);
	intr_establish(SOFTINT_CLOCK, IPL_SOFTCLOCK, IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_CLOCK);
	intr_establish(SOFTINT_NET, IPL_SOFTNET, IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_NET);
	intr_establish(SOFTINT_SERIAL, IPL_SOFTSERIAL, IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_SERIAL);
#endif
#ifdef MULTIPROCESSOR
	intr_establish(ARMGIC_SGI_IPIBASE + IPI_AST, IPL_VM, IST_EDGE,
	    pic_ipi_nop, (void *)-1);
	intr_establish(ARMGIC_SGI_IPIBASE + IPI_XCALL, IPL_VM, IST_EDGE,
	    pic_ipi_xcall, (void *)-1);
	intr_establish(ARMGIC_SGI_IPIBASE + IPI_GENERIC, IPL_VM, IST_EDGE,
	    pic_ipi_generic, (void *)-1);
	intr_establish(ARMGIC_SGI_IPIBASE + IPI_NOP, IPL_VM, IST_EDGE,
	    pic_ipi_nop, (void *)-1);
	intr_establish(ARMGIC_SGI_IPIBASE + IPI_SHOOTDOWN, IPL_VM, IST_EDGE,
	    pic_ipi_shootdown, (void *)-1);
#ifdef DDB
	intr_establish(ARMGIC_SGI_IPIBASE + IPI_DDB, IPL_HIGH, IST_EDGE,
	    pic_ipi_ddb, NULL);
#endif
#ifdef __HAVE_PREEMPTION
	intr_establish(ARMGIC_SGI_IPIBASE + IPI_KPREEMPT, IPL_VM, IST_EDGE,
	    pic_ipi_nop, (void *)-1);
#endif
	armgic_cpu_init(&sc->sc_pic, curcpu());
#endif

	const u_int ppis = popcount32(sc->sc_gic_valid_lines[0] >> 16);
	const u_int sgis = popcount32(sc->sc_gic_valid_lines[0] & 0xffff);
	aprint_normal_dev(sc->sc_dev, "%u Priorities, %zu SPIs, %u PPIs, "
	    "%u SGIs\n", priorities, sc->sc_gic_lines - ppis - sgis, ppis,
	    sgis);
}

CFATTACH_DECL_NEW(armgic, 0,
    armgic_match, armgic_attach, NULL, NULL);