/*	$NetBSD: gic.c,v 1.3 2012/09/16 22:09:34 rmind Exp $	*/
/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gic.c,v 1.3 2012/09/16 22:09:34 rmind Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/proc.h>
#include <sys/xcall.h>			/* for xc_ipi_handler */

#include <arm/armreg.h>
#include <arm/cpufunc.h>
#include <arm/atomic.h>

#include <arm/cortex/gic_reg.h>
#include <arm/cortex/mpcore_var.h>

#define	ARMGIC_SGI_IPIBASE	(16 - NIPI)

static int armgic_match(device_t, cfdata_t, void *);
static void armgic_attach(device_t, device_t, void *);

static void armgic_set_priority(struct pic_softc *, int);
static void armgic_unblock_irqs(struct pic_softc *, size_t, uint32_t);
static void armgic_block_irqs(struct pic_softc *, size_t, uint32_t);
static void armgic_establish_irq(struct pic_softc *, struct intrsource *);
#if 0
static void armgic_source_name(struct pic_softc *, int, char *, size_t);
#endif

#ifdef MULTIPROCESSOR
static void armgic_cpu_init(struct pic_softc *, struct cpu_info *);
static void armgic_ipi_send(struct pic_softc *, const kcpuset_t *, u_long);
#endif

static const struct pic_ops armgic_picops = {
	.pic_unblock_irqs = armgic_unblock_irqs,
	.pic_block_irqs = armgic_block_irqs,
	.pic_establish_irq = armgic_establish_irq,
#if 0
	.pic_source_name = armgic_source_name,
#endif
	.pic_set_priority = armgic_set_priority,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = armgic_cpu_init,
	.pic_ipi_send = armgic_ipi_send,
#endif
};

#define	PICTOSOFTC(pic)		((struct armgic_softc *)(pic))

static struct armgic_softc {
	struct pic_softc sc_pic;
	device_t sc_dev;
	bus_space_tag_t sc_memt;
	bus_space_handle_t sc_memh;
	size_t sc_gic_lines;
	uint32_t sc_gic_type;
	uint32_t sc_gic_valid_lines[1024/32];
	uint32_t sc_enabled_local;
} armgic_softc = {
	.sc_pic = {
		.pic_ops = &armgic_picops,
		.pic_name = "armgic",
	},
};
static struct intrsource armgic_dummy_source;

__CTASSERT(NIPL == 8);

/*
 * GIC registers are always little-endian.
 */
static inline uint32_t
gicc_read(struct armgic_softc *sc, bus_size_t o)
{
	uint32_t v = bus_space_read_4(sc->sc_memt, sc->sc_memh, GICC_BASE + o);
	return le32toh(v);
}

static inline void
gicc_write(struct armgic_softc *sc, bus_size_t o, uint32_t v)
{
	v = htole32(v);
	bus_space_write_4(sc->sc_memt, sc->sc_memh, GICC_BASE + o, v);
}

static inline uint32_t
gicd_read(struct armgic_softc *sc, bus_size_t o)
{
	uint32_t v = bus_space_read_4(sc->sc_memt, sc->sc_memh, GICD_BASE + o);
	return le32toh(v);
}

static inline void
gicd_write(struct armgic_softc *sc, bus_size_t o, uint32_t v)
{
	v = htole32(v);
	bus_space_write_4(sc->sc_memt, sc->sc_memh, GICD_BASE + o, v);
}

/*
 * In the GIC prioritization scheme, lower numbers have higher priority.
 */
static inline uint32_t
armgic_ipl_to_priority(int ipl)
{
	return (IPL_HIGH - ipl) * GICC_PMR_PRIORITIES / NIPL;
}

static inline int
armgic_priority_to_ipl(uint32_t priority)
{
	return IPL_HIGH - priority * NIPL / GICC_PMR_PRIORITIES;
}
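/*
 * A worked example of the mapping above (assuming GICC_PMR_PRIORITIES is
 * 256, i.e. all 8 priority bits are implemented): each of the NIPL (8)
 * levels spans 32 GIC priority values, so IPL_HIGH (7) maps to priority 0
 * (most urgent) and IPL_NONE (0) maps to priority 224.
 * armgic_priority_to_ipl() is the inverse, rounding down within each
 * 32-value span.
 */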
static void
armgic_unblock_irqs(struct pic_softc *pic, size_t irq_base, uint32_t irq_mask)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);
	const size_t group = irq_base / 32;

	if (group == 0)
		sc->sc_enabled_local |= irq_mask;

	gicd_write(sc, GICD_ISENABLERn(group), irq_mask);
}

static void
armgic_block_irqs(struct pic_softc *pic, size_t irq_base, uint32_t irq_mask)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);
	const size_t group = irq_base / 32;

	if (group == 0)
		sc->sc_enabled_local &= ~irq_mask;

	gicd_write(sc, GICD_ICENABLERn(group), irq_mask);
}

static uint32_t armgic_last_priority;

static void
armgic_set_priority(struct pic_softc *pic, int ipl)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);

	const uint32_t priority = armgic_ipl_to_priority(ipl);
	gicc_write(sc, GICC_PMR, priority);
	armgic_last_priority = priority;
}

#ifdef __HAVE_PIC_FAST_SOFTINTS
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep_p)
{
	lwp_t **lp = &l->l_cpu->ci_softlwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;
	/*
	 * Really easy.  Just build an SGIR value that targets only the
	 * local CPU.
	 */
	*machdep_p = GICD_SGIR_TargetListFilter_Me
	    | __SHIFTIN(level, GICD_SGIR_SGIINTID);
}

void
softint_trigger(uintptr_t machdep)
{

	gicd_write(&armgic_softc, GICD_SGIR, machdep);
}
#endif

void
armgic_irq_handler(void *tf)
{
	struct cpu_info * const ci = curcpu();
	struct armgic_softc * const sc = &armgic_softc;
	const int old_ipl = ci->ci_cpl;
#ifdef DIAGNOSTIC
	const int old_mtx_count = ci->ci_mtx_count;
	const int old_l_biglocks = ci->ci_curlwp->l_biglocks;
#endif
#ifdef DEBUG
	size_t n = 0;
#endif

	ci->ci_data.cpu_nintr++;

	KASSERTMSG(old_ipl != IPL_HIGH, "old_ipl %d pmr %#x hppir %#x",
	    old_ipl, gicc_read(sc, GICC_PMR), gicc_read(sc, GICC_HPPIR));
#if 0
	printf("%s(enter): %s: pmr=%u hppir=%u\n",
	    __func__, ci->ci_data.cpu_name,
	    gicc_read(sc, GICC_PMR),
	    gicc_read(sc, GICC_HPPIR));
#elif 0
	printf("(%u:%d", ci->ci_index, old_ipl);
#endif

	for (;;) {
		uint32_t iar = gicc_read(sc, GICC_IAR);
		uint32_t irq = __SHIFTOUT(iar, GICC_IAR_IRQ);
		//printf(".%u", irq);
		if (irq == GICC_IAR_IRQ_SPURIOUS) {
			iar = gicc_read(sc, GICC_IAR);
			irq = __SHIFTOUT(iar, GICC_IAR_IRQ);
			if (irq == GICC_IAR_IRQ_SPURIOUS)
				break;
			//printf(".%u", irq);
		}

		//const uint32_t cpuid = __SHIFTOUT(iar, GICC_IAR_CPUID_MASK);
		struct intrsource * const is = sc->sc_pic.pic_sources[irq];
		KASSERT(is != &armgic_dummy_source);

		/*
		 * The GIC has already raised the running priority for us,
		 * so in principle we can just update ci_cpl.
		 *
		 * But it's not that simple.  We may have already bumped
		 * ci_cpl for a high priority interrupt and now be about to
		 * dispatch one at a lower priority.  That previous interrupt
		 * may have deferred some interrupts, so we need to deal
		 * with those when lowering to the current interrupt's ipl.
		 *
		 * However, if we are just raising the ipl, updating ci_cpl
		 * is all that is needed.
		 */
#if 0
		const int ipl = armgic_priority_to_ipl(gicc_read(sc, GICC_RPR));
		KASSERTMSG(panicstr != NULL || ipl == is->is_ipl,
		    "%s: irq %d: running ipl %d != source ipl %u",
		    ci->ci_data.cpu_name, irq, ipl, is->is_ipl);
#else
		const int ipl = is->is_ipl;
#endif
		if (__predict_false(ipl < ci->ci_cpl)) {
			//printf("<");
			pic_do_pending_ints(I32_bit, ipl, tf);
			KASSERT(ci->ci_cpl == ipl);
		} else {
			KASSERTMSG(ipl > ci->ci_cpl, "ipl %d cpl %d hw-ipl %#x",
			    ipl, ci->ci_cpl,
			    gicc_read(sc, GICC_PMR));
			//printf(">");
			gicc_write(sc, GICC_PMR, armgic_ipl_to_priority(ipl));
			ci->ci_cpl = ipl;
		}
		//printf("$");
		cpsie(I32_bit);
		pic_dispatch(is, tf);
		cpsid(I32_bit);
		gicc_write(sc, GICC_EOIR, iar);
#ifdef DEBUG
		n++;
		KDASSERTMSG(n < 5, "%s: processed too many (%zu)",
		    ci->ci_data.cpu_name, n);
#endif
	}

	// printf("%s(%p): exit (%zu dispatched)\n", __func__, tf, n);
	/*
	 * Now handle any pending ints.
	 */
	//printf("!");
	KASSERT(old_ipl != IPL_HIGH);
	pic_do_pending_ints(I32_bit, old_ipl, tf);
	KASSERTMSG(ci->ci_cpl == old_ipl, "ci_cpl %d old_ipl %d",
	    ci->ci_cpl, old_ipl);
	KASSERT(old_mtx_count == ci->ci_mtx_count);
	KASSERT(old_l_biglocks == ci->ci_curlwp->l_biglocks);
#if 0
	printf("%s(exit): %s(%d): pmr=%u hppir=%u\n",
	    __func__, ci->ci_data.cpu_name, ci->ci_cpl,
	    gicc_read(sc, GICC_PMR),
	    gicc_read(sc, GICC_HPPIR));
#elif 0
	printf("->%#x)", ((struct trapframe *)tf)->tf_pc);
#endif
}
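/*
 * Configure a single interrupt source.  For SPIs (irq >= 32) this routes
 * the line to the primary CPU and sets its edge/level trigger; for every
 * source it programs the 8-bit GIC priority derived from the source's IPL.
 * Note the register packing: 4 sources per ITARGETSR/IPRIORITYR and 16
 * sources (2 bits each) per ICFGR.
 */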
void
armgic_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);
	const size_t group = is->is_irq / 32;
	const u_int irq = is->is_irq & 31;
	const u_int byte_shift = 8 * (irq & 3);
	const u_int twopair_shift = 2 * (irq & 15);

	KASSERTMSG(sc->sc_gic_valid_lines[group] & __BIT(irq),
	    "irq %u: not valid (group[%zu]=0x%08x [0x%08x])",
	    is->is_irq, group, sc->sc_gic_valid_lines[group],
	    (uint32_t)__BIT(irq));

	KASSERTMSG(is->is_type == IST_LEVEL || is->is_type == IST_EDGE,
	    "irq %u: type %u unsupported", is->is_irq, is->is_type);

	const bus_size_t targets_reg = GICD_ITARGETSRn(is->is_irq / 4);
	const bus_size_t cfg_reg = GICD_ICFGRn(is->is_irq / 16);
	uint32_t targets = gicd_read(sc, targets_reg);
	uint32_t cfg = gicd_read(sc, cfg_reg);

	if (group > 0) {
		/*
		 * There are 4 irqs per TARGETS register.  For now bind
		 * to the primary cpu.
		 */
		targets &= ~(0xff << byte_shift);
		targets |= 1 << byte_shift;
		gicd_write(sc, targets_reg, targets);

		/*
		 * There are 16 irqs per CFG register.  10=EDGE 00=LEVEL
		 */
		uint32_t new_cfg = cfg;
		uint32_t old_cfg = (cfg >> twopair_shift) & 3;
		if (is->is_type == IST_LEVEL && (old_cfg & 2) != 0) {
			new_cfg &= ~(3 << twopair_shift);
		} else if (is->is_type == IST_EDGE && (old_cfg & 2) == 0) {
			new_cfg |= 2 << twopair_shift;
		}
		if (new_cfg != cfg) {
			gicd_write(sc, cfg_reg, new_cfg);
#if 0
			printf("%s: irq %u: cfg changed from %#x to %#x\n",
			    pic->pic_name, is->is_irq, cfg, new_cfg);
#endif
		}
	}

	/*
	 * There are 4 irqs per PRIORITY register.  Map the IPL
	 * to GIC priority.
	 */
	const bus_size_t priority_reg = GICD_IPRIORITYRn(is->is_irq / 4);
	uint32_t priority = gicd_read(sc, priority_reg);
	priority &= ~(0xff << byte_shift);
	priority |= armgic_ipl_to_priority(is->is_ipl) << byte_shift;
	gicd_write(sc, priority_reg, priority);

#if 0
	printf("%s: irq %u: target %#x cfg %u priority %#x (%u)\n",
	    pic->pic_name, is->is_irq, (targets >> byte_shift) & 0xff,
	    (cfg >> twopair_shift) & 3, (priority >> byte_shift) & 0xff,
	    is->is_ipl);
#endif
}

#ifdef MULTIPROCESSOR
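/*
 * The distributor registers for SGIs/PPIs (irqs 0-31) are banked per CPU,
 * so priorities programmed by the boot CPU are not visible to a secondary
 * CPU.  Replay them here from the sources the boot CPU has established.
 */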
static void
armgic_cpu_init_priorities(struct armgic_softc *sc)
{
	uint32_t enabled = sc->sc_enabled_local;
	for (size_t i = 0; i < 32; i += 4, enabled >>= 4) {
		/*
		 * If there are no enabled interrupts for the priority
		 * register, don't bother changing it.
		 */
		if ((enabled & 0x0f) == 0)
			continue;
		/*
		 * Priorities are packed four per register in 3210 byte
		 * order, so update the priority byte of each established
		 * source in this group of four.
		 */
		const bus_size_t priority_reg = GICD_IPRIORITYRn(i / 4);
		uint32_t priority = gicd_read(sc, priority_reg);
		uint32_t byte_mask = 0xff;
		size_t byte_shift = 0;
		for (size_t j = 0; j < 4; j++, byte_mask <<= 8, byte_shift += 8) {
			struct intrsource * const is =
			    sc->sc_pic.pic_sources[i+j];
			if (is == NULL || is == &armgic_dummy_source)
				continue;
			priority &= ~byte_mask;
			priority |= armgic_ipl_to_priority(is->is_ipl) << byte_shift;
		}
		gicd_write(sc, priority_reg, priority);
	}
}

void
armgic_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);
	if (!CPU_IS_PRIMARY(ci) && sc->sc_enabled_local) {
		armgic_cpu_init_priorities(sc);
	}
	KASSERTMSG(ci->ci_cpl == IPL_HIGH, "ipl %d not IPL_HIGH", ci->ci_cpl);
	gicc_write(sc, GICC_PMR, armgic_ipl_to_priority(ci->ci_cpl)); // set PMR
	gicc_write(sc, GICC_CTRL, GICC_CTRL_V1_Enable);	// enable interrupts
	if (!CPU_IS_PRIMARY(ci) && sc->sc_enabled_local)
		gicd_write(sc, GICD_ISENABLERn(0), sc->sc_enabled_local);
	cpsie(I32_bit);					// allow IRQ exceptions
}

void
armgic_ipi_send(struct pic_softc *pic, const kcpuset_t *kcp, u_long ipi)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);

	if (ipi == IPI_NOP) {
		__asm __volatile("sev");
		return;
	}

	uint32_t targets;
	kcpuset_export_u32(kcp, &targets, sizeof(targets));
	uint32_t sgir = __SHIFTIN(ARMGIC_SGI_IPIBASE + ipi, GICD_SGIR_SGIINTID);
	sgir |= __SHIFTIN(targets, GICD_SGIR_TargetList);

	printf("%s: %s: %#x", __func__, curcpu()->ci_data.cpu_name, sgir);
	gicd_write(sc, GICD_SGIR, sgir);
	printf("\n");
}
#endif

int
armgic_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;

	if (strcmp(cf->cf_name, mpcaa->mpcaa_name) != 0)
		return 0;
	if (!CPU_ID_CORTEX_P(cputype))
		return 0;
	if (CPU_ID_CORTEX_A8_P(cputype))
		return 0;

	return 1;
}
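/*
 * Attach: probe the distributor for the implemented interrupt lines,
 * register with the common PIC code, then bring the distributor and the
 * boot CPU's interface online at IPL_HIGH before hooking up the software
 * interrupt and IPI sources.
 */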
void
armgic_attach(device_t parent, device_t self, void *aux)
{
	struct armgic_softc * const sc = &armgic_softc;
	struct mpcore_attach_args * const mpcaa = aux;

	sc->sc_dev = self;
	self->dv_private = sc;

	sc->sc_memt = mpcaa->mpcaa_memt;	/* provided for us */
	sc->sc_memh = mpcaa->mpcaa_memh;	/* provided for us */

	sc->sc_gic_type = gicd_read(sc, GICD_TYPER);
	sc->sc_pic.pic_maxsources = GICD_TYPER_LINES(sc->sc_gic_type);

	gicc_write(sc, GICC_CTRL, 0);	/* disable all interrupts */
	gicd_write(sc, GICD_CTRL, 0);	/* disable all interrupts */

	gicc_write(sc, GICC_PMR, 0xff);
	uint32_t pmr = gicc_read(sc, GICC_PMR);
	u_int priorities = 1 << popcount32(pmr);

	/*
	 * Let's find out how many real sources we have.
	 */
	for (size_t i = 0, group = 0;
	     i < sc->sc_pic.pic_maxsources;
	     i += 32, group++) {
		/*
		 * To find out which sources are real, enable all of them,
		 * then read back the enable mask to see which ones really
		 * got enabled.
		 */
		gicd_write(sc, GICD_ISENABLERn(group), 0xffffffff);
		uint32_t valid = gicd_read(sc, GICD_ISENABLERn(group));

		/*
		 * Now disable (clear enable) them again.
		 */
		gicd_write(sc, GICD_ICENABLERn(group), valid);

		/*
		 * Count how many are valid.
		 */
		sc->sc_gic_lines += popcount32(valid);
		sc->sc_gic_valid_lines[group] = valid;
	}

	pic_add(&sc->sc_pic, 0);

	/*
	 * Force the GIC to IPL_HIGH and then enable interrupts.
	 */
	struct cpu_info * const ci = curcpu();
	KASSERTMSG(ci->ci_cpl == IPL_HIGH, "ipl %d not IPL_HIGH", ci->ci_cpl);
	armgic_set_priority(&sc->sc_pic, ci->ci_cpl);	// set PMR
	gicd_write(sc, GICD_CTRL, GICD_CTRL_Enable);	// enable Distributor
	gicc_write(sc, GICC_CTRL, GICC_CTRL_V1_Enable);	// enable CPU interrupts
	cpsie(I32_bit);					// allow interrupt exceptions

	/*
	 * For each line that isn't valid, we set the intrsource for it to
	 * point at a dummy source so that pic_intr_establish will fail for it.
	 */
	for (size_t i = 0, group = 0;
	     i < sc->sc_pic.pic_maxsources;
	     i += 32, group++) {
		uint32_t invalid = ~sc->sc_gic_valid_lines[group];
		for (size_t j = 0; invalid && j < 32; j++, invalid >>= 1) {
			if (invalid & 1) {
				sc->sc_pic.pic_sources[i + j] =
				    &armgic_dummy_source;
			}
		}
	}
#ifdef __HAVE_PIC_FAST_SOFTINTS
	intr_establish(SOFTINT_BIO, IPL_SOFTBIO, IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_BIO);
	intr_establish(SOFTINT_CLOCK, IPL_SOFTCLOCK, IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_CLOCK);
	intr_establish(SOFTINT_NET, IPL_SOFTNET, IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_NET);
	intr_establish(SOFTINT_SERIAL, IPL_SOFTSERIAL, IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_SERIAL);
#endif
#ifdef MULTIPROCESSOR
	intr_establish(ARMGIC_SGI_IPIBASE + IPI_AST, IPL_VM, IST_EDGE,
	    pic_ipi_nop, (void *)-1);
	intr_establish(ARMGIC_SGI_IPIBASE + IPI_XCALL, IPL_VM, IST_EDGE,
	    pic_ipi_xcall, (void *)-1);
#if 0	/* Not needed */
	intr_establish(ARMGIC_SGI_IPIBASE + IPI_NOP, IPL_VM, IST_EDGE,
	    pic_ipi_nop, (void *)-1);
#endif
#ifdef __HAVE_PREEMPTION
	intr_establish(ARMGIC_SGI_IPIBASE + IPI_KPREEMPT, IPL_VM, IST_EDGE,
	    pic_ipi_nop, (void *)-1);
#endif
	armgic_cpu_init(&sc->sc_pic, curcpu());
#endif

	aprint_normal(": Generic Interrupt Controller, "
	    "%zu sources (%zu valid)\n",
	    sc->sc_pic.pic_maxsources, sc->sc_gic_lines);

	const u_int ppis = popcount32(sc->sc_gic_valid_lines[0] >> 16);
	const u_int sgis = popcount32(sc->sc_gic_valid_lines[0] & 0xffff);
	aprint_normal_dev(sc->sc_dev,
	    "%u Priorities, %zu SPIs, %u PPIs, %u SGIs\n",
	    priorities, sc->sc_gic_lines - ppis - sgis, ppis, sgis);
}

CFATTACH_DECL_NEW(armgic, 0,
    armgic_match, armgic_attach, NULL, NULL);