/*	$NetBSD: pic.c,v 1.57 2020/07/27 16:26:51 skrll Exp $	*/
/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.57 2020/07/27 16:26:51 skrll Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/interrupt.h>
#include <sys/intr.h>
#include <sys/ipi.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/xcall.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>
#include <arm/locore.h>	/* for compat aarch64 */

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>

#if defined(__HAVE_PIC_PENDING_INTRS)
/*
 * This implementation of pending interrupts on a MULTIPROCESSOR system makes
 * the assumption that a PIC (pic_softc) shall only have all its interrupts
 * come from the same CPU.  In other words, interrupts from a single PIC will
 * not be distributed among multiple CPUs.
 */
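/*
 * Per-CPU bookkeeping for deferred interrupts: blocked_pics and
 * pending_pics carry one bit per PIC (hence the PIC_MAXPICS > 32 check
 * below), and pending_ipls carries one bit per IPL.
 */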
struct pic_pending {
        volatile uint32_t blocked_pics;
        volatile uint32_t pending_pics;
        volatile uint32_t pending_ipls;
};

static uint32_t
        pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
        pic_list_find_pic_by_pending_ipl(struct pic_pending *, uint32_t);
static void
        pic_deliver_irqs(struct pic_pending *, struct pic_softc *, int, void *);
static void
        pic_list_deliver_irqs(struct pic_pending *, register_t, int, void *);

#ifdef MULTIPROCESSOR
percpu_t *pic_pending_percpu;

static struct pic_pending *
pic_pending_get(void)
{
        return percpu_getref(pic_pending_percpu);
}

static void
pic_pending_put(struct pic_pending *pend)
{
        percpu_putref(pic_pending_percpu);
}
#else
struct pic_pending pic_pending;

#define pic_pending_get()       (&pic_pending)
#define pic_pending_put(pend)   __nothing
#endif /* MULTIPROCESSOR */
#endif /* __HAVE_PIC_PENDING_INTRS */

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
struct intrsource **pic_iplsource[NIPL] = {
        [0 ... NIPL-1] = pic__iplsources,
};
size_t pic_ipl_offset[NIPL+1];

static kmutex_t pic_lock;
static size_t pic_sourcebase;
static int pic_lastbase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);

static int pic_init(void);

#ifdef __HAVE_PIC_SET_PRIORITY
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
        register_t psw = cpsid(I32_bit);
        if (pic_list[0] != NULL)
                (pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl);
        ci->ci_cpl = newipl;
        if ((psw & I32_bit) == 0)
                cpsie(I32_bit);
}
#endif

#ifdef MULTIPROCESSOR
int
pic_ipi_ast(void *arg)
{
        setsoftast(curcpu());
        return 1;
}

int
pic_ipi_nop(void *arg)
{
        /* do nothing */
        return 1;
}

int
pic_ipi_xcall(void *arg)
{
        xc_ipi_handler();
        return 1;
}

int
pic_ipi_generic(void *arg)
{
        ipi_cpu_handler();
        return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
//      printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
        kdb_trap(-1, arg);
        return 1;
}
#endif /* DDB */

#ifdef __HAVE_PREEMPTION
int
pic_ipi_kpreempt(void *arg)
{
        atomic_or_uint(&curcpu()->ci_astpending, __BIT(1));
        return 1;
}
#endif /* __HAVE_PREEMPTION */

void
intr_cpu_init(struct cpu_info *ci)
{
        for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
                struct pic_softc * const pic = pic_list[slot];
                if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
                        (*pic->pic_ops->pic_cpu_init)(pic, ci);
                }
        }
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);
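/*
 * Send an IPI to the CPUs named in kcp, or to all other CPUs when kcp
 * is NULL.  Each PIC whose pic_cpus set intersects the target set is
 * asked to deliver the IPI; the current CPU's own PIC is skipped.
 */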
void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
        struct cpu_info * const ci = curcpu();
        KASSERT(ipi < NIPI);
        bool __diagused sent_p = false;
        for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
                struct pic_softc * const pic = pic_list[slot];
                if (pic == NULL || pic->pic_cpus == NULL)
                        continue;
                if (kcp == NULL || kcpuset_intersecting_p(kcp, pic->pic_cpus)) {
                        // never send to ourself
                        if (pic->pic_cpus == ci->ci_kcpuset)
                                continue;

                        (*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);
                        // If we were targeting a single CPU or this pic
                        // handles all cpus, we're done.
                        if (kcp != NULL || pic->pic_cpus == kcpuset_running)
                                return;
                        sent_p = true;
                }
        }
        KASSERT(cold || sent_p || ncpu <= 1);
}
#endif /* MULTIPROCESSOR */

#ifdef __HAVE_PIC_FAST_SOFTINTS
int
pic_handle_softint(void *arg)
{
        void softint_switch(lwp_t *, int);
        struct cpu_info * const ci = curcpu();
        const size_t softint = (size_t) arg;
        int s = splhigh();
        ci->ci_intr_depth--;    // don't count these as interrupts
        softint_switch(ci->ci_softlwps[softint], s);
        ci->ci_intr_depth++;
        splx(s);
        return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
        struct pic_softc * const pic = arg;
        int rv;

        rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

        return rv > 0;
}

#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
        const uint32_t ipl_mask = __BIT(is->is_ipl);

        atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
            __BIT(is->is_irq & 0x1f));

        atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
        struct pic_pending *pend = pic_pending_get();
        atomic_or_32(&pend->pending_ipls, ipl_mask);
        atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
        pic_pending_put(pend);
}

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
        struct intrsource * const is = pic->pic_sources[irq];

        KASSERT(irq < pic->pic_maxsources);
        KASSERT(is != NULL);

        pic_mark_pending_source(pic, is);
}

uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
        uint32_t pending)
{
        struct intrsource ** const isbase = &pic->pic_sources[irq_base];
        struct intrsource *is;
        volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
        uint32_t ipl_mask = 0;

        if (pending == 0)
                return ipl_mask;

        KASSERT((irq_base & 31) == 0);

        (*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

        atomic_or_32(ipending, pending);
        while (pending != 0) {
                int n = ffs(pending);
                if (n-- == 0)
                        break;
                is = isbase[n];
                KASSERT(is != NULL);
                KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
                pending &= ~__BIT(n);
                ipl_mask |= __BIT(is->is_ipl);
        }

        atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
        struct pic_pending *pend = pic_pending_get();
        atomic_or_32(&pend->pending_ipls, ipl_mask);
        atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
        pic_pending_put(pend);
        return ipl_mask;
}
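/*
 * From a 32-bit word of pending IRQs, select the bits whose interrupt
 * sources are registered at exactly the given IPL.
 */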
uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
        uint32_t pending, int ipl)
{
        uint32_t ipl_irq_mask = 0;
        uint32_t irq_mask;

        for (;;) {
                int irq = ffs(pending);
                if (irq-- == 0)
                        return ipl_irq_mask;

                irq_mask = __BIT(irq);
#if 1
                KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
                   "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
                if (pic->pic_sources[irq_base + irq] == NULL) {
                        aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
                            irq_base, irq);
                } else
#endif
                if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
                        ipl_irq_mask |= irq_mask;

                pending &= ~irq_mask;
        }
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_dispatch(struct intrsource *is, void *frame)
{
        int (*func)(void *) = is->is_func;
        void *arg = is->is_arg;

        if (__predict_false(arg == NULL)) {
                if (__predict_false(frame == NULL)) {
                        pic_deferral_ev.ev_count++;
                        return;
                }
                arg = frame;
        }

#ifdef MULTIPROCESSOR
        if (!is->is_mpsafe) {
                KERNEL_LOCK(1, NULL);
                const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
                const u_int l_blcnt __diagused = curlwp->l_blcnt;
                (void)(*func)(arg);
                KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
                KASSERT(l_blcnt == curlwp->l_blcnt);
                KERNEL_UNLOCK_ONE(NULL);
        } else
#endif
                (void)(*func)(arg);

        struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
        KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
        pcpu->pcpu_evs[is->is_irq].ev_count++;
        percpu_putref(is->is_pic->pic_percpu);
}

#if defined(__HAVE_PIC_PENDING_INTRS)
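/*
 * Deliver every pending interrupt for this PIC at the given IPL.
 * Called with IRQs disabled; each handler runs with IRQs re-enabled
 * (cpsie/cpsid bracket the dispatch), and delivered sources stay
 * blocked until pic_list_unblock_irqs() runs.
 */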
void
pic_deliver_irqs(struct pic_pending *pend, struct pic_softc *pic, int ipl,
        void *frame)
{
        const uint32_t ipl_mask = __BIT(ipl);
        struct intrsource *is;
        volatile uint32_t *ipending = pic->pic_pending_irqs;
        volatile uint32_t *iblocked = pic->pic_blocked_irqs;
        size_t irq_base;
#if PIC_MAXSOURCES > 32
        size_t irq_count;
        int poi = 0;            /* Possibility of interrupting */
#endif
        uint32_t pending_irqs;
        uint32_t blocked_irqs;
        int irq;
        bool progress __diagused = false;

        KASSERT(pic->pic_pending_ipls & ipl_mask);

        irq_base = 0;
#if PIC_MAXSOURCES > 32
        irq_count = 0;
#endif

        for (;;) {
                pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
                    *ipending, ipl);
                KASSERT((pending_irqs & *ipending) == pending_irqs);
                KASSERT((pending_irqs & ~(*ipending)) == 0);
                if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
                        irq_count += 32;
                        if (__predict_true(irq_count >= pic->pic_maxsources)) {
                                if (!poi)
                                        /* Interrupt at this level was handled. */
                                        break;
                                irq_base = 0;
                                irq_count = 0;
                                poi = 0;
                                ipending = pic->pic_pending_irqs;
                                iblocked = pic->pic_blocked_irqs;
                        } else {
                                irq_base += 32;
                                ipending++;
                                iblocked++;
                                KASSERT(irq_base <= pic->pic_maxsources);
                        }
                        continue;
#else
                        break;
#endif
                }
                progress = true;
                blocked_irqs = 0;
                do {
                        irq = ffs(pending_irqs) - 1;
                        KASSERT(irq >= 0);

                        atomic_and_32(ipending, ~__BIT(irq));
                        is = pic->pic_sources[irq_base + irq];
                        if (is != NULL) {
                                cpsie(I32_bit);
                                pic_dispatch(is, frame);
                                cpsid(I32_bit);
#if PIC_MAXSOURCES > 32
                                /*
                                 * There is a possibility of interrupting
                                 * from cpsie() to cpsid().
                                 */
                                poi = 1;
#endif
                                blocked_irqs |= __BIT(irq);
                        } else {
                                KASSERT(0);
                        }
                        pending_irqs = pic_find_pending_irqs_by_ipl(pic,
                            irq_base, *ipending, ipl);
                } while (pending_irqs);
                if (blocked_irqs) {
                        atomic_or_32(iblocked, blocked_irqs);
                        atomic_or_32(&pend->blocked_pics, __BIT(pic->pic_id));
                }
        }

        KASSERT(progress);
        /*
         * Since interrupts are disabled, we don't have to be too careful
         * about these.
         */
        if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
                atomic_and_32(&pend->pending_pics, ~__BIT(pic->pic_id));
}

static void
pic_list_unblock_irqs(struct pic_pending *pend)
{
        uint32_t blocked_pics = pend->blocked_pics;

        pend->blocked_pics = 0;

        for (;;) {
                struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
                volatile uint32_t *iblocked;
                uint32_t blocked;
                size_t irq_base;
#endif

                int pic_id = ffs(blocked_pics);
                if (pic_id-- == 0)
                        return;

                pic = pic_list[pic_id];
                KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
                for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
                     irq_base < pic->pic_maxsources;
                     irq_base += 32, iblocked++) {
                        if ((blocked = *iblocked) != 0) {
                                (*pic->pic_ops->pic_unblock_irqs)(pic,
                                    irq_base, blocked);
                                atomic_and_32(iblocked, ~blocked);
                        }
                }
#else
                KASSERT(pic->pic_blocked_irqs[0] != 0);
                (*pic->pic_ops->pic_unblock_irqs)(pic,
                    0, pic->pic_blocked_irqs[0]);
                pic->pic_blocked_irqs[0] = 0;
#endif
                blocked_pics &= ~__BIT(pic_id);
        }
}

struct pic_softc *
pic_list_find_pic_by_pending_ipl(struct pic_pending *pend, uint32_t ipl_mask)
{
        uint32_t pending_pics = pend->pending_pics;
        struct pic_softc *pic;

        for (;;) {
                int pic_id = ffs(pending_pics);
                if (pic_id-- == 0)
                        return NULL;

                pic = pic_list[pic_id];
                KASSERT(pic != NULL);
                if (pic->pic_pending_ipls & ipl_mask)
                        return pic;
                pending_pics &= ~__BIT(pic_id);
        }
}

void
pic_list_deliver_irqs(struct pic_pending *pend, register_t psw, int ipl,
        void *frame)
{
        const uint32_t ipl_mask = __BIT(ipl);
        struct pic_softc *pic;

        while ((pic = pic_list_find_pic_by_pending_ipl(pend, ipl_mask)) != NULL) {
                pic_deliver_irqs(pend, pic, ipl, frame);
                KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
        }
        atomic_and_32(&pend->pending_ipls, ~ipl_mask);
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
        struct cpu_info * const ci = curcpu();
        if (__predict_false(newipl == IPL_HIGH)) {
                KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
                return;
        }
#if defined(__HAVE_PIC_PENDING_INTRS)
        struct pic_pending *pend = pic_pending_get();
        while ((pend->pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
                KASSERT(pend->pending_ipls < __BIT(NIPL));
                for (;;) {
                        int ipl = 31 - __builtin_clz(pend->pending_ipls);
                        KASSERT(ipl < NIPL);
                        if (ipl <= newipl)
                                break;

                        pic_set_priority(ci, ipl);
                        pic_list_deliver_irqs(pend, psw, ipl, frame);
                        pic_list_unblock_irqs(pend);
                }
        }
        pic_pending_put(pend);
#endif /* __HAVE_PIC_PENDING_INTRS */
#ifdef __HAVE_PREEMPTION
        if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) {
                pic_set_priority(ci, IPL_SCHED);
                kpreempt(0);
        }
#endif
        if (ci->ci_cpl != newipl)
                pic_set_priority(ci, newipl);
}
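/*
 * percpu_create() constructor: allocate this CPU's evcnt array and
 * name buffer for a PIC.  Invoked once for each CPU.
 */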
static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
        struct pic_percpu * const pcpu = v0;
        struct pic_softc * const pic = v1;

        pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]),
            KM_SLEEP);
        KASSERT(pcpu->pcpu_evs != NULL);

#define PCPU_NAMELEN    32
#ifdef DIAGNOSTIC
        const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);
#endif

        KASSERT(namelen < PCPU_NAMELEN);
        pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
        snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
            "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
        strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
        pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
        printf("%s: %s %s: <%s>\n",
            __func__, ci->ci_data.cpu_name, pic->pic_name,
            pcpu->pcpu_name);
#endif
}

static int
pic_init(void)
{

        mutex_init(&pic_lock, MUTEX_DEFAULT, IPL_HIGH);

        return 0;
}
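/*
 * Register a PIC.  irqbase is the global IRQ number of the PIC's first
 * source, or PIC_IRQBASE_ALLOC to have one assigned; the irqbase
 * actually used is returned.
 */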
int
pic_add(struct pic_softc *pic, int irqbase)
{
        int slot, maybe_slot = -1;
        size_t sourcebase;
        static ONCE_DECL(pic_once);

        RUN_ONCE(&pic_once, pic_init);

        KASSERT(strlen(pic->pic_name) > 0);

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
        if (__predict_false(pic_pending_percpu == NULL))
                pic_pending_percpu = percpu_alloc(sizeof(struct pic_pending));
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

        mutex_enter(&pic_lock);
        if (irqbase == PIC_IRQBASE_ALLOC) {
                irqbase = pic_lastbase;
        }
        for (slot = 0; slot < PIC_MAXPICS; slot++) {
                struct pic_softc * const xpic = pic_list[slot];
                if (xpic == NULL) {
                        if (maybe_slot < 0)
                                maybe_slot = slot;
                        if (irqbase < 0)
                                break;
                        continue;
                }
                if (irqbase < 0 || xpic->pic_irqbase < 0)
                        continue;
                if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
                        continue;
                if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
                        continue;
                panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
                    " with pic %s (%zu sources @ irq %u)",
                    pic->pic_name, pic->pic_maxsources, irqbase,
                    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
        }
        slot = maybe_slot;
#if 0
        printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
            pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
        KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
            pic->pic_maxsources);
        KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);
        sourcebase = pic_sourcebase;
        pic_sourcebase += pic->pic_maxsources;
        if (pic_lastbase < irqbase + pic->pic_maxsources)
                pic_lastbase = irqbase + pic->pic_maxsources;
        mutex_exit(&pic_lock);

        /*
         * Allocate a pointer to each cpu's evcnts and then, for each cpu,
         * allocate its evcnts and then attach an evcnt for each pin.
         * We can't allocate the evcnt structures directly since
         * percpu will move the contents of percpu memory around and
         * corrupt the pointers in the evcnts themselves.  Remember, any
         * problem can be solved with sufficient indirection.
         */
        pic->pic_percpu = percpu_create(sizeof(struct pic_percpu),
            pic_percpu_allocate, NULL, pic);

        pic->pic_sources = &pic_sources[sourcebase];
        pic->pic_irqbase = irqbase;
        pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
        KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
        KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
        pic_list[slot] = pic;

        return irqbase;
}

int
pic_alloc_irq(struct pic_softc *pic)
{
        int irq;

        for (irq = 0; irq < pic->pic_maxsources; irq++) {
                if (pic->pic_sources[irq] == NULL)
                        return irq;
        }

        return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
        struct pic_percpu * const pcpu = v0;
        struct intrsource * const is = v1;

        KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
        evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
            pcpu->pcpu_name, is->is_source);
}

void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
        int (*func)(void *), void *arg, const char *xname)
{
        struct intrsource *is;
        int off, nipl;

        if (pic->pic_sources[irq]) {
                printf("pic_establish_intr: pic %s irq %d already present\n",
                    pic->pic_name, irq);
                return NULL;
        }

        is = kmem_zalloc(sizeof(*is), KM_SLEEP);
        is->is_pic = pic;
        is->is_irq = irq;
        is->is_ipl = ipl;
        is->is_type = type & 0xff;
        is->is_func = func;
        is->is_arg = arg;
#ifdef MULTIPROCESSOR
        is->is_mpsafe = (type & IST_MPSAFE) || ipl != IPL_VM;
#endif

        if (pic->pic_ops->pic_source_name)
                (*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
                    sizeof(is->is_source));
        else
                snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

        /*
         * Now attach the per-cpu evcnts.
         */
        percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

        pic->pic_sources[irq] = is;

        /*
         * First try to use an existing slot which is empty.
         */
        for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) {
                if (pic__iplsources[off] == NULL) {
                        is->is_iplidx = off - pic_ipl_offset[ipl];
                        pic__iplsources[off] = is;
                        goto unblock;
                }
        }

        /*
         * Move up all the sources by one.
         */
        if (ipl < NIPL) {
                off = pic_ipl_offset[ipl+1];
                memmove(&pic__iplsources[off+1], &pic__iplsources[off],
                    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
        }

        /*
         * Advance the offset of all IPLs higher than this.  Include an
         * extra one as well.  Thus the number of sources per ipl is
         * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl].
         */
        for (nipl = ipl + 1; nipl <= NIPL; nipl++)
                pic_ipl_offset[nipl]++;

        /*
         * Insert into the previously made position at the end of this IPL's
         * sources.
         */
        off = pic_ipl_offset[ipl + 1] - 1;
        is->is_iplidx = off - pic_ipl_offset[ipl];
        pic__iplsources[off] = is;

        (*pic->pic_ops->pic_establish_irq)(pic, is);

unblock:
        (*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
            __BIT(is->is_irq & 0x1f));

        if (xname) {
                if (is->is_xname == NULL)
                        is->is_xname = kmem_zalloc(INTRDEVNAMEBUF, KM_SLEEP);
                if (is->is_xname[0] != '\0')
                        strlcat(is->is_xname, ", ", INTRDEVNAMEBUF);
                strlcat(is->is_xname, xname, INTRDEVNAMEBUF);
        }

        /* We're done. */
        return is;
}
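/*
 * percpu_foreach() callback: detach this CPU's event counter for an
 * interrupt source that is being disestablished.
 */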
static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
        struct pic_percpu * const pcpu = v0;
        struct intrsource * const is = v1;

        KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
        evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

void
pic_disestablish_source(struct intrsource *is)
{
        struct pic_softc * const pic = is->is_pic;
        const int irq = is->is_irq;

        KASSERT(is == pic->pic_sources[irq]);

        (*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
        pic->pic_sources[irq] = NULL;
        pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
        if (is->is_xname != NULL) {
                kmem_free(is->is_xname, INTRDEVNAMEBUF);
                is->is_xname = NULL;
        }
        /*
         * Now detach the per-cpu evcnts.
         */
        percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

        kmem_free(is, sizeof(*is));
}
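/*
 * Example use from a driver attach routine (illustrative only; the
 * handler foo_intr, softc sc, and device self are hypothetical):
 *
 *	void *ih = intr_establish_xname(irq, IPL_VM, IST_LEVEL | IST_MPSAFE,
 *	    foo_intr, sc, device_xname(self));
 *	if (ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");
 */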
void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
        return intr_establish_xname(irq, ipl, type, func, arg, NULL);
}

void *
intr_establish_xname(int irq, int ipl, int type, int (*func)(void *), void *arg,
    const char *xname)
{
        KASSERT(!cpu_intr_p());
        KASSERT(!cpu_softintr_p());

        for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
                struct pic_softc * const pic = pic_list[slot];
                if (pic == NULL || pic->pic_irqbase < 0)
                        continue;
                if (pic->pic_irqbase <= irq
                    && irq < pic->pic_irqbase + pic->pic_maxsources) {
                        return pic_establish_intr(pic, irq - pic->pic_irqbase,
                            ipl, type, func, arg, xname);
                }
        }

        return NULL;
}

void
intr_disestablish(void *ih)
{
        struct intrsource * const is = ih;

        KASSERT(!cpu_intr_p());
        KASSERT(!cpu_softintr_p());

        pic_disestablish_source(is);
}

void
intr_mask(void *ih)
{
        struct intrsource * const is = ih;
        struct pic_softc * const pic = is->is_pic;
        const int irq = is->is_irq;

        if (atomic_inc_32_nv(&is->is_mask_count) == 1)
                (*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}

void
intr_unmask(void *ih)
{
        struct intrsource * const is = ih;
        struct pic_softc * const pic = is->is_pic;
        const int irq = is->is_irq;

        if (atomic_dec_32_nv(&is->is_mask_count) == 0)
                (*pic->pic_ops->pic_unblock_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}

const char *
intr_string(intr_handle_t irq, char *buf, size_t len)
{
        for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
                struct pic_softc * const pic = pic_list[slot];
                if (pic == NULL || pic->pic_irqbase < 0)
                        continue;
                if (pic->pic_irqbase <= irq
                    && irq < pic->pic_irqbase + pic->pic_maxsources) {
                        struct intrsource * const is = pic->pic_sources[irq - pic->pic_irqbase];
                        snprintf(buf, len, "%s %s", pic->pic_name, is->is_source);
                        return buf;
                }
        }

        return NULL;
}
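/*
 * Find an interrupt source by its "<pic_name> <source>" identifier,
 * the same format produced by intr_string().  The caller must hold
 * cpu_lock.
 */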
static struct intrsource *
intr_get_source(const char *intrid)
{
        struct intrsource *is;
        intrid_t buf;
        size_t slot;
        int irq;

        KASSERT(mutex_owned(&cpu_lock));

        for (slot = 0; slot < PIC_MAXPICS; slot++) {
                struct pic_softc * const pic = pic_list[slot];
                if (pic == NULL || pic->pic_irqbase < 0)
                        continue;
                for (irq = 0; irq < pic->pic_maxsources; irq++) {
                        is = pic->pic_sources[irq];
                        if (is == NULL || is->is_source[0] == '\0')
                                continue;

                        snprintf(buf, sizeof(buf), "%s %s", pic->pic_name, is->is_source);
                        if (strcmp(buf, intrid) == 0)
                                return is;
                }
        }

        return NULL;
}

struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
        struct intrids_handler *iih;
        struct intrsource *is;
        int count, irq, n;
        size_t slot;

        if (kcpuset_iszero(cpuset))
                return NULL;

        count = 0;
        for (slot = 0; slot < PIC_MAXPICS; slot++) {
                struct pic_softc * const pic = pic_list[slot];
                if (pic != NULL && pic->pic_irqbase >= 0) {
                        for (irq = 0; irq < pic->pic_maxsources; irq++) {
                                is = pic->pic_sources[irq];
                                if (is && is->is_source[0] != '\0')
                                        count++;
                        }
                }
        }

        iih = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count, KM_SLEEP);
        iih->iih_nids = count;

        for (n = 0, slot = 0; n < count && slot < PIC_MAXPICS; slot++) {
                struct pic_softc * const pic = pic_list[slot];
                if (pic == NULL || pic->pic_irqbase < 0)
                        continue;
                for (irq = 0; irq < pic->pic_maxsources; irq++) {
                        is = pic->pic_sources[irq];
                        if (is == NULL || is->is_source[0] == '\0')
                                continue;

                        snprintf(iih->iih_intrids[n++], sizeof(intrid_t), "%s %s",
                            pic->pic_name, is->is_source);
                }
        }

        return iih;
}

void
interrupt_destruct_intrids(struct intrids_handler *iih)
{
        if (iih == NULL)
                return;

        kmem_free(iih, sizeof(int) + sizeof(intrid_t) * iih->iih_nids);
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;

        kcpuset_zero(cpuset);

        mutex_enter(&cpu_lock);
        for (CPU_INFO_FOREACH(cii, ci)) {
                if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
                        kcpuset_set(cpuset, cpu_index(ci));
        }
        mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
        struct intrsource *is;

        mutex_enter(&cpu_lock);
        is = intr_get_source(intrid);
        if (is == NULL || is->is_xname == NULL)
                buf[0] = '\0';
        else
                strlcpy(buf, is->is_xname, len);
        mutex_exit(&cpu_lock);
}

struct interrupt_get_count_arg {
        struct intrsource *is;
        uint64_t count;
        u_int cpu_idx;
};

static void
interrupt_get_count_cb(void *v0, void *v1, struct cpu_info *ci)
{
        struct pic_percpu * const pcpu = v0;
        struct interrupt_get_count_arg * const arg = v1;

        if (arg->cpu_idx != cpu_index(ci))
                return;

        arg->count = pcpu->pcpu_evs[arg->is->is_irq].ev_count;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
        struct interrupt_get_count_arg arg;
        struct intrsource *is;
        uint64_t count;

        count = 0;

        mutex_enter(&cpu_lock);
        is = intr_get_source(intrid);
        if (is != NULL && is->is_pic != NULL) {
                arg.is = is;
                arg.count = 0;
                arg.cpu_idx = cpu_idx;
                percpu_foreach(is->is_pic->pic_percpu, interrupt_get_count_cb, &arg);
                count = arg.count;
        }
        mutex_exit(&cpu_lock);

        return count;
}

#ifdef MULTIPROCESSOR
void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
        struct intrsource *is;
        struct pic_softc *pic;

        kcpuset_zero(cpuset);

        mutex_enter(&cpu_lock);
        is = intr_get_source(intrid);
        if (is != NULL) {
                pic = is->is_pic;
                if (pic && pic->pic_ops->pic_get_affinity)
                        pic->pic_ops->pic_get_affinity(pic, is->is_irq, cpuset);
        }
        mutex_exit(&cpu_lock);
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
        struct intrsource *is;
        int error;

        mutex_enter(&cpu_lock);
        is = intr_get_source(intrid);
        if (is == NULL) {
                error = ENOENT;
        } else {
                error = interrupt_distribute(is, newset, oldset);
        }
        mutex_exit(&cpu_lock);

        return error;
}

int
interrupt_distribute(void *ih, const kcpuset_t *newset, kcpuset_t *oldset)
{
        struct intrsource * const is = ih;
        struct pic_softc * const pic = is->is_pic;

        if (pic == NULL)
                return EOPNOTSUPP;
        if (pic->pic_ops->pic_set_affinity == NULL ||
            pic->pic_ops->pic_get_affinity == NULL)
                return EOPNOTSUPP;

        if (!is->is_mpsafe)
                return EINVAL;

        if (oldset != NULL)
                pic->pic_ops->pic_get_affinity(pic, is->is_irq, oldset);

        return pic->pic_ops->pic_set_affinity(pic, is->is_irq, newset);
}
#endif