1 /* $NetBSD: pic.c,v 1.44 2018/07/15 16:03:24 jmcneill Exp $ */ 2 /*- 3 * Copyright (c) 2008 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Matt Thomas. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 #define _INTR_PRIVATE 32 #include "opt_ddb.h" 33 #include "opt_multiprocessor.h" 34 35 #include <sys/cdefs.h> 36 __KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.44 2018/07/15 16:03:24 jmcneill Exp $"); 37 38 #include <sys/param.h> 39 #include <sys/atomic.h> 40 #include <sys/cpu.h> 41 #include <sys/evcnt.h> 42 #include <sys/intr.h> 43 #include <sys/kernel.h> 44 #include <sys/kmem.h> 45 #include <sys/mutex.h> 46 #include <sys/once.h> 47 #include <sys/interrupt.h> 48 #include <sys/xcall.h> 49 #include <sys/ipi.h> 50 51 #include <arm/armreg.h> 52 #include <arm/cpufunc.h> 53 #include <arm/locore.h> /* for compat aarch64 */ 54 55 #ifdef DDB 56 #include <arm/db_machdep.h> 57 #endif 58 59 #include <arm/pic/picvar.h> 60 61 #if defined(__HAVE_PIC_PENDING_INTRS) 62 /* 63 * This implementation of pending interrupts on a MULTIPROCESSOR system makes 64 * the assumption that a PIC (pic_softc) shall only have all its interrupts 65 * come from the same CPU. In other words, interrupts from a single PIC will 66 * not be distributed among multiple CPUs. 67 */ 68 struct pic_pending { 69 volatile uint32_t blocked_pics; 70 volatile uint32_t pending_pics; 71 volatile uint32_t pending_ipls; 72 }; 73 static uint32_t 74 pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int); 75 static struct pic_softc * 76 pic_list_find_pic_by_pending_ipl(struct pic_pending *, uint32_t); 77 static void 78 pic_deliver_irqs(struct pic_pending *, struct pic_softc *, int, void *); 79 static void 80 pic_list_deliver_irqs(struct pic_pending *, register_t, int, void *); 81 82 #ifdef MULTIPROCESSOR 83 percpu_t *pic_pending_percpu; 84 #else 85 struct pic_pending pic_pending; 86 #endif /* MULTIPROCESSOR */ 87 #endif /* __HAVE_PIC_PENDING_INTRS */ 88 89 struct pic_softc *pic_list[PIC_MAXPICS]; 90 #if PIC_MAXPICS > 32 91 #error PIC_MAXPICS > 32 not supported 92 #endif 93 struct intrsource *pic_sources[PIC_MAXMAXSOURCES]; 94 struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES]; 95 struct intrsource **pic_iplsource[NIPL] = { 96 [0 ... NIPL-1] = pic__iplsources, 97 }; 98 size_t pic_ipl_offset[NIPL+1]; 99 100 static kmutex_t pic_lock; 101 size_t pic_sourcebase; 102 static struct evcnt pic_deferral_ev = 103 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr"); 104 EVCNT_ATTACH_STATIC(pic_deferral_ev); 105 106 static int pic_init(void); 107 108 #ifdef __HAVE_PIC_SET_PRIORITY 109 void 110 pic_set_priority(struct cpu_info *ci, int newipl) 111 { 112 register_t psw = cpsid(I32_bit); 113 if (pic_list[0] != NULL) 114 (pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl); 115 ci->ci_cpl = newipl; 116 if ((psw & I32_bit) == 0) 117 cpsie(I32_bit); 118 } 119 #endif 120 121 #ifdef MULTIPROCESSOR 122 int 123 pic_ipi_ast(void *arg) 124 { 125 setsoftast(curcpu()); 126 return 1; 127 } 128 129 int 130 pic_ipi_nop(void *arg) 131 { 132 /* do nothing */ 133 return 1; 134 } 135 136 int 137 pic_ipi_xcall(void *arg) 138 { 139 xc_ipi_handler(); 140 return 1; 141 } 142 143 int 144 pic_ipi_generic(void *arg) 145 { 146 ipi_cpu_handler(); 147 return 1; 148 } 149 150 #ifdef DDB 151 int 152 pic_ipi_ddb(void *arg) 153 { 154 // printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg); 155 kdb_trap(-1, arg); 156 return 1; 157 } 158 #endif /* DDB */ 159 160 #ifdef __HAVE_PREEMPTION 161 int 162 pic_ipi_kpreempt(void *arg) 163 { 164 atomic_or_uint(&curcpu()->ci_astpending, __BIT(1)); 165 return 1; 166 } 167 #endif /* __HAVE_PREEMPTION */ 168 169 void 170 intr_cpu_init(struct cpu_info *ci) 171 { 172 for (size_t slot = 0; slot < PIC_MAXPICS; slot++) { 173 struct pic_softc * const pic = pic_list[slot]; 174 if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) { 175 (*pic->pic_ops->pic_cpu_init)(pic, ci); 176 } 177 } 178 } 179 180 typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long); 181 182 void 183 intr_ipi_send(const kcpuset_t *kcp, u_long ipi) 184 { 185 struct cpu_info * const ci = curcpu(); 186 KASSERT(ipi < NIPI); 187 bool __diagused sent_p = false; 188 for (size_t slot = 0; slot < PIC_MAXPICS; slot++) { 189 struct pic_softc * const pic = pic_list[slot]; 190 if (pic == NULL || pic->pic_cpus == NULL) 191 continue; 192 if (kcp == NULL || kcpuset_intersecting_p(kcp, pic->pic_cpus)) { 193 // never send to ourself 194 if (pic->pic_cpus == ci->ci_kcpuset) 195 continue; 196 197 (*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi); 198 // If we were targeting a single CPU or this pic 199 // handles all cpus, we're done. 200 if (kcp != NULL || pic->pic_cpus == kcpuset_running) 201 return; 202 sent_p = true; 203 } 204 } 205 KASSERT(cold || sent_p || ncpu <= 1); 206 } 207 #endif /* MULTIPROCESSOR */ 208 209 #ifdef __HAVE_PIC_FAST_SOFTINTS 210 int 211 pic_handle_softint(void *arg) 212 { 213 void softint_switch(lwp_t *, int); 214 struct cpu_info * const ci = curcpu(); 215 const size_t softint = (size_t) arg; 216 int s = splhigh(); 217 ci->ci_intr_depth--; // don't count these as interrupts 218 softint_switch(ci->ci_softlwps[softint], s); 219 ci->ci_intr_depth++; 220 splx(s); 221 return 1; 222 } 223 #endif 224 225 int 226 pic_handle_intr(void *arg) 227 { 228 struct pic_softc * const pic = arg; 229 int rv; 230 231 rv = (*pic->pic_ops->pic_find_pending_irqs)(pic); 232 233 return rv > 0; 234 } 235 236 #if defined(__HAVE_PIC_PENDING_INTRS) 237 void 238 pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is) 239 { 240 const uint32_t ipl_mask = __BIT(is->is_ipl); 241 242 atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5], 243 __BIT(is->is_irq & 0x1f)); 244 245 atomic_or_32(&pic->pic_pending_ipls, ipl_mask); 246 #ifdef MULTIPROCESSOR 247 struct pic_pending *pend = percpu_getref(pic_pending_percpu); 248 #else 249 struct pic_pending *pend = &pic_pending; 250 #endif 251 atomic_or_32(&pend->pending_ipls, ipl_mask); 252 atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id)); 253 #ifdef MULTIPROCESSOR 254 percpu_putref(pic_pending_percpu); 255 #endif 256 } 257 258 void 259 pic_mark_pending(struct pic_softc *pic, int irq) 260 { 261 struct intrsource * const is = pic->pic_sources[irq]; 262 263 KASSERT(irq < pic->pic_maxsources); 264 KASSERT(is != NULL); 265 266 pic_mark_pending_source(pic, is); 267 } 268 269 uint32_t 270 pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base, 271 uint32_t pending) 272 { 273 struct intrsource ** const isbase = &pic->pic_sources[irq_base]; 274 struct intrsource *is; 275 volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5]; 276 uint32_t ipl_mask = 0; 277 278 if (pending == 0) 279 return ipl_mask; 280 281 KASSERT((irq_base & 31) == 0); 282 283 (*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending); 284 285 atomic_or_32(ipending, pending); 286 while (pending != 0) { 287 int n = ffs(pending); 288 if (n-- == 0) 289 break; 290 is = isbase[n]; 291 KASSERT(is != NULL); 292 KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32); 293 pending &= ~__BIT(n); 294 ipl_mask |= __BIT(is->is_ipl); 295 } 296 297 atomic_or_32(&pic->pic_pending_ipls, ipl_mask); 298 #ifdef MULTIPROCESSOR 299 struct pic_pending *pend = percpu_getref(pic_pending_percpu); 300 #else 301 struct pic_pending *pend = &pic_pending; 302 #endif 303 atomic_or_32(&pend->pending_ipls, ipl_mask); 304 atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id)); 305 #ifdef MULTIPROCESSOR 306 percpu_putref(pic_pending_percpu); 307 #endif 308 return ipl_mask; 309 } 310 311 uint32_t 312 pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base, 313 uint32_t pending, int ipl) 314 { 315 uint32_t ipl_irq_mask = 0; 316 uint32_t irq_mask; 317 318 for (;;) { 319 int irq = ffs(pending); 320 if (irq-- == 0) 321 return ipl_irq_mask; 322 323 irq_mask = __BIT(irq); 324 #if 1 325 KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL, 326 "%s: irq_base %zu irq %d\n", __func__, irq_base, irq); 327 #else 328 if (pic->pic_sources[irq_base + irq] == NULL) { 329 aprint_error("stray interrupt? irq_base=%zu irq=%d\n", 330 irq_base, irq); 331 } else 332 #endif 333 if (pic->pic_sources[irq_base + irq]->is_ipl == ipl) 334 ipl_irq_mask |= irq_mask; 335 336 pending &= ~irq_mask; 337 } 338 } 339 #endif /* __HAVE_PIC_PENDING_INTRS */ 340 341 void 342 pic_dispatch(struct intrsource *is, void *frame) 343 { 344 int (*func)(void *) = is->is_func; 345 void *arg = is->is_arg; 346 347 if (__predict_false(arg == NULL)) { 348 if (__predict_false(frame == NULL)) { 349 pic_deferral_ev.ev_count++; 350 return; 351 } 352 arg = frame; 353 } 354 355 #ifdef MULTIPROCESSOR 356 if (!is->is_mpsafe) { 357 KERNEL_LOCK(1, NULL); 358 const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count; 359 const u_int l_blcnt __diagused = curlwp->l_blcnt; 360 (void)(*func)(arg); 361 KASSERT(ci_blcnt == curcpu()->ci_biglock_count); 362 KASSERT(l_blcnt == curlwp->l_blcnt); 363 KERNEL_UNLOCK_ONE(NULL); 364 } else 365 #endif 366 (void)(*func)(arg); 367 368 369 struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu); 370 KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC); 371 pcpu->pcpu_evs[is->is_irq].ev_count++; 372 percpu_putref(is->is_pic->pic_percpu); 373 } 374 375 #if defined(__HAVE_PIC_PENDING_INTRS) 376 void 377 pic_deliver_irqs(struct pic_pending *pend, struct pic_softc *pic, int ipl, 378 void *frame) 379 { 380 const uint32_t ipl_mask = __BIT(ipl); 381 struct intrsource *is; 382 volatile uint32_t *ipending = pic->pic_pending_irqs; 383 volatile uint32_t *iblocked = pic->pic_blocked_irqs; 384 size_t irq_base; 385 #if PIC_MAXSOURCES > 32 386 size_t irq_count; 387 int poi = 0; /* Possibility of interrupting */ 388 #endif 389 uint32_t pending_irqs; 390 uint32_t blocked_irqs; 391 int irq; 392 bool progress __diagused = false; 393 394 KASSERT(pic->pic_pending_ipls & ipl_mask); 395 396 irq_base = 0; 397 #if PIC_MAXSOURCES > 32 398 irq_count = 0; 399 #endif 400 401 for (;;) { 402 pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base, 403 *ipending, ipl); 404 KASSERT((pending_irqs & *ipending) == pending_irqs); 405 KASSERT((pending_irqs & ~(*ipending)) == 0); 406 if (pending_irqs == 0) { 407 #if PIC_MAXSOURCES > 32 408 irq_count += 32; 409 if (__predict_true(irq_count >= pic->pic_maxsources)) { 410 if (!poi) 411 /*Interrupt at this level was handled.*/ 412 break; 413 irq_base = 0; 414 irq_count = 0; 415 poi = 0; 416 ipending = pic->pic_pending_irqs; 417 iblocked = pic->pic_blocked_irqs; 418 } else { 419 irq_base += 32; 420 ipending++; 421 iblocked++; 422 KASSERT(irq_base <= pic->pic_maxsources); 423 } 424 continue; 425 #else 426 break; 427 #endif 428 } 429 progress = true; 430 blocked_irqs = 0; 431 do { 432 irq = ffs(pending_irqs) - 1; 433 KASSERT(irq >= 0); 434 435 atomic_and_32(ipending, ~__BIT(irq)); 436 is = pic->pic_sources[irq_base + irq]; 437 if (is != NULL) { 438 cpsie(I32_bit); 439 pic_dispatch(is, frame); 440 cpsid(I32_bit); 441 #if PIC_MAXSOURCES > 32 442 /* 443 * There is a possibility of interrupting 444 * from cpsie() to cpsid(). 445 */ 446 poi = 1; 447 #endif 448 blocked_irqs |= __BIT(irq); 449 } else { 450 KASSERT(0); 451 } 452 pending_irqs = pic_find_pending_irqs_by_ipl(pic, 453 irq_base, *ipending, ipl); 454 } while (pending_irqs); 455 if (blocked_irqs) { 456 atomic_or_32(iblocked, blocked_irqs); 457 atomic_or_32(&pend->blocked_pics, __BIT(pic->pic_id)); 458 } 459 } 460 461 KASSERT(progress); 462 /* 463 * Since interrupts are disabled, we don't have to be too careful 464 * about these. 465 */ 466 if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0) 467 atomic_and_32(&pend->pending_pics, ~__BIT(pic->pic_id)); 468 } 469 470 static void 471 pic_list_unblock_irqs(struct pic_pending *pend) 472 { 473 uint32_t blocked_pics = pend->blocked_pics; 474 475 pend->blocked_pics = 0; 476 477 for (;;) { 478 struct pic_softc *pic; 479 #if PIC_MAXSOURCES > 32 480 volatile uint32_t *iblocked; 481 uint32_t blocked; 482 size_t irq_base; 483 #endif 484 485 int pic_id = ffs(blocked_pics); 486 if (pic_id-- == 0) 487 return; 488 489 pic = pic_list[pic_id]; 490 KASSERT(pic != NULL); 491 #if PIC_MAXSOURCES > 32 492 for (irq_base = 0, iblocked = pic->pic_blocked_irqs; 493 irq_base < pic->pic_maxsources; 494 irq_base += 32, iblocked++) { 495 if ((blocked = *iblocked) != 0) { 496 (*pic->pic_ops->pic_unblock_irqs)(pic, 497 irq_base, blocked); 498 atomic_and_32(iblocked, ~blocked); 499 } 500 } 501 #else 502 KASSERT(pic->pic_blocked_irqs[0] != 0); 503 (*pic->pic_ops->pic_unblock_irqs)(pic, 504 0, pic->pic_blocked_irqs[0]); 505 pic->pic_blocked_irqs[0] = 0; 506 #endif 507 blocked_pics &= ~__BIT(pic_id); 508 } 509 } 510 511 512 struct pic_softc * 513 pic_list_find_pic_by_pending_ipl(struct pic_pending *pend, uint32_t ipl_mask) 514 { 515 uint32_t pending_pics = pend->pending_pics; 516 struct pic_softc *pic; 517 518 for (;;) { 519 int pic_id = ffs(pending_pics); 520 if (pic_id-- == 0) 521 return NULL; 522 523 pic = pic_list[pic_id]; 524 KASSERT(pic != NULL); 525 if (pic->pic_pending_ipls & ipl_mask) 526 return pic; 527 pending_pics &= ~__BIT(pic_id); 528 } 529 } 530 531 void 532 pic_list_deliver_irqs(struct pic_pending *pend, register_t psw, int ipl, 533 void *frame) 534 { 535 const uint32_t ipl_mask = __BIT(ipl); 536 struct pic_softc *pic; 537 538 while ((pic = pic_list_find_pic_by_pending_ipl(pend, ipl_mask)) != NULL) { 539 pic_deliver_irqs(pend, pic, ipl, frame); 540 KASSERT((pic->pic_pending_ipls & ipl_mask) == 0); 541 } 542 atomic_and_32(&pend->pending_ipls, ~ipl_mask); 543 } 544 #endif /* __HAVE_PIC_PENDING_INTRS */ 545 546 void 547 pic_do_pending_ints(register_t psw, int newipl, void *frame) 548 { 549 struct cpu_info * const ci = curcpu(); 550 if (__predict_false(newipl == IPL_HIGH)) { 551 KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl); 552 return; 553 } 554 #if defined(__HAVE_PIC_PENDING_INTRS) 555 #ifdef MULTIPROCESSOR 556 struct pic_pending *pend = percpu_getref(pic_pending_percpu); 557 #else 558 struct pic_pending *pend = &pic_pending; 559 #endif 560 while ((pend->pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) { 561 KASSERT(pend->pending_ipls < __BIT(NIPL)); 562 for (;;) { 563 int ipl = 31 - __builtin_clz(pend->pending_ipls); 564 KASSERT(ipl < NIPL); 565 if (ipl <= newipl) 566 break; 567 568 pic_set_priority(ci, ipl); 569 pic_list_deliver_irqs(pend, psw, ipl, frame); 570 pic_list_unblock_irqs(pend); 571 } 572 } 573 #ifdef MULTIPROCESSOR 574 percpu_putref(pic_pending_percpu); 575 #endif 576 #endif /* __HAVE_PIC_PENDING_INTRS */ 577 #ifdef __HAVE_PREEMPTION 578 if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) { 579 pic_set_priority(ci, IPL_SCHED); 580 kpreempt(0); 581 } 582 #endif 583 if (ci->ci_cpl != newipl) 584 pic_set_priority(ci, newipl); 585 } 586 587 static void 588 pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci) 589 { 590 struct pic_percpu * const pcpu = v0; 591 struct pic_softc * const pic = v1; 592 593 pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]), 594 KM_SLEEP); 595 KASSERT(pcpu->pcpu_evs != NULL); 596 597 #define PCPU_NAMELEN 32 598 #ifdef DIAGNOSTIC 599 const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name); 600 #endif 601 602 KASSERT(namelen < PCPU_NAMELEN); 603 pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP); 604 #ifdef MULTIPROCESSOR 605 snprintf(pcpu->pcpu_name, PCPU_NAMELEN, 606 "%s (%s)", pic->pic_name, ci->ci_data.cpu_name); 607 #else 608 strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN); 609 #endif 610 pcpu->pcpu_magic = PICPERCPU_MAGIC; 611 #if 0 612 printf("%s: %s %s: <%s>\n", 613 __func__, ci->ci_data.cpu_name, pic->pic_name, 614 pcpu->pcpu_name); 615 #endif 616 } 617 618 #if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR) 619 static void 620 pic_pending_zero(void *v0, void *v1, struct cpu_info *ci) 621 { 622 struct pic_pending * const p = v0; 623 memset(p, 0, sizeof(*p)); 624 } 625 #endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */ 626 627 static int 628 pic_init(void) 629 { 630 631 mutex_init(&pic_lock, MUTEX_DEFAULT, IPL_HIGH); 632 633 return 0; 634 } 635 636 void 637 pic_add(struct pic_softc *pic, int irqbase) 638 { 639 int slot, maybe_slot = -1; 640 size_t sourcebase; 641 static ONCE_DECL(pic_once); 642 643 RUN_ONCE(&pic_once, pic_init); 644 645 KASSERT(strlen(pic->pic_name) > 0); 646 647 #if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR) 648 if (__predict_false(pic_pending_percpu == NULL)) { 649 pic_pending_percpu = percpu_alloc(sizeof(struct pic_pending)); 650 651 /* 652 * Now zero the per-cpu pending data. 653 */ 654 percpu_foreach(pic_pending_percpu, pic_pending_zero, NULL); 655 } 656 #endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */ 657 658 mutex_enter(&pic_lock); 659 for (slot = 0; slot < PIC_MAXPICS; slot++) { 660 struct pic_softc * const xpic = pic_list[slot]; 661 if (xpic == NULL) { 662 if (maybe_slot < 0) 663 maybe_slot = slot; 664 if (irqbase < 0) 665 break; 666 continue; 667 } 668 if (irqbase < 0 || xpic->pic_irqbase < 0) 669 continue; 670 if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources) 671 continue; 672 if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase) 673 continue; 674 panic("pic_add: pic %s (%zu sources @ irq %u) conflicts" 675 " with pic %s (%zu sources @ irq %u)", 676 pic->pic_name, pic->pic_maxsources, irqbase, 677 xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase); 678 } 679 slot = maybe_slot; 680 #if 0 681 printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n", 682 pic->pic_name, pic_sourcebase, pic->pic_maxsources); 683 #endif 684 KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu", 685 pic->pic_maxsources); 686 KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES); 687 sourcebase = pic_sourcebase; 688 pic_sourcebase += pic->pic_maxsources; 689 690 mutex_exit(&pic_lock); 691 692 /* 693 * Allocate a pointer to each cpu's evcnts and then, for each cpu, 694 * allocate its evcnts and then attach an evcnt for each pin. 695 * We can't allocate the evcnt structures directly since 696 * percpu will move the contents of percpu memory around and 697 * corrupt the pointers in the evcnts themselves. Remember, any 698 * problem can be solved with sufficient indirection. 699 */ 700 pic->pic_percpu = percpu_alloc(sizeof(struct pic_percpu)); 701 702 /* 703 * Now allocate the per-cpu evcnts. 704 */ 705 percpu_foreach(pic->pic_percpu, pic_percpu_allocate, pic); 706 707 pic->pic_sources = &pic_sources[sourcebase]; 708 pic->pic_irqbase = irqbase; 709 pic->pic_id = slot; 710 #ifdef __HAVE_PIC_SET_PRIORITY 711 KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL)); 712 #endif 713 #ifdef MULTIPROCESSOR 714 KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL)); 715 #endif 716 pic_list[slot] = pic; 717 } 718 719 int 720 pic_alloc_irq(struct pic_softc *pic) 721 { 722 int irq; 723 724 for (irq = 0; irq < pic->pic_maxsources; irq++) { 725 if (pic->pic_sources[irq] == NULL) 726 return irq; 727 } 728 729 return -1; 730 } 731 732 static void 733 pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci) 734 { 735 struct pic_percpu * const pcpu = v0; 736 struct intrsource * const is = v1; 737 738 KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC); 739 evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL, 740 pcpu->pcpu_name, is->is_source); 741 } 742 743 void * 744 pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type, 745 int (*func)(void *), void *arg) 746 { 747 struct intrsource *is; 748 int off, nipl; 749 750 if (pic->pic_sources[irq]) { 751 printf("pic_establish_intr: pic %s irq %d already present\n", 752 pic->pic_name, irq); 753 return NULL; 754 } 755 756 is = kmem_zalloc(sizeof(*is), KM_SLEEP); 757 is->is_pic = pic; 758 is->is_irq = irq; 759 is->is_ipl = ipl; 760 is->is_type = type & 0xff; 761 is->is_func = func; 762 is->is_arg = arg; 763 #ifdef MULTIPROCESSOR 764 is->is_mpsafe = (type & IST_MPSAFE) || ipl != IPL_VM; 765 #endif 766 767 if (pic->pic_ops->pic_source_name) 768 (*pic->pic_ops->pic_source_name)(pic, irq, is->is_source, 769 sizeof(is->is_source)); 770 else 771 snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq); 772 773 /* 774 * Now attach the per-cpu evcnts. 775 */ 776 percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is); 777 778 pic->pic_sources[irq] = is; 779 780 /* 781 * First try to use an existing slot which is empty. 782 */ 783 for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) { 784 if (pic__iplsources[off] == NULL) { 785 is->is_iplidx = off - pic_ipl_offset[ipl]; 786 pic__iplsources[off] = is; 787 goto unblock; 788 } 789 } 790 791 /* 792 * Move up all the sources by one. 793 */ 794 if (ipl < NIPL) { 795 off = pic_ipl_offset[ipl+1]; 796 memmove(&pic__iplsources[off+1], &pic__iplsources[off], 797 sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off)); 798 } 799 800 /* 801 * Advance the offset of all IPLs higher than this. Include an 802 * extra one as well. Thus the number of sources per ipl is 803 * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl]. 804 */ 805 for (nipl = ipl + 1; nipl <= NIPL; nipl++) 806 pic_ipl_offset[nipl]++; 807 808 /* 809 * Insert into the previously made position at the end of this IPL's 810 * sources. 811 */ 812 off = pic_ipl_offset[ipl + 1] - 1; 813 is->is_iplidx = off - pic_ipl_offset[ipl]; 814 pic__iplsources[off] = is; 815 816 (*pic->pic_ops->pic_establish_irq)(pic, is); 817 818 unblock: 819 (*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f, 820 __BIT(is->is_irq & 0x1f)); 821 822 /* We're done. */ 823 return is; 824 } 825 826 static void 827 pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci) 828 { 829 struct pic_percpu * const pcpu = v0; 830 struct intrsource * const is = v1; 831 832 KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC); 833 evcnt_detach(&pcpu->pcpu_evs[is->is_irq]); 834 } 835 836 void 837 pic_disestablish_source(struct intrsource *is) 838 { 839 struct pic_softc * const pic = is->is_pic; 840 const int irq = is->is_irq; 841 842 KASSERT(is == pic->pic_sources[irq]); 843 844 (*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f)); 845 pic->pic_sources[irq] = NULL; 846 pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL; 847 /* 848 * Now detach the per-cpu evcnts. 849 */ 850 percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is); 851 852 kmem_free(is, sizeof(*is)); 853 } 854 855 void * 856 intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg) 857 { 858 KASSERT(!cpu_intr_p()); 859 KASSERT(!cpu_softintr_p()); 860 861 for (size_t slot = 0; slot < PIC_MAXPICS; slot++) { 862 struct pic_softc * const pic = pic_list[slot]; 863 if (pic == NULL || pic->pic_irqbase < 0) 864 continue; 865 if (pic->pic_irqbase <= irq 866 && irq < pic->pic_irqbase + pic->pic_maxsources) { 867 return pic_establish_intr(pic, irq - pic->pic_irqbase, 868 ipl, type, func, arg); 869 } 870 } 871 872 return NULL; 873 } 874 875 void 876 intr_disestablish(void *ih) 877 { 878 struct intrsource * const is = ih; 879 880 KASSERT(!cpu_intr_p()); 881 KASSERT(!cpu_softintr_p()); 882 883 pic_disestablish_source(is); 884 } 885 886 #ifdef MULTIPROCESSOR 887 int 888 interrupt_distribute(void *ih, const kcpuset_t *newset, kcpuset_t *oldset) 889 { 890 struct intrsource * const is = ih; 891 struct pic_softc * const pic = is->is_pic; 892 893 if (pic == NULL) 894 return EOPNOTSUPP; 895 if (pic->pic_ops->pic_set_affinity == NULL || 896 pic->pic_ops->pic_get_affinity == NULL) 897 return EOPNOTSUPP; 898 899 if (!is->is_mpsafe) 900 return EINVAL; 901 902 if (oldset != NULL) 903 pic->pic_ops->pic_get_affinity(pic, is->is_irq, oldset); 904 905 return pic->pic_ops->pic_set_affinity(pic, is->is_irq, newset); 906 } 907 #endif 908