/*	$NetBSD: pic.c,v 1.42 2018/04/01 04:35:04 ryo Exp $	*/
/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.42 2018/04/01 04:35:04 ryo Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/xcall.h>
#include <sys/ipi.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>
#include <arm/locore.h>	/* for compat aarch64 */

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>
#if defined(__HAVE_PIC_PENDING_INTRS)
/*
 * This implementation of pending interrupts on a MULTIPROCESSOR system makes
 * the assumption that a PIC (pic_softc) shall only have all its interrupts
 * come from the same CPU.  In other words, interrupts from a single PIC will
 * not be distributed among multiple CPUs.
 */
struct pic_pending {
	volatile uint32_t blocked_pics;
	volatile uint32_t pending_pics;
	volatile uint32_t pending_ipls;
};
static uint32_t
	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
	pic_list_find_pic_by_pending_ipl(struct pic_pending *, uint32_t);
static void
	pic_deliver_irqs(struct pic_pending *, struct pic_softc *, int, void *);
static void
	pic_list_deliver_irqs(struct pic_pending *, register_t, int, void *);

#ifdef MULTIPROCESSOR
percpu_t *pic_pending_percpu;
#else
struct pic_pending pic_pending;
#endif /* MULTIPROCESSOR */
#endif /* __HAVE_PIC_PENDING_INTRS */

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
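/*
 * pic_sources[] is carved into a contiguous block per pic (see
 * pic_add()); pic__iplsources[] holds the same sources grouped by IPL,
 * the sources for a given ipl occupying the slots from
 * pic_ipl_offset[ipl] up to (but not including) pic_ipl_offset[ipl+1]
 * (see pic_establish_intr()).
 */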
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
struct intrsource **pic_iplsource[NIPL] = {
	[0 ... NIPL-1] = pic__iplsources,
};
size_t pic_ipl_offset[NIPL+1];

static kmutex_t pic_lock;
size_t pic_sourcebase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);

static int pic_init(void);

#ifdef __HAVE_PIC_SET_PRIORITY
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
	register_t psw = cpsid(I32_bit);
	if (pic_list[0] != NULL)
		(pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl);
	ci->ci_cpl = newipl;
	if ((psw & I32_bit) == 0)
		cpsie(I32_bit);
}
#endif

#ifdef MULTIPROCESSOR
int
pic_ipi_ast(void *arg)
{
	setsoftast(curcpu());
	return 1;
}

int
pic_ipi_nop(void *arg)
{
	/* do nothing */
	return 1;
}

int
pic_ipi_xcall(void *arg)
{
	xc_ipi_handler();
	return 1;
}

int
pic_ipi_generic(void *arg)
{
	ipi_cpu_handler();
	return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
//	printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
	kdb_trap(-1, arg);
	return 1;
}
#endif /* DDB */

#ifdef __HAVE_PREEMPTION
int
pic_ipi_kpreempt(void *arg)
{
	atomic_or_uint(&curcpu()->ci_astpending, __BIT(1));
	return 1;
}
#endif /* __HAVE_PREEMPTION */

void
intr_cpu_init(struct cpu_info *ci)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
			(*pic->pic_ops->pic_cpu_init)(pic, ci);
		}
	}
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);

void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
	struct cpu_info * const ci = curcpu();
	KASSERT(ipi < NIPI);
	bool __diagused sent_p = false;
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_cpus == NULL)
			continue;
		if (kcp == NULL || kcpuset_intersecting_p(kcp, pic->pic_cpus)) {
			// never send to ourself
			if (pic->pic_cpus == ci->ci_kcpuset)
				continue;

			(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);
			// If we were targeting a single CPU or this pic
			// handles all cpus, we're done.
			if (kcp != NULL || pic->pic_cpus == kcpuset_running)
				return;
			sent_p = true;
		}
	}
	KASSERT(cold || sent_p || arm_cpu_max == 1);
}
#endif /* MULTIPROCESSOR */

#ifdef __HAVE_PIC_FAST_SOFTINTS
int
pic_handle_softint(void *arg)
{
	void softint_switch(lwp_t *, int);
	struct cpu_info * const ci = curcpu();
	const size_t softint = (size_t) arg;
	int s = splhigh();
	ci->ci_intr_depth--;	// don't count these as interrupts
	softint_switch(ci->ci_softlwps[softint], s);
	ci->ci_intr_depth++;
	splx(s);
	return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
	struct pic_softc * const pic = arg;
	int rv;

	rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

	return rv > 0;
}

#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
}

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
	struct intrsource * const is = pic->pic_sources[irq];

	KASSERT(irq < pic->pic_maxsources);
	KASSERT(is != NULL);

	pic_mark_pending_source(pic, is);
}

uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
	uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);

	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
	return ipl_mask;
}

uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
	uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		   "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
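
/*
 * Dispatch a single interrupt source.  By convention a source
 * established with a NULL argument gets the trap frame instead; if no
 * frame is available either, the dispatch is deferred and counted in
 * pic_deferral_ev.  Non-MPSAFE handlers run holding the big kernel
 * lock.
 */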
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}
#endif /* __HAVE_PIC_PENDING_INTRS */
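/*
 * Deliver all pending interrupts at the given IPL for one pic.
 * Handlers run with interrupts re-enabled, so on pics with more than
 * 32 sources a new interrupt may become pending in a word that was
 * already scanned; "poi" forces a rescan from the first word in that
 * case.  Each dispatched source is recorded as blocked so that
 * pic_list_unblock_irqs() can re-enable it afterwards.
 */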
void
pic_dispatch(struct intrsource *is, void *frame)
{
	int (*func)(void *) = is->is_func;
	void *arg = is->is_arg;

	if (__predict_false(arg == NULL)) {
		if (__predict_false(frame == NULL)) {
			pic_deferral_ev.ev_count++;
			return;
		}
		arg = frame;
	}

#ifdef MULTIPROCESSOR
	if (!is->is_mpsafe) {
		KERNEL_LOCK(1, NULL);
		const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
		const u_int l_blcnt __diagused = curlwp->l_blcnt;
		(void)(*func)(arg);
		KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
		KASSERT(l_blcnt == curlwp->l_blcnt);
		KERNEL_UNLOCK_ONE(NULL);
	} else
#endif
		(void)(*func)(arg);

	struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	pcpu->pcpu_evs[is->is_irq].ev_count++;
	percpu_putref(is->is_pic->pic_percpu);
}

#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_deliver_irqs(struct pic_pending *pend, struct pic_softc *pic, int ipl,
	void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress __diagused = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/* Interrupt at this level was handled. */
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				cpsie(I32_bit);
				pic_dispatch(is, frame);
				cpsid(I32_bit);
#if PIC_MAXSOURCES > 32
				/*
				 * There is a possibility of interrupting
				 * from cpsie() to cpsid().
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			atomic_or_32(&pend->blocked_pics, __BIT(pic->pic_id));
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		atomic_and_32(&pend->pending_pics, ~__BIT(pic->pic_id));
}

static void
pic_list_unblock_irqs(struct pic_pending *pend)
{
	uint32_t blocked_pics = pend->blocked_pics;

	pend->blocked_pics = 0;

	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}

struct pic_softc *
pic_list_find_pic_by_pending_ipl(struct pic_pending *pend, uint32_t ipl_mask)
{
	uint32_t pending_pics = pend->pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

void
pic_list_deliver_irqs(struct pic_pending *pend, register_t psw, int ipl,
	void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(pend, ipl_mask)) != NULL) {
		pic_deliver_irqs(pend, pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	atomic_and_32(&pend->pending_ipls, ~ipl_mask);
}
#endif /* __HAVE_PIC_PENDING_INTRS */
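
/*
 * Deliver any pending interrupts whose IPL is above newipl, highest
 * level first: raise the current priority to that level, deliver and
 * unblock those sources, and repeat until only levels at or below
 * newipl remain.  Typically reached from the splx()/spllower() path
 * when the IPL is about to drop.
 */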
void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
#if defined(__HAVE_PIC_PENDING_INTRS)
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	while ((pend->pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(pend->pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(pend->pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(pend, psw, ipl, frame);
			pic_list_unblock_irqs(pend);
		}
	}
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
#endif /* __HAVE_PIC_PENDING_INTRS */
#ifdef __HAVE_PREEMPTION
	if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) {
		pic_set_priority(ci, IPL_SCHED);
		kpreempt(0);
	}
#endif
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}

static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]),
	    KM_SLEEP);
	KASSERT(pcpu->pcpu_evs != NULL);

#define	PCPU_NAMELEN	32
#ifdef DIAGNOSTIC
	const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);
#endif

	KASSERT(namelen < PCPU_NAMELEN);
	pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
	snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
	    "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
	strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
	pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
	printf("%s: %s %s: <%s>\n",
	    __func__, ci->ci_data.cpu_name, pic->pic_name,
	    pcpu->pcpu_name);
#endif
}

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
static void
pic_pending_zero(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_pending * const p = v0;
	memset(p, 0, sizeof(*p));
}
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

static int
pic_init(void)
{

	mutex_init(&pic_lock, MUTEX_DEFAULT, IPL_HIGH);

	return 0;
}
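
/*
 * Register a pic with the framework.  A minimal sketch of a caller
 * (illustrative only; "sc", "sc_pic" and "mydev_picops" are
 * hypothetical):
 *
 *	sc->sc_pic.pic_ops = &mydev_picops;
 *	sc->sc_pic.pic_maxsources = 32;
 *	strlcpy(sc->sc_pic.pic_name, device_xname(self),
 *	    sizeof(sc->sc_pic.pic_name));
 *	pic_add(&sc->sc_pic, sc->sc_irqbase);
 *
 * Pass irqbase < 0 if the pic's sources should not be given a global
 * irq range; such a pic is skipped by intr_establish() below.
 */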
void
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;
	size_t sourcebase;
	static ONCE_DECL(pic_once);

	RUN_ONCE(&pic_once, pic_init);

	KASSERT(strlen(pic->pic_name) > 0);

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
	if (__predict_false(pic_pending_percpu == NULL)) {
		pic_pending_percpu = percpu_alloc(sizeof(struct pic_pending));

		/*
		 * Now zero the per-cpu pending data.
		 */
		percpu_foreach(pic_pending_percpu, pic_pending_zero, NULL);
	}
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

	mutex_enter(&pic_lock);
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);
	sourcebase = pic_sourcebase;
	pic_sourcebase += pic->pic_maxsources;

	mutex_exit(&pic_lock);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_alloc(sizeof(struct pic_percpu));

	/*
	 * Now allocate the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_allocate, pic);

	pic->pic_sources = &pic_sources[sourcebase];
	pic->pic_irqbase = irqbase;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;
}

int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
	    pcpu->pcpu_name, is->is_source);
}

void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
	int (*func)(void *), void *arg)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type & 0xff;
	is->is_func = func;
	is->is_arg = arg;
#ifdef MULTIPROCESSOR
	is->is_mpsafe = (type & IST_MPSAFE) || ipl != IPL_VM;
#endif

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) {
		if (pic__iplsources[off] == NULL) {
			is->is_iplidx = off - pic_ipl_offset[ipl];
			pic__iplsources[off] = is;
			goto unblock;
		}
	}

	/*
	 * Move up all the sources by one.
	 */
	if (ipl < NIPL) {
		off = pic_ipl_offset[ipl+1];
		memmove(&pic__iplsources[off+1], &pic__iplsources[off],
		    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
	}

	/*
	 * Advance the offset of all IPLs higher than this.  Include an
	 * extra one as well.  Thus the number of sources per ipl is
	 * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl].
	 */
	for (nipl = ipl + 1; nipl <= NIPL; nipl++)
		pic_ipl_offset[nipl]++;

	/*
	 * Insert into the previously made position at the end of this IPL's
	 * sources.
	 */
	off = pic_ipl_offset[ipl + 1] - 1;
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

unblock:
	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));

	/* We're done. */
	return is;
}
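
/*
 * Worked example of the offset bookkeeping above (illustrative
 * numbers): suppose pic_ipl_offset[] = { 0, 0, 3, 5, 5, ... }, i.e.
 * three sources at ipl 1 (slots 0-2) and two at ipl 2 (slots 3-4),
 * with no free slot.  Establishing another source at ipl 1 moves
 * slots 3 and up one position higher, advances pic_ipl_offset[2..NIPL]
 * to give { 0, 0, 4, 6, 6, ... }, and stores the new source in slot 3
 * (is_iplidx = 3).
 */
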
static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}

void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg);
		}
	}

	return NULL;
}

void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	pic_disestablish_source(is);
}
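
/*
 * Usage sketch for the public interface above (illustrative only;
 * "sc" and "mydev_intr" are hypothetical):
 *
 *	void *ih = intr_establish(irq, IPL_VM, IST_LEVEL | IST_MPSAFE,
 *	    mydev_intr, sc);
 *	if (ih == NULL)
 *		aprint_error(": couldn't establish interrupt\n");
 *	...
 *	intr_disestablish(ih);
 */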