/*	$NetBSD: pic.c,v 1.34 2015/04/15 15:45:06 matt Exp $	*/
/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.34 2015/04/15 15:45:06 matt Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/xcall.h>
#include <sys/ipi.h>

#if defined(__arm__)
#include <arm/armreg.h>
#include <arm/cpufunc.h>
#elif defined(__aarch64__)
#include <aarch64/locore.h>
#define I32_bit		DAIF_I
#define F32_bit		DAIF_F
#endif

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>

#if defined(__HAVE_PIC_PENDING_INTRS)
/*
 * This implementation of pending interrupts on a MULTIPROCESSOR system makes
 * the assumption that a PIC (pic_softc) shall only have all its interrupts
 * come from the same CPU.  In other words, interrupts from a single PIC will
 * not be distributed among multiple CPUs.
 */

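/*
 * Per-CPU record of deferred-interrupt state: which PICs have blocked
 * interrupts, which PICs have interrupts pending, and at which IPLs
 * something is pending.  Bits in blocked_pics/pending_pics are indexed
 * by pic_id.
 */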
struct pic_pending {
	volatile uint32_t blocked_pics;
	volatile uint32_t pending_pics;
	volatile uint32_t pending_ipls;
};

static uint32_t
	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
	pic_list_find_pic_by_pending_ipl(struct pic_pending *, uint32_t);
static void
	pic_deliver_irqs(struct pic_pending *, struct pic_softc *, int, void *);
static void
	pic_list_deliver_irqs(struct pic_pending *, register_t, int, void *);

#ifdef MULTIPROCESSOR
percpu_t *pic_pending_percpu;
#else
struct pic_pending pic_pending;
#endif /* MULTIPROCESSOR */
#endif /* __HAVE_PIC_PENDING_INTRS */

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
struct intrsource **pic_iplsource[NIPL] = {
	[0 ... NIPL-1] = pic__iplsources,
};
size_t pic_ipl_offset[NIPL+1];
size_t pic_sourcebase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);

#ifdef __HAVE_PIC_SET_PRIORITY
/*
 * Update the hardware interrupt priority.  Interrupts are disabled
 * around the hardware update so that ci_cpl and the PIC never disagree;
 * only the PIC in slot 0 may provide a pic_set_priority method.
 */
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
	register_t psw = cpsid(I32_bit);
	if (pic_list[0] != NULL)
		(pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl);
	ci->ci_cpl = newipl;
	if ((psw & I32_bit) == 0)
		cpsie(I32_bit);
}
#endif

#ifdef MULTIPROCESSOR
int
pic_ipi_ast(void *arg)
{
	setsoftast(curcpu());
	return 1;
}

int
pic_ipi_nop(void *arg)
{
	/* do nothing */
	return 1;
}

int
pic_ipi_xcall(void *arg)
{
	xc_ipi_handler();
	return 1;
}

int
pic_ipi_generic(void *arg)
{
	ipi_cpu_handler();
	return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
//	printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
	kdb_trap(-1, arg);
	return 1;
}
#endif /* DDB */

#ifdef __HAVE_PREEMPTION
int
pic_ipi_kpreempt(void *arg)
{
	atomic_or_uint(&curcpu()->ci_astpending, __BIT(1));
	return 1;
}
#endif /* __HAVE_PREEMPTION */

void
intr_cpu_init(struct cpu_info *ci)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
			(*pic->pic_ops->pic_cpu_init)(pic, ci);
		}
	}
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);

void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
	struct cpu_info * const ci = curcpu();
	KASSERT(ipi < NIPI);
	bool __diagused sent_p = false;
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_cpus == NULL)
			continue;
		if (kcp == NULL || kcpuset_intersecting_p(kcp, pic->pic_cpus)) {
			// never send to ourselves
			if (pic->pic_cpus == ci->ci_kcpuset)
				continue;

			(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);
			// If we were targeting a single CPU or this pic
			// handles all CPUs, we're done.
			if (kcp != NULL || pic->pic_cpus == kcpuset_running)
				return;
			sent_p = true;
		}
	}
	KASSERT(cold || sent_p);
}
#endif /* MULTIPROCESSOR */

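/*
 * Fast soft interrupts: switch directly to the softint lwp from
 * interrupt context.  ci_intr_depth is temporarily decremented so the
 * switch is not accounted as a nested interrupt.
 */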
#ifdef __HAVE_PIC_FAST_SOFTINTS
int
pic_handle_softint(void *arg)
{
	void softint_switch(lwp_t *, int);
	struct cpu_info * const ci = curcpu();
	const size_t softint = (size_t) arg;
	int s = splhigh();
	ci->ci_intr_depth--;	// don't count these as interrupts
	softint_switch(ci->ci_softlwps[softint], s);
	ci->ci_intr_depth++;
	splx(s);
	return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
	struct pic_softc * const pic = arg;
	int rv;

	rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

	return rv > 0;
}

#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
}

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
	struct intrsource * const is = pic->pic_sources[irq];

	KASSERT(irq < pic->pic_maxsources);
	KASSERT(is != NULL);

	pic_mark_pending_source(pic, is);
}

uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
	uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);

	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
	return ipl_mask;
}

uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
	uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		    "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}
#endif /* __HAVE_PIC_PENDING_INTRS */

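/*
 * Call the handler for one interrupt source.  A source registered with
 * a NULL argument receives the trap frame instead; if no frame is
 * available either, the dispatch is deferred and counted in
 * pic_deferral_ev.  Non-MPSAFE handlers run under the big lock.
 */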
void
pic_dispatch(struct intrsource *is, void *frame)
{
	int (*func)(void *) = is->is_func;
	void *arg = is->is_arg;

	if (__predict_false(arg == NULL)) {
		if (__predict_false(frame == NULL)) {
			pic_deferral_ev.ev_count++;
			return;
		}
		arg = frame;
	}

#ifdef MULTIPROCESSOR
	if (!is->is_mpsafe) {
		KERNEL_LOCK(1, NULL);
		const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
		const u_int l_blcnt __diagused = curlwp->l_blcnt;
		(void)(*func)(arg);
		KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
		KASSERT(l_blcnt == curlwp->l_blcnt);
		KERNEL_UNLOCK_ONE(NULL);
	} else
#endif
		(void)(*func)(arg);

	struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	pcpu->pcpu_evs[is->is_irq].ev_count++;
	percpu_putref(is->is_pic->pic_percpu);
}

#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_deliver_irqs(struct pic_pending *pend, struct pic_softc *pic, int ipl,
	void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress __diagused = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/* Interrupt at this level was handled. */
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				cpsie(I32_bit);
				pic_dispatch(is, frame);
				cpsid(I32_bit);
#if PIC_MAXSOURCES > 32
				/*
				 * There is a possibility of interrupting
				 * from cpsie() to cpsid().
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			atomic_or_32(&pend->blocked_pics, __BIT(pic->pic_id));
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		atomic_and_32(&pend->pending_pics, ~__BIT(pic->pic_id));
}

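/*
 * Unblock, at the hardware, every interrupt that pic_deliver_irqs()
 * left blocked while its handler ran.  Called with interrupts disabled
 * after a delivery pass completes.
 */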
static void
pic_list_unblock_irqs(struct pic_pending *pend)
{
	uint32_t blocked_pics = pend->blocked_pics;

	pend->blocked_pics = 0;

	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}

struct pic_softc *
pic_list_find_pic_by_pending_ipl(struct pic_pending *pend, uint32_t ipl_mask)
{
	uint32_t pending_pics = pend->pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

void
pic_list_deliver_irqs(struct pic_pending *pend, register_t psw, int ipl,
	void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(pend, ipl_mask)) != NULL) {
		pic_deliver_irqs(pend, pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	atomic_and_32(&pend->pending_ipls, ~ipl_mask);
}
#endif /* __HAVE_PIC_PENDING_INTRS */

/*
 * Deliver any deferred interrupts whose IPL is above newipl, highest
 * IPL first, then drop the current priority to newipl.  Called with
 * interrupts disabled when the IPL is being lowered.
 */
void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
#if defined(__HAVE_PIC_PENDING_INTRS)
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	while ((pend->pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(pend->pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(pend->pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(pend, psw, ipl, frame);
			pic_list_unblock_irqs(pend);
		}
	}
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
#endif /* __HAVE_PIC_PENDING_INTRS */
#ifdef __HAVE_PREEMPTION
	if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) {
		pic_set_priority(ci, IPL_SCHED);
		kpreempt(0);
	}
#endif
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}

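/*
 * percpu_foreach() callback: allocate one CPU's evcnt array and its
 * "picname (cpuname)" evcnt group name for a newly added PIC.
 */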
static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]),
	    KM_SLEEP);
	KASSERT(pcpu->pcpu_evs != NULL);

#define	PCPU_NAMELEN	32
#ifdef DIAGNOSTIC
	const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);
#endif

	KASSERT(namelen < PCPU_NAMELEN);
	pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
	snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
	    "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
	strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
	pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
	printf("%s: %s %s: <%s>\n",
	    __func__, ci->ci_data.cpu_name, pic->pic_name,
	    pcpu->pcpu_name);
#endif
}

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
static void
pic_pending_zero(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_pending * const p = v0;
	memset(p, 0, sizeof(*p));
}
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

void
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;

	KASSERT(strlen(pic->pic_name) > 0);

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
	if (__predict_false(pic_pending_percpu == NULL)) {
		pic_pending_percpu = percpu_alloc(sizeof(struct pic_pending));
		KASSERT(pic_pending_percpu != NULL);

		/*
		 * Now zero the per-cpu pending data.
		 */
		percpu_foreach(pic_pending_percpu, pic_pending_zero, NULL);
	}
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_alloc(sizeof(struct pic_percpu));
	KASSERT(pic->pic_percpu != NULL);

	/*
	 * Now allocate the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_allocate, pic);

	pic->pic_sources = &pic_sources[pic_sourcebase];
	pic->pic_irqbase = irqbase;
	pic_sourcebase += pic->pic_maxsources;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;
}

int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
	    pcpu->pcpu_name, is->is_source);
}

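/*
 * Establish a handler for (pic, irq) at the given IPL.  Besides the
 * per-PIC source table, the source is entered into pic__iplsources,
 * which is kept grouped by IPL so that the sources of any one IPL
 * occupy the slots pic_ipl_offset[ipl] .. pic_ipl_offset[ipl+1]-1.
 */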
void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
	int (*func)(void *), void *arg)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	if (is == NULL)
		return NULL;

	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type & 0xff;
	is->is_func = func;
	is->is_arg = arg;
#ifdef MULTIPROCESSOR
	is->is_mpsafe = (type & IST_MPSAFE) || ipl != IPL_VM;
#endif

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) {
		if (pic__iplsources[off] == NULL) {
			is->is_iplidx = off - pic_ipl_offset[ipl];
			pic__iplsources[off] = is;
			return is;
		}
	}

	/*
	 * Move up all the sources by one.
	 */
	if (ipl < NIPL) {
		off = pic_ipl_offset[ipl+1];
		memmove(&pic__iplsources[off+1], &pic__iplsources[off],
		    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
	}

	/*
	 * Advance the offset of all IPLs higher than this.  Include an
	 * extra one as well.  Thus the number of sources per ipl is
	 * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl].
	 */
	for (nipl = ipl + 1; nipl <= NIPL; nipl++)
		pic_ipl_offset[nipl]++;

	/*
	 * Insert into the previously made position at the end of this IPL's
	 * sources.
	 */
	off = pic_ipl_offset[ipl + 1] - 1;
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));

	/* We're done. */
	return is;
}

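/*
 * percpu_foreach() callback used at tear-down time: detach one CPU's
 * event counter for the source being removed.
 */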
static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}

void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg);
		}
	}

	return NULL;
}

void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	pic_disestablish_source(is);
}

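/*
 * Typical driver usage (a sketch only; the IRQ number, handler and
 * softc below are illustrative, not part of this file):
 *
 *	sc->sc_ih = intr_establish(IRQ_EXAMPLE, IPL_VM,
 *	    IST_LEVEL | IST_MPSAFE, example_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");
 *	...
 *	intr_disestablish(sc->sc_ih);
 */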