/*	$NetBSD: pic.c,v 1.21 2014/03/13 23:47:53 matt Exp $	*/
/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.21 2014/03/13 23:47:53 matt Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/xcall.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>

static uint32_t
	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
	pic_list_find_pic_by_pending_ipl(uint32_t);
static void
	pic_deliver_irqs(struct pic_softc *, int, void *);
static void
	pic_list_deliver_irqs(register_t, int, void *);

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
volatile uint32_t pic_blocked_pics;
volatile uint32_t pic_pending_pics;
volatile uint32_t pic_pending_ipls;
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
struct intrsource **pic_iplsource[NIPL] = {
	[0 ... NIPL-1] = pic__iplsources,
};
size_t pic_ipl_offset[NIPL+1];
size_t pic_sourcebase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);
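
/*
 * Change the priority mask on the primary PIC (slot 0) and record the
 * new IPL in the cpu_info.  The CPSR I-bit is kept disabled across the
 * update so the hardware priority and ci_cpl cannot be observed out of
 * sync; the previous interrupt-enable state is then restored.
 */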
#ifdef __HAVE_PIC_SET_PRIORITY
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
	register_t psw = cpsid(I32_bit);
	if (pic_list[0] != NULL)
		(pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl);
	ci->ci_cpl = newipl;
	if ((psw & I32_bit) == 0)
		cpsie(I32_bit);
}
#endif

#ifdef MULTIPROCESSOR
int
pic_ipi_nop(void *arg)
{
	/* do nothing */
	return 1;
}

int
pic_ipi_xcall(void *arg)
{
	xc_ipi_handler();
	return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
	printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
	kdb_trap(-1, arg);
	return 1;
}
#endif

void
intr_cpu_init(struct cpu_info *ci)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
			(*pic->pic_ops->pic_cpu_init)(pic, ci);
		}
	}
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);

static struct pic_softc *
pic_ipi_sender(void)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_ipi_send != NULL) {
			return pic;
		}
	}
	return NULL;
}

void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
	struct pic_softc * const pic = pic_ipi_sender();
	KASSERT(ipi < NIPI);
	if (cold && pic == NULL)
		return;
	KASSERT(pic != NULL);
	(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);
}
#endif /* MULTIPROCESSOR */

#ifdef __HAVE_PIC_FAST_SOFTINTS
int
pic_handle_softint(void *arg)
{
	void softint_switch(lwp_t *, int);
	struct cpu_info * const ci = curcpu();
	const size_t softint = (size_t) arg;
	int s = splhigh();
	ci->ci_intr_depth--;	// don't count these as interrupts
	softint_switch(ci->ci_softlwps[softint], s);
	ci->ci_intr_depth++;
	splx(s);
	return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
	struct pic_softc * const pic = arg;
	int rv;

	rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

	return rv > 0;
}

void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	atomic_or_32(&pic_pending_ipls, ipl_mask);
	atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id));
}

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
	struct intrsource * const is = pic->pic_sources[irq];

	KASSERT(irq < pic->pic_maxsources);
	KASSERT(is != NULL);

	pic_mark_pending_source(pic, is);
}
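
/*
 * Mark a 32-bit group of sources as pending: block them at the
 * hardware, record them in the group's software pending word, and
 * note, both per-PIC and globally, the IPLs at which work is now
 * pending.  Returns the mask of affected IPLs.
 */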
uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
	uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);

	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	atomic_or_32(&pic_pending_ipls, ipl_mask);
	atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id));

	return ipl_mask;
}

uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
	uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		   "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}

void
pic_dispatch(struct intrsource *is, void *frame)
{
	int (*func)(void *) = is->is_func;
	void *arg = is->is_arg;

	if (__predict_false(arg == NULL)) {
		if (__predict_false(frame == NULL)) {
			pic_deferral_ev.ev_count++;
			return;
		}
		arg = frame;
	}

#ifdef MULTIPROCESSOR
	if (!is->is_mpsafe) {
		KERNEL_LOCK(1, NULL);
		const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
		const u_int l_blcnt __diagused = curlwp->l_blcnt;
		(void)(*func)(arg);
		KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
		KASSERT(l_blcnt == curlwp->l_blcnt);
		KERNEL_UNLOCK_ONE(NULL);
	} else
#endif
		(void)(*func)(arg);

	struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	pcpu->pcpu_evs[is->is_irq].ev_count++;
	percpu_putref(is->is_pic->pic_percpu);
}
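
/*
 * Deliver every pending interrupt of the given IPL on this PIC.  Each
 * handler runs between cpsie()/cpsid(), so higher-priority interrupts
 * may preempt delivery; dispatched sources are collected in
 * blocked_irqs and unblocked afterwards by pic_list_unblock_irqs().
 */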
void
pic_deliver_irqs(struct pic_softc *pic, int ipl, void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress __diagused = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/* Interrupt at this level was handled. */
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				cpsie(I32_bit);
				pic_dispatch(is, frame);
				cpsid(I32_bit);
#if PIC_MAXSOURCES > 32
				/*
				 * There is a possibility of interrupting
				 * from cpsie() to cpsid().
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			atomic_or_32(&pic_blocked_pics, __BIT(pic->pic_id));
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		atomic_and_32(&pic_pending_pics, ~__BIT(pic->pic_id));
}

static void
pic_list_unblock_irqs(void)
{
	uint32_t blocked_pics = pic_blocked_pics;

	pic_blocked_pics = 0;
	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}

struct pic_softc *
pic_list_find_pic_by_pending_ipl(uint32_t ipl_mask)
{
	uint32_t pending_pics = pic_pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

void
pic_list_deliver_irqs(register_t psw, int ipl, void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(ipl_mask)) != NULL) {
		pic_deliver_irqs(pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	atomic_and_32(&pic_pending_ipls, ~ipl_mask);
}

void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
	while ((pic_pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(pic_pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(pic_pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(psw, ipl, frame);
			pic_list_unblock_irqs();
		}
	}
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}
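
/*
 * percpu_foreach() callback: allocate this CPU's array of event
 * counters (one slot per source) and the name under which they will
 * be attached.
 */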
static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]),
	    KM_SLEEP);
	KASSERT(pcpu->pcpu_evs != NULL);

#define	PCPU_NAMELEN	32
#ifdef DIAGNOSTIC
	const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);
#endif

	KASSERT(namelen < PCPU_NAMELEN);
	pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
	snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
	    "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
	strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
	pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
	printf("%s: %s %s: <%s>\n",
	    __func__, ci->ci_data.cpu_name, pic->pic_name,
	    pcpu->pcpu_name);
#endif
}
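
/*
 * Registration sketch for a hypothetical PIC driver (the mypic_*
 * names are illustrative only): fill in a pic_ops with the callbacks
 * this file invokes, describe the pic_softc, and call pic_add() with
 * the first global irq number to claim, or -1 if the PIC's sources
 * are not mapped into the global irq space.
 *
 *	static struct pic_ops mypic_ops = {
 *		.pic_unblock_irqs = mypic_unblock_irqs,
 *		.pic_block_irqs = mypic_block_irqs,
 *		.pic_find_pending_irqs = mypic_find_pending_irqs,
 *		.pic_establish_irq = mypic_establish_irq,
 *	};
 *
 *	static struct pic_softc mypic = {
 *		.pic_ops = &mypic_ops,
 *		.pic_maxsources = 32,
 *		.pic_name = "mypic",
 *	};
 *
 *	pic_add(&mypic, 0);	(claims global irqs 0..31)
 */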
void
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;

	KASSERT(strlen(pic->pic_name) > 0);

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_alloc(sizeof(struct pic_percpu));
	KASSERT(pic->pic_percpu != NULL);

	/*
	 * Now allocate the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_allocate, pic);

	pic->pic_sources = &pic_sources[pic_sourcebase];
	pic->pic_irqbase = irqbase;
	pic_sourcebase += pic->pic_maxsources;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((slot == 0) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;
}

int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
	    pcpu->pcpu_name, is->is_source);
}
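
/*
 * Establish a handler on one source of this PIC.  Besides the per-irq
 * bookkeeping, the source is inserted into pic__iplsources, which is
 * kept sorted by IPL: entries pic_ipl_offset[ipl] through
 * pic_ipl_offset[ipl+1]-1 belong to that IPL, so a vacated slot is
 * reused when possible and otherwise all higher-IPL entries are
 * shifted up by one to make room.
 */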
void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
	int (*func)(void *), void *arg)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	if (is == NULL)
		return NULL;

	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type & 0xff;
	is->is_func = func;
	is->is_arg = arg;
#ifdef MULTIPROCESSOR
	is->is_mpsafe = (type & IST_MPSAFE);
#endif

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) {
		if (pic__iplsources[off] == NULL) {
			is->is_iplidx = off - pic_ipl_offset[ipl];
			pic__iplsources[off] = is;
			return is;
		}
	}

	/*
	 * Move up all the sources by one.
	 */
	if (ipl < NIPL) {
		off = pic_ipl_offset[ipl+1];
		memmove(&pic__iplsources[off+1], &pic__iplsources[off],
		    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
	}

	/*
	 * Advance the offset of all IPLs higher than this.  Include an
	 * extra one as well.  Thus the number of sources per ipl is
	 * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl].
	 */
	for (nipl = ipl + 1; nipl <= NIPL; nipl++)
		pic_ipl_offset[nipl]++;

	/*
	 * Insert into the previously made position at the end of this IPL's
	 * sources.
	 */
	off = pic_ipl_offset[ipl + 1] - 1;
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));

	/* We're done. */
	return is;
}

static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}

void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg);
		}
	}

	return NULL;
}

void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	pic_disestablish_source(is);
}
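
/*
 * Typical use from a driver (a sketch; MYDEV_IRQ, mydev_intr and sc
 * are illustrative, not part of this file):
 *
 *	void *ih = intr_establish(MYDEV_IRQ, IPL_VM,
 *	    IST_LEVEL | IST_MPSAFE, mydev_intr, sc);
 *	if (ih == NULL)
 *		aprint_error(": couldn't establish interrupt\n");
 *	...
 *	intr_disestablish(ih);
 */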