1 /* $NetBSD: pic.c,v 1.18 2013/12/08 14:37:01 skrll Exp $ */ 2 /*- 3 * Copyright (c) 2008 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Matt Thomas. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGE. 29 */ 30 #include <sys/cdefs.h> 31 __KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.18 2013/12/08 14:37:01 skrll Exp $"); 32 33 #define _INTR_PRIVATE 34 #include <sys/param.h> 35 #include <sys/atomic.h> 36 #include <sys/cpu.h> 37 #include <sys/evcnt.h> 38 #include <sys/intr.h> 39 #include <sys/kernel.h> 40 #include <sys/kmem.h> 41 #include <sys/xcall.h> 42 43 #include <arm/armreg.h> 44 #include <arm/cpufunc.h> 45 46 #include <arm/pic/picvar.h> 47 48 static uint32_t 49 pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int); 50 static struct pic_softc * 51 pic_list_find_pic_by_pending_ipl(uint32_t); 52 static void 53 pic_deliver_irqs(struct pic_softc *, int, void *); 54 static void 55 pic_list_deliver_irqs(register_t, int, void *); 56 57 struct pic_softc *pic_list[PIC_MAXPICS]; 58 #if PIC_MAXPICS > 32 59 #error PIC_MAXPICS > 32 not supported 60 #endif 61 volatile uint32_t pic_blocked_pics; 62 volatile uint32_t pic_pending_pics; 63 volatile uint32_t pic_pending_ipls; 64 struct intrsource *pic_sources[PIC_MAXMAXSOURCES]; 65 struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES]; 66 struct intrsource **pic_iplsource[NIPL] = { 67 [0 ... NIPL-1] = pic__iplsources, 68 }; 69 size_t pic_ipl_offset[NIPL+1]; 70 size_t pic_sourcebase; 71 static struct evcnt pic_deferral_ev = 72 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr"); 73 EVCNT_ATTACH_STATIC(pic_deferral_ev); 74 75 #ifdef __HAVE_PIC_SET_PRIORITY 76 void 77 pic_set_priority(struct cpu_info *ci, int newipl) 78 { 79 register_t psw = cpsid(I32_bit); 80 if (pic_list[0] != NULL) 81 (pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl); 82 ci->ci_cpl = newipl; 83 if ((psw & I32_bit) == 0) 84 cpsie(I32_bit); 85 } 86 #endif 87 88 #ifdef MULTIPROCESSOR 89 int 90 pic_ipi_nop(void *arg) 91 { 92 /* do nothing */ 93 return 1; 94 } 95 96 int 97 pic_ipi_xcall(void *arg) 98 { 99 xc_ipi_handler(); 100 return 1; 101 } 102 103 void 104 intr_cpu_init(struct cpu_info *ci) 105 { 106 for (size_t slot = 0; slot < PIC_MAXPICS; slot++) { 107 struct pic_softc * const pic = pic_list[slot]; 108 if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) { 109 (*pic->pic_ops->pic_cpu_init)(pic, ci); 110 } 111 } 112 } 113 114 typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long); 115 116 static struct pic_softc * 117 pic_ipi_sender(void) 118 { 119 for (size_t slot = 0; slot < PIC_MAXPICS; slot++) { 120 struct pic_softc * const pic = pic_list[slot]; 121 if (pic != NULL && pic->pic_ops->pic_ipi_send != NULL) { 122 return pic; 123 } 124 } 125 return NULL; 126 } 127 128 void 129 intr_ipi_send(const kcpuset_t *kcp, u_long ipi) 130 { 131 struct pic_softc * const pic = pic_ipi_sender(); 132 KASSERT(ipi < NIPI); 133 if (cold && pic == NULL) 134 return; 135 KASSERT(pic != NULL); 136 (*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi); 137 } 138 #endif /* MULTIPROCESSOR */ 139 140 #ifdef __HAVE_PIC_FAST_SOFTINTS 141 int 142 pic_handle_softint(void *arg) 143 { 144 void softint_switch(lwp_t *, int); 145 struct cpu_info * const ci = curcpu(); 146 const size_t softint = (size_t) arg; 147 int s = splhigh(); 148 ci->ci_intr_depth--; // don't count these as interrupts 149 softint_switch(ci->ci_softlwps[softint], s); 150 ci->ci_intr_depth++; 151 splx(s); 152 return 1; 153 } 154 #endif 155 156 int 157 pic_handle_intr(void *arg) 158 { 159 struct pic_softc * const pic = arg; 160 int rv; 161 162 rv = (*pic->pic_ops->pic_find_pending_irqs)(pic); 163 164 return rv > 0; 165 } 166 167 void 168 pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is) 169 { 170 const uint32_t ipl_mask = __BIT(is->is_ipl); 171 172 atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5], 173 __BIT(is->is_irq & 0x1f)); 174 175 atomic_or_32(&pic->pic_pending_ipls, ipl_mask); 176 atomic_or_32(&pic_pending_ipls, ipl_mask); 177 atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id)); 178 } 179 180 void 181 pic_mark_pending(struct pic_softc *pic, int irq) 182 { 183 struct intrsource * const is = pic->pic_sources[irq]; 184 185 KASSERT(irq < pic->pic_maxsources); 186 KASSERT(is != NULL); 187 188 pic_mark_pending_source(pic, is); 189 } 190 191 uint32_t 192 pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base, 193 uint32_t pending) 194 { 195 struct intrsource ** const isbase = &pic->pic_sources[irq_base]; 196 struct intrsource *is; 197 volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5]; 198 uint32_t ipl_mask = 0; 199 200 if (pending == 0) 201 return ipl_mask; 202 203 KASSERT((irq_base & 31) == 0); 204 205 (*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending); 206 207 atomic_or_32(ipending, pending); 208 while (pending != 0) { 209 int n = ffs(pending); 210 if (n-- == 0) 211 break; 212 is = isbase[n]; 213 KASSERT(is != NULL); 214 KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32); 215 pending &= ~__BIT(n); 216 ipl_mask |= __BIT(is->is_ipl); 217 } 218 219 atomic_or_32(&pic->pic_pending_ipls, ipl_mask); 220 atomic_or_32(&pic_pending_ipls, ipl_mask); 221 atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id)); 222 223 return ipl_mask; 224 } 225 226 uint32_t 227 pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base, 228 uint32_t pending, int ipl) 229 { 230 uint32_t ipl_irq_mask = 0; 231 uint32_t irq_mask; 232 233 for (;;) { 234 int irq = ffs(pending); 235 if (irq-- == 0) 236 return ipl_irq_mask; 237 238 irq_mask = __BIT(irq); 239 #if 1 240 KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL, 241 "%s: irq_base %zu irq %d\n", __func__, irq_base, irq); 242 #else 243 if (pic->pic_sources[irq_base + irq] == NULL) { 244 aprint_error("stray interrupt? irq_base=%zu irq=%d\n", 245 irq_base, irq); 246 } else 247 #endif 248 if (pic->pic_sources[irq_base + irq]->is_ipl == ipl) 249 ipl_irq_mask |= irq_mask; 250 251 pending &= ~irq_mask; 252 } 253 } 254 255 void 256 pic_dispatch(struct intrsource *is, void *frame) 257 { 258 int rv __unused; 259 260 if (__predict_false(is->is_arg == NULL) 261 && __predict_true(frame != NULL)) { 262 rv = (*is->is_func)(frame); 263 } else if (__predict_true(is->is_arg != NULL)) { 264 rv = (*is->is_func)(is->is_arg); 265 } else { 266 pic_deferral_ev.ev_count++; 267 return; 268 } 269 270 struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu); 271 KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC); 272 pcpu->pcpu_evs[is->is_irq].ev_count++; 273 percpu_putref(is->is_pic->pic_percpu); 274 } 275 276 void 277 pic_deliver_irqs(struct pic_softc *pic, int ipl, void *frame) 278 { 279 const uint32_t ipl_mask = __BIT(ipl); 280 struct intrsource *is; 281 volatile uint32_t *ipending = pic->pic_pending_irqs; 282 volatile uint32_t *iblocked = pic->pic_blocked_irqs; 283 size_t irq_base; 284 #if PIC_MAXSOURCES > 32 285 size_t irq_count; 286 int poi = 0; /* Possibility of interrupting */ 287 #endif 288 uint32_t pending_irqs; 289 uint32_t blocked_irqs; 290 int irq; 291 bool progress = false; 292 293 KASSERT(pic->pic_pending_ipls & ipl_mask); 294 295 irq_base = 0; 296 #if PIC_MAXSOURCES > 32 297 irq_count = 0; 298 #endif 299 300 for (;;) { 301 pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base, 302 *ipending, ipl); 303 KASSERT((pending_irqs & *ipending) == pending_irqs); 304 KASSERT((pending_irqs & ~(*ipending)) == 0); 305 if (pending_irqs == 0) { 306 #if PIC_MAXSOURCES > 32 307 irq_count += 32; 308 if (__predict_true(irq_count >= pic->pic_maxsources)) { 309 if (!poi) 310 /*Interrupt at this level was handled.*/ 311 break; 312 irq_base = 0; 313 irq_count = 0; 314 poi = 0; 315 ipending = pic->pic_pending_irqs; 316 iblocked = pic->pic_blocked_irqs; 317 } else { 318 irq_base += 32; 319 ipending++; 320 iblocked++; 321 KASSERT(irq_base <= pic->pic_maxsources); 322 } 323 continue; 324 #else 325 break; 326 #endif 327 } 328 progress = true; 329 blocked_irqs = 0; 330 do { 331 irq = ffs(pending_irqs) - 1; 332 KASSERT(irq >= 0); 333 334 atomic_and_32(ipending, ~__BIT(irq)); 335 is = pic->pic_sources[irq_base + irq]; 336 if (is != NULL) { 337 cpsie(I32_bit); 338 pic_dispatch(is, frame); 339 cpsid(I32_bit); 340 #if PIC_MAXSOURCES > 32 341 /* 342 * There is a possibility of interrupting 343 * from cpsie() to cpsid(). 344 */ 345 poi = 1; 346 #endif 347 blocked_irqs |= __BIT(irq); 348 } else { 349 KASSERT(0); 350 } 351 pending_irqs = pic_find_pending_irqs_by_ipl(pic, 352 irq_base, *ipending, ipl); 353 } while (pending_irqs); 354 if (blocked_irqs) { 355 atomic_or_32(iblocked, blocked_irqs); 356 atomic_or_32(&pic_blocked_pics, __BIT(pic->pic_id)); 357 } 358 } 359 360 KASSERT(progress); 361 /* 362 * Since interrupts are disabled, we don't have to be too careful 363 * about these. 364 */ 365 if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0) 366 atomic_and_32(&pic_pending_pics, ~__BIT(pic->pic_id)); 367 } 368 369 static void 370 pic_list_unblock_irqs(void) 371 { 372 uint32_t blocked_pics = pic_blocked_pics; 373 374 pic_blocked_pics = 0; 375 for (;;) { 376 struct pic_softc *pic; 377 #if PIC_MAXSOURCES > 32 378 volatile uint32_t *iblocked; 379 uint32_t blocked; 380 size_t irq_base; 381 #endif 382 383 int pic_id = ffs(blocked_pics); 384 if (pic_id-- == 0) 385 return; 386 387 pic = pic_list[pic_id]; 388 KASSERT(pic != NULL); 389 #if PIC_MAXSOURCES > 32 390 for (irq_base = 0, iblocked = pic->pic_blocked_irqs; 391 irq_base < pic->pic_maxsources; 392 irq_base += 32, iblocked++) { 393 if ((blocked = *iblocked) != 0) { 394 (*pic->pic_ops->pic_unblock_irqs)(pic, 395 irq_base, blocked); 396 atomic_and_32(iblocked, ~blocked); 397 } 398 } 399 #else 400 KASSERT(pic->pic_blocked_irqs[0] != 0); 401 (*pic->pic_ops->pic_unblock_irqs)(pic, 402 0, pic->pic_blocked_irqs[0]); 403 pic->pic_blocked_irqs[0] = 0; 404 #endif 405 blocked_pics &= ~__BIT(pic_id); 406 } 407 } 408 409 410 struct pic_softc * 411 pic_list_find_pic_by_pending_ipl(uint32_t ipl_mask) 412 { 413 uint32_t pending_pics = pic_pending_pics; 414 struct pic_softc *pic; 415 416 for (;;) { 417 int pic_id = ffs(pending_pics); 418 if (pic_id-- == 0) 419 return NULL; 420 421 pic = pic_list[pic_id]; 422 KASSERT(pic != NULL); 423 if (pic->pic_pending_ipls & ipl_mask) 424 return pic; 425 pending_pics &= ~__BIT(pic_id); 426 } 427 } 428 429 void 430 pic_list_deliver_irqs(register_t psw, int ipl, void *frame) 431 { 432 const uint32_t ipl_mask = __BIT(ipl); 433 struct pic_softc *pic; 434 435 while ((pic = pic_list_find_pic_by_pending_ipl(ipl_mask)) != NULL) { 436 pic_deliver_irqs(pic, ipl, frame); 437 KASSERT((pic->pic_pending_ipls & ipl_mask) == 0); 438 } 439 atomic_and_32(&pic_pending_ipls, ~ipl_mask); 440 } 441 442 void 443 pic_do_pending_ints(register_t psw, int newipl, void *frame) 444 { 445 struct cpu_info * const ci = curcpu(); 446 if (__predict_false(newipl == IPL_HIGH)) { 447 KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl); 448 return; 449 } 450 while ((pic_pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) { 451 KASSERT(pic_pending_ipls < __BIT(NIPL)); 452 for (;;) { 453 int ipl = 31 - __builtin_clz(pic_pending_ipls); 454 KASSERT(ipl < NIPL); 455 if (ipl <= newipl) 456 break; 457 458 pic_set_priority(ci, ipl); 459 pic_list_deliver_irqs(psw, ipl, frame); 460 pic_list_unblock_irqs(); 461 } 462 } 463 if (ci->ci_cpl != newipl) 464 pic_set_priority(ci, newipl); 465 } 466 467 static void 468 pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci) 469 { 470 struct pic_percpu * const pcpu = v0; 471 struct pic_softc * const pic = v1; 472 473 pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]), 474 KM_SLEEP); 475 KASSERT(pcpu->pcpu_evs != NULL); 476 477 #define PCPU_NAMELEN 32 478 #ifdef DIAGNOSTIC 479 const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name); 480 #endif 481 482 KASSERT(namelen < PCPU_NAMELEN); 483 pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP); 484 #ifdef MULTIPROCESSOR 485 snprintf(pcpu->pcpu_name, PCPU_NAMELEN, 486 "%s (%s)", pic->pic_name, ci->ci_data.cpu_name); 487 #else 488 strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN); 489 #endif 490 pcpu->pcpu_magic = PICPERCPU_MAGIC; 491 #if 0 492 printf("%s: %s %s: <%s>\n", 493 __func__, ci->ci_data.cpu_name, pic->pic_name, 494 pcpu->pcpu_name); 495 #endif 496 } 497 498 void 499 pic_add(struct pic_softc *pic, int irqbase) 500 { 501 int slot, maybe_slot = -1; 502 503 KASSERT(strlen(pic->pic_name) > 0); 504 505 for (slot = 0; slot < PIC_MAXPICS; slot++) { 506 struct pic_softc * const xpic = pic_list[slot]; 507 if (xpic == NULL) { 508 if (maybe_slot < 0) 509 maybe_slot = slot; 510 if (irqbase < 0) 511 break; 512 continue; 513 } 514 if (irqbase < 0 || xpic->pic_irqbase < 0) 515 continue; 516 if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources) 517 continue; 518 if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase) 519 continue; 520 panic("pic_add: pic %s (%zu sources @ irq %u) conflicts" 521 " with pic %s (%zu sources @ irq %u)", 522 pic->pic_name, pic->pic_maxsources, irqbase, 523 xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase); 524 } 525 slot = maybe_slot; 526 #if 0 527 printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n", 528 pic->pic_name, pic_sourcebase, pic->pic_maxsources); 529 #endif 530 KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu", 531 pic->pic_maxsources); 532 KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES); 533 534 /* 535 * Allocate a pointer to each cpu's evcnts and then, for each cpu, 536 * allocate its evcnts and then attach an evcnt for each pin. 537 * We can't allocate the evcnt structures directly since 538 * percpu will move the contents of percpu memory around and 539 * corrupt the pointers in the evcnts themselves. Remember, any 540 * problem can be solved with sufficient indirection. 541 */ 542 pic->pic_percpu = percpu_alloc(sizeof(struct pic_percpu)); 543 KASSERT(pic->pic_percpu != NULL); 544 545 /* 546 * Now allocate the per-cpu evcnts. 547 */ 548 percpu_foreach(pic->pic_percpu, pic_percpu_allocate, pic); 549 550 pic->pic_sources = &pic_sources[pic_sourcebase]; 551 pic->pic_irqbase = irqbase; 552 pic_sourcebase += pic->pic_maxsources; 553 pic->pic_id = slot; 554 #ifdef __HAVE_PIC_SET_PRIORITY 555 KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL)); 556 #endif 557 #ifdef MULTIPROCESSOR 558 KASSERT((slot == 0) == (pic->pic_ops->pic_ipi_send != NULL)); 559 #endif 560 pic_list[slot] = pic; 561 } 562 563 int 564 pic_alloc_irq(struct pic_softc *pic) 565 { 566 int irq; 567 568 for (irq = 0; irq < pic->pic_maxsources; irq++) { 569 if (pic->pic_sources[irq] == NULL) 570 return irq; 571 } 572 573 return -1; 574 } 575 576 static void 577 pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci) 578 { 579 struct pic_percpu * const pcpu = v0; 580 struct intrsource * const is = v1; 581 582 KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC); 583 evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL, 584 pcpu->pcpu_name, is->is_source); 585 } 586 587 void * 588 pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type, 589 int (*func)(void *), void *arg) 590 { 591 struct intrsource *is; 592 int off, nipl; 593 594 if (pic->pic_sources[irq]) { 595 printf("pic_establish_intr: pic %s irq %d already present\n", 596 pic->pic_name, irq); 597 return NULL; 598 } 599 600 is = kmem_zalloc(sizeof(*is), KM_SLEEP); 601 if (is == NULL) 602 return NULL; 603 604 is->is_pic = pic; 605 is->is_irq = irq; 606 is->is_ipl = ipl; 607 is->is_type = type; 608 is->is_func = func; 609 is->is_arg = arg; 610 611 if (pic->pic_ops->pic_source_name) 612 (*pic->pic_ops->pic_source_name)(pic, irq, is->is_source, 613 sizeof(is->is_source)); 614 else 615 snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq); 616 617 /* 618 * Now attach the per-cpu evcnts. 619 */ 620 percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is); 621 622 pic->pic_sources[irq] = is; 623 624 /* 625 * First try to use an existing slot which is empty. 626 */ 627 for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) { 628 if (pic__iplsources[off] == NULL) { 629 is->is_iplidx = off - pic_ipl_offset[ipl]; 630 pic__iplsources[off] = is; 631 return is; 632 } 633 } 634 635 /* 636 * Move up all the sources by one. 637 */ 638 if (ipl < NIPL) { 639 off = pic_ipl_offset[ipl+1]; 640 memmove(&pic__iplsources[off+1], &pic__iplsources[off], 641 sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off)); 642 } 643 644 /* 645 * Advance the offset of all IPLs higher than this. Include an 646 * extra one as well. Thus the number of sources per ipl is 647 * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl]. 648 */ 649 for (nipl = ipl + 1; nipl <= NIPL; nipl++) 650 pic_ipl_offset[nipl]++; 651 652 /* 653 * Insert into the previously made position at the end of this IPL's 654 * sources. 655 */ 656 off = pic_ipl_offset[ipl + 1] - 1; 657 is->is_iplidx = off - pic_ipl_offset[ipl]; 658 pic__iplsources[off] = is; 659 660 (*pic->pic_ops->pic_establish_irq)(pic, is); 661 662 (*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f, 663 __BIT(is->is_irq & 0x1f)); 664 665 /* We're done. */ 666 return is; 667 } 668 669 static void 670 pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci) 671 { 672 struct pic_percpu * const pcpu = v0; 673 struct intrsource * const is = v1; 674 675 KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC); 676 evcnt_detach(&pcpu->pcpu_evs[is->is_irq]); 677 } 678 679 void 680 pic_disestablish_source(struct intrsource *is) 681 { 682 struct pic_softc * const pic = is->is_pic; 683 const int irq = is->is_irq; 684 685 KASSERT(is == pic->pic_sources[irq]); 686 687 (*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f)); 688 pic->pic_sources[irq] = NULL; 689 pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL; 690 /* 691 * Now detach the per-cpu evcnts. 692 */ 693 percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is); 694 695 kmem_free(is, sizeof(*is)); 696 } 697 698 void * 699 intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg) 700 { 701 KASSERT(!cpu_intr_p()); 702 KASSERT(!cpu_softintr_p()); 703 704 for (size_t slot = 0; slot < PIC_MAXPICS; slot++) { 705 struct pic_softc * const pic = pic_list[slot]; 706 if (pic == NULL || pic->pic_irqbase < 0) 707 continue; 708 if (pic->pic_irqbase <= irq 709 && irq < pic->pic_irqbase + pic->pic_maxsources) { 710 return pic_establish_intr(pic, irq - pic->pic_irqbase, 711 ipl, type, func, arg); 712 } 713 } 714 715 return NULL; 716 } 717 718 void 719 intr_disestablish(void *ih) 720 { 721 struct intrsource * const is = ih; 722 723 KASSERT(!cpu_intr_p()); 724 KASSERT(!cpu_softintr_p()); 725 726 pic_disestablish_source(is); 727 } 728