/*	$NetBSD: pic.c,v 1.22 2014/05/19 22:47:53 rmind Exp $	*/
/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.22 2014/05/19 22:47:53 rmind Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/xcall.h>
#include <sys/ipi.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>

static uint32_t
	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
	pic_list_find_pic_by_pending_ipl(uint32_t);
static void
	pic_deliver_irqs(struct pic_softc *, int, void *);
static void
	pic_list_deliver_irqs(register_t, int, void *);

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
volatile uint32_t pic_blocked_pics;
volatile uint32_t pic_pending_pics;
volatile uint32_t pic_pending_ipls;
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
struct intrsource **pic_iplsource[NIPL] = {
	[0 ... NIPL-1] = pic__iplsources,
};
size_t pic_ipl_offset[NIPL+1];
size_t pic_sourcebase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);
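/*
 * pic__iplsources is a single flat array shared by every attached PIC,
 * partitioned by IPL: the sources established at a given IPL occupy the
 * slots pic_ipl_offset[ipl] up to (but not including)
 * pic_ipl_offset[ipl+1].  A walk over one partition therefore looks
 * like:
 *
 *	for (size_t off = pic_ipl_offset[ipl];
 *	     off < pic_ipl_offset[ipl + 1]; off++)
 *		... pic__iplsources[off] is NULL or an intrsource ...
 *
 * pic_establish_intr() below grows and shifts these partitions as
 * sources are added.
 */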
#ifdef __HAVE_PIC_SET_PRIORITY
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
	register_t psw = cpsid(I32_bit);
	if (pic_list[0] != NULL)
		(pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl);
	ci->ci_cpl = newipl;
	if ((psw & I32_bit) == 0)
		cpsie(I32_bit);
}
#endif

#ifdef MULTIPROCESSOR
int
pic_ipi_nop(void *arg)
{
	/* do nothing */
	return 1;
}

int
pic_ipi_xcall(void *arg)
{
	xc_ipi_handler();
	return 1;
}

int
pic_ipi_generic(void *arg)
{
	ipi_cpu_handler();
	return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
	printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
	kdb_trap(-1, arg);
	return 1;
}
#endif

void
intr_cpu_init(struct cpu_info *ci)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
			(*pic->pic_ops->pic_cpu_init)(pic, ci);
		}
	}
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);

static struct pic_softc *
pic_ipi_sender(void)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_ipi_send != NULL) {
			return pic;
		}
	}
	return NULL;
}

void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
	struct pic_softc * const pic = pic_ipi_sender();
	KASSERT(ipi < NIPI);
	if (cold && pic == NULL)
		return;
	KASSERT(pic != NULL);
	(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);
}
#endif /* MULTIPROCESSOR */

#ifdef __HAVE_PIC_FAST_SOFTINTS
int
pic_handle_softint(void *arg)
{
	void softint_switch(lwp_t *, int);
	struct cpu_info * const ci = curcpu();
	const size_t softint = (size_t) arg;
	int s = splhigh();
	ci->ci_intr_depth--;	// don't count these as interrupts
	softint_switch(ci->ci_softlwps[softint], s);
	ci->ci_intr_depth++;
	splx(s);
	return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
	struct pic_softc * const pic = arg;
	int rv;

	rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

	return rv > 0;
}
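/*
 * Interrupt handling proceeds in two phases.  First, the PIC's
 * pic_find_pending_irqs callback (run from pic_handle_intr() above)
 * reads the hardware and records what fired, typically via
 * pic_mark_pending_sources() below, which also masks the recorded
 * pins.  Second, pic_do_pending_ints() drains the recorded state from
 * the highest pending IPL downward, dispatching each source through
 * pic_deliver_irqs()/pic_dispatch() and finally unmasking the handled
 * pins with pic_list_unblock_irqs().
 */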
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	atomic_or_32(&pic_pending_ipls, ipl_mask);
	atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id));
}

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
	struct intrsource * const is = pic->pic_sources[irq];

	KASSERT(irq < pic->pic_maxsources);
	KASSERT(is != NULL);

	pic_mark_pending_source(pic, is);
}

uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
	uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);

	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	atomic_or_32(&pic_pending_ipls, ipl_mask);
	atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id));

	return ipl_mask;
}

uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
	uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		    "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}

void
pic_dispatch(struct intrsource *is, void *frame)
{
	int (*func)(void *) = is->is_func;
	void *arg = is->is_arg;

	if (__predict_false(arg == NULL)) {
		if (__predict_false(frame == NULL)) {
			pic_deferral_ev.ev_count++;
			return;
		}
		arg = frame;
	}

#ifdef MULTIPROCESSOR
	if (!is->is_mpsafe) {
		KERNEL_LOCK(1, NULL);
		const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
		const u_int l_blcnt __diagused = curlwp->l_blcnt;
		(void)(*func)(arg);
		KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
		KASSERT(l_blcnt == curlwp->l_blcnt);
		KERNEL_UNLOCK_ONE(NULL);
	} else
#endif
		(void)(*func)(arg);

	struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	pcpu->pcpu_evs[is->is_irq].ev_count++;
	percpu_putref(is->is_pic->pic_percpu);
}

void
pic_deliver_irqs(struct pic_softc *pic, int ipl, void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress __diagused = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/* Interrupt at this level was handled. */
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				cpsie(I32_bit);
				pic_dispatch(is, frame);
				cpsid(I32_bit);
#if PIC_MAXSOURCES > 32
				/*
				 * There is a possibility of interrupting
				 * from cpsie() to cpsid().
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			atomic_or_32(&pic_blocked_pics, __BIT(pic->pic_id));
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		atomic_and_32(&pic_pending_pics, ~__BIT(pic->pic_id));
}
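/*
 * Note that pic_deliver_irqs() runs each handler with CPU interrupts
 * re-enabled (the cpsie()/cpsid() pair around pic_dispatch()) but
 * leaves the delivered pin masked in the PIC; the pin is recorded in
 * pic_blocked_irqs[] instead and is only unmasked below in
 * pic_list_unblock_irqs(), once delivery at that IPL is complete.
 * Since the pin stays masked, it cannot re-interrupt while its own
 * handler is still running.
 */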
static void
pic_list_unblock_irqs(void)
{
	uint32_t blocked_pics = pic_blocked_pics;

	pic_blocked_pics = 0;
	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}

struct pic_softc *
pic_list_find_pic_by_pending_ipl(uint32_t ipl_mask)
{
	uint32_t pending_pics = pic_pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

void
pic_list_deliver_irqs(register_t psw, int ipl, void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(ipl_mask)) != NULL) {
		pic_deliver_irqs(pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	atomic_and_32(&pic_pending_ipls, ~ipl_mask);
}

void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
	while ((pic_pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(pic_pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(pic_pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(psw, ipl, frame);
			pic_list_unblock_irqs();
		}
	}
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}
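/*
 * pic_do_pending_ints() is the common back-end for lowering the IPL:
 * everything deferred while the CPU ran at a higher IPL is delivered,
 * highest IPL first, before the new (lower) priority takes effect.
 * The MD spl code calls it with CPU interrupts disabled; a caller is
 * expected to look roughly like the following sketch (the real code
 * lives in the platform spl functions, not here):
 *
 *	register_t psw = cpsid(I32_bit);
 *	pic_do_pending_ints(psw, newipl, NULL);
 *	if ((psw & I32_bit) == 0)
 *		cpsie(I32_bit);
 */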
static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]),
	    KM_SLEEP);
	KASSERT(pcpu->pcpu_evs != NULL);

#define	PCPU_NAMELEN	32
#ifdef DIAGNOSTIC
	const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);
#endif

	KASSERT(namelen < PCPU_NAMELEN);
	pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
	snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
	    "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
	strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
	pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
	printf("%s: %s %s: <%s>\n",
	    __func__, ci->ci_data.cpu_name, pic->pic_name,
	    pcpu->pcpu_name);
#endif
}

void
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;

	KASSERT(strlen(pic->pic_name) > 0);

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_alloc(sizeof(struct pic_percpu));
	KASSERT(pic->pic_percpu != NULL);

	/*
	 * Now allocate the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_allocate, pic);

	pic->pic_sources = &pic_sources[pic_sourcebase];
	pic->pic_irqbase = irqbase;
	pic_sourcebase += pic->pic_maxsources;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((slot == 0) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;
}
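/*
 * A PIC driver is expected to fill in a struct pic_ops with (at least)
 * pic_unblock_irqs, pic_block_irqs, pic_find_pending_irqs and
 * pic_establish_irq, then register itself with pic_add().  Sketch,
 * with illustrative names only:
 *
 *	static const struct pic_ops mypic_ops = {
 *		.pic_unblock_irqs = mypic_unblock_irqs,
 *		.pic_block_irqs = mypic_block_irqs,
 *		.pic_find_pending_irqs = mypic_find_pending_irqs,
 *		.pic_establish_irq = mypic_establish_irq,
 *	};
 *
 *	sc->sc_pic.pic_ops = &mypic_ops;
 *	sc->sc_pic.pic_maxsources = 32;
 *	strlcpy(sc->sc_pic.pic_name, device_xname(self),
 *	    sizeof(sc->sc_pic.pic_name));
 *	pic_add(&sc->sc_pic, 0);
 *
 * Passing irqbase < 0 keeps the PIC out of the global IRQ number
 * space used by intr_establish().
 */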
int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
	    pcpu->pcpu_name, is->is_source);
}

void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
	int (*func)(void *), void *arg)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	if (is == NULL)
		return NULL;

	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type & 0xff;
	is->is_func = func;
	is->is_arg = arg;
#ifdef MULTIPROCESSOR
	is->is_mpsafe = (type & IST_MPSAFE);
#endif

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) {
		if (pic__iplsources[off] == NULL) {
			is->is_iplidx = off - pic_ipl_offset[ipl];
			pic__iplsources[off] = is;
			return is;
		}
	}

	/*
	 * Move up all the sources by one.
	 */
	if (ipl < NIPL) {
		off = pic_ipl_offset[ipl+1];
		memmove(&pic__iplsources[off+1], &pic__iplsources[off],
		    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
	}

	/*
	 * Advance the offset of all IPLs higher than this.  Include an
	 * extra one as well.  Thus the number of sources per ipl is
	 * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl].
	 */
	for (nipl = ipl + 1; nipl <= NIPL; nipl++)
		pic_ipl_offset[nipl]++;

	/*
	 * Insert into the previously made position at the end of this IPL's
	 * sources.
	 */
	off = pic_ipl_offset[ipl + 1] - 1;
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));

	/* We're done. */
	return is;
}
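/*
 * A device driver normally registers its handler through the
 * intr_establish() wrapper below, which maps a global IRQ number onto
 * the owning PIC.  Sketch (the handler, softc and IRQ number are
 * illustrative):
 *
 *	void *ih = intr_establish(17, IPL_NET, IST_LEVEL | IST_MPSAFE,
 *	    my_intr, sc);
 *	if (ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");
 *	...
 *	intr_disestablish(ih);
 */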
static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}

void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg);
		}
	}

	return NULL;
}

void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	pic_disestablish_source(is);
}