/*	$NetBSD: intr.c,v 1.20 2012/02/01 09:54:03 matt Exp $	*/

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.20 2012/02/01 09:54:03 matt Exp $");

#include "opt_interrupt.h"
#include "opt_multiprocessor.h"
#include "opt_pic.h"

#define __INTR_PRIVATE

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>

#include <powerpc/psl.h>
#include <powerpc/pic/picvar.h>

#if defined(PIC_I8259) || defined(PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <powerpc/pic/ipivar.h>
#endif

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)

struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t virq_map[NIRQ];
imask_t virq_mask = HWIRQ_MASK;
imask_t imask[NIPL];
int primary_pic = 0;

static int fakeintr(void *);
static int mapirq(int);
static void intr_calculatemasks(void);
static struct pic_ops *find_pic_by_hwirq(int);

static struct intr_source intrsources[NVIRQ];
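/*
 * Overview: every PIC registered via pic_add() owns a contiguous range of
 * "hardware" IRQ numbers starting at its pic_intrbase.  A hardware IRQ is
 * only assigned a "virtual" IRQ (virq) -- a bit position in an imask_t --
 * when a handler is first established for it; see mapirq().  virq_map[]
 * translates hwirq to virq, virq_mask tracks the virqs still free, and
 * virq 0 is reserved to mean "unmapped".  imask[ipl] is the set of virqs
 * blocked at each priority level, recomputed by intr_calculatemasks().
 */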
void
pic_init(void)
{
	/* everything is in bss, no reason to zero it. */
}

int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}

void
pic_finish_setup(void)
{
	for (size_t i = 0; i < num_pics; i++) {
		struct pic_ops * const pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

static struct pic_ops *
find_pic_by_hwirq(int hwirq)
{
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		if (pic->pic_intrbase <= hwirq
		    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
			return pic;
		}
	}
	return NULL;
}

static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = kmem_intr_alloc(sizeof(*ih), cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL)
		panic("intr_establish: can't allocate handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE_FALLING:
	case IST_EDGE_RISING:
	case IST_LEVEL_LOW:
	case IST_LEVEL_HIGH:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = max(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by this handler.
	 */
	is->is_ipl = maxipl;

	/*
	 * Now that the handler is established we're actually ready to
	 * calculate the masks.
	 */
	intr_calculatemasks();

	return ih;
}
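/*
 * Typical driver usage (a sketch only; "xyz_intr" and "sc" stand in for a
 * driver's interrupt handler and softc and are hypothetical here):
 *
 *	sc->sc_ih = intr_establish(hwirq, IST_LEVEL, IPL_BIO, xyz_intr, sc);
 *
 * The handler runs at the requested IPL; handlers at IPL_VM are
 * additionally run holding the kernel lock (see intr_deliver() below).
 */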
void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;
		} else {
			maxipl = max(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	kmem_intr_free((void *)ih, sizeof(*ih));

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}
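/*
 * Note on allocation order: mapirq() hands out the most significant free
 * bit in virq_mask first (PIC_VIRQ_MS_PENDING), and pic_do_pending_int()
 * likewise services the most significant pending virq first, so sources
 * mapped earlier are replayed ahead of later ones at the same IPL.
 */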
/*
 * Map the max_base hardware irqs onto the 32 bits of an imask_t.
 */
static int
mapirq(int hwirq)
{
	struct pic_ops *pic;

	if (hwirq >= max_base)
		panic("invalid irq %d", hwirq);

	if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
		panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

	if (virq_map[hwirq])
		return virq_map[hwirq];

	if (virq_mask == 0)
		panic("virq overflow");

	const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
	struct intr_source * const is = intrsources + virq;

	virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

	is->is_hwirq = hwirq;
	is->is_pic = pic;
	virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
	printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
	return virq;
}

static const char * const intr_typenames[] = {
	[IST_NONE] = "none",
	[IST_PULSE] = "pulsed",
	[IST_EDGE_FALLING] = "falling edge triggered",
	[IST_EDGE_RISING] = "rising edge triggered",
	[IST_LEVEL_LOW] = "low level triggered",
	[IST_LEVEL_HIGH] = "high level triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	imask_t newmask[NIPL] = { [IPL_NONE...IPL_HIGH] = 0 };
	struct intr_source *is;
	int irq;

	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
		newmask[ipl] = 0;
	}

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		newmask[is->is_ipl] |= PIC_VIRQ_TO_MASK(irq);
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * Strict hierarchy: every IPL blocks everything blocked by any
	 * lower IPL.
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}

#ifdef DEBUG_IPL
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}

void
pic_enable_irq(int hwirq)
{
	struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL)
		panic("%s: bogus IRQ %d", __func__, hwirq);
	const int type = intrsources[virq_map[hwirq]].is_type;
	(*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
}

void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}

static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    "%s: irq %d, hwirq %d, is %p ih %p: "
		    "NULL interrupt handler!\n", __func__,
		    virq, is->is_hwirq, is, ih);
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}
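/*
 * Replay interrupts that were marked pending while their IPL was blocked.
 * pic_handle_intr() records a blocked interrupt in ci_ipending and disables
 * it at the PIC; once the IPL drops (via splx()/spllower()), this routine
 * delivers each pending source at its own IPL and re-enables it at the PIC.
 * ci_iactive keeps the replay from re-entering itself.
 */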
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Deliver any pendings that are now unmasked. */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get most significant pending bit */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		splraise(is->is_ipl);
		mtmsr(emsr);
		intr_deliver(is, virq);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;	/* Don't use splx... we are here already! */

		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	const u_int softints = (ci->ci_data.cpu_softints << pcpl) & IPL_SOFTMASK;

	if (__predict_false(softints != 0)) {
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}

int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255)
		return 0;

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	do {
#ifdef MULTIPROCESSOR
		/* THIS IS WRONG XXX */
		if (picirq == ipiops.ppc_ipi_vector) {
			ci->ci_cpl = IPL_HIGH;
			ipi_intr(NULL);
			ci->ci_cpl = pcpl;
			pic->pic_ack_irq(pic, picirq);
			continue;
		}
#endif

		const int virq = virq_map[picirq + pic->pic_intrbase];
		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			/* Masked!  Mark this as pending. */
			ci->ci_ipending |= v_imen;
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			splraise(is->is_ipl);
			mtmsr(msr | PSL_EE);
			intr_deliver(is, virq);
			mtmsr(msr);
			ci->ci_cpl = pcpl;

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}

void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);
}
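/*
 * Software spl: raising or lowering the IPL only updates ci_cpl; no
 * hardware masking is done up front.  Blocked interrupts are caught in
 * pic_handle_intr() against imask[ci_cpl] and marked pending, and the
 * splx()/spllower() paths below replay them once the level permits.
 * The sync/eieio pairs keep the ci_cpl updates ordered with respect to
 * interrupt delivery.
 */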
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	if (ncpl == ci->ci_cpl)
		return ncpl;
	__asm volatile("sync; eieio");	/* don't reorder.... */
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = max(ncpl, ocpl);
	__asm volatile("sync; eieio");	/* reorder protect */
	__insn_barrier();
	return ocpl;
}

static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
	if (ci->ci_ipending & ~imask[ncpl])
		return true;
#ifdef __HAVE_FAST_SOFTINTS
	if ((ci->ci_data.cpu_softints << ncpl) & IPL_SOFTMASK)
		return true;
#endif
	return false;
}

void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	__asm volatile("sync; eieio");	/* reorder protect */
}

int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}

void
genppc_cpu_configure(void)
{
	aprint_normal("biomask %x netmask %x ttymask %x\n",
	    (u_int)imask[IPL_BIO] & 0x1fffffff,
	    (u_int)imask[IPL_NET] & 0x1fffffff,
	    (u_int)imask[IPL_TTY] & 0x1fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	    mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq_map[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			/*
			 * Remember the candidate with the fewest
			 * established handlers.
			 */
			if (maybe_irq == -1 || depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif