/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_intr.c,v 1.24.2.1 2001/10/14 20:05:50 luigi Exp $
 * $DragonFly: src/sys/kern/kern_intr.c,v 1.55 2008/09/01 12:49:00 sephe Exp $
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/thread2.h>
#include <sys/random.h>
#include <sys/serialize.h>
#include <sys/interrupt.h>
#include <sys/bus.h>
#include <sys/machintr.h>

#include <machine/frame.h>

struct intr_info;

typedef struct intrec {
    struct intrec *next;
    struct intr_info *info;
    inthand2_t *handler;
    void *argument;
    char *name;
    int intr;
    int intr_flags;
    struct lwkt_serialize *serializer;
} *intrec_t;

struct intr_info {
    intrec_t            i_reclist;
    struct thread       i_thread;
    struct random_softc i_random;
    int                 i_running;
    long                i_count;        /* interrupts dispatched */
    int                 i_mplock_required;
    int                 i_fast;
    int                 i_slow;
    int                 i_state;
    int                 i_errorticks;
    unsigned long       i_straycount;
} intr_info_ary[MAX_INTS];

int max_installed_hard_intr;
int max_installed_soft_intr;

#define EMERGENCY_INTR_POLLING_FREQ_MAX 20000

static int sysctl_emergency_freq(SYSCTL_HANDLER_ARGS);
static int sysctl_emergency_enable(SYSCTL_HANDLER_ARGS);
static void emergency_intr_timer_callback(systimer_t, struct intrframe *);
static void ithread_handler(void *arg);
static void ithread_emergency(void *arg);
static void report_stray_interrupt(int intr, struct intr_info *info);
static void int_moveto_destcpu(int *, int *, int);
static void int_moveto_origcpu(int, int);
#ifdef SMP
static void intr_get_mplock(void);
#endif

int intr_info_size = sizeof(intr_info_ary) / sizeof(intr_info_ary[0]);

static struct systimer emergency_intr_timer;
static struct thread emergency_intr_thread;

#define ISTATE_NOTHREAD         0
#define ISTATE_NORMAL           1
#define ISTATE_LIVELOCKED       2

#ifdef SMP
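/*
 * SMP-only tunables: intr_mpsafe == 0 forces all handlers to run while
 * holding the BGL, intr_migrate allows an interrupt thread to chase the
 * cpu currently holding the BGL (see intr_get_mplock()), and
 * intr_migrate_count counts how many times such a migration occurred.
 */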
static int intr_mpsafe = 1;
static int intr_migrate = 0;
static int intr_migrate_count;
TUNABLE_INT("kern.intr_mpsafe", &intr_mpsafe);
SYSCTL_INT(_kern, OID_AUTO, intr_mpsafe,
        CTLFLAG_RW, &intr_mpsafe, 0, "Run INTR_MPSAFE handlers without the BGL");
SYSCTL_INT(_kern, OID_AUTO, intr_migrate,
        CTLFLAG_RW, &intr_migrate, 0, "Migrate to cpu holding BGL");
SYSCTL_INT(_kern, OID_AUTO, intr_migrate_count,
        CTLFLAG_RW, &intr_migrate_count, 0, "");
#endif
static int livelock_limit = 40000;
static int livelock_lowater = 20000;
static int livelock_debug = -1;
SYSCTL_INT(_kern, OID_AUTO, livelock_limit,
        CTLFLAG_RW, &livelock_limit, 0, "Livelock interrupt rate limit");
SYSCTL_INT(_kern, OID_AUTO, livelock_lowater,
        CTLFLAG_RW, &livelock_lowater, 0, "Livelock low-water mark restore");
SYSCTL_INT(_kern, OID_AUTO, livelock_debug,
        CTLFLAG_RW, &livelock_debug, 0, "Livelock debug intr#");

static int emergency_intr_enable = 0;   /* emergency interrupt polling */
TUNABLE_INT("kern.emergency_intr_enable", &emergency_intr_enable);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_enable, CTLTYPE_INT | CTLFLAG_RW,
        0, 0, sysctl_emergency_enable, "I", "Emergency Interrupt Poll Enable");

static int emergency_intr_freq = 10;    /* emergency polling frequency */
TUNABLE_INT("kern.emergency_intr_freq", &emergency_intr_freq);
SYSCTL_PROC(_kern, OID_AUTO, emergency_intr_freq, CTLTYPE_INT | CTLFLAG_RW,
        0, 0, sysctl_emergency_freq, "I", "Emergency Interrupt Poll Frequency");

/*
 * Sysctl support routines
 */
static int
sysctl_emergency_enable(SYSCTL_HANDLER_ARGS)
{
    int error, enabled;

    enabled = emergency_intr_enable;
    error = sysctl_handle_int(oidp, &enabled, 0, req);
    if (error || req->newptr == NULL)
        return error;
    emergency_intr_enable = enabled;
    if (emergency_intr_enable) {
        systimer_adjust_periodic(&emergency_intr_timer,
                                 emergency_intr_freq);
    } else {
        systimer_adjust_periodic(&emergency_intr_timer, 1);
    }
    return 0;
}

static int
sysctl_emergency_freq(SYSCTL_HANDLER_ARGS)
{
    int error, phz;

    phz = emergency_intr_freq;
    error = sysctl_handle_int(oidp, &phz, 0, req);
    if (error || req->newptr == NULL)
        return error;
    if (phz <= 0)
        return EINVAL;
    else if (phz > EMERGENCY_INTR_POLLING_FREQ_MAX)
        phz = EMERGENCY_INTR_POLLING_FREQ_MAX;

    emergency_intr_freq = phz;
    if (emergency_intr_enable) {
        systimer_adjust_periodic(&emergency_intr_timer,
                                 emergency_intr_freq);
    } else {
        systimer_adjust_periodic(&emergency_intr_timer, 1);
    }
    return 0;
}

/*
 * Register an SWI or INTerrupt handler.
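 *
 * The returned opaque cookie identifies the handler record and is later
 * passed to unregister_swi()/unregister_int() to remove it.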
 */
void *
register_swi(int intr, inthand2_t *handler, void *arg, const char *name,
             struct lwkt_serialize *serializer)
{
    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
        panic("register_swi: bad intr %d", intr);
    return(register_int(intr, handler, arg, name, serializer, 0));
}

void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
             struct lwkt_serialize *serializer, int intr_flags)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec;
    int orig_cpuid, cpuid;

    if (intr < 0 || intr >= MAX_INTS)
        panic("register_int: bad intr %d", intr);
    if (name == NULL)
        name = "???";
    info = &intr_info_ary[intr];

    /*
     * Construct an interrupt handler record
     */
    rec = kmalloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
    rec->name = kmalloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
    strcpy(rec->name, name);

    rec->info = info;
    rec->handler = handler;
    rec->argument = arg;
    rec->intr = intr;
    rec->intr_flags = intr_flags;
    rec->next = NULL;
    rec->serializer = serializer;

    /*
     * Create an emergency polling thread and set up a systimer to wake
     * it up.
     */
    if (emergency_intr_thread.td_kstack == NULL) {
        lwkt_create(ithread_emergency, NULL, NULL,
                    &emergency_intr_thread, TDF_STOPREQ|TDF_INTTHREAD, -1,
                    "ithread emerg");
        systimer_init_periodic_nq(&emergency_intr_timer,
                    emergency_intr_timer_callback, &emergency_intr_thread,
                    (emergency_intr_enable ? emergency_intr_freq : 1));
    }

    int_moveto_destcpu(&orig_cpuid, &cpuid, intr);

    /*
     * Create an interrupt thread if necessary, leave it in an unscheduled
     * state.
     */
    if (info->i_state == ISTATE_NOTHREAD) {
        info->i_state = ISTATE_NORMAL;
        lwkt_create((void *)ithread_handler, (void *)(intptr_t)intr, NULL,
                    &info->i_thread, TDF_STOPREQ|TDF_INTTHREAD|TDF_MPSAFE, -1,
                    "ithread %d", intr);
        if (intr >= FIRST_SOFTINT)
            lwkt_setpri(&info->i_thread, TDPRI_SOFT_NORM);
        else
            lwkt_setpri(&info->i_thread, TDPRI_INT_MED);
        info->i_thread.td_preemptable = lwkt_preempt;
    }

    list = &info->i_reclist;

    /*
     * Keep track of how many fast and slow interrupts we have.
     * Set i_mplock_required if any handler in the chain requires
     * the MP lock to operate.
     */
    if ((intr_flags & INTR_MPSAFE) == 0)
        info->i_mplock_required = 1;
    if (intr_flags & INTR_FAST)
        ++info->i_fast;
    else
        ++info->i_slow;

    /*
     * Enable random number generation keying off of this interrupt.
     */
    if ((intr_flags & INTR_NOENTROPY) == 0 && info->i_random.sc_enabled == 0) {
        info->i_random.sc_enabled = 1;
        info->i_random.sc_intr = intr;
    }

    /*
     * Add the record to the interrupt list.
     */
    crit_enter();
    while (*list != NULL)
        list = &(*list)->next;
    *list = rec;
    crit_exit();

    /*
     * Update max_installed_hard_intr to make the emergency intr poll
     * a bit more efficient.
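     * (The emergency poll thread only scans irqs below
     * max_installed_hard_intr, so keeping this bound tight avoids
     * walking empty slots.)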
     */
    if (intr < FIRST_SOFTINT) {
        if (max_installed_hard_intr <= intr)
            max_installed_hard_intr = intr + 1;
    } else {
        if (max_installed_soft_intr <= intr)
            max_installed_soft_intr = intr + 1;
    }

    /*
     * Setup the machine level interrupt vector
     */
    if (intr < FIRST_SOFTINT && info->i_slow + info->i_fast == 1) {
        if (machintr_vector_setup(intr, intr_flags))
            kprintf("machintr_vector_setup: failed on irq %d\n", intr);
    }

    int_moveto_origcpu(orig_cpuid, cpuid);

    return(rec);
}

void
unregister_swi(void *id)
{
    unregister_int(id);
}

void
unregister_int(void *id)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec;
    int intr, orig_cpuid, cpuid;

    intr = ((intrec_t)id)->intr;

    if (intr < 0 || intr >= MAX_INTS)
        panic("unregister_int: bad intr %d", intr);

    info = &intr_info_ary[intr];

    int_moveto_destcpu(&orig_cpuid, &cpuid, intr);

    /*
     * Remove the interrupt descriptor, adjust the descriptor count,
     * and teardown the machine level vector if this was the last interrupt.
     */
    crit_enter();
    list = &info->i_reclist;
    while ((rec = *list) != NULL) {
        if (rec == id)
            break;
        list = &rec->next;
    }
    if (rec) {
        intrec_t rec0;

        *list = rec->next;
        if (rec->intr_flags & INTR_FAST)
            --info->i_fast;
        else
            --info->i_slow;
        if (intr < FIRST_SOFTINT && info->i_fast + info->i_slow == 0)
            machintr_vector_teardown(intr);

        /*
         * Clear i_mplock_required if no handlers in the chain require the
         * MP lock.
         */
        for (rec0 = info->i_reclist; rec0; rec0 = rec0->next) {
            if ((rec0->intr_flags & INTR_MPSAFE) == 0)
                break;
        }
        if (rec0 == NULL)
            info->i_mplock_required = 0;
    }

    crit_exit();

    int_moveto_origcpu(orig_cpuid, cpuid);

    /*
     * Free the record.
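     * The name string was allocated separately in register_int() and is
     * freed along with the record itself.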
     */
    if (rec != NULL) {
        kfree(rec->name, M_DEVBUF);
        kfree(rec, M_DEVBUF);
    } else {
        kprintf("warning: unregister_int: int %d handler for %s not found\n",
                intr, ((intrec_t)id)->name);
    }
}

const char *
get_registered_name(int intr)
{
    intrec_t rec;

    if (intr < 0 || intr >= MAX_INTS)
        panic("get_registered_name: bad intr %d", intr);

    if ((rec = intr_info_ary[intr].i_reclist) == NULL)
        return(NULL);
    else if (rec->next)
        return("mux");
    else
        return(rec->name);
}

int
count_registered_ints(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("count_registered_ints: bad intr %d", intr);
    info = &intr_info_ary[intr];
    return(info->i_fast + info->i_slow);
}

long
get_interrupt_counter(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("get_interrupt_counter: bad intr %d", intr);
    info = &intr_info_ary[intr];
    return(info->i_count);
}


void
swi_setpriority(int intr, int pri)
{
    struct intr_info *info;

    if (intr < FIRST_SOFTINT || intr >= MAX_INTS)
        panic("swi_setpriority: bad intr %d", intr);
    info = &intr_info_ary[intr];
    if (info->i_state != ISTATE_NOTHREAD)
        lwkt_setpri(&info->i_thread, pri);
}

void
register_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("register_randintr: bad intr %d", intr);
    info = &intr_info_ary[intr];
    info->i_random.sc_intr = intr;
    info->i_random.sc_enabled = 1;
}

void
unregister_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("unregister_randintr: bad intr %d", intr);
    info = &intr_info_ary[intr];
    info->i_random.sc_enabled = -1;
}

int
next_registered_randintr(int intr)
{
    struct intr_info *info;

    if (intr < 0 || intr >= MAX_INTS)
        panic("next_registered_randintr: bad intr %d", intr);
    while (intr < MAX_INTS) {
        info = &intr_info_ary[intr];
        if (info->i_random.sc_enabled > 0)
            break;
        ++intr;
    }
    return(intr);
}

/*
 * Dispatch an interrupt.  If there's nothing to do we have a stray
 * interrupt and can just return, leaving the interrupt masked.
 *
 * We need to schedule the interrupt and set its i_running bit.  If
 * we are not on the interrupt thread's cpu we have to send a message
 * to the correct cpu that will issue the desired action (interlocking
 * with the interrupt thread's critical section).  We do NOT attempt to
 * reschedule interrupts whose i_running bit is already set because
 * this would prematurely wakeup a livelock-limited interrupt thread.
 *
 * i_running is only tested/set on the same cpu as the interrupt thread.
 *
 * We are NOT in a critical section, which will allow the scheduled
 * interrupt to preempt us.  The MP lock might *NOT* be held here.
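 *
 * On SMP the cross-cpu case is handled by sending sched_ithd_remote()
 * via lwkt_send_ipiq() to the cpu owning the interrupt thread.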
 */
#ifdef SMP

static void
sched_ithd_remote(void *arg)
{
    sched_ithd((int)(intptr_t)arg);
}

#endif

void
sched_ithd(int intr)
{
    struct intr_info *info;

    info = &intr_info_ary[intr];

    ++info->i_count;
    if (info->i_state != ISTATE_NOTHREAD) {
        if (info->i_reclist == NULL) {
            report_stray_interrupt(intr, info);
        } else {
#ifdef SMP
            if (info->i_thread.td_gd == mycpu) {
                if (info->i_running == 0) {
                    info->i_running = 1;
                    if (info->i_state != ISTATE_LIVELOCKED)
                        lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
                }
            } else {
                lwkt_send_ipiq(info->i_thread.td_gd,
                               sched_ithd_remote, (void *)(intptr_t)intr);
            }
#else
            if (info->i_running == 0) {
                info->i_running = 1;
                if (info->i_state != ISTATE_LIVELOCKED)
                    lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
            }
#endif
        }
    } else {
        report_stray_interrupt(intr, info);
    }
}

static void
report_stray_interrupt(int intr, struct intr_info *info)
{
    ++info->i_straycount;
    if (info->i_straycount < 10) {
        if (info->i_errorticks == ticks)
            return;
        info->i_errorticks = ticks;
        kprintf("sched_ithd: stray interrupt %d on cpu %d\n",
                intr, mycpuid);
    } else if (info->i_straycount == 10) {
        kprintf("sched_ithd: %lu stray interrupts %d on cpu %d - "
                "there will be no further reports\n",
                info->i_straycount, intr, mycpuid);
    }
}

/*
 * This is run from a periodic SYSTIMER (and thus must be MP safe, the BGL
 * might not be held).
 */
static void
ithread_livelock_wakeup(systimer_t st)
{
    struct intr_info *info;

    info = &intr_info_ary[(int)(intptr_t)st->data];
    if (info->i_state != ISTATE_NOTHREAD)
        lwkt_schedule(&info->i_thread);
}

/*
 * Schedule ithread within fast intr handler
 *
 * XXX Protect sched_ithd() call with gd_intr_nesting_level?
 * Interrupts aren't enabled, but still...
 */
static __inline void
ithread_fast_sched(int intr, thread_t td)
{
    ++td->td_nest_count;

    /*
     * We are already in critical section, exit it now to
     * allow preemption.
     */
    crit_exit_quick(td);
    sched_ithd(intr);
    crit_enter_quick(td);

    --td->td_nest_count;
}

/*
 * This function is called directly from the ICU or APIC vector code assembly
 * to process an interrupt.  The critical section and interrupt deferral
 * checks have already been done and the function is entered WITH
 * a critical section held.  The BGL may or may not be held.
 *
 * Must return non-zero if we do not want the vector code to re-enable
 * the interrupt (which we don't if we have to schedule the interrupt).
 */
int ithread_fast_handler(struct intrframe *frame);

int
ithread_fast_handler(struct intrframe *frame)
{
    int intr;
    struct intr_info *info;
    struct intrec **list;
    int must_schedule;
#ifdef SMP
    int got_mplock;
#endif
    intrec_t rec, next_rec;
    globaldata_t gd;
    thread_t td;

    intr = frame->if_vec;
    gd = mycpu;
    td = curthread;

    /* We must be in critical section. */
    KKASSERT(td->td_pri >= TDPRI_CRIT);

    info = &intr_info_ary[intr];

    /*
     * If we are not processing any FAST interrupts, just schedule the thing.
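     * (i_fast is the number of INTR_FAST handlers registered on this irq;
     * without any, all work is deferred to the interrupt thread.)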
     */
    if (info->i_fast == 0) {
        ++gd->gd_cnt.v_intr;
        ithread_fast_sched(intr, td);
        return(1);
    }

    /*
     * This should not normally occur since interrupts ought to be
     * masked if the ithread has been scheduled or is running.
     */
    if (info->i_running)
        return(1);

    /*
     * Bump the interrupt nesting level to process any FAST interrupts.
     * Obtain the MP lock as necessary.  If the MP lock cannot be obtained,
     * schedule the interrupt thread to deal with the issue instead.
     *
     * To reduce overhead, just leave the MP lock held once it has been
     * obtained.
     */
    ++gd->gd_intr_nesting_level;
    ++gd->gd_cnt.v_intr;
    must_schedule = info->i_slow;
#ifdef SMP
    got_mplock = 0;
#endif

    list = &info->i_reclist;
    for (rec = *list; rec; rec = next_rec) {
        next_rec = rec->next;   /* rec may be invalid after call */

        if (rec->intr_flags & INTR_FAST) {
#ifdef SMP
            if ((rec->intr_flags & INTR_MPSAFE) == 0 && got_mplock == 0) {
                if (try_mplock() == 0) {
                    /* Couldn't get the MP lock; just schedule it. */
                    must_schedule = 1;
                    break;
                }
                got_mplock = 1;
            }
#endif
            if (rec->serializer) {
                must_schedule += lwkt_serialize_handler_try(
                                    rec->serializer, rec->handler,
                                    rec->argument, frame);
            } else {
                rec->handler(rec->argument, frame);
            }
        }
    }

    /*
     * Cleanup
     */
    --gd->gd_intr_nesting_level;
#ifdef SMP
    if (got_mplock)
        rel_mplock();
#endif

    /*
     * If we had a problem, or mixed fast and slow interrupt handlers are
     * registered, schedule the ithread to catch the missed records (it
     * will just re-run all of them).  A return value of 0 indicates that
     * all handlers have been run and the interrupt can be re-enabled, and
     * a non-zero return indicates that the interrupt thread controls
     * re-enablement.
     */
    if (must_schedule > 0)
        ithread_fast_sched(intr, td);
    else if (must_schedule == 0)
        ++info->i_count;
    return(must_schedule);
}

/*
 * Interrupt threads run this as their main loop.
 *
 * The handler begins execution outside a critical section and with the BGL
 * held.
 *
 * The i_running state starts at 0.  When an interrupt occurs, the hardware
 * interrupt is disabled and sched_ithd() is called.  The HW interrupt remains
 * disabled until all routines have run.  We then call ithread_done() to
 * reenable the HW interrupt and deschedule us until the next interrupt.
 *
 * We are responsible for atomically checking i_running and ithread_done()
 * is responsible for atomically checking for platform-specific delayed
 * interrupts.  i_running for our irq is only set in the context of our cpu,
 * so a critical section is a sufficient interlock.
 */
#define LIVELOCK_TIMEFRAME(freq)        ((freq) >> 2)   /* 1/4 second */

static void
ithread_handler(void *arg)
{
    struct intr_info *info;
    int use_limit;
    __uint32_t lseconds;
    int intr;
    int mpheld;
    struct intrec **list;
    intrec_t rec, nrec;
    globaldata_t gd;
    struct systimer ill_timer;  /* enforced freq. timer */
    u_int ill_count;            /* interrupt livelock counter */

    ill_count = 0;
    intr = (int)(intptr_t)arg;
    info = &intr_info_ary[intr];
    list = &info->i_reclist;

    /*
     * The loop must be entered with one critical section held.  The thread
     * is created with TDF_MPSAFE so the MP lock is not held on start.
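     *
     * mpheld tracks whether this thread currently owns the BGL; it is
     * acquired and released inside the loop as i_mplock_required and the
     * intr_mpsafe tunable dictate.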
     */
    gd = mycpu;
    lseconds = gd->gd_time_seconds;
    crit_enter_gd(gd);
    mpheld = 0;

    for (;;) {
        /*
         * The chain is only considered MPSAFE if all its interrupt handlers
         * are MPSAFE.  However, if intr_mpsafe has been turned off we
         * always operate with the BGL.
         */
#ifdef SMP
        if (intr_mpsafe == 0) {
            if (mpheld == 0) {
                intr_get_mplock();
                mpheld = 1;
            }
        } else if (info->i_mplock_required != mpheld) {
            if (info->i_mplock_required) {
                KKASSERT(mpheld == 0);
                intr_get_mplock();
                mpheld = 1;
            } else {
                KKASSERT(mpheld != 0);
                rel_mplock();
                mpheld = 0;
            }
        }

        /*
         * scheduled cpu may have changed, see intr_get_mplock()
         */
        gd = mycpu;
#endif

        /*
         * If an interrupt is pending, clear i_running and execute the
         * handlers.  Note that certain types of interrupts can re-trigger
         * and set i_running again.
         *
         * Each handler is run in a critical section.  Note that we run both
         * FAST and SLOW designated service routines.
         */
        if (info->i_running) {
            ++ill_count;
            info->i_running = 0;

            if (*list == NULL)
                report_stray_interrupt(intr, info);

            for (rec = *list; rec; rec = nrec) {
                nrec = rec->next;
                if (rec->serializer) {
                    lwkt_serialize_handler_call(rec->serializer, rec->handler,
                                                rec->argument, NULL);
                } else {
                    rec->handler(rec->argument, NULL);
                }
            }
        }

        /*
         * This is our interrupt hook to add rate randomness to the random
         * number generator.
         */
        if (info->i_random.sc_enabled > 0)
            add_interrupt_randomness(intr);

        /*
         * Unmask the interrupt to allow it to trigger again.  This only
         * applies to certain types of interrupts (typ level interrupts).
         * This can result in the interrupt retriggering, but the retrigger
         * will not be processed until we cycle our critical section.
         *
         * Only unmask interrupts while handlers are installed.  It is
         * possible to hit a situation where no handlers are installed
         * due to a device driver livelocking and then tearing down its
         * interrupt on close (the parallel bus being a good example).
         */
        if (*list)
            machintr_intren(intr);

        /*
         * Do a quick exit/enter to catch any higher-priority interrupt
         * sources, such as the statclock, so thread time accounting
         * will still work.  This may also cause an interrupt to re-trigger.
         */
        crit_exit_gd(gd);
        crit_enter_gd(gd);

        /*
         * LIVELOCK STATE MACHINE
         */
        switch(info->i_state) {
        case ISTATE_NORMAL:
            /*
             * Reset the count each second.
             */
            if (lseconds != gd->gd_time_seconds) {
                lseconds = gd->gd_time_seconds;
                ill_count = 0;
            }

            /*
             * If we did not exceed the frequency limit, we are done.
             * If the interrupt has not retriggered we deschedule ourselves.
             */
            if (ill_count <= livelock_limit) {
                if (info->i_running == 0) {
#ifdef SMP
                    if (mpheld && intr_migrate) {
                        rel_mplock();
                        mpheld = 0;
                    }
#endif
                    lwkt_deschedule_self(gd->gd_curthread);
                    lwkt_switch();
                }
                break;
            }

            /*
             * Otherwise we are livelocked.  Set up a periodic systimer
             * to wake the thread up at the limit frequency.
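             * (ithread_livelock_wakeup() reschedules this thread from the
             * systimer; the wakeup rate is clamped to the 100-500000 hz
             * range below.)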
             */
            kprintf("intr %d at %d/%d hz, livelocked limit engaged!\n",
                    intr, ill_count, livelock_limit);
            info->i_state = ISTATE_LIVELOCKED;
            if ((use_limit = livelock_limit) < 100)
                use_limit = 100;
            else if (use_limit > 500000)
                use_limit = 500000;
            systimer_init_periodic_nq(&ill_timer, ithread_livelock_wakeup,
                                      (void *)(intptr_t)intr, use_limit);
            /* fall through */
        case ISTATE_LIVELOCKED:
            /*
             * Wait for our periodic timer to go off.  Since the interrupt
             * has re-armed it can still set i_running, but it will not
             * reschedule us while we are in a livelocked state.
             */
            lwkt_deschedule_self(gd->gd_curthread);
            lwkt_switch();

            /*
             * Check once a second to see if the livelock condition no
             * longer applies.
             */
            if (lseconds != gd->gd_time_seconds) {
                lseconds = gd->gd_time_seconds;
                if (ill_count < livelock_lowater) {
                    info->i_state = ISTATE_NORMAL;
                    systimer_del(&ill_timer);
                    kprintf("intr %d at %d/%d hz, livelock removed\n",
                            intr, ill_count, livelock_lowater);
                } else if (livelock_debug == intr ||
                           (bootverbose && cold)) {
                    kprintf("intr %d at %d/%d hz, in livelock\n",
                            intr, ill_count, livelock_lowater);
                }
                ill_count = 0;
            }
            break;
        }
    }
    /* not reached */
}

#ifdef SMP

/*
 * An interrupt thread is trying to get the MP lock.  To avoid cpu-bound
 * code in the kernel on cpu X from interfering we chase the MP lock.
 */
static void
intr_get_mplock(void)
{
    int owner;

    if (intr_migrate == 0) {
        get_mplock();
        return;
    }
    while (try_mplock() == 0) {
        owner = owner_mplock();
        if (owner >= 0 && owner != mycpu->gd_cpuid) {
            lwkt_migratecpu(owner);
            ++intr_migrate_count;
        } else {
            lwkt_switch();
        }
    }
}

#endif

/*
 * Emergency interrupt polling thread.  The thread begins execution
 * outside a critical section with the BGL held.
 *
 * If emergency interrupt polling is enabled, this thread will
 * execute all system interrupts not marked INTR_NOPOLL at the
 * specified polling frequency.
 *
 * WARNING! This thread runs *ALL* interrupt service routines that
 * are not marked INTR_NOPOLL, which basically means everything except
 * the 8254 clock interrupt and the ATA interrupt.  It has very high
 * overhead and should only be used in situations where the machine
 * cannot otherwise be made to work.  Due to the severe performance
 * degradation, it should not be enabled on production machines.
 */
static void
ithread_emergency(void *arg __unused)
{
    struct intr_info *info;
    intrec_t rec, nrec;
    int intr;

    for (;;) {
        for (intr = 0; intr < max_installed_hard_intr; ++intr) {
            info = &intr_info_ary[intr];
            for (rec = info->i_reclist; rec; rec = nrec) {
                nrec = rec->next;       /* rec may be invalid after call */
                if ((rec->intr_flags & INTR_NOPOLL) == 0) {
                    if (rec->serializer) {
                        lwkt_serialize_handler_call(rec->serializer,
                                    rec->handler, rec->argument, NULL);
                    } else {
                        rec->handler(rec->argument, NULL);
                    }
                }
            }
        }
        lwkt_deschedule_self(curthread);
        lwkt_switch();
    }
}

/*
 * Systimer callback - schedule the emergency interrupt poll thread
 * if emergency polling is enabled.
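 *
 * The systimer's data field was set to &emergency_intr_thread when the
 * timer was initialized in register_int().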
 */
static
void
emergency_intr_timer_callback(systimer_t info, struct intrframe *frame __unused)
{
    if (emergency_intr_enable)
        lwkt_schedule(info->data);
}

int
ithread_cpuid(int intr)
{
    const struct intr_info *info;

    KKASSERT(intr >= 0 && intr < MAX_INTS);
    info = &intr_info_ary[intr];

    if (info->i_state == ISTATE_NOTHREAD)
        return -1;
    return info->i_thread.td_gd->gd_cpuid;
}

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */

static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    intrec_t rec;
    int error = 0;
    int len;
    int intr;
    char buf[64];

    for (intr = 0; error == 0 && intr < MAX_INTS; ++intr) {
        info = &intr_info_ary[intr];

        len = 0;
        buf[0] = 0;
        for (rec = info->i_reclist; rec; rec = rec->next) {
            ksnprintf(buf + len, sizeof(buf) - len, "%s%s",
                      (len ? "/" : ""), rec->name);
            len += strlen(buf + len);
        }
        if (len == 0) {
            ksnprintf(buf, sizeof(buf), "irq%d", intr);
            len = strlen(buf);
        }
        error = SYSCTL_OUT(req, buf, len + 1);
    }
    return (error);
}


SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
            NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
    struct intr_info *info;
    int error = 0;
    int intr;

    for (intr = 0; intr < max_installed_hard_intr; ++intr) {
        info = &intr_info_ary[intr];

        error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
        if (error)
            goto failed;
    }
    for (intr = FIRST_SOFTINT; intr < max_installed_soft_intr; ++intr) {
        info = &intr_info_ary[intr];

        error = SYSCTL_OUT(req, &info->i_count, sizeof(info->i_count));
        if (error)
            goto failed;
    }
failed:
    return(error);
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
            NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

/*
 * Move the calling thread to the interrupt's destination cpu, as selected
 * by the hw.irq.%d.dest tunable (if set and valid).  The original and
 * destination cpu ids are returned so the caller can move back afterwards
 * via int_moveto_origcpu().
 */
static void
int_moveto_destcpu(int *orig_cpuid0, int *cpuid0, int intr)
{
    int orig_cpuid = mycpuid, cpuid;
    char envpath[32];

    cpuid = orig_cpuid;
    ksnprintf(envpath, sizeof(envpath), "hw.irq.%d.dest", intr);
    kgetenv_int(envpath, &cpuid);
    if (cpuid >= ncpus)
        cpuid = orig_cpuid;

    if (cpuid != orig_cpuid)
        lwkt_migratecpu(cpuid);

    *orig_cpuid0 = orig_cpuid;
    *cpuid0 = cpuid;
}

/*
 * Move the calling thread back to its original cpu if it was migrated.
 */
static void
int_moveto_origcpu(int orig_cpuid, int cpuid)
{
    if (cpuid != orig_cpuid)
        lwkt_migratecpu(orig_cpuid);
}