/*	$NetBSD: kern_softint.c,v 1.8 2007/12/11 19:07:28 ad Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Generic software interrupt framework.
 *
 * Overview
 *
 *	The soft interrupt framework provides a mechanism to schedule a
 *	low priority callback that runs with thread context.  It allows
 *	for dynamic registration of software interrupts, and for fair
 *	queueing and prioritization of those interrupts.  The callbacks
 *	can be scheduled to run from nearly any point in the kernel: by
 *	code running with thread context, by code running from a
 *	hardware interrupt handler, and at any interrupt priority
 *	level.
 *
 * Priority levels
 *
 *	Since soft interrupt dispatch can be tied to the underlying
 *	architecture's interrupt dispatch code, it can be limited
 *	both by the capabilities of the hardware and the capabilities
 *	of the interrupt dispatch code itself.  The number of priority
 *	levels is restricted to four.  In order of priority (lowest to
 *	highest) the levels are: clock, bio, net, serial.
 *
 *	The names are symbolic and in isolation do not have any direct
 *	connection with a particular kind of device activity: they are
 *	only meant as a guide.
 *
 *	The four priority levels map directly to scheduler priority
 *	levels, and where the architecture implements 'fast' software
 *	interrupts, they also map onto interrupt priorities.  The
 *	interrupt priorities are intended to be hidden from machine
 *	independent code, which should use thread-safe mechanisms to
 *	synchronize with software interrupts (for example: mutexes).
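 *
 *	As an illustration (hypothetical, not part of this file): code
 *	that shares data between thread context and a soft interrupt
 *	handler would typically guard it with a mutex initialized at
 *	the matching software interrupt priority, instead of making
 *	spl calls itself.  Here "sc_lock" and "sc_queue" are made-up
 *	driver fields:
 *
 *		mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTNET);
 *		...
 *		mutex_enter(&sc->sc_lock);
 *		TAILQ_INSERT_TAIL(&sc->sc_queue, item, i_entries);
 *		mutex_exit(&sc->sc_lock);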
 *
 * Capabilities
 *
 *	Software interrupts run with limited machine context.  In
 *	particular, they do not possess any address space context.  They
 *	should not try to operate on user space addresses, or to use
 *	virtual memory facilities other than those noted as interrupt
 *	safe.
 *
 *	Unlike hardware interrupts, software interrupts do have thread
 *	context.  They may block on synchronization objects, sleep, and
 *	resume execution at a later time.
 *
 *	Since software interrupts are a limited resource and run with
 *	higher priority than most other LWPs in the system, all
 *	block-and-resume activity by a software interrupt must be kept
 *	short to allow further processing at that level to continue.  By
 *	extension, code running with process context must take care to
 *	ensure that any lock that may be taken from a software interrupt
 *	cannot be held for more than a short period of time.
 *
 *	The kernel does not allow software interrupts to use facilities
 *	or perform actions that may block for a significant amount of
 *	time.  This means that it's not valid for a software interrupt
 *	to: sleep on condition variables, use the lockmgr() facility,
 *	or wait for resources to become available (for example,
 *	memory).
 *
 * Per-CPU operation
 *
 *	If a soft interrupt is triggered on a CPU, it can only be
 *	dispatched on the same CPU.  Each LWP dedicated to handling a
 *	soft interrupt is bound to its home CPU, so if the LWP blocks
 *	and needs to run again, it can only run there.  Nearly all data
 *	structures used to manage software interrupts are per-CPU.
 *
 *	The per-CPU requirement is intended to reduce "ping-pong" of
 *	cache lines between CPUs: lines occupied by data structures
 *	used to manage the soft interrupts, and lines occupied by data
 *	items being passed down to the soft interrupt.  As a positive
 *	side effect, this also means that the soft interrupt dispatch
 *	code does not need to use spinlocks to synchronize.
 *
 * Generic implementation
 *
 *	A generic, low performance implementation is provided that
 *	works across all architectures, with no machine-dependent
 *	modifications needed.  This implementation uses the scheduler,
 *	and so has a number of restrictions:
 *
 *	1) The software interrupts are not currently preemptive, so
 *	   must wait for the currently executing LWP to yield the CPU.
 *	   This can introduce latency.
 *
 *	2) An expensive context switch is required for a software
 *	   interrupt to be handled.
 *
 * 'Fast' software interrupts
 *
 *	If an architecture defines __HAVE_FAST_SOFTINTS, it implements
 *	the fast mechanism.  Threads running either in the kernel or in
 *	userspace will be interrupted, but will not be preempted.  When
 *	the soft interrupt completes execution, the interrupted LWP
 *	is resumed.  Interrupt dispatch code must provide the minimum
 *	level of context necessary for the soft interrupt to block and
 *	be resumed at a later time.  The machine-dependent dispatch
 *	path looks something like the following:
 *
 *	softintr()
 *	{
 *		go to IPL_HIGH if necessary for switch;
 *		save any necessary registers in a format that can be
 *		restored by cpu_switchto if the softint blocks;
 *		arrange for cpu_switchto() to restore into the
 *		trampoline function;
 *		identify LWP to handle this interrupt;
 *		switch to the LWP's stack;
 *		switch register stacks, if necessary;
 *		assign new value of curlwp;
 *		call MI softint_dispatch, passing old curlwp and IPL
 *		to execute interrupt at;
 *		switch back to old stack;
 *		switch back to old register stack, if necessary;
 *		restore curlwp;
 *		return to interrupted LWP;
 *	}
 *
 *	If the soft interrupt blocks, a trampoline function is returned
 *	to in the context of the interrupted LWP, as arranged for by
 *	softint():
 *
 *	softint_ret()
 *	{
 *		unlock soft interrupt LWP;
 *		resume interrupt processing, likely returning to
 *		interrupted LWP or dispatching another, different
 *		interrupt;
 *	}
 *
 *	Once the soft interrupt has fired (and even if it has blocked),
 *	no further soft interrupts at that level will be triggered by
 *	MI code until the soft interrupt handler has ceased execution.
 *	If a soft interrupt handler blocks and is resumed, it resumes
 *	execution as a normal LWP (kthread) and gains VM context.  Only
 *	when it has completed and is ready to fire again will it
 *	interrupt other threads.
 *
 * Future directions
 *
 *	Provide a cheap way to direct software interrupts to remote
 *	CPUs.  Provide a way to enqueue work items into the handler
 *	record, removing additional spl calls (see subr_workqueue.c).
 */
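
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * handler is registered once, at a chosen level and with SOFTINT_MPSAFE
 * if it does not need the kernel lock, and the returned cookie is kept
 * for later scheduling and teardown.
 *
 *	sc->sc_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *	    xx_softintr, sc);
 *	if (sc->sc_sih == NULL)
 *		... handle registration failure ...
 *
 *	... on detach ...
 *	softint_disestablish(sc->sc_sih);
 */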

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.8 2007/12/11 19:07:28 ad Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/intr.h>
#include <sys/mutex.h>
#include <sys/kthread.h>
#include <sys/evcnt.h>
#include <sys/cpu.h>

#include <net/netisr.h>

#include <uvm/uvm_extern.h>

/* This could overlap with signal info in struct lwp. */
typedef struct softint {
	SIMPLEQ_HEAD(, softhand)	si_q;
	struct lwp		*si_lwp;
	struct cpu_info		*si_cpu;
	uintptr_t		si_machdep;
	struct evcnt		si_evcnt;
	struct evcnt		si_evcnt_block;
	int			si_active;
	char			si_name[8];
	char			si_name_block[8+6];
} softint_t;

typedef struct softhand {
	SIMPLEQ_ENTRY(softhand)	sh_q;
	void			(*sh_func)(void *);
	void			*sh_arg;
	softint_t		*sh_isr;
	u_int			sh_pending;
	u_int			sh_flags;
} softhand_t;

typedef struct softcpu {
	struct cpu_info		*sc_cpu;
	softint_t		sc_int[SOFTINT_COUNT];
	softhand_t		sc_hand[1];
} softcpu_t;

static void	softint_thread(void *);

u_int		softint_bytes = 8192;
u_int		softint_timing;
static u_int	softint_max;
static kmutex_t	softint_lock;
static void	*softint_netisrs[32];

/*
 * softint_init_isr:
 *
 *	Initialize a single interrupt level for a single CPU.
 */
static void
softint_init_isr(softcpu_t *sc, const char *desc, pri_t pri, u_int level)
{
	struct cpu_info *ci;
	softint_t *si;
	int error;

	si = &sc->sc_int[level];
	ci = sc->sc_cpu;
	si->si_cpu = ci;

	SIMPLEQ_INIT(&si->si_q);

	error = kthread_create(pri, KTHREAD_MPSAFE | KTHREAD_INTR |
	    KTHREAD_IDLE, ci, softint_thread, si, &si->si_lwp,
	    "soft%s/%d", desc, (int)ci->ci_cpuid);
	if (error != 0)
		panic("softint_init_isr: error %d", error);

	snprintf(si->si_name, sizeof(si->si_name), "%s/%d", desc,
	    (int)ci->ci_cpuid);
	evcnt_attach_dynamic(&si->si_evcnt, EVCNT_TYPE_INTR, NULL,
	    "softint", si->si_name);
	snprintf(si->si_name_block, sizeof(si->si_name_block), "%s block/%d",
	    desc, (int)ci->ci_cpuid);
	evcnt_attach_dynamic(&si->si_evcnt_block, EVCNT_TYPE_INTR, NULL,
	    "softint", si->si_name_block);

	si->si_lwp->l_private = si;
	softint_init_md(si->si_lwp, level, &si->si_machdep);
}

/*
 * softint_init:
 *
 *	Initialize per-CPU data structures.  Called from mi_cpu_attach().
 */
void
softint_init(struct cpu_info *ci)
{
	static struct cpu_info *first;
	softcpu_t *sc, *scfirst;
	softhand_t *sh, *shmax;

	if (first == NULL) {
		/* Boot CPU. */
		first = ci;
		mutex_init(&softint_lock, MUTEX_DEFAULT, IPL_NONE);
		softint_bytes = round_page(softint_bytes);
		softint_max = (softint_bytes - sizeof(softcpu_t)) /
		    sizeof(softhand_t);
	}

	sc = (softcpu_t *)uvm_km_alloc(kernel_map, softint_bytes, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (sc == NULL)
		panic("softint_init_cpu: cannot allocate memory");

	ci->ci_data.cpu_softcpu = sc;
	ci->ci_data.cpu_softints = 0;
	sc->sc_cpu = ci;

	softint_init_isr(sc, "net", PRI_SOFTNET, SOFTINT_NET);
	softint_init_isr(sc, "bio", PRI_SOFTBIO, SOFTINT_BIO);
	softint_init_isr(sc, "clk", PRI_SOFTCLOCK, SOFTINT_CLOCK);
	softint_init_isr(sc, "ser", PRI_SOFTSERIAL, SOFTINT_SERIAL);

	if (first != ci) {
		mutex_enter(&softint_lock);
		scfirst = first->ci_data.cpu_softcpu;
		sh = sc->sc_hand;
		memcpy(sh, scfirst->sc_hand, sizeof(*sh) * softint_max);
		/* Update pointers for this CPU. */
		for (shmax = sh + softint_max; sh < shmax; sh++) {
			if (sh->sh_func == NULL)
				continue;
			sh->sh_isr =
			    &sc->sc_int[sh->sh_flags & SOFTINT_LVLMASK];
		}
		mutex_exit(&softint_lock);
	} else {
		/*
		 * Establish handlers for legacy net interrupts.
		 * XXX Needs to go away.
		 */
#define DONETISR(n, f)							\
		softint_netisrs[(n)] =					\
		    softint_establish(SOFTINT_NET, (void (*)(void *))(f), NULL)
#include <net/netisr_dispatch.h>
	}
}

/*
 * softint_establish:
 *
 *	Register a software interrupt handler.
 */
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	u_int level, index;

	level = (flags & SOFTINT_LVLMASK);
	KASSERT(level < SOFTINT_COUNT);

	mutex_enter(&softint_lock);

	/* Find a free slot. */
	sc = curcpu()->ci_data.cpu_softcpu;
	for (index = 1; index < softint_max; index++)
		if (sc->sc_hand[index].sh_func == NULL)
			break;
	if (index == softint_max) {
		mutex_exit(&softint_lock);
		printf("WARNING: softint_establish: table full, "
		    "increase softint_bytes\n");
		return NULL;
	}

	/* Set up the handler on each CPU. */
	if (ncpu < 2) {
		/* XXX hack for machines with no CPU_INFO_FOREACH() early on */
		sc = curcpu()->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];
		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
		sh->sh_pending = 0;
	} else for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];
		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
		sh->sh_pending = 0;
	}

	mutex_exit(&softint_lock);

	return (void *)((uint8_t *)&sc->sc_hand[index] - (uint8_t *)sc);
}

/*
 * softint_disestablish:
 *
 *	Unregister a software interrupt handler.
 */
void
softint_disestablish(void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	uintptr_t offset;

	offset = (uintptr_t)arg;
	KASSERT(offset != 0 && offset < softint_bytes);

	mutex_enter(&softint_lock);

	/* Clear the handler on each CPU. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = (softhand_t *)((uint8_t *)sc + offset);
		KASSERT(sh->sh_func != NULL);
		KASSERT(sh->sh_pending == 0);
		sh->sh_func = NULL;
	}

	mutex_exit(&softint_lock);
}

/*
 * softint_schedule:
 *
 *	Trigger a software interrupt.  Must be called from a hardware
 *	interrupt handler, or with preemption disabled (since we are
 *	using the value of curcpu()).
 */
void
softint_schedule(void *arg)
{
	softhand_t *sh;
	softint_t *si;
	uintptr_t offset;
	int s;

	/* Find the handler record for this CPU. */
	offset = (uintptr_t)arg;
	KASSERT(offset != 0 && offset < softint_bytes);
	sh = (softhand_t *)((uint8_t *)curcpu()->ci_data.cpu_softcpu + offset);

	/* If it's already pending there's nothing to do. */
	if (sh->sh_pending)
		return;

	/*
	 * Enqueue the handler into the LWP's pending list.
	 * If the LWP is completely idle, then make it run.
	 */
	s = splhigh();
	if (!sh->sh_pending) {
		si = sh->sh_isr;
		sh->sh_pending = 1;
		SIMPLEQ_INSERT_TAIL(&si->si_q, sh, sh_q);
		if (si->si_active == 0) {
			si->si_active = 1;
			softint_trigger(si->si_machdep);
		}
	}
	splx(s);
}
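
/*
 * For illustration only (hypothetical driver code, not part of this
 * file): the typical caller of softint_schedule() is a hardware
 * interrupt handler that acknowledges the device and defers the rest
 * of the work to the soft interrupt established at attach time.
 *
 *	int
 *	xx_intr(void *arg)
 *	{
 *		struct xx_softc *sc = arg;
 *
 *		... acknowledge the device, queue work for later ...
 *		softint_schedule(sc->sc_sih);
 *		return 1;
 *	}
 */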

/*
 * softint_execute:
 *
 *	Invoke handlers for the specified soft interrupt.
 *	Must be entered at splhigh.  Will drop the priority
 *	to the level specified, but returns at splhigh.
 */
static inline void
softint_execute(softint_t *si, lwp_t *l, int s)
{
	softhand_t *sh;
	bool havelock;

#ifdef __HAVE_FAST_SOFTINTS
	KASSERT(si->si_lwp == curlwp);
#else
	/* May be running in user context. */
#endif
	KASSERT(si->si_cpu == curcpu());
	KASSERT(si->si_lwp->l_wchan == NULL);
	KASSERT(si->si_active);

	havelock = false;

	/*
	 * Note: due to priority inheritance we may have interrupted a
	 * higher priority LWP.  Since the soft interrupt must be quick
	 * and is non-preemptable, we don't bother yielding.
	 */

	while (!SIMPLEQ_EMPTY(&si->si_q)) {
		/*
		 * Pick the longest waiting handler to run.  We block
		 * interrupts but do not lock in order to do this, as
		 * we are protecting against the local CPU only.
		 */
		sh = SIMPLEQ_FIRST(&si->si_q);
		SIMPLEQ_REMOVE_HEAD(&si->si_q, sh_q);
		sh->sh_pending = 0;
		splx(s);

		/* Run the handler. */
		if ((sh->sh_flags & SOFTINT_MPSAFE) == 0 && !havelock) {
			KERNEL_LOCK(1, l);
			havelock = true;
		}
		(*sh->sh_func)(sh->sh_arg);

		(void)splhigh();
	}

	if (havelock) {
		KERNEL_UNLOCK_ONE(l);
	}

	/*
	 * Unlocked, but only for statistics.
	 * Should be per-CPU to prevent cache ping-pong.
	 */
	uvmexp.softs++;

	si->si_evcnt.ev_count++;
	si->si_active = 0;
}

/*
 * softint_block:
 *
 *	Update statistics when the soft interrupt blocks.
 */
void
softint_block(lwp_t *l)
{
	softint_t *si = l->l_private;

	KASSERT((l->l_pflag & LP_INTR) != 0);
	si->si_evcnt_block.ev_count++;
}

/*
 * schednetisr:
 *
 *	Trigger a legacy network interrupt.  XXX Needs to go away.
 */
void
schednetisr(int isr)
{

	softint_schedule(softint_netisrs[isr]);
}

#ifndef __HAVE_FAST_SOFTINTS

/*
 * softint_init_md:
 *
 *	Slow path: perform machine-dependent initialization.
 */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	softint_t *si;

	*machdep = (1 << level);
	si = l->l_private;

	lwp_lock(l);
	lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_mutex);
	lwp_lock(l);
	/* Cheat and make the KASSERT in softint_thread() happy. */
	si->si_active = 1;
	l->l_stat = LSRUN;
	sched_enqueue(l, false);
	lwp_unlock(l);
}

/*
 * softint_trigger:
 *
 *	Slow path: cause a soft interrupt handler to begin executing.
 *	Called at IPL_HIGH.
 */
void
softint_trigger(uintptr_t machdep)
{
	struct cpu_info *ci;
	lwp_t *l;

	l = curlwp;
	ci = l->l_cpu;
	ci->ci_data.cpu_softints |= machdep;
	if (l == ci->ci_data.cpu_idlelwp) {
		cpu_need_resched(ci, 0);
	} else {
		/* MI equivalent of aston() */
		cpu_signotify(l);
	}
}

/*
 * softint_thread:
 *
 *	Slow path: MI software interrupt dispatch.
 */
void
softint_thread(void *cookie)
{
	softint_t *si;
	lwp_t *l;
	int s;

	l = curlwp;
	si = l->l_private;

	for (;;) {
		/*
		 * Clear pending status and run it.  We must drop the
		 * spl before mi_switch(), since IPL_HIGH may be higher
		 * than IPL_SCHED (and it is not safe to switch at a
		 * higher level).
		 */
		s = splhigh();
		l->l_cpu->ci_data.cpu_softints &= ~si->si_machdep;
		softint_execute(si, l, s);
		splx(s);

		lwp_lock(l);
		l->l_stat = LSIDL;
		mi_switch(l);
	}
}

/*
 * softint_picklwp:
 *
 *	Slow path: called from mi_switch() to pick the highest priority
 *	soft interrupt LWP that needs to run.
 */
lwp_t *
softint_picklwp(void)
{
	struct cpu_info *ci;
	u_int mask;
	softint_t *si;
	lwp_t *l;

	ci = curcpu();
	si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;
	mask = ci->ci_data.cpu_softints;

	if ((mask & (1 << SOFTINT_SERIAL)) != 0) {
		l = si[SOFTINT_SERIAL].si_lwp;
	} else if ((mask & (1 << SOFTINT_NET)) != 0) {
		l = si[SOFTINT_NET].si_lwp;
	} else if ((mask & (1 << SOFTINT_BIO)) != 0) {
		l = si[SOFTINT_BIO].si_lwp;
	} else if ((mask & (1 << SOFTINT_CLOCK)) != 0) {
		l = si[SOFTINT_CLOCK].si_lwp;
	} else {
		panic("softint_picklwp");
	}

	return l;
}

/*
 * softint_overlay:
 *
 *	Slow path: called from lwp_userret() to run a soft interrupt
 *	within the context of a user thread.
 */
void
softint_overlay(void)
{
	struct cpu_info *ci;
	u_int softints;
	softint_t *si;
	pri_t obase;
	lwp_t *l;
	int s;

	l = curlwp;
	ci = l->l_cpu;
	si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	/* Arrange to elevate priority if the LWP blocks. */
	obase = l->l_kpribase;
	l->l_kpribase = PRI_KERNEL_RT;
	l->l_pflag |= LP_INTR;
	s = splhigh();
	while ((softints = ci->ci_data.cpu_softints) != 0) {
		if ((softints & (1 << SOFTINT_SERIAL)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_SERIAL);
			softint_execute(&si[SOFTINT_SERIAL], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_NET)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_NET);
			softint_execute(&si[SOFTINT_NET], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_BIO)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_BIO);
			softint_execute(&si[SOFTINT_BIO], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_CLOCK)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_CLOCK);
			softint_execute(&si[SOFTINT_CLOCK], l, s);
			continue;
		}
	}
	splx(s);
	l->l_pflag &= ~LP_INTR;
	l->l_kpribase = obase;
}

#else	/* !__HAVE_FAST_SOFTINTS */

/*
 * softint_thread:
 *
 *	Fast path: the LWP is switched to without restoring any state,
 *	so we should not arrive here - there is a direct handoff between
 *	the interrupt stub and softint_dispatch().
 */
void
softint_thread(void *cookie)
{

	panic("softint_thread");
}

/*
 * softint_dispatch:
 *
 *	Fast path: entry point from machine-dependent code.
 */
void
softint_dispatch(lwp_t *pinned, int s)
{
	struct timeval now;
	softint_t *si;
	u_int timing;
	lwp_t *l;

	l = curlwp;
	si = l->l_private;

	/*
	 * Note the interrupted LWP, and mark the current LWP as running
	 * before proceeding.  Although this must as a rule be done with
	 * the LWP locked, at this point no external agents will want to
	 * modify the interrupt LWP's state.
	 */
	timing = (softint_timing ? LW_TIMEINTR : 0);
	l->l_switchto = pinned;
	l->l_stat = LSONPROC;
	l->l_flag |= (LW_RUNNING | timing);

	/*
	 * Dispatch the interrupt.  If softints are being timed, charge
	 * for it.
	 */
	if (timing)
		microtime(&l->l_stime);
	softint_execute(si, l, s);
	if (timing) {
		microtime(&now);
		updatertime(l, &now);
		l->l_flag &= ~LW_TIMEINTR;
	}

	/*
	 * If we blocked while handling the interrupt, the pinned LWP is
	 * gone so switch to the idle LWP.  It will select a new LWP to
	 * run.
	 *
	 * We must drop the priority level as switching at IPL_HIGH could
	 * deadlock the system.  We have already set si->si_active = 0,
	 * which means another interrupt at this level can be triggered.
	 * That's not a problem: we are lowering to level 's' which will
	 * prevent softint_dispatch() from being reentered at level 's',
	 * until the priority is finally dropped to IPL_NONE on entry to
	 * the idle loop.
	 */
	l->l_stat = LSIDL;
	if (l->l_switchto == NULL) {
		splx(s);
		pmap_deactivate(l);
		lwp_exit_switchaway(l);
		/* NOTREACHED */
	}
	l->l_switchto = NULL;
	l->l_flag &= ~LW_RUNNING;
}

#endif	/* !__HAVE_FAST_SOFTINTS */