/*
 * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
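 *
 * For example (hypothetical caller, not part of this file), scheduling a
 * thread owned by another cpu never touches that cpu's runq directly:
 *
 *      lwkt_schedule(td);      (td->td_gd != mycpu: the enqueue is
 *                               forwarded via lwkt_send_ipiq3() to the
 *                               owning cpu)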
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/kinfo.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/spinlock.h>
#include <sys/ktr.h>
#include <sys/indefinite.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/indefinite2.h>

#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/clock.h>

#ifdef _KERNEL_VIRTUAL
#include <pthread.h>
#endif

#define LOOPMASK

#if !defined(KTR_CTXSW)
#define KTR_CTXSW KTR_ALL
#endif
KTR_INFO_MASTER(ctxsw);
KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "#cpu[%d].td = %p", int cpu, struct thread *td);
KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "#cpu[%d].td = %p", int cpu, struct thread *td);
KTR_INFO(KTR_CTXSW, ctxsw, newtd, 2, "#threads[%p].name = %s", struct thread *td, char *comm);
KTR_INFO(KTR_CTXSW, ctxsw, deadtd, 3, "#threads[%p].name = <dead>", struct thread *td);

static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads");

#ifdef INVARIANTS
static int panic_on_cscount = 0;
#endif
#ifdef DEBUG_LWKT_THREAD
static int64_t switch_count = 0;
static int64_t preempt_hit = 0;
static int64_t preempt_miss = 0;
static int64_t preempt_weird = 0;
#endif
static int lwkt_use_spin_port;
__read_mostly static struct objcache *thread_cache;
int cpu_mwait_spin = 0;

static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame);
static void lwkt_setcpu_remote(void *arg);

/*
 * We can make all thread ports use the spin backend instead of the thread
 * backend.  This should only be set to debug the spin backend.
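 *
 * A minimal sketch of setting it (boot-time tunable, matching the
 * TUNABLE_INT() registration below):
 *
 *      # /boot/loader.conf
 *      lwkt.use_spin_port="1"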
 */
TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port);

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0,
    "Panic if attempting to switch lwkt's while mastering cpusync");
#endif
#ifdef DEBUG_LWKT_THREAD
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0,
    "Number of switched threads");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0,
    "Successful preemption events");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0,
    "Failed preemption events");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0,
    "Number of preempted threads.");
#endif
extern int lwkt_sched_debug;
int lwkt_sched_debug = 0;
SYSCTL_INT(_lwkt, OID_AUTO, sched_debug, CTLFLAG_RW,
    &lwkt_sched_debug, 0, "Scheduler debug");
__read_mostly static u_int lwkt_spin_loops = 10;
SYSCTL_UINT(_lwkt, OID_AUTO, spin_loops, CTLFLAG_RW,
    &lwkt_spin_loops, 0, "Scheduler spin loops until sorted decon");
__read_mostly static int preempt_enable = 1;
SYSCTL_INT(_lwkt, OID_AUTO, preempt_enable, CTLFLAG_RW,
    &preempt_enable, 0, "Enable preemption");
static int lwkt_cache_threads = 0;
SYSCTL_INT(_lwkt, OID_AUTO, cache_threads, CTLFLAG_RD,
    &lwkt_cache_threads, 0, "thread+kstack cache");

/*
 * These helper procedures handle the runq; they can only be called from
 * within a critical section.
 *
 * WARNING!  Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once
 * SMP is live, enqueueing and dequeueing only occur on the current cpu.
 */
static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        struct globaldata *gd = td->td_gd;

        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
        --gd->gd_tdrunqcount;
        if (TAILQ_FIRST(&gd->gd_tdrunq) == NULL)
            atomic_clear_int(&gd->gd_reqflags, RQF_RUNNING);
    }
}

/*
 * Priority enqueue.
 *
 * There are a limited number of lwkt threads runnable since user
 * processes only schedule one at a time per cpu.  However, there can
 * be many user processes in kernel mode exiting from a tsleep() which
 * become runnable.
 *
 * We scan the queue in both directions to help deal with degenerate
 * situations when hundreds or thousands (or more) threads are runnable.
 *
 * NOTE: lwkt_schedulerclock() will force a round-robin based on td_pri
 *       and will ignore user priority.  This is to ensure that user
 *       threads in kernel mode get cpu at some point regardless of what
 *       the user scheduler thinks.
 */
static __inline
void
_lwkt_enqueue(thread_t td)
{
    thread_t xtd;       /* forward scan */
    thread_t rtd;       /* reverse scan */

    if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) {
        struct globaldata *gd = td->td_gd;

        td->td_flags |= TDF_RUNQ;
        xtd = TAILQ_FIRST(&gd->gd_tdrunq);
        if (xtd == NULL) {
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
            atomic_set_int(&gd->gd_reqflags, RQF_RUNNING);
        } else {
            /*
             * NOTE: td_upri - higher numbers are more desirable, same
             *       sense as td_pri (typically reversed from lwp_upri).
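             *
             *       A worked example (hypothetical values): against a
             *       queue holding (pri 30, upri 5), (pri 20, upri 3),
             *       (pri 20, upri 1), a new thread at (pri 20, upri 3)
             *       is inserted after the existing (pri 20, upri 3)
             *       entry (round-robin), while a new (pri 20, upri 4)
             *       would be inserted before it.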
             *
             * In the equal priority case we want the best selection
             * at the beginning so the less desirable selections know
             * that they have to setrunqueue/go-to-another-cpu, even
             * though it means switching back to the 'best' selection.
             * This also avoids degenerate situations when many threads
             * are runnable or waking up at the same time.
             *
             * If upri matches exactly, place at end/round-robin.
             */
            rtd = TAILQ_LAST(&gd->gd_tdrunq, lwkt_queue);

            while (xtd &&
                   (xtd->td_pri > td->td_pri ||
                    (xtd->td_pri == td->td_pri &&
                     xtd->td_upri >= td->td_upri))) {
                xtd = TAILQ_NEXT(xtd, td_threadq);

                /*
                 * Doing a reverse scan at the same time is an optimization
                 * for the insert-closer-to-tail case that avoids having to
                 * scan the entire list.  This situation can occur when
                 * thousands of threads are woken up at the same time.
                 */
                if (rtd->td_pri > td->td_pri ||
                    (rtd->td_pri == td->td_pri &&
                     rtd->td_upri >= td->td_upri)) {
                    TAILQ_INSERT_AFTER(&gd->gd_tdrunq, rtd, td, td_threadq);
                    goto skip;
                }
                rtd = TAILQ_PREV(rtd, lwkt_queue, td_threadq);
            }
            if (xtd)
                TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
            else
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
        }
skip:
        ++gd->gd_tdrunqcount;

        /*
         * Request a LWKT reschedule if we are now at the head of the queue.
         */
        if (TAILQ_FIRST(&gd->gd_tdrunq) == td)
            need_lwkt_resched();
    }
}

static boolean_t
_lwkt_thread_ctor(void *obj, void *privdata, int ocflags)
{
    struct thread *td = (struct thread *)obj;

    td->td_kstack = NULL;
    td->td_kstack_size = 0;
    td->td_flags = TDF_ALLOCATED_THREAD;
    td->td_mpflags = 0;
    return (1);
}

static void
_lwkt_thread_dtor(void *obj, void *privdata)
{
    struct thread *td = (struct thread *)obj;

    KASSERT(td->td_flags & TDF_ALLOCATED_THREAD,
        ("_lwkt_thread_dtor: not allocated from objcache"));
    KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack &&
        td->td_kstack_size > 0,
        ("_lwkt_thread_dtor: corrupted stack"));
    kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
    td->td_kstack = NULL;
    td->td_flags = 0;
}

/*
 * Initialize the lwkt subsystem.
 *
 * Nominally cache up to 32 thread + kstack structures.  Cache more on
 * systems with a lot of cpu cores.
 */
static void
lwkt_init(void)
{
    TUNABLE_INT("lwkt.cache_threads", &lwkt_cache_threads);
    if (lwkt_cache_threads == 0) {
        lwkt_cache_threads = ncpus * 4;
        if (lwkt_cache_threads < 32)
            lwkt_cache_threads = 32;
    }
    thread_cache = objcache_create_mbacked(
                        M_THREAD, sizeof(struct thread),
                        0, lwkt_cache_threads,
                        _lwkt_thread_ctor, _lwkt_thread_dtor, NULL);
}
SYSINIT(lwkt_init, SI_BOOT2_LWKT_INIT, SI_ORDER_FIRST, lwkt_init, NULL);

/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * function.
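 *
 * A minimal sketch (hypothetical caller): the classic block/wakeup
 * interlock built on these primitives looks like
 *
 *      crit_enter();
 *      lwkt_deschedule_self(curthread);
 *      if (wakeup_condition_already_true)
 *              lwkt_schedule_self(curthread);
 *      lwkt_switch();
 *      crit_exit();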
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(thread_t td)
{
    KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
    crit_enter_quick(td);
    KASSERT(td != &td->td_gd->gd_idlethread,
        ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
    KKASSERT(td->td_lwp == NULL ||
        (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
    _lwkt_enqueue(td);
    crit_exit_quick(td);
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(thread_t td)
{
    crit_enter_quick(td);
    _lwkt_dequeue(td);
    crit_exit_quick(td);
}

/*
 * LWKTs operate on a per-cpu basis
 *
 * WARNING!  Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    TAILQ_INIT(&gd->gd_tdrunq);
    TAILQ_INIT(&gd->gd_tdallq);
    lockinit(&gd->gd_sysctllock, "sysctl", 0, LK_CANRECURSE);
}

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.  If the target cpu is
 * -1 a cpu is selected via a rotator.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags)
{
    static int cpu_rotator;
    globaldata_t gd = mycpu;
    void *stack;

    /*
     * If static thread storage is not supplied allocate a thread.  Reuse
     * a cached free thread if possible.  gd_freetd is used to keep an
     * exiting thread intact through the exit.
     */
    if (td == NULL) {
        crit_enter_gd(gd);
        if ((td = gd->gd_freetd) != NULL) {
            KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
                                      TDF_RUNQ)) == 0);
            gd->gd_freetd = NULL;
        } else {
            td = objcache_get(thread_cache, M_WAITOK);
            KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
                                      TDF_RUNQ)) == 0);
        }
        crit_exit_gd(gd);
        KASSERT((td->td_flags &
                 (TDF_ALLOCATED_THREAD|TDF_RUNNING|TDF_PREEMPT_LOCK)) ==
                TDF_ALLOCATED_THREAD,
            ("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags));
        flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK);
    }

    /*
     * Try to reuse cached stack.
     */
    if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
        if (flags & TDF_ALLOCATED_STACK) {
            kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size);
            stack = NULL;
        }
    }
    if (stack == NULL) {
        if (cpu < 0)
            stack = (void *)kmem_alloc_stack(&kernel_map, stksize, 0);
        else
            stack = (void *)kmem_alloc_stack(&kernel_map, stksize,
                                             KM_CPU(cpu));
        flags |= TDF_ALLOCATED_STACK;
    }
    if (cpu < 0) {
        cpu = ++cpu_rotator;
        cpu_ccfence();
        cpu = (uint32_t)cpu % (uint32_t)ncpus;
    }
    lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
    return(td);
}

/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * All threads start out in a critical section at a priority of
 * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
 * appropriate.
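 * (For example, a hypothetical caller could drop a new thread to
 * TDPRI_KERN_USER with lwkt_setpri_initial() before scheduling it.)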
 * This function may send an IPI message when the requested cpu is not
 * the current cpu and consequently gd_tdallq may not be initialized
 * synchronously from the point of view of the originating cpu.
 *
 * NOTE! We have to be careful in regards to creating threads for other cpus
 * if SMP has not yet been activated.
 */
static void
lwkt_init_thread_remote(void *arg)
{
    thread_t td = arg;

    /*
     * Protected by critical section held by IPI dispatch
     */
    TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
}

/*
 * lwkt core thread structural initialization.
 *
 * NOTE: All threads are initialized as mpsafe threads.
 */
void
lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
                 struct globaldata *gd)
{
    globaldata_t mygd = mycpu;

    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_kstack_size = stksize;
    td->td_flags = flags;
    td->td_mpflags = 0;
    td->td_type = TD_TYPE_GENERIC;
    td->td_gd = gd;
    td->td_pri = TDPRI_KERN_DAEMON;
    td->td_critcount = 1;
    td->td_toks_have = NULL;
    td->td_toks_stop = &td->td_toks_base;
    if (lwkt_use_spin_port || (flags & TDF_FORCE_SPINPORT)) {
        lwkt_initport_spin(&td->td_msgport, td,
                           (flags & TDF_FIXEDCPU) ? TRUE : FALSE);
    } else {
        lwkt_initport_thread(&td->td_msgport, td);
    }
    pmap_init_thread(td);
    /*
     * Normally initializing a thread for a remote cpu requires sending an
     * IPI.  However, the idlethread is set up before the other cpus are
     * activated so we have to treat it as a special case.  XXX manipulation
     * of gd_tdallq requires the BGL.
     */
    if (gd == mygd || td == &gd->gd_idlethread) {
        crit_enter_gd(mygd);
        TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
        crit_exit_gd(mygd);
    } else {
        lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
    }
    dsched_enter_thread(td);
}

void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
    __va_list va;

    __va_start(va, ctl);
    kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
    __va_end(va);
    KTR_LOG(ctxsw_newtd, td, td->td_comm);
}

/*
 * Prevent the thread from getting destroyed.  Note that unlike PHOLD/PRELE
 * this does not prevent the thread from migrating to another cpu so the
 * gd_tdallq state is not protected by this.
 */
void
lwkt_hold(thread_t td)
{
    atomic_add_int(&td->td_refs, 1);
}

void
lwkt_rele(thread_t td)
{
    KKASSERT(td->td_refs > 0);
    atomic_add_int(&td->td_refs, -1);
}

void
lwkt_free_thread(thread_t td)
{
    KKASSERT(td->td_refs == 0);
    KKASSERT((td->td_flags & (TDF_RUNNING | TDF_PREEMPT_LOCK |
                              TDF_RUNQ | TDF_TSLEEPQ)) == 0);
    if (td->td_flags & TDF_ALLOCATED_THREAD) {
        objcache_put(thread_cache, td);
    } else if (td->td_flags & TDF_ALLOCATED_STACK) {
        /* client-allocated struct with internally allocated stack */
        KASSERT(td->td_kstack && td->td_kstack_size > 0,
            ("lwkt_free_thread: corrupted stack"));
        kmem_free(&kernel_map, (vm_offset_t)td->td_kstack,
                  td->td_kstack_size);
        td->td_kstack = NULL;
        td->td_kstack_size = 0;
    }

    KTR_LOG(ctxsw_deadtd, td);
}


/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
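 * (lwkt_switch() itself calls crit_enter_gd() shortly after entry and
 * pairs it with the crit_exit_quick() at the bottom of the function.)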
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * PREEMPTION NOTE: Preemption occurs via lwkt_preempt().  lwkt_switch()
 * is not called by the current thread in the preemption case, only when
 * the preempting thread blocks (in order to return to the original thread).
 *
 * SPECIAL NOTE ON SWITCH ATOMICITY: Certain operations such as thread
 * migration and tsleep deschedule the current lwkt thread and call
 * lwkt_switch().  In particular, the target cpu of the migration fully
 * expects the thread to become non-runnable and can deadlock against
 * cpusync operations if we run any IPIs prior to switching the thread out.
 *
 * WE MUST BE VERY CAREFUL NOT TO RUN SPLZ DIRECTLY OR INDIRECTLY IF
 * THE CURRENT THREAD HAS BEEN DESCHEDULED!
 */
void
lwkt_switch(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    thread_t ntd;
    thread_t xtd;
    int upri;
#ifdef LOOPMASK
    uint64_t tsc_base = rdtsc();
#endif

    KKASSERT(gd->gd_processing_ipiq == 0);
    KKASSERT(td->td_flags & TDF_RUNNING);

    /*
     * Switching from within a 'fast' (non thread switched) interrupt or IPI
     * is illegal.  However, we may have to do it anyway if we hit a fatal
     * kernel trap or we have panicked.
     *
     * If this case occurs save and restore the interrupt nesting level.
     */
    if (gd->gd_intr_nesting_level) {
        int savegdnest;
        int savegdtrap;

        if (gd->gd_trap_nesting_level == 0 && panic_cpu_gd != mycpu) {
            panic("lwkt_switch: Attempt to switch from a "
                  "fast interrupt, ipi, or hard code section, "
                  "td %p\n",
                  td);
        } else {
            savegdnest = gd->gd_intr_nesting_level;
            savegdtrap = gd->gd_trap_nesting_level;
            gd->gd_intr_nesting_level = 0;
            gd->gd_trap_nesting_level = 0;
            if ((td->td_flags & TDF_PANICWARN) == 0) {
                td->td_flags |= TDF_PANICWARN;
                kprintf("Warning: thread switch from interrupt, IPI, "
                        "or hard code section.\n"
                        "thread %p (%s)\n", td, td->td_comm);
                print_backtrace(-1);
            }
            lwkt_switch();
            gd->gd_intr_nesting_level = savegdnest;
            gd->gd_trap_nesting_level = savegdtrap;
            return;
        }
    }

    /*
     * Release our current user process designation if we are blocking
     * or if a user reschedule was requested.
     *
     * NOTE: This function is NOT called if we are switching into or
     *       returning from a preemption.
     *
     * NOTE: Releasing our current user process designation may cause
     *       it to be assigned to another thread, which in turn will
     *       cause us to block in the usched acquire code when we attempt
     *       to return to userland.
     *
     * NOTE: On SMP systems this can be very nasty when heavy token
     *       contention is present so we want to be careful not to
     *       release the designation gratuitously.
     */
    if (td->td_release &&
        (user_resched_wanted() || (td->td_flags & TDF_RUNQ) == 0)) {
        td->td_release(td);
    }

    /*
     * Release all tokens.
     * Once we do this we must remain in the critical section and cannot
     * run IPIs or other interrupts until we switch away, because they may
     * implode if they try to get a token using our thread context.
     */
    crit_enter_gd(gd);
    if (TD_TOKS_HELD(td))
        lwkt_relalltokens(td);

    /*
     * We had better not be holding any spin locks, but don't get into an
     * endless panic loop.
     */
    KASSERT(gd->gd_spinlocks == 0 || panicstr != NULL,
        ("lwkt_switch: still holding %d exclusive spinlocks!",
         gd->gd_spinlocks));

#ifdef INVARIANTS
    if (td->td_cscount) {
        kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
                td);
        if (panic_on_cscount)
            panic("switching while mastering cpusync");
    }
#endif

    /*
     * If we had preempted another thread on this cpu, resume the preempted
     * thread.  This occurs transparently, whether the preempted thread
     * was scheduled or not (it may have been preempted after descheduling
     * itself).
     *
     * We have to setup the MP lock for the original thread after backing
     * out the adjustment that was made to curthread when the original
     * was preempted.
     */
    if ((ntd = td->td_preempted) != NULL) {
        KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
        ntd->td_flags |= TDF_PREEMPT_DONE;
        ntd->td_contended = 0;          /* reset contended */

        /*
         * The interrupt may have woken a thread up, we need to properly
         * set the reschedule flag if the originally interrupted thread is
         * at a lower priority.
         *
         * NOTE: The interrupt may not have descheduled ntd.
         *
         * NOTE: We do not reschedule if there are no threads on the runq.
         *       (ntd could be the idlethread).
         */
        xtd = TAILQ_FIRST(&gd->gd_tdrunq);
        if (xtd && xtd != ntd)
            need_lwkt_resched();
        goto havethread_preempted;
    }

    /*
     * Figure out switch target.  If we cannot switch to our desired target
     * look for a thread that we can switch to.
     *
     * NOTE! The limited spin loop and related parameters are extremely
     *       important for system performance, particularly for pipes and
     *       concurrent conflicting VM faults.
     */
    clear_lwkt_resched();
    ntd = TAILQ_FIRST(&gd->gd_tdrunq);

    if (ntd) {
        do {
            if (TD_TOKS_NOT_HELD(ntd) ||
                lwkt_getalltokens(ntd, (ntd->td_contended > lwkt_spin_loops)))
            {
                goto havethread;
            }
            ++ntd->td_contended;        /* overflow ok */
            if (gd->gd_indefinite.type == 0)
                indefinite_init(&gd->gd_indefinite, NULL, 0, 't');
#ifdef LOOPMASK
            if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
                kprintf("lwkt_switch: excessive contended %d "
                        "thread %p\n", ntd->td_contended, ntd);
                tsc_base = rdtsc();
            }
#endif
        } while (ntd->td_contended < (lwkt_spin_loops >> 1));
        upri = ntd->td_upri;

        /*
         * Bleh, the thread we wanted to switch to has a contended token.
         * See if we can switch to another thread.
         *
         * We generally don't want to do this because it represents a
         * priority inversion, but contending tokens on the same cpu can
         * cause real problems if we don't, now that we have an exclusive
         * priority mechanism over shared for tokens.
         *
         * The solution is to allow threads with pending tokens to compete
         * for them (a lower priority thread will get less cpu once it
         * returns from the kernel anyway).  If a thread does not have
         * any contending tokens, we go by td_pri and upri.
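         *
         * A worked example (hypothetical): if the head of the runq holds
         * a contended token, the scan below may settle on a lower priority
         * token-free thread.  upri ratchets upward along the way, so a
         * token-free thread is only skipped while it is strictly less
         * desirable than the best candidate seen so far.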
         */
        while ((ntd = TAILQ_NEXT(ntd, td_threadq)) != NULL) {
            if (TD_TOKS_NOT_HELD(ntd) &&
                ntd->td_pri < TDPRI_KERN_LPSCHED && upri > ntd->td_upri) {
                continue;
            }
            if (upri < ntd->td_upri)
                upri = ntd->td_upri;

            /*
             * Try this one.
             */
            if (TD_TOKS_NOT_HELD(ntd) ||
                lwkt_getalltokens(ntd,
                                  (ntd->td_contended > lwkt_spin_loops))) {
                goto havethread;
            }
            ++ntd->td_contended;        /* overflow ok */
        }

        /*
         * Fall through, switch to idle thread to get us out of the current
         * context.  Since we were contended, prevent HLT by flagging a
         * LWKT reschedule.
         */
        need_lwkt_resched();
    }

    /*
     * We either contended on ntd or the runq is empty.  We must switch
     * through the idle thread to get out of the current context.
     */
    ntd = &gd->gd_idlethread;
    if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
        ASSERT_NO_TOKENS_HELD(ntd);
    cpu_time.cp_msg[0] = 0;
    goto haveidle;

havethread:
    /*
     * Clear gd_idle_repeat when doing a normal switch to a non-idle
     * thread.
     */
    ntd->td_wmesg = NULL;
    ntd->td_contended = 0;      /* reset once scheduled */
    ++gd->gd_cnt.v_swtch;
    gd->gd_idle_repeat = 0;

    /*
     * If we were busy waiting, record the final disposition
     */
    if (gd->gd_indefinite.type)
        indefinite_done(&gd->gd_indefinite);

havethread_preempted:
    /*
     * If the new target does not need the MP lock and we are holding it,
     * release the MP lock.  If the new target requires the MP lock we have
     * already acquired it for the target.
     */
    ;
haveidle:
    KASSERT(ntd->td_critcount,
            ("priority problem in lwkt_switch %d %d",
            td->td_critcount, ntd->td_critcount));

    if (td != ntd) {
        /*
         * Execute the actual thread switch operation.  This function
         * returns to the current thread and returns the previous thread
         * (which may be different from the thread we switched to).
         *
         * We are responsible for marking ntd as TDF_RUNNING.
         */
        KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
#ifdef DEBUG_LWKT_THREAD
        ++switch_count;
#endif
        KTR_LOG(ctxsw_sw, gd->gd_cpuid, ntd);
        ntd->td_flags |= TDF_RUNNING;
        lwkt_switch_return(td->td_switch(ntd));
        /* ntd invalid, td_switch() can return a different thread_t */
    }

    /*
     * catch-all.  XXX is this strictly needed?
     */
    splz_check();

    /* NOTE: current cpu may have changed after switch */
    crit_exit_quick(td);
}

/*
 * Called by assembly in the td_switch (thread restore path) for thread
 * bootstrap cases which do not 'return' to lwkt_switch().
 */
void
lwkt_switch_return(thread_t otd)
{
    globaldata_t rgd;
#ifdef LOOPMASK
    uint64_t tsc_base = rdtsc();
#endif
    int exiting;

    exiting = otd->td_flags & TDF_EXITING;
    cpu_ccfence();

    /*
     * Check if otd was migrating.  Now that we are on ntd we can finish
     * up the migration.  This is a bit messy but it is the only place
     * where td is known to be fully descheduled.
     *
     * We can only activate the migration if otd was migrating but not
     * held on the cpu due to a preemption chain.  We still have to
     * clear TDF_RUNNING on the old thread either way.
     *
     * We are responsible for clearing the previously running thread's
     * TDF_RUNNING.
     */
    if ((rgd = otd->td_migrate_gd) != NULL &&
        (otd->td_flags & TDF_PREEMPT_LOCK) == 0) {
        KKASSERT((otd->td_flags & (TDF_MIGRATING | TDF_RUNNING)) ==
                 (TDF_MIGRATING | TDF_RUNNING));
        otd->td_migrate_gd = NULL;
        otd->td_flags &= ~TDF_RUNNING;
        lwkt_send_ipiq(rgd, lwkt_setcpu_remote, otd);
    } else {
        otd->td_flags &= ~TDF_RUNNING;
    }

    /*
     * Final exit validations (see lwp_wait()).  Note that otd becomes
     * invalid the *instant* we set TDF_MP_EXITSIG.
     *
     * Use the EXITING status loaded from before we clear TDF_RUNNING,
     * because if it is not set otd becomes invalid the instant we clear
     * TDF_RUNNING on it (otherwise, if the system is fast enough, we
     * might 'steal' TDF_EXITING from another switch-return!).
     */
    while (exiting) {
        u_int mpflags;

        mpflags = otd->td_mpflags;
        cpu_ccfence();

        if (mpflags & TDF_MP_EXITWAIT) {
            if (atomic_cmpset_int(&otd->td_mpflags, mpflags,
                                  mpflags | TDF_MP_EXITSIG)) {
                wakeup(otd);
                break;
            }
        } else {
            if (atomic_cmpset_int(&otd->td_mpflags, mpflags,
                                  mpflags | TDF_MP_EXITSIG)) {
                /* no waiter yet, no wakeup needed */
                break;
            }
        }

#ifdef LOOPMASK
        if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
            kprintf("lwkt_switch_return: excessive TDF_EXITING "
                    "thread %p\n", otd);
            tsc_base = rdtsc();
        }
#endif
    }
}

/*
 * Request that the target thread preempt the current thread.  Preemption
 * can only occur if:
 *
 *      - Our critical section is the one that we were called with
 *      - The relative priority of the target thread is higher
 *      - The target is not excessively interrupt-nested via td_nest_count
 *      - The target thread holds no tokens.
 *      - The target thread is not already scheduled and belongs to the
 *        current cpu.
 *      - The current thread is not holding any spin-locks.
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critcount is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * Preemption is typically limited to interrupt threads.
 *
 * Operation works in a fairly straight-forward manner.  The normal
 * scheduling code is bypassed and we switch directly to the target
 * thread.  When the target thread attempts to block or switch away
 * code at the base of lwkt_switch() will switch directly back to our
 * thread.  Our thread is able to retain whatever tokens it holds and
 * if the target needs one of them the target will switch back to us
 * and reschedule itself normally.
 */
void
lwkt_preempt(thread_t ntd, int critcount)
{
    struct globaldata *gd = mycpu;
    thread_t xtd;
    thread_t td;
    int save_gd_intr_nesting_level;

    /*
     * The caller has put us in a critical section.  We can only preempt
     * if the caller of the caller was not in a critical section (basically
     * a local interrupt), as determined by the 'critcount' parameter.  We
     * also can't preempt if the caller is holding any spinlocks (even if
     * he isn't in a critical section).  This also handles the tokens test.
     *
     * YYY The target thread must be in a critical section (else it must
     * inherit our critical section?  I dunno yet).
     */
    KASSERT(ntd->td_critcount, ("BADCRIT0 %d", ntd->td_pri));

    td = gd->gd_curthread;
    if (preempt_enable == 0) {
#ifdef DEBUG_LWKT_THREAD
        ++preempt_miss;
#endif
        return;
    }
    if (ntd->td_pri <= td->td_pri) {
#ifdef DEBUG_LWKT_THREAD
        ++preempt_miss;
#endif
        return;
    }
    if (td->td_critcount > critcount) {
#ifdef DEBUG_LWKT_THREAD
        ++preempt_miss;
#endif
        return;
    }
    if (td->td_nest_count >= 2) {
#ifdef DEBUG_LWKT_THREAD
        ++preempt_miss;
#endif
        return;
    }
    if (td->td_cscount) {
#ifdef DEBUG_LWKT_THREAD
        ++preempt_miss;
#endif
        return;
    }
    if (ntd->td_gd != gd) {
#ifdef DEBUG_LWKT_THREAD
        ++preempt_miss;
#endif
        return;
    }

    /*
     * We don't have to check spinlocks here as they will also bump
     * td_critcount.
     *
     * Do not try to preempt if the target thread is holding any tokens.
     * We could try to acquire the tokens but this case is so rare there
     * is no need to support it.
     */
    KKASSERT(gd->gd_spinlocks == 0);

    if (TD_TOKS_HELD(ntd)) {
#ifdef DEBUG_LWKT_THREAD
        ++preempt_miss;
#endif
        return;
    }
    if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
#ifdef DEBUG_LWKT_THREAD
        ++preempt_weird;
#endif
        return;
    }
    if (ntd->td_preempted) {
#ifdef DEBUG_LWKT_THREAD
        ++preempt_hit;
#endif
        return;
    }
    KKASSERT(gd->gd_processing_ipiq == 0);

    /*
     * Since we are able to preempt the current thread, there is no need to
     * call need_lwkt_resched().
     *
     * We must temporarily clear gd_intr_nesting_level around the switch
     * since switchouts from the target thread are allowed (they will just
     * return to our thread), and since the target thread has its own stack.
     *
     * A preemption must switch back to the original thread, assert the
     * case.
     */
#ifdef DEBUG_LWKT_THREAD
    ++preempt_hit;
#endif
    ntd->td_preempted = td;
    td->td_flags |= TDF_PREEMPT_LOCK;
    KTR_LOG(ctxsw_pre, gd->gd_cpuid, ntd);
    save_gd_intr_nesting_level = gd->gd_intr_nesting_level;
    gd->gd_intr_nesting_level = 0;

    KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
    ntd->td_flags |= TDF_RUNNING;
    xtd = td->td_switch(ntd);
    KKASSERT(xtd == ntd);
    lwkt_switch_return(xtd);
    gd->gd_intr_nesting_level = save_gd_intr_nesting_level;

    KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
    ntd->td_preempted = NULL;
    td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
}

/*
 * Conditionally call splz() if gd_reqflags indicates work is pending.
 * This will work inside a critical section but not inside a hard code
 * section.
 *
 * (self contained on a per cpu basis)
 */
void
splz_check(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) &&
        gd->gd_intr_nesting_level == 0 &&
        td->td_nest_count < 2)
    {
        splz();
    }
}

/*
 * This version is integrated into crit_exit, reqflags has already
 * been tested but td_critcount has not.
 *
 * We only want to execute the splz() on the 1->0 transition of
 * critcount and not in a hard code section or if too deeply nested.
 *
 * NOTE: gd->gd_spinlocks is implied to be 0 when td_critcount is 0.
 */
void
lwkt_maybe_splz(thread_t td)
{
    globaldata_t gd = td->td_gd;

    if (td->td_critcount == 0 &&
        gd->gd_intr_nesting_level == 0 &&
        td->td_nest_count < 2)
    {
        splz();
    }
}

/*
 * Drivers which set up processing co-threads can call this function to
 * run the co-thread at a higher priority and to allow it to preempt
 * normal threads.
 */
void
lwkt_set_interrupt_support_thread(void)
{
    thread_t td = curthread;

    lwkt_setpri_self(TDPRI_INT_SUPPORT);
    td->td_flags |= TDF_INTTHREAD;
    td->td_preemptable = lwkt_preempt;
}


/*
 * This function is used to negotiate a passive release of the current
 * process/lwp designation with the user scheduler, allowing the user
 * scheduler to schedule another user thread.  The related kernel thread
 * (curthread) continues running in the released state.
 */
void
lwkt_passive_release(struct thread *td)
{
    struct lwp *lp = td->td_lwp;

    td->td_release = NULL;
    lwkt_setpri_self(TDPRI_KERN_USER);

    lp->lwp_proc->p_usched->release_curproc(lp);
}


/*
 * This implements a LWKT yield, allowing a kernel thread to yield to other
 * kernel threads at the same or higher priority.  This function can be
 * called in a tight loop and will typically only yield once per tick.
 *
 * Most kernel threads run at the same priority in order to allow equal
 * sharing.
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    /*
     * Should never be called with spinlocks held but there is a path
     * via ACPI where it might happen.
     */
    if (gd->gd_spinlocks)
        return;

    /*
     * Safe to call splz if we are not too-heavily nested.
     */
    if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
        splz();

    /*
     * Caller allows switching
     */
    if (lwkt_resched_wanted()) {
        atomic_set_int(&td->td_mpflags, TDF_MP_DIDYIELD);
        lwkt_schedule_self(td);
        lwkt_switch();
    }
}

/*
 * The quick version processes pending interrupts and higher-priority
 * LWKT threads but will not round-robin same-priority LWKT threads.
 *
 * When called while attempting to return to userland the only same-pri
 * threads are the ones which have already tried to become the current
 * user process.
 */
void
lwkt_yield_quick(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
        splz();
    if (lwkt_resched_wanted()) {
        crit_enter();
        if (TAILQ_FIRST(&gd->gd_tdrunq) == td) {
            clear_lwkt_resched();
        } else {
            atomic_set_int(&td->td_mpflags, TDF_MP_DIDYIELD);
            lwkt_schedule_self(curthread);
            lwkt_switch();
        }
        crit_exit();
    }
}

/*
 * This yield is designed for kernel threads with a user context.
 *
 * The kernel acting on behalf of the user is potentially cpu-bound;
 * this function will efficiently allow other threads to run and also
 * switch to other processes by releasing.
 *
 * The lwkt_user_yield() function is designed to have very low overhead
 * if no yield is determined to be needed.
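 *
 * A minimal sketch (hypothetical caller): a cpu-bound kernel loop working
 * on behalf of a user process would typically do
 *
 *      while (work_remaining) {
 *              do_a_chunk_of_work();
 *              lwkt_user_yield();
 *      }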
 */
void
lwkt_user_yield(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    /*
     * Should never be called with spinlocks held but there is a path
     * via ACPI where it might happen.
     */
    if (gd->gd_spinlocks)
        return;

    /*
     * Always run any pending interrupts in case we are in a critical
     * section.
     */
    if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
        splz();

    /*
     * Switch (which forces a release) if another kernel thread needs
     * the cpu, if userland wants us to resched, or if our kernel
     * quantum has run out.
     */
    if (lwkt_resched_wanted() ||
        user_resched_wanted())
    {
        lwkt_switch();
    }

#if 0
    /*
     * Reacquire the current process if we are released.
     *
     * XXX not implemented atm.  The kernel may be holding locks and such,
     *     so we want the thread to continue to receive cpu.
     */
    if (td->td_release == NULL && lp) {
        lp->lwp_proc->p_usched->acquire_curproc(lp);
        td->td_release = lwkt_passive_release;
        lwkt_setpri_self(TDPRI_USER_NORM);
    }
#endif
}

/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * We have a little helper inline function which does additional work after
 * the thread has been enqueued, including dealing with preemption and
 * setting need_lwkt_resched() (which prevents the kernel from returning
 * to userland until it has processed higher priority threads).
 *
 * It is possible for this routine to be called after a failed _enqueue
 * (due to the target thread migrating, sleeping, or otherwise blocked).
 * We have to check that the thread is actually on the run queue!
 */
static __inline
void
_lwkt_schedule_post(globaldata_t gd, thread_t ntd, int ccount)
{
    if (ntd->td_flags & TDF_RUNQ) {
        if (ntd->td_preemptable) {
            ntd->td_preemptable(ntd, ccount);   /* YYY +token */
        }
    }
}

static __inline
void
_lwkt_schedule(thread_t td)
{
    globaldata_t mygd = mycpu;

    KASSERT(td != &td->td_gd->gd_idlethread,
        ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
    KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
    crit_enter_gd(mygd);
    KKASSERT(td->td_lwp == NULL ||
        (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);

    if (td == mygd->gd_curthread) {
        _lwkt_enqueue(td);
    } else {
        /*
         * If we own the thread, there is no race (since we are in a
         * critical section).  If we do not own the thread there might
         * be a race but the target cpu will deal with it.
         */
        if (td->td_gd == mygd) {
            _lwkt_enqueue(td);
            _lwkt_schedule_post(mygd, td, 1);
        } else {
            lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0);
        }
    }
    crit_exit_gd(mygd);
}

void
lwkt_schedule(thread_t td)
{
    _lwkt_schedule(td);
}

void
lwkt_schedule_noresched(thread_t td)    /* XXX not impl */
{
    _lwkt_schedule(td);
}

/*
 * When scheduled remotely, if frame != NULL the IPIQ is being run via
 * doreti or an interrupt and preemption can be allowed.
 *
 * To allow preemption we have to drop the critical section so only
 * one is present in _lwkt_schedule_post.
 */
static void
lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame)
{
    thread_t td = curthread;
    thread_t ntd = arg;

    if (frame && ntd->td_preemptable) {
        crit_exit_noyield(td);
        _lwkt_schedule(ntd);
        crit_enter_quick(td);
    } else {
        _lwkt_schedule(ntd);
    }
}

/*
 * Thread migration using a 'Pull' method.  The thread may or may not be
 * the current thread.  It MUST be descheduled and in a stable state.
 * lwkt_giveaway() must be called on the cpu owning the thread.
 *
 * At any point after lwkt_giveaway() is called, the target cpu may
 * 'pull' the thread by calling lwkt_acquire().
 *
 * We have to make sure the thread is not sitting on a per-cpu tsleep
 * queue or it will blow up when it moves to another cpu.
 *
 * MPSAFE - must be called under very specific conditions.
 */
void
lwkt_giveaway(thread_t td)
{
    globaldata_t gd = mycpu;

    crit_enter_gd(gd);
    if (td->td_flags & TDF_TSLEEPQ)
        tsleep_remove(td);
    KKASSERT(td->td_gd == gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    td->td_flags |= TDF_MIGRATING;
    crit_exit_gd(gd);
}

void
lwkt_acquire(thread_t td)
{
    globaldata_t gd;
    globaldata_t mygd;

    KKASSERT(td->td_flags & TDF_MIGRATING);
    gd = td->td_gd;
    mygd = mycpu;
    if (gd != mycpu) {
#ifdef LOOPMASK
        uint64_t tsc_base = rdtsc();
#endif
        cpu_lfence();
        KKASSERT((td->td_flags & TDF_RUNQ) == 0);
        crit_enter_gd(mygd);
        DEBUG_PUSH_INFO("lwkt_acquire");
        while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
            lwkt_process_ipiq();
            cpu_lfence();
#ifdef _KERNEL_VIRTUAL
            pthread_yield();
#endif
#ifdef LOOPMASK
            if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
                kprintf("lwkt_acquire: stuck td %p td->td_flags %08x\n",
                        td, td->td_flags);
                tsc_base = rdtsc();
            }
#endif
        }
        DEBUG_POP_INFO();
        cpu_mfence();
        td->td_gd = mygd;
        TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
        td->td_flags &= ~TDF_MIGRATING;
        crit_exit_gd(mygd);
    } else {
        crit_enter_gd(mygd);
        TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
        td->td_flags &= ~TDF_MIGRATING;
        crit_exit_gd(mygd);
    }
}

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
    if (td == curthread) {
        _lwkt_dequeue(td);
    } else {
        if (td->td_gd == mycpu) {
            _lwkt_dequeue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
        }
    }
    crit_exit();
}

/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread, LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
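 *
 * A minimal sketch (hypothetical caller):
 *
 *      lwkt_setpri(td, TDPRI_KERN_DAEMON);     (requeues td if it is
 *                                               already on a runq)
 *      lwkt_yield();                           (actually give up the cpu)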
 */
void
lwkt_setpri(thread_t td, int pri)
{
    if (td->td_pri != pri) {
        KKASSERT(pri >= 0);
        crit_enter();
        if (td->td_flags & TDF_RUNQ) {
            KKASSERT(td->td_gd == mycpu);
            _lwkt_dequeue(td);
            td->td_pri = pri;
            _lwkt_enqueue(td);
        } else {
            td->td_pri = pri;
        }
        crit_exit();
    }
}

/*
 * Set the initial priority for a thread prior to it being scheduled for
 * the first time.  The thread MUST NOT be scheduled before or during
 * this call.  The thread may be assigned to a cpu other than the current
 * cpu.
 *
 * Typically used after a thread has been created with TDF_STOPPREQ,
 * and before the thread is initially scheduled.
 */
void
lwkt_setpri_initial(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    KKASSERT((td->td_flags & TDF_RUNQ) == 0);
    td->td_pri = pri;
}

void
lwkt_setpri_self(int pri)
{
    thread_t td = curthread;

    KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = pri;
    }
    crit_exit();
}

/*
 * hz tick scheduler clock for LWKT threads
 */
void
lwkt_schedulerclock(thread_t td)
{
    globaldata_t gd = td->td_gd;
    thread_t xtd;

    xtd = TAILQ_FIRST(&gd->gd_tdrunq);
    if (xtd == td) {
        /*
         * If the current thread is at the head of the runq shift it to the
         * end of any equal-priority threads and request a LWKT reschedule
         * if it moved.
         *
         * Ignore upri in this situation.  There will only be one user
         * thread in user mode, all others will be user threads running in
         * kernel mode and we have to make sure they get some cpu.
         */
        xtd = TAILQ_NEXT(td, td_threadq);
        if (xtd && xtd->td_pri == td->td_pri) {
            TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
            while (xtd && xtd->td_pri == td->td_pri)
                xtd = TAILQ_NEXT(xtd, td_threadq);
            if (xtd)
                TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
            else
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
            need_lwkt_resched();
        }
    } else if (xtd) {
        /*
         * If we scheduled a thread other than the one at the head of the
         * queue always request a reschedule every tick.
         */
        need_lwkt_resched();
    }
    /* else curthread probably the idle thread, no need to reschedule */
}

/*
 * Migrate the current thread to the specified cpu.
 *
 * This is accomplished by descheduling ourselves from the current cpu
 * and setting td_migrate_gd.  The lwkt_switch() code will detect that the
 * 'old' thread wants to migrate after it has been completely switched out
 * and will complete the migration.
 *
 * TDF_MIGRATING prevents scheduling races while the thread is being
 * migrated.
 *
 * We must be sure to release our current process designation (if a user
 * process) before clearing out any tsleepq we are on because the release
 * code may re-add us.
 *
 * We must be sure to remove ourselves from the current cpu's tsleepq
 * before potentially moving to another queue.  The thread can be on
 * a tsleepq due to a left-over tsleep_interlock().
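 *
 * A minimal sketch (hypothetical caller): a kernel thread can move itself
 * to cpu 2 with
 *
 *      lwkt_migratecpu(2);     (wraps lwkt_setcpu_self(globaldata_find(2)))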
 */

void
lwkt_setcpu_self(globaldata_t rgd)
{
    thread_t td = curthread;

    if (td->td_gd != rgd) {
        crit_enter_quick(td);

        if (td->td_release)
            td->td_release(td);
        if (td->td_flags & TDF_TSLEEPQ)
            tsleep_remove(td);

        /*
         * Set TDF_MIGRATING to prevent a spurious reschedule while we are
         * trying to deschedule ourselves and switch away, then deschedule
         * ourself, remove us from tdallq, and set td_migrate_gd.  Finally,
         * call lwkt_switch() to complete the operation.
         */
        td->td_flags |= TDF_MIGRATING;
        lwkt_deschedule_self(td);
        TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
        td->td_migrate_gd = rgd;
        lwkt_switch();

        /*
         * We are now on the target cpu
         */
        KKASSERT(rgd == mycpu);
        TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);
        crit_exit_quick(td);
    }
}

void
lwkt_migratecpu(int cpuid)
{
    globaldata_t rgd;

    rgd = globaldata_find(cpuid);
    lwkt_setcpu_self(rgd);
}

/*
 * Remote IPI for cpu migration (called while in a critical section so we
 * do not have to enter another one).
 *
 * The thread (td) has already been completely descheduled from the
 * originating cpu and we can simply assert the case.  The thread is
 * assigned to the new cpu and enqueued.
 *
 * The thread will re-add itself to tdallq when it resumes execution.
 */
static void
lwkt_setcpu_remote(void *arg)
{
    thread_t td = arg;
    globaldata_t gd = mycpu;

    KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
    td->td_gd = gd;
    cpu_mfence();
    td->td_flags &= ~TDF_MIGRATING;
    KKASSERT(td->td_migrate_gd == NULL);
    KKASSERT(td->td_lwp == NULL ||
        (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
    _lwkt_enqueue(td);
}

struct lwp *
lwkt_preempted_proc(void)
{
    thread_t td = curthread;

    while (td->td_preempted)
        td = td->td_preempted;
    return(td->td_lwp);
}

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * If the cpu is not specified one will be selected.  In the future
 * specifying a cpu of -1 will enable kernel thread migration between
 * cpus.
 */
int
lwkt_create(void (*func)(void *), void *arg, struct thread **tdp,
            thread_t template, int tdflags, int cpu, const char *fmt, ...)
{
    thread_t td;
    __va_list ap;

    td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu,
                           tdflags);
    if (tdp)
        *tdp = td;
    cpu_set_thread_handler(td, lwkt_exit, func, arg);

    /*
     * Set up arg0 for 'ps' etc
     */
    __va_start(ap, fmt);
    kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    __va_end(ap);

    /*
     * Schedule the thread to run
     */
    if (td->td_flags & TDF_NOSTART)
        td->td_flags &= ~TDF_NOSTART;
    else
        lwkt_schedule(td);
    return 0;
}

/*
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
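 *
 * A minimal sketch (hypothetical kernel thread): a thread body started via
 * lwkt_create() terminates by calling lwkt_exit(); lwkt_create() also
 * installs lwkt_exit() as the return handler via cpu_set_thread_handler(),
 * so simply returning has the same effect:
 *
 *      static void
 *      my_kthread(void *arg)
 *      {
 *              while (!shutting_down)
 *                      do_work();
 *              lwkt_exit();
 *      }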
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;
    thread_t std;
    globaldata_t gd;

    /*
     * Do any cleanup that might block here
     */
    biosched_done(td);
    dsched_exit_thread(td);

    /*
     * Get us into a critical section to interlock gd_freetd and loop
     * until we can get it freed.
     *
     * We have to cache the current td in gd_freetd because objcache_put()ing
     * it would rip it out from under us while our thread is still active.
     *
     * We are the current thread so of course our own TDF_RUNNING bit will
     * be set, so unlike the lwp reap code we don't wait for it to clear.
     */
    gd = mycpu;
    crit_enter_quick(td);
    for (;;) {
        if (td->td_refs) {
            tsleep(td, 0, "tdreap", 1);
            continue;
        }
        if ((std = gd->gd_freetd) != NULL) {
            KKASSERT((std->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
            gd->gd_freetd = NULL;
            objcache_put(thread_cache, std);
            continue;
        }
        break;
    }

    /*
     * Remove thread resources from kernel lists and deschedule us for
     * the last time.  We cannot block after this point or we may end
     * up with a stale td on the tsleepq.
     *
     * None of this may block, the critical section is the only thing
     * protecting tdallq and the only thing preventing new lwkt_hold()
     * thread refs now.
     */
    if (td->td_flags & TDF_TSLEEPQ)
        tsleep_remove(td);
    lwkt_deschedule_self(td);
    lwkt_remove_tdallq(td);
    KKASSERT(td->td_refs == 0);

    /*
     * Final cleanup
     */
    KKASSERT(gd->gd_freetd == NULL);
    if (td->td_flags & TDF_ALLOCATED_THREAD)
        gd->gd_freetd = td;
    cpu_thread_exit();
}

void
lwkt_remove_tdallq(thread_t td)
{
    KKASSERT(td->td_gd == mycpu);
    TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
}

/*
 * Code reduction and branch prediction improvements.  Call/return
 * overhead on modern cpus often degenerates into 0 cycles due to
 * the cpu's branch prediction hardware and return pc cache.  We
 * can take advantage of this by not inlining medium-complexity
 * functions and we can also reduce the branch prediction impact
 * by collapsing perfectly predictable branches into a single
 * procedure instead of duplicating it.
 *
 * Is any of this noticeable?  Probably not, so I'll take the
 * smaller code size.
 */
void
crit_exit_wrapper(__DEBUG_CRIT_ARG__)
{
    _crit_exit(mycpu __DEBUG_CRIT_PASS_ARG__);
}

void
crit_panic(void)
{
    thread_t td = curthread;
    int lcrit = td->td_critcount;

    td->td_critcount = 0;
    cpu_ccfence();
    panic("td_critcount is/would-go negative! %p %d", td, lcrit);
    /* NOT REACHED */
}

/*
 * Called from debugger/panic on cpus which have been stopped.  We must still
 * process the IPIQ while stopped.
 *
 * If we are dumping also try to process any pending interrupts.  This may
 * or may not work depending on the state of the cpu at the point it was
 * stopped.
 */
void
lwkt_smp_stopped(void)
{
    globaldata_t gd = mycpu;

    if (dumping) {
        lwkt_process_ipiq();
        --gd->gd_intr_nesting_level;
        splz();
        ++gd->gd_intr_nesting_level;
    } else {
        lwkt_process_ipiq();
    }
    cpu_smp_stopped();
}