/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>
#include <sys/spinlock.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

#if !defined(KTR_CTXSW)
#define KTR_CTXSW	KTR_ALL
#endif
KTR_INFO_MASTER(ctxsw);
KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "sw %p > %p", 2 * sizeof(struct thread *));
KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "pre %p > %p", 2 * sizeof(struct thread *));
KTR_INFO(KTR_CTXSW, ctxsw, newtd, 2, "new_td %p %s",
	 sizeof(struct thread *) + sizeof(char *));
KTR_INFO(KTR_CTXSW, ctxsw, deadtd, 3, "dead_td %p", sizeof(struct thread *));

static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads");

#ifdef INVARIANTS
static int panic_on_cscount = 0;
#endif
static __int64_t switch_count = 0;
static __int64_t preempt_hit = 0;
static __int64_t preempt_miss = 0;
static __int64_t preempt_weird = 0;
static __int64_t token_contention_count __debugvar = 0;
static int lwkt_use_spin_port;
static struct objcache *thread_cache;

#ifdef SMP
static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame);
#endif

extern void cpu_heavy_restore(void);
extern void cpu_lwkt_restore(void);
extern void cpu_kthread_restore(void);
extern void cpu_idle_restore(void);

#ifdef __x86_64__

/*
 * Debugging aid: verify that the saved top-of-stack of a descheduled
 * thread points at one of the known context-restore entry points.
 */
static int
jg_tos_ok(struct thread *td)
{
    void *tos;
    int tos_ok;

    if (td == NULL) {
	return 1;
    }
    KKASSERT(td->td_sp != NULL);
    tos = ((void **)td->td_sp)[0];
    tos_ok = 0;
    if ((tos == cpu_heavy_restore) || (tos == cpu_lwkt_restore) ||
	(tos == cpu_kthread_restore) || (tos == cpu_idle_restore)) {
	tos_ok = 1;
    }
    return tos_ok;
}

#endif

/*
 * We can make all thread ports use the spin backend instead of the thread
 * backend.  This should only be set to debug the spin backend.
 */
TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port);

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW,
    &panic_on_cscount, 0, "");
#endif
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, "");
#ifdef INVARIANTS
SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count, CTLFLAG_RW,
    &token_contention_count, 0, "spinning due to token contention");
#endif

/*
 * These helper procedures handle the runq; they can only be called from
 * within a critical section.
 *
 * WARNING!  Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once
 * SMP is live, enqueueing and dequeueing only occur on the current cpu.
 */
static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
	int nq = td->td_pri & TDPRI_MASK;
	struct globaldata *gd = td->td_gd;

	td->td_flags &= ~TDF_RUNQ;
	TAILQ_REMOVE(&gd->gd_tdrunq[nq], td, td_threadq);
	/* runqmask is passively cleaned up by the switcher */
    }
}

static __inline
void
_lwkt_enqueue(thread_t td)
{
    if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) {
	int nq = td->td_pri & TDPRI_MASK;
	struct globaldata *gd = td->td_gd;

	td->td_flags |= TDF_RUNQ;
	TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], td, td_threadq);
	gd->gd_runqmask |= 1 << nq;
    }
}

static __boolean_t
_lwkt_thread_ctor(void *obj, void *privdata, int ocflags)
{
    struct thread *td = (struct thread *)obj;

    td->td_kstack = NULL;
    td->td_kstack_size = 0;
    td->td_flags = TDF_ALLOCATED_THREAD;
    return (1);
}

static void
_lwkt_thread_dtor(void *obj, void *privdata)
{
    struct thread *td = (struct thread *)obj;

    KASSERT(td->td_flags & TDF_ALLOCATED_THREAD,
	("_lwkt_thread_dtor: not allocated from objcache"));
    KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack &&
	td->td_kstack_size > 0,
	("_lwkt_thread_dtor: corrupted stack"));
    kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
}

/*
 * Initialize the lwkt subsystem.
 */
void
lwkt_init(void)
{
    /* An objcache has 2 magazines per CPU so divide cache size by 2. */
    thread_cache = objcache_create_mbacked(M_THREAD, sizeof(struct thread),
					   NULL, CACHE_NTHREADS/2,
					   _lwkt_thread_ctor,
					   _lwkt_thread_dtor, NULL);
}

/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * function.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(thread_t td)
{
    crit_enter_quick(td);
    KASSERT(td != &td->td_gd->gd_idlethread,
	("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
    KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
    _lwkt_enqueue(td);
    crit_exit_quick(td);
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(thread_t td)
{
    crit_enter_quick(td);
    _lwkt_dequeue(td);
    crit_exit_quick(td);
}

/*
 * LWKTs operate on a per-cpu basis
 *
 * WARNING!  Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    int i;

    for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i)
	TAILQ_INIT(&gd->gd_tdrunq[i]);
    gd->gd_runqmask = 0;
    TAILQ_INIT(&gd->gd_tdallq);
}

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.  If the target cpu is
 * -1 the thread will be created on the current cpu.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags)
{
    globaldata_t gd = mycpu;
    void *stack;

    /*
     * If static thread storage is not supplied allocate a thread.  Reuse
     * a cached free thread if possible.  gd_freetd is used to keep an
     * exiting thread intact through the exit.
     */
    if (td == NULL) {
	if ((td = gd->gd_freetd) != NULL)
	    gd->gd_freetd = NULL;
	else
	    td = objcache_get(thread_cache, M_WAITOK);
	KASSERT((td->td_flags &
		 (TDF_ALLOCATED_THREAD|TDF_RUNNING)) == TDF_ALLOCATED_THREAD,
	    ("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags));
	flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK);
    }

    /*
     * Try to reuse a cached stack.  A cached stack of the wrong size is
     * freed (if we allocated it) so a fresh one can be allocated below.
     */
    if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
	if (flags & TDF_ALLOCATED_STACK) {
	    kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size);
	    stack = NULL;
	}
    }
    if (stack == NULL) {
	stack = (void *)kmem_alloc(&kernel_map, stksize);
	flags |= TDF_ALLOCATED_STACK;
    }
    if (cpu < 0)
	lwkt_init_thread(td, stack, stksize, flags, gd);
    else
	lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
    return (td);
}

/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * All threads start out in a critical section at a priority of
 * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
 * appropriate.  This function may send an IPI message when the
 * requested cpu is not the current cpu and consequently gd_tdallq may
 * not be initialized synchronously from the point of view of the
 * originating cpu.
 *
 * NOTE! We have to be careful in regards to creating threads for other cpus
 * if SMP has not yet been activated.
 */
#ifdef SMP

static void
lwkt_init_thread_remote(void *arg)
{
    thread_t td = arg;

    /*
     * Protected by critical section held by IPI dispatch
     */
    TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
}

#endif

void
lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
		 struct globaldata *gd)
{
    globaldata_t mygd = mycpu;

    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_kstack_size = stksize;
    td->td_flags = flags;
    td->td_gd = gd;
    td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT;
#ifdef SMP
    if ((flags & TDF_MPSAFE) == 0)
	td->td_mpcount = 1;
#endif
    if (lwkt_use_spin_port)
	lwkt_initport_spin(&td->td_msgport);
    else
	lwkt_initport_thread(&td->td_msgport, td);
    pmap_init_thread(td);
#ifdef SMP
    /*
     * Normally initializing a thread for a remote cpu requires sending an
     * IPI.  However, the idlethread is set up before the other cpus are
     * activated so we have to treat it as a special case.  XXX manipulation
     * of gd_tdallq requires the BGL.
     */
    if (gd == mygd || td == &gd->gd_idlethread) {
	crit_enter_gd(mygd);
	TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
	crit_exit_gd(mygd);
    } else {
	lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
    }
#else
    crit_enter_gd(mygd);
    TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
    crit_exit_gd(mygd);
#endif
}
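
/*
 * Set the thread's command/debug name (td_comm) from a printf-style
 * format string and log the new name via the KTR context-switch facility.
 */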
void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
    __va_list va;

    __va_start(va, ctl);
    kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
    __va_end(va);
    KTR_LOG(ctxsw_newtd, td, &td->td_comm[0]);
}

/*
 * Add a reference to the thread, preventing it from being reaped.
 */
void
lwkt_hold(thread_t td)
{
    ++td->td_refs;
}

/*
 * Drop a reference obtained via lwkt_hold().
 */
void
lwkt_rele(thread_t td)
{
    KKASSERT(td->td_refs > 0);
    --td->td_refs;
}

/*
 * Wait for all references to the thread to go away.
 */
void
lwkt_wait_free(thread_t td)
{
    while (td->td_refs)
	tsleep(td, 0, "tdreap", hz);
}
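
/*
 * Release a thread structure.  Threads allocated from the objcache are
 * returned to it; for a client-supplied structure only the internally
 * allocated stack (if any) is freed.
 */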
void
lwkt_free_thread(thread_t td)
{
    KASSERT((td->td_flags & TDF_RUNNING) == 0,
	("lwkt_free_thread: did not exit! %p", td));

    if (td->td_flags & TDF_ALLOCATED_THREAD) {
	objcache_put(thread_cache, td);
    } else if (td->td_flags & TDF_ALLOCATED_STACK) {
	/* client-allocated struct with internally allocated stack */
	KASSERT(td->td_kstack && td->td_kstack_size > 0,
	    ("lwkt_free_thread: corrupted stack"));
	kmem_free(&kernel_map, (vm_offset_t)td->td_kstack,
		  td->td_kstack_size);
	td->td_kstack = NULL;
	td->td_kstack_size = 0;
    }
    KTR_LOG(ctxsw_deadtd, td);
}

/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * The MP lock may be out of sync with the thread's td_mpcount.  lwkt_switch()
 * cleans it up.  Note that the td_switch() function cannot do anything that
 * requires the MP lock since the MP lock will have already been set up for
 * the target thread (not the current thread).  It's nice to have a scheduler
 * that does not need the MP lock to work because it allows us to do some
 * really cool high-performance MP lock optimizations.
 *
 * PREEMPTION NOTE: Preemption occurs via lwkt_preempt().  lwkt_switch()
 * is not called by the current thread in the preemption case, only when
 * the preempting thread blocks (in order to return to the original thread).
 */
void
lwkt_switch(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    thread_t ntd;
#ifdef SMP
    int mpheld;
#endif

    /*
     * Switching from within a 'fast' (non thread switched) interrupt or IPI
     * is illegal.  However, we may have to do it anyway if we hit a fatal
     * kernel trap or we have panicked.
     *
     * If this case occurs save and restore the interrupt nesting level.
     */
    if (gd->gd_intr_nesting_level) {
	int savegdnest;
	int savegdtrap;

	if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) {
	    panic("lwkt_switch: cannot switch from within "
		  "a fast interrupt, yet, td %p\n", td);
	} else {
	    savegdnest = gd->gd_intr_nesting_level;
	    savegdtrap = gd->gd_trap_nesting_level;
	    gd->gd_intr_nesting_level = 0;
	    gd->gd_trap_nesting_level = 0;
	    if ((td->td_flags & TDF_PANICWARN) == 0) {
		td->td_flags |= TDF_PANICWARN;
		kprintf("Warning: thread switch from interrupt or IPI, "
			"thread %p (%s)\n", td, td->td_comm);
		print_backtrace();
	    }
	    lwkt_switch();
	    gd->gd_intr_nesting_level = savegdnest;
	    gd->gd_trap_nesting_level = savegdtrap;
	    return;
	}
    }

    /*
     * Passive release (used to transition from user to kernel mode
     * when we block or switch rather than when we enter the kernel).
     * This function is NOT called if we are switching into a preemption
     * or returning from a preemption.  Typically this causes us to lose
     * our current process designation (if we have one) and become a true
     * LWKT thread, and may also hand the current process designation to
     * another process and schedule its thread.
     */
    if (td->td_release)
	td->td_release(td);

    crit_enter_gd(gd);
    if (td->td_toks)
	lwkt_relalltokens(td);

    /*
     * We had better not be holding any spin locks, but don't get into an
     * endless panic loop.
     */
    KASSERT(gd->gd_spinlock_rd == NULL || panicstr != NULL,
	("lwkt_switch: still holding a shared spinlock %p!",
	 gd->gd_spinlock_rd));
    KASSERT(gd->gd_spinlocks_wr == 0 || panicstr != NULL,
	("lwkt_switch: still holding %d exclusive spinlocks!",
	 gd->gd_spinlocks_wr));

#ifdef SMP
    /*
     * td_mpcount cannot be used to determine if we currently hold the
     * MP lock because get_mplock() will increment it prior to attempting
     * to get the lock, and switch out if it can't.  Our ownership of
     * the actual lock will remain stable while we are in a critical section
     * (but, of course, another cpu may own or release the lock so the
     * actual value of mp_lock is not stable).
     */
    mpheld = MP_LOCK_HELD();
#ifdef INVARIANTS
    if (td->td_cscount) {
	kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
		td);
	if (panic_on_cscount)
	    panic("switching while mastering cpusync");
    }
#endif
#endif
    if ((ntd = td->td_preempted) != NULL) {
	/*
	 * We had preempted another thread on this cpu, resume the preempted
	 * thread.  This occurs transparently, whether the preempted thread
	 * was scheduled or not (it may have been preempted after
	 * descheduling itself).
	 *
	 * We have to set up the MP lock for the original thread after
	 * backing out the adjustment that was made to curthread when the
	 * original was preempted.
	 */
	KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
#ifdef SMP
	if (ntd->td_mpcount && mpheld == 0) {
	    panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d",
		  td, ntd, td->td_mpcount, ntd->td_mpcount);
	}
	if (ntd->td_mpcount) {
	    td->td_mpcount -= ntd->td_mpcount;
	    KKASSERT(td->td_mpcount >= 0);
	}
#endif
	ntd->td_flags |= TDF_PREEMPT_DONE;

	/*
	 * The interrupt may have woken a thread up; we need to properly
	 * set the reschedule flag if the originally interrupted thread is
	 * at a lower priority.
	 */
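	/*
	 * ((2 << pri) - 1) sets every run queue mask bit at or below the
	 * interrupted thread's priority, so a larger gd_runqmask means a
	 * strictly higher priority thread is runnable.
	 */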
	if (gd->gd_runqmask > (2 << (ntd->td_pri & TDPRI_MASK)) - 1)
	    need_lwkt_resched();
	/* YYY release mp lock on switchback if original doesn't need it */
    } else {
	/*
	 * Priority queue / round-robin at each priority.  Note that user
	 * processes run at a fixed, low priority and the user process
	 * scheduler deals with interactions between user processes
	 * by scheduling and descheduling them from the LWKT queue as
	 * necessary.
	 *
	 * We have to adjust the MP lock for the target thread.  If we
	 * need the MP lock and cannot obtain it we try to locate a
	 * thread that does not need the MP lock.  If we cannot, we spin
	 * instead of HLT.
	 *
	 * A similar issue exists for the tokens held by the target thread.
	 * If we cannot obtain ownership of the tokens we cannot immediately
	 * schedule the thread.
	 */

	/*
	 * If an LWKT reschedule was requested, well that is what we are
	 * doing now so clear it.
	 */
	clear_lwkt_resched();
again:
	if (gd->gd_runqmask) {
	    int nq = bsrl(gd->gd_runqmask);
	    if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) == NULL) {
		gd->gd_runqmask &= ~(1 << nq);
		goto again;
	    }
#ifdef SMP
	    /*
	     * THREAD SELECTION FOR AN SMP MACHINE BUILD
	     *
	     * If the target needs the MP lock and we couldn't get it,
	     * or if the target is holding tokens and we could not
	     * gain ownership of the tokens, continue looking for a
	     * thread to schedule and spin instead of HLT if we can't.
	     *
	     * NOTE: the mpheld variable is invalid after this conditional;
	     * it can change due to both cpu_try_mplock() returning success
	     * AND interactions in lwkt_getalltokens() due to the fact that
	     * we are trying to check the mpcount of a thread other than
	     * the current thread.  Because of this, if the current thread
	     * is not holding td_mpcount, an IPI indirectly run via
	     * lwkt_getalltokens() can obtain and release the MP lock and
	     * cause the core MP lock to be released.
	     */
	    if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) ||
		(ntd->td_toks && lwkt_getalltokens(ntd) == 0)
	    ) {
		u_int32_t rqmask = gd->gd_runqmask;

		mpheld = MP_LOCK_HELD();
		ntd = NULL;
		while (rqmask) {
		    TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) {
			if (ntd->td_mpcount && !mpheld &&
			    !cpu_try_mplock()) {
			    /* spinning due to MP lock being held */
			    continue;
			}

			/*
			 * mpheld state is invalid after the getalltokens
			 * call returns failure, but the variable is only
			 * needed for the loop.
			 */
			if (ntd->td_toks && !lwkt_getalltokens(ntd)) {
			    /* spinning due to token contention */
#ifdef INVARIANTS
			    ++token_contention_count;
#endif
			    mpheld = MP_LOCK_HELD();
			    continue;
			}
			break;
		    }
		    if (ntd)
			break;
		    rqmask &= ~(1 << nq);
		    nq = bsrl(rqmask);

		    /*
		     * We have two choices.  We can either refuse to run a
		     * user thread when a kernel thread needs the MP lock
		     * but could not get it, or we can allow it to run but
		     * then expect an IPI (hopefully) later on to force a
		     * reschedule when the MP lock might become available.
		     */
		    if (nq < TDPRI_KERN_LPSCHED) {
			break;		/* for now refuse to run */
#if 0
			if (chain_mplock == 0)
			    break;
			/* continue loop, allow user threads to be
			 * scheduled */
#endif
		    }
		}

		/*
		 * Case where a (kernel) thread needed the MP lock and could
		 * not get one, and we may or may not have found another
		 * thread which does not need the MP lock to run while
		 * we wait (ntd).
		 */
		if (ntd == NULL) {
		    ntd = &gd->gd_idlethread;
		    ntd->td_flags |= TDF_IDLE_NOHLT;
		    set_mplock_contention_mask(gd);
		    cpu_mplock_contested();
		    goto using_idle_thread;
		} else {
		    clr_mplock_contention_mask(gd);
		    ++gd->gd_cnt.v_swtch;
		    TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
		    TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
		}
	    } else {
		clr_mplock_contention_mask(gd);
		++gd->gd_cnt.v_swtch;
		TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
		TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
	    }
#else
	    /*
	     * THREAD SELECTION FOR A UP MACHINE BUILD.  We don't have to
	     * worry about tokens or the BGL.  However, we still have
	     * to call lwkt_getalltokens() in order to properly detect
	     * stale tokens.  This call cannot fail for a UP build!
	     */
	    lwkt_getalltokens(ntd);
	    ++gd->gd_cnt.v_swtch;
	    TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
	    TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
#endif
	} else {
	    /*
	     * We have nothing to run but only let the idle loop halt
	     * the cpu if there are no pending interrupts.
	     */
	    ntd = &gd->gd_idlethread;
	    if (gd->gd_reqflags & RQF_IDLECHECK_MASK)
		ntd->td_flags |= TDF_IDLE_NOHLT;
#ifdef SMP
using_idle_thread:
	    /*
	     * The idle thread should not be holding the MP lock unless we
	     * are trapping in the kernel or in a panic.  Since we select the
	     * idle thread unconditionally when no other thread is available,
	     * if the MP lock is desired during a panic or kernel trap, we
	     * have to loop in the scheduler until we get it.
	     */
	    if (ntd->td_mpcount) {
		mpheld = MP_LOCK_HELD();
		if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
		    panic("Idle thread %p was holding the BGL!", ntd);
		if (mpheld == 0)
		    goto again;
	    }
#endif
	}
    }
    KASSERT(ntd->td_pri >= TDPRI_CRIT,
	("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri));

    /*
     * Do the actual switch.  If the new target does not need the MP lock
     * and we are holding it, release the MP lock.  If the new target
     * requires the MP lock we have already acquired it for the target.
     */
#ifdef SMP
    if (ntd->td_mpcount == 0) {
	if (MP_LOCK_HELD())
	    cpu_rel_mplock();
    } else {
	ASSERT_MP_LOCK_HELD(ntd);
    }
#endif
    if (td != ntd) {
	++switch_count;
#ifdef __x86_64__
	{
	    int tos_ok __debugvar = jg_tos_ok(ntd);
	    KKASSERT(tos_ok);
	}
#endif
	KTR_LOG(ctxsw_sw, td, ntd);
	td->td_switch(ntd);
    }
    /* NOTE: current cpu may have changed after switch */
    crit_exit_quick(td);
}

/*
 * Request that the target thread preempt the current thread.  Preemption
 * only works under a specific set of conditions:
 *
 *	- We are not preempting ourselves
 *	- The target thread is owned by the current cpu
 *	- We are not currently being preempted
 *	- The target is not currently being preempted
 *	- We are not holding any spin locks
 *	- The target thread is not holding any tokens
 *	- We are able to satisfy the target's MP lock requirements (if any).
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critpri is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * XXX at the moment we run the target thread in a critical section during
 * the preemption in order to prevent the target from taking interrupts
 * that *WE* can't.  Preemption is strictly limited to interrupt threads
 * and interrupt-like threads, outside of a critical section, and the
 * preempted source thread will be resumed the instant the target blocks
 * whether or not the source is scheduled (i.e. preemption is supposed to
 * be as transparent as possible).
 *
 * The target thread inherits our MP count (added to its own) for the
 * duration of the preemption in order to preserve the atomicity of the
 * MP lock during the preemption.  Therefore, any preempting targets must be
 * careful in regards to MP assertions.  Note that the MP count may be
 * out of sync with the physical mp_lock, but we do not have to preserve
 * the original ownership of the lock if it was out of sync (that is, we
 * can leave it synchronized on return).
 */
void
lwkt_preempt(thread_t ntd, int critpri)
{
    struct globaldata *gd = mycpu;
    thread_t td;
#ifdef SMP
    int mpheld;
    int savecnt;
#endif

    /*
     * The caller has put us in a critical section.  We can only preempt
     * if the caller of the caller was not in a critical section (basically
     * a local interrupt), as determined by the 'critpri' parameter.  We
     * also can't preempt if the caller is holding any spinlocks (even if
     * he isn't in a critical section).  This also handles the tokens test.
     *
     * YYY The target thread must be in a critical section (else it must
     * inherit our critical section?  I dunno yet).
     *
     * Set need_lwkt_resched() unconditionally for now YYY.
     */
    KASSERT(ntd->td_pri >= TDPRI_CRIT, ("BADCRIT0 %d", ntd->td_pri));

    td = gd->gd_curthread;
    if ((ntd->td_pri & TDPRI_MASK) <= (td->td_pri & TDPRI_MASK)) {
	++preempt_miss;
	return;
    }
    if ((td->td_pri & ~TDPRI_MASK) > critpri) {
	++preempt_miss;
	need_lwkt_resched();
	return;
    }
#ifdef SMP
    if (ntd->td_gd != gd) {
	++preempt_miss;
	need_lwkt_resched();
	return;
    }
#endif
    /*
     * Take the easy way out and do not preempt if we are holding
     * any spinlocks.  We could test whether the thread(s) being
     * preempted interlock against the target thread's tokens and whether
     * we can get all the target thread's tokens, but this situation
     * should not occur very often so it's easier to simply not preempt.
     * Also, plain spinlocks are impossible to figure out at this point so
     * just don't preempt.
     *
     * Do not try to preempt if the target thread is holding any tokens.
     * We could try to acquire the tokens but this case is so rare there
     * is no need to support it.
     */
    if (gd->gd_spinlock_rd || gd->gd_spinlocks_wr) {
	++preempt_miss;
	need_lwkt_resched();
	return;
    }
    if (ntd->td_toks) {
	++preempt_miss;
	need_lwkt_resched();
	return;
    }
    if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
	++preempt_weird;
	need_lwkt_resched();
	return;
    }
    if (ntd->td_preempted) {
	++preempt_hit;
	need_lwkt_resched();
	return;
    }
#ifdef SMP
    /*
     * NOTE: an interrupt might have occurred just as we were transitioning
     * to or from the MP lock.  In this case td_mpcount will be pre-disposed
     * (non-zero) but not actually synchronized with the actual state of the
     * lock.  We can use it to imply an MP lock requirement for the
     * preemption but we cannot use it to test whether we hold the MP lock
     * or not.
     */
    savecnt = td->td_mpcount;
    mpheld = MP_LOCK_HELD();
    ntd->td_mpcount += td->td_mpcount;
    if (mpheld == 0 && ntd->td_mpcount && !cpu_try_mplock()) {
	ntd->td_mpcount -= td->td_mpcount;
	++preempt_miss;
	need_lwkt_resched();
	return;
    }
#endif

    /*
     * Since we are able to preempt the current thread, there is no need to
     * call need_lwkt_resched().
     */
    ++preempt_hit;
    ntd->td_preempted = td;
    td->td_flags |= TDF_PREEMPT_LOCK;
    KTR_LOG(ctxsw_pre, td, ntd);
    td->td_switch(ntd);

    KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
#ifdef SMP
    KKASSERT(savecnt == td->td_mpcount);
    mpheld = MP_LOCK_HELD();
    if (mpheld && td->td_mpcount == 0)
	cpu_rel_mplock();
    else if (mpheld == 0 && td->td_mpcount)
	panic("lwkt_preempt(): MP lock was not held through");
#endif
    ntd->td_preempted = NULL;
    td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
}

/*
 * Conditionally call splz() if gd_reqflags indicates work is pending.
 *
 * td_nest_count prevents deep nesting via splz() or doreti() which
 * might otherwise blow out the kernel stack.  Note that except for
 * this special case, we MUST call splz() here to handle any
 * pending ints, particularly after we switch, or we might accidentally
 * halt the cpu with interrupts pending.
 *
 * (self contained on a per cpu basis)
 */
void
splz_check(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    if (gd->gd_reqflags && td->td_nest_count < 2)
	splz();
}

/*
 * This implements a normal yield which will yield to equal priority
 * threads as well as higher priority threads.  Note that gd_reqflags
 * tests will be handled by the crit_exit() call in lwkt_switch().
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
    lwkt_schedule_self(curthread);
    lwkt_switch();
}

/*
 * This function is used along with the lwkt_passive_recover() inline
 * by the trap code to negotiate a passive release of the current
 * process/lwp designation with the user scheduler.
 */
void
lwkt_passive_release(struct thread *td)
{
    struct lwp *lp = td->td_lwp;

    td->td_release = NULL;
    lwkt_setpri_self(TDPRI_KERN_USER);
    lp->lwp_proc->p_usched->release_curproc(lp);
}

/*
 * Make a kernel thread act as if it were in user mode with regards
 * to scheduling, to avoid becoming cpu-bound in the kernel.  Kernel
 * loops which may be potentially cpu-bound can call lwkt_user_yield().
 *
 * The lwkt_user_yield() function is designed to have very low overhead
 * if no yield is determined to be needed.
 */
void
lwkt_user_yield(void)
{
    thread_t td = curthread;
    struct lwp *lp = td->td_lwp;

#ifdef SMP
    /*
     * XXX SEVERE TEMPORARY HACK.  A cpu-bound operation running in the
     * kernel can prevent other cpus from servicing interrupt threads
     * which still require the MP lock (which is a lot of them).  This
     * has a chaining effect since if the interrupt is blocked, so is
     * the event, so normal scheduling will not pick up on the problem.
     */
    if (mp_lock_contention_mask && td->td_mpcount) {
	yield_mplock(td);
    }
#endif

    /*
     * Another kernel thread wants the cpu
     */
    if (lwkt_resched_wanted())
	lwkt_switch();

    /*
     * If the user scheduler has asynchronously determined that the current
     * process (when running in user mode) needs to lose the cpu then make
     * sure we are released.
     */
    if (user_resched_wanted()) {
	if (td->td_release)
	    td->td_release(td);
    }

    /*
     * If we are released reduce our priority
     */
    if (td->td_release == NULL) {
	if (lwkt_check_resched(td) > 0)
	    lwkt_switch();
	if (lp) {
	    lp->lwp_proc->p_usched->acquire_curproc(lp);
	    td->td_release = lwkt_passive_release;
	    lwkt_setpri_self(TDPRI_USER_NORM);
	}
    }
}

/*
 * Return 0 if no runnable threads are pending at the same or higher
 * priority as the passed thread.
 *
 * Return 1 if runnable threads are pending at the same priority.
 *
 * Return 2 if runnable threads are pending at a higher priority.
 */
int
lwkt_check_resched(thread_t td)
{
    int pri = td->td_pri & TDPRI_MASK;

    if (td->td_gd->gd_runqmask > (2 << pri) - 1)
	return (2);
    if (TAILQ_NEXT(td, td_threadq))
	return (1);
    return (0);
}

/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * We have a little helper inline function which does additional work after
 * the thread has been enqueued, including dealing with preemption and
 * setting need_lwkt_resched() (which prevents the kernel from returning
 * to userland until it has processed higher priority threads).
 *
 * It is possible for this routine to be called after a failed _enqueue
 * (due to the target thread migrating, sleeping, or otherwise blocked).
 * We have to check that the thread is actually on the run queue!
 *
 * reschedok is an optimized constant propagated from lwkt_schedule() or
 * lwkt_schedule_noresched().  By default it is non-zero, causing a
 * reschedule to be requested if the target thread has a higher priority.
 * The port messaging code will set MSG_NORESCHED and cause reschedok to
 * be 0, preventing undesired reschedules.
 */
static __inline
void
_lwkt_schedule_post(globaldata_t gd, thread_t ntd, int cpri, int reschedok)
{
    thread_t otd;

    if (ntd->td_flags & TDF_RUNQ) {
	if (ntd->td_preemptable && reschedok) {
	    ntd->td_preemptable(ntd, cpri);	/* YYY +token */
	} else if (reschedok) {
	    otd = curthread;
	    if ((ntd->td_pri & TDPRI_MASK) > (otd->td_pri & TDPRI_MASK))
		need_lwkt_resched();
	}
    }
}

static __inline
void
_lwkt_schedule(thread_t td, int reschedok)
{
    globaldata_t mygd = mycpu;

    KASSERT(td != &td->td_gd->gd_idlethread,
	("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
    crit_enter_gd(mygd);
    KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
    if (td == mygd->gd_curthread) {
	_lwkt_enqueue(td);
    } else {
	/*
	 * If we own the thread, there is no race (since we are in a
	 * critical section).  If we do not own the thread there might
	 * be a race but the target cpu will deal with it.
	 */
#ifdef SMP
	if (td->td_gd == mygd) {
	    _lwkt_enqueue(td);
	    _lwkt_schedule_post(mygd, td, TDPRI_CRIT, reschedok);
	} else {
	    lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0);
	}
#else
	_lwkt_enqueue(td);
	_lwkt_schedule_post(mygd, td, TDPRI_CRIT, reschedok);
#endif
    }
    crit_exit_gd(mygd);
}

void
lwkt_schedule(thread_t td)
{
    _lwkt_schedule(td, 1);
}

void
lwkt_schedule_noresched(thread_t td)
{
    _lwkt_schedule(td, 0);
}

#ifdef SMP

/*
 * When scheduled remotely, if frame != NULL the IPIQ is being run via
 * doreti or an interrupt and preemption can be allowed.
 *
 * To allow preemption we have to drop the critical section so only
 * one is present in _lwkt_schedule_post.
 */
static void
lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame)
{
    thread_t td = curthread;
    thread_t ntd = arg;

    if (frame && ntd->td_preemptable) {
	crit_exit_noyield(td);
	_lwkt_schedule(ntd, 1);
	crit_enter_quick(td);
    } else {
	_lwkt_schedule(ntd, 1);
    }
}

/*
 * Thread migration using a 'Pull' method.  The thread may or may not be
 * the current thread.  It MUST be descheduled and in a stable state.
 * lwkt_giveaway() must be called on the cpu owning the thread.
 *
 * At any point after lwkt_giveaway() is called, the target cpu may
 * 'pull' the thread by calling lwkt_acquire().
 *
 * We have to make sure the thread is not sitting on a per-cpu tsleep
 * queue or it will blow up when it moves to another cpu.
 *
 * MPSAFE - must be called under very specific conditions.
 */
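/*
 * Illustrative sketch of the pull protocol (not from the original source;
 * the actual callers live in the userland scheduler):
 *
 *	owning cpu:	lwkt_giveaway(td);
 *	target cpu:	lwkt_acquire(td);	-- pulls td to this cpu
 *			lwkt_schedule(td);	-- may then reschedule it
 */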
void
lwkt_giveaway(thread_t td)
{
    globaldata_t gd = mycpu;

    crit_enter_gd(gd);
    if (td->td_flags & TDF_TSLEEPQ)
	tsleep_remove(td);
    KKASSERT(td->td_gd == gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    td->td_flags |= TDF_MIGRATING;
    crit_exit_gd(gd);
}

void
lwkt_acquire(thread_t td)
{
    globaldata_t gd;
    globaldata_t mygd;

    KKASSERT(td->td_flags & TDF_MIGRATING);
    gd = td->td_gd;
    mygd = mycpu;
    if (gd != mycpu) {
	cpu_lfence();
	KKASSERT((td->td_flags & TDF_RUNQ) == 0);
	crit_enter_gd(mygd);
	while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
#ifdef SMP
	    lwkt_process_ipiq();
#endif
	    cpu_lfence();
	}
	td->td_gd = mygd;
	TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
	td->td_flags &= ~TDF_MIGRATING;
	crit_exit_gd(mygd);
    } else {
	crit_enter_gd(mygd);
	TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
	td->td_flags &= ~TDF_MIGRATING;
	crit_exit_gd(mygd);
    }
}

#endif

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
#ifdef SMP
    if (td == curthread) {
	_lwkt_dequeue(td);
    } else {
	if (td->td_gd == mycpu) {
	    _lwkt_dequeue(td);
	} else {
	    lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
	}
    }
#else
    _lwkt_dequeue(td);
#endif
    crit_exit();
}

/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread; LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
 *
 * We have to retain the critical section count which uses the high bits
 * of the td_pri field.  The specified priority may also indicate zero or
 * more critical sections by adding TDPRI_CRIT*N.
 *
 * Note that we requeue the thread whether it winds up on a different runq
 * or not.  uio_yield() depends on this and the routine is not normally
 * called with the same priority otherwise.
 */
void
lwkt_setpri(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    KKASSERT(td->td_gd == mycpu);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
	_lwkt_dequeue(td);
	td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
	_lwkt_enqueue(td);
    } else {
	td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

/*
 * Set the initial priority for a thread prior to it being scheduled for
 * the first time.  The thread MUST NOT be scheduled before or during
 * this call.  The thread may be assigned to a cpu other than the current
 * cpu.
 *
 * Typically used after a thread has been created with TDF_STOPPREQ,
 * and before the thread is initially scheduled.
 */
void
lwkt_setpri_initial(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    KKASSERT((td->td_flags & TDF_RUNQ) == 0);
    td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
}

/*
 * Like lwkt_setpri() but operates on the current thread.
 */
void
lwkt_setpri_self(int pri)
{
    thread_t td = curthread;

    KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
	_lwkt_dequeue(td);
	td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
	_lwkt_enqueue(td);
    } else {
	td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

/*
 * Migrate the current thread to the specified cpu.
 *
 * This is accomplished by descheduling ourselves from the current cpu,
 * moving our thread to the tdallq of the target cpu, IPI messaging the
 * target cpu, and switching out.  TDF_MIGRATING prevents scheduling
 * races while the thread is being migrated.
 *
 * We must be sure to remove ourselves from the current cpu's tsleepq
 * before potentially moving to another queue.  The thread can be on
 * a tsleepq due to a left-over tsleep_interlock().
 */
#ifdef SMP
static void lwkt_setcpu_remote(void *arg);
#endif

void
lwkt_setcpu_self(globaldata_t rgd)
{
#ifdef SMP
    thread_t td = curthread;

    if (td->td_gd != rgd) {
	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ)
	    tsleep_remove(td);
	td->td_flags |= TDF_MIGRATING;
	lwkt_deschedule_self(td);
	TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
	lwkt_send_ipiq(rgd, (ipifunc1_t)lwkt_setcpu_remote, td);
	lwkt_switch();
	/* we are now on the target cpu */
	TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);
	crit_exit_quick(td);
    }
#endif
}

void
lwkt_migratecpu(int cpuid)
{
#ifdef SMP
    globaldata_t rgd;

    rgd = globaldata_find(cpuid);
    lwkt_setcpu_self(rgd);
#endif
}

/*
 * Remote IPI for cpu migration (called while in a critical section so we
 * do not have to enter another one).  The thread has already been moved to
 * our cpu's allq, but we must wait for the thread to be completely switched
 * out on the originating cpu before we schedule it on ours or the stack
 * state may be corrupt.  We clear TDF_MIGRATING after flushing the GD
 * change to main memory.
 *
 * XXX The use of TDF_MIGRATING might not be sufficient to avoid races
 * against wakeups.  It is best if this interface is used only when there
 * are no pending events that might try to schedule the thread.
 */
#ifdef SMP
static void
lwkt_setcpu_remote(void *arg)
{
    thread_t td = arg;
    globaldata_t gd = mycpu;

    while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
#ifdef SMP
	lwkt_process_ipiq();
#endif
	cpu_lfence();
    }
    td->td_gd = gd;
    cpu_sfence();
    td->td_flags &= ~TDF_MIGRATING;
    KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
    _lwkt_enqueue(td);
}
#endif

/*
 * Walk the preemption chain and return the lwp of the originally preempted
 * thread (the current thread's lwp if no preemption is in progress).
 */
struct lwp *
lwkt_preempted_proc(void)
{
    thread_t td = curthread;

    while (td->td_preempted)
	td = td->td_preempted;
    return (td->td_lwp);
}

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0, i.e. it is kernel-only.
 *
 * NOTE! By default new threads are created with the MP lock held.  A
 * thread which does not require the MP lock should release it by calling
 * rel_mplock() at the start of the new thread.
 */
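/*
 * Illustrative usage sketch (hypothetical caller, not part of the original
 * file; 'my_kthread' is an assumed name):
 *
 *	static void
 *	my_kthread(void *arg)
 *	{
 *		rel_mplock();		-- release the BGL if not needed
 *		for (;;) {
 *			...
 *		}
 *		lwkt_exit();
 *	}
 *
 *	lwkt_create(my_kthread, NULL, NULL, NULL, 0, -1, "mykthread");
 */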
int
lwkt_create(void (*func)(void *), void *arg,
	    struct thread **tdp, thread_t template, int tdflags, int cpu,
	    const char *fmt, ...)
{
    thread_t td;
    __va_list ap;

    td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu, tdflags);
    if (tdp)
	*tdp = td;
    cpu_set_thread_handler(td, lwkt_exit, func, arg);

    /*
     * Set up arg0 for 'ps' etc
     */
    __va_start(ap, fmt);
    kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    __va_end(ap);

    /*
     * Schedule the thread to run
     */
    if ((td->td_flags & TDF_STOPREQ) == 0)
	lwkt_schedule(td);
    else
	td->td_flags &= ~TDF_STOPREQ;
    return 0;
}

/*
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits; cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;
    thread_t std;
    globaldata_t gd;

    if (td->td_flags & TDF_VERBOSE)
	kprintf("kthread %p %s has exited\n", td, td->td_comm);
    caps_exit(td);

    /*
     * Get us into a critical section to interlock gd_freetd and loop
     * until we can get it freed.
     *
     * We have to cache the current td in gd_freetd because objcache_put()ing
     * it would rip it out from under us while our thread is still active.
     */
    gd = mycpu;
    crit_enter_quick(td);
    while ((std = gd->gd_freetd) != NULL) {
	gd->gd_freetd = NULL;
	objcache_put(thread_cache, std);
    }

    /*
     * Remove thread resources from kernel lists and deschedule us for
     * the last time.
     */
    if (td->td_flags & TDF_TSLEEPQ)
	tsleep_remove(td);
    biosched_done(td);
    lwkt_deschedule_self(td);
    lwkt_remove_tdallq(td);
    if (td->td_flags & TDF_ALLOCATED_THREAD)
	gd->gd_freetd = td;
    cpu_thread_exit();
}

/*
 * Remove the thread from its cpu's all-threads list; must be called on
 * the cpu owning the thread.
 */
void
lwkt_remove_tdallq(thread_t td)
{
    KKASSERT(td->td_gd == mycpu);
    TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
}

/*
 * Called when a critical section count would underflow, which indicates
 * an unbalanced crit_exit().
 */
void
crit_panic(void)
{
    thread_t td = curthread;
    int lpri = td->td_pri;

    td->td_pri = 0;
    panic("td_pri is/would-go negative! %p %d", td, lpri);
}

#ifdef SMP

/*
 * Called from debugger/panic on cpus which have been stopped.  We must still
 * process the IPIQ while stopped, even if we were stopped while in a critical
 * section (XXX).
 *
 * If we are dumping also try to process any pending interrupts.  This may
 * or may not work depending on the state of the cpu at the point it was
 * stopped.
 */
void
lwkt_smp_stopped(void)
{
    globaldata_t gd = mycpu;

    crit_enter_gd(gd);
    if (dumping) {
	lwkt_process_ipiq();
	splz();
    } else {
	lwkt_process_ipiq();
    }
    crit_exit_gd(gd);
}

#endif