/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.108 2007/05/24 05:51:27 dillon Exp $
 */

/*
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
 */

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>
#include <sys/spinlock.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

#else

#include <sys/stdint.h>
#include <libcaps/thread.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include <libcaps/globaldata.h>
#include <machine/cpufunc.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <machine/lock.h>
#include <machine/atomic.h>
#include <machine/cpu.h>

#endif

static int untimely_switch = 0;
#ifdef INVARIANTS
static int panic_on_cscount = 0;
#endif
static __int64_t switch_count = 0;
static __int64_t preempt_hit = 0;
static __int64_t preempt_miss = 0;
static __int64_t preempt_weird = 0;
static __int64_t token_contention_count = 0;
static __int64_t mplock_contention_count = 0;
static int lwkt_use_spin_port;

#ifdef _KERNEL

/*
 * We can make all thread ports use the spin backend instead of the thread
 * backend.  This should only be set to debug the spin backend.
 */
TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port);

SYSCTL_INT(_lwkt, OID_AUTO, untimely_switch, CTLFLAG_RW, &untimely_switch, 0, "");
#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, "");
#endif
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, "");
#ifdef INVARIANTS
SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count, CTLFLAG_RW,
        &token_contention_count, 0, "spinning due to token contention");
SYSCTL_QUAD(_lwkt, OID_AUTO, mplock_contention_count, CTLFLAG_RW,
        &mplock_contention_count, 0, "spinning due to MPLOCK contention");
#endif
#endif

/*
 * Kernel Trace
 */
#ifdef _KERNEL

#if !defined(KTR_GIANT_CONTENTION)
#define KTR_GIANT_CONTENTION    KTR_ALL
#endif

KTR_INFO_MASTER(giant);
KTR_INFO(KTR_GIANT_CONTENTION, giant, beg, 0, "thread=%p", sizeof(void *));
KTR_INFO(KTR_GIANT_CONTENTION, giant, end, 1, "thread=%p", sizeof(void *));

#define loggiant(name)  KTR_LOG(giant_ ## name, curthread)

#endif
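
/*
 * Usage note (illustrative): the counters above are exported under the
 * 'lwkt' sysctl tree and can be inspected from userland, e.g.
 *
 *      sysctl lwkt.switch_count lwkt.preempt_hit lwkt.preempt_miss
 *
 * The spin-port debugging backend is selected with the boot-time loader
 * tunable "lwkt.use_spin_port".
 */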

/*
 * These helper procedures handle the runq, they can only be called from
 * within a critical section.
 *
 * WARNING!  Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once
 * SMP is live, enqueueing and dequeueing occur only on the current cpu.
 */
static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&gd->gd_tdrunq[nq], td, td_threadq);
        /* runqmask is passively cleaned up by the switcher */
    }
}

static __inline
void
_lwkt_enqueue(thread_t td)
{
    if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_TSLEEPQ|TDF_BLOCKQ)) == 0) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags |= TDF_RUNQ;
        TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], td, td_threadq);
        gd->gd_runqmask |= 1 << nq;
    }
}

/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * function.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(thread_t td)
{
    crit_enter_quick(td);
    KASSERT(td != &td->td_gd->gd_idlethread,
            ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
    KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
    _lwkt_enqueue(td);
    crit_exit_quick(td);
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(thread_t td)
{
    crit_enter_quick(td);
    _lwkt_dequeue(td);
    crit_exit_quick(td);
}

#ifdef _KERNEL

/*
 * LWKTs operate on a per-cpu basis
 *
 * WARNING!  Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    int i;

    for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i)
        TAILQ_INIT(&gd->gd_tdrunq[i]);
    gd->gd_runqmask = 0;
    TAILQ_INIT(&gd->gd_tdallq);
}

#endif /* _KERNEL */
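
/*
 * Example (sketch): the primitives above combine into the canonical LWKT
 * blocking pattern.  A thread that wants to wait for an event typically
 * deschedules itself inside a critical section, arranges for some other
 * agent to lwkt_schedule() it later, and switches away:
 *
 *      crit_enter();
 *      lwkt_deschedule_self(curthread);
 *      ... register curthread with whatever will reschedule it ...
 *      lwkt_switch();
 *      crit_exit();
 *
 * The critical section keeps the wakeup from racing the deschedule on this
 * cpu; a wakeup from another cpu arrives as an asynchronous IPI.
 */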

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.  If the target cpu is
 * -1 the thread will be created on the current cpu.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags)
{
    void *stack;
    globaldata_t gd = mycpu;

    if (td == NULL) {
        crit_enter_gd(gd);
        if (gd->gd_tdfreecount > 0) {
            --gd->gd_tdfreecount;
            td = TAILQ_FIRST(&gd->gd_tdfreeq);
            KASSERT(td != NULL && (td->td_flags & TDF_RUNNING) == 0,
                    ("lwkt_alloc_thread: unexpected NULL or corrupted td"));
            TAILQ_REMOVE(&gd->gd_tdfreeq, td, td_threadq);
            crit_exit_gd(gd);
            flags |= td->td_flags & (TDF_ALLOCATED_STACK|TDF_ALLOCATED_THREAD);
        } else {
            crit_exit_gd(gd);
#ifdef _KERNEL
            td = zalloc(thread_zone);
#else
            td = malloc(sizeof(struct thread));
#endif
            td->td_kstack = NULL;
            td->td_kstack_size = 0;
            flags |= TDF_ALLOCATED_THREAD;
        }
    }
    if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
        if (flags & TDF_ALLOCATED_STACK) {
#ifdef _KERNEL
            kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size);
#else
            libcaps_free_stack(stack, td->td_kstack_size);
#endif
            stack = NULL;
        }
    }
    if (stack == NULL) {
#ifdef _KERNEL
        stack = (void *)kmem_alloc(&kernel_map, stksize);
#else
        stack = libcaps_alloc_stack(stksize);
#endif
        flags |= TDF_ALLOCATED_STACK;
    }
    if (cpu < 0)
        lwkt_init_thread(td, stack, stksize, flags, mycpu);
    else
        lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
    return(td);
}
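
/*
 * Example (sketch): a typical caller pairs lwkt_alloc_thread() with
 * cpu_set_thread_handler() and a schedule, which is essentially what
 * lwkt_create() does later in this file.  'my_func' and 'my_arg' are
 * placeholder names:
 *
 *      thread_t td;
 *
 *      td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, -1, 0);
 *      cpu_set_thread_handler(td, lwkt_exit, my_func, my_arg);
 *      lwkt_schedule(td);
 */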

#ifdef _KERNEL

/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * All threads start out in a critical section at a priority of
 * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
 * appropriate.  This function may send an IPI message when the
 * requested cpu is not the current cpu and consequently gd_tdallq may
 * not be initialized synchronously from the point of view of the originating
 * cpu.
 *
 * NOTE!  We have to be careful in regards to creating threads for other cpus
 * if SMP has not yet been activated.
 */
#ifdef SMP

static void
lwkt_init_thread_remote(void *arg)
{
    thread_t td = arg;

    /*
     * Protected by critical section held by IPI dispatch
     */
    TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
}

#endif

void
lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
                 struct globaldata *gd)
{
    globaldata_t mygd = mycpu;

    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_kstack_size = stksize;
    td->td_flags = flags;
    td->td_gd = gd;
    td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT;
#ifdef SMP
    if ((flags & TDF_MPSAFE) == 0)
        td->td_mpcount = 1;
#endif
    if (lwkt_use_spin_port)
        lwkt_initport_spin(&td->td_msgport);
    else
        lwkt_initport_thread(&td->td_msgport, td);
    pmap_init_thread(td);
#ifdef SMP
    /*
     * Normally initializing a thread for a remote cpu requires sending an
     * IPI.  However, the idlethread is set up before the other cpus are
     * activated so we have to treat it as a special case.  XXX manipulation
     * of gd_tdallq requires the BGL.
     */
    if (gd == mygd || td == &gd->gd_idlethread) {
        crit_enter_gd(mygd);
        TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
        crit_exit_gd(mygd);
    } else {
        lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
    }
#else
    crit_enter_gd(mygd);
    TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
    crit_exit_gd(mygd);
#endif
}

#endif /* _KERNEL */

void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
    __va_list va;

    __va_start(va, ctl);
    kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
    __va_end(va);
}

void
lwkt_hold(thread_t td)
{
    ++td->td_refs;
}

void
lwkt_rele(thread_t td)
{
    KKASSERT(td->td_refs > 0);
    --td->td_refs;
}

#ifdef _KERNEL

void
lwkt_wait_free(thread_t td)
{
    while (td->td_refs)
        tsleep(td, 0, "tdreap", hz);
}

#endif

void
lwkt_free_thread(thread_t td)
{
    struct globaldata *gd = mycpu;

    KASSERT((td->td_flags & TDF_RUNNING) == 0,
            ("lwkt_free_thread: did not exit! %p", td));

    crit_enter_gd(gd);
    if (gd->gd_tdfreecount < CACHE_NTHREADS &&
        (td->td_flags & TDF_ALLOCATED_THREAD)
    ) {
        ++gd->gd_tdfreecount;
        TAILQ_INSERT_HEAD(&gd->gd_tdfreeq, td, td_threadq);
        crit_exit_gd(gd);
    } else {
        crit_exit_gd(gd);
        if (td->td_kstack && (td->td_flags & TDF_ALLOCATED_STACK)) {
#ifdef _KERNEL
            kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
#else
            libcaps_free_stack(td->td_kstack, td->td_kstack_size);
#endif
            /* gd invalid */
            td->td_kstack = NULL;
            td->td_kstack_size = 0;
        }
        if (td->td_flags & TDF_ALLOCATED_THREAD) {
#ifdef _KERNEL
            zfree(thread_zone, td);
#else
            free(td);
#endif
        }
    }
}
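
/*
 * Example (sketch): td_refs is a plain reference count protecting the
 * thread structure from being freed while someone else still looks at it.
 * Code exposing a thread pointer to another context typically brackets
 * the exposure:
 *
 *      lwkt_hold(td);
 *      ... allow another entity to use td ...
 *      lwkt_rele(td);
 *
 * while the reaping side drains outstanding references before freeing:
 *
 *      lwkt_wait_free(td);
 *      lwkt_free_thread(td);
 */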

/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * The MP lock may be out of sync with the thread's td_mpcount.  lwkt_switch()
 * cleans it up.  Note that the td_switch() function cannot do anything that
 * requires the MP lock since the MP lock will have already been set up for
 * the target thread (not the current thread).  It's nice to have a scheduler
 * that does not need the MP lock to work because it allows us to do some
 * really cool high-performance MP lock optimizations.
 *
 * PREEMPTION NOTE: Preemption occurs via lwkt_preempt().  lwkt_switch()
 * is not called by the current thread in the preemption case, only when
 * the preempting thread blocks (in order to return to the original thread).
 */
void
lwkt_switch(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    thread_t ntd;
#ifdef SMP
    int mpheld;
#endif

    /*
     * Switching from within a 'fast' (non thread switched) interrupt or IPI
     * is illegal.  However, we may have to do it anyway if we hit a fatal
     * kernel trap or we have panicked.
     *
     * If this case occurs, save and restore the interrupt nesting level.
     */
    if (gd->gd_intr_nesting_level) {
        int savegdnest;
        int savegdtrap;

        if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) {
            panic("lwkt_switch: cannot switch from within "
                  "a fast interrupt, yet, td %p\n", td);
        } else {
            savegdnest = gd->gd_intr_nesting_level;
            savegdtrap = gd->gd_trap_nesting_level;
            gd->gd_intr_nesting_level = 0;
            gd->gd_trap_nesting_level = 0;
            if ((td->td_flags & TDF_PANICWARN) == 0) {
                td->td_flags |= TDF_PANICWARN;
                kprintf("Warning: thread switch from interrupt or IPI, "
                        "thread %p (%s)\n", td, td->td_comm);
#ifdef DDB
                db_print_backtrace();
#endif
            }
            lwkt_switch();
            gd->gd_intr_nesting_level = savegdnest;
            gd->gd_trap_nesting_level = savegdtrap;
            return;
        }
    }

    /*
     * Passive release (used to transition from user to kernel mode
     * when we block or switch rather than when we enter the kernel).
     * This function is NOT called if we are switching into a preemption
     * or returning from a preemption.  Typically this causes us to lose
     * our current process designation (if we have one) and become a true
     * LWKT thread, and may also hand the current process designation to
     * another process and schedule its thread.
     */
    if (td->td_release)
        td->td_release(td);

    crit_enter_gd(gd);
#ifdef SMP
    if (td->td_toks)
        lwkt_relalltokens(td);
#endif

    /*
     * We had better not be holding any spin locks, but don't get into an
     * endless panic loop.
     */
    KASSERT(gd->gd_spinlock_rd == NULL || panicstr != NULL,
            ("lwkt_switch: still holding a shared spinlock %p!",
             gd->gd_spinlock_rd));
    KASSERT(gd->gd_spinlocks_wr == 0 || panicstr != NULL,
            ("lwkt_switch: still holding %d exclusive spinlocks!",
             gd->gd_spinlocks_wr));

#ifdef SMP
    /*
     * td_mpcount cannot be used to determine if we currently hold the
     * MP lock because get_mplock() will increment it prior to attempting
     * to get the lock, and switch out if it can't.  Our ownership of
     * the actual lock will remain stable while we are in a critical section
     * (but, of course, another cpu may own or release the lock so the
     * actual value of mp_lock is not stable).
     */
    mpheld = MP_LOCK_HELD();
#ifdef INVARIANTS
    if (td->td_cscount) {
        kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
                td);
        if (panic_on_cscount)
            panic("switching while mastering cpusync");
    }
#endif
#endif
    if ((ntd = td->td_preempted) != NULL) {
        /*
         * We had preempted another thread on this cpu, resume the preempted
         * thread.  This occurs transparently, whether the preempted thread
         * was scheduled or not (it may have been preempted after descheduling
         * itself).
         *
         * We have to set up the MP lock for the original thread after backing
         * out the adjustment that was made to curthread when the original
         * was preempted.
         */
        KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
#ifdef SMP
        if (ntd->td_mpcount && mpheld == 0) {
            panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d",
                  td, ntd, td->td_mpcount, ntd->td_mpcount);
        }
        if (ntd->td_mpcount) {
            td->td_mpcount -= ntd->td_mpcount;
            KKASSERT(td->td_mpcount >= 0);
        }
#endif
        ntd->td_flags |= TDF_PREEMPT_DONE;

        /*
         * XXX.  The interrupt may have woken a thread up; we need to properly
         * set the reschedule flag if the originally interrupted thread is at
         * a lower priority.
         */
        if (gd->gd_runqmask > (2 << (ntd->td_pri & TDPRI_MASK)) - 1)
            need_lwkt_resched();
        /* YYY release mp lock on switchback if original doesn't need it */
    } else {
        /*
         * Priority queue / round-robin at each priority.  Note that user
         * processes run at a fixed, low priority and the user process
         * scheduler deals with interactions between user processes
         * by scheduling and descheduling them from the LWKT queue as
         * necessary.
         *
         * We have to adjust the MP lock for the target thread.  If we
         * need the MP lock and cannot obtain it we try to locate a
         * thread that does not need the MP lock.  If we cannot, we spin
         * instead of HLT.
         *
         * A similar issue exists for the tokens held by the target thread.
         * If we cannot obtain ownership of the tokens we cannot immediately
         * schedule the thread.
         */

        /*
         * If an LWKT reschedule was requested, well that is what we are
         * doing now so clear it.
         */
        clear_lwkt_resched();
again:
        if (gd->gd_runqmask) {
            int nq = bsrl(gd->gd_runqmask);
            if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) == NULL) {
                gd->gd_runqmask &= ~(1 << nq);
                goto again;
            }
#ifdef SMP
            /*
             * THREAD SELECTION FOR AN SMP MACHINE BUILD
             *
             * If the target needs the MP lock and we couldn't get it,
             * or if the target is holding tokens and we could not
             * gain ownership of the tokens, continue looking for a
             * thread to schedule and spin instead of HLT if we can't.
             *
             * NOTE: the mpheld variable is invalid after this conditional, it
             * can change due to both cpu_try_mplock() returning success
             * AND interactions in lwkt_getalltokens() due to the fact that
             * we are trying to check the mpcount of a thread other than
             * the current thread.  Because of this, if the current thread
             * is not holding td_mpcount, an IPI indirectly run via
             * lwkt_getalltokens() can obtain and release the MP lock and
             * cause the core MP lock to be released.
             */
            if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) ||
                (ntd->td_toks && lwkt_getalltokens(ntd) == 0)
            ) {
                u_int32_t rqmask = gd->gd_runqmask;

                mpheld = MP_LOCK_HELD();
                ntd = NULL;
                while (rqmask) {
                    TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) {
                        if (ntd->td_mpcount && !mpheld && !cpu_try_mplock()) {
                            /* spinning due to MP lock being held */
#ifdef INVARIANTS
                            ++mplock_contention_count;
#endif
                            /* mplock still not held, 'mpheld' still valid */
                            continue;
                        }

                        /*
                         * mpheld state invalid after getalltokens call returns
                         * failure, but the variable is only needed for
                         * the loop.
                         */
                        if (ntd->td_toks && !lwkt_getalltokens(ntd)) {
                            /* spinning due to token contention */
#ifdef INVARIANTS
                            ++token_contention_count;
#endif
                            mpheld = MP_LOCK_HELD();
                            continue;
                        }
                        break;
                    }
                    if (ntd)
                        break;
                    rqmask &= ~(1 << nq);
                    nq = bsrl(rqmask);
                }
                if (ntd == NULL) {
                    ntd = &gd->gd_idlethread;
                    ntd->td_flags |= TDF_IDLE_NOHLT;
                    goto using_idle_thread;
                } else {
                    ++gd->gd_cnt.v_swtch;
                    TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                    TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
                }
            } else {
                ++gd->gd_cnt.v_swtch;
                TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
            }
#else
            /*
             * THREAD SELECTION FOR A UP MACHINE BUILD.  We don't have to
             * worry about tokens or the BGL.
             */
            ++gd->gd_cnt.v_swtch;
            TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
#endif
        } else {
            /*
             * We have nothing to run, but only let the idle loop halt
             * the cpu if there are no pending interrupts.
             */
            ntd = &gd->gd_idlethread;
            if (gd->gd_reqflags & RQF_IDLECHECK_MASK)
                ntd->td_flags |= TDF_IDLE_NOHLT;
#ifdef SMP
using_idle_thread:
            /*
             * The idle thread should not be holding the MP lock unless we
             * are trapping in the kernel or in a panic.  Since we select the
             * idle thread unconditionally when no other thread is available,
             * if the MP lock is desired during a panic or kernel trap, we
             * have to loop in the scheduler until we get it.
             */
            if (ntd->td_mpcount) {
                mpheld = MP_LOCK_HELD();
                if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
                    panic("Idle thread %p was holding the BGL!", ntd);
                else if (mpheld == 0)
                    goto again;
            }
#endif
        }
    }
    KASSERT(ntd->td_pri >= TDPRI_CRIT,
            ("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri));

    /*
     * Do the actual switch.  If the new target does not need the MP lock
     * and we are holding it, release the MP lock.  If the new target requires
     * the MP lock we have already acquired it for the target.
     */
#ifdef SMP
    if (ntd->td_mpcount == 0) {
        if (MP_LOCK_HELD())
            cpu_rel_mplock();
    } else {
        ASSERT_MP_LOCK_HELD(ntd);
    }
#endif
    if (td != ntd) {
        ++switch_count;
        td->td_switch(ntd);
    }
    /* NOTE: current cpu may have changed after switch */
    crit_exit_quick(td);
}
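
/*
 * Worked example (illustrative): gd_runqmask has one bit set for each
 * priority queue that may contain runnable threads.  If, say, gd_runqmask
 * is 0x00000012 then queues 1 and 4 are marked non-empty; bsrl(0x00000012)
 * returns 4, so the selection loop in lwkt_switch() starts with the
 * highest-priority candidate queue (nq == 4) and only falls back to queue 1,
 * or ultimately to the idle thread, if queue 4 turns out to be stale or its
 * threads cannot be scheduled.
 */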

/*
 * Request that the target thread preempt the current thread.  Preemption
 * only works under a specific set of conditions:
 *
 *      - We are not preempting ourselves
 *      - The target thread is owned by the current cpu
 *      - We are not currently being preempted
 *      - The target is not currently being preempted
 *      - We are able to satisfy the target's MP lock requirements (if any).
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critpri is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * XXX at the moment we run the target thread in a critical section during
 * the preemption in order to prevent the target from taking interrupts
 * that *WE* can't.  Preemption is strictly limited to interrupt threads
 * and interrupt-like threads, outside of a critical section, and the
 * preempted source thread will be resumed the instant the target blocks
 * whether or not the source is scheduled (i.e. preemption is supposed to
 * be as transparent as possible).
 *
 * The target thread inherits our MP count (added to its own) for the
 * duration of the preemption in order to preserve the atomicity of the
 * MP lock during the preemption.  Therefore, any preempting targets must be
 * careful in regards to MP assertions.  Note that the MP count may be
 * out of sync with the physical mp_lock, but we do not have to preserve
 * the original ownership of the lock if it was out of sync (that is, we
 * can leave it synchronized on return).
 */
void
lwkt_preempt(thread_t ntd, int critpri)
{
    struct globaldata *gd = mycpu;
    thread_t td;
#ifdef SMP
    int mpheld;
    int savecnt;
#endif

    /*
     * The caller has put us in a critical section.  We can only preempt
     * if the caller of the caller was not in a critical section (basically
     * a local interrupt), as determined by the 'critpri' parameter.  We
     * also can't preempt if the caller is holding any spinlocks (even if
     * he isn't in a critical section).  This also handles the tokens test.
     *
     * YYY The target thread must be in a critical section (else it must
     * inherit our critical section?  I dunno yet).
     *
     * Set need_lwkt_resched() unconditionally for now YYY.
     */
    KASSERT(ntd->td_pri >= TDPRI_CRIT, ("BADCRIT0 %d", ntd->td_pri));

    td = gd->gd_curthread;
    if ((ntd->td_pri & TDPRI_MASK) <= (td->td_pri & TDPRI_MASK)) {
        ++preempt_miss;
        return;
    }
    if ((td->td_pri & ~TDPRI_MASK) > critpri) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#ifdef SMP
    if (ntd->td_gd != gd) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#endif
    /*
     * Take the easy way out and do not preempt if the target is holding
     * any spinlocks.  We could test whether the thread(s) being
     * preempted interlock against the target thread's tokens and whether
     * we can get all the target thread's tokens, but this situation
     * should not occur very often so it's easier to simply not preempt.
     * Also, plain spinlocks are impossible to figure out at this point so
     * just don't preempt.
     */
    if (gd->gd_spinlock_rd || gd->gd_spinlocks_wr) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
    if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
        ++preempt_weird;
        need_lwkt_resched();
        return;
    }
    if (ntd->td_preempted) {
        ++preempt_hit;
        need_lwkt_resched();
        return;
    }
#ifdef SMP
    /*
     * note: an interrupt might have occurred just as we were transitioning
     * to or from the MP lock.  In this case td_mpcount will be pre-disposed
     * (non-zero) but not actually synchronized with the actual state of the
     * lock.  We can use it to imply an MP lock requirement for the
     * preemption but we cannot use it to test whether we hold the MP lock
     * or not.
     */
    savecnt = td->td_mpcount;
    mpheld = MP_LOCK_HELD();
    ntd->td_mpcount += td->td_mpcount;
    if (mpheld == 0 && ntd->td_mpcount && !cpu_try_mplock()) {
        ntd->td_mpcount -= td->td_mpcount;
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#endif

    /*
     * Since we are able to preempt the current thread, there is no need to
     * call need_lwkt_resched().
     */
    ++preempt_hit;
    ntd->td_preempted = td;
    td->td_flags |= TDF_PREEMPT_LOCK;
    td->td_switch(ntd);
    KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
#ifdef SMP
    KKASSERT(savecnt == td->td_mpcount);
    mpheld = MP_LOCK_HELD();
    if (mpheld && td->td_mpcount == 0)
        cpu_rel_mplock();
    else if (mpheld == 0 && td->td_mpcount)
        panic("lwkt_preempt(): MP lock was not held through");
#endif
    ntd->td_preempted = NULL;
    td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
}
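
/*
 * Worked example (illustrative): the bits above TDPRI_MASK in td_pri count
 * critical section nesting in units of TDPRI_CRIT, so
 * (td->td_pri & ~TDPRI_MASK) is the current thread's nesting level
 * expressed as a priority offset.  lwkt_schedule() passes
 * critpri == TDPRI_CRIT, i.e. the one critical section entered by the
 * scheduling path itself is ignored.  If the current thread was already in
 * a critical section of its own before that, the test
 * (td->td_pri & ~TDPRI_MASK) > critpri succeeds and the preemption is
 * refused in favor of need_lwkt_resched().
 */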

/*
 * Yield our thread while higher priority threads are pending.  This is
 * typically called when we leave a critical section but it can be safely
 * called while we are in a critical section.
 *
 * This function will not generally yield to equal priority threads but it
 * can occur as a side effect.  Note that lwkt_switch() is called from
 * inside the critical section to prevent its own crit_exit() from reentering
 * lwkt_yield_quick().
 *
 * gd_reqflags indicates that *something* changed, e.g. an interrupt or softint
 * came along but was blocked and made pending.
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield_quick(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    /*
     * gd_reqflags is cleared in splz if the cpl is 0.  If we were to clear
     * it with a non-zero cpl then we might not wind up calling splz after
     * a task switch when the critical section is exited even though the
     * new task could accept the interrupt.
     *
     * XXX from crit_exit() only called after last crit section is released.
     * If called directly will run splz() even if in a critical section.
     *
     * td_nest_count prevents deep nesting via splz() or doreti().  Note that
     * except for this special case, we MUST call splz() here to handle any
     * pending ints, particularly after we switch, or we might accidentally
     * halt the cpu with interrupts pending.
     */
    if (gd->gd_reqflags && td->td_nest_count < 2)
        splz();

    /*
     * YYY enabling will cause wakeup() to task-switch, which really
     * confused the old 4.x code.  This is a good way to simulate
     * preemption and MP without actually doing preemption or MP, because a
     * lot of code assumes that wakeup() does not block.
     */
    if (untimely_switch && td->td_nest_count == 0 &&
        gd->gd_intr_nesting_level == 0
    ) {
        crit_enter_quick(td);
        /*
         * YYY temporary hacks until we disassociate the userland scheduler
         * from the LWKT scheduler.
         */
        if (td->td_flags & TDF_RUNQ) {
            lwkt_switch();              /* will not reenter yield function */
        } else {
            lwkt_schedule_self(td);     /* make sure we are scheduled */
            lwkt_switch();              /* will not reenter yield function */
            lwkt_deschedule_self(td);   /* make sure we are descheduled */
        }
        crit_exit_noyield(td);
    }
}

/*
 * This implements a normal yield which, unlike _quick, will yield to equal
 * priority threads as well.  Note that gd_reqflags tests will be handled by
 * the crit_exit() call in lwkt_switch().
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
    lwkt_schedule_self(curthread);
    lwkt_switch();
}

/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * We have a little helper inline function which does additional work after
 * the thread has been enqueued, including dealing with preemption and
 * setting need_lwkt_resched() (which prevents the kernel from returning
 * to userland until it has processed higher priority threads).
 *
 * It is possible for this routine to be called after a failed _enqueue
 * (due to the target thread migrating, sleeping, or otherwise blocked).
 * We have to check that the thread is actually on the run queue!
 */
static __inline
void
_lwkt_schedule_post(globaldata_t gd, thread_t ntd, int cpri)
{
    if (ntd->td_flags & TDF_RUNQ) {
        if (ntd->td_preemptable) {
            ntd->td_preemptable(ntd, cpri);     /* YYY +token */
        } else if ((ntd->td_flags & TDF_NORESCHED) == 0 &&
            (ntd->td_pri & TDPRI_MASK) > (gd->gd_curthread->td_pri & TDPRI_MASK)
        ) {
            need_lwkt_resched();
        }
    }
}

void
lwkt_schedule(thread_t td)
{
    globaldata_t mygd = mycpu;

    KASSERT(td != &td->td_gd->gd_idlethread,
            ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
    crit_enter_gd(mygd);
    KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
    if (td == mygd->gd_curthread) {
        _lwkt_enqueue(td);
    } else {
        /*
         * If we own the thread, there is no race (since we are in a
         * critical section).  If we do not own the thread there might
         * be a race but the target cpu will deal with it.
         */
#ifdef SMP
        if (td->td_gd == mygd) {
            _lwkt_enqueue(td);
            _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_schedule, td);
        }
#else
        _lwkt_enqueue(td);
        _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
#endif
    }
    crit_exit_gd(mygd);
}
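
/*
 * Example (sketch): lwkt_schedule() is the general wakeup primitive.  A
 * driver or interrupt handler that owns a worker thread can simply call
 *
 *      lwkt_schedule(worker_td);
 *
 * from any cpu; if 'worker_td' (a placeholder name) belongs to another cpu
 * the request is forwarded to that cpu as an asynchronous IPI instead of
 * touching the remote run queue directly.
 */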

#ifdef SMP

/*
 * Thread migration using a 'Pull' method.  The thread may or may not be
 * the current thread.  It MUST be descheduled and in a stable state.
 * lwkt_giveaway() must be called on the cpu owning the thread.
 *
 * At any point after lwkt_giveaway() is called, the target cpu may
 * 'pull' the thread by calling lwkt_acquire().
 *
 * MPSAFE - must be called under very specific conditions.
 */
void
lwkt_giveaway(thread_t td)
{
    globaldata_t gd = mycpu;

    crit_enter_gd(gd);
    KKASSERT(td->td_gd == gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    td->td_flags |= TDF_MIGRATING;
    crit_exit_gd(gd);
}

void
lwkt_acquire(thread_t td)
{
    globaldata_t gd;
    globaldata_t mygd;

    KKASSERT(td->td_flags & TDF_MIGRATING);
    gd = td->td_gd;
    mygd = mycpu;
    if (gd != mycpu) {
        cpu_lfence();
        KKASSERT((td->td_flags & TDF_RUNQ) == 0);
        crit_enter_gd(mygd);
        while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK))
            cpu_lfence();
        td->td_gd = mygd;
        TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
        td->td_flags &= ~TDF_MIGRATING;
        crit_exit_gd(mygd);
    } else {
        crit_enter_gd(mygd);
        TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
        td->td_flags &= ~TDF_MIGRATING;
        crit_exit_gd(mygd);
    }
}

#endif

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
#ifdef SMP
    if (td == curthread) {
        _lwkt_dequeue(td);
    } else {
        if (td->td_gd == mycpu) {
            _lwkt_dequeue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
        }
    }
#else
    _lwkt_dequeue(td);
#endif
    crit_exit();
}

/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread; LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
 *
 * We have to retain the critical section count which uses the high bits
 * of the td_pri field.  The specified priority may also indicate zero or
 * more critical sections by adding TDPRI_CRIT*N.
 *
 * Note that we requeue the thread whether it winds up on a different runq
 * or not.  uio_yield() depends on this and the routine is not normally
 * called with the same priority otherwise.
 */
void
lwkt_setpri(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    KKASSERT(td->td_gd == mycpu);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

void
lwkt_setpri_self(int pri)
{
    thread_t td = curthread;

    KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}
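
/*
 * Example (sketch): the giveaway/acquire pair above implements the 'pull'
 * style migration handshake for a thread that is descheduled and stable.
 * The owning cpu first releases the thread, then the destination cpu
 * claims it and may reschedule it locally:
 *
 *      lwkt_giveaway(td);              (on the source cpu, current owner)
 *
 *      lwkt_acquire(td);               (later, on the destination cpu)
 *      lwkt_schedule(td);
 *
 * The thread must remain in that stable state for the whole sequence, as
 * noted in the comment above lwkt_giveaway().
 */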

/*
 * Determine if there is a runnable thread at a higher priority than
 * the current thread.  lwkt_setpri() does not check this automatically.
 * Return 1 if there is, 0 if there isn't.
 *
 * Example: if bit 31 of runqmask is set and the current thread is priority
 * 30, then we wind up checking the mask: 0x80000000 against 0x7fffffff.
 *
 * If nq reaches 31 the shift operation will overflow to 0 and we will wind
 * up comparing against 0xffffffff, a comparison that will always be false.
 */
int
lwkt_checkpri_self(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    int nq = td->td_pri & TDPRI_MASK;

    while (gd->gd_runqmask > (__uint32_t)(2 << nq) - 1) {
        if (TAILQ_FIRST(&gd->gd_tdrunq[nq + 1]))
            return(1);
        ++nq;
    }
    return(0);
}

/*
 * Migrate the current thread to the specified cpu.
 *
 * This is accomplished by descheduling ourselves from the current cpu,
 * moving our thread to the tdallq of the target cpu, IPI messaging the
 * target cpu, and switching out.  TDF_MIGRATING prevents scheduling
 * races while the thread is being migrated.
 */
#ifdef SMP
static void lwkt_setcpu_remote(void *arg);
#endif

void
lwkt_setcpu_self(globaldata_t rgd)
{
#ifdef SMP
    thread_t td = curthread;

    if (td->td_gd != rgd) {
        crit_enter_quick(td);
        td->td_flags |= TDF_MIGRATING;
        lwkt_deschedule_self(td);
        TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
        lwkt_send_ipiq(rgd, (ipifunc1_t)lwkt_setcpu_remote, td);
        lwkt_switch();
        /* we are now on the target cpu */
        TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);
        crit_exit_quick(td);
    }
#endif
}

void
lwkt_migratecpu(int cpuid)
{
#ifdef SMP
    globaldata_t rgd;

    rgd = globaldata_find(cpuid);
    lwkt_setcpu_self(rgd);
#endif
}

/*
 * Remote IPI for cpu migration (called while in a critical section so we
 * do not have to enter another one).  The thread has already been moved to
 * our cpu's allq, but we must wait for the thread to be completely switched
 * out on the originating cpu before we schedule it on ours or the stack
 * state may be corrupt.  We clear TDF_MIGRATING after flushing the GD
 * change to main memory.
 *
 * XXX The use of TDF_MIGRATING might not be sufficient to avoid races
 * against wakeups.  It is best if this interface is used only when there
 * are no pending events that might try to schedule the thread.
 */
#ifdef SMP
static void
lwkt_setcpu_remote(void *arg)
{
    thread_t td = arg;
    globaldata_t gd = mycpu;

    while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK))
        cpu_lfence();
    td->td_gd = gd;
    cpu_sfence();
    td->td_flags &= ~TDF_MIGRATING;
    KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
    _lwkt_enqueue(td);
}
#endif

struct lwp *
lwkt_preempted_proc(void)
{
    thread_t td = curthread;
    while (td->td_preempted)
        td = td->td_preempted;
    return(td->td_lwp);
}
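
/*
 * Example (sketch): a kernel thread that wants to bind itself to a
 * particular cpu can use either migration interface above, e.g.
 *
 *      lwkt_migratecpu(0);                     (by cpu id)
 *      lwkt_setcpu_self(globaldata_find(0));   (by globaldata pointer)
 *
 * Both act only on the calling thread and return once the caller is
 * running on the target cpu.
 */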

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * NOTE!  By default new threads are created with the MP lock held.  A
 * thread which does not require the MP lock should release it by calling
 * rel_mplock() at the start of the new thread.
 */
int
lwkt_create(void (*func)(void *), void *arg,
            struct thread **tdp, thread_t template, int tdflags, int cpu,
            const char *fmt, ...)
{
    thread_t td;
    __va_list ap;

    td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu,
                           tdflags);
    if (tdp)
        *tdp = td;
    cpu_set_thread_handler(td, lwkt_exit, func, arg);

    /*
     * Set up arg0 for 'ps' etc
     */
    __va_start(ap, fmt);
    kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    __va_end(ap);

    /*
     * Schedule the thread to run
     */
    if ((td->td_flags & TDF_STOPREQ) == 0)
        lwkt_schedule(td);
    else
        td->td_flags &= ~TDF_STOPREQ;
    return 0;
}

/*
 * kthread_* is specific to the kernel and is not needed by userland.
 */
#ifdef _KERNEL

/*
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;
    globaldata_t gd;

    if (td->td_flags & TDF_VERBOSE)
        kprintf("kthread %p %s has exited\n", td, td->td_comm);
    caps_exit(td);
    crit_enter_quick(td);
    lwkt_deschedule_self(td);
    gd = mycpu;
    lwkt_remove_tdallq(td);
    if (td->td_flags & TDF_ALLOCATED_THREAD) {
        ++gd->gd_tdfreecount;
        TAILQ_INSERT_TAIL(&gd->gd_tdfreeq, td, td_threadq);
    }
    cpu_thread_exit();
}

void
lwkt_remove_tdallq(thread_t td)
{
    KKASSERT(td->td_gd == mycpu);
    TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
}

#endif /* _KERNEL */

void
crit_panic(void)
{
    thread_t td = curthread;
    int lpri = td->td_pri;

    td->td_pri = 0;
    panic("td_pri is/would-go negative! %p %d", td, lpri);
}

#ifdef SMP

/*
 * Called from debugger/panic on cpus which have been stopped.  We must still
 * process the IPIQ while stopped, even if we were stopped while in a critical
 * section (XXX).
 *
 * If we are dumping also try to process any pending interrupts.  This may
 * or may not work depending on the state of the cpu at the point it was
 * stopped.
 */
void
lwkt_smp_stopped(void)
{
    globaldata_t gd = mycpu;

    crit_enter_gd(gd);
    if (dumping) {
        lwkt_process_ipiq();
        splz();
    } else {
        lwkt_process_ipiq();
    }
    crit_exit_gd(gd);
}

/*
 * get_mplock() calls this routine if it is unable to obtain the MP lock.
 * get_mplock() has already incremented td_mpcount.  We must block and
 * not return until giant is held.
 *
 * All we have to do is lwkt_switch() away.  The LWKT scheduler will not
 * reschedule the thread until it can obtain the giant lock for it.
 */
void
lwkt_mp_lock_contested(void)
{
#ifdef _KERNEL
    loggiant(beg);
#endif
    lwkt_switch();
#ifdef _KERNEL
    loggiant(end);
#endif
}

#endif
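
/*
 * Example (sketch): creating and tearing down a simple kernel thread with
 * the interfaces in this file.  'my_worker' and 'my_td' are placeholder
 * names:
 *
 *      static void
 *      my_worker(void *arg)
 *      {
 *              ... do work, blocking as needed ...
 *              lwkt_exit();    (returning normally also ends up in
 *                               lwkt_exit() via the thread handler set
 *                               by lwkt_create())
 *      }
 *
 *      thread_t my_td;
 *
 *      lwkt_create(my_worker, NULL, &my_td, NULL, 0, -1, "myworker");
 *
 * Since new threads are created with the MP lock held by default, an MP
 * safe worker would call rel_mplock() at the top of my_worker(), as noted
 * in the lwkt_create() comment above.
 */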