/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.85 2005/11/08 22:38:43 dillon Exp $
 */

/*
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
 */

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/ipl.h>
#include <machine/smp.h>

#else

#include <sys/stdint.h>
#include <libcaps/thread.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include <libcaps/globaldata.h>
#include <machine/cpufunc.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <machine/lock.h>
#include <machine/atomic.h>
#include <machine/cpu.h>

#endif

static int untimely_switch = 0;
#ifdef INVARIANTS
static int panic_on_cscount = 0;
#endif
static __int64_t switch_count = 0;
static __int64_t preempt_hit = 0;
static __int64_t preempt_miss = 0;
static __int64_t preempt_weird = 0;
static __int64_t token_contention_count = 0;
static __int64_t mplock_contention_count = 0;

#ifdef _KERNEL

SYSCTL_INT(_lwkt, OID_AUTO, untimely_switch, CTLFLAG_RW, &untimely_switch, 0, "");
#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, "");
#endif
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, "");
#ifdef INVARIANTS
SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count, CTLFLAG_RW,
    &token_contention_count, 0, "spinning due to token contention");
SYSCTL_QUAD(_lwkt, OID_AUTO, mplock_contention_count, CTLFLAG_RW,
    &mplock_contention_count, 0, "spinning due to MPLOCK contention");
#endif
#endif

/*
 * These helper procedures handle the runq, they can only be called from
 * within a critical section.
 *
 * WARNING! Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once
 * SMP is live, enqueuing and dequeuing only occur on the current cpu.
 */
static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&gd->gd_tdrunq[nq], td, td_threadq);
        /* runqmask is passively cleaned up by the switcher */
    }
}

static __inline
void
_lwkt_enqueue(thread_t td)
{
    if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING)) == 0) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags |= TDF_RUNQ;
        TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], td, td_threadq);
        gd->gd_runqmask |= 1 << nq;
    }
}

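/*
 * A minimal sketch of how the per-cpu run queue bitmask is consumed; this is
 * a hypothetical helper for illustration only (compiled out below), not part
 * of the LWKT API.  It assumes only names already used in this file:
 * gd_runqmask, in which _lwkt_enqueue() sets one bit per non-empty priority
 * queue, and bsrl(), which lwkt_switch() uses to find the highest set bit.
 */
#if 0
static __inline int
example_highest_runnable_pri(globaldata_t gd)
{
    if (gd->gd_runqmask == 0)
        return (-1);                    /* nothing runnable; idle thread runs */
    return (bsrl(gd->gd_runqmask));     /* index of best non-empty queue */
}
#endif
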
/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * function.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(thread_t td)
{
    crit_enter_quick(td);
    KASSERT(td->td_wait == NULL, ("lwkt_schedule_self(): td_wait not NULL!"));
    KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
    _lwkt_enqueue(td);
#ifdef _KERNEL
    if (td->td_proc && td->td_proc->p_stat == SSLEEP)
        panic("SCHED SELF PANIC");
#endif
    crit_exit_quick(td);
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(thread_t td)
{
    crit_enter_quick(td);
    KASSERT(td->td_wait == NULL, ("lwkt_deschedule_self(): td_wait not NULL!"));
    _lwkt_dequeue(td);
    crit_exit_quick(td);
}

#ifdef _KERNEL

/*
 * LWKTs operate on a per-cpu basis
 *
 * WARNING!  Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    int i;

    for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i)
        TAILQ_INIT(&gd->gd_tdrunq[i]);
    gd->gd_runqmask = 0;
    TAILQ_INIT(&gd->gd_tdallq);
}

#endif /* _KERNEL */

/*
 * Initialize a thread wait structure prior to first use.
 *
 * NOTE!  called from low level boot code, we cannot do anything fancy!
 */
void
lwkt_wait_init(lwkt_wait_t w)
{
    lwkt_token_init(&w->wa_token);
    TAILQ_INIT(&w->wa_waitq);
    w->wa_gen = 0;
    w->wa_count = 0;
}

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.  If the target cpu is
 * -1 the thread will be created on the current cpu.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td, int stksize, int cpu)
{
    void *stack;
    int flags = 0;
    globaldata_t gd = mycpu;

    if (td == NULL) {
        crit_enter_gd(gd);
        if (gd->gd_tdfreecount > 0) {
            --gd->gd_tdfreecount;
            td = TAILQ_FIRST(&gd->gd_tdfreeq);
            KASSERT(td != NULL && (td->td_flags & TDF_RUNNING) == 0,
                ("lwkt_alloc_thread: unexpected NULL or corrupted td"));
            TAILQ_REMOVE(&gd->gd_tdfreeq, td, td_threadq);
            crit_exit_gd(gd);
            flags = td->td_flags & (TDF_ALLOCATED_STACK|TDF_ALLOCATED_THREAD);
        } else {
            crit_exit_gd(gd);
#ifdef _KERNEL
            td = zalloc(thread_zone);
#else
            td = malloc(sizeof(struct thread));
#endif
            td->td_kstack = NULL;
            td->td_kstack_size = 0;
            flags |= TDF_ALLOCATED_THREAD;
        }
    }
    if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
        if (flags & TDF_ALLOCATED_STACK) {
#ifdef _KERNEL
            kmem_free(kernel_map, (vm_offset_t)stack, td->td_kstack_size);
#else
            libcaps_free_stack(stack, td->td_kstack_size);
#endif
            stack = NULL;
        }
    }
    if (stack == NULL) {
#ifdef _KERNEL
        stack = (void *)kmem_alloc(kernel_map, stksize);
#else
        stack = libcaps_alloc_stack(stksize);
#endif
        flags |= TDF_ALLOCATED_STACK;
    }
    if (cpu < 0)
        lwkt_init_thread(td, stack, stksize, flags, mycpu);
    else
        lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
    return(td);
}

#ifdef _KERNEL

/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * All threads start out in a critical section at a priority of
 * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
 * appropriate.  This function may send an IPI message when the
 * requested cpu is not the current cpu and consequently gd_tdallq may
 * not be initialized synchronously from the point of view of the originating
 * cpu.
 *
 * NOTE! we have to be careful with regard to creating threads for other cpus
 * if SMP has not yet been activated.
 */
#ifdef SMP

static void
lwkt_init_thread_remote(void *arg)
{
    thread_t td = arg;

    TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
}

#endif

void
lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
        struct globaldata *gd)
{
    globaldata_t mygd = mycpu;

    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_kstack_size = stksize;
    td->td_flags |= flags;
    td->td_gd = gd;
    td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT;
    lwkt_initport(&td->td_msgport, td);
    pmap_init_thread(td);
#ifdef SMP
    /*
     * Normally initializing a thread for a remote cpu requires sending an
     * IPI.  However, the idlethread is set up before the other cpus are
     * activated so we have to treat it as a special case.  XXX manipulation
     * of gd_tdallq requires the BGL.
     */
    if (gd == mygd || td == &gd->gd_idlethread) {
        crit_enter_gd(mygd);
        TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
        crit_exit_gd(mygd);
    } else {
        lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
    }
#else
    crit_enter_gd(mygd);
    TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
    crit_exit_gd(mygd);
#endif
}

#endif /* _KERNEL */

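/*
 * A minimal usage sketch of the allocation/initialization path above
 * (compiled out, hypothetical example only).  example_spawn, example_func
 * and example_arg are assumed placeholder names; the call sequence mirrors
 * what lwkt_create(), later in this file, does for real callers.
 */
#if 0
static void
example_spawn(void (*example_func)(void *), void *example_arg)
{
    thread_t td;

    td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, -1);
    cpu_set_thread_handler(td, lwkt_exit, example_func, example_arg);
    lwkt_set_comm(td, "example");
    lwkt_schedule(td);
}
#endif
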
void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
    __va_list va;

    __va_start(va, ctl);
    vsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
    __va_end(va);
}

void
lwkt_hold(thread_t td)
{
    ++td->td_refs;
}

void
lwkt_rele(thread_t td)
{
    KKASSERT(td->td_refs > 0);
    --td->td_refs;
}

#ifdef _KERNEL

void
lwkt_wait_free(thread_t td)
{
    while (td->td_refs)
        tsleep(td, 0, "tdreap", hz);
}

#endif

void
lwkt_free_thread(thread_t td)
{
    struct globaldata *gd = mycpu;

    KASSERT((td->td_flags & TDF_RUNNING) == 0,
        ("lwkt_free_thread: did not exit! %p", td));

    crit_enter_gd(gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    if (gd->gd_tdfreecount < CACHE_NTHREADS &&
        (td->td_flags & TDF_ALLOCATED_THREAD)
    ) {
        ++gd->gd_tdfreecount;
        TAILQ_INSERT_HEAD(&gd->gd_tdfreeq, td, td_threadq);
        crit_exit_gd(gd);
    } else {
        crit_exit_gd(gd);
        if (td->td_kstack && (td->td_flags & TDF_ALLOCATED_STACK)) {
#ifdef _KERNEL
            kmem_free(kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
#else
            libcaps_free_stack(td->td_kstack, td->td_kstack_size);
#endif
            /* gd invalid */
            td->td_kstack = NULL;
            td->td_kstack_size = 0;
        }
        if (td->td_flags & TDF_ALLOCATED_THREAD) {
#ifdef _KERNEL
            zfree(thread_zone, td);
#else
            free(td);
#endif
        }
    }
}


/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * The MP lock may be out of sync with the thread's td_mpcount.  lwkt_switch()
 * cleans it up.  Note that the td_switch() function cannot do anything that
 * requires the MP lock since the MP lock will have already been set up for
 * the target thread (not the current thread).  It's nice to have a scheduler
 * that does not need the MP lock to work because it allows us to do some
 * really cool high-performance MP lock optimizations.
 *
 * PREEMPTION NOTE: Preemption occurs via lwkt_preempt().  lwkt_switch()
 * is not called by the current thread in the preemption case, only when
 * the preempting thread blocks (in order to return to the original thread).
 */
void
lwkt_switch(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    thread_t ntd;
#ifdef SMP
    int mpheld;
#endif

    /*
     * We had better not be holding any spin locks.
     */
    KKASSERT(td->td_spinlocks == 0);

    /*
     * Switching from within a 'fast' (non thread switched) interrupt or IPI
     * is illegal.  However, we may have to do it anyway if we hit a fatal
     * kernel trap or we have panicked.
     *
     * If this case occurs save and restore the interrupt nesting level.
     */
    if (gd->gd_intr_nesting_level) {
        int savegdnest;
        int savegdtrap;

        if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) {
            panic("lwkt_switch: cannot switch from within "
                  "a fast interrupt, yet, td %p\n", td);
        } else {
            savegdnest = gd->gd_intr_nesting_level;
            savegdtrap = gd->gd_trap_nesting_level;
            gd->gd_intr_nesting_level = 0;
            gd->gd_trap_nesting_level = 0;
            if ((td->td_flags & TDF_PANICWARN) == 0) {
                td->td_flags |= TDF_PANICWARN;
                printf("Warning: thread switch from interrupt or IPI, "
                       "thread %p (%s)\n", td, td->td_comm);
#ifdef DDB
                db_print_backtrace();
#endif
            }
            lwkt_switch();
            gd->gd_intr_nesting_level = savegdnest;
            gd->gd_trap_nesting_level = savegdtrap;
            return;
        }
    }

    /*
     * Passive release (used to transition from user to kernel mode
     * when we block or switch rather than when we enter the kernel).
     * This function is NOT called if we are switching into a preemption
     * or returning from a preemption.  Typically this causes us to lose
     * our current process designation (if we have one) and become a true
     * LWKT thread, and may also hand the current process designation to
     * another process and schedule its thread.
     */
    if (td->td_release)
        td->td_release(td);

    crit_enter_gd(gd);

#ifdef SMP
    /*
     * td_mpcount cannot be used to determine if we currently hold the
     * MP lock because get_mplock() will increment it prior to attempting
     * to get the lock, and switch out if it can't.  Our ownership of
     * the actual lock will remain stable while we are in a critical section
     * (but, of course, another cpu may own or release the lock so the
     * actual value of mp_lock is not stable).
     */
    mpheld = MP_LOCK_HELD();
#ifdef INVARIANTS
    if (td->td_cscount) {
        printf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
            td);
        if (panic_on_cscount)
            panic("switching while mastering cpusync");
    }
#endif
#endif
    if ((ntd = td->td_preempted) != NULL) {
        /*
         * We had preempted another thread on this cpu, resume the preempted
         * thread.  This occurs transparently, whether the preempted thread
         * was scheduled or not (it may have been preempted after descheduling
         * itself).
         *
         * We have to set up the MP lock for the original thread after backing
         * out the adjustment that was made to curthread when the original
         * was preempted.
         */
        KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
#ifdef SMP
        if (ntd->td_mpcount && mpheld == 0) {
            panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d",
                td, ntd, td->td_mpcount, ntd->td_mpcount);
        }
        if (ntd->td_mpcount) {
            td->td_mpcount -= ntd->td_mpcount;
            KKASSERT(td->td_mpcount >= 0);
        }
#endif
        ntd->td_flags |= TDF_PREEMPT_DONE;

        /*
         * XXX.  The interrupt may have woken a thread up, we need to properly
         * set the reschedule flag if the originally interrupted thread is at
         * a lower priority.
         */
        if (gd->gd_runqmask > (2 << (ntd->td_pri & TDPRI_MASK)) - 1)
            need_lwkt_resched();
        /* YYY release mp lock on switchback if original doesn't need it */
    } else {
        /*
         * Priority queue / round-robin at each priority.  Note that user
         * processes run at a fixed, low priority and the user process
         * scheduler deals with interactions between user processes
         * by scheduling and descheduling them from the LWKT queue as
         * necessary.
         *
         * We have to adjust the MP lock for the target thread.  If we
         * need the MP lock and cannot obtain it we try to locate a
         * thread that does not need the MP lock.  If we cannot, we spin
         * instead of HLT.
         *
         * A similar issue exists for the tokens held by the target thread.
         * If we cannot obtain ownership of the tokens we cannot immediately
         * schedule the thread.
         */

        /*
         * We are switching threads.  If there are any pending requests for
         * tokens we can satisfy all of them here.
         */
#ifdef SMP
        if (gd->gd_tokreqbase)
            lwkt_drain_token_requests();
#endif

        /*
         * If an LWKT reschedule was requested, well that is what we are
         * doing now so clear it.
         */
        clear_lwkt_resched();
again:
        if (gd->gd_runqmask) {
            int nq = bsrl(gd->gd_runqmask);
            if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) == NULL) {
                gd->gd_runqmask &= ~(1 << nq);
                goto again;
            }
#ifdef SMP
            /*
             * THREAD SELECTION FOR AN SMP MACHINE BUILD
             *
             * If the target needs the MP lock and we couldn't get it,
             * or if the target is holding tokens and we could not
             * gain ownership of the tokens, continue looking for a
             * thread to schedule and spin instead of HLT if we can't.
             *
             * NOTE: the mpheld variable is invalid after this conditional, it
             * can change due to both cpu_try_mplock() returning success
             * AND interactions in lwkt_chktokens() due to the fact that
             * we are trying to check the mpcount of a thread other than
             * the current thread.  Because of this, if the current thread
             * is not holding td_mpcount, an IPI indirectly run via
             * lwkt_chktokens() can obtain and release the MP lock and
             * cause the core MP lock to be released.
             */
            if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) ||
                (ntd->td_toks && lwkt_chktokens(ntd) == 0)
            ) {
                u_int32_t rqmask = gd->gd_runqmask;

                mpheld = MP_LOCK_HELD();
                ntd = NULL;
                while (rqmask) {
                    TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) {
                        if (ntd->td_mpcount && !mpheld && !cpu_try_mplock()) {
                            /* spinning due to MP lock being held */
#ifdef INVARIANTS
                            ++mplock_contention_count;
#endif
                            /* mplock still not held, 'mpheld' still valid */
                            continue;
                        }

                        /*
                         * mpheld state invalid after chktokens call returns
                         * failure, but the variable is only needed for
                         * the loop.
                         */
                        if (ntd->td_toks && !lwkt_chktokens(ntd)) {
                            /* spinning due to token contention */
#ifdef INVARIANTS
                            ++token_contention_count;
#endif
                            mpheld = MP_LOCK_HELD();
                            continue;
                        }
                        break;
                    }
                    if (ntd)
                        break;
                    rqmask &= ~(1 << nq);
                    nq = bsrl(rqmask);
                }
                if (ntd == NULL) {
                    ntd = &gd->gd_idlethread;
                    ntd->td_flags |= TDF_IDLE_NOHLT;
                    goto using_idle_thread;
                } else {
                    TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                    TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
                }
            } else {
                TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
            }
#else
            /*
             * THREAD SELECTION FOR A UP MACHINE BUILD.  We don't have to
             * worry about tokens or the BGL.
             */
            TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
#endif
        } else {
            /*
             * We have nothing to run, but we only let the idle loop halt
             * the cpu if there are no pending interrupts.
             */
            ntd = &gd->gd_idlethread;
            if (gd->gd_reqflags & RQF_IDLECHECK_MASK)
                ntd->td_flags |= TDF_IDLE_NOHLT;
#ifdef SMP
using_idle_thread:
            /*
             * The idle thread should not be holding the MP lock unless we
             * are trapping in the kernel or in a panic.  Since we select the
             * idle thread unconditionally when no other thread is available,
             * if the MP lock is desired during a panic or kernel trap, we
             * have to loop in the scheduler until we get it.
             */
            if (ntd->td_mpcount) {
                mpheld = MP_LOCK_HELD();
                if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
                    panic("Idle thread %p was holding the BGL!", ntd);
                else if (mpheld == 0)
                    goto again;
            }
#endif
        }
    }
    KASSERT(ntd->td_pri >= TDPRI_CRIT,
        ("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri));

    /*
     * Do the actual switch.  If the new target does not need the MP lock
     * and we are holding it, release the MP lock.  If the new target requires
     * the MP lock we have already acquired it for the target.
     */
#ifdef SMP
    if (ntd->td_mpcount == 0) {
        if (MP_LOCK_HELD())
            cpu_rel_mplock();
    } else {
        ASSERT_MP_LOCK_HELD(ntd);
    }
#endif
    if (td != ntd) {
        ++switch_count;
        td->td_switch(ntd);
    }
    /* NOTE: current cpu may have changed after switch */
    crit_exit_quick(td);
}

/*
 * Request that the target thread preempt the current thread.  Preemption
 * only works under a specific set of conditions:
 *
 *	- We are not preempting ourselves
 *	- The target thread is owned by the current cpu
 *	- We are not currently being preempted
 *	- The target is not currently being preempted
 *	- We are able to satisfy the target's MP lock requirements (if any).
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critpri is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * XXX at the moment we run the target thread in a critical section during
 * the preemption in order to prevent the target from taking interrupts
 * that *WE* can't.  Preemption is strictly limited to interrupt threads
 * and interrupt-like threads, outside of a critical section, and the
 * preempted source thread will be resumed the instant the target blocks
 * whether or not the source is scheduled (i.e. preemption is supposed to
 * be as transparent as possible).
 *
 * The target thread inherits our MP count (added to its own) for the
 * duration of the preemption in order to preserve the atomicity of the
 * MP lock during the preemption.  Therefore, any preempting targets must be
 * careful with regard to MP assertions.  Note that the MP count may be
 * out of sync with the physical mp_lock, but we do not have to preserve
 * the original ownership of the lock if it was out of sync (that is, we
 * can leave it synchronized on return).
 */
void
lwkt_preempt(thread_t ntd, int critpri)
{
    struct globaldata *gd = mycpu;
    thread_t td;
#ifdef SMP
    int mpheld;
    int savecnt;
#endif

    /*
     * The caller has put us in a critical section.
     * We can only preempt if the caller of the caller was not in a critical
     * section (basically a local interrupt), as determined by the 'critpri'
     * parameter.
     *
     * YYY The target thread must be in a critical section (else it must
     * inherit our critical section?  I dunno yet).
     *
     * Any tokens held by the target may not be held by thread(s) being
     * preempted.  We take the easy way out and do not preempt if
     * the target is holding tokens.
     *
     * Set need_lwkt_resched() unconditionally for now YYY.
     */
    KASSERT(ntd->td_pri >= TDPRI_CRIT, ("BADCRIT0 %d", ntd->td_pri));

    td = gd->gd_curthread;
    if ((ntd->td_pri & TDPRI_MASK) <= (td->td_pri & TDPRI_MASK)) {
        ++preempt_miss;
        return;
    }
    if ((td->td_pri & ~TDPRI_MASK) > critpri) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#ifdef SMP
    if (ntd->td_gd != gd) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#endif
    /*
     * Take the easy way out and do not preempt if the target is holding
     * one or more tokens.  We could test whether the thread(s) being
     * preempted interlock against the target thread's tokens and whether
     * we can get all the target thread's tokens, but this situation
     * should not occur very often so it's easier to simply not preempt.
     */
    if (ntd->td_toks != NULL) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
    if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
        ++preempt_weird;
        need_lwkt_resched();
        return;
    }
    if (ntd->td_preempted) {
        ++preempt_hit;
        need_lwkt_resched();
        return;
    }
#ifdef SMP
    /*
     * note: an interrupt might have occurred just as we were transitioning
     * to or from the MP lock.  In this case td_mpcount will be pre-disposed
     * (non-zero) but not actually synchronized with the actual state of the
     * lock.  We can use it to imply an MP lock requirement for the
     * preemption but we cannot use it to test whether we hold the MP lock
     * or not.
     */
    savecnt = td->td_mpcount;
    mpheld = MP_LOCK_HELD();
    ntd->td_mpcount += td->td_mpcount;
    if (mpheld == 0 && ntd->td_mpcount && !cpu_try_mplock()) {
        ntd->td_mpcount -= td->td_mpcount;
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#endif

    /*
     * Since we are able to preempt the current thread, there is no need to
     * call need_lwkt_resched().
     */
    ++preempt_hit;
    ntd->td_preempted = td;
    td->td_flags |= TDF_PREEMPT_LOCK;
    td->td_switch(ntd);
    KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
#ifdef SMP
    KKASSERT(savecnt == td->td_mpcount);
    mpheld = MP_LOCK_HELD();
    if (mpheld && td->td_mpcount == 0)
        cpu_rel_mplock();
    else if (mpheld == 0 && td->td_mpcount)
        panic("lwkt_preempt(): MP lock was not held through");
#endif
    ntd->td_preempted = NULL;
    td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
}

/*
 * Yield our thread while higher priority threads are pending.  This is
 * typically called when we leave a critical section but it can be safely
 * called while we are in a critical section.
 *
 * This function will not generally yield to equal priority threads but it
 * can occur as a side effect.  Note that lwkt_switch() is called from
 * inside the critical section to prevent its own crit_exit() from reentering
 * lwkt_yield_quick().
 *
 * gd_reqflags indicates that *something* changed, e.g.
 * an interrupt or softint came along but was blocked and made pending.
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield_quick(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    /*
     * gd_reqflags is cleared in splz if the cpl is 0.  If we were to clear
     * it with a non-zero cpl then we might not wind up calling splz after
     * a task switch when the critical section is exited even though the
     * new task could accept the interrupt.
     *
     * XXX from crit_exit() only called after last crit section is released.
     * If called directly will run splz() even if in a critical section.
     *
     * td_nest_count prevents deep nesting via splz() or doreti().  Note that
     * except for this special case, we MUST call splz() here to handle any
     * pending ints, particularly after we switch, or we might accidentally
     * halt the cpu with interrupts pending.
     */
    if (gd->gd_reqflags && td->td_nest_count < 2)
        splz();

    /*
     * YYY enabling will cause wakeup() to task-switch, which really
     * confused the old 4.x code.  This is a good way to simulate
     * preemption and MP without actually doing preemption or MP, because a
     * lot of code assumes that wakeup() does not block.
     */
    if (untimely_switch && td->td_nest_count == 0 &&
        gd->gd_intr_nesting_level == 0
    ) {
        crit_enter_quick(td);
        /*
         * YYY temporary hacks until we disassociate the userland scheduler
         * from the LWKT scheduler.
         */
        if (td->td_flags & TDF_RUNQ) {
            lwkt_switch();              /* will not reenter yield function */
        } else {
            lwkt_schedule_self(td);     /* make sure we are scheduled */
            lwkt_switch();              /* will not reenter yield function */
            lwkt_deschedule_self(td);   /* make sure we are descheduled */
        }
        crit_exit_noyield(td);
    }
}

/*
 * This implements a normal yield which, unlike _quick, will yield to equal
 * priority threads as well.  Note that gd_reqflags tests will be handled by
 * the crit_exit() call in lwkt_switch().
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
    lwkt_schedule_self(curthread);
    lwkt_switch();
}

/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * We have a little helper inline function which does additional work after
 * the thread has been enqueued, including dealing with preemption and
 * setting need_lwkt_resched() (which prevents the kernel from returning
 * to userland until it has processed higher priority threads).
 */
static __inline
void
_lwkt_schedule_post(globaldata_t gd, thread_t ntd, int cpri)
{
    if (ntd->td_preemptable) {
        ntd->td_preemptable(ntd, cpri); /* YYY +token */
    } else if ((ntd->td_flags & TDF_NORESCHED) == 0 &&
        (ntd->td_pri & TDPRI_MASK) > (gd->gd_curthread->td_pri & TDPRI_MASK)
    ) {
        need_lwkt_resched();
    }
}

void
lwkt_schedule(thread_t td)
{
    globaldata_t mygd = mycpu;

#ifdef INVARIANTS
    KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
    if ((td->td_flags & TDF_PREEMPT_LOCK) == 0 && td->td_proc
        && td->td_proc->p_stat == SSLEEP
    ) {
        printf("PANIC schedule curtd = %p (%d %d) target %p (%d %d)\n",
            curthread,
            curthread->td_proc ? curthread->td_proc->p_pid : -1,
            curthread->td_proc ? curthread->td_proc->p_stat : -1,
            td,
            td->td_proc ? td->td_proc->p_pid : -1,
            td->td_proc ? td->td_proc->p_stat : -1
        );
        panic("SCHED PANIC");
    }
#endif
    crit_enter_gd(mygd);
    if (td == mygd->gd_curthread) {
        _lwkt_enqueue(td);
    } else {
        lwkt_wait_t w;

        /*
         * If the thread is on a wait list we have to send our scheduling
         * request to the owner of the wait structure.  Otherwise we send
         * the scheduling request to the cpu owning the thread.  Races
         * are ok, the target will forward the message as necessary (the
         * message may chase the thread around before it finally gets
         * acted upon).
         *
         * (remember, wait structures use stable storage)
         *
         * NOTE: we have to account for the number of critical sections
         * under our control when calling _lwkt_schedule_post() so it
         * can figure out whether preemption is allowed.
         *
         * NOTE: The wait structure algorithms are a mess and need to be
         * rewritten.
         *
         * NOTE: We cannot safely acquire or release a token, even
         * non-blocking, because this routine may be called in the context
         * of a thread already holding the token and thus not provide any
         * interlock protection.  We cannot safely manipulate the td_toks
         * list for the same reason.  Instead we depend on our critical
         * section if the token is owned by our cpu.
         */
        if ((w = td->td_wait) != NULL) {
            if (w->wa_token.t_cpu == mygd) {
                TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
                --w->wa_count;
                td->td_wait = NULL;
#ifdef SMP
                if (td->td_gd == mygd) {
                    _lwkt_enqueue(td);
                    _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
                } else {
                    lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_schedule, td);
                }
#else
                _lwkt_enqueue(td);
                _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
#endif
            } else {
#ifdef SMP
                lwkt_send_ipiq(w->wa_token.t_cpu, (ipifunc1_t)lwkt_schedule, td);
#else
                panic("bad token %p", &w->wa_token);
#endif
            }
        } else {
            /*
             * If the wait structure is NULL and we own the thread, there
             * is no race (since we are in a critical section).  If we
             * do not own the thread there might be a race but the
             * target cpu will deal with it.
             */
#ifdef SMP
            if (td->td_gd == mygd) {
                _lwkt_enqueue(td);
                _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
            } else {
                lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_schedule, td);
            }
#else
            _lwkt_enqueue(td);
            _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
#endif
        }
    }
    crit_exit_gd(mygd);
}

/*
 * Managed acquisition.  This code assumes that the MP lock is held for
 * the tdallq operation and that the thread has been descheduled from its
 * original cpu.  We also have to wait for the thread to be entirely switched
 * out on its original cpu (this is usually fast enough that we never loop)
 * since the LWKT system does not have to hold the MP lock while switching
 * and the target may have released it before switching.
 */
void
lwkt_acquire(thread_t td)
{
    globaldata_t gd;
    globaldata_t mygd;

    gd = td->td_gd;
    mygd = mycpu;
    cpu_lfence();
    KKASSERT((td->td_flags & TDF_RUNQ) == 0);
    while (td->td_flags & TDF_RUNNING)  /* XXX spin */
        cpu_lfence();
    if (gd != mygd) {
        crit_enter_gd(mygd);
        TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);      /* protected by BGL */
        td->td_gd = mygd;
        TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq); /* protected by BGL */
        crit_exit_gd(mygd);
    }
}

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
#ifdef SMP
    if (td == curthread) {
        _lwkt_dequeue(td);
    } else {
        if (td->td_gd == mycpu) {
            _lwkt_dequeue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
        }
    }
#else
    _lwkt_dequeue(td);
#endif
    crit_exit();
}

/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread, LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
 *
 * We have to retain the critical section count which uses the high bits
 * of the td_pri field.  The specified priority may also indicate zero or
 * more critical sections by adding TDPRI_CRIT*N.
 *
 * Note that we requeue the thread whether it winds up on a different runq
 * or not.  uio_yield() depends on this and the routine is not normally
 * called with the same priority otherwise.
 */
void
lwkt_setpri(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    KKASSERT(td->td_gd == mycpu);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

void
lwkt_setpri_self(int pri)
{
    thread_t td = curthread;

    KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

/*
 * Determine if there is a runnable thread at a higher priority than
 * the current thread.  lwkt_setpri() does not check this automatically.
 * Return 1 if there is, 0 if there isn't.
 *
 * Example: if bit 31 of runqmask is set and the current thread is priority
 * 30, then we wind up checking the mask: 0x80000000 against 0x7fffffff.
 *
 * If nq reaches 31 the shift operation will overflow to 0 and we will wind
 * up comparing against 0xffffffff, a comparison that will always be false.
 */
int
lwkt_checkpri_self(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    int nq = td->td_pri & TDPRI_MASK;

    while (gd->gd_runqmask > (__uint32_t)(2 << nq) - 1) {
        if (TAILQ_FIRST(&gd->gd_tdrunq[nq + 1]))
            return(1);
        ++nq;
    }
    return(0);
}

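/*
 * A minimal sketch of self-migration using the interface below (compiled
 * out, hypothetical example only).  'cpu' is assumed to be a valid cpu id
 * for globaldata_find(), and per the comment below the BGL must be held
 * for the gd_tdallq manipulation.
 */
#if 0
static void
example_bind_self(int cpu)
{
    lwkt_setcpu_self(globaldata_find(cpu));
    /* we are now running on the target cpu */
}
#endif
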
/*
 * Migrate the current thread to the specified cpu.  The BGL must be held
 * (for the gd_tdallq manipulation XXX).  This is accomplished by
 * descheduling ourselves from the current cpu, moving our thread to the
 * tdallq of the target cpu, IPI messaging the target cpu, and switching out.
 * TDF_MIGRATING prevents scheduling races while the thread is being migrated.
 */
#ifdef SMP
static void lwkt_setcpu_remote(void *arg);
#endif

void
lwkt_setcpu_self(globaldata_t rgd)
{
#ifdef SMP
    thread_t td = curthread;

    if (td->td_gd != rgd) {
        crit_enter_quick(td);
        td->td_flags |= TDF_MIGRATING;
        lwkt_deschedule_self(td);
        TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq); /* protected by BGL */
        TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);  /* protected by BGL */
        lwkt_send_ipiq(rgd, (ipifunc1_t)lwkt_setcpu_remote, td);
        lwkt_switch();
        /* we are now on the target cpu */
        crit_exit_quick(td);
    }
#endif
}

/*
 * Remote IPI for cpu migration (called while in a critical section so we
 * do not have to enter another one).  The thread has already been moved to
 * our cpu's allq, but we must wait for the thread to be completely switched
 * out on the originating cpu before we schedule it on ours or the stack
 * state may be corrupt.  We clear TDF_MIGRATING after flushing the GD
 * change to main memory.
 *
 * XXX The use of TDF_MIGRATING might not be sufficient to avoid races
 * against wakeups.  It is best if this interface is used only when there
 * are no pending events that might try to schedule the thread.
 */
#ifdef SMP
static void
lwkt_setcpu_remote(void *arg)
{
    thread_t td = arg;
    globaldata_t gd = mycpu;

    while (td->td_flags & TDF_RUNNING)
        cpu_lfence();
    td->td_gd = gd;
    cpu_sfence();
    td->td_flags &= ~TDF_MIGRATING;
    _lwkt_enqueue(td);
}
#endif

struct lwp *
lwkt_preempted_proc(void)
{
    thread_t td = curthread;
    while (td->td_preempted)
        td = td->td_preempted;
    return(td->td_lwp);
}

/*
 * Block on the specified wait queue until signaled.  A generation number
 * must be supplied to interlock the wait queue.  The function will
 * return immediately if the generation number does not match the wait
 * structure's generation number.
 */
void
lwkt_block(lwkt_wait_t w, const char *wmesg, int *gen)
{
    thread_t td = curthread;
    lwkt_tokref ilock;

    lwkt_gettoken(&ilock, &w->wa_token);
    crit_enter();
    if (w->wa_gen == *gen) {
        _lwkt_dequeue(td);
        TAILQ_INSERT_TAIL(&w->wa_waitq, td, td_threadq);
        ++w->wa_count;
        td->td_wait = w;
        td->td_wmesg = wmesg;
again:
        lwkt_switch();
        if (td->td_wmesg != NULL) {
            _lwkt_dequeue(td);
            goto again;
        }
    }
    crit_exit();
    *gen = w->wa_gen;
    lwkt_reltoken(&ilock);
}

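/*
 * A minimal sketch of the generation-number interlock implemented by
 * lwkt_block() above and lwkt_signal() below (compiled out, hypothetical
 * example only).  example_wait is assumed to be caller-owned storage of
 * the structure behind lwkt_wait_t, initialized once with lwkt_wait_init().
 * The waiter snapshots wa_gen before testing its condition, so a signal
 * that races in between the test and the block bumps the generation and
 * makes lwkt_block() return immediately.
 */
#if 0
static struct lwkt_wait example_wait;   /* lwkt_wait_init(&example_wait) once */
static volatile int example_ready;

static void
example_consumer(void)
{
    int gen = example_wait.wa_gen;      /* snapshot before testing condition */

    while (example_ready == 0)
        lwkt_block(&example_wait, "exwait", &gen); /* gen refreshed on return */
}

static void
example_producer(void)
{
    example_ready = 1;
    lwkt_signal(&example_wait, 1);      /* wake up to one waiter */
}
#endif
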
/*
 * Signal a wait queue.  We gain ownership of the wait queue in order to
 * signal it.  Once a thread is removed from the wait queue we have to
 * deal with the cpu owning the thread.
 *
 * Note: alternatively we could message the target cpu owning the wait
 * queue.  YYY implement as sysctl.
 */
void
lwkt_signal(lwkt_wait_t w, int count)
{
    thread_t td;
    lwkt_tokref ilock;

    lwkt_gettoken(&ilock, &w->wa_token);
    ++w->wa_gen;
    crit_enter();
    if (count < 0)
        count = w->wa_count;
    while ((td = TAILQ_FIRST(&w->wa_waitq)) != NULL && count) {
        --count;
        --w->wa_count;
        TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
        td->td_wait = NULL;
        td->td_wmesg = NULL;
#ifdef SMP
        if (td->td_gd == mycpu) {
            _lwkt_enqueue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_schedule, td);
        }
#else
        _lwkt_enqueue(td);
#endif
    }
    crit_exit();
    lwkt_reltoken(&ilock);
}

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 (i.e. kernel only).
 *
 * NOTE!  By default new threads are created with the MP lock held.  A
 * thread which does not require the MP lock should release it by calling
 * rel_mplock() at the start of the new thread.
 */
int
lwkt_create(void (*func)(void *), void *arg,
        struct thread **tdp, thread_t template, int tdflags, int cpu,
        const char *fmt, ...)
{
    thread_t td;
    __va_list ap;

    td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu);
    if (tdp)
        *tdp = td;
    cpu_set_thread_handler(td, lwkt_exit, func, arg);
    td->td_flags |= TDF_VERBOSE | tdflags;
#ifdef SMP
    td->td_mpcount = 1;
#endif

    /*
     * Set up arg0 for 'ps' etc
     */
    __va_start(ap, fmt);
    vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    __va_end(ap);

    /*
     * Schedule the thread to run
     */
    if ((td->td_flags & TDF_STOPREQ) == 0)
        lwkt_schedule(td);
    else
        td->td_flags &= ~TDF_STOPREQ;
    return 0;
}

/*
 * kthread_* is specific to the kernel and is not needed by userland.
 */
#ifdef _KERNEL

/*
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;
    globaldata_t gd;

    if (td->td_flags & TDF_VERBOSE)
        printf("kthread %p %s has exited\n", td, td->td_comm);
    caps_exit(td);
    crit_enter_quick(td);
    lwkt_deschedule_self(td);
    gd = mycpu;
    KKASSERT(gd == td->td_gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    if (td->td_flags & TDF_ALLOCATED_THREAD) {
        ++gd->gd_tdfreecount;
        TAILQ_INSERT_TAIL(&gd->gd_tdfreeq, td, td_threadq);
    }
    cpu_thread_exit();
}

#endif /* _KERNEL */

void
crit_panic(void)
{
    thread_t td = curthread;
    int lpri = td->td_pri;

    td->td_pri = 0;
    panic("td_pri is/would-go negative! %p %d", td, lpri);
}

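/*
 * A minimal usage sketch for lwkt_create() above (compiled out, hypothetical
 * example only; example_kthread, example_start and example_td are assumed
 * names).  Per the NOTE above lwkt_create(), a new thread starts with the
 * MP lock held and should release it early if it does not need it.
 */
#if 0
static struct thread *example_td;

static void
example_kthread(void *arg)
{
    /* rel_mplock() here if the MP lock is not needed */
    /* ... do the thread's work ... */
    lwkt_exit();        /* also the return path installed by cpu_set_thread_handler() */
}

static void
example_start(void)
{
    lwkt_create(example_kthread, NULL, &example_td, NULL, 0, -1, "example");
}
#endif
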
#ifdef SMP

/*
 * Called from debugger/panic on cpus which have been stopped.  We must still
 * process the IPIQ while stopped, even if we were stopped while in a critical
 * section (XXX).
 *
 * If we are dumping also try to process any pending interrupts.  This may
 * or may not work depending on the state of the cpu at the point it was
 * stopped.
 */
void
lwkt_smp_stopped(void)
{
    globaldata_t gd = mycpu;

    crit_enter_gd(gd);
    if (dumping) {
        lwkt_process_ipiq();
        splz();
    } else {
        lwkt_process_ipiq();
    }
    crit_exit_gd(gd);
}

#endif