/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.86 2005/11/14 18:50:05 dillon Exp $
 */

/*
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
 */

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/ipl.h>
#include <machine/smp.h>

#else

#include <sys/stdint.h>
#include <libcaps/thread.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include <libcaps/globaldata.h>
#include <machine/cpufunc.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <machine/lock.h>
#include <machine/atomic.h>
#include <machine/cpu.h>

#endif

static int untimely_switch = 0;
#ifdef INVARIANTS
static int panic_on_cscount = 0;
#endif
static __int64_t switch_count = 0;
static __int64_t preempt_hit = 0;
static __int64_t preempt_miss = 0;
static __int64_t preempt_weird = 0;
static __int64_t token_contention_count = 0;
static __int64_t mplock_contention_count = 0;

#ifdef _KERNEL

SYSCTL_INT(_lwkt, OID_AUTO, untimely_switch, CTLFLAG_RW, &untimely_switch, 0, "");
#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, "");
#endif
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, "");
#ifdef INVARIANTS
SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count, CTLFLAG_RW,
    &token_contention_count, 0, "spinning due to token contention");
SYSCTL_QUAD(_lwkt, OID_AUTO, mplock_contention_count, CTLFLAG_RW,
    &mplock_contention_count, 0, "spinning due to MPLOCK contention");
#endif
#endif

/*
 * These helper procedures handle the runq; they can only be called from
 * within a critical section.
 *
 * WARNING! Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once
 * SMP is live, enqueuing and dequeuing only occur on the current cpu.
 */
static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&gd->gd_tdrunq[nq], td, td_threadq);
        /* runqmask is passively cleaned up by the switcher */
    }
}

static __inline
void
_lwkt_enqueue(thread_t td)
{
    if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_TSLEEPQ|TDF_BLOCKQ)) == 0) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags |= TDF_RUNQ;
        TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], td, td_threadq);
        gd->gd_runqmask |= 1 << nq;
    }
}

/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves; a shortcut procedure is provided for that purpose.
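 *
 * For example, lwkt_yield() further down in this file is implemented as
 * nothing more than a self-schedule followed by a switch:
 *
 *	lwkt_schedule_self(curthread);
 *	lwkt_switch();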
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(thread_t td)
{
    crit_enter_quick(td);
    KASSERT(td->td_wait == NULL, ("lwkt_schedule_self(): td_wait not NULL!"));
    KASSERT(td != &td->td_gd->gd_idlethread,
        ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
    KKASSERT(td->td_proc == NULL || (td->td_proc->p_flag & P_ONRUNQ) == 0);
    _lwkt_enqueue(td);
    crit_exit_quick(td);
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(thread_t td)
{
    crit_enter_quick(td);
    KASSERT(td->td_wait == NULL, ("lwkt_deschedule_self(): td_wait not NULL!"));
    _lwkt_dequeue(td);
    crit_exit_quick(td);
}

#ifdef _KERNEL

/*
 * LWKTs operate on a per-cpu basis
 *
 * WARNING!  Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    int i;

    for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i)
        TAILQ_INIT(&gd->gd_tdrunq[i]);
    gd->gd_runqmask = 0;
    TAILQ_INIT(&gd->gd_tdallq);
}

#endif /* _KERNEL */

/*
 * Initialize a thread wait structure prior to first use.
 *
 * NOTE!  Called from low level boot code, we cannot do anything fancy!
 */
void
lwkt_wait_init(lwkt_wait_t w)
{
    lwkt_token_init(&w->wa_token);
    TAILQ_INIT(&w->wa_waitq);
    w->wa_gen = 0;
    w->wa_count = 0;
}

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.  If the target cpu is
 * -1 the thread will be created on the current cpu.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
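 *
 * As an illustration (this is essentially what lwkt_create() below does),
 * a caller setting up an LWKT-only thread would typically follow up with:
 *
 *	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, -1);
 *	cpu_set_thread_handler(td, lwkt_exit, func, arg);
 *	lwkt_schedule(td);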
 */
thread_t
lwkt_alloc_thread(struct thread *td, int stksize, int cpu)
{
    void *stack;
    int flags = 0;
    globaldata_t gd = mycpu;

    if (td == NULL) {
        crit_enter_gd(gd);
        if (gd->gd_tdfreecount > 0) {
            --gd->gd_tdfreecount;
            td = TAILQ_FIRST(&gd->gd_tdfreeq);
            KASSERT(td != NULL && (td->td_flags & TDF_RUNNING) == 0,
                ("lwkt_alloc_thread: unexpected NULL or corrupted td"));
            TAILQ_REMOVE(&gd->gd_tdfreeq, td, td_threadq);
            crit_exit_gd(gd);
            flags = td->td_flags & (TDF_ALLOCATED_STACK|TDF_ALLOCATED_THREAD);
        } else {
            crit_exit_gd(gd);
#ifdef _KERNEL
            td = zalloc(thread_zone);
#else
            td = malloc(sizeof(struct thread));
#endif
            td->td_kstack = NULL;
            td->td_kstack_size = 0;
            flags |= TDF_ALLOCATED_THREAD;
        }
    }
    if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
        if (flags & TDF_ALLOCATED_STACK) {
#ifdef _KERNEL
            kmem_free(kernel_map, (vm_offset_t)stack, td->td_kstack_size);
#else
            libcaps_free_stack(stack, td->td_kstack_size);
#endif
            stack = NULL;
        }
    }
    if (stack == NULL) {
#ifdef _KERNEL
        stack = (void *)kmem_alloc(kernel_map, stksize);
#else
        stack = libcaps_alloc_stack(stksize);
#endif
        flags |= TDF_ALLOCATED_STACK;
    }
    if (cpu < 0)
        lwkt_init_thread(td, stack, stksize, flags, mycpu);
    else
        lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
    return(td);
}

#ifdef _KERNEL

/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also to initialize the per-cpu idlethread.
 *
 * All threads start out in a critical section at a priority of
 * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
 * appropriate.  This function may send an IPI message when the
 * requested cpu is not the current cpu and consequently gd_tdallq may
 * not be initialized synchronously from the point of view of the
 * originating cpu.
 *
 * NOTE! We have to be careful when creating threads for other cpus
 * if SMP has not yet been activated.
 */
#ifdef SMP

static void
lwkt_init_thread_remote(void *arg)
{
    thread_t td = arg;

    TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
}

#endif

void
lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
                 struct globaldata *gd)
{
    globaldata_t mygd = mycpu;

    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_kstack_size = stksize;
    td->td_flags |= flags;
    td->td_gd = gd;
    td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT;
    lwkt_initport(&td->td_msgport, td);
    pmap_init_thread(td);
#ifdef SMP
    /*
     * Normally initializing a thread for a remote cpu requires sending an
     * IPI.  However, the idlethread is set up before the other cpus are
     * activated so we have to treat it as a special case.  XXX manipulation
     * of gd_tdallq requires the BGL.
     */
    if (gd == mygd || td == &gd->gd_idlethread) {
        crit_enter_gd(mygd);
        TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
        crit_exit_gd(mygd);
    } else {
        lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
    }
#else
    crit_enter_gd(mygd);
    TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
    crit_exit_gd(mygd);
#endif
}

#endif /* _KERNEL */

/*
 * Set a thread's command name (printf-style).
 */
void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
    __va_list va;

    __va_start(va, ctl);
    vsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
    __va_end(va);
}

/*
 * Add a reference to a thread, preventing it from being reaped.
 */
void
lwkt_hold(thread_t td)
{
    ++td->td_refs;
}

/*
 * Drop a reference previously gained with lwkt_hold().
 */
void
lwkt_rele(thread_t td)
{
    KKASSERT(td->td_refs > 0);
    --td->td_refs;
}

#ifdef _KERNEL

/*
 * Sleep until all references to the thread have gone away.
 */
void
lwkt_wait_free(thread_t td)
{
    while (td->td_refs)
        tsleep(td, 0, "tdreap", hz);
}

#endif

/*
 * Free an exited thread, either caching it on the per-cpu free queue or
 * returning its stack and thread structure to the system.
 */
void
lwkt_free_thread(thread_t td)
{
    struct globaldata *gd = mycpu;

    KASSERT((td->td_flags & TDF_RUNNING) == 0,
        ("lwkt_free_thread: did not exit! %p", td));

    crit_enter_gd(gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    if (gd->gd_tdfreecount < CACHE_NTHREADS &&
        (td->td_flags & TDF_ALLOCATED_THREAD)
    ) {
        ++gd->gd_tdfreecount;
        TAILQ_INSERT_HEAD(&gd->gd_tdfreeq, td, td_threadq);
        crit_exit_gd(gd);
    } else {
        crit_exit_gd(gd);
        if (td->td_kstack && (td->td_flags & TDF_ALLOCATED_STACK)) {
#ifdef _KERNEL
            kmem_free(kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
#else
            libcaps_free_stack(td->td_kstack, td->td_kstack_size);
#endif
            /* gd invalid */
            td->td_kstack = NULL;
            td->td_kstack_size = 0;
        }
        if (td->td_flags & TDF_ALLOCATED_THREAD) {
#ifdef _KERNEL
            zfree(thread_zone, td);
#else
            free(td);
#endif
        }
    }
}

/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * The MP lock may be out of sync with the thread's td_mpcount.  lwkt_switch()
 * cleans it up.  Note that the td_switch() function cannot do anything that
 * requires the MP lock since the MP lock will have already been set up for
 * the target thread (not the current thread).  It's nice to have a scheduler
 * that does not need the MP lock to work because it allows us to do some
 * really cool high-performance MP lock optimizations.
 *
 * PREEMPTION NOTE: Preemption occurs via lwkt_preempt().  lwkt_switch()
 * is not called by the current thread in the preemption case, only when
 * the preempting thread blocks (in order to return to the original thread).
 */
void
lwkt_switch(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    thread_t ntd;
#ifdef SMP
    int mpheld;
#endif

    /*
     * We had better not be holding any spin locks.
     */
    KKASSERT(td->td_spinlocks == 0);

    /*
     * Switching from within a 'fast' (non thread switched) interrupt or IPI
     * is illegal.  However, we may have to do it anyway if we hit a fatal
     * kernel trap or we have panicked.
     *
     * If this case occurs, save and restore the interrupt nesting level.
     */
    if (gd->gd_intr_nesting_level) {
        int savegdnest;
        int savegdtrap;

        if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) {
            panic("lwkt_switch: cannot switch from within "
                  "a fast interrupt, yet, td %p\n", td);
        } else {
            savegdnest = gd->gd_intr_nesting_level;
            savegdtrap = gd->gd_trap_nesting_level;
            gd->gd_intr_nesting_level = 0;
            gd->gd_trap_nesting_level = 0;
            if ((td->td_flags & TDF_PANICWARN) == 0) {
                td->td_flags |= TDF_PANICWARN;
                printf("Warning: thread switch from interrupt or IPI, "
                       "thread %p (%s)\n", td, td->td_comm);
#ifdef DDB
                db_print_backtrace();
#endif
            }
            lwkt_switch();
            gd->gd_intr_nesting_level = savegdnest;
            gd->gd_trap_nesting_level = savegdtrap;
            return;
        }
    }

    /*
     * Passive release (used to transition from user to kernel mode
     * when we block or switch rather than when we enter the kernel).
     * This function is NOT called if we are switching into a preemption
     * or returning from a preemption.  Typically this causes us to lose
     * our current process designation (if we have one) and become a true
     * LWKT thread, and may also hand the current process designation to
     * another process and schedule its thread.
     */
    if (td->td_release)
        td->td_release(td);

    crit_enter_gd(gd);

#ifdef SMP
    /*
     * td_mpcount cannot be used to determine if we currently hold the
     * MP lock because get_mplock() will increment it prior to attempting
     * to get the lock, and switch out if it can't.  Our ownership of
     * the actual lock will remain stable while we are in a critical section
     * (but, of course, another cpu may own or release the lock so the
     * actual value of mp_lock is not stable).
     */
    mpheld = MP_LOCK_HELD();
#ifdef INVARIANTS
    if (td->td_cscount) {
        printf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
               td);
        if (panic_on_cscount)
            panic("switching while mastering cpusync");
    }
#endif
#endif
    if ((ntd = td->td_preempted) != NULL) {
        /*
         * We had preempted another thread on this cpu, resume the preempted
         * thread.  This occurs transparently, whether the preempted thread
         * was scheduled or not (it may have been preempted after descheduling
         * itself).
         *
         * We have to set up the MP lock for the original thread after backing
         * out the adjustment that was made to curthread when the original
         * was preempted.
         */
        KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
#ifdef SMP
        if (ntd->td_mpcount && mpheld == 0) {
            panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d",
                  td, ntd, td->td_mpcount, ntd->td_mpcount);
        }
        if (ntd->td_mpcount) {
            td->td_mpcount -= ntd->td_mpcount;
            KKASSERT(td->td_mpcount >= 0);
        }
#endif
        ntd->td_flags |= TDF_PREEMPT_DONE;

        /*
         * XXX.  The interrupt may have woken a thread up, we need to properly
         * set the reschedule flag if the originally interrupted thread is at
         * a lower priority.
         */
        if (gd->gd_runqmask > (2 << (ntd->td_pri & TDPRI_MASK)) - 1)
            need_lwkt_resched();
        /* YYY release mp lock on switchback if original doesn't need it */
    } else {
        /*
         * Priority queue / round-robin at each priority.  Note that user
         * processes run at a fixed, low priority and the user process
         * scheduler deals with interactions between user processes
         * by scheduling and descheduling them from the LWKT queue as
         * necessary.
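         *
         * As an illustrative example of the selection below: bsrl() returns
         * the index of the highest set bit, so if gd_runqmask were 0x00000212
         * (queues 1, 4 and 9 non-empty), nq would start out as 9 and the
         * TDPRI 9 queue would be scanned first.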
         *
         * We have to adjust the MP lock for the target thread.  If we
         * need the MP lock and cannot obtain it we try to locate a
         * thread that does not need the MP lock.  If we cannot, we spin
         * instead of HLT.
         *
         * A similar issue exists for the tokens held by the target thread.
         * If we cannot obtain ownership of the tokens we cannot immediately
         * schedule the thread.
         */

        /*
         * We are switching threads.  If there are any pending requests for
         * tokens we can satisfy all of them here.
         */
#ifdef SMP
        if (gd->gd_tokreqbase)
            lwkt_drain_token_requests();
#endif

        /*
         * If an LWKT reschedule was requested, well that is what we are
         * doing now so clear it.
         */
        clear_lwkt_resched();
again:
        if (gd->gd_runqmask) {
            int nq = bsrl(gd->gd_runqmask);
            if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) == NULL) {
                gd->gd_runqmask &= ~(1 << nq);
                goto again;
            }
#ifdef SMP
            /*
             * THREAD SELECTION FOR AN SMP MACHINE BUILD
             *
             * If the target needs the MP lock and we couldn't get it,
             * or if the target is holding tokens and we could not
             * gain ownership of the tokens, continue looking for a
             * thread to schedule and spin instead of HLT if we can't.
             *
             * NOTE: the mpheld variable is invalid after this conditional, it
             * can change due to both cpu_try_mplock() returning success
             * AND interactions in lwkt_chktokens() due to the fact that
             * we are trying to check the mpcount of a thread other than
             * the current thread.  Because of this, if the current thread
             * is not holding td_mpcount, an IPI indirectly run via
             * lwkt_chktokens() can obtain and release the MP lock and
             * cause the core MP lock to be released.
             */
            if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) ||
                (ntd->td_toks && lwkt_chktokens(ntd) == 0)
            ) {
                u_int32_t rqmask = gd->gd_runqmask;

                mpheld = MP_LOCK_HELD();
                ntd = NULL;
                while (rqmask) {
                    TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) {
                        if (ntd->td_mpcount && !mpheld && !cpu_try_mplock()) {
                            /* spinning due to MP lock being held */
#ifdef INVARIANTS
                            ++mplock_contention_count;
#endif
                            /* mplock still not held, 'mpheld' still valid */
                            continue;
                        }

                        /*
                         * mpheld state invalid after chktokens call returns
                         * failure, but the variable is only needed for
                         * the loop.
                         */
                        if (ntd->td_toks && !lwkt_chktokens(ntd)) {
                            /* spinning due to token contention */
#ifdef INVARIANTS
                            ++token_contention_count;
#endif
                            mpheld = MP_LOCK_HELD();
                            continue;
                        }
                        break;
                    }
                    if (ntd)
                        break;
                    rqmask &= ~(1 << nq);
                    nq = bsrl(rqmask);
                }
                if (ntd == NULL) {
                    ntd = &gd->gd_idlethread;
                    ntd->td_flags |= TDF_IDLE_NOHLT;
                    goto using_idle_thread;
                } else {
                    ++gd->gd_cnt.v_swtch;
                    TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                    TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
                }
            } else {
                ++gd->gd_cnt.v_swtch;
                TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
            }
#else
            /*
             * THREAD SELECTION FOR A UP MACHINE BUILD.  We don't have to
             * worry about tokens or the BGL.
             */
            ++gd->gd_cnt.v_swtch;
            TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
#endif
        } else {
            /*
             * We have nothing to run but only let the idle loop halt
             * the cpu if there are no pending interrupts.
             */
            ntd = &gd->gd_idlethread;
            if (gd->gd_reqflags & RQF_IDLECHECK_MASK)
                ntd->td_flags |= TDF_IDLE_NOHLT;
#ifdef SMP
using_idle_thread:
            /*
             * The idle thread should not be holding the MP lock unless we
             * are trapping in the kernel or in a panic.  Since we select the
             * idle thread unconditionally when no other thread is available,
             * if the MP lock is desired during a panic or kernel trap, we
             * have to loop in the scheduler until we get it.
             */
            if (ntd->td_mpcount) {
                mpheld = MP_LOCK_HELD();
                if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
                    panic("Idle thread %p was holding the BGL!", ntd);
                else if (mpheld == 0)
                    goto again;
            }
#endif
        }
    }
    KASSERT(ntd->td_pri >= TDPRI_CRIT,
        ("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri));

    /*
     * Do the actual switch.  If the new target does not need the MP lock
     * and we are holding it, release the MP lock.  If the new target requires
     * the MP lock we have already acquired it for the target.
     */
#ifdef SMP
    if (ntd->td_mpcount == 0) {
        if (MP_LOCK_HELD())
            cpu_rel_mplock();
    } else {
        ASSERT_MP_LOCK_HELD(ntd);
    }
#endif
    if (td != ntd) {
        ++switch_count;
        td->td_switch(ntd);
    }
    /* NOTE: current cpu may have changed after switch */
    crit_exit_quick(td);
}

/*
 * Request that the target thread preempt the current thread.  Preemption
 * only works under a specific set of conditions:
 *
 *	- We are not preempting ourselves
 *	- The target thread is owned by the current cpu
 *	- We are not currently being preempted
 *	- The target is not currently being preempted
 *	- We are able to satisfy the target's MP lock requirements (if any).
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critpri is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * XXX at the moment we run the target thread in a critical section during
 * the preemption in order to prevent the target from taking interrupts
 * that *WE* can't.  Preemption is strictly limited to interrupt threads
 * and interrupt-like threads, outside of a critical section, and the
 * preempted source thread will be resumed the instant the target blocks
 * whether or not the source is scheduled (i.e. preemption is supposed to
 * be as transparent as possible).
 *
 * The target thread inherits our MP count (added to its own) for the
 * duration of the preemption in order to preserve the atomicity of the
 * MP lock during the preemption.  Therefore, any preempting targets must be
 * careful in regards to MP assertions.  Note that the MP count may be
 * out of sync with the physical mp_lock, but we do not have to preserve
 * the original ownership of the lock if it was out of sync (that is, we
 * can leave it synchronized on return).
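 *
 * To sketch the typical call path (see lwkt_schedule() and
 * _lwkt_schedule_post() below), an interrupt thread scheduled from within
 * a critical section reaches this function roughly as:
 *
 *	lwkt_schedule(itd);
 *	  -> _lwkt_schedule_post(gd, itd, TDPRI_CRIT)
 *	    -> itd->td_preemptable(itd, TDPRI_CRIT)   (typically this function)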
 */
void
lwkt_preempt(thread_t ntd, int critpri)
{
    struct globaldata *gd = mycpu;
    thread_t td;
#ifdef SMP
    int mpheld;
    int savecnt;
#endif

    /*
     * The caller has put us in a critical section.  We can only preempt
     * if the caller of the caller was not in a critical section (basically
     * a local interrupt), as determined by the 'critpri' parameter.
     *
     * YYY The target thread must be in a critical section (else it must
     * inherit our critical section?  I dunno yet).
     *
     * Any tokens held by the target may not be held by thread(s) being
     * preempted.  We take the easy way out and do not preempt if
     * the target is holding tokens.
     *
     * Set need_lwkt_resched() unconditionally for now YYY.
     */
    KASSERT(ntd->td_pri >= TDPRI_CRIT, ("BADCRIT0 %d", ntd->td_pri));

    td = gd->gd_curthread;
    if ((ntd->td_pri & TDPRI_MASK) <= (td->td_pri & TDPRI_MASK)) {
        ++preempt_miss;
        return;
    }
    if ((td->td_pri & ~TDPRI_MASK) > critpri) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#ifdef SMP
    if (ntd->td_gd != gd) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#endif
    /*
     * Take the easy way out and do not preempt if the target is holding
     * one or more tokens.  We could test whether the thread(s) being
     * preempted interlock against the target thread's tokens and whether
     * we can get all the target thread's tokens, but this situation
     * should not occur very often so it's easier to simply not preempt.
     */
    if (ntd->td_toks != NULL) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
    if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
        ++preempt_weird;
        need_lwkt_resched();
        return;
    }
    if (ntd->td_preempted) {
        ++preempt_hit;
        need_lwkt_resched();
        return;
    }
#ifdef SMP
    /*
     * note: an interrupt might have occurred just as we were transitioning
     * to or from the MP lock.  In this case td_mpcount will be pre-disposed
     * (non-zero) but not actually synchronized with the actual state of the
     * lock.  We can use it to imply an MP lock requirement for the
     * preemption but we cannot use it to test whether we hold the MP lock
     * or not.
     */
    savecnt = td->td_mpcount;
    mpheld = MP_LOCK_HELD();
    ntd->td_mpcount += td->td_mpcount;
    if (mpheld == 0 && ntd->td_mpcount && !cpu_try_mplock()) {
        ntd->td_mpcount -= td->td_mpcount;
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#endif

    /*
     * Since we are able to preempt the current thread, there is no need to
     * call need_lwkt_resched().
     */
    ++preempt_hit;
    ntd->td_preempted = td;
    td->td_flags |= TDF_PREEMPT_LOCK;
    td->td_switch(ntd);
    KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
#ifdef SMP
    KKASSERT(savecnt == td->td_mpcount);
    mpheld = MP_LOCK_HELD();
    if (mpheld && td->td_mpcount == 0)
        cpu_rel_mplock();
    else if (mpheld == 0 && td->td_mpcount)
        panic("lwkt_preempt(): MP lock was not held through");
#endif
    ntd->td_preempted = NULL;
    td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
}

/*
 * Yield our thread while higher priority threads are pending.  This is
 * typically called when we leave a critical section but it can be safely
 * called while we are in a critical section.
 *
 * This function will not generally yield to equal priority threads but it
 * can occur as a side effect.  Note that lwkt_switch() is called from
 * inside the critical section to prevent its own crit_exit() from reentering
 * lwkt_yield_quick().
 *
 * gd_reqflags indicates that *something* changed, e.g. an interrupt or softint
 * came along but was blocked and made pending.
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield_quick(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    /*
     * gd_reqflags is cleared in splz if the cpl is 0.  If we were to clear
     * it with a non-zero cpl then we might not wind up calling splz after
     * a task switch when the critical section is exited even though the
     * new task could accept the interrupt.
     *
     * XXX from crit_exit() only called after last crit section is released.
     * If called directly will run splz() even if in a critical section.
     *
     * td_nest_count prevents deep nesting via splz() or doreti().  Note that
     * except for this special case, we MUST call splz() here to handle any
     * pending ints, particularly after we switch, or we might accidentally
     * halt the cpu with interrupts pending.
     */
    if (gd->gd_reqflags && td->td_nest_count < 2)
        splz();

    /*
     * YYY enabling will cause wakeup() to task-switch, which really
     * confused the old 4.x code.  This is a good way to simulate
     * preemption and MP without actually doing preemption or MP, because a
     * lot of code assumes that wakeup() does not block.
     */
    if (untimely_switch && td->td_nest_count == 0 &&
        gd->gd_intr_nesting_level == 0
    ) {
        crit_enter_quick(td);
        /*
         * YYY temporary hacks until we disassociate the userland scheduler
         * from the LWKT scheduler.
         */
        if (td->td_flags & TDF_RUNQ) {
            lwkt_switch();              /* will not reenter yield function */
        } else {
            lwkt_schedule_self(td);     /* make sure we are scheduled */
            lwkt_switch();              /* will not reenter yield function */
            lwkt_deschedule_self(td);   /* make sure we are descheduled */
        }
        crit_exit_noyield(td);
    }
}

/*
 * This implements a normal yield which, unlike _quick, will yield to equal
 * priority threads as well.  Note that gd_reqflags tests will be handled by
 * the crit_exit() call in lwkt_switch().
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
    lwkt_schedule_self(curthread);
    lwkt_switch();
}

/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * We have a little helper inline function which does additional work after
 * the thread has been enqueued, including dealing with preemption and
 * setting need_lwkt_resched() (which prevents the kernel from returning
 * to userland until it has processed higher priority threads).
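 *
 * As a rough illustration of the cross-cpu case handled below: scheduling
 * a thread owned by another cpu simply forwards the request, e.g.
 *
 *	lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_schedule, td);
 *
 * and the owning cpu re-runs lwkt_schedule() locally from its IPI handler.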
 */
static __inline
void
_lwkt_schedule_post(globaldata_t gd, thread_t ntd, int cpri)
{
    if (ntd->td_preemptable) {
        ntd->td_preemptable(ntd, cpri);         /* YYY +token */
    } else if ((ntd->td_flags & TDF_NORESCHED) == 0 &&
        (ntd->td_pri & TDPRI_MASK) > (gd->gd_curthread->td_pri & TDPRI_MASK)
    ) {
        need_lwkt_resched();
    }
}

void
lwkt_schedule(thread_t td)
{
    globaldata_t mygd = mycpu;

    KASSERT(td != &td->td_gd->gd_idlethread,
        ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
    crit_enter_gd(mygd);
    KKASSERT(td->td_proc == NULL || (td->td_proc->p_flag & P_ONRUNQ) == 0);
    if (td == mygd->gd_curthread) {
        _lwkt_enqueue(td);
    } else {
        lwkt_wait_t w;

        /*
         * If the thread is on a wait list we have to send our scheduling
         * request to the owner of the wait structure.  Otherwise we send
         * the scheduling request to the cpu owning the thread.  Races
         * are ok, the target will forward the message as necessary (the
         * message may chase the thread around before it finally gets
         * acted upon).
         *
         * (remember, wait structures use stable storage)
         *
         * NOTE: we have to account for the number of critical sections
         * under our control when calling _lwkt_schedule_post() so it
         * can figure out whether preemption is allowed.
         *
         * NOTE: The wait structure algorithms are a mess and need to be
         * rewritten.
         *
         * NOTE: We cannot safely acquire or release a token, even
         * non-blocking, because this routine may be called in the context
         * of a thread already holding the token and thus not provide any
         * interlock protection.  We cannot safely manipulate the td_toks
         * list for the same reason.  Instead we depend on our critical
         * section if the token is owned by our cpu.
         */
        if ((w = td->td_wait) != NULL) {
            if (w->wa_token.t_cpu == mygd) {
                TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
                --w->wa_count;
                td->td_wait = NULL;
#ifdef SMP
                if (td->td_gd == mygd) {
                    _lwkt_enqueue(td);
                    _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
                } else {
                    lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_schedule, td);
                }
#else
                _lwkt_enqueue(td);
                _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
#endif
            } else {
#ifdef SMP
                lwkt_send_ipiq(w->wa_token.t_cpu, (ipifunc1_t)lwkt_schedule, td);
#else
                panic("bad token %p", &w->wa_token);
#endif
            }
        } else {
            /*
             * If the wait structure is NULL and we own the thread, there
             * is no race (since we are in a critical section).  If we
             * do not own the thread there might be a race but the
             * target cpu will deal with it.
             */
#ifdef SMP
            if (td->td_gd == mygd) {
                _lwkt_enqueue(td);
                _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
            } else {
                lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_schedule, td);
            }
#else
            _lwkt_enqueue(td);
            _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
#endif
        }
    }
    crit_exit_gd(mygd);
}

/*
 * Managed acquisition.  This code assumes that the MP lock is held for
 * the tdallq operation and that the thread has been descheduled from its
 * original cpu.  We also have to wait for the thread to be entirely switched
 * out on its original cpu (this is usually fast enough that we never loop)
 * since the LWKT system does not have to hold the MP lock while switching
 * and the target may have released it before switching.
 */
void
lwkt_acquire(thread_t td)
{
    globaldata_t gd;
    globaldata_t mygd;

    gd = td->td_gd;
    mygd = mycpu;
    cpu_lfence();
    KKASSERT((td->td_flags & TDF_RUNQ) == 0);
    while (td->td_flags & TDF_RUNNING)  /* XXX spin */
        cpu_lfence();
    if (gd != mygd) {
        crit_enter_gd(mygd);
        TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);        /* protected by BGL */
        td->td_gd = mygd;
        TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq); /* protected by BGL */
        crit_exit_gd(mygd);
    }
}

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
#ifdef SMP
    if (td == curthread) {
        _lwkt_dequeue(td);
    } else {
        if (td->td_gd == mycpu) {
            _lwkt_dequeue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
        }
    }
#else
    _lwkt_dequeue(td);
#endif
    crit_exit();
}

/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread, LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
 *
 * We have to retain the critical section count which uses the high bits
 * of the td_pri field.  The specified priority may also indicate zero or
 * more critical sections by adding TDPRI_CRIT*N.
 *
 * Note that we requeue the thread whether it winds up on a different runq
 * or not.  uio_yield() depends on this and the routine is not normally
 * called with the same priority otherwise.
 */
void
lwkt_setpri(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    KKASSERT(td->td_gd == mycpu);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

/*
 * Like lwkt_setpri() but operates on the current thread.
 */
void
lwkt_setpri_self(int pri)
{
    thread_t td = curthread;

    KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

/*
 * Determine if there is a runnable thread at a higher priority than
 * the current thread.  lwkt_setpri() does not check this automatically.
 * Return 1 if there is, 0 if there isn't.
 *
 * Example: if bit 31 of runqmask is set and the current thread is priority
 * 30, then we wind up checking the mask: 0x80000000 against 0x7fffffff.
 *
 * If nq reaches 31 the shift operation will overflow to 0 and we will wind
 * up comparing against 0xffffffff, a comparison that will always be false.
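 *
 * Working the example through the expression used below: for nq = 30 the
 * mask is (2 << 30) - 1 = 0x7fffffff, so a runqmask of 0x80000000 (bit 31
 * set) compares greater and we report a higher priority runnable thread.
 * For nq = 31, (2 << 31) wraps to 0 and the mask becomes 0xffffffff, which
 * no 32 bit runqmask can exceed.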
 */
int
lwkt_checkpri_self(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    int nq = td->td_pri & TDPRI_MASK;

    while (gd->gd_runqmask > (__uint32_t)(2 << nq) - 1) {
        if (TAILQ_FIRST(&gd->gd_tdrunq[nq + 1]))
            return(1);
        ++nq;
    }
    return(0);
}

/*
 * Migrate the current thread to the specified cpu.  The BGL must be held
 * (for the gd_tdallq manipulation XXX).  This is accomplished by
 * descheduling ourselves from the current cpu, moving our thread to the
 * tdallq of the target cpu, IPI messaging the target cpu, and switching out.
 * TDF_MIGRATING prevents scheduling races while the thread is being migrated.
 */
#ifdef SMP
static void lwkt_setcpu_remote(void *arg);
#endif

void
lwkt_setcpu_self(globaldata_t rgd)
{
#ifdef SMP
    thread_t td = curthread;

    if (td->td_gd != rgd) {
        crit_enter_quick(td);
        td->td_flags |= TDF_MIGRATING;
        lwkt_deschedule_self(td);
        TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq); /* protected by BGL */
        TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);  /* protected by BGL */
        lwkt_send_ipiq(rgd, (ipifunc1_t)lwkt_setcpu_remote, td);
        lwkt_switch();
        /* we are now on the target cpu */
        crit_exit_quick(td);
    }
#endif
}

/*
 * Remote IPI for cpu migration (called while in a critical section so we
 * do not have to enter another one).  The thread has already been moved to
 * our cpu's allq, but we must wait for the thread to be completely switched
 * out on the originating cpu before we schedule it on ours or the stack
 * state may be corrupt.  We clear TDF_MIGRATING after flushing the GD
 * change to main memory.
 *
 * XXX The use of TDF_MIGRATING might not be sufficient to avoid races
 * against wakeups.  It is best if this interface is used only when there
 * are no pending events that might try to schedule the thread.
 */
#ifdef SMP
static void
lwkt_setcpu_remote(void *arg)
{
    thread_t td = arg;
    globaldata_t gd = mycpu;

    while (td->td_flags & TDF_RUNNING)
        cpu_lfence();
    td->td_gd = gd;
    cpu_sfence();
    td->td_flags &= ~TDF_MIGRATING;
    KKASSERT(td->td_proc == NULL || (td->td_proc->p_flag & P_ONRUNQ) == 0);
    _lwkt_enqueue(td);
}
#endif

/*
 * Return the lwp of the thread that was originally running, following the
 * preemption chain down from the current thread.
 */
struct lwp *
lwkt_preempted_proc(void)
{
    thread_t td = curthread;

    while (td->td_preempted)
        td = td->td_preempted;
    return(td->td_lwp);
}

/*
 * Block on the specified wait queue until signaled.  A generation number
 * must be supplied to interlock the wait queue.  The function will
 * return immediately if the generation number does not match the wait
 * structure's generation number.
 */
void
lwkt_block(lwkt_wait_t w, const char *wmesg, int *gen)
{
    thread_t td = curthread;
    lwkt_tokref ilock;

    lwkt_gettoken(&ilock, &w->wa_token);
    crit_enter();
    if (w->wa_gen == *gen) {
        _lwkt_dequeue(td);
        td->td_flags |= TDF_BLOCKQ;
        TAILQ_INSERT_TAIL(&w->wa_waitq, td, td_threadq);
        ++w->wa_count;
        td->td_wait = w;
        td->td_wmesg = wmesg;
        lwkt_switch();
        KKASSERT((td->td_flags & TDF_BLOCKQ) == 0);
        td->td_wmesg = NULL;
    }
    crit_exit();
    *gen = w->wa_gen;
    lwkt_reltoken(&ilock);
}

/*
 * Signal a wait queue.
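 * An illustrative pairing with lwkt_block() above (the condition and wmesg
 * names are hypothetical, not from the original sources) would be:
 *
 *	int gen = w->wa_gen;
 *	while (resource_not_ready)
 *		lwkt_block(w, "waitres", &gen);
 *	...
 *	lwkt_signal(w, 1);	(from the producer side)
 *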
 * We gain ownership of the wait queue in order to signal it.  Once a
 * thread is removed from the wait queue we have to deal with the cpu
 * owning the thread.
 *
 * Note: alternatively we could message the target cpu owning the wait
 * queue.  YYY implement as sysctl.
 */
void
lwkt_signal(lwkt_wait_t w, int count)
{
    thread_t td;
    lwkt_tokref ilock;

    lwkt_gettoken(&ilock, &w->wa_token);
    ++w->wa_gen;
    crit_enter();
    if (count < 0)
        count = w->wa_count;
    while ((td = TAILQ_FIRST(&w->wa_waitq)) != NULL && count) {
        --count;
        --w->wa_count;
        KKASSERT(td->td_flags & TDF_BLOCKQ);
        TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
        td->td_flags &= ~TDF_BLOCKQ;
        td->td_wait = NULL;
        KKASSERT(td->td_proc == NULL || (td->td_proc->p_flag & P_ONRUNQ) == 0);
#ifdef SMP
        if (td->td_gd == mycpu) {
            _lwkt_enqueue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_schedule, td);
        }
#else
        _lwkt_enqueue(td);
#endif
    }
    crit_exit();
    lwkt_reltoken(&ilock);
}

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - i.e. kernel only.
 *
 * NOTE! By default new threads are created with the MP lock held.  A
 * thread which does not require the MP lock should release it by calling
 * rel_mplock() at the start of the new thread.
 */
int
lwkt_create(void (*func)(void *), void *arg,
            struct thread **tdp, thread_t template, int tdflags, int cpu,
            const char *fmt, ...)
{
    thread_t td;
    __va_list ap;

    td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu);
    if (tdp)
        *tdp = td;
    cpu_set_thread_handler(td, lwkt_exit, func, arg);
    td->td_flags |= TDF_VERBOSE | tdflags;
#ifdef SMP
    td->td_mpcount = 1;
#endif

    /*
     * Set up arg0 for 'ps' etc
     */
    __va_start(ap, fmt);
    vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    __va_end(ap);

    /*
     * Schedule the thread to run
     */
    if ((td->td_flags & TDF_STOPREQ) == 0)
        lwkt_schedule(td);
    else
        td->td_flags &= ~TDF_STOPREQ;
    return 0;
}

/*
 * kthread_* is specific to the kernel and is not needed by userland.
 */
#ifdef _KERNEL

/*
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits; cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;
    globaldata_t gd;

    if (td->td_flags & TDF_VERBOSE)
        printf("kthread %p %s has exited\n", td, td->td_comm);
    caps_exit(td);
    crit_enter_quick(td);
    lwkt_deschedule_self(td);
    gd = mycpu;
    KKASSERT(gd == td->td_gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    if (td->td_flags & TDF_ALLOCATED_THREAD) {
        ++gd->gd_tdfreecount;
        TAILQ_INSERT_TAIL(&gd->gd_tdfreeq, td, td_threadq);
    }
    cpu_thread_exit();
}

#endif /* _KERNEL */

/*
 * Called when the critical section count would go negative.  Record the
 * old td_pri, reset it to 0, and panic.
 */
void
crit_panic(void)
{
    thread_t td = curthread;
    int lpri = td->td_pri;

    td->td_pri = 0;
    panic("td_pri is/would-go negative! %p %d", td, lpri);
}

#ifdef SMP

/*
 * Called from debugger/panic on cpus which have been stopped.  We must still
 * process the IPIQ while stopped, even if we were stopped while in a critical
 * section (XXX).
 *
 * If we are dumping, also try to process any pending interrupts.  This may
 * or may not work depending on the state of the cpu at the point it was
 * stopped.
 */
void
lwkt_smp_stopped(void)
{
    globaldata_t gd = mycpu;

    crit_enter_gd(gd);
    if (dumping) {
        lwkt_process_ipiq();
        splz();
    } else {
        lwkt_process_ipiq();
    }
    crit_exit_gd(gd);
}

#endif