/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.58 2004/03/30 19:14:11 dillon Exp $
 */

/*
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
 */

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/ipl.h>
#include <machine/smp.h>

#define THREAD_STACK    (UPAGES * PAGE_SIZE)

#else

#include <sys/stdint.h>
#include <libcaps/thread.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include <libcaps/globaldata.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <machine/cpufunc.h>
#include <machine/lock.h>

#endif

static int untimely_switch = 0;
#ifdef INVARIANTS
static int panic_on_cscount = 0;
#endif
static __int64_t switch_count = 0;
static __int64_t preempt_hit = 0;
static __int64_t preempt_miss = 0;
static __int64_t preempt_weird = 0;

#ifdef _KERNEL

SYSCTL_INT(_lwkt, OID_AUTO, untimely_switch, CTLFLAG_RW, &untimely_switch, 0, "");
#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, "");
#endif
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, "");

#endif
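
/*
 * The debug knobs and counters above are exported under the 'lwkt' sysctl
 * tree (the parent _lwkt node is declared elsewhere in the kernel), so they
 * can be inspected, and the writable ones tweaked, from userland, e.g.:
 *
 *        sysctl lwkt
 *        sysctl -w lwkt.untimely_switch=1
 */
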
/*
 * These helper procedures handle the runq, they can only be called from
 * within a critical section.
 *
 * WARNING!  Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once SMP
 * is live, enqueuing and dequeuing only occur on the current cpu.
 */
static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&gd->gd_tdrunq[nq], td, td_threadq);
        /* runqmask is passively cleaned up by the switcher */
    }
}

static __inline
void
_lwkt_enqueue(thread_t td)
{
    if ((td->td_flags & TDF_RUNQ) == 0) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags |= TDF_RUNQ;
        TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], td, td_threadq);
        gd->gd_runqmask |= 1 << nq;
    }
}

#ifdef _KERNEL

/*
 * LWKTs operate on a per-cpu basis.
 *
 * WARNING!  Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    int i;

    for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i)
        TAILQ_INIT(&gd->gd_tdrunq[i]);
    gd->gd_runqmask = 0;
    TAILQ_INIT(&gd->gd_tdallq);
}

#endif /* _KERNEL */

/*
 * Initialize a thread wait structure prior to first use.
 *
 * NOTE!  Called from low level boot code, we cannot do anything fancy!
 */
void
lwkt_wait_init(lwkt_wait_t w)
{
    lwkt_token_init(&w->wa_token);
    TAILQ_INIT(&w->wa_waitq);
    w->wa_gen = 0;
    w->wa_count = 0;
}

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.  If the target cpu is
 * -1 the thread will be created on the current cpu.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td, int cpu)
{
    void *stack;
    int flags = 0;

    if (td == NULL) {
        crit_enter();
        if (mycpu->gd_tdfreecount > 0) {
            --mycpu->gd_tdfreecount;
            td = TAILQ_FIRST(&mycpu->gd_tdfreeq);
            KASSERT(td != NULL && (td->td_flags & TDF_RUNNING) == 0,
                ("lwkt_alloc_thread: unexpected NULL or corrupted td"));
            TAILQ_REMOVE(&mycpu->gd_tdfreeq, td, td_threadq);
            crit_exit();
            stack = td->td_kstack;
            flags = td->td_flags & (TDF_ALLOCATED_STACK|TDF_ALLOCATED_THREAD);
        } else {
            crit_exit();
#ifdef _KERNEL
            td = zalloc(thread_zone);
#else
            td = malloc(sizeof(struct thread));
#endif
            td->td_kstack = NULL;
            flags |= TDF_ALLOCATED_THREAD;
        }
    }
    if ((stack = td->td_kstack) == NULL) {
#ifdef _KERNEL
        stack = (void *)kmem_alloc(kernel_map, THREAD_STACK);
#else
        stack = libcaps_alloc_stack(THREAD_STACK);
#endif
        flags |= TDF_ALLOCATED_STACK;
    }
    if (cpu < 0)
        lwkt_init_thread(td, stack, flags, mycpu);
    else
        lwkt_init_thread(td, stack, flags, globaldata_find(cpu));
    return(td);
}

#ifdef _KERNEL

/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * All threads start out in a critical section at a priority of
 * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
 * appropriate.  This function may send an IPI message when the
 * requested cpu is not the current cpu and consequently gd_tdallq may
 * not be initialized synchronously from the point of view of the originating
 * cpu.
 *
 * NOTE!  We have to be careful about creating threads for other cpus
 * if SMP has not yet been activated.
 */
#ifdef SMP

static void
lwkt_init_thread_remote(void *arg)
{
    thread_t td = arg;

    TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
}

#endif

void
lwkt_init_thread(thread_t td, void *stack, int flags, struct globaldata *gd)
{
    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_flags |= flags;
    td->td_gd = gd;
    td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT;
    lwkt_initport(&td->td_msgport, td);
    pmap_init_thread(td);
#ifdef SMP
    if (gd == mycpu) {
        crit_enter();
        TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
        crit_exit();
    } else {
        lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
    }
#else
    crit_enter();
    TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
    crit_exit();
#endif
}

#endif /* _KERNEL */

/*
 * Set the thread's command/debug name (td_comm), visible to 'ps' and the
 * debugger.
 */
void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
    __va_list va;

    __va_start(va, ctl);
    vsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
    __va_end(va);
}

/*
 * Hold/drop a reference on a thread.  A held thread is not reaped until the
 * reference is released (see lwkt_wait_free()).
 */
void
lwkt_hold(thread_t td)
{
    ++td->td_refs;
}

void
lwkt_rele(thread_t td)
{
    KKASSERT(td->td_refs > 0);
    --td->td_refs;
}

#ifdef _KERNEL

/*
 * Wait (polling roughly once a second) until all references on the thread
 * have been released.
 */
void
lwkt_wait_free(thread_t td)
{
    while (td->td_refs)
        tsleep(td, 0, "tdreap", hz);
}

#endif

/*
 * Release a thread's resources.  Cached threads are returned to the per-cpu
 * free queue; otherwise the stack and/or thread structure is freed if we
 * allocated it.
 */
void
lwkt_free_thread(thread_t td)
{
    struct globaldata *gd = mycpu;

    KASSERT((td->td_flags & TDF_RUNNING) == 0,
        ("lwkt_free_thread: did not exit! %p", td));

    crit_enter();
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    if (gd->gd_tdfreecount < CACHE_NTHREADS &&
        (td->td_flags & TDF_ALLOCATED_THREAD)
    ) {
        ++gd->gd_tdfreecount;
        TAILQ_INSERT_HEAD(&gd->gd_tdfreeq, td, td_threadq);
        crit_exit();
    } else {
        crit_exit();
        if (td->td_kstack && (td->td_flags & TDF_ALLOCATED_STACK)) {
#ifdef _KERNEL
            kmem_free(kernel_map, (vm_offset_t)td->td_kstack, THREAD_STACK);
#else
            libcaps_free_stack(td->td_kstack, THREAD_STACK);
#endif
            /* gd invalid */
            td->td_kstack = NULL;
        }
        if (td->td_flags & TDF_ALLOCATED_THREAD) {
#ifdef _KERNEL
            zfree(thread_zone, td);
#else
            free(td);
#endif
        }
    }
}

/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * The MP lock may be out of sync with the thread's td_mpcount.  lwkt_switch()
 * cleans it up.  Note that the td_switch() function cannot do anything that
 * requires the MP lock since the MP lock will have already been setup for
 * the target thread (not the current thread).  It's nice to have a scheduler
 * that does not need the MP lock to work because it allows us to do some
 * really cool high-performance MP lock optimizations.
 */

void
lwkt_switch(void)
{
    globaldata_t gd;
    thread_t td = curthread;
    thread_t ntd;
#ifdef SMP
    int mpheld;
#endif

    /*
     * Switching from within a 'fast' (non thread switched) interrupt is
     * illegal.
     */
    if (mycpu->gd_intr_nesting_level && panicstr == NULL) {
        panic("lwkt_switch: cannot switch from within a fast interrupt, yet\n");
    }

    /*
     * Passive release (used to transition from user to kernel mode
     * when we block or switch rather than when we enter the kernel).
     * This function is NOT called if we are switching into a preemption
     * or returning from a preemption.  Typically this causes us to lose
     * our current process designation (if we have one) and become a true
     * LWKT thread, and may also hand the current process designation to
     * another process and schedule its thread.
     */
    if (td->td_release)
        td->td_release(td);

    crit_enter();
    ++switch_count;

#ifdef SMP
    /*
     * td_mpcount cannot be used to determine if we currently hold the
     * MP lock because get_mplock() will increment it prior to attempting
     * to get the lock, and switch out if it can't.  Our ownership of
     * the actual lock will remain stable while we are in a critical section
     * (but, of course, another cpu may own or release the lock so the
     * actual value of mp_lock is not stable).
     */
    mpheld = MP_LOCK_HELD();
#ifdef INVARIANTS
    if (td->td_cscount) {
        printf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
            td);
        if (panic_on_cscount)
            panic("switching while mastering cpusync");
    }
#endif
#endif
    if ((ntd = td->td_preempted) != NULL) {
        /*
         * We had preempted another thread on this cpu, resume the preempted
         * thread.  This occurs transparently, whether the preempted thread
         * was scheduled or not (it may have been preempted after descheduling
         * itself).
         *
         * We have to setup the MP lock for the original thread after backing
         * out the adjustment that was made to curthread when the original
         * was preempted.
         */
        KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
#ifdef SMP
        if (ntd->td_mpcount && mpheld == 0) {
            panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d\n",
                td, ntd, td->td_mpcount, ntd->td_mpcount);
        }
        if (ntd->td_mpcount) {
            td->td_mpcount -= ntd->td_mpcount;
            KKASSERT(td->td_mpcount >= 0);
        }
#endif
        ntd->td_flags |= TDF_PREEMPT_DONE;
        /* YYY release mp lock on switchback if original doesn't need it */
    } else {
        /*
         * Priority queue / round-robin at each priority.  Note that user
         * processes run at a fixed, low priority and the user process
         * scheduler deals with interactions between user processes
         * by scheduling and descheduling them from the LWKT queue as
         * necessary.
         *
         * We have to adjust the MP lock for the target thread.  If we
         * need the MP lock and cannot obtain it we try to locate a
         * thread that does not need the MP lock.  If we cannot, we spin
         * instead of HLT.
         *
         * A similar issue exists for the tokens held by the target thread.
         * If we cannot obtain ownership of the tokens we cannot immediately
         * schedule the thread.
         */

        /*
         * We are switching threads.  If there are any pending requests for
         * tokens we can satisfy all of them here.
         */
        gd = mycpu;
#ifdef SMP
        if (gd->gd_tokreqbase)
            lwkt_drain_token_requests();
#endif

    again:
        if (gd->gd_runqmask) {
            /* highest set bit == highest-priority non-empty run queue */
            int nq = bsrl(gd->gd_runqmask);
            if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) == NULL) {
                gd->gd_runqmask &= ~(1 << nq);
                goto again;
            }
#ifdef SMP
            /*
             * If the target needs the MP lock and we couldn't get it,
             * or if the target is holding tokens and we could not
             * gain ownership of the tokens, continue looking for a
             * thread to schedule and spin instead of HLT if we can't.
             */
            if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) ||
                (ntd->td_toks && lwkt_chktokens(ntd) == 0)
            ) {
                u_int32_t rqmask = gd->gd_runqmask;
                while (rqmask) {
                    TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) {
                        if (ntd->td_mpcount && !mpheld && !cpu_try_mplock())
                            continue;
                        mpheld = MP_LOCK_HELD();
                        if (ntd->td_toks && !lwkt_chktokens(ntd))
                            continue;
                        break;
                    }
                    if (ntd)
                        break;
                    rqmask &= ~(1 << nq);
                    nq = bsrl(rqmask);
                }
                if (ntd == NULL) {
                    ntd = &gd->gd_idlethread;
                    ntd->td_flags |= TDF_IDLE_NOHLT;
                } else {
                    TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                    TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
                }
            } else {
                TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
            }
#else
            TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
#endif
        } else {
            /*
             * We have nothing to run but only let the idle loop halt
             * the cpu if there are no pending interrupts.
             */
            ntd = &gd->gd_idlethread;
            if (gd->gd_reqflags & RQF_IDLECHECK_MASK)
                ntd->td_flags |= TDF_IDLE_NOHLT;
        }
    }
    KASSERT(ntd->td_pri >= TDPRI_CRIT,
        ("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri));

    /*
     * Do the actual switch.  If the new target does not need the MP lock
     * and we are holding it, release the MP lock.  If the new target requires
     * the MP lock we have already acquired it for the target.
     */
#ifdef SMP
    if (ntd->td_mpcount == 0) {
        if (MP_LOCK_HELD())
            cpu_rel_mplock();
    } else {
        ASSERT_MP_LOCK_HELD();
    }
#endif
    if (td != ntd) {
        td->td_switch(ntd);
    }

    crit_exit();
}

/*
 * Request that the target thread preempt the current thread.  Preemption
 * only works under a specific set of conditions:
 *
 *  - We are not preempting ourselves
 *  - The target thread is owned by the current cpu
 *  - We are not currently being preempted
 *  - The target is not currently being preempted
 *  - We are able to satisfy the target's MP lock requirements (if any).
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critpri is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * XXX at the moment we run the target thread in a critical section during
 * the preemption in order to prevent the target from taking interrupts
 * that *WE* can't.  Preemption is strictly limited to interrupt threads
 * and interrupt-like threads, outside of a critical section, and the
 * preempted source thread will be resumed the instant the target blocks
 * whether or not the source is scheduled (i.e. preemption is supposed to
 * be as transparent as possible).
 *
 * The target thread inherits our MP count (added to its own) for the
 * duration of the preemption in order to preserve the atomicity of the
 * MP lock during the preemption.  Therefore, any preempting targets must be
 * careful with regard to MP assertions.  Note that the MP count may be
 * out of sync with the physical mp_lock, but we do not have to preserve
 * the original ownership of the lock if it was out of sync (that is, we
 * can leave it synchronized on return).
 */
void
lwkt_preempt(thread_t ntd, int critpri)
{
    struct globaldata *gd = mycpu;
    thread_t td;
#ifdef SMP
    int mpheld;
    int savecnt;
#endif

    /*
     * The caller has put us in a critical section.  We can only preempt
     * if the caller of the caller was not in a critical section (basically
     * a local interrupt), as determined by the 'critpri' parameter.
     *
     * YYY The target thread must be in a critical section (else it must
     * inherit our critical section?  I dunno yet).
     *
     * Any tokens held by the target may not be held by thread(s) being
     * preempted.  We take the easy way out and do not preempt if
     * the target is holding tokens.
     *
     * Set need_lwkt_resched() unconditionally for now YYY.
     */
    KASSERT(ntd->td_pri >= TDPRI_CRIT, ("BADCRIT0 %d", ntd->td_pri));

    td = gd->gd_curthread;
    need_lwkt_resched();
    if ((ntd->td_pri & TDPRI_MASK) <= (td->td_pri & TDPRI_MASK)) {
        ++preempt_miss;
        return;
    }
    if ((td->td_pri & ~TDPRI_MASK) > critpri) {
        ++preempt_miss;
        return;
    }
#ifdef SMP
    if (ntd->td_gd != gd) {
        ++preempt_miss;
        return;
    }
#endif
    /*
     * Take the easy way out and do not preempt if the target is holding
     * one or more tokens.  We could test whether the thread(s) being
     * preempted interlock against the target thread's tokens and whether
     * we can get all the target thread's tokens, but this situation
     * should not occur very often so it's easier to simply not preempt.
     */
    if (ntd->td_toks != NULL) {
        ++preempt_miss;
        return;
    }
    if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
        ++preempt_weird;
        return;
    }
    if (ntd->td_preempted) {
        ++preempt_hit;
        return;
    }
#ifdef SMP
    /*
     * note: an interrupt might have occurred just as we were transitioning
     * to or from the MP lock.  In this case td_mpcount will be pre-disposed
     * (non-zero) but not actually synchronized with the actual state of the
     * lock.  We can use it to imply an MP lock requirement for the
     * preemption but we cannot use it to test whether we hold the MP lock
     * or not.
     */
    savecnt = td->td_mpcount;
    mpheld = MP_LOCK_HELD();
    ntd->td_mpcount += td->td_mpcount;
    if (mpheld == 0 && ntd->td_mpcount && !cpu_try_mplock()) {
        ntd->td_mpcount -= td->td_mpcount;
        ++preempt_miss;
        return;
    }
#endif

    ++preempt_hit;
    ntd->td_preempted = td;
    td->td_flags |= TDF_PREEMPT_LOCK;
    td->td_switch(ntd);
    KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
#ifdef SMP
    KKASSERT(savecnt == td->td_mpcount);
    mpheld = MP_LOCK_HELD();
    if (mpheld && td->td_mpcount == 0)
        cpu_rel_mplock();
    else if (mpheld == 0 && td->td_mpcount)
        panic("lwkt_preempt(): MP lock was not held through");
#endif
    ntd->td_preempted = NULL;
    td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
}

/*
 * Yield our thread while higher priority threads are pending.  This is
 * typically called when we leave a critical section but it can be safely
 * called while we are in a critical section.
 *
 * This function will not generally yield to equal priority threads but it
 * can occur as a side effect.  Note that lwkt_switch() is called from
 * inside the critical section to prevent its own crit_exit() from reentering
 * lwkt_yield_quick().
 *
 * gd_reqflags indicates that *something* changed, e.g. an interrupt or softint
 * came along but was blocked and made pending.
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield_quick(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    /*
     * gd_reqflags is cleared in splz if the cpl is 0.  If we were to clear
     * it with a non-zero cpl then we might not wind up calling splz after
     * a task switch when the critical section is exited even though the
     * new task could accept the interrupt.
     *
     * XXX from crit_exit() only called after last crit section is released.
     * If called directly will run splz() even if in a critical section.
     *
     * td_nest_count prevents deep nesting via splz() or doreti().  Note that
     * except for this special case, we MUST call splz() here to handle any
     * pending ints, particularly after we switch, or we might accidentally
     * halt the cpu with interrupts pending.
     */
    if (gd->gd_reqflags && td->td_nest_count < 2)
        splz();

    /*
     * YYY enabling will cause wakeup() to task-switch, which really
     * confused the old 4.x code.  This is a good way to simulate
     * preemption and MP without actually doing preemption or MP, because a
     * lot of code assumes that wakeup() does not block.
     */
    if (untimely_switch && td->td_nest_count == 0 &&
        gd->gd_intr_nesting_level == 0
    ) {
        crit_enter();
        /*
         * YYY temporary hacks until we disassociate the userland scheduler
         * from the LWKT scheduler.
         */
        if (td->td_flags & TDF_RUNQ) {
            lwkt_switch();              /* will not reenter yield function */
        } else {
            lwkt_schedule_self();       /* make sure we are scheduled */
            lwkt_switch();              /* will not reenter yield function */
            lwkt_deschedule_self();     /* make sure we are descheduled */
        }
        crit_exit_noyield(td);
    }
}

/*
 * This implements a normal yield which, unlike _quick, will yield to equal
 * priority threads as well.  Note that gd_reqflags tests will be handled by
 * the crit_exit() call in lwkt_switch().
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
    lwkt_schedule_self();
    lwkt_switch();
}

/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * function.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(void)
{
    thread_t td = curthread;

    crit_enter_quick(td);
    KASSERT(td->td_wait == NULL, ("lwkt_schedule_self(): td_wait not NULL!"));
    KASSERT(td != &td->td_gd->gd_idlethread,
        ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
    _lwkt_enqueue(td);
#ifdef _KERNEL
    if (td->td_proc && td->td_proc->p_stat == SSLEEP)
        panic("SCHED SELF PANIC");
#endif
    crit_exit_quick(td);
}

/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * We have a little helper inline function which does additional work after
 * the thread has been enqueued, including dealing with preemption and
 * setting need_lwkt_resched() (which prevents the kernel from returning
 * to userland until it has processed higher priority threads).
 */
static __inline
void
_lwkt_schedule_post(thread_t ntd, int cpri)
{
    if (ntd->td_preemptable) {
        ntd->td_preemptable(ntd, cpri); /* YYY +token */
    } else {
        if ((ntd->td_flags & TDF_NORESCHED) == 0) {
            if ((ntd->td_pri & TDPRI_MASK) >= TDPRI_KERN_USER)
                need_lwkt_resched();
        }
    }
}

void
lwkt_schedule(thread_t td)
{
#ifdef INVARIANTS
    KASSERT(td != &td->td_gd->gd_idlethread,
        ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
    if ((td->td_flags & TDF_PREEMPT_LOCK) == 0 && td->td_proc
        && td->td_proc->p_stat == SSLEEP
    ) {
        printf("PANIC schedule curtd = %p (%d %d) target %p (%d %d)\n",
            curthread,
            curthread->td_proc ? curthread->td_proc->p_pid : -1,
            curthread->td_proc ? curthread->td_proc->p_stat : -1,
            td,
            td->td_proc ? td->td_proc->p_pid : -1,
            td->td_proc ? td->td_proc->p_stat : -1
        );
        panic("SCHED PANIC");
    }
#endif
    crit_enter();
    if (td == curthread) {
        _lwkt_enqueue(td);
    } else {
        lwkt_wait_t w;

        /*
         * If the thread is on a wait list we have to send our scheduling
         * request to the owner of the wait structure.  Otherwise we send
         * the scheduling request to the cpu owning the thread.  Races
         * are ok, the target will forward the message as necessary (the
         * message may chase the thread around before it finally gets
         * acted upon).
         *
         * (remember, wait structures use stable storage)
         *
         * NOTE: tokens no longer enter a critical section, so we only need
         * to account for the crit_enter() above when calling
         * _lwkt_schedule_post().
         */
        if ((w = td->td_wait) != NULL) {
            lwkt_tokref wref;

            if (lwkt_trytoken(&wref, &w->wa_token)) {
                TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
                --w->wa_count;
                td->td_wait = NULL;
#ifdef SMP
                if (td->td_gd == mycpu) {
                    _lwkt_enqueue(td);
                    _lwkt_schedule_post(td, TDPRI_CRIT);
                } else {
                    lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td);
                }
#else
                _lwkt_enqueue(td);
                _lwkt_schedule_post(td, TDPRI_CRIT);
#endif
                lwkt_reltoken(&wref);
            } else {
                lwkt_send_ipiq(w->wa_token.t_cpu, (ipifunc_t)lwkt_schedule, td);
            }
        } else {
            /*
             * If the wait structure is NULL and we own the thread, there
             * is no race (since we are in a critical section).  If we
             * do not own the thread there might be a race but the
             * target cpu will deal with it.
             */
#ifdef SMP
            if (td->td_gd == mycpu) {
                _lwkt_enqueue(td);
                _lwkt_schedule_post(td, TDPRI_CRIT);
            } else {
                lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td);
            }
#else
            _lwkt_enqueue(td);
            _lwkt_schedule_post(td, TDPRI_CRIT);
#endif
        }
    }
    crit_exit();
}

/*
 * Managed acquisition.  This code assumes that the MP lock is held for
 * the tdallq operation and that the thread has been descheduled from its
 * original cpu.
 * We also have to wait for the thread to be entirely switched out on its
 * original cpu (this is usually fast enough that we never loop) since the
 * LWKT system does not have to hold the MP lock while switching and the
 * target may have released it before switching.
 */
void
lwkt_acquire(thread_t td)
{
    struct globaldata *gd;

    gd = td->td_gd;
    KKASSERT((td->td_flags & TDF_RUNQ) == 0);
    while (td->td_flags & TDF_RUNNING)  /* XXX spin */
        ;
    if (gd != mycpu) {
        crit_enter();
        TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);      /* protected by BGL */
        gd = mycpu;
        td->td_gd = gd;
        TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq); /* protected by BGL */
        crit_exit();
    }
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(void)
{
    thread_t td = curthread;

    crit_enter();
    KASSERT(td->td_wait == NULL, ("lwkt_deschedule_self(): td_wait not NULL!"));
    _lwkt_dequeue(td);
    crit_exit();
}

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
    if (td == curthread) {
        _lwkt_dequeue(td);
    } else {
        if (td->td_gd == mycpu) {
            _lwkt_dequeue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_deschedule, td);
        }
    }
    crit_exit();
}

/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread, LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
 *
 * We have to retain the critical section count which uses the high bits
 * of the td_pri field.  The specified priority may also indicate zero or
 * more critical sections by adding TDPRI_CRIT*N.
 *
 * Note that we requeue the thread whether it winds up on a different runq
 * or not.  uio_yield() depends on this and the routine is not normally
 * called with the same priority otherwise.
 */
void
lwkt_setpri(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    KKASSERT(td->td_gd == mycpu);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

/*
 * Set the current thread's priority (same rules as lwkt_setpri() above).
 */
void
lwkt_setpri_self(int pri)
{
    thread_t td = curthread;

    KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

/*
 * Return the process context, if any, of the thread at the bottom of the
 * current preemption chain (the thread that was originally running).
 */
struct proc *
lwkt_preempted_proc(void)
{
    thread_t td = curthread;
    while (td->td_preempted)
        td = td->td_preempted;
    return(td->td_proc);
}
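
/*
 * The lwkt_block()/lwkt_signal() pair below interlocks on the wait
 * structure's generation number (wa_gen).  A minimal usage sketch,
 * illustrative only ('my_wait' and 'ready' are hypothetical names, not
 * part of this file):
 *
 *        static struct lwkt_wait my_wait;   (set up with lwkt_wait_init())
 *        static int ready;
 *
 *        consumer:
 *                int gen = my_wait.wa_gen;
 *                while (ready == 0)
 *                        lwkt_block(&my_wait, "rdywait", &gen);
 *
 *        producer:
 *                ready = 1;
 *                lwkt_signal(&my_wait, 1);
 *
 * If the producer slips in between the consumer's test of 'ready' and its
 * call to lwkt_block(), wa_gen no longer matches the cached 'gen' and
 * lwkt_block() returns immediately rather than sleeping through a signal
 * that has already been delivered.
 */
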
/*
 * Block on the specified wait queue until signaled.  A generation number
 * must be supplied to interlock the wait queue.  The function will
 * return immediately if the generation number does not match the wait
 * structure's generation number.
 */
void
lwkt_block(lwkt_wait_t w, const char *wmesg, int *gen)
{
    thread_t td = curthread;
    lwkt_tokref ilock;

    lwkt_gettoken(&ilock, &w->wa_token);
    crit_enter();
    if (w->wa_gen == *gen) {
        _lwkt_dequeue(td);
        TAILQ_INSERT_TAIL(&w->wa_waitq, td, td_threadq);
        ++w->wa_count;
        td->td_wait = w;
        td->td_wmesg = wmesg;
    again:
        lwkt_switch();
        if (td->td_wmesg != NULL) {
            _lwkt_dequeue(td);
            goto again;
        }
    }
    crit_exit();
    *gen = w->wa_gen;
    lwkt_reltoken(&ilock);
}

/*
 * Signal a wait queue.  We gain ownership of the wait queue in order to
 * signal it.  Once a thread is removed from the wait queue we have to
 * deal with the cpu owning the thread.
 *
 * Note: alternatively we could message the target cpu owning the wait
 * queue.  YYY implement as sysctl.
 */
void
lwkt_signal(lwkt_wait_t w, int count)
{
    thread_t td;
    lwkt_tokref ilock;

    lwkt_gettoken(&ilock, &w->wa_token);
    ++w->wa_gen;
    crit_enter();
    if (count < 0)
        count = w->wa_count;
    while ((td = TAILQ_FIRST(&w->wa_waitq)) != NULL && count) {
        --count;
        --w->wa_count;
        TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
        td->td_wait = NULL;
        td->td_wmesg = NULL;
        if (td->td_gd == mycpu) {
            _lwkt_enqueue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td);
        }
    }
    crit_exit();
    lwkt_reltoken(&ilock);
}

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * NOTE!  By default new threads are created with the MP lock held.  A
 * thread which does not require the MP lock should release it by calling
 * rel_mplock() at the start of the new thread.
 */
int
lwkt_create(void (*func)(void *), void *arg,
    struct thread **tdp, thread_t template, int tdflags, int cpu,
    const char *fmt, ...)
{
    thread_t td;
    __va_list ap;

    td = lwkt_alloc_thread(template, cpu);
    if (tdp)
        *tdp = td;
    cpu_set_thread_handler(td, lwkt_exit, func, arg);
    td->td_flags |= TDF_VERBOSE | tdflags;
#ifdef SMP
    td->td_mpcount = 1;
#endif

    /*
     * Set up arg0 for 'ps' etc
     */
    __va_start(ap, fmt);
    vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    __va_end(ap);

    /*
     * Schedule the thread to run
     */
    if ((td->td_flags & TDF_STOPREQ) == 0)
        lwkt_schedule(td);
    else
        td->td_flags &= ~TDF_STOPREQ;
    return 0;
}
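
/*
 * A minimal lwkt_create() usage sketch.  Illustrative only; the names
 * 'my_worker' and 'my_td' are hypothetical and not part of this file:
 *
 *        static struct thread *my_td;
 *
 *        static void
 *        my_worker(void *dummy)
 *        {
 *                rel_mplock();   (drop the MP lock if the worker does not need it)
 *                for (;;) {
 *                        (wait for and process work)
 *                }
 *        }
 *
 *        lwkt_create(my_worker, NULL, &my_td, NULL, 0, -1, "myworker");
 *
 * A NULL template allocates a fresh thread, cpu == -1 creates it on the
 * current cpu, and the format string becomes td_comm (visible via 'ps').
 * The new thread is scheduled immediately unless TDF_STOPREQ is passed in
 * tdflags.
 */
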
/*
 * kthread_* is specific to the kernel and is not needed by userland.
 */
#ifdef _KERNEL

/*
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits; cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;

    if (td->td_flags & TDF_VERBOSE)
        printf("kthread %p %s has exited\n", td, td->td_comm);
    caps_exit(td);
    crit_enter();
    lwkt_deschedule_self();
    ++mycpu->gd_tdfreecount;
    TAILQ_INSERT_TAIL(&mycpu->gd_tdfreeq, td, td_threadq);
    cpu_thread_exit();
}

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.  5.x compatible.
 *
 * NOTE!  By default kthreads are created with the MP lock held.  A
 * thread which does not require the MP lock should release it by calling
 * rel_mplock() at the start of the new thread.
 */
int
kthread_create(void (*func)(void *), void *arg,
    struct thread **tdp, const char *fmt, ...)
{
    thread_t td;
    __va_list ap;

    td = lwkt_alloc_thread(NULL, -1);
    if (tdp)
        *tdp = td;
    cpu_set_thread_handler(td, kthread_exit, func, arg);
    td->td_flags |= TDF_VERBOSE;
#ifdef SMP
    td->td_mpcount = 1;
#endif

    /*
     * Set up arg0 for 'ps' etc
     */
    __va_start(ap, fmt);
    vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    __va_end(ap);

    /*
     * Schedule the thread to run
     */
    lwkt_schedule(td);
    return 0;
}

/*
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits; cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 *
 * XXX duplicates lwkt_exit()
 */
void
kthread_exit(void)
{
    lwkt_exit();
}

#endif /* _KERNEL */

/*
 * Called when td_pri (which also encodes the critical section count in its
 * high bits) is about to go negative.  Reset it and panic, reporting the
 * old value.
 */
void
crit_panic(void)
{
    thread_t td = curthread;
    int lpri = td->td_pri;

    td->td_pri = 0;
    panic("td_pri is/would-go negative! %p %d", td, lpri);
}