1 /* 2 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.59 2004/04/10 20:55:23 dillon Exp $ 27 */ 28 29 /* 30 * Each cpu in a system has its own self-contained light weight kernel 31 * thread scheduler, which means that generally speaking we only need 32 * to use a critical section to avoid problems. Foreign thread 33 * scheduling is queued via (async) IPIs. 34 */ 35 36 #ifdef _KERNEL 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/kernel.h> 41 #include <sys/proc.h> 42 #include <sys/rtprio.h> 43 #include <sys/queue.h> 44 #include <sys/thread2.h> 45 #include <sys/sysctl.h> 46 #include <sys/kthread.h> 47 #include <machine/cpu.h> 48 #include <sys/lock.h> 49 #include <sys/caps.h> 50 51 #include <vm/vm.h> 52 #include <vm/vm_param.h> 53 #include <vm/vm_kern.h> 54 #include <vm/vm_object.h> 55 #include <vm/vm_page.h> 56 #include <vm/vm_map.h> 57 #include <vm/vm_pager.h> 58 #include <vm/vm_extern.h> 59 #include <vm/vm_zone.h> 60 61 #include <machine/stdarg.h> 62 #include <machine/ipl.h> 63 #include <machine/smp.h> 64 65 #define THREAD_STACK (UPAGES * PAGE_SIZE) 66 67 #else 68 69 #include <sys/stdint.h> 70 #include <libcaps/thread.h> 71 #include <sys/thread.h> 72 #include <sys/msgport.h> 73 #include <sys/errno.h> 74 #include <libcaps/globaldata.h> 75 #include <sys/thread2.h> 76 #include <sys/msgport2.h> 77 #include <stdio.h> 78 #include <stdlib.h> 79 #include <string.h> 80 #include <machine/cpufunc.h> 81 #include <machine/lock.h> 82 83 #endif 84 85 static int untimely_switch = 0; 86 #ifdef INVARIANTS 87 static int panic_on_cscount = 0; 88 #endif 89 static __int64_t switch_count = 0; 90 static __int64_t preempt_hit = 0; 91 static __int64_t preempt_miss = 0; 92 static __int64_t preempt_weird = 0; 93 94 #ifdef _KERNEL 95 96 SYSCTL_INT(_lwkt, OID_AUTO, untimely_switch, CTLFLAG_RW, &untimely_switch, 0, ""); 97 #ifdef INVARIANTS 98 SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, ""); 99 #endif 100 SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, ""); 101 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, ""); 102 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, ""); 103 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, ""); 104 105 #endif 106 107 /* 108 * These helper procedures handle the runq, they can only be called from 109 * within a critical section. 110 * 111 * WARNING! Prior to SMP being brought up it is possible to enqueue and 112 * dequeue threads belonging to other cpus, so be sure to use td->td_gd 113 * instead of 'mycpu' when referencing the globaldata structure. Once 114 * SMP live enqueuing and dequeueing only occurs on the current cpu. 115 */ 116 static __inline 117 void 118 _lwkt_dequeue(thread_t td) 119 { 120 if (td->td_flags & TDF_RUNQ) { 121 int nq = td->td_pri & TDPRI_MASK; 122 struct globaldata *gd = td->td_gd; 123 124 td->td_flags &= ~TDF_RUNQ; 125 TAILQ_REMOVE(&gd->gd_tdrunq[nq], td, td_threadq); 126 /* runqmask is passively cleaned up by the switcher */ 127 } 128 } 129 130 static __inline 131 void 132 _lwkt_enqueue(thread_t td) 133 { 134 if ((td->td_flags & TDF_RUNQ) == 0) { 135 int nq = td->td_pri & TDPRI_MASK; 136 struct globaldata *gd = td->td_gd; 137 138 td->td_flags |= TDF_RUNQ; 139 TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], td, td_threadq); 140 gd->gd_runqmask |= 1 << nq; 141 } 142 } 143 144 /* 145 * Schedule a thread to run. As the current thread we can always safely 146 * schedule ourselves, and a shortcut procedure is provided for that 147 * function. 148 * 149 * (non-blocking, self contained on a per cpu basis) 150 */ 151 void 152 lwkt_schedule_self(thread_t td) 153 { 154 crit_enter_quick(td); 155 KASSERT(td->td_wait == NULL, ("lwkt_schedule_self(): td_wait not NULL!")); 156 KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!")); 157 _lwkt_enqueue(td); 158 #ifdef _KERNEL 159 if (td->td_proc && td->td_proc->p_stat == SSLEEP) 160 panic("SCHED SELF PANIC"); 161 #endif 162 crit_exit_quick(td); 163 } 164 165 /* 166 * Deschedule a thread. 167 * 168 * (non-blocking, self contained on a per cpu basis) 169 */ 170 void 171 lwkt_deschedule_self(thread_t td) 172 { 173 crit_enter_quick(td); 174 KASSERT(td->td_wait == NULL, ("lwkt_schedule_self(): td_wait not NULL!")); 175 _lwkt_dequeue(td); 176 crit_exit_quick(td); 177 } 178 179 #ifdef _KERNEL 180 181 /* 182 * LWKTs operate on a per-cpu basis 183 * 184 * WARNING! Called from early boot, 'mycpu' may not work yet. 185 */ 186 void 187 lwkt_gdinit(struct globaldata *gd) 188 { 189 int i; 190 191 for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i) 192 TAILQ_INIT(&gd->gd_tdrunq[i]); 193 gd->gd_runqmask = 0; 194 TAILQ_INIT(&gd->gd_tdallq); 195 } 196 197 #endif /* _KERNEL */ 198 199 /* 200 * Initialize a thread wait structure prior to first use. 201 * 202 * NOTE! called from low level boot code, we cannot do anything fancy! 203 */ 204 void 205 lwkt_wait_init(lwkt_wait_t w) 206 { 207 lwkt_token_init(&w->wa_token); 208 TAILQ_INIT(&w->wa_waitq); 209 w->wa_gen = 0; 210 w->wa_count = 0; 211 } 212 213 /* 214 * Create a new thread. The thread must be associated with a process context 215 * or LWKT start address before it can be scheduled. If the target cpu is 216 * -1 the thread will be created on the current cpu. 217 * 218 * If you intend to create a thread without a process context this function 219 * does everything except load the startup and switcher function. 220 */ 221 thread_t 222 lwkt_alloc_thread(struct thread *td, int cpu) 223 { 224 void *stack; 225 int flags = 0; 226 globaldata_t gd = mycpu; 227 228 if (td == NULL) { 229 crit_enter_gd(gd); 230 if (gd->gd_tdfreecount > 0) { 231 --gd->gd_tdfreecount; 232 td = TAILQ_FIRST(&gd->gd_tdfreeq); 233 KASSERT(td != NULL && (td->td_flags & TDF_RUNNING) == 0, 234 ("lwkt_alloc_thread: unexpected NULL or corrupted td")); 235 TAILQ_REMOVE(&gd->gd_tdfreeq, td, td_threadq); 236 crit_exit_gd(gd); 237 stack = td->td_kstack; 238 flags = td->td_flags & (TDF_ALLOCATED_STACK|TDF_ALLOCATED_THREAD); 239 } else { 240 crit_exit_gd(gd); 241 #ifdef _KERNEL 242 td = zalloc(thread_zone); 243 #else 244 td = malloc(sizeof(struct thread)); 245 #endif 246 td->td_kstack = NULL; 247 flags |= TDF_ALLOCATED_THREAD; 248 } 249 } 250 if ((stack = td->td_kstack) == NULL) { 251 #ifdef _KERNEL 252 stack = (void *)kmem_alloc(kernel_map, THREAD_STACK); 253 #else 254 stack = libcaps_alloc_stack(THREAD_STACK); 255 #endif 256 flags |= TDF_ALLOCATED_STACK; 257 } 258 if (cpu < 0) 259 lwkt_init_thread(td, stack, flags, mycpu); 260 else 261 lwkt_init_thread(td, stack, flags, globaldata_find(cpu)); 262 return(td); 263 } 264 265 #ifdef _KERNEL 266 267 /* 268 * Initialize a preexisting thread structure. This function is used by 269 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread. 270 * 271 * All threads start out in a critical section at a priority of 272 * TDPRI_KERN_DAEMON. Higher level code will modify the priority as 273 * appropriate. This function may send an IPI message when the 274 * requested cpu is not the current cpu and consequently gd_tdallq may 275 * not be initialized synchronously from the point of view of the originating 276 * cpu. 277 * 278 * NOTE! we have to be careful in regards to creating threads for other cpus 279 * if SMP has not yet been activated. 280 */ 281 #ifdef SMP 282 283 static void 284 lwkt_init_thread_remote(void *arg) 285 { 286 thread_t td = arg; 287 288 TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq); 289 } 290 291 #endif 292 293 void 294 lwkt_init_thread(thread_t td, void *stack, int flags, struct globaldata *gd) 295 { 296 globaldata_t mygd = mycpu; 297 298 bzero(td, sizeof(struct thread)); 299 td->td_kstack = stack; 300 td->td_flags |= flags; 301 td->td_gd = gd; 302 td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT; 303 lwkt_initport(&td->td_msgport, td); 304 pmap_init_thread(td); 305 #ifdef SMP 306 if (gd == mygd) { 307 crit_enter_gd(mygd); 308 TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq); 309 crit_exit_gd(mygd); 310 } else { 311 lwkt_send_ipiq(gd, lwkt_init_thread_remote, td); 312 } 313 #else 314 crit_enter_gd(mygd); 315 TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq); 316 crit_exit_gd(mygd); 317 #endif 318 } 319 320 #endif /* _KERNEL */ 321 322 void 323 lwkt_set_comm(thread_t td, const char *ctl, ...) 324 { 325 __va_list va; 326 327 __va_start(va, ctl); 328 vsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va); 329 __va_end(va); 330 } 331 332 void 333 lwkt_hold(thread_t td) 334 { 335 ++td->td_refs; 336 } 337 338 void 339 lwkt_rele(thread_t td) 340 { 341 KKASSERT(td->td_refs > 0); 342 --td->td_refs; 343 } 344 345 #ifdef _KERNEL 346 347 void 348 lwkt_wait_free(thread_t td) 349 { 350 while (td->td_refs) 351 tsleep(td, 0, "tdreap", hz); 352 } 353 354 #endif 355 356 void 357 lwkt_free_thread(thread_t td) 358 { 359 struct globaldata *gd = mycpu; 360 361 KASSERT((td->td_flags & TDF_RUNNING) == 0, 362 ("lwkt_free_thread: did not exit! %p", td)); 363 364 crit_enter_gd(gd); 365 TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq); 366 if (gd->gd_tdfreecount < CACHE_NTHREADS && 367 (td->td_flags & TDF_ALLOCATED_THREAD) 368 ) { 369 ++gd->gd_tdfreecount; 370 TAILQ_INSERT_HEAD(&gd->gd_tdfreeq, td, td_threadq); 371 crit_exit_gd(gd); 372 } else { 373 crit_exit_gd(gd); 374 if (td->td_kstack && (td->td_flags & TDF_ALLOCATED_STACK)) { 375 #ifdef _KERNEL 376 kmem_free(kernel_map, (vm_offset_t)td->td_kstack, THREAD_STACK); 377 #else 378 libcaps_free_stack(td->td_kstack, THREAD_STACK); 379 #endif 380 /* gd invalid */ 381 td->td_kstack = NULL; 382 } 383 if (td->td_flags & TDF_ALLOCATED_THREAD) { 384 #ifdef _KERNEL 385 zfree(thread_zone, td); 386 #else 387 free(td); 388 #endif 389 } 390 } 391 } 392 393 394 /* 395 * Switch to the next runnable lwkt. If no LWKTs are runnable then 396 * switch to the idlethread. Switching must occur within a critical 397 * section to avoid races with the scheduling queue. 398 * 399 * We always have full control over our cpu's run queue. Other cpus 400 * that wish to manipulate our queue must use the cpu_*msg() calls to 401 * talk to our cpu, so a critical section is all that is needed and 402 * the result is very, very fast thread switching. 403 * 404 * The LWKT scheduler uses a fixed priority model and round-robins at 405 * each priority level. User process scheduling is a totally 406 * different beast and LWKT priorities should not be confused with 407 * user process priorities. 408 * 409 * The MP lock may be out of sync with the thread's td_mpcount. lwkt_switch() 410 * cleans it up. Note that the td_switch() function cannot do anything that 411 * requires the MP lock since the MP lock will have already been setup for 412 * the target thread (not the current thread). It's nice to have a scheduler 413 * that does not need the MP lock to work because it allows us to do some 414 * really cool high-performance MP lock optimizations. 415 */ 416 417 void 418 lwkt_switch(void) 419 { 420 globaldata_t gd = mycpu; 421 thread_t td = gd->gd_curthread; 422 thread_t ntd; 423 #ifdef SMP 424 int mpheld; 425 #endif 426 427 /* 428 * Switching from within a 'fast' (non thread switched) interrupt is 429 * illegal. 430 */ 431 if (gd->gd_intr_nesting_level && panicstr == NULL) { 432 panic("lwkt_switch: cannot switch from within a fast interrupt, yet\n"); 433 } 434 435 /* 436 * Passive release (used to transition from user to kernel mode 437 * when we block or switch rather then when we enter the kernel). 438 * This function is NOT called if we are switching into a preemption 439 * or returning from a preemption. Typically this causes us to lose 440 * our current process designation (if we have one) and become a true 441 * LWKT thread, and may also hand the current process designation to 442 * another process and schedule thread. 443 */ 444 if (td->td_release) 445 td->td_release(td); 446 447 crit_enter_gd(gd); 448 ++switch_count; 449 450 #ifdef SMP 451 /* 452 * td_mpcount cannot be used to determine if we currently hold the 453 * MP lock because get_mplock() will increment it prior to attempting 454 * to get the lock, and switch out if it can't. Our ownership of 455 * the actual lock will remain stable while we are in a critical section 456 * (but, of course, another cpu may own or release the lock so the 457 * actual value of mp_lock is not stable). 458 */ 459 mpheld = MP_LOCK_HELD(); 460 #ifdef INVARIANTS 461 if (td->td_cscount) { 462 printf("Diagnostic: attempt to switch while mastering cpusync: %p\n", 463 td); 464 if (panic_on_cscount) 465 panic("switching while mastering cpusync"); 466 } 467 #endif 468 #endif 469 if ((ntd = td->td_preempted) != NULL) { 470 /* 471 * We had preempted another thread on this cpu, resume the preempted 472 * thread. This occurs transparently, whether the preempted thread 473 * was scheduled or not (it may have been preempted after descheduling 474 * itself). 475 * 476 * We have to setup the MP lock for the original thread after backing 477 * out the adjustment that was made to curthread when the original 478 * was preempted. 479 */ 480 KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK); 481 #ifdef SMP 482 if (ntd->td_mpcount && mpheld == 0) { 483 panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d\n", 484 td, ntd, td->td_mpcount, ntd->td_mpcount); 485 } 486 if (ntd->td_mpcount) { 487 td->td_mpcount -= ntd->td_mpcount; 488 KKASSERT(td->td_mpcount >= 0); 489 } 490 #endif 491 ntd->td_flags |= TDF_PREEMPT_DONE; 492 /* YYY release mp lock on switchback if original doesn't need it */ 493 } else { 494 /* 495 * Priority queue / round-robin at each priority. Note that user 496 * processes run at a fixed, low priority and the user process 497 * scheduler deals with interactions between user processes 498 * by scheduling and descheduling them from the LWKT queue as 499 * necessary. 500 * 501 * We have to adjust the MP lock for the target thread. If we 502 * need the MP lock and cannot obtain it we try to locate a 503 * thread that does not need the MP lock. If we cannot, we spin 504 * instead of HLT. 505 * 506 * A similar issue exists for the tokens held by the target thread. 507 * If we cannot obtain ownership of the tokens we cannot immediately 508 * schedule the thread. 509 */ 510 511 /* 512 * We are switching threads. If there are any pending requests for 513 * tokens we can satisfy all of them here. 514 */ 515 #ifdef SMP 516 if (gd->gd_tokreqbase) 517 lwkt_drain_token_requests(); 518 #endif 519 520 again: 521 if (gd->gd_runqmask) { 522 int nq = bsrl(gd->gd_runqmask); 523 if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) == NULL) { 524 gd->gd_runqmask &= ~(1 << nq); 525 goto again; 526 } 527 #ifdef SMP 528 /* 529 * If the target needs the MP lock and we couldn't get it, 530 * or if the target is holding tokens and we could not 531 * gain ownership of the tokens, continue looking for a 532 * thread to schedule and spin instead of HLT if we can't. 533 */ 534 if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) || 535 (ntd->td_toks && lwkt_chktokens(ntd) == 0) 536 ) { 537 u_int32_t rqmask = gd->gd_runqmask; 538 while (rqmask) { 539 TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) { 540 if (ntd->td_mpcount && !mpheld && !cpu_try_mplock()) 541 continue; 542 mpheld = MP_LOCK_HELD(); 543 if (ntd->td_toks && !lwkt_chktokens(ntd)) 544 continue; 545 break; 546 } 547 if (ntd) 548 break; 549 rqmask &= ~(1 << nq); 550 nq = bsrl(rqmask); 551 } 552 if (ntd == NULL) { 553 ntd = &gd->gd_idlethread; 554 ntd->td_flags |= TDF_IDLE_NOHLT; 555 } else { 556 TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq); 557 TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq); 558 } 559 } else { 560 TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq); 561 TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq); 562 } 563 #else 564 TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq); 565 TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq); 566 #endif 567 } else { 568 /* 569 * We have nothing to run but only let the idle loop halt 570 * the cpu if there are no pending interrupts. 571 */ 572 ntd = &gd->gd_idlethread; 573 if (gd->gd_reqflags & RQF_IDLECHECK_MASK) 574 ntd->td_flags |= TDF_IDLE_NOHLT; 575 } 576 } 577 KASSERT(ntd->td_pri >= TDPRI_CRIT, 578 ("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri)); 579 580 /* 581 * Do the actual switch. If the new target does not need the MP lock 582 * and we are holding it, release the MP lock. If the new target requires 583 * the MP lock we have already acquired it for the target. 584 */ 585 #ifdef SMP 586 if (ntd->td_mpcount == 0 ) { 587 if (MP_LOCK_HELD()) 588 cpu_rel_mplock(); 589 } else { 590 ASSERT_MP_LOCK_HELD(); 591 } 592 #endif 593 if (td != ntd) 594 td->td_switch(ntd); 595 /* NOTE: current cpu may have changed after switch */ 596 crit_exit_quick(td); 597 } 598 599 /* 600 * Request that the target thread preempt the current thread. Preemption 601 * only works under a specific set of conditions: 602 * 603 * - We are not preempting ourselves 604 * - The target thread is owned by the current cpu 605 * - We are not currently being preempted 606 * - The target is not currently being preempted 607 * - We are able to satisfy the target's MP lock requirements (if any). 608 * 609 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION. Typically 610 * this is called via lwkt_schedule() through the td_preemptable callback. 611 * critpri is the managed critical priority that we should ignore in order 612 * to determine whether preemption is possible (aka usually just the crit 613 * priority of lwkt_schedule() itself). 614 * 615 * XXX at the moment we run the target thread in a critical section during 616 * the preemption in order to prevent the target from taking interrupts 617 * that *WE* can't. Preemption is strictly limited to interrupt threads 618 * and interrupt-like threads, outside of a critical section, and the 619 * preempted source thread will be resumed the instant the target blocks 620 * whether or not the source is scheduled (i.e. preemption is supposed to 621 * be as transparent as possible). 622 * 623 * The target thread inherits our MP count (added to its own) for the 624 * duration of the preemption in order to preserve the atomicy of the 625 * MP lock during the preemption. Therefore, any preempting targets must be 626 * careful in regards to MP assertions. Note that the MP count may be 627 * out of sync with the physical mp_lock, but we do not have to preserve 628 * the original ownership of the lock if it was out of synch (that is, we 629 * can leave it synchronized on return). 630 */ 631 void 632 lwkt_preempt(thread_t ntd, int critpri) 633 { 634 struct globaldata *gd = mycpu; 635 thread_t td; 636 #ifdef SMP 637 int mpheld; 638 int savecnt; 639 #endif 640 641 /* 642 * The caller has put us in a critical section. We can only preempt 643 * if the caller of the caller was not in a critical section (basically 644 * a local interrupt), as determined by the 'critpri' parameter. 645 * 646 * YYY The target thread must be in a critical section (else it must 647 * inherit our critical section? I dunno yet). 648 * 649 * Any tokens held by the target may not be held by thread(s) being 650 * preempted. We take the easy way out and do not preempt if 651 * the target is holding tokens. 652 * 653 * Set need_lwkt_resched() unconditionally for now YYY. 654 */ 655 KASSERT(ntd->td_pri >= TDPRI_CRIT, ("BADCRIT0 %d", ntd->td_pri)); 656 657 td = gd->gd_curthread; 658 need_lwkt_resched(); 659 if ((ntd->td_pri & TDPRI_MASK) <= (td->td_pri & TDPRI_MASK)) { 660 ++preempt_miss; 661 return; 662 } 663 if ((td->td_pri & ~TDPRI_MASK) > critpri) { 664 ++preempt_miss; 665 return; 666 } 667 #ifdef SMP 668 if (ntd->td_gd != gd) { 669 ++preempt_miss; 670 return; 671 } 672 #endif 673 /* 674 * Take the easy way out and do not preempt if the target is holding 675 * one or more tokens. We could test whether the thread(s) being 676 * preempted interlock against the target thread's tokens and whether 677 * we can get all the target thread's tokens, but this situation 678 * should not occur very often so its easier to simply not preempt. 679 */ 680 if (ntd->td_toks != NULL) { 681 ++preempt_miss; 682 return; 683 } 684 if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) { 685 ++preempt_weird; 686 return; 687 } 688 if (ntd->td_preempted) { 689 ++preempt_hit; 690 return; 691 } 692 #ifdef SMP 693 /* 694 * note: an interrupt might have occured just as we were transitioning 695 * to or from the MP lock. In this case td_mpcount will be pre-disposed 696 * (non-zero) but not actually synchronized with the actual state of the 697 * lock. We can use it to imply an MP lock requirement for the 698 * preemption but we cannot use it to test whether we hold the MP lock 699 * or not. 700 */ 701 savecnt = td->td_mpcount; 702 mpheld = MP_LOCK_HELD(); 703 ntd->td_mpcount += td->td_mpcount; 704 if (mpheld == 0 && ntd->td_mpcount && !cpu_try_mplock()) { 705 ntd->td_mpcount -= td->td_mpcount; 706 ++preempt_miss; 707 return; 708 } 709 #endif 710 711 ++preempt_hit; 712 ntd->td_preempted = td; 713 td->td_flags |= TDF_PREEMPT_LOCK; 714 td->td_switch(ntd); 715 KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE)); 716 #ifdef SMP 717 KKASSERT(savecnt == td->td_mpcount); 718 mpheld = MP_LOCK_HELD(); 719 if (mpheld && td->td_mpcount == 0) 720 cpu_rel_mplock(); 721 else if (mpheld == 0 && td->td_mpcount) 722 panic("lwkt_preempt(): MP lock was not held through"); 723 #endif 724 ntd->td_preempted = NULL; 725 td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE); 726 } 727 728 /* 729 * Yield our thread while higher priority threads are pending. This is 730 * typically called when we leave a critical section but it can be safely 731 * called while we are in a critical section. 732 * 733 * This function will not generally yield to equal priority threads but it 734 * can occur as a side effect. Note that lwkt_switch() is called from 735 * inside the critical section to prevent its own crit_exit() from reentering 736 * lwkt_yield_quick(). 737 * 738 * gd_reqflags indicates that *something* changed, e.g. an interrupt or softint 739 * came along but was blocked and made pending. 740 * 741 * (self contained on a per cpu basis) 742 */ 743 void 744 lwkt_yield_quick(void) 745 { 746 globaldata_t gd = mycpu; 747 thread_t td = gd->gd_curthread; 748 749 /* 750 * gd_reqflags is cleared in splz if the cpl is 0. If we were to clear 751 * it with a non-zero cpl then we might not wind up calling splz after 752 * a task switch when the critical section is exited even though the 753 * new task could accept the interrupt. 754 * 755 * XXX from crit_exit() only called after last crit section is released. 756 * If called directly will run splz() even if in a critical section. 757 * 758 * td_nest_count prevent deep nesting via splz() or doreti(). Note that 759 * except for this special case, we MUST call splz() here to handle any 760 * pending ints, particularly after we switch, or we might accidently 761 * halt the cpu with interrupts pending. 762 */ 763 if (gd->gd_reqflags && td->td_nest_count < 2) 764 splz(); 765 766 /* 767 * YYY enabling will cause wakeup() to task-switch, which really 768 * confused the old 4.x code. This is a good way to simulate 769 * preemption and MP without actually doing preemption or MP, because a 770 * lot of code assumes that wakeup() does not block. 771 */ 772 if (untimely_switch && td->td_nest_count == 0 && 773 gd->gd_intr_nesting_level == 0 774 ) { 775 crit_enter_quick(td); 776 /* 777 * YYY temporary hacks until we disassociate the userland scheduler 778 * from the LWKT scheduler. 779 */ 780 if (td->td_flags & TDF_RUNQ) { 781 lwkt_switch(); /* will not reenter yield function */ 782 } else { 783 lwkt_schedule_self(td); /* make sure we are scheduled */ 784 lwkt_switch(); /* will not reenter yield function */ 785 lwkt_deschedule_self(td); /* make sure we are descheduled */ 786 } 787 crit_exit_noyield(td); 788 } 789 } 790 791 /* 792 * This implements a normal yield which, unlike _quick, will yield to equal 793 * priority threads as well. Note that gd_reqflags tests will be handled by 794 * the crit_exit() call in lwkt_switch(). 795 * 796 * (self contained on a per cpu basis) 797 */ 798 void 799 lwkt_yield(void) 800 { 801 lwkt_schedule_self(curthread); 802 lwkt_switch(); 803 } 804 805 /* 806 * Generic schedule. Possibly schedule threads belonging to other cpus and 807 * deal with threads that might be blocked on a wait queue. 808 * 809 * We have a little helper inline function which does additional work after 810 * the thread has been enqueued, including dealing with preemption and 811 * setting need_lwkt_resched() (which prevents the kernel from returning 812 * to userland until it has processed higher priority threads). 813 */ 814 static __inline 815 void 816 _lwkt_schedule_post(thread_t ntd, int cpri) 817 { 818 if (ntd->td_preemptable) { 819 ntd->td_preemptable(ntd, cpri); /* YYY +token */ 820 } else { 821 if ((ntd->td_flags & TDF_NORESCHED) == 0) { 822 if ((ntd->td_pri & TDPRI_MASK) >= TDPRI_KERN_USER) 823 need_lwkt_resched(); 824 } 825 } 826 } 827 828 void 829 lwkt_schedule(thread_t td) 830 { 831 globaldata_t mygd = mycpu; 832 833 #ifdef INVARIANTS 834 KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule(): scheduling gd_idlethread is illegal!")); 835 if ((td->td_flags & TDF_PREEMPT_LOCK) == 0 && td->td_proc 836 && td->td_proc->p_stat == SSLEEP 837 ) { 838 printf("PANIC schedule curtd = %p (%d %d) target %p (%d %d)\n", 839 curthread, 840 curthread->td_proc ? curthread->td_proc->p_pid : -1, 841 curthread->td_proc ? curthread->td_proc->p_stat : -1, 842 td, 843 td->td_proc ? curthread->td_proc->p_pid : -1, 844 td->td_proc ? curthread->td_proc->p_stat : -1 845 ); 846 panic("SCHED PANIC"); 847 } 848 #endif 849 crit_enter_gd(mygd); 850 if (td == mygd->gd_curthread) { 851 _lwkt_enqueue(td); 852 } else { 853 lwkt_wait_t w; 854 855 /* 856 * If the thread is on a wait list we have to send our scheduling 857 * request to the owner of the wait structure. Otherwise we send 858 * the scheduling request to the cpu owning the thread. Races 859 * are ok, the target will forward the message as necessary (the 860 * message may chase the thread around before it finally gets 861 * acted upon). 862 * 863 * (remember, wait structures use stable storage) 864 * 865 * NOTE: tokens no longer enter a critical section, so we only need 866 * to account for the crit_enter() above when calling 867 * _lwkt_schedule_post(). 868 */ 869 if ((w = td->td_wait) != NULL) { 870 lwkt_tokref wref; 871 872 if (lwkt_trytoken(&wref, &w->wa_token)) { 873 TAILQ_REMOVE(&w->wa_waitq, td, td_threadq); 874 --w->wa_count; 875 td->td_wait = NULL; 876 #ifdef SMP 877 if (td->td_gd == mycpu) { 878 _lwkt_enqueue(td); 879 _lwkt_schedule_post(td, TDPRI_CRIT); 880 } else { 881 lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td); 882 } 883 #else 884 _lwkt_enqueue(td); 885 _lwkt_schedule_post(td, TDPRI_CRIT); 886 #endif 887 lwkt_reltoken(&wref); 888 } else { 889 lwkt_send_ipiq(w->wa_token.t_cpu, (ipifunc_t)lwkt_schedule, td); 890 } 891 } else { 892 /* 893 * If the wait structure is NULL and we own the thread, there 894 * is no race (since we are in a critical section). If we 895 * do not own the thread there might be a race but the 896 * target cpu will deal with it. 897 */ 898 #ifdef SMP 899 if (td->td_gd == mygd) { 900 _lwkt_enqueue(td); 901 _lwkt_schedule_post(td, TDPRI_CRIT); 902 } else { 903 lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td); 904 } 905 #else 906 _lwkt_enqueue(td); 907 _lwkt_schedule_post(td, TDPRI_CRIT); 908 #endif 909 } 910 } 911 crit_exit_gd(mygd); 912 } 913 914 /* 915 * Managed acquisition. This code assumes that the MP lock is held for 916 * the tdallq operation and that the thread has been descheduled from its 917 * original cpu. We also have to wait for the thread to be entirely switched 918 * out on its original cpu (this is usually fast enough that we never loop) 919 * since the LWKT system does not have to hold the MP lock while switching 920 * and the target may have released it before switching. 921 */ 922 void 923 lwkt_acquire(thread_t td) 924 { 925 globaldata_t gd; 926 globaldata_t mygd; 927 928 gd = td->td_gd; 929 mygd = mycpu; 930 KKASSERT((td->td_flags & TDF_RUNQ) == 0); 931 while (td->td_flags & TDF_RUNNING) /* XXX spin */ 932 ; 933 if (gd != mygd) { 934 crit_enter_gd(mygd); 935 TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq); /* protected by BGL */ 936 td->td_gd = mygd; 937 TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq); /* protected by BGL */ 938 crit_exit_gd(mygd); 939 } 940 } 941 942 /* 943 * Generic deschedule. Descheduling threads other then your own should be 944 * done only in carefully controlled circumstances. Descheduling is 945 * asynchronous. 946 * 947 * This function may block if the cpu has run out of messages. 948 */ 949 void 950 lwkt_deschedule(thread_t td) 951 { 952 crit_enter(); 953 if (td == curthread) { 954 _lwkt_dequeue(td); 955 } else { 956 if (td->td_gd == mycpu) { 957 _lwkt_dequeue(td); 958 } else { 959 lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_deschedule, td); 960 } 961 } 962 crit_exit(); 963 } 964 965 /* 966 * Set the target thread's priority. This routine does not automatically 967 * switch to a higher priority thread, LWKT threads are not designed for 968 * continuous priority changes. Yield if you want to switch. 969 * 970 * We have to retain the critical section count which uses the high bits 971 * of the td_pri field. The specified priority may also indicate zero or 972 * more critical sections by adding TDPRI_CRIT*N. 973 * 974 * Note that we requeue the thread whether it winds up on a different runq 975 * or not. uio_yield() depends on this and the routine is not normally 976 * called with the same priority otherwise. 977 */ 978 void 979 lwkt_setpri(thread_t td, int pri) 980 { 981 KKASSERT(pri >= 0); 982 KKASSERT(td->td_gd == mycpu); 983 crit_enter(); 984 if (td->td_flags & TDF_RUNQ) { 985 _lwkt_dequeue(td); 986 td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri; 987 _lwkt_enqueue(td); 988 } else { 989 td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri; 990 } 991 crit_exit(); 992 } 993 994 void 995 lwkt_setpri_self(int pri) 996 { 997 thread_t td = curthread; 998 999 KKASSERT(pri >= 0 && pri <= TDPRI_MAX); 1000 crit_enter(); 1001 if (td->td_flags & TDF_RUNQ) { 1002 _lwkt_dequeue(td); 1003 td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri; 1004 _lwkt_enqueue(td); 1005 } else { 1006 td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri; 1007 } 1008 crit_exit(); 1009 } 1010 1011 struct proc * 1012 lwkt_preempted_proc(void) 1013 { 1014 thread_t td = curthread; 1015 while (td->td_preempted) 1016 td = td->td_preempted; 1017 return(td->td_proc); 1018 } 1019 1020 /* 1021 * Block on the specified wait queue until signaled. A generation number 1022 * must be supplied to interlock the wait queue. The function will 1023 * return immediately if the generation number does not match the wait 1024 * structure's generation number. 1025 */ 1026 void 1027 lwkt_block(lwkt_wait_t w, const char *wmesg, int *gen) 1028 { 1029 thread_t td = curthread; 1030 lwkt_tokref ilock; 1031 1032 lwkt_gettoken(&ilock, &w->wa_token); 1033 crit_enter(); 1034 if (w->wa_gen == *gen) { 1035 _lwkt_dequeue(td); 1036 TAILQ_INSERT_TAIL(&w->wa_waitq, td, td_threadq); 1037 ++w->wa_count; 1038 td->td_wait = w; 1039 td->td_wmesg = wmesg; 1040 again: 1041 lwkt_switch(); 1042 if (td->td_wmesg != NULL) { 1043 _lwkt_dequeue(td); 1044 goto again; 1045 } 1046 } 1047 crit_exit(); 1048 *gen = w->wa_gen; 1049 lwkt_reltoken(&ilock); 1050 } 1051 1052 /* 1053 * Signal a wait queue. We gain ownership of the wait queue in order to 1054 * signal it. Once a thread is removed from the wait queue we have to 1055 * deal with the cpu owning the thread. 1056 * 1057 * Note: alternatively we could message the target cpu owning the wait 1058 * queue. YYY implement as sysctl. 1059 */ 1060 void 1061 lwkt_signal(lwkt_wait_t w, int count) 1062 { 1063 thread_t td; 1064 lwkt_tokref ilock; 1065 1066 lwkt_gettoken(&ilock, &w->wa_token); 1067 ++w->wa_gen; 1068 crit_enter(); 1069 if (count < 0) 1070 count = w->wa_count; 1071 while ((td = TAILQ_FIRST(&w->wa_waitq)) != NULL && count) { 1072 --count; 1073 --w->wa_count; 1074 TAILQ_REMOVE(&w->wa_waitq, td, td_threadq); 1075 td->td_wait = NULL; 1076 td->td_wmesg = NULL; 1077 if (td->td_gd == mycpu) { 1078 _lwkt_enqueue(td); 1079 } else { 1080 lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td); 1081 } 1082 } 1083 crit_exit(); 1084 lwkt_reltoken(&ilock); 1085 } 1086 1087 /* 1088 * Create a kernel process/thread/whatever. It shares it's address space 1089 * with proc0 - ie: kernel only. 1090 * 1091 * NOTE! By default new threads are created with the MP lock held. A 1092 * thread which does not require the MP lock should release it by calling 1093 * rel_mplock() at the start of the new thread. 1094 */ 1095 int 1096 lwkt_create(void (*func)(void *), void *arg, 1097 struct thread **tdp, thread_t template, int tdflags, int cpu, 1098 const char *fmt, ...) 1099 { 1100 thread_t td; 1101 __va_list ap; 1102 1103 td = lwkt_alloc_thread(template, cpu); 1104 if (tdp) 1105 *tdp = td; 1106 cpu_set_thread_handler(td, lwkt_exit, func, arg); 1107 td->td_flags |= TDF_VERBOSE | tdflags; 1108 #ifdef SMP 1109 td->td_mpcount = 1; 1110 #endif 1111 1112 /* 1113 * Set up arg0 for 'ps' etc 1114 */ 1115 __va_start(ap, fmt); 1116 vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap); 1117 __va_end(ap); 1118 1119 /* 1120 * Schedule the thread to run 1121 */ 1122 if ((td->td_flags & TDF_STOPREQ) == 0) 1123 lwkt_schedule(td); 1124 else 1125 td->td_flags &= ~TDF_STOPREQ; 1126 return 0; 1127 } 1128 1129 /* 1130 * kthread_* is specific to the kernel and is not needed by userland. 1131 */ 1132 #ifdef _KERNEL 1133 1134 /* 1135 * Destroy an LWKT thread. Warning! This function is not called when 1136 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and 1137 * uses a different reaping mechanism. 1138 */ 1139 void 1140 lwkt_exit(void) 1141 { 1142 thread_t td = curthread; 1143 1144 if (td->td_flags & TDF_VERBOSE) 1145 printf("kthread %p %s has exited\n", td, td->td_comm); 1146 caps_exit(td); 1147 crit_enter_quick(td); 1148 lwkt_deschedule_self(td); 1149 ++mycpu->gd_tdfreecount; 1150 TAILQ_INSERT_TAIL(&mycpu->gd_tdfreeq, td, td_threadq); 1151 cpu_thread_exit(); 1152 } 1153 1154 /* 1155 * Create a kernel process/thread/whatever. It shares it's address space 1156 * with proc0 - ie: kernel only. 5.x compatible. 1157 * 1158 * NOTE! By default kthreads are created with the MP lock held. A 1159 * thread which does not require the MP lock should release it by calling 1160 * rel_mplock() at the start of the new thread. 1161 */ 1162 int 1163 kthread_create(void (*func)(void *), void *arg, 1164 struct thread **tdp, const char *fmt, ...) 1165 { 1166 thread_t td; 1167 __va_list ap; 1168 1169 td = lwkt_alloc_thread(NULL, -1); 1170 if (tdp) 1171 *tdp = td; 1172 cpu_set_thread_handler(td, kthread_exit, func, arg); 1173 td->td_flags |= TDF_VERBOSE; 1174 #ifdef SMP 1175 td->td_mpcount = 1; 1176 #endif 1177 1178 /* 1179 * Set up arg0 for 'ps' etc 1180 */ 1181 __va_start(ap, fmt); 1182 vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap); 1183 __va_end(ap); 1184 1185 /* 1186 * Schedule the thread to run 1187 */ 1188 lwkt_schedule(td); 1189 return 0; 1190 } 1191 1192 /* 1193 * Destroy an LWKT thread. Warning! This function is not called when 1194 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and 1195 * uses a different reaping mechanism. 1196 * 1197 * XXX duplicates lwkt_exit() 1198 */ 1199 void 1200 kthread_exit(void) 1201 { 1202 lwkt_exit(); 1203 } 1204 1205 #endif /* _KERNEL */ 1206 1207 void 1208 crit_panic(void) 1209 { 1210 thread_t td = curthread; 1211 int lpri = td->td_pri; 1212 1213 td->td_pri = 0; 1214 panic("td_pri is/would-go negative! %p %d", td, lpri); 1215 } 1216 1217