/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_synch.c     8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/uio.h>
#include <sys/kcollect.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/serialize.h>

#include <sys/signal2.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mutex2.h>

#include <machine/cpu.h>
#include <machine/smp.h>

TAILQ_HEAD(tslpque, thread);

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL);
static void sched_dyninit (void *dummy);
SYSINIT(sched_dyninit, SI_BOOT1_DYNALLOC, SI_ORDER_FIRST, sched_dyninit, NULL);

int     lbolt;
void    *lbolt_syncer;
int     ncpus;
int     ncpus2, ncpus2_shift, ncpus2_mask;      /* note: mask not cpumask_t */
int     ncpus_fit, ncpus_fit_mask;              /* note: mask not cpumask_t */
int     safepri;
int     tsleep_now_works;
int     tsleep_crypto_dump = 0;

MALLOC_DEFINE(M_TSLEEP, "tslpque", "tsleep queues");

#define __DEALL(ident)  __DEQUALIFY(void *, ident)

#if !defined(KTR_TSLEEP)
#define KTR_TSLEEP      KTR_ALL
#endif
KTR_INFO_MASTER(tsleep);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_beg, 0, "tsleep enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_end, 1, "tsleep exit");
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_beg, 2, "wakeup enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_end, 3, "wakeup exit");
KTR_INFO(KTR_TSLEEP, tsleep, ilockfail, 4, "interlock failed %p", const volatile void *ident);

#define logtsleep1(name)        KTR_LOG(tsleep_ ## name)
#define logtsleep2(name, val)   KTR_LOG(tsleep_ ## name, val)

struct loadavg averunnable =
        { {0, 0, 0}, FSCALE };  /* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
        0.9200444146293232 * FSCALE,    /* exp(-1/12) */
        0.9834714538216174 * FSCALE,    /* exp(-1/60) */
        0.9944598480048967 * FSCALE,    /* exp(-1/180) */
};

static void     endtsleep (void *);
static void     loadav (void *arg);
static void     schedcpu (void *arg);

static int pctcpu_decay = 10;
SYSCTL_INT(_kern, OID_AUTO, pctcpu_decay, CTLFLAG_RW, &pctcpu_decay, 0, "");

/*
 * kernel uses `FSCALE', userland (SHOULD) use kern.fscale
 */
int fscale __unused = FSCALE;   /* exported to systat */
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
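
/*
 * Illustrative note (not part of the original file): the load average and
 * lwp_pctcpu values in this file are fixed point quantities scaled by
 * FSCALE (1 << FSHIFT, i.e. 2048 assuming the stock FSHIFT of 11).
 * Userland consumers such as systat divide by kern.fscale to recover the
 * real value; for example an ldavg[0] of 3072 corresponds to a one minute
 * load average of 3072 / 2048 = 1.5.
 */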

/*
 * Recompute process priorities, once a second.
 *
 * Since the userland schedulers are typically event oriented, if the
 * estcpu calculation at wakeup() time is not sufficient to make a
 * process runnable relative to other processes in the system we have
 * a 1-second recalc to help out.
 *
 * This code also allows us to store sysclock_t data in the process structure
 * without fear of an overrun, since sysclock_t is guaranteed to hold
 * several seconds worth of count.
 *
 * WARNING! callouts can preempt normal threads.  However, they will not
 * preempt a thread holding a spinlock so we *can* safely use spinlocks.
 */
static int schedcpu_stats(struct proc *p, void *data __unused);
static int schedcpu_resource(struct proc *p, void *data __unused);

static void
schedcpu(void *arg)
{
        allproc_scan(schedcpu_stats, NULL, 1);
        allproc_scan(schedcpu_resource, NULL, 1);
        if (mycpu->gd_cpuid == 0) {
                wakeup((caddr_t)&lbolt);
                wakeup(lbolt_syncer);
        }
        callout_reset(&mycpu->gd_schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * General process statistics once a second
 */
static int
schedcpu_stats(struct proc *p, void *data __unused)
{
        struct lwp *lp;

        /*
         * Threads may not be completely set up if the process is in the
         * SIDL state.
         */
        if (p->p_stat == SIDL)
                return(0);

        PHOLD(p);
        if (lwkt_trytoken(&p->p_token) == FALSE) {
                PRELE(p);
                return(0);
        }

        p->p_swtime++;
        FOREACH_LWP_IN_PROC(lp, p) {
                if (lp->lwp_stat == LSSLEEP) {
                        ++lp->lwp_slptime;
                        if (lp->lwp_slptime == 1)
                                p->p_usched->uload_update(lp);
                }

                /*
                 * Only recalculate processes that are active or have slept
                 * less than 2 seconds.  The schedulers understand this.
                 * Otherwise decay by 50% per second.
                 */
                if (lp->lwp_slptime <= 1) {
                        p->p_usched->recalculate(lp);
                } else {
                        int decay;

                        decay = pctcpu_decay;
                        cpu_ccfence();
                        if (decay <= 1)
                                decay = 1;
                        if (decay > 100)
                                decay = 100;
                        lp->lwp_pctcpu = (lp->lwp_pctcpu * (decay - 1)) /
                                         decay;
                }
        }
        lwkt_reltoken(&p->p_token);
        lwkt_yield();
        PRELE(p);
        return(0);
}

/*
 * Resource checks.  XXX break out since ksignal/killproc can block,
 * limiting us to one process killed per second.  There is probably
 * a better way.
 */
static int
schedcpu_resource(struct proc *p, void *data __unused)
{
        u_int64_t ttime;
        struct lwp *lp;

        if (p->p_stat == SIDL)
                return(0);

        PHOLD(p);
        if (lwkt_trytoken(&p->p_token) == FALSE) {
                PRELE(p);
                return(0);
        }

        if (p->p_stat == SZOMB || p->p_limit == NULL) {
                lwkt_reltoken(&p->p_token);
                PRELE(p);
                return(0);
        }

        ttime = 0;
        FOREACH_LWP_IN_PROC(lp, p) {
                /*
                 * We may have caught an lp in the middle of being
                 * created, lwp_thread can be NULL.
                 */
                if (lp->lwp_thread) {
                        ttime += lp->lwp_thread->td_sticks;
                        ttime += lp->lwp_thread->td_uticks;
                }
        }

        switch(plimit_testcpulimit(p->p_limit, ttime)) {
        case PLIMIT_TESTCPU_KILL:
                killproc(p, "exceeded maximum CPU limit");
                break;
        case PLIMIT_TESTCPU_XCPU:
                if ((p->p_flags & P_XCPU) == 0) {
                        p->p_flags |= P_XCPU;
                        ksignal(p, SIGXCPU);
                }
                break;
        default:
                break;
        }
        lwkt_reltoken(&p->p_token);
        lwkt_yield();
        PRELE(p);
        return(0);
}

/*
 * This is only used by ps.  Generate a cpu percentage use over
 * a period of one second.
 */
void
updatepcpu(struct lwp *lp, int cpticks, int ttlticks)
{
        fixpt_t acc;
        int remticks;

        acc = (cpticks << FSHIFT) / ttlticks;
        if (ttlticks >= ESTCPUFREQ) {
                lp->lwp_pctcpu = acc;
        } else {
                remticks = ESTCPUFREQ - ttlticks;
                lp->lwp_pctcpu = (acc * ttlticks + lp->lwp_pctcpu * remticks) /
                                 ESTCPUFREQ;
        }
}
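
/*
 * Illustrative note (not part of the original file): a minimal worked
 * example of the weighted average computed by updatepcpu() above, assuming
 * FSHIFT = 11 (FSCALE = 2048) and ESTCPUFREQ = 50 purely for the sake of
 * the arithmetic.
 *
 *      cpticks = 10, ttlticks = 25 (half a sampling window):
 *          acc        = (10 << 11) / 25 = 819              (~40% cpu)
 *          remticks   = 50 - 25         = 25
 *          lwp_pctcpu = (819 * 25 + old_pctcpu * 25) / 50
 *
 * i.e. the new sample is blended with the previous lwp_pctcpu in
 * proportion to how much of the one-second window it actually covers.
 */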

/*
 * Handy macros to calculate hash indices.  LOOKUP() calculates the
 * global cpumask hash index, TCHASHSHIFT() converts that into the
 * pcpu hash index.
 *
 * By making the pcpu hash arrays smaller we save a significant amount
 * of memory at very low cost.  The real cost is in IPIs, which are handled
 * by the much larger global cpumask hash table.
 */
#define LOOKUP_PRIME    66555444443333333ULL
#define LOOKUP(x)       ((((uintptr_t)(x) + ((uintptr_t)(x) >> 18)) ^  \
                          LOOKUP_PRIME) % slpque_tablesize)
#define TCHASHSHIFT(x)  ((x) >> 4)

static uint32_t slpque_tablesize;
static cpumask_t *slpque_cpumasks;

SYSCTL_UINT(_kern, OID_AUTO, slpque_tablesize, CTLFLAG_RD, &slpque_tablesize,
    0, "");

/*
 * This is a dandy function that allows us to interlock tsleep/wakeup
 * operations with unspecified upper level locks, such as lockmgr locks,
 * simply by holding a critical section.  The sequence is:
 *
 *      (acquire upper level lock)
 *      tsleep_interlock(blah)
 *      (release upper level lock)
 *      tsleep(blah, ...)
 *
 * Basically this function queues us on the tsleep queue without actually
 * descheduling us.  When tsleep() is later called with PINTERLOCK it
 * assumes the thread was already queued, otherwise it queues it there.
 *
 * Thus it is possible to receive the wakeup prior to going to sleep and
 * the race conditions are covered.
 */
static __inline void
_tsleep_interlock(globaldata_t gd, const volatile void *ident, int flags)
{
        thread_t td = gd->gd_curthread;
        uint32_t cid;
        uint32_t gid;

        crit_enter_quick(td);
        if (td->td_flags & TDF_TSLEEPQ) {
                cid = LOOKUP(td->td_wchan);
                gid = TCHASHSHIFT(cid);
                TAILQ_REMOVE(&gd->gd_tsleep_hash[gid], td, td_sleepq);
                if (TAILQ_FIRST(&gd->gd_tsleep_hash[gid]) == NULL) {
                        ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
                                               gd->gd_cpuid);
                }
        } else {
                td->td_flags |= TDF_TSLEEPQ;
        }
        cid = LOOKUP(ident);
        gid = TCHASHSHIFT(cid);
        TAILQ_INSERT_TAIL(&gd->gd_tsleep_hash[gid], td, td_sleepq);
        ATOMIC_CPUMASK_ORBIT(slpque_cpumasks[cid], gd->gd_cpuid);
        td->td_wchan = ident;
        td->td_wdomain = flags & PDOMAIN_MASK;
        crit_exit_quick(td);
}

void
tsleep_interlock(const volatile void *ident, int flags)
{
        _tsleep_interlock(mycpu, ident, flags);
}

/*
 * Remove thread from sleepq.  Must be called with a critical section held.
 * The thread must not be migrating.
 */
static __inline void
_tsleep_remove(thread_t td)
{
        globaldata_t gd = mycpu;
        uint32_t cid;
        uint32_t gid;

        KKASSERT(td->td_gd == gd && IN_CRITICAL_SECT(td));
        KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
        if (td->td_flags & TDF_TSLEEPQ) {
                td->td_flags &= ~TDF_TSLEEPQ;
                cid = LOOKUP(td->td_wchan);
                gid = TCHASHSHIFT(cid);
                TAILQ_REMOVE(&gd->gd_tsleep_hash[gid], td, td_sleepq);
                if (TAILQ_FIRST(&gd->gd_tsleep_hash[gid]) == NULL) {
                        ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
                                               gd->gd_cpuid);
                }
                td->td_wchan = NULL;
                td->td_wdomain = 0;
        }
}

void
tsleep_remove(thread_t td)
{
        _tsleep_remove(td);
}
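
/*
 * Illustrative sketch (not part of the original file): one way a caller can
 * use the tsleep_interlock()/tsleep(PINTERLOCKED) pair described above to
 * wait for a condition guarded by a lockmgr lock.  The structure, field and
 * function names below are hypothetical and exist only for this example;
 * the block is compiled out.
 */
#if 0
struct xwait {
        struct lock     xw_lock;        /* lockmgr lock guarding xw_ready */
        int             xw_ready;
};

/*
 * Wait for xw_ready to become non-zero.  A wakeup(xw) issued after the
 * interlock but before the tsleep() is not lost because the thread is
 * already on the sleep queue.
 */
static void
xwait_wait(struct xwait *xw)
{
        lockmgr(&xw->xw_lock, LK_EXCLUSIVE);
        while (xw->xw_ready == 0) {
                tsleep_interlock(xw, 0);
                lockmgr(&xw->xw_lock, LK_RELEASE);
                tsleep(xw, PINTERLOCKED, "xwait", 0);
                lockmgr(&xw->xw_lock, LK_EXCLUSIVE);
        }
        lockmgr(&xw->xw_lock, LK_RELEASE);
}
#endif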

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal
 * (return EINTR).
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 *
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 *
 * WARNING!  This code can't block (short of switching away), or bad things
 *           will happen.  No getting tokens, no blocking locks, etc.
 */
int
tsleep(const volatile void *ident, int flags, const char *wmesg, int timo)
{
        struct thread *td = curthread;
        struct lwp *lp = td->td_lwp;
        struct proc *p = td->td_proc;           /* may be NULL */
        globaldata_t gd;
        int sig;
        int catch;
        int error;
        int oldpri;
        struct callout thandle;

        /*
         * Currently a severe hack.  Make sure any delayed wakeups
         * are flushed before we sleep or we might deadlock on whatever
         * event we are sleeping on.
         */
        if (td->td_flags & TDF_DELAYED_WAKEUP)
                wakeup_end_delayed();

        /*
         * NOTE: removed KTRPOINT, it could cause races due to blocking
         * even in stable.  Just scrap it for now.
         */
        if (!tsleep_crypto_dump && (tsleep_now_works == 0 || panicstr)) {
                /*
                 * After a panic, or before we actually have an operational
                 * softclock, just give interrupts a chance, then just return;
                 *
                 * don't run any other procs or panic below,
                 * in case this is the idle process and already asleep.
                 */
                splz();
                oldpri = td->td_pri;
                lwkt_setpri_self(safepri);
                lwkt_switch();
                lwkt_setpri_self(oldpri);
                return (0);
        }
        logtsleep2(tsleep_beg, ident);
        gd = td->td_gd;
        KKASSERT(td != &gd->gd_idlethread);     /* you must be kidding! */
        td->td_wakefromcpu = -1;                /* overwritten by _wakeup */

        /*
         * NOTE: all of this occurs on the current cpu, including any
         * callout-based wakeups, so a critical section is a sufficient
         * interlock.
         *
         * The entire sequence through to where we actually sleep must
         * run without breaking the critical section.
         */
        catch = flags & PCATCH;
        error = 0;
        sig = 0;

        crit_enter_quick(td);

        KASSERT(ident != NULL, ("tsleep: no ident"));
        KASSERT(lp == NULL ||
                lp->lwp_stat == LSRUN ||        /* Obvious */
                lp->lwp_stat == LSSTOP,         /* Set in tstop */
                ("tsleep %p %s %d",
                        ident, wmesg, lp->lwp_stat));

        /*
         * We interlock the sleep queue if the caller has not already done
         * it for us.  This must be done before we potentially acquire any
         * tokens or we can lose the wakeup.
         */
        if ((flags & PINTERLOCKED) == 0) {
                _tsleep_interlock(gd, ident, flags);
        }

        /*
         * Setup for the current process (if this is a process).  We must
         * interlock with lwp_token to avoid remote wakeup races via
         * setrunnable()
         */
        if (lp) {
                lwkt_gettoken(&lp->lwp_token);

                /*
                 * If the umbrella process is in the SCORE state then
                 * make sure that the thread is flagged going into a
                 * normal sleep to allow the core dump to proceed, otherwise
                 * the coredump can end up waiting forever.  If the normal
                 * sleep is woken up, the thread will enter a stopped state
                 * upon return to userland.
                 *
                 * We do not want to interrupt or cause a thread exit at
                 * this juncture because that will mess up the state the
                 * coredump is trying to save.
                 */
                if (p->p_stat == SCORE &&
                    (lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
                        atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
                        ++p->p_nstopped;
                }

                /*
                 * PCATCH requested.
                 */
                if (catch) {
                        /*
                         * Early termination if PCATCH was set and a
                         * signal is pending, interlocked with the
                         * critical section.
                         *
                         * Early termination only occurs when tsleep() is
                         * entered while in a normal LSRUN state.
                         */
                        if ((sig = CURSIG(lp)) != 0)
                                goto resume;

                        /*
                         * Causes ksignal to wake us up if a signal is
                         * received (interlocked with lp->lwp_token).
                         */
                        lp->lwp_flags |= LWP_SINTR;
                }
        } else {
                KKASSERT(p == NULL);
        }

        /*
         * Make sure the current process has been untangled from
         * the userland scheduler and initialize slptime to start
         * counting.
         *
         * NOTE: td->td_wakefromcpu is pre-set by the release function
         *       for the dfly scheduler, and then adjusted by _wakeup()
         */
        if (lp) {
                p->p_usched->release_curproc(lp);
                lp->lwp_slptime = 0;
        }

        /*
         * If the interlocked flag is set but our cpu bit in the slpqueue
         * is no longer set, then a wakeup was processed in between the
         * tsleep_interlock() (ours or the caller's), and here.  This can
         * occur under numerous circumstances including when we release the
         * current process.
         *
         * Extreme loads can cause the sending of an IPI (e.g. wakeup()'s)
         * to process incoming IPIs, thus draining incoming wakeups.
         */
        if ((td->td_flags & TDF_TSLEEPQ) == 0) {
                logtsleep2(ilockfail, ident);
                goto resume;
        }

        /*
         * Scheduling is blocked while in a critical section.  Coincide
         * the descheduled-by-tsleep flag with the descheduling of the
         * lwkt.
         *
         * The timer callout is localized on our cpu and interlocked by
         * our critical section.
         */
        lwkt_deschedule_self(td);
        td->td_flags |= TDF_TSLEEP_DESCHEDULED;
        td->td_wmesg = wmesg;

        /*
         * Setup the timeout, if any.  The timeout is only operable while
         * the thread is flagged descheduled.
         */
        KKASSERT((td->td_flags & TDF_TIMEOUT) == 0);
        if (timo) {
                callout_init_mp(&thandle);
                callout_reset(&thandle, timo, endtsleep, td);
        }

        /*
         * Beddy bye bye.
         */
        if (lp) {
                /*
                 * Ok, we are sleeping.  Place us in the SSLEEP state.
                 */
                KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);

                /*
                 * tstop() sets LSSTOP, so don't fiddle with that.
                 */
                if (lp->lwp_stat != LSSTOP)
                        lp->lwp_stat = LSSLEEP;
                lp->lwp_ru.ru_nvcsw++;
                p->p_usched->uload_update(lp);
                lwkt_switch();

                /*
                 * And when we are woken up, put us back in LSRUN.  If we
                 * slept for over a second, recalculate our estcpu.
                 */
                lp->lwp_stat = LSRUN;
                if (lp->lwp_slptime) {
                        p->p_usched->uload_update(lp);
                        p->p_usched->recalculate(lp);
                }
                lp->lwp_slptime = 0;
        } else {
                lwkt_switch();
        }

        /*
         * Make sure we haven't switched cpus while we were asleep.  It's
         * not supposed to happen.  Cleanup our temporary flags.
         */
        KKASSERT(gd == td->td_gd);

        /*
         * Cleanup the timeout.  If the timeout has already occurred thandle
         * has already been stopped, otherwise stop thandle.  If the timeout
         * is running (the callout thread must be blocked trying to get
         * lwp_token) then wait for us to get scheduled.
         */
        if (timo) {
                while (td->td_flags & TDF_TIMEOUT_RUNNING) {
                        /* else we won't get rescheduled! */
                        if (lp->lwp_stat != LSSTOP)
                                lp->lwp_stat = LSSLEEP;
                        lwkt_deschedule_self(td);
                        td->td_wmesg = "tsrace";
                        lwkt_switch();
                        kprintf("td %p %s: timeout race\n", td, td->td_comm);
                }
                if (td->td_flags & TDF_TIMEOUT) {
                        td->td_flags &= ~TDF_TIMEOUT;
                        error = EWOULDBLOCK;
                } else {
                        /* does not block when on same cpu */
                        callout_stop(&thandle);
                }
        }
        td->td_flags &= ~TDF_TSLEEP_DESCHEDULED;

        /*
         * Make sure we have been removed from the sleepq.  In most
         * cases this will have been done for us already but it is
         * possible for a scheduling IPI to be in-flight from a
         * previous tsleep/tsleep_interlock() or due to a straight-out
         * call to lwkt_schedule() (in the case of an interrupt thread),
         * causing a spurious wakeup.
         */
        _tsleep_remove(td);
        td->td_wmesg = NULL;

        /*
         * Figure out the correct error return.  If interrupted by a
         * signal we want to return EINTR or ERESTART.
         */
resume:
        if (lp) {
                if (catch && error == 0) {
                        if (sig != 0 || (sig = CURSIG(lp))) {
                                if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
                                        error = EINTR;
                                else
                                        error = ERESTART;
                        }
                }

                lp->lwp_flags &= ~LWP_SINTR;

                /*
                 * Unconditionally set us to LSRUN on resume.  lwp_stat could
                 * be in a weird state due to the goto resume, particularly
                 * when tsleep() is called from tstop().
                 */
                lp->lwp_stat = LSRUN;
                lwkt_reltoken(&lp->lwp_token);
        }
        logtsleep1(tsleep_end);
        crit_exit_quick(td);
        return (error);
}

/*
 * Interlocked spinlock sleep.  An exclusively held spinlock must
 * be passed to ssleep().  The function will atomically release the
 * spinlock and tsleep on the ident, then reacquire the spinlock and
 * return.
 *
 * This routine is fairly important along the critical path, so optimize it
 * heavily.
 */
int
ssleep(const volatile void *ident, struct spinlock *spin, int flags,
       const char *wmesg, int timo)
{
        globaldata_t gd = mycpu;
        int error;

        _tsleep_interlock(gd, ident, flags);
        spin_unlock_quick(gd, spin);
        error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
        _spin_lock_quick(gd, spin, wmesg);

        return (error);
}

int
lksleep(const volatile void *ident, struct lock *lock, int flags,
        const char *wmesg, int timo)
{
        globaldata_t gd = mycpu;
        int error;

        _tsleep_interlock(gd, ident, flags);
        lockmgr(lock, LK_RELEASE);
        error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
        lockmgr(lock, LK_EXCLUSIVE);

        return (error);
}
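
/*
 * Illustrative sketch (not part of the original file): typical use of the
 * interlocked sleep helpers.  A waiter holding a spinlock checks its
 * condition in a loop and ssleep()s while it is false; the waker changes
 * the condition under the same spinlock and issues a wakeup().  The
 * structure and field names are hypothetical, and the block is compiled
 * out.  lksleep() above and mtxsleep()/zsleep() below follow the same
 * pattern for lockmgr locks, mutexes and serializers.
 */
#if 0
struct swait {
        struct spinlock sw_spin;        /* spinlock guarding sw_ready */
        int             sw_ready;
};

/* Waiter: sleep until sw_ready becomes non-zero. */
static void
swait_wait(struct swait *sw)
{
        spin_lock(&sw->sw_spin);
        while (sw->sw_ready == 0)
                ssleep(sw, &sw->sw_spin, 0, "swwait", 0);
        spin_unlock(&sw->sw_spin);
}

/* Waker: set the condition under the spinlock, then wake the waiters. */
static void
swait_signal(struct swait *sw)
{
        spin_lock(&sw->sw_spin);
        sw->sw_ready = 1;
        spin_unlock(&sw->sw_spin);
        wakeup(sw);
}
#endif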

/*
 * Interlocked mutex sleep.  An exclusively held mutex must be passed
 * to mtxsleep().  The function will atomically release the mutex
 * and tsleep on the ident, then reacquire the mutex and return.
 */
int
mtxsleep(const volatile void *ident, struct mtx *mtx, int flags,
         const char *wmesg, int timo)
{
        globaldata_t gd = mycpu;
        int error;

        _tsleep_interlock(gd, ident, flags);
        mtx_unlock(mtx);
        error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
        mtx_lock_ex_quick(mtx);

        return (error);
}

/*
 * Interlocked serializer sleep.  An exclusively held serializer must
 * be passed to zsleep().  The function will atomically release
 * the serializer and tsleep on the ident, then reacquire the serializer
 * and return.
 */
int
zsleep(const volatile void *ident, struct lwkt_serialize *slz, int flags,
       const char *wmesg, int timo)
{
        globaldata_t gd = mycpu;
        int ret;

        ASSERT_SERIALIZED(slz);

        _tsleep_interlock(gd, ident, flags);
        lwkt_serialize_exit(slz);
        ret = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
        lwkt_serialize_enter(slz);

        return ret;
}

/*
 * Directly block on the LWKT thread by descheduling it.  This
 * is much faster than tsleep(), but the only legal way to wake
 * us up is to directly schedule the thread.
 *
 * Setting TDF_SINTR will cause new signals to directly schedule us.
 *
 * This routine must be called while in a critical section.
 */
int
lwkt_sleep(const char *wmesg, int flags)
{
        thread_t td = curthread;
        int sig;

        if ((flags & PCATCH) == 0 || td->td_lwp == NULL) {
                td->td_flags |= TDF_BLOCKED;
                td->td_wmesg = wmesg;
                lwkt_deschedule_self(td);
                lwkt_switch();
                td->td_wmesg = NULL;
                td->td_flags &= ~TDF_BLOCKED;
                return(0);
        }
        if ((sig = CURSIG(td->td_lwp)) != 0) {
                if (SIGISMEMBER(td->td_proc->p_sigacts->ps_sigintr, sig))
                        return(EINTR);
                else
                        return(ERESTART);
        }
        td->td_flags |= TDF_BLOCKED | TDF_SINTR;
        td->td_wmesg = wmesg;
        lwkt_deschedule_self(td);
        lwkt_switch();
        td->td_flags &= ~(TDF_BLOCKED | TDF_SINTR);
        td->td_wmesg = NULL;
        return(0);
}

/*
 * Implement the timeout for tsleep.
 *
 * This type of callout timeout is scheduled on the same cpu the process
 * is sleeping on.  Also, at the moment, the MP lock is held.
 */
static void
endtsleep(void *arg)
{
        thread_t td = arg;
        struct lwp *lp;

        /*
         * We are going to have to get the lwp_token, which means we might
         * block.  This can race a tsleep getting woken up by other means
         * so set TDF_TIMEOUT_RUNNING to force the tsleep to wait for our
         * processing to complete (sorry tsleep!).
         *
         * We can safely set td_flags because td MUST be on the same cpu
         * as we are.
         */
        KKASSERT(td->td_gd == mycpu);
        crit_enter();
        td->td_flags |= TDF_TIMEOUT_RUNNING | TDF_TIMEOUT;

        /*
         * This can block but TDF_TIMEOUT_RUNNING will prevent the thread
         * from exiting the tsleep on us.  The flag is interlocked by virtue
         * of lp being on the same cpu as we are.
         */
        if ((lp = td->td_lwp) != NULL)
                lwkt_gettoken(&lp->lwp_token);

        KKASSERT(td->td_flags & TDF_TSLEEP_DESCHEDULED);

        if (lp) {
                /*
                 * The callout timer should normally never be set in tstop()
                 * because it passes a timeout of 0.  However, there is a
                 * case during thread exit (which SSTOP's all the threads)
                 * for which tstop() must break out and can (properly) leave
                 * the thread in LSSTOP.
                 */
                KKASSERT(lp->lwp_stat != LSSTOP ||
                         (lp->lwp_mpflags & LWP_MP_WEXIT));
                setrunnable(lp);
                lwkt_reltoken(&lp->lwp_token);
        } else {
                _tsleep_remove(td);
                lwkt_schedule(td);
        }
        KKASSERT(td->td_gd == mycpu);
        td->td_flags &= ~TDF_TIMEOUT_RUNNING;
        crit_exit();
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 * count may be zero or one only.
 *
 * The domain encodes the sleep/wakeup domain, flags, plus the originating
 * cpu.
 *
 * This call may run without the MP lock held.  We can only manipulate thread
 * state on the cpu owning the thread.  We CANNOT manipulate process state
 * at all.
 *
 * _wakeup() can be passed to an IPI so we can't use (const volatile
 * void *ident).
 */
static void
_wakeup(void *ident, int domain)
{
        struct tslpque *qp;
        struct thread *td;
        struct thread *ntd;
        globaldata_t gd;
        cpumask_t mask;
        uint32_t cid;
        uint32_t gid;

        crit_enter();
        logtsleep2(wakeup_beg, ident);
        gd = mycpu;
        cid = LOOKUP(ident);
        gid = TCHASHSHIFT(cid);
        qp = &gd->gd_tsleep_hash[gid];
restart:
        for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
                ntd = TAILQ_NEXT(td, td_sleepq);
                if (td->td_wchan == ident &&
                    td->td_wdomain == (domain & PDOMAIN_MASK)
                ) {
                        KKASSERT(td->td_gd == gd);
                        _tsleep_remove(td);
                        td->td_wakefromcpu = PWAKEUP_DECODE(domain);
                        if (td->td_flags & TDF_TSLEEP_DESCHEDULED) {
                                lwkt_schedule(td);
                                if (domain & PWAKEUP_ONE)
                                        goto done;
                        }
                        goto restart;
                }
        }

        /*
         * Because a bunch of cpumask array entries cover the same queue, it
         * is possible for our bit to remain set in some of them and cause
         * spurious wakeup IPIs later on.  Make sure that the bit is cleared
         * when a spurious IPI occurs to prevent further spurious IPIs.
         */
        if (TAILQ_FIRST(qp) == NULL) {
                ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid], gd->gd_cpuid);
        }

        /*
         * We finished checking the current cpu but there still may be
         * more work to do.  Either wakeup_one was requested and no matching
         * thread was found, or a normal wakeup was requested and we have
         * to continue checking cpus.
         *
         * It should be noted that this scheme is actually less expensive
         * than the old scheme when waking up multiple threads, since we
         * send only one IPI message per target candidate which may then
         * schedule multiple threads.  Before we could have wound up sending
         * an IPI message for each thread on the target cpu (!= current cpu)
         * that needed to be woken up.
         *
         * NOTE: Wakeups occurring on remote cpus are asynchronous.  This
         *       should be ok since we are passing idents in the IPI rather
         *       than thread pointers.
         *
         * NOTE: We MUST mfence (or use an atomic op) prior to reading
         *       the cpumask, as another cpu may have written to it in
         *       a fashion interlocked with whatever the caller did before
         *       calling wakeup().  Otherwise we might miss the interaction
         *       (kern_mutex.c can cause this problem).
         *
         *       lfence is insufficient as it may allow a written state to
         *       reorder around the cpumask load.
         */
        if ((domain & PWAKEUP_MYCPU) == 0) {
                cpu_mfence();
                mask = slpque_cpumasks[cid];
                CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
                if (CPUMASK_TESTNZERO(mask)) {
                        lwkt_send_ipiq2_mask(mask, _wakeup, ident,
                                             domain | PWAKEUP_MYCPU);
                }
        }
done:
        logtsleep1(wakeup_end);
        crit_exit();
}

/*
 * Wakeup all threads tsleep()ing on the specified ident, on all cpus.
 */
void
wakeup(const volatile void *ident)
{
        globaldata_t gd = mycpu;
        thread_t td = gd->gd_curthread;

        if (td && (td->td_flags & TDF_DELAYED_WAKEUP)) {
                /*
                 * If we are in a delayed wakeup section, record up to two
                 * wakeups in a per-CPU queue and issue them when we block
                 * or exit the delayed wakeup section.
                 */
                if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[0], NULL, ident))
                        return;
                if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[1], NULL, ident))
                        return;

                ident = atomic_swap_ptr(
                            __DEQUALIFY(volatile void **,
                                        &gd->gd_delayed_wakeup[1]),
                            __DEALL(ident));
                ident = atomic_swap_ptr(
                            __DEQUALIFY(volatile void **,
                                        &gd->gd_delayed_wakeup[0]),
                            __DEALL(ident));
        }

        _wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, gd->gd_cpuid));
}

/*
 * Wakeup one thread tsleep()ing on the specified ident, on any cpu.
 */
void
wakeup_one(const volatile void *ident)
{
        /* XXX potentially round-robin the first responding cpu */
        _wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
                                PWAKEUP_ONE);
}

/*
 * Wakeup threads tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu(const volatile void *ident)
{
        _wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
                                PWAKEUP_MYCPU);
}

/*
 * Wakeup one thread tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu_one(const volatile void *ident)
{
        /* XXX potentially round-robin the first responding cpu */
        _wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
                                PWAKEUP_MYCPU | PWAKEUP_ONE);
}

/*
 * Wakeup all threads tsleep()ing on the specified ident on the specified
 * cpu only.
 */
void
wakeup_oncpu(globaldata_t gd, const volatile void *ident)
{
        globaldata_t mygd = mycpu;

        if (gd == mygd) {
                _wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
                                        PWAKEUP_MYCPU);
        } else {
                lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
                                PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
                                PWAKEUP_MYCPU);
        }
}

/*
 * Wakeup one thread tsleep()ing on the specified ident on the specified
 * cpu only.
 */
void
wakeup_oncpu_one(globaldata_t gd, const volatile void *ident)
{
        globaldata_t mygd = mycpu;

        if (gd == mygd) {
                _wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
                                        PWAKEUP_MYCPU | PWAKEUP_ONE);
        } else {
                lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
                                PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
                                PWAKEUP_MYCPU | PWAKEUP_ONE);
        }
}
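
/*
 * Illustrative sketch (not part of the original file): how the delayed
 * wakeup batching recorded by wakeup() above is typically bracketed by a
 * caller issuing several wakeups on a hot path.  Up to two distinct idents
 * are stored in gd_delayed_wakeup[] per cpu and flushed either when the
 * thread blocks (tsleep() calls wakeup_end_delayed()) or when the section
 * is explicitly closed.  chan1/chan2 are invented wait channels and the
 * block is compiled out.
 */
#if 0
static void
flush_two_channels(void *chan1, void *chan2)
{
        wakeup_start_delayed();
        wakeup(chan1);          /* recorded in gd_delayed_wakeup[0] */
        wakeup(chan2);          /* recorded in gd_delayed_wakeup[1] */
        wakeup_end_delayed();   /* both wakeups actually issued here */
}
#endif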

/*
 * Wakeup all threads waiting on the specified ident that slept using
 * the specified domain, on all cpus.
 */
void
wakeup_domain(const volatile void *ident, int domain)
{
        _wakeup(__DEALL(ident), PWAKEUP_ENCODE(domain, mycpu->gd_cpuid));
}

/*
 * Wakeup one thread waiting on the specified ident that slept using
 * the specified domain, on any cpu.
 */
void
wakeup_domain_one(const volatile void *ident, int domain)
{
        /* XXX potentially round-robin the first responding cpu */
        _wakeup(__DEALL(ident),
                PWAKEUP_ENCODE(domain, mycpu->gd_cpuid) | PWAKEUP_ONE);
}

void
wakeup_start_delayed(void)
{
        globaldata_t gd = mycpu;

        crit_enter();
        gd->gd_curthread->td_flags |= TDF_DELAYED_WAKEUP;
        crit_exit();
}

void
wakeup_end_delayed(void)
{
        globaldata_t gd = mycpu;

        if (gd->gd_curthread->td_flags & TDF_DELAYED_WAKEUP) {
                crit_enter();
                gd->gd_curthread->td_flags &= ~TDF_DELAYED_WAKEUP;
                if (gd->gd_delayed_wakeup[0] || gd->gd_delayed_wakeup[1]) {
                        if (gd->gd_delayed_wakeup[0]) {
                                wakeup(gd->gd_delayed_wakeup[0]);
                                gd->gd_delayed_wakeup[0] = NULL;
                        }
                        if (gd->gd_delayed_wakeup[1]) {
                                wakeup(gd->gd_delayed_wakeup[1]);
                                gd->gd_delayed_wakeup[1] = NULL;
                        }
                }
                crit_exit();
        }
}

/*
 * setrunnable()
 *
 * Make a process runnable.  lp->lwp_token must be held on call and this
 * function must be called from the cpu owning lp.
 *
 * This only has an effect if we are in LSSTOP or LSSLEEP.
 */
void
setrunnable(struct lwp *lp)
{
        thread_t td = lp->lwp_thread;

        ASSERT_LWKT_TOKEN_HELD(&lp->lwp_token);
        KKASSERT(td->td_gd == mycpu);
        crit_enter();
        if (lp->lwp_stat == LSSTOP)
                lp->lwp_stat = LSSLEEP;
        if (lp->lwp_stat == LSSLEEP) {
                _tsleep_remove(td);
                lwkt_schedule(td);
        } else if (td->td_flags & TDF_SINTR) {
                lwkt_schedule(td);
        }
        crit_exit();
}

/*
 * The process is stopped due to some condition, usually because p_stat is
 * set to SSTOP, but also possibly due to being traced.
 *
 * Caller must hold p->p_token
 *
 * NOTE!  If the caller sets SSTOP, the caller must also clear P_WAITED
 * because the parent may check the child's status before the child actually
 * gets to this routine.
 *
 * This routine is called with the current lwp only, typically just
 * before returning to userland if the process state is detected as
 * possibly being in a stopped state.
 */
void
tstop(void)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p = lp->lwp_proc;
        struct proc *q;

        lwkt_gettoken(&lp->lwp_token);
        crit_enter();

        /*
         * If LWP_MP_WSTOP is set, we were sleeping
         * while our process was stopped.  At this point
         * we were already counted as stopped.
         */
        if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
                /*
                 * If we're the last thread to stop, signal
                 * our parent.
                 */
                p->p_nstopped++;
                atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
                wakeup(&p->p_nstopped);
                if (p->p_nstopped == p->p_nthreads) {
                        /*
                         * Token required to interlock kern_wait()
                         */
                        q = p->p_pptr;
                        PHOLD(q);
                        lwkt_gettoken(&q->p_token);
                        p->p_flags &= ~P_WAITED;
                        wakeup(p->p_pptr);
                        if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
                                ksignal(q, SIGCHLD);
                        lwkt_reltoken(&q->p_token);
                        PRELE(q);
                }
        }

        /*
         * Wait here while in a stopped state, interlocked with lwp_token.
         * We must break out if the whole process is trying to exit.
         */
        while (STOPLWP(p, lp)) {
                lp->lwp_stat = LSSTOP;
                tsleep(p, 0, "stop", 0);
        }
        p->p_nstopped--;
        atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
        crit_exit();
        lwkt_reltoken(&lp->lwp_token);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.  This is a pcpu callout.
 *
 * We segment the lwp scan on a pcpu basis.  This does NOT
 * mean the associated lwps are on this cpu, it is done
 * just to break the work up.
 *
 * The callout on cpu0 rolls up the stats from the other
 * cpus.
 */
static int loadav_count_runnable(struct lwp *p, void *data);

static void
loadav(void *arg)
{
        globaldata_t gd = mycpu;
        struct loadavg *avg;
        int i, nrun;

        nrun = 0;
        alllwp_scan(loadav_count_runnable, &nrun, 1);
        gd->gd_loadav_nrunnable = nrun;
        if (gd->gd_cpuid == 0) {
                avg = &averunnable;
                nrun = 0;
                for (i = 0; i < ncpus; ++i)
                        nrun += globaldata_find(i)->gd_loadav_nrunnable;
                for (i = 0; i < 3; i++) {
                        avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
                            (long)nrun * FSCALE * (FSCALE - cexp[i])) >>
                            FSHIFT;
                }
        }

        /*
         * Schedule the next update to occur after 5 seconds, but add a
         * random variation to avoid synchronisation with processes that
         * run at regular intervals.
         */
        callout_reset(&gd->gd_loadav_callout,
                      hz * 4 + (int)(krandom() % (hz * 2 + 1)),
                      loadav, NULL);
}

static int
loadav_count_runnable(struct lwp *lp, void *data)
{
        int *nrunp = data;
        thread_t td;

        switch (lp->lwp_stat) {
        case LSRUN:
                if ((td = lp->lwp_thread) == NULL)
                        break;
                if (td->td_flags & TDF_BLOCKED)
                        break;
                ++*nrunp;
                break;
        default:
                break;
        }
        lwkt_yield();
        return(0);
}

/*
 * Regular data collection
 */
static uint64_t
collect_load_callback(int n)
{
        int fscale = averunnable.fscale;

        return ((averunnable.ldavg[0] * 100 + (fscale >> 1)) / fscale);
}
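
/*
 * Illustrative note (not part of the original file): the fixed point
 * recurrence applied by loadav() above is the usual exponentially decayed
 * average,
 *
 *      load' = load * exp(-t/T) + nrun * (1 - exp(-t/T))
 *
 * with t = 5 seconds and T = 60, 300 and 900 seconds, which is where the
 * cexp[] constants exp(-1/12), exp(-1/60) and exp(-1/180) come from.  For
 * example, with a steady nrun of 2 the one minute figure converges toward
 * 2.0, closing roughly 63% of the remaining distance every 60 seconds.
 */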

static void
sched_setup(void *dummy __unused)
{
        globaldata_t save_gd = mycpu;
        globaldata_t gd;
        int n;

        kcollect_register(KCOLLECT_LOAD, "load", collect_load_callback,
                          KCOLLECT_SCALE(KCOLLECT_LOAD_FORMAT, 0));

        /*
         * Kick off timeout driven events by calling first time.  We
         * split the work across available cpus to help scale it,
         * it can eat a lot of cpu when there are a lot of processes
         * on the system.
         */
        for (n = 0; n < ncpus; ++n) {
                gd = globaldata_find(n);
                lwkt_setcpu_self(gd);
                callout_init_mp(&gd->gd_loadav_callout);
                callout_init_mp(&gd->gd_schedcpu_callout);
                schedcpu(NULL);
                loadav(NULL);
        }
        lwkt_setcpu_self(save_gd);
}

/*
 * Extremely early initialization, dummy-up the tables so we don't have
 * to conditionalize for NULL in _wakeup() and tsleep_interlock().  Even
 * though the system isn't blocking this early, these functions still
 * try to access the hash table.
 *
 * This setup will be overridden once sched_dyninit() -> sleep_gdinit()
 * is called.
 */
void
sleep_early_gdinit(globaldata_t gd)
{
        static struct tslpque dummy_slpque;
        static cpumask_t dummy_cpumasks;

        slpque_tablesize = 1;
        gd->gd_tsleep_hash = &dummy_slpque;
        slpque_cpumasks = &dummy_cpumasks;
        TAILQ_INIT(&dummy_slpque);
}

/*
 * PCPU initialization.  Called after KMALLOC is operational, by
 * sched_dyninit() for cpu 0, and by mi_gdinit() for other cpus later.
 *
 * WARNING! The pcpu hash table is smaller than the global cpumask
 *          hash table, which can save us a lot of memory when maxproc
 *          is set high.
 */
void
sleep_gdinit(globaldata_t gd)
{
        struct thread *td;
        uint32_t n;
        uint32_t i;

        /*
         * This shouldn't happen, i.e. there shouldn't be any threads
         * waiting on the dummy tsleep queue this early in the boot.
         */
        if (gd->gd_cpuid == 0) {
                TAILQ_FOREACH(td, &gd->gd_tsleep_hash[0], td_sleepq) {
                        kprintf("SLEEP_GDINIT SWITCH %s\n", td->td_comm);
                }
        }

        /*
         * Note that we have to allocate one extra slot because we are
         * shifting a modulo value.  TCHASHSHIFT(slpque_tablesize - 1) can
         * return the same value as TCHASHSHIFT(slpque_tablesize).
         */
        n = TCHASHSHIFT(slpque_tablesize) + 1;

        gd->gd_tsleep_hash = kmalloc(sizeof(struct tslpque) * n,
                                     M_TSLEEP, M_WAITOK | M_ZERO);
        for (i = 0; i < n; ++i)
                TAILQ_INIT(&gd->gd_tsleep_hash[i]);
}

/*
 * Dynamic initialization after the memory system is operational.
 */
static void
sched_dyninit(void *dummy __unused)
{
        int tblsize;
        int tblsize2;
        int n;

        /*
         * Calculate the table size for the slpque hash.  We want a prime
         * number large enough to avoid overloading slpque_cpumasks when
         * the system has a large number of sleeping processes, which will
         * spam IPIs on wakeup().
         *
         * While it is true this is really a per-lwp factor, generally
         * speaking the maxproc limit is a good metric to go by.
         */
        for (tblsize = maxproc | 1; ; tblsize += 2) {
                if (tblsize % 3 == 0)
                        continue;
                if (tblsize % 5 == 0)
                        continue;
                tblsize2 = (tblsize / 2) | 1;
                for (n = 7; n < tblsize2; n += 2) {
                        if (tblsize % n == 0)
                                break;
                }
                if (n == tblsize2)
                        break;
        }

        /*
         * PIDs are currently limited to 6 digits.  Cap the table size
         * at double this.
         */
        if (tblsize > 2000003)
                tblsize = 2000003;

        slpque_tablesize = tblsize;
        slpque_cpumasks = kmalloc(sizeof(*slpque_cpumasks) * slpque_tablesize,
                                  M_TSLEEP, M_WAITOK | M_ZERO);
        sleep_gdinit(mycpu);
}
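
/*
 * Illustrative note (not part of the original file): the loop in
 * sched_dyninit() above searches upward from maxproc for an odd number not
 * divisible by 3, 5, or any odd factor below half its value, which is
 * effectively the next prime at or above maxproc.  As a hypothetical
 * example, if maxproc were 100000 the scan would settle on 100003, giving
 * the global slpque_cpumasks[] hash 100003 entries while each pcpu queue
 * array gets only TCHASHSHIFT(100003) + 1 = 6251 entries.
 */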