/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/uio.h>
#include <sys/kcollect.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/ktr.h>
#include <sys/serialize.h>

#include <sys/signal2.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mutex2.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include <vm/vm_extern.h>

struct tslpque {
	TAILQ_HEAD(, thread)	queue;
	const volatile void	*ident0;
	const volatile void	*ident1;
	const volatile void	*ident2;
	const volatile void	*ident3;
};

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL);
static void sched_dyninit (void *dummy);
SYSINIT(sched_dyninit, SI_BOOT1_DYNALLOC, SI_ORDER_FIRST, sched_dyninit, NULL);

int	lbolt;
void	*lbolt_syncer;
int	ncpus;
int	ncpus2, ncpus2_shift, ncpus2_mask;	/* note: mask not cpumask_t */
int	ncpus_fit, ncpus_fit_mask;		/* note: mask not cpumask_t */
int	safepri;
int	tsleep_now_works;
int	tsleep_crypto_dump = 0;

MALLOC_DEFINE(M_TSLEEP, "tslpque", "tsleep queues");

#define __DEALL(ident)	__DEQUALIFY(void *, ident)

#if !defined(KTR_TSLEEP)
#define KTR_TSLEEP	KTR_ALL
#endif
KTR_INFO_MASTER(tsleep);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_beg, 0, "tsleep enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, tsleep_end, 1, "tsleep exit");
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_beg, 2, "wakeup enter %p", const volatile void *ident);
KTR_INFO(KTR_TSLEEP, tsleep, wakeup_end, 3, "wakeup exit");
KTR_INFO(KTR_TSLEEP, tsleep, ilockfail, 4, "interlock failed %p", const volatile void *ident);

#define logtsleep1(name)	KTR_LOG(tsleep_ ## name)
#define logtsleep2(name, val)	KTR_LOG(tsleep_ ## name, val)

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

static void	endtsleep (void *);
static void	loadav (void *arg);
static void	schedcpu (void *arg);

static int pctcpu_decay = 10;
SYSCTL_INT(_kern, OID_AUTO, pctcpu_decay, CTLFLAG_RW, &pctcpu_decay, 0, "");

/*
 * kernel uses `FSCALE', userland (SHOULD) use kern.fscale
 */
int fscale __unused = FSCALE;	/* exported to systat */
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * Recompute process priorities, once a second.
 *
 * Since the userland schedulers are typically event oriented, if the
 * estcpu calculation at wakeup() time is not sufficient to make a
 * process runnable relative to other processes in the system we have
 * a 1-second recalc to help out.
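 *
 * schedcpu() reschedules itself every hz ticks on each cpu via the
 * per-cpu gd_schedcpu_callout; see the callout_reset() at the end of
 * the function below.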
 *
 * This code also allows us to store sysclock_t data in the process structure
 * without fear of an overrun, since sysclock_t is guaranteed to hold
 * several seconds worth of count.
 *
 * WARNING!  callouts can preempt normal threads.  However, they will not
 * preempt a thread holding a spinlock so we *can* safely use spinlocks.
 */
static int schedcpu_stats(struct proc *p, void *data __unused);
static int schedcpu_resource(struct proc *p, void *data __unused);

static void
schedcpu(void *arg)
{
	allproc_scan(schedcpu_stats, NULL, 1);
	allproc_scan(schedcpu_resource, NULL, 1);
	if (mycpu->gd_cpuid == 0) {
		wakeup((caddr_t)&lbolt);
		wakeup(lbolt_syncer);
	}
	callout_reset(&mycpu->gd_schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * General process statistics once a second
 */
static int
schedcpu_stats(struct proc *p, void *data __unused)
{
	struct lwp *lp;

	/*
	 * Threads may not be completely set up if the process is in the
	 * SIDL state.
	 */
	if (p->p_stat == SIDL)
		return(0);

	PHOLD(p);
	if (lwkt_trytoken(&p->p_token) == FALSE) {
		PRELE(p);
		return(0);
	}

	p->p_swtime++;
	FOREACH_LWP_IN_PROC(lp, p) {
		if (lp->lwp_stat == LSSLEEP) {
			++lp->lwp_slptime;
			if (lp->lwp_slptime == 1)
				p->p_usched->uload_update(lp);
		}

		/*
		 * Only recalculate processes that are active or have slept
		 * less than 2 seconds.  The schedulers understand this.
		 * Otherwise decay lwp_pctcpu by 1/pctcpu_decay (default
		 * 1/10) per second.
		 */
		if (lp->lwp_slptime <= 1) {
			p->p_usched->recalculate(lp);
		} else {
			int decay;

			decay = pctcpu_decay;
			cpu_ccfence();
			if (decay <= 1)
				decay = 1;
			if (decay > 100)
				decay = 100;
			lp->lwp_pctcpu = (lp->lwp_pctcpu * (decay - 1)) / decay;
		}
	}
	lwkt_reltoken(&p->p_token);
	lwkt_yield();
	PRELE(p);
	return(0);
}

/*
 * Resource checks.  XXX break out since ksignal/killproc can block,
 * limiting us to one process killed per second.  There is probably
 * a better way.
 */
static int
schedcpu_resource(struct proc *p, void *data __unused)
{
	u_int64_t ttime;
	struct lwp *lp;

	if (p->p_stat == SIDL)
		return(0);

	PHOLD(p);
	if (lwkt_trytoken(&p->p_token) == FALSE) {
		PRELE(p);
		return(0);
	}

	if (p->p_stat == SZOMB || p->p_limit == NULL) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return(0);
	}

	ttime = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * We may have caught an lp in the middle of being
		 * created, lwp_thread can be NULL.
		 */
		if (lp->lwp_thread) {
			ttime += lp->lwp_thread->td_sticks;
			ttime += lp->lwp_thread->td_uticks;
		}
	}

	switch(plimit_testcpulimit(p->p_limit, ttime)) {
	case PLIMIT_TESTCPU_KILL:
		killproc(p, "exceeded maximum CPU limit");
		break;
	case PLIMIT_TESTCPU_XCPU:
		if ((p->p_flags & P_XCPU) == 0) {
			p->p_flags |= P_XCPU;
			ksignal(p, SIGXCPU);
		}
		break;
	default:
		break;
	}
	lwkt_reltoken(&p->p_token);
	lwkt_yield();
	PRELE(p);
	return(0);
}

/*
 * This is only used by ps.  Generate a cpu usage percentage over
 * a period of one second.
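 *
 * The new sample is blended with the previous value in proportion to how
 * much of the one-second window (ESTCPUFREQ ticks) it covers, or replaces
 * it outright once a full window's worth of ticks has accumulated:
 *
 *	acc    = (cpticks << FSHIFT) / ttlticks
 *	pctcpu = (acc * ttlticks + pctcpu * (ESTCPUFREQ - ttlticks)) /
 *		 ESTCPUFREQ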
 */
void
updatepcpu(struct lwp *lp, int cpticks, int ttlticks)
{
	fixpt_t acc;
	int remticks;

	acc = (cpticks << FSHIFT) / ttlticks;
	if (ttlticks >= ESTCPUFREQ) {
		lp->lwp_pctcpu = acc;
	} else {
		remticks = ESTCPUFREQ - ttlticks;
		lp->lwp_pctcpu = (acc * ttlticks + lp->lwp_pctcpu * remticks) /
				 ESTCPUFREQ;
	}
}

/*
 * Handy macros to calculate hash indices.  LOOKUP() calculates the
 * global cpumask hash index, TCHASHSHIFT() converts that into the
 * pcpu hash index.
 *
 * By making the pcpu hash arrays smaller we save a significant amount
 * of memory at very low cost.  The real cost is in IPIs, which are handled
 * by the much larger global cpumask hash table.
 */
#define LOOKUP_PRIME	66555444443333333ULL
#define LOOKUP(x)	((((uintptr_t)(x) + ((uintptr_t)(x) >> 18)) ^	\
			  LOOKUP_PRIME) % slpque_tablesize)
#define TCHASHSHIFT(x)	((x) >> 4)

static uint32_t	slpque_tablesize;
static cpumask_t *slpque_cpumasks;

SYSCTL_UINT(_kern, OID_AUTO, slpque_tablesize, CTLFLAG_RD, &slpque_tablesize,
    0, "");

/*
 * This is a dandy function that allows us to interlock tsleep/wakeup
 * operations with unspecified upper level locks, such as lockmgr locks,
 * simply by holding a critical section.  The sequence is:
 *
 *	(acquire upper level lock)
 *	tsleep_interlock(blah)
 *	(release upper level lock)
 *	tsleep(blah, ...)
 *
 * Basically this function queues us on the tsleep queue without actually
 * descheduling us.  When tsleep() is later called with PINTERLOCKED it
 * assumes the thread was already queued; otherwise tsleep() queues it
 * itself.
 *
 * Thus it is possible to receive the wakeup prior to going to sleep and
 * the race conditions are covered.
 */
static __inline void
_tsleep_interlock(globaldata_t gd, const volatile void *ident, int flags)
{
	thread_t td = gd->gd_curthread;
	struct tslpque *qp;
	uint32_t cid;
	uint32_t gid;

	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ) {
		cid = LOOKUP(td->td_wchan);
		gid = TCHASHSHIFT(cid);
		qp = &gd->gd_tsleep_hash[gid];
		TAILQ_REMOVE(&qp->queue, td, td_sleepq);
		if (TAILQ_FIRST(&qp->queue) == NULL) {
			qp->ident0 = NULL;
			qp->ident1 = NULL;
			qp->ident2 = NULL;
			qp->ident3 = NULL;
			ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
					       gd->gd_cpuid);
		}
	} else {
		td->td_flags |= TDF_TSLEEPQ;
	}
	cid = LOOKUP(ident);
	gid = TCHASHSHIFT(cid);
	qp = &gd->gd_tsleep_hash[gid];
	TAILQ_INSERT_TAIL(&qp->queue, td, td_sleepq);
	if (qp->ident0 != ident && qp->ident1 != ident &&
	    qp->ident2 != ident && qp->ident3 != ident) {
		if (qp->ident0 == NULL)
			qp->ident0 = ident;
		else if (qp->ident1 == NULL)
			qp->ident1 = ident;
		else if (qp->ident2 == NULL)
			qp->ident2 = ident;
		else if (qp->ident3 == NULL)
			qp->ident3 = ident;
		else
			qp->ident0 = (void *)(intptr_t)-1;
	}
	ATOMIC_CPUMASK_ORBIT(slpque_cpumasks[cid], gd->gd_cpuid);
	td->td_wchan = ident;
	td->td_wdomain = flags & PDOMAIN_MASK;
	crit_exit_quick(td);
}

void
tsleep_interlock(const volatile void *ident, int flags)
{
	_tsleep_interlock(mycpu, ident, flags);
}
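/*
 * Example (illustrative sketch only; sc and its fields are hypothetical):
 * a caller that must drop a lockmgr lock before sleeping can close the
 * wakeup race with the tsleep_interlock()/PINTERLOCKED pattern described
 * above:
 *
 *	lockmgr(&sc->lk, LK_EXCLUSIVE);
 *	while (sc->busy) {
 *		tsleep_interlock(&sc->busy, 0);
 *		lockmgr(&sc->lk, LK_RELEASE);
 *		tsleep(&sc->busy, PINTERLOCKED, "busywt", 0);
 *		lockmgr(&sc->lk, LK_EXCLUSIVE);
 *	}
 *	lockmgr(&sc->lk, LK_RELEASE);
 *
 * lksleep() below wraps exactly this release/sleep/reacquire sequence for
 * lockmgr locks.
 */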
/*
 * Remove thread from sleepq.  Must be called with a critical section held.
 * The thread must not be migrating.
 */
static __inline void
_tsleep_remove(thread_t td)
{
	globaldata_t gd = mycpu;
	struct tslpque *qp;
	uint32_t cid;
	uint32_t gid;

	KKASSERT(td->td_gd == gd && IN_CRITICAL_SECT(td));
	KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
	if (td->td_flags & TDF_TSLEEPQ) {
		td->td_flags &= ~TDF_TSLEEPQ;
		cid = LOOKUP(td->td_wchan);
		gid = TCHASHSHIFT(cid);
		qp = &gd->gd_tsleep_hash[gid];
		TAILQ_REMOVE(&qp->queue, td, td_sleepq);
		if (TAILQ_FIRST(&qp->queue) == NULL) {
			ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid],
					       gd->gd_cpuid);
		}
		td->td_wchan = NULL;
		td->td_wdomain = 0;
	}
}

void
tsleep_remove(thread_t td)
{
	_tsleep_remove(td);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 *
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 *
 * WARNING!  This code can't block (short of switching away), or bad things
 * will happen.  No getting tokens, no blocking locks, etc.
 */
int
tsleep(const volatile void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;		/* may be NULL */
	globaldata_t gd;
	int sig;
	int catch;
	int error;
	int oldpri;
	struct callout thandle;

	/*
	 * Currently a severe hack.  Make sure any delayed wakeups
	 * are flushed before we sleep or we might deadlock on whatever
	 * event we are sleeping on.
	 */
	if (td->td_flags & TDF_DELAYED_WAKEUP)
		wakeup_end_delayed();

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (!tsleep_crypto_dump && (tsleep_now_works == 0 || panicstr)) {
		/*
		 * After a panic, or before we actually have an operational
		 * softclock, just give interrupts a chance, then just return;
		 *
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splz();
		oldpri = td->td_pri;
		lwkt_setpri_self(safepri);
		lwkt_switch();
		lwkt_setpri_self(oldpri);
		return (0);
	}
	logtsleep2(tsleep_beg, ident);
	gd = td->td_gd;
	KKASSERT(td != &gd->gd_idlethread);	/* you must be kidding! */
	td->td_wakefromcpu = -1;		/* overwritten by _wakeup */

	/*
	 * NOTE: all of this occurs on the current cpu, including any
	 * callout-based wakeups, so a critical section is a sufficient
	 * interlock.
	 *
	 * The entire sequence through to where we actually sleep must
	 * run without breaking the critical section.
	 */
	catch = flags & PCATCH;
	error = 0;
	sig = 0;

	crit_enter_quick(td);

	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(lp == NULL ||
		lp->lwp_stat == LSRUN ||	/* Obvious */
		lp->lwp_stat == LSSTOP,		/* Set in tstop */
		("tsleep %p %s %d",
			ident, wmesg, lp->lwp_stat));

	/*
	 * We interlock the sleep queue if the caller has not already done
	 * it for us.  This must be done before we potentially acquire any
	 * tokens or we can lose the wakeup.
	 */
	if ((flags & PINTERLOCKED) == 0) {
		_tsleep_interlock(gd, ident, flags);
	}

	/*
	 * Setup for the current process (if this is a process).  We must
	 * interlock with lwp_token to avoid remote wakeup races via
	 * setrunnable()
	 */
	if (lp) {
		lwkt_gettoken(&lp->lwp_token);

		/*
		 * If the umbrella process is in the SCORE state then
		 * make sure that the thread is flagged going into a
		 * normal sleep to allow the core dump to proceed, otherwise
		 * the coredump can end up waiting forever.  If the normal
		 * sleep is woken up, the thread will enter a stopped state
		 * upon return to userland.
		 *
		 * We do not want to interrupt or cause a thread exit at
		 * this juncture because that will mess-up the state the
		 * coredump is trying to save.
		 */
		if (p->p_stat == SCORE &&
		    (lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
			atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
			++p->p_nstopped;
		}

		/*
		 * PCATCH requested.
		 */
		if (catch) {
			/*
			 * Early termination if PCATCH was set and a
			 * signal is pending, interlocked with the
			 * critical section.
			 *
			 * Early termination only occurs when tsleep() is
			 * entered while in a normal LSRUN state.
			 */
			if ((sig = CURSIG(lp)) != 0)
				goto resume;

			/*
			 * Causes ksignal to wake us up if a signal is
			 * received (interlocked with lp->lwp_token).
			 */
			lp->lwp_flags |= LWP_SINTR;
		}
	} else {
		KKASSERT(p == NULL);
	}

	/*
	 * Make sure the current process has been untangled from
	 * the userland scheduler and initialize slptime to start
	 * counting.
	 *
	 * NOTE: td->td_wakefromcpu is pre-set by the release function
	 *	 for the dfly scheduler, and then adjusted by _wakeup()
	 */
	if (lp) {
		p->p_usched->release_curproc(lp);
		lp->lwp_slptime = 0;
	}

	/*
	 * If the interlocked flag is set but our cpu bit in the slpqueue
	 * is no longer set, then a wakeup was processed in between the
	 * tsleep_interlock() (ours or the caller's) and here.  This can
	 * occur under numerous circumstances including when we release the
	 * current process.
	 *
	 * Extreme loads can cause the sending of an IPI (e.g. wakeup()'s)
	 * to process incoming IPIs, thus draining incoming wakeups.
	 */
	if ((td->td_flags & TDF_TSLEEPQ) == 0) {
		logtsleep2(ilockfail, ident);
		goto resume;
	}

	/*
	 * Scheduling is blocked while in a critical section.  Coincide
	 * the descheduled-by-tsleep flag with the descheduling of the
	 * lwkt.
	 *
	 * The timer callout is localized on our cpu and interlocked by
	 * our critical section.
	 */
	lwkt_deschedule_self(td);
	td->td_flags |= TDF_TSLEEP_DESCHEDULED;
	td->td_wmesg = wmesg;

	/*
	 * Setup the timeout, if any.  The timeout is only operable while
	 * the thread is flagged descheduled.
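	 *
	 * The callout fires endtsleep() on this cpu after 'timo' ticks; see
	 * endtsleep() below for how a race against a normal wakeup is
	 * resolved via TDF_TIMEOUT_RUNNING.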
	 */
	KKASSERT((td->td_flags & TDF_TIMEOUT) == 0);
	if (timo) {
		callout_init_mp(&thandle);
		callout_reset(&thandle, timo, endtsleep, td);
	}

	/*
	 * Beddy bye bye.
	 */
	if (lp) {
		/*
		 * Ok, we are sleeping.  Place us in the LSSLEEP state.
		 */
		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);

		/*
		 * tstop() sets LSSTOP, so don't fiddle with that.
		 */
		if (lp->lwp_stat != LSSTOP)
			lp->lwp_stat = LSSLEEP;
		lp->lwp_ru.ru_nvcsw++;
		p->p_usched->uload_update(lp);
		lwkt_switch();

		/*
		 * And when we are woken up, put us back in LSRUN.  If we
		 * slept for over a second, recalculate our estcpu.
		 */
		lp->lwp_stat = LSRUN;
		if (lp->lwp_slptime) {
			p->p_usched->uload_update(lp);
			p->p_usched->recalculate(lp);
		}
		lp->lwp_slptime = 0;
	} else {
		lwkt_switch();
	}

	/*
	 * Make sure we haven't switched cpus while we were asleep.  It's
	 * not supposed to happen.  Cleanup our temporary flags.
	 */
	KKASSERT(gd == td->td_gd);

	/*
	 * Cleanup the timeout.  If the timeout has already occurred thandle
	 * has already been stopped, otherwise stop thandle.  If the timeout
	 * is running (the callout thread must be blocked trying to get
	 * lwp_token) then wait for us to get scheduled.
	 */
	if (timo) {
		while (td->td_flags & TDF_TIMEOUT_RUNNING) {
			/* else we won't get rescheduled! */
			if (lp->lwp_stat != LSSTOP)
				lp->lwp_stat = LSSLEEP;
			lwkt_deschedule_self(td);
			td->td_wmesg = "tsrace";
			lwkt_switch();
			kprintf("td %p %s: timeout race\n", td, td->td_comm);
		}
		if (td->td_flags & TDF_TIMEOUT) {
			td->td_flags &= ~TDF_TIMEOUT;
			error = EWOULDBLOCK;
		} else {
			/* does not block when on same cpu */
			callout_stop(&thandle);
		}
	}
	td->td_flags &= ~TDF_TSLEEP_DESCHEDULED;

	/*
	 * Make sure we have been removed from the sleepq.  In most
	 * cases this will have been done for us already but it is
	 * possible for a scheduling IPI to be in-flight from a
	 * previous tsleep/tsleep_interlock() or due to a straight-out
	 * call to lwkt_schedule() (in the case of an interrupt thread),
	 * causing a spurious wakeup.
	 */
	_tsleep_remove(td);
	td->td_wmesg = NULL;

	/*
	 * Figure out the correct error return.  If interrupted by a
	 * signal we want to return EINTR or ERESTART.
	 */
resume:
	if (lp) {
		if (catch && error == 0) {
			if (sig != 0 || (sig = CURSIG(lp))) {
				if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
					error = EINTR;
				else
					error = ERESTART;
			}
		}

		lp->lwp_flags &= ~LWP_SINTR;

		/*
		 * Unconditionally set us to LSRUN on resume.  lwp_stat could
		 * be in a weird state due to the goto resume, particularly
		 * when tsleep() is called from tstop().
		 */
		lp->lwp_stat = LSRUN;
		lwkt_reltoken(&lp->lwp_token);
	}
	logtsleep1(tsleep_end);
	crit_exit_quick(td);
	return (error);
}

/*
 * Interlocked spinlock sleep.  An exclusively held spinlock must
 * be passed to ssleep().  The function will atomically release the
 * spinlock and tsleep on the ident, then reacquire the spinlock and
 * return.
 *
 * This routine is fairly important along the critical path, so optimize it
 * heavily.
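 *
 * Typical caller pattern (sketch only; sc and its fields are hypothetical):
 *
 *	spin_lock(&sc->spin);
 *	while (sc->count == 0)
 *		ssleep(&sc->count, &sc->spin, 0, "scwait", 0);
 *	spin_unlock(&sc->spin);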
 */
int
ssleep(const volatile void *ident, struct spinlock *spin, int flags,
       const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	spin_unlock_quick(gd, spin);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	_spin_lock_quick(gd, spin, wmesg);

	return (error);
}

int
lksleep(const volatile void *ident, struct lock *lock, int flags,
	const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	lockmgr(lock, LK_RELEASE);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	lockmgr(lock, LK_EXCLUSIVE);

	return (error);
}

/*
 * Interlocked mutex sleep.  An exclusively held mutex must be passed
 * to mtxsleep().  The function will atomically release the mutex
 * and tsleep on the ident, then reacquire the mutex and return.
 */
int
mtxsleep(const volatile void *ident, struct mtx *mtx, int flags,
	 const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int error;

	_tsleep_interlock(gd, ident, flags);
	mtx_unlock(mtx);
	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	mtx_lock_ex_quick(mtx);

	return (error);
}

/*
 * Interlocked serializer sleep.  An exclusively held serializer must
 * be passed to zsleep().  The function will atomically release
 * the serializer and tsleep on the ident, then reacquire the serializer
 * and return.
 */
int
zsleep(const volatile void *ident, struct lwkt_serialize *slz, int flags,
       const char *wmesg, int timo)
{
	globaldata_t gd = mycpu;
	int ret;

	ASSERT_SERIALIZED(slz);

	_tsleep_interlock(gd, ident, flags);
	lwkt_serialize_exit(slz);
	ret = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
	lwkt_serialize_enter(slz);

	return ret;
}

/*
 * Directly block on the LWKT thread by descheduling it.  This
 * is much faster than tsleep(), but the only legal way to wake
 * us up is to directly schedule the thread.
 *
 * Setting TDF_SINTR will cause new signals to directly schedule us.
 *
 * This routine must be called while in a critical section.
 */
int
lwkt_sleep(const char *wmesg, int flags)
{
	thread_t td = curthread;
	int sig;

	if ((flags & PCATCH) == 0 || td->td_lwp == NULL) {
		td->td_flags |= TDF_BLOCKED;
		td->td_wmesg = wmesg;
		lwkt_deschedule_self(td);
		lwkt_switch();
		td->td_wmesg = NULL;
		td->td_flags &= ~TDF_BLOCKED;
		return(0);
	}
	if ((sig = CURSIG(td->td_lwp)) != 0) {
		if (SIGISMEMBER(td->td_proc->p_sigacts->ps_sigintr, sig))
			return(EINTR);
		else
			return(ERESTART);
	}
	td->td_flags |= TDF_BLOCKED | TDF_SINTR;
	td->td_wmesg = wmesg;
	lwkt_deschedule_self(td);
	lwkt_switch();
	td->td_flags &= ~(TDF_BLOCKED | TDF_SINTR);
	td->td_wmesg = NULL;
	return(0);
}

/*
 * Implement the timeout for tsleep.
 *
 * This type of callout timeout is scheduled on the same cpu the process
 * is sleeping on.  Also, at the moment, the MP lock is held.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct lwp *lp;

	/*
	 * We are going to have to get the lwp_token, which means we might
	 * block.  This can race a tsleep getting woken up by other means,
	 * so set TDF_TIMEOUT_RUNNING to force the tsleep to wait for our
	 * processing to complete (sorry tsleep!).
	 *
	 * We can safely set td_flags because td MUST be on the same cpu
	 * as we are.
	 */
	KKASSERT(td->td_gd == mycpu);
	crit_enter();
	td->td_flags |= TDF_TIMEOUT_RUNNING | TDF_TIMEOUT;

	/*
	 * This can block but TDF_TIMEOUT_RUNNING will prevent the thread
	 * from exiting the tsleep on us.  The flag is interlocked by virtue
	 * of lp being on the same cpu as we are.
	 */
	if ((lp = td->td_lwp) != NULL)
		lwkt_gettoken(&lp->lwp_token);

	KKASSERT(td->td_flags & TDF_TSLEEP_DESCHEDULED);

	if (lp) {
		/*
		 * The callout timer should normally never be set in tstop()
		 * because it passes a timeout of 0.  However, there is a
		 * case during thread exit (which SSTOP's all the threads)
		 * for which tstop() must break out and can (properly) leave
		 * the thread in LSSTOP.
		 */
		KKASSERT(lp->lwp_stat != LSSTOP ||
			 (lp->lwp_mpflags & LWP_MP_WEXIT));
		setrunnable(lp);
		lwkt_reltoken(&lp->lwp_token);
	} else {
		_tsleep_remove(td);
		lwkt_schedule(td);
	}
	KKASSERT(td->td_gd == mycpu);
	td->td_flags &= ~TDF_TIMEOUT_RUNNING;
	crit_exit();
}

/*
 * Make all threads sleeping on the specified identifier runnable.
 * If PWAKEUP_ONE is set in the domain, at most one thread is woken.
 *
 * The domain encodes the sleep/wakeup domain, flags, plus the originating
 * cpu.
 *
 * This call may run without the MP lock held.  We can only manipulate thread
 * state on the cpu owning the thread.  We CANNOT manipulate process state
 * at all.
 *
 * _wakeup() can be passed to an IPI so we can't use (const volatile
 * void *ident).
 */
static void
_wakeup(void *ident, int domain)
{
	struct tslpque *qp;
	struct thread *td;
	struct thread *ntd;
	globaldata_t gd;
	cpumask_t mask;
	uint32_t cid;
	uint32_t gid;
	int wids = 0;

	crit_enter();
	logtsleep2(wakeup_beg, ident);
	gd = mycpu;
	cid = LOOKUP(ident);
	gid = TCHASHSHIFT(cid);
	qp = &gd->gd_tsleep_hash[gid];
restart:
	for (td = TAILQ_FIRST(&qp->queue); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_sleepq);
		if (td->td_wchan == ident &&
		    td->td_wdomain == (domain & PDOMAIN_MASK)
		) {
			KKASSERT(td->td_gd == gd);
			_tsleep_remove(td);
			td->td_wakefromcpu = PWAKEUP_DECODE(domain);
			if (td->td_flags & TDF_TSLEEP_DESCHEDULED) {
				lwkt_schedule(td);
				if (domain & PWAKEUP_ONE)
					goto done;
			}
			goto restart;
		}
		if (td->td_wchan == qp->ident0)
			wids |= 1;
		else if (td->td_wchan == qp->ident1)
			wids |= 2;
		else if (td->td_wchan == qp->ident2)
			wids |= 4;
		else if (td->td_wchan == qp->ident3)
			wids |= 8;
		else
			wids |= 16;	/* force ident0 to be retained (-1) */
	}

	/*
	 * Because a bunch of cpumask array entries cover the same queue, it
	 * is possible for our bit to remain set in some of them and cause
	 * spurious wakeup IPIs later on.  Make sure that the bit is cleared
	 * when a spurious IPI occurs to prevent further spurious IPIs.
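	 *
	 * The wids bits accumulated above record which of the four ident
	 * slots are still referenced by the remaining sleepers; slots no
	 * longer referenced are cleared below.  Bit 4 (value 16) means a
	 * sleeper was found whose ident is not tracked by any slot, in
	 * which case ident0 (possibly the -1 overflow marker) is retained
	 * rather than cleared.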
	 */
	if (TAILQ_FIRST(&qp->queue) == NULL) {
		ATOMIC_CPUMASK_NANDBIT(slpque_cpumasks[cid], gd->gd_cpuid);
		qp->ident0 = NULL;
		qp->ident1 = NULL;
		qp->ident2 = NULL;
		qp->ident3 = NULL;
	} else {
		if ((wids & 1) == 0) {
			if ((wids & 16) == 0)
				qp->ident0 = NULL;
		}
		if ((wids & 2) == 0)
			qp->ident1 = NULL;
		if ((wids & 4) == 0)
			qp->ident2 = NULL;
		if ((wids & 8) == 0)
			qp->ident3 = NULL;
	}

	/*
	 * We finished checking the current cpu but there still may be
	 * more work to do.  Either wakeup_one was requested and no matching
	 * thread was found, or a normal wakeup was requested and we have
	 * to continue checking cpus.
	 *
	 * It should be noted that this scheme is actually less expensive
	 * than the old scheme when waking up multiple threads, since we
	 * send only one IPI message per target candidate which may then
	 * schedule multiple threads.  Previously we could have wound up
	 * sending an IPI message for each thread on the target cpu
	 * (!= current cpu) that needed to be woken up.
	 *
	 * NOTE: Wakeups occurring on remote cpus are asynchronous.  This
	 *	 should be ok since we are passing idents in the IPI rather
	 *	 than thread pointers.
	 *
	 * NOTE: We MUST mfence (or use an atomic op) prior to reading
	 *	 the cpumask, as another cpu may have written to it in
	 *	 a fashion interlocked with whatever the caller did before
	 *	 calling wakeup().  Otherwise we might miss the interaction
	 *	 (kern_mutex.c can cause this problem).
	 *
	 *	 lfence is insufficient as it may allow a written state to
	 *	 reorder around the cpumask load.
	 */
	if ((domain & PWAKEUP_MYCPU) == 0) {
		globaldata_t tgd;
		const volatile void *id0;
		int n;

		cpu_mfence();
		mask = slpque_cpumasks[cid];
		CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
		while (CPUMASK_TESTNZERO(mask)) {
			n = BSRCPUMASK(mask);
			CPUMASK_NANDBIT(mask, n);
			tgd = globaldata_find(n);
			qp = &tgd->gd_tsleep_hash[gid];

			/*
			 * Both ident0 compares must come from a single load
			 * to avoid ident0 update races crossing the two
			 * compares.
			 */
			id0 = qp->ident0;
			cpu_ccfence();
			if (id0 == (void *)(intptr_t)-1) {
				lwkt_send_ipiq2(tgd, _wakeup, ident,
						domain | PWAKEUP_MYCPU);
				++tgd->gd_cnt.v_wakeup_colls;
			} else if (id0 == ident ||
				   qp->ident1 == ident ||
				   qp->ident2 == ident ||
				   qp->ident3 == ident) {
				lwkt_send_ipiq2(tgd, _wakeup, ident,
						domain | PWAKEUP_MYCPU);
			}
#if 0
			lwkt_send_ipiq2_mask(mask, _wakeup, ident,
					     domain | PWAKEUP_MYCPU);
#endif
		}
	}
done:
	logtsleep1(wakeup_end);
	crit_exit();
}

/*
 * Wakeup all threads tsleep()ing on the specified ident, on all cpus.
 */
void
wakeup(const volatile void *ident)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	if (td && (td->td_flags & TDF_DELAYED_WAKEUP)) {
		/*
		 * If we are in a delayed wakeup section, record up to two
		 * wakeups in a per-cpu queue and issue them when we block
		 * or exit the delayed wakeup section.
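		 *
		 * If both delayed-wakeup slots are already in use, the
		 * swaps below rotate the new ident in and we fall through
		 * to issue an immediate wakeup for the oldest displaced
		 * ident.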
		 */
		if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[0], NULL, ident))
			return;
		if (atomic_cmpset_ptr(&gd->gd_delayed_wakeup[1], NULL, ident))
			return;

		ident = atomic_swap_ptr(__DEQUALIFY(volatile void **, &gd->gd_delayed_wakeup[1]),
					__DEALL(ident));
		ident = atomic_swap_ptr(__DEQUALIFY(volatile void **, &gd->gd_delayed_wakeup[0]),
					__DEALL(ident));
	}

	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, gd->gd_cpuid));
}

/*
 * Wakeup one thread tsleep()ing on the specified ident, on any cpu.
 */
void
wakeup_one(const volatile void *ident)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_ONE);
}

/*
 * Wakeup threads tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu(const volatile void *ident)
{
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_MYCPU);
}

/*
 * Wakeup one thread tsleep()ing on the specified ident on the current cpu
 * only.
 */
void
wakeup_mycpu_one(const volatile void *ident)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mycpu->gd_cpuid) |
				PWAKEUP_MYCPU | PWAKEUP_ONE);
}

/*
 * Wakeup all threads tsleep()ing on the specified ident on the specified cpu
 * only.
 */
void
wakeup_oncpu(globaldata_t gd, const volatile void *ident)
{
	globaldata_t mygd = mycpu;

	if (gd == mygd) {
		_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
					PWAKEUP_MYCPU);
	} else {
		lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
				PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
				PWAKEUP_MYCPU);
	}
}

/*
 * Wakeup one thread tsleep()ing on the specified ident on the specified cpu
 * only.
 */
void
wakeup_oncpu_one(globaldata_t gd, const volatile void *ident)
{
	globaldata_t mygd = mycpu;

	if (gd == mygd) {
		_wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
					PWAKEUP_MYCPU | PWAKEUP_ONE);
	} else {
		lwkt_send_ipiq2(gd, _wakeup, __DEALL(ident),
				PWAKEUP_ENCODE(0, mygd->gd_cpuid) |
				PWAKEUP_MYCPU | PWAKEUP_ONE);
	}
}

/*
 * Wakeup all threads waiting on the specified ident that slept using
 * the specified domain, on all cpus.
 */
void
wakeup_domain(const volatile void *ident, int domain)
{
	_wakeup(__DEALL(ident), PWAKEUP_ENCODE(domain, mycpu->gd_cpuid));
}

/*
 * Wakeup one thread waiting on the specified ident that slept using
 * the specified domain, on any cpu.
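 *
 * A domain wakeup only matches threads whose tsleep() flags carried the
 * same PDOMAIN bits; see the td_wdomain assignment in _tsleep_interlock()
 * and the td_wdomain test in _wakeup().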
 */
void
wakeup_domain_one(const volatile void *ident, int domain)
{
	/* XXX potentially round-robin the first responding cpu */
	_wakeup(__DEALL(ident),
		PWAKEUP_ENCODE(domain, mycpu->gd_cpuid) | PWAKEUP_ONE);
}

void
wakeup_start_delayed(void)
{
	globaldata_t gd = mycpu;

	crit_enter();
	gd->gd_curthread->td_flags |= TDF_DELAYED_WAKEUP;
	crit_exit();
}

void
wakeup_end_delayed(void)
{
	globaldata_t gd = mycpu;

	if (gd->gd_curthread->td_flags & TDF_DELAYED_WAKEUP) {
		crit_enter();
		gd->gd_curthread->td_flags &= ~TDF_DELAYED_WAKEUP;
		if (gd->gd_delayed_wakeup[0] || gd->gd_delayed_wakeup[1]) {
			if (gd->gd_delayed_wakeup[0]) {
				wakeup(gd->gd_delayed_wakeup[0]);
				gd->gd_delayed_wakeup[0] = NULL;
			}
			if (gd->gd_delayed_wakeup[1]) {
				wakeup(gd->gd_delayed_wakeup[1]);
				gd->gd_delayed_wakeup[1] = NULL;
			}
		}
		crit_exit();
	}
}

/*
 * setrunnable()
 *
 * Make an lwp runnable.  lp->lwp_token must be held on call and this
 * function must be called from the cpu owning lp.
 *
 * This only has an effect if we are in LSSTOP or LSSLEEP.
 */
void
setrunnable(struct lwp *lp)
{
	thread_t td = lp->lwp_thread;

	ASSERT_LWKT_TOKEN_HELD(&lp->lwp_token);
	KKASSERT(td->td_gd == mycpu);
	crit_enter();
	if (lp->lwp_stat == LSSTOP)
		lp->lwp_stat = LSSLEEP;
	if (lp->lwp_stat == LSSLEEP) {
		_tsleep_remove(td);
		lwkt_schedule(td);
	} else if (td->td_flags & TDF_SINTR) {
		lwkt_schedule(td);
	}
	crit_exit();
}

/*
 * The process is stopped due to some condition, usually because p_stat is
 * set to SSTOP, but also possibly due to being traced.
 *
 * Caller must hold p->p_token
 *
 * NOTE!  If the caller sets SSTOP, the caller must also clear P_WAITED
 * because the parent may check the child's status before the child actually
 * gets to this routine.
 *
 * This routine is called with the current lwp only, typically just
 * before returning to userland if the process state is detected as
 * possibly being in a stopped state.
 */
void
tstop(void)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct proc *q;

	lwkt_gettoken(&lp->lwp_token);
	crit_enter();

	/*
	 * If LWP_MP_WSTOP is set, we were sleeping
	 * while our process was stopped.  At this point
	 * we were already counted as stopped.
	 */
	if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
		/*
		 * If we're the last thread to stop, signal
		 * our parent.
		 */
		p->p_nstopped++;
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
		wakeup(&p->p_nstopped);
		if (p->p_nstopped == p->p_nthreads) {
			/*
			 * Token required to interlock kern_wait()
			 */
			q = p->p_pptr;
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
			p->p_flags &= ~P_WAITED;
			wakeup(p->p_pptr);
			if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
				ksignal(q, SIGCHLD);
			lwkt_reltoken(&q->p_token);
			PRELE(q);
		}
	}

	/*
	 * Wait here while in a stopped state, interlocked with lwp_token.
	 * We must break out if the whole process is trying to exit.
	 */
	while (STOPLWP(p, lp)) {
		lp->lwp_stat = LSSTOP;
		tsleep(p, 0, "stop", 0);
	}
	p->p_nstopped--;
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
	crit_exit();
	lwkt_reltoken(&lp->lwp_token);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.  This is a pcpu callout.
 *
 * We segment the lwp scan on a pcpu basis.  This does NOT
 * mean the associated lwps are on this cpu; it is done
 * just to break the work up.
 *
 * The callout on cpu0 rolls up the stats from the other
 * cpus.
 */
static int loadav_count_runnable(struct lwp *p, void *data);

static void
loadav(void *arg)
{
	globaldata_t gd = mycpu;
	struct loadavg *avg;
	int i, nrun;

	nrun = 0;
	alllwp_scan(loadav_count_runnable, &nrun, 1);
	gd->gd_loadav_nrunnable = nrun;
	if (gd->gd_cpuid == 0) {
		avg = &averunnable;
		nrun = 0;
		for (i = 0; i < ncpus; ++i)
			nrun += globaldata_find(i)->gd_loadav_nrunnable;
		for (i = 0; i < 3; i++) {
			avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
			    (long)nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
		}
	}

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronization with processes that
	 * run at regular intervals.
	 */
	callout_reset(&gd->gd_loadav_callout,
		      hz * 4 + (int)(krandom() % (hz * 2 + 1)),
		      loadav, NULL);
}

static int
loadav_count_runnable(struct lwp *lp, void *data)
{
	int *nrunp = data;
	thread_t td;

	switch (lp->lwp_stat) {
	case LSRUN:
		if ((td = lp->lwp_thread) == NULL)
			break;
		if (td->td_flags & TDF_BLOCKED)
			break;
		++*nrunp;
		break;
	default:
		break;
	}
	lwkt_yield();
	return(0);
}

/*
 * Regular data collection
 */
static uint64_t
collect_load_callback(int n)
{
	int fscale = averunnable.fscale;

	return ((averunnable.ldavg[0] * 100 + (fscale >> 1)) / fscale);
}

static void
sched_setup(void *dummy __unused)
{
	globaldata_t save_gd = mycpu;
	globaldata_t gd;
	int n;

	kcollect_register(KCOLLECT_LOAD, "load", collect_load_callback,
			  KCOLLECT_SCALE(KCOLLECT_LOAD_FORMAT, 0));

	/*
	 * Kick off timeout driven events by calling them for the first
	 * time.  We split the work across available cpus to help scale
	 * it; it can eat a lot of cpu when there are a lot of processes
	 * on the system.
	 */
	for (n = 0; n < ncpus; ++n) {
		gd = globaldata_find(n);
		lwkt_setcpu_self(gd);
		callout_init_mp(&gd->gd_loadav_callout);
		callout_init_mp(&gd->gd_schedcpu_callout);
		schedcpu(NULL);
		loadav(NULL);
	}
	lwkt_setcpu_self(save_gd);
}

/*
 * Extremely early initialization, dummy-up the tables so we don't have
 * to conditionalize for NULL in _wakeup() and tsleep_interlock().  Even
 * though the system isn't blocking this early, these functions still
 * try to access the hash table.
 *
 * This setup will be overridden once sched_dyninit() -> sleep_gdinit()
 * is called.
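 *
 * With the dummy setup slpque_tablesize is 1, so LOOKUP() always maps to
 * index 0 and every cpu shares the single dummy queue and cpumask entry.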
 */
void
sleep_early_gdinit(globaldata_t gd)
{
	static struct tslpque dummy_slpque;
	static cpumask_t dummy_cpumasks;

	slpque_tablesize = 1;
	gd->gd_tsleep_hash = &dummy_slpque;
	slpque_cpumasks = &dummy_cpumasks;
	TAILQ_INIT(&dummy_slpque.queue);
}

/*
 * PCPU initialization.  Called after KMALLOC is operational, by
 * sched_dyninit() for cpu 0, and by mi_gdinit() for other cpus later.
 *
 * WARNING! The pcpu hash table is smaller than the global cpumask
 *	    hash table, which can save us a lot of memory when maxproc
 *	    is set high.
 */
void
sleep_gdinit(globaldata_t gd)
{
	struct thread *td;
	size_t hash_size;
	uint32_t n;
	uint32_t i;

	/*
	 * This shouldn't happen, that is, there shouldn't be any threads
	 * waiting on the dummy tsleep queue this early in the boot.
	 */
	if (gd->gd_cpuid == 0) {
		struct tslpque *qp = &gd->gd_tsleep_hash[0];
		TAILQ_FOREACH(td, &qp->queue, td_sleepq) {
			kprintf("SLEEP_GDINIT SWITCH %s\n", td->td_comm);
		}
	}

	/*
	 * Note that we have to allocate one extra slot because we are
	 * shifting a modulo value.  TCHASHSHIFT(slpque_tablesize - 1) can
	 * return the same value as TCHASHSHIFT(slpque_tablesize).
	 */
	n = TCHASHSHIFT(slpque_tablesize) + 1;

	hash_size = sizeof(struct tslpque) * n;
	gd->gd_tsleep_hash = (void *)kmem_alloc3(&kernel_map, hash_size,
						 VM_SUBSYS_GD,
						 KM_CPU(gd->gd_cpuid));
	memset(gd->gd_tsleep_hash, 0, hash_size);
	for (i = 0; i < n; ++i)
		TAILQ_INIT(&gd->gd_tsleep_hash[i].queue);
}

/*
 * Dynamic initialization after the memory system is operational.
 */
static void
sched_dyninit(void *dummy __unused)
{
	int tblsize;
	int tblsize2;
	int n;

	/*
	 * Calculate table size for slpque hash.  We want a prime number
	 * large enough to avoid overloading slpque_cpumasks when the
	 * system has a large number of sleeping processes, which will
	 * spam IPIs on wakeup().
	 *
	 * While it is true this is really a per-lwp factor, generally
	 * speaking the maxproc limit is a good metric to go by.
	 */
	for (tblsize = maxproc | 1; ; tblsize += 2) {
		if (tblsize % 3 == 0)
			continue;
		if (tblsize % 5 == 0)
			continue;
		tblsize2 = (tblsize / 2) | 1;
		for (n = 7; n < tblsize2; n += 2) {
			if (tblsize % n == 0)
				break;
		}
		if (n == tblsize2)
			break;
	}

	/*
	 * PIDs are currently limited to 6 digits.  Cap the table size
	 * at double this.
	 */
	if (tblsize > 2000003)
		tblsize = 2000003;

	slpque_tablesize = tblsize;
	slpque_cpumasks = kmalloc(sizeof(*slpque_cpumasks) * slpque_tablesize,
				  M_TSLEEP, M_WAITOK | M_ZERO);
	sleep_gdinit(mycpu);
}