/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.30 2004/03/20 19:16:24 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/smp.h>

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
int	ncpus;
int	ncpus2, ncpus2_shift, ncpus2_mask;

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
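 *
 * Each constant is the per-sample decay factor exp(-t/T) for the sample
 * interval t = 5 seconds and averaging period T: for the 1 minute average
 * exp(-5/60) = exp(-1/12), for 5 minutes exp(-5/300) = exp(-1/60), and for
 * 15 minutes exp(-5/900) = exp(-1/180), each scaled by FSCALE.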
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

static void	endtsleep (void *);
static void	loadav (void *arg);
static void	roundrobin (void *arg);
static void	schedcpu (void *arg);
static void	updatepri (struct proc *p);
static void	crit_panicints(void);

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");

int
roundrobin_interval(void)
{
	return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 *
 * WARNING! The MP lock is not held on ipi message remotes.
 */
#ifdef SMP

static void
roundrobin_remote(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_resched();
}

#endif

static void
roundrobin(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_resched();
#ifdef SMP
	lwkt_send_ipiq_mask(mycpu->gd_other_cpus, roundrobin_remote, NULL);
#endif
	timeout(roundrobin, NULL, sched_quantum);
}

#ifdef SMP

void
resched_cpus(u_int32_t mask)
{
	lwkt_send_ipiq_mask(mask, roundrobin_remote, NULL);
}

#endif

/*
 * The load average is scaled by FSCALE (2048 typ). The estimated cpu is
 * incremented at a rate of ESTCPUFREQ per second, but this is
 * divided up across all cpu bound processes running in the system so an
 * individual process will get less under load.
 *
 * We want to decay estcpu by 18% per second, but we have to scale to the
 * load to avoid overpowering the estcpu aggregation. To stabilize the
 * equation under low loads we make everything relative to a load average
 * of 1.0.
 *
 *	estcpu -= estcpu * 0.18 / loadav			base equation
 *	estcpu -= (estcpu + ESTCPUFREQ) * 0.18 / (loadav + 1)	supplemented
 *
 * Note: 0.18 = 100/555
 */

#define decay_cpu(loadav,estcpu) \
    (((estcpu + ESTCPUFREQ) * (100 * FSCALE / 555)) / ((loadav) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
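 *
 * In other words, 0.0488 is roughly 100 / 2^11, so when ESTCPUFREQ is 100
 * the multiply by (FSCALE - ccpu) and the divide by ESTCPUFREQ in
 * schedcpu() reduce to a single shift by (FSHIFT - CCPU_SHIFT).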
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	fixpt_t loadfac = averunnable.ldavg[0];
	struct proc *p;
	int s;
	unsigned int ndecay;

	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping). We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splhigh();	/* prevent state changes and protect run queue */
		/*
		 * p_pctcpu is only for ps.
		 */
#if (FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (ESTCPUFREQ == 100) ?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / ESTCPUFREQ;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / ESTCPUFREQ)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		ndecay = decay_cpu(loadfac, p->p_estcpu);
		if (p->p_estcpu > ndecay)
			p->p_estcpu -= ndecay;
		else
			p->p_estcpu = 0;
		resetpriority(p);
		splx(s);
	}
	wakeup((caddr_t)&lbolt);
	timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(struct proc *p)
{
	unsigned int ndecay;

	ndecay = decay_cpu(averunnable.ldavg[0], p->p_estcpu) * p->p_slptime;
	if (p->p_estcpu > ndecay)
		p->p_estcpu -= ndecay;
	else
		p->p_estcpu = 0;
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2. Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit(void)
{
	int i;

	sched_quantum = hz / 10;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call. Suspends the current process until a wakeup is
 * performed on the specified identifier. The process will then be made
 * runnable with the specified priority. Sleeps at most timo/hz seconds
 * (0 means no timeout).
 * If flags includes the PCATCH flag, signals are checked
 * before and after sleeping; otherwise signals are not checked. Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * If the process has P_CURPROC set mi_switch() will not re-queue it to
 * the userland scheduler queues because we are in a SSLEEP state. If
 * we are not the current process then we have to remove ourselves from
 * the scheduler queues.
 *
 * YYY priority now unused
 */
int
tsleep(void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* may be NULL */
	int s, sig = 0, catch = flags & PCATCH;
	int id = LOOKUP(ident);
	struct callout_handle thandle;

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable. Just scrap it for now.
	 */
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		crit_panicints();
		return (0);
	}
	KKASSERT(td != &mycpu->gd_idlethread);	/* you must be kidding! */
	s = splhigh();
	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(p == NULL || p->p_stat == SRUN, ("tsleep %p %s %d",
		ident, wmesg, p->p_stat));

	crit_enter();
	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	if (p)
		p->p_slptime = 0;
	lwkt_deschedule_self();
	TAILQ_INSERT_TAIL(&slpque[id], td, td_threadq);
	if (timo)
		thandle = timeout(endtsleep, (void *)td, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called. If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from CURSIG.
	 */
	if (p) {
		if (catch) {
			p->p_flag |= P_SINTR;
			if ((sig = CURSIG(p))) {
				if (td->td_wchan) {
					unsleep(td);
					lwkt_schedule_self();
				}
				p->p_stat = SRUN;
				goto resume;
			}
			if (td->td_wchan == NULL) {
				catch = 0;
				goto resume;
			}
		} else {
			sig = 0;
		}

		/*
		 * If we are not the current process we have to remove ourself
		 * from the run queue.
		 */
		KASSERT(p->p_stat == SRUN, ("PSTAT NOT SRUN %d %d", p->p_pid, p->p_stat));
		/*
		 * If this is the current 'user' process schedule another one.
		 */
		clrrunnable(p, SSLEEP);
		p->p_stats->p_ru.ru_nvcsw++;
		KKASSERT(td->td_release || (p->p_flag & P_CURPROC) == 0);
		mi_switch();
		KASSERT(p->p_stat == SRUN, ("tsleep: stat not srun"));
	} else {
		lwkt_switch();
	}
resume:
	crit_exit();
	if (p)
		p->p_flag &= ~P_SINTR;
	splx(s);
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			return (EWOULDBLOCK);
	} else if (timo) {
		untimeout(endtsleep, (void *)td, thandle);
	} else if (td->td_wmesg) {
		/*
		 * This can happen if a thread is woken up directly.
		 * Clear wmesg to avoid debugging confusion.
		 */
		td->td_wmesg = NULL;
	}
	/* inline of iscaught() */
	if (p) {
		if (catch && (sig != 0 || (sig = CURSIG(p)))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				return (EINTR);
			return (ERESTART);
		}
	}
	return (0);
}

/*
 * Implement the timeout for tsleep. We interlock against
 * wchan when setting TDF_TIMEOUT. For processes we remove
 * the sleep if the process is stopped rather than sleeping,
 * so it remains stopped.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct proc *p;
	int s;

	s = splhigh();
	if (td->td_wchan) {
		td->td_flags |= TDF_TIMEOUT;
		if ((p = td->td_proc) != NULL) {
			if (p->p_stat == SSLEEP)
				setrunnable(p);
			else
				unsleep(td);
		} else {
			unsleep(td);
			lwkt_schedule(td);
		}
	}
	splx(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct thread *td)
{
	int s;

	s = splhigh();
	if (td->td_wchan) {
#if 0
		if (p->p_flag & P_XSLEEP) {
			struct xwait *w = p->p_wchan;
			TAILQ_REMOVE(&w->waitq, p, p_procq);
			p->p_flag &= ~P_XSLEEP;
		} else
#endif
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_threadq);
		td->td_wchan = NULL;
	}
	splx(s);
}

#if 0
/*
 * Make all processes sleeping on the explicit lock structure runnable.
 */
void
xwakeup(struct xwait *w)
{
	struct proc *p;
	int s;

	s = splhigh();
	++w->gen;
	while ((p = TAILQ_FIRST(&w->waitq)) != NULL) {
		TAILQ_REMOVE(&w->waitq, p, p_procq);
		KASSERT(p->p_wchan == w && (p->p_flag & P_XSLEEP),
		    ("xwakeup: wchan mismatch for %p (%p/%p) %08x",
		    p, p->p_wchan, w, p->p_flag & P_XSLEEP));
		p->p_wchan = NULL;
		p->p_flag &= ~P_XSLEEP;
		if (p->p_stat == SSLEEP) {
			/* OPTIMIZED EXPANSION OF setrunnable(p); */
			if (p->p_slptime > 1)
				updatepri(p);
			p->p_slptime = 0;
			p->p_stat = SRUN;
			if (p->p_flag & P_INMEM) {
				setrunqueue(p);
			} else {
				p->p_flag |= P_SWAPINREQ;
				wakeup((caddr_t)&proc0);
			}
		}
	}
	splx(s);
}
#endif

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
static void
_wakeup(void *ident, int count)
{
	struct slpquehead *qp;
	struct thread *td;
	struct thread *ntd;
	struct proc *p;
	int s;
	int id = LOOKUP(ident);

	s = splhigh();
	qp = &slpque[id];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_threadq);
		if (td->td_wchan == ident) {
			TAILQ_REMOVE(qp, td, td_threadq);
			td->td_wchan = NULL;
			if ((p = td->td_proc) != NULL && p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					setrunqueue(p);
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			} else if (p == NULL) {
				lwkt_schedule(td);
			}
			if (--count == 0)
				break;
			goto restart;
		}
	}
	splx(s);
}

void
wakeup(void *ident)
{
	_wakeup(ident, 0);
}

void
wakeup_one(void *ident)
{
	_wakeup(ident, 1);
}

/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
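 *
 * mi_switch() enforces the RLIMIT_CPU resource limit on the current
 * process, deschedules the current thread if the process is SSTOPped,
 * and then hands control to the LWKT scheduler via lwkt_switch().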
 */
void
mi_switch()
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;	/* XXX */
	struct rlimit *rlim;
	int x;
	u_int64_t ttime;

	/*
	 * XXX this spl is almost unnecessary. It is partly to allow for
	 * sloppy callers that don't do it (issignal() via CURSIG() is the
	 * main offender). It is partly to work around a bug in the i386
	 * cpu_switch() (the ipl is not preserved). We ran for years
	 * without it. I think there was only an interrupt latency problem.
	 * The main caller, tsleep(), does an splx() a couple of instructions
	 * after calling here. The buggy caller, issignal(), usually calls
	 * here at spl0() and sometimes returns at splhigh(). The process
	 * then runs for a little too long at splhigh(). The ipl gets fixed
	 * when the process returns to user mode (or earlier).
	 *
	 * It would probably be better to always call here at spl0(). Callers
	 * are prepared to give up control to another process, so they must
	 * be prepared to be interrupted. The clock stuff here may not
	 * actually need splstatclock().
	 */
	x = splstatclock();
	clear_resched();

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it. Time spent in interrupts is not
	 * included. YYY 64 bit math is expensive. Ick.
	 */
	ttime = td->td_sticks + td->td_uticks;
	if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    ttime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * Pick a new current process and record its start time. If we
	 * are in a SSTOPped state we deschedule ourselves. YYY this needs
	 * to be cleaned up, remember that LWKTs stay on their run queue
	 * which works differently than the user scheduler which removes
	 * the process from the runq when it runs it.
	 */
	mycpu->gd_cnt.v_swtch++;
	if (p->p_stat == SSTOP)
		lwkt_deschedule_self();
	lwkt_switch();

	splx(x);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p->p_thread);	/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0) {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
}

/*
 * Change the process state to NOT be runnable, removing it from the run
 * queue. If P_CURPROC is not set and we are in SRUN the process is on the
 * run queue (if P_INMEM is not set then it isn't, because it is swapped).
 */
void
clrrunnable(struct proc *p, int stat)
{
	int s;

	s = splhigh();
	switch (p->p_stat) {
	case SRUN:
		if (p->p_flag & P_ONRUNQ)
			remrunqueue(p);
		break;
	default:
		break;
	}
	p->p_stat = stat;
	splx(s);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	unsigned int newpriority;
	int opq;
	int npq;

	/*
	 * Set p_priority for general process comparisons
	 */
	switch (p->p_rtprio.type) {
	case RTP_PRIO_REALTIME:
		p->p_priority = PRIBASE_REALTIME + p->p_rtprio.prio;
		return;
	case RTP_PRIO_NORMAL:
		break;
	case RTP_PRIO_IDLE:
		p->p_priority = PRIBASE_IDLE + p->p_rtprio.prio;
		return;
	case RTP_PRIO_THREAD:
		p->p_priority = PRIBASE_THREAD + p->p_rtprio.prio;
		return;
	}

	/*
	 * NORMAL priorities fall through. These are based on niceness
	 * and cpu use.
	 */
	newpriority = NICE_ADJUST(p->p_nice - PRIO_MIN) +
			p->p_estcpu / ESTCPURAMP;
	newpriority = min(newpriority, MAXPRI);
	npq = newpriority / PPQ;
	crit_enter();
	opq = (p->p_priority & PRIMASK) / PPQ;
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ) && opq != npq) {
		/*
		 * We have to move the process to another queue
		 */
		remrunqueue(p);
		p->p_priority = PRIBASE_NORMAL + newpriority;
		setrunqueue(p);
	} else {
		/*
		 * We can just adjust the priority and it will be picked
		 * up later.
		 */
		KKASSERT(opq == npq || (p->p_flag & P_ONRUNQ) == 0);
		p->p_priority = PRIBASE_NORMAL + newpriority;
	}
	crit_exit();
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;

	avg = &averunnable;
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		switch (p->p_stat) {
		case SRUN:
		case SIDL:
			nrun++;
		}
	}
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	callout_init(&loadav_callout);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
	loadav(NULL);
}

/*
 * We adjust the priority of the current process. The priority of
 * a process gets worse as it accumulates CPU time. The cpu usage
 * estimator (p_estcpu) is increased here. resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).
 *
 * The cpu usage estimator ramps up quite quickly when the process is
 * running (linearly), and decays away exponentially, at a rate which
 * is proportionally slower when the system is busy. The basic principle
 * is that the system will 90% forget that the process used a lot of CPU
 * time in 5 * loadav seconds.
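 * (For example, at a load average of 1.0 most of the accumulated usage is
 * forgotten after roughly five seconds, while at a load average of 3.0 it
 * takes about fifteen seconds.)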
 * This causes the system to favor processes
 * which haven't run much recently, and to round-robin among other processes.
 *
 * WARNING! called from a fast-int or an IPI, the MP lock MIGHT NOT BE HELD
 * and we cannot block.
 */
void
schedulerclock(void *dummy)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	if ((p = td->td_proc) != NULL) {
		p->p_cpticks++;
		p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
		if ((p->p_estcpu % PPQ) == 0 && try_mplock()) {
			resetpriority(p);
			rel_mplock();
		}
	}
}

static
void
crit_panicints(void)
{
	int s;
	int cpri;

	s = splhigh();
	cpri = crit_panic_save();
	splx(safepri);
	crit_panic_restore(cpri);
	splx(s);
}