/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.31 2004/03/30 19:14:11 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/smp.h>

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
int	ncpus;
int	ncpus2, ncpus2_shift, ncpus2_mask;

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
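
/*
 * Illustrative sketch (disabled): how the cexp[] constants above can be
 * reproduced.  With a 5 second sampling interval and averaging windows of
 * 1, 5 and 15 minutes, the per-sample decay factor is exp(-interval/window),
 * i.e. exp(-5/60) = exp(-1/12), exp(-5/300) = exp(-1/60) and exp(-5/900) =
 * exp(-1/180), scaled by FSCALE for fixed point.  The userland check below
 * assumes <math.h>/<stdio.h> and the typical FSCALE of 2048; it is only a
 * sanity check, not part of the kernel.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	static const double windows[3] = { 60.0, 300.0, 900.0 };
	int i;

	for (i = 0; i < 3; ++i) {
		/* per-sample decay factor and its FSCALE fixed point form */
		printf("%.16f * FSCALE = %.0f\n", exp(-5.0 / windows[i]),
		    exp(-5.0 / windows[i]) * 2048.0);
	}
	return (0);
}
#endif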

static void	endtsleep (void *);
static void	loadav (void *arg);
static void	roundrobin (void *arg);
static void	schedcpu (void *arg);
static void	updatepri (struct proc *p);
static void	crit_panicints(void);

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");

int
roundrobin_interval(void)
{
	return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 *
 * WARNING!  The MP lock is not held on ipi message remotes.
 */
#ifdef SMP

static void
roundrobin_remote(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_user_resched();
}

#endif

static void
roundrobin(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_user_resched();
#ifdef SMP
	lwkt_send_ipiq_mask(mycpu->gd_other_cpus, roundrobin_remote, NULL);
#endif
	timeout(roundrobin, NULL, sched_quantum);
}

#ifdef SMP

void
resched_cpus(u_int32_t mask)
{
	lwkt_send_ipiq_mask(mask, roundrobin_remote, NULL);
}

#endif

/*
 * The load average is scaled by FSCALE (2048 typ).  The estimated cpu is
 * incremented at a rate of ESTCPUVFREQ per second (40hz typ), but this is
 * divided up across all cpu bound processes running in the system so an
 * individual process will get less under load.  ESTCPULIM typically caps
 * out at ESTCPUMAX (around 376, or 11 nice levels).
 *
 * Generally speaking the decay equation needs to break-even on growth
 * at the limit at all load levels >= 1.0, so if the estimated cpu for
 * a process increases by (ESTCPUVFREQ / load) per second, then the decay
 * should reach this value when estcpu reaches ESTCPUMAX.  That calculation
 * is:
 *
 *	ESTCPUMAX * decay = ESTCPUVFREQ / load
 *	decay = ESTCPUVFREQ / (load * ESTCPUMAX)
 *	decay = estcpu * 0.053 / load
 *
 * If the load is less than 1.0 we assume a load of 1.0.
 */

#define	cload(loadav)	((loadav) < FSCALE ? FSCALE : (loadav))
#define	decay_cpu(loadav,estcpu)	\
    ((estcpu) * (FSCALE * ESTCPUVFREQ / ESTCPUMAX) / cload(loadav))
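
/*
 * Illustrative sketch (disabled): a floating point re-statement of the
 * decay_cpu() macro above.  cload() clamps the fixed-point load to at least
 * 1.0, and the FSCALE factor in the macro's numerator cancels the FSCALE
 * scaling of loadav, so (modulo integer truncation) the macro computes
 * estcpu * (ESTCPUVFREQ / ESTCPUMAX) / load, which is the decay derived in
 * the comment above.  Reference model only, not kernel code.
 */
#if 0
static double
decay_cpu_ref(double load, double estcpu)
{
	if (load < 1.0)
		load = 1.0;
	return (estcpu * ((double)ESTCPUVFREQ / ESTCPUMAX) / load);
}
#endif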

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
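
/*
 * Illustrative sketch (disabled): a floating point model of the per-second
 * p_pctcpu update performed in schedcpu() below, using the general (#else)
 * form of the formula.  Each second the old estimate is scaled by
 * ccpu = exp(-1/20) and the fraction of the last second actually spent on
 * the cpu (p_cpticks ticks out of ESTCPUFREQ) is blended in.  After 60 idle
 * seconds the old contribution has been multiplied by exp(-1/20)^60 =
 * exp(-3) ~= 0.05, i.e. about 95% of it is gone, which is where the
 * "decay 95% of p_pctcpu in 60 seconds" comment above comes from.
 * Reference model only, not kernel code.
 */
#if 0
static double
pctcpu_ref(double pctcpu, int cpticks)
{
	const double c = 0.95122942450071400909;	/* exp(-1/20) */

	return (pctcpu * c + (1.0 - c) * ((double)cpticks / ESTCPUFREQ));
}
#endif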

/*
 * Recompute process priorities, once a second.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	fixpt_t loadfac = averunnable.ldavg[0];
	struct proc *p;
	int s;
	unsigned int ndecay;

	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splhigh();	/* prevent state changes and protect run queue */
		/*
		 * p_pctcpu is only for ps.
		 */
#if (FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (ESTCPUFREQ == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / ESTCPUFREQ;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / ESTCPUFREQ)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		ndecay = decay_cpu(loadfac, p->p_estcpu);
		if (p->p_estcpu > ndecay)
			p->p_estcpu -= ndecay;
		else
			p->p_estcpu = 0;
		resetpriority(p);
		splx(s);
	}
	wakeup((caddr_t)&lbolt);
	timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(struct proc *p)
{
	unsigned int ndecay;

	ndecay = decay_cpu(averunnable.ldavg[0], p->p_estcpu) * p->p_slptime;
	if (p->p_estcpu > ndecay)
		p->p_estcpu -= ndecay;
	else
		p->p_estcpu = 0;
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
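
/*
 * Illustrative note: LOOKUP() above drops the low 8 bits of the wait channel
 * address and keeps the next 7.  For a hypothetical identifier at address
 * 0x01234567, (0x01234567 >> 8) & 127 == 0x12345 & 0x7f == 0x45, so that
 * sleeper hashes to slpque[69].  Different identifiers in the same 256-byte
 * region therefore share a bucket and are told apart by the td_wchan
 * comparison in _wakeup() below.
 */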

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit(void)
{
	int i;

	sched_quantum = hz/10;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If the flags argument includes PCATCH, signals are
 * checked before and after sleeping; otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is
 * set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 */
int
tsleep(void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* may be NULL */
	int s, sig = 0, catch = flags & PCATCH;
	int id = LOOKUP(ident);
	struct callout_handle thandle;

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		crit_panicints();
		return (0);
	}
	KKASSERT(td != &mycpu->gd_idlethread);	/* you must be kidding! */
	s = splhigh();
	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(p == NULL || p->p_stat == SRUN, ("tsleep %p %s %d",
		ident, wmesg, p->p_stat));

	crit_enter();
	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	if (p) {
		if (flags & PNORESCHED)
			td->td_flags |= TDF_NORESCHED;
		release_curproc(p);
		p->p_slptime = 0;
	}
	lwkt_deschedule_self();
	TAILQ_INSERT_TAIL(&slpque[id], td, td_threadq);
	if (timo)
		thandle = timeout(endtsleep, (void *)td, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from CURSIG.
	 */
	if (p) {
		if (catch) {
			p->p_flag |= P_SINTR;
			if ((sig = CURSIG(p))) {
				if (td->td_wchan) {
					unsleep(td);
					lwkt_schedule_self();
				}
				p->p_stat = SRUN;
				goto resume;
			}
			if (td->td_wchan == NULL) {
				catch = 0;
				goto resume;
			}
		} else {
			sig = 0;
		}

		/*
		 * If we are not the current process we have to remove
		 * ourselves from the run queue.
		 */
		KASSERT(p->p_stat == SRUN, ("PSTAT NOT SRUN %d %d", p->p_pid, p->p_stat));
		/*
		 * If this is the current 'user' process schedule another one.
		 */
		clrrunnable(p, SSLEEP);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		KASSERT(p->p_stat == SRUN, ("tsleep: stat not srun"));
	} else {
		lwkt_switch();
	}
resume:
	crit_exit();
	if (p)
		p->p_flag &= ~P_SINTR;
	splx(s);
	td->td_flags &= ~TDF_NORESCHED;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			return (EWOULDBLOCK);
	} else if (timo) {
		untimeout(endtsleep, (void *)td, thandle);
	} else if (td->td_wmesg) {
		/*
		 * This can happen if a thread is woken up directly.  Clear
		 * wmesg to avoid debugging confusion.
		 */
		td->td_wmesg = NULL;
	}
	/* inline of iscaught() */
	if (p) {
		if (catch && (sig != 0 || (sig = CURSIG(p)))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				return (EINTR);
			return (ERESTART);
		}
	}
	return (0);
}
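
/*
 * Illustrative sketch (disabled): the usual way a consumer pairs tsleep()
 * with wakeup().  The structure, flag and wait message below
 * (example_softc, sc_ready, "exwait") are hypothetical and exist only to
 * show the pattern; the locking a real caller would hold around the
 * predicate (an spl or critical section) is omitted.  The predicate is
 * re-checked in a loop because every thread sleeping on the identifier is
 * woken, and the PCATCH/timeout return values are handled as described in
 * the comment above tsleep().
 */
#if 0
struct example_softc {
	int	sc_ready;	/* predicate, protected by spl/crit in real code */
};

static int
example_wait(struct example_softc *sc)
{
	int error;

	while (sc->sc_ready == 0) {
		/* sleep at most 5 seconds, allow signals to interrupt */
		error = tsleep(&sc->sc_ready, PCATCH, "exwait", 5 * hz);
		if (error == EWOULDBLOCK)
			return (ETIMEDOUT);	/* timeout expired */
		if (error == EINTR || error == ERESTART)
			return (error);		/* interrupted by a signal */
	}
	sc->sc_ready = 0;
	return (0);
}

static void
example_post(struct example_softc *sc)
{
	sc->sc_ready = 1;
	wakeup(&sc->sc_ready);	/* wake all threads sleeping in example_wait() */
}
#endif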

/*
 * Implement the timeout for tsleep.  We interlock against
 * wchan when setting TDF_TIMEOUT.  For processes we remove
 * the sleep if the process is stopped rather than sleeping,
 * so it remains stopped.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct proc *p;
	int s;

	s = splhigh();
	if (td->td_wchan) {
		td->td_flags |= TDF_TIMEOUT;
		if ((p = td->td_proc) != NULL) {
			if (p->p_stat == SSLEEP)
				setrunnable(p);
			else
				unsleep(td);
		} else {
			unsleep(td);
			lwkt_schedule(td);
		}
	}
	splx(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct thread *td)
{
	int s;

	s = splhigh();
	if (td->td_wchan) {
#if 0
		if (p->p_flag & P_XSLEEP) {
			struct xwait *w = p->p_wchan;
			TAILQ_REMOVE(&w->waitq, p, p_procq);
			p->p_flag &= ~P_XSLEEP;
		} else
#endif
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_threadq);
		td->td_wchan = NULL;
	}
	splx(s);
}

#if 0
/*
 * Make all processes sleeping on the explicit lock structure runnable.
 */
void
xwakeup(struct xwait *w)
{
	struct proc *p;
	int s;

	s = splhigh();
	++w->gen;
	while ((p = TAILQ_FIRST(&w->waitq)) != NULL) {
		TAILQ_REMOVE(&w->waitq, p, p_procq);
		KASSERT(p->p_wchan == w && (p->p_flag & P_XSLEEP),
		    ("xwakeup: wchan mismatch for %p (%p/%p) %08x", p, p->p_wchan, w, p->p_flag & P_XSLEEP));
		p->p_wchan = NULL;
		p->p_flag &= ~P_XSLEEP;
		if (p->p_stat == SSLEEP) {
			/* OPTIMIZED EXPANSION OF setrunnable(p); */
			if (p->p_slptime > 1)
				updatepri(p);
			p->p_slptime = 0;
			p->p_stat = SRUN;
			if (p->p_flag & P_INMEM) {
				setrunqueue(p);
			} else {
				p->p_flag |= P_SWAPINREQ;
				wakeup((caddr_t)&proc0);
			}
		}
	}
	splx(s);
}
#endif

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
static void
_wakeup(void *ident, int count)
{
	struct slpquehead *qp;
	struct thread *td;
	struct thread *ntd;
	struct proc *p;
	int s;
	int id = LOOKUP(ident);

	s = splhigh();
	qp = &slpque[id];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_threadq);
		if (td->td_wchan == ident) {
			TAILQ_REMOVE(qp, td, td_threadq);
			td->td_wchan = NULL;
			if ((p = td->td_proc) != NULL && p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					setrunqueue(p);
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			} else if (p == NULL) {
				lwkt_schedule(td);
			}
			if (--count == 0)
				break;
			goto restart;
		}
	}
	splx(s);
}

void
wakeup(void *ident)
{
	_wakeup(ident, 0);
}

void
wakeup_one(void *ident)
{
	_wakeup(ident, 1);
}

/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;	/* XXX */
	struct rlimit *rlim;
	int x;
	u_int64_t ttime;

	/*
	 * XXX this spl is almost unnecessary.  It is partly to allow for
	 * sloppy callers that don't do it (issignal() via CURSIG() is the
	 * main offender).  It is partly to work around a bug in the i386
	 * cpu_switch() (the ipl is not preserved).  We ran for years
	 * without it.  I think there was only an interrupt latency problem.
	 * The main caller, tsleep(), does an splx() a couple of instructions
	 * after calling here.  The buggy caller, issignal(), usually calls
	 * here at spl0() and sometimes returns at splhigh().  The process
	 * then runs for a little too long at splhigh().  The ipl gets fixed
	 * when the process returns to user mode (or earlier).
	 *
	 * It would probably be better to always call here at spl0().  Callers
	 * are prepared to give up control to another process, so they must
	 * be prepared to be interrupted.  The clock stuff here may not
	 * actually need splstatclock().
	 */
	x = splstatclock();

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  Time spent in interrupts is not
	 * included.  YYY 64 bit math is expensive.  Ick.
	 */
	ttime = td->td_sticks + td->td_uticks;
	if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    ttime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * Pick a new current process and record its start time.  If we
	 * are in a SSTOPped state we deschedule ourselves.  YYY this needs
	 * to be cleaned up, remember that LWKTs stay on their run queue
	 * which works differently than the user scheduler which removes
	 * the process from the runq when it runs it.
	 */
	mycpu->gd_cnt.v_swtch++;
	if (p->p_stat == SSTOP)
		lwkt_deschedule_self();
	lwkt_switch();

	splx(x);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p->p_thread);	/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0) {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
}

/*
 * Change the process state to NOT be runnable, removing it from the run
 * queue.
 */
void
clrrunnable(struct proc *p, int stat)
{
	crit_enter_quick(p->p_thread);
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ))
		remrunqueue(p);
	p->p_stat = stat;
	crit_exit_quick(p->p_thread);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	unsigned int newpriority;
	int opq;
	int npq;

	/*
	 * Set p_priority for general process comparisons
	 */
	switch(p->p_rtprio.type) {
	case RTP_PRIO_REALTIME:
		p->p_priority = PRIBASE_REALTIME + p->p_rtprio.prio;
		return;
	case RTP_PRIO_NORMAL:
		break;
	case RTP_PRIO_IDLE:
		p->p_priority = PRIBASE_IDLE + p->p_rtprio.prio;
		return;
	case RTP_PRIO_THREAD:
		p->p_priority = PRIBASE_THREAD + p->p_rtprio.prio;
		return;
	}

	/*
	 * NORMAL priorities fall through.  These are based on niceness
	 * and cpu use.
	 */
	newpriority = NICE_ADJUST(p->p_nice - PRIO_MIN) +
			p->p_estcpu / ESTCPURAMP;
	newpriority = min(newpriority, MAXPRI);
	npq = newpriority / PPQ;
	crit_enter();
	opq = (p->p_priority & PRIMASK) / PPQ;
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ) && opq != npq) {
		/*
		 * We have to move the process to another queue
		 */
		remrunqueue(p);
		p->p_priority = PRIBASE_NORMAL + newpriority;
		setrunqueue(p);
	} else {
		/*
		 * We can just adjust the priority and it will be picked
		 * up later.
		 */
		KKASSERT(opq == npq || (p->p_flag & P_ONRUNQ) == 0);
		p->p_priority = PRIBASE_NORMAL + newpriority;
	}
	crit_exit();
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;

	avg = &averunnable;
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		switch (p->p_stat) {
		case SRUN:
		case SIDL:
			nrun++;
		}
	}
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}
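
/*
 * Illustrative sketch (disabled): the update in loadav() above is a
 * fixed-point form of the usual exponentially weighted moving average,
 * load = load * cexp + nrun * (1 - cexp), applied once per (roughly) five
 * second sample with cexp = exp(-interval/window).  The extra FSCALE
 * factors in the kernel code only convert nrun and the coefficients into
 * the fixpt_t representation.  Reference model only, not kernel code.
 */
#if 0
static void
loadav_ref(double ldavg[3], int nrun)
{
	/* per-sample decay factors for the 1, 5 and 15 minute windows */
	static const double coef[3] = {
		0.9200444146293232,	/* exp(-1/12) */
		0.9834714538216174,	/* exp(-1/60) */
		0.9944598480048967,	/* exp(-1/180) */
	};
	int i;

	for (i = 0; i < 3; i++)
		ldavg[i] = ldavg[i] * coef[i] + nrun * (1.0 - coef[i]);
}
#endif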

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	callout_init(&loadav_callout);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
	loadav(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).
 *
 * The cpu usage estimator ramps up quite quickly when the process is
 * running (linearly), and decays away exponentially, at a rate which
 * is proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of CPU
 * time in 5 * loadav seconds.  This causes the system to favor processes
 * which haven't run much recently, and to round-robin among other processes.
 *
 * The actual schedulerclock interrupt rate is ESTCPUFREQ, but we generally
 * want to ramp-up at a faster rate, ESTCPUVFREQ, so p_estcpu is scaled
 * by (ESTCPUVFREQ / ESTCPUFREQ).  You can control the ramp-up/ramp-down
 * rate by adjusting ESTCPUVFREQ in sys/proc.h in integer multiples
 * of ESTCPUFREQ.
 *
 * WARNING! called from a fast-int or an IPI, the MP lock MIGHT NOT BE HELD
 * and we cannot block.
 */
void
schedulerclock(void *dummy)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	if ((p = td->td_proc) != NULL) {
		p->p_cpticks++;		/* cpticks runs at ESTCPUFREQ */
		p->p_estcpu = ESTCPULIM(p->p_estcpu + ESTCPUVFREQ / ESTCPUFREQ);
		if (try_mplock()) {
			resetpriority(p);
			rel_mplock();
		}
	}
}

static
void
crit_panicints(void)
{
	int s;
	int cpri;

	s = splhigh();
	cpri = crit_panic_save();
	splx(safepri);
	crit_panic_restore(cpri);
	splx(s);
}