/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_synch.c        8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.34 2004/07/24 20:21:35 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/xwait.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/smp.h>

static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int     hogticks;
int     lbolt;
int     sched_quantum;          /* Roundrobin scheduling quantum in ticks. */
int     ncpus;
int     ncpus2, ncpus2_shift, ncpus2_mask;

static struct callout loadav_callout;

struct loadavg averunnable =
        { {0, 0, 0}, FSCALE };  /* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
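/*
 * A rough worked example, assuming the typical FSCALE of 2048: the
 * 1-minute constant exp(-1/12) ~= 0.9200 stores as about 1884, the
 * 5-minute constant exp(-1/60) ~= 0.9835 as about 2014, and the
 * 15-minute constant exp(-1/180) ~= 0.9945 as about 2037.  The closer a
 * constant sits to FSCALE, the more slowly the corresponding average
 * reacts to changes in the run queue; see loadav() below for the update.
 */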
static fixpt_t cexp[3] = {
        0.9200444146293232 * FSCALE,    /* exp(-1/12) */
        0.9834714538216174 * FSCALE,    /* exp(-1/60) */
        0.9944598480048967 * FSCALE,    /* exp(-1/180) */
};

static void     endtsleep (void *);
static void     loadav (void *arg);
static void     roundrobin (void *arg);
static void     schedcpu (void *arg);
static void     updatepri (struct proc *p);
static void     crit_panicints(void);

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
        int error, new_val;

        new_val = sched_quantum * tick;
        error = sysctl_handle_int(oidp, &new_val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        if (new_val < tick)
                return (EINVAL);
        sched_quantum = new_val / tick;
        hogticks = 2 * sched_quantum;
        return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
        0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");

int
roundrobin_interval(void)
{
        return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 *
 * WARNING! The MP lock is not held on ipi message remotes.
 */
#ifdef SMP

static void
roundrobin_remote(void *arg)
{
        struct proc *p = lwkt_preempted_proc();
        if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
                need_user_resched();
}

#endif

static void
roundrobin(void *arg)
{
        struct proc *p = lwkt_preempted_proc();
        if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
                need_user_resched();
#ifdef SMP
        lwkt_send_ipiq_mask(mycpu->gd_other_cpus, roundrobin_remote, NULL);
#endif
        timeout(roundrobin, NULL, sched_quantum);
}

#ifdef SMP

void
resched_cpus(u_int32_t mask)
{
        lwkt_send_ipiq_mask(mask, roundrobin_remote, NULL);
}

#endif

/*
 * The load average is scaled by FSCALE (2048 typ).  The estimated cpu is
 * incremented at a rate of ESTCPUVFREQ per second (40hz typ), but this is
 * divided up across all cpu bound processes running in the system so an
 * individual process will get less under load.  ESTCPULIM typically caps
 * out at ESTCPUMAX (around 376, or 11 nice levels).
 *
 * Generally speaking the decay equation needs to break-even on growth
 * at the limit at all load levels >= 1.0, so if the estimated cpu for
 * a process increases by (ESTCPUVFREQ / load) per second, then the decay
 * should reach this value when estcpu reaches ESTCPUMAX.  That calculation
 * is:
 *
 *      ESTCPUMAX * decay = ESTCPUVFREQ / load
 *      decay = ESTCPUVFREQ / (load * ESTCPUMAX)
 *      decay = estcpu * 0.053 / load
 *
 * If the load is less than 1.0 we assume a load of 1.0.
 */

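/*
 * A rough sanity check of the break-even claim above, assuming a load of
 * exactly 1.0 (cload() below clamps anything smaller up to FSCALE):
 *
 *      decay_cpu(FSCALE, estcpu) = estcpu * ESTCPUVFREQ / ESTCPUMAX
 *
 * so a process sitting at estcpu == ESTCPUMAX loses about ESTCPUVFREQ
 * units per once-a-second schedcpu() pass, matching the growth rate
 * described above.  Higher loads divide the decay accordingly.
 */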
#define cload(loadav)   ((loadav) < FSCALE ? FSCALE : (loadav))
#define decay_cpu(loadav,estcpu)        \
    ((estcpu) * (FSCALE * ESTCPUVFREQ / ESTCPUMAX) / cload(loadav))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int      fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *      1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define CCPU_SHIFT      11

/*
 * Recompute process priorities, once a second.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
        fixpt_t loadfac = averunnable.ldavg[0];
        struct proc *p;
        int s;
        unsigned int ndecay;

        FOREACH_PROC_IN_SYSTEM(p) {
                /*
                 * Increment time in/out of memory and sleep time
                 * (if sleeping).  We ignore overflow; with 16-bit int's
                 * (remember them?) overflow takes 45 days.
                 */
                p->p_swtime++;
                if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
                        p->p_slptime++;
                p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

                /*
                 * If the process has slept the entire second,
                 * stop recalculating its priority until it wakes up.
                 *
                 * Note that interactive calculations do not occur for
                 * long sleeps (because that isn't necessarily indicative
                 * of an interactive process).
                 */
                if (p->p_slptime > 1)
                        continue;
                /* prevent state changes and protect run queue */
                s = splhigh();
                /*
                 * p_cpticks runs at ESTCPUFREQ but must be divided by the
                 * load average for par-100% use.  Higher p_interactive
                 * values mean less interactive, lower values mean more
                 * interactive.
                 */
                if ((((fixpt_t)p->p_cpticks * cload(loadfac)) >> FSHIFT) >
                    ESTCPUFREQ / 4) {
                        if (p->p_interactive < 127)
                                ++p->p_interactive;
                } else {
                        if (p->p_interactive > -127)
                                --p->p_interactive;
                }
                /*
                 * p_pctcpu is only for ps.
                 */
#if (FSHIFT >= CCPU_SHIFT)
                p->p_pctcpu += (ESTCPUFREQ == 100) ?
                    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
                    100 * (((fixpt_t) p->p_cpticks)
                    << (FSHIFT - CCPU_SHIFT)) / ESTCPUFREQ;
#else
                p->p_pctcpu += ((FSCALE - ccpu) *
                    (p->p_cpticks * FSCALE / ESTCPUFREQ)) >> FSHIFT;
#endif
                p->p_cpticks = 0;
                ndecay = decay_cpu(loadfac, p->p_estcpu);
                if (p->p_estcpu > ndecay)
                        p->p_estcpu -= ndecay;
                else
                        p->p_estcpu = 0;
                resetpriority(p);
                splx(s);
        }
        wakeup((caddr_t)&lbolt);
        timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
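/*
 * Illustrative only, assuming a load average of 1.0: each second of
 * p_slptime contributes one decay_cpu() step, so the subtraction below
 * removes roughly (p_estcpu * ESTCPUVFREQ / ESTCPUMAX) * p_slptime from
 * the estimate, clamped at zero.
 */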
static void
updatepri(struct proc *p)
{
        unsigned int ndecay;

        ndecay = decay_cpu(averunnable.ldavg[0], p->p_estcpu) * p->p_slptime;
        if (p->p_estcpu > ndecay)
                p->p_estcpu -= ndecay;
        else
                p->p_estcpu = 0;
        resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE       128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)       (((intptr_t)(x) >> 8) & (TABLESIZE - 1))

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit(void)
{
        int i;

        sched_quantum = hz/10;
        hogticks = 2 * sched_quantum;
        for (i = 0; i < TABLESIZE; i++)
                TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal
 * (return EINTR).
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 */
int
tsleep(void *ident, int flags, const char *wmesg, int timo)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;           /* may be NULL */
        int sig = 0, catch = flags & PCATCH;
        int id = LOOKUP(ident);
        struct callout_handle thandle;

        /*
         * NOTE: removed KTRPOINT, it could cause races due to blocking
         * even in stable.  Just scrap it for now.
         */
        if (cold || panicstr) {
                /*
                 * After a panic, or during autoconfiguration,
                 * just give interrupts a chance, then just return;
                 * don't run any other procs or panic below,
                 * in case this is the idle process and already asleep.
                 */
                crit_panicints();
                return (0);
        }
        KKASSERT(td != &mycpu->gd_idlethread);  /* you must be kidding! */
        crit_enter_quick(td);
        KASSERT(ident != NULL, ("tsleep: no ident"));
        KASSERT(p == NULL || p->p_stat == SRUN, ("tsleep %p %s %d",
            ident, wmesg, p->p_stat));

        td->td_wchan = ident;
        td->td_wmesg = wmesg;
        if (p) {
                if (flags & PNORESCHED)
                        td->td_flags |= TDF_NORESCHED;
                release_curproc(p);
                p->p_slptime = 0;
        }
        lwkt_deschedule_self(td);
        TAILQ_INSERT_TAIL(&slpque[id], td, td_threadq);
        if (timo)
                thandle = timeout(endtsleep, (void *)td, timo);
        /*
         * We put ourselves on the sleep queue and start our timeout
         * before calling CURSIG, as we could stop there, and a wakeup
         * or a SIGCONT (or both) could occur while we were stopped.
         * A SIGCONT would cause us to be marked as SSLEEP
         * without resuming us, thus we must be ready for sleep
         * when CURSIG is called.  If the wakeup happens while we're
         * stopped, td->td_wchan will be 0 upon return from CURSIG.
         */
        if (p) {
                if (catch) {
                        p->p_flag |= P_SINTR;
                        if ((sig = CURSIG(p))) {
                                if (td->td_wchan) {
                                        unsleep(td);
                                        lwkt_schedule_self(td);
                                }
                                p->p_stat = SRUN;
                                goto resume;
                        }
                        if (td->td_wchan == NULL) {
                                catch = 0;
                                goto resume;
                        }
                } else {
                        sig = 0;
                }

                /*
                 * If we are not the current process we have to remove
                 * ourselves from the run queue.
                 */
                KASSERT(p->p_stat == SRUN, ("PSTAT NOT SRUN %d %d", p->p_pid, p->p_stat));
                /*
                 * If this is the current 'user' process schedule another one.
                 */
                clrrunnable(p, SSLEEP);
                p->p_stats->p_ru.ru_nvcsw++;
                mi_switch(p);
                KASSERT(p->p_stat == SRUN, ("tsleep: stat not srun"));
        } else {
                lwkt_switch();
        }
resume:
        if (p)
                p->p_flag &= ~P_SINTR;
        crit_exit_quick(td);
        td->td_flags &= ~TDF_NORESCHED;
        if (td->td_flags & TDF_TIMEOUT) {
                td->td_flags &= ~TDF_TIMEOUT;
                if (sig == 0)
                        return (EWOULDBLOCK);
        } else if (timo) {
                untimeout(endtsleep, (void *)td, thandle);
        } else if (td->td_wmesg) {
                /*
                 * This can happen if a thread is woken up directly.  Clear
                 * wmesg to avoid debugging confusion.
                 */
                td->td_wmesg = NULL;
        }
        /* inline of iscaught() */
        if (p) {
                if (catch && (sig != 0 || (sig = CURSIG(p)))) {
                        if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
                                return (EINTR);
                        return (ERESTART);
                }
        }
        return (0);
}

/*
 * Implement the timeout for tsleep.  We interlock against
 * wchan when setting TDF_TIMEOUT.  For processes we remove
 * the sleep if the process is stopped rather than sleeping,
 * so it remains stopped.
 */
static void
endtsleep(void *arg)
{
        thread_t td = arg;
        struct proc *p;

        crit_enter();
        if (td->td_wchan) {
                td->td_flags |= TDF_TIMEOUT;
                if ((p = td->td_proc) != NULL) {
                        if (p->p_stat == SSLEEP)
                                setrunnable(p);
                        else
                                unsleep(td);
                } else {
                        unsleep(td);
                        lwkt_schedule(td);
                }
        }
        crit_exit();
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct thread *td)
{
        crit_enter();
        if (td->td_wchan) {
#if 0
                if (p->p_flag & P_XSLEEP) {
                        struct xwait *w = p->p_wchan;
                        TAILQ_REMOVE(&w->waitq, p, p_procq);
                        p->p_flag &= ~P_XSLEEP;
                } else
#endif
                TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_threadq);
                td->td_wchan = NULL;
        }
        crit_exit();
}

#if 0
/*
 * Make all processes sleeping on the explicit lock structure runnable.
 */
void
xwakeup(struct xwait *w)
{
        struct proc *p;

        crit_enter();
        ++w->gen;
        while ((p = TAILQ_FIRST(&w->waitq)) != NULL) {
                TAILQ_REMOVE(&w->waitq, p, p_procq);
                KASSERT(p->p_wchan == w && (p->p_flag & P_XSLEEP),
                    ("xwakeup: wchan mismatch for %p (%p/%p) %08x", p, p->p_wchan, w, p->p_flag & P_XSLEEP));
                p->p_wchan = NULL;
                p->p_flag &= ~P_XSLEEP;
                if (p->p_stat == SSLEEP) {
                        /* OPTIMIZED EXPANSION OF setrunnable(p); */
                        if (p->p_slptime > 1)
                                updatepri(p);
                        p->p_slptime = 0;
                        p->p_stat = SRUN;
                        if (p->p_flag & P_INMEM) {
                                lwkt_schedule(td);
                        } else {
                                p->p_flag |= P_SWAPINREQ;
                                wakeup((caddr_t)&proc0);
                        }
                }
        }
        crit_exit();
}
#endif

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
static void
_wakeup(void *ident, int count)
{
        struct slpquehead *qp;
        struct thread *td;
        struct thread *ntd;
        struct proc *p;
        int id = LOOKUP(ident);

        crit_enter();
        qp = &slpque[id];
restart:
        for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
                ntd = TAILQ_NEXT(td, td_threadq);
                if (td->td_wchan == ident) {
                        TAILQ_REMOVE(qp, td, td_threadq);
                        td->td_wchan = NULL;
                        if ((p = td->td_proc) != NULL && p->p_stat == SSLEEP) {
                                /* OPTIMIZED EXPANSION OF setrunnable(p); */
                                if (p->p_slptime > 1)
                                        updatepri(p);
                                p->p_slptime = 0;
                                p->p_stat = SRUN;
                                if (p->p_flag & P_INMEM) {
                                        /*
                                         * LWKT scheduled now, there is no
                                         * userland runq interaction until
                                         * the thread tries to return to user
                                         * mode.
                                         *
                                         * setrunqueue(p);
                                         */
                                        lwkt_schedule(td);
                                } else {
                                        p->p_flag |= P_SWAPINREQ;
                                        wakeup((caddr_t)&proc0);
                                }
                                /* END INLINE EXPANSION */
                        } else if (p == NULL) {
                                lwkt_schedule(td);
                        }
                        if (--count == 0)
                                break;
                        goto restart;
                }
        }
        crit_exit();
}

void
wakeup(void *ident)
{
        _wakeup(ident, 0);
}

void
wakeup_one(void *ident)
{
        _wakeup(ident, 1);
}

/*
 * The machine independent parts of mi_switch().
 *
 * 'p' must be the current process.
 */
void
mi_switch(struct proc *p)
{
        thread_t td = p->p_thread;
        struct rlimit *rlim;
        u_int64_t ttime;

        KKASSERT(td == mycpu->gd_curthread);

        crit_enter_quick(td);

        /*
         * Check if the process exceeds its cpu resource allocation.
         * If over max, kill it.  Time spent in interrupts is not
         * included.  YYY 64 bit math is expensive.  Ick.
         */
        ttime = td->td_sticks + td->td_uticks;
        if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
            ttime > p->p_limit->p_cpulimit) {
                rlim = &p->p_rlimit[RLIMIT_CPU];
                if (ttime / (rlim_t)1000000 >= rlim->rlim_max) {
                        killproc(p, "exceeded maximum CPU limit");
                } else {
                        psignal(p, SIGXCPU);
                        if (rlim->rlim_cur < rlim->rlim_max) {
                                /* XXX: we should make a private copy */
                                rlim->rlim_cur += 5;
                        }
                }
        }

        /*
         * If we are in a SSTOPped state we deschedule ourselves.
         * YYY this needs to be cleaned up, remember that LWKTs stay on
         * their run queue which works differently than the user scheduler
         * which removes the process from the runq when it runs it.
         */
        mycpu->gd_cnt.v_swtch++;
        if (p->p_stat == SSTOP)
                lwkt_deschedule_self(td);
        lwkt_switch();
        crit_exit_quick(td);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
        int s;

        s = splhigh();
        switch (p->p_stat) {
        case 0:
        case SRUN:
        case SZOMB:
        default:
                panic("setrunnable");
        case SSTOP:
        case SSLEEP:
                unsleep(p->p_thread);   /* e.g. when sending signals */
                break;

        case SIDL:
                break;
        }
        p->p_stat = SRUN;

        /*
         * The process is controlled by LWKT at this point, we do not mess
         * around with the userland scheduler until the thread tries to
         * return to user mode.
         */
#if 0
        if (p->p_flag & P_INMEM)
                setrunqueue(p);
#endif
        if (p->p_flag & P_INMEM)
                lwkt_schedule(p->p_thread);
        splx(s);
        if (p->p_slptime > 1)
                updatepri(p);
        p->p_slptime = 0;
        if ((p->p_flag & P_INMEM) == 0) {
                p->p_flag |= P_SWAPINREQ;
                wakeup((caddr_t)&proc0);
        }
}

/*
 * Change the process state to NOT be runnable, removing it from the run
 * queue.
 */
void
clrrunnable(struct proc *p, int stat)
{
        crit_enter_quick(p->p_thread);
        if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ))
                remrunqueue(p);
        p->p_stat = stat;
        crit_exit_quick(p->p_thread);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
        int newpriority;
        int interactive;
        int opq;
        int npq;

        /*
         * Set p_priority for general process comparisons
         */
        switch(p->p_rtprio.type) {
        case RTP_PRIO_REALTIME:
                p->p_priority = PRIBASE_REALTIME + p->p_rtprio.prio;
                return;
        case RTP_PRIO_NORMAL:
                break;
        case RTP_PRIO_IDLE:
                p->p_priority = PRIBASE_IDLE + p->p_rtprio.prio;
                return;
        case RTP_PRIO_THREAD:
                p->p_priority = PRIBASE_THREAD + p->p_rtprio.prio;
                return;
        }

        /*
         * NORMAL priorities fall through.  These are based on niceness
         * and cpu use.  Lower numbers == higher priorities.
         */
        newpriority = (int)(NICE_ADJUST(p->p_nice - PRIO_MIN) +
            p->p_estcpu / ESTCPURAMP);

        /*
         * p_interactive is -128 to +127 and represents very long term
         * interactivity or batch (whereas estcpu is a much faster variable).
         * Interactivity can modify the priority by up to 8 units either way.
         * (8 units == approximately 4 nice levels).
         */
        interactive = p->p_interactive / 10;
        newpriority += interactive;

        newpriority = min(newpriority, MAXPRI);
        newpriority = max(newpriority, 0);
        npq = newpriority / PPQ;
        crit_enter();
        opq = (p->p_priority & PRIMASK) / PPQ;
        if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ) && opq != npq) {
                /*
                 * We have to move the process to another queue
                 */
                remrunqueue(p);
                p->p_priority = PRIBASE_NORMAL + newpriority;
                setrunqueue(p);
        } else {
                /*
                 * We can just adjust the priority and it will be picked
                 * up later.
                 */
                KKASSERT(opq == npq || (p->p_flag & P_ONRUNQ) == 0);
                p->p_priority = PRIBASE_NORMAL + newpriority;
        }
        crit_exit();
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
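/*
 * Sketch of one update step, assuming the typical FSCALE == 2048 and
 * FSHIFT == 11: with c = cexp[i], the loop below computes
 *
 *      ldavg[i] = (c * ldavg[i] + nrun * FSCALE * (FSCALE - c)) >> FSHIFT
 *
 * which is the fixed point form of new = c * old + (1 - c) * nrun, so each
 * average decays geometrically toward the sampled run queue length.
 */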
static void
loadav(void *arg)
{
        int i, nrun;
        struct loadavg *avg;
        struct proc *p;
        thread_t td;

        avg = &averunnable;
        nrun = 0;
        FOREACH_PROC_IN_SYSTEM(p) {
                switch (p->p_stat) {
                case SRUN:
                        if ((td = p->p_thread) == NULL)
                                break;
                        if (td->td_flags & TDF_BLOCKED)
                                break;
                        /* fall through */
                case SIDL:
                        nrun++;
                        break;
                default:
                        break;
                }
        }
        for (i = 0; i < 3; i++)
                avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
                    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

        /*
         * Schedule the next update to occur after 5 seconds, but add a
         * random variation to avoid synchronisation with processes that
         * run at regular intervals.
         */
        callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
            loadav, NULL);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{

        callout_init(&loadav_callout);

        /* Kick off timeout driven events by calling first time. */
        roundrobin(NULL);
        schedcpu(NULL);
        loadav(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT * (until MAXPRI is reached).
 *
 * The cpu usage estimator ramps up quite quickly when the process is
 * running (linearly), and decays away exponentially, at a rate which
 * is proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of CPU
 * time in 5 * loadav seconds.  This causes the system to favor processes
 * which haven't run much recently, and to round-robin among other processes.
 *
 * The actual schedulerclock interrupt rate is ESTCPUFREQ, but we generally
 * want to ramp-up at a faster rate, ESTCPUVFREQ, so p_estcpu is scaled
 * by (ESTCPUVFREQ / ESTCPUFREQ).  You can control the ramp-up/ramp-down
 * rate by adjusting ESTCPUVFREQ in sys/proc.h in integer multiples
 * of ESTCPUFREQ.
 *
 * WARNING! called from a fast-int or an IPI, the MP lock MIGHT NOT BE HELD
 * and we cannot block.
 */
void
schedulerclock(void *dummy)
{
        struct thread *td;
        struct proc *p;

        td = curthread;
        if ((p = td->td_proc) != NULL) {
                p->p_cpticks++;         /* cpticks runs at ESTCPUFREQ */
                p->p_estcpu = ESTCPULIM(p->p_estcpu + ESTCPUVFREQ / ESTCPUFREQ);
                if (try_mplock()) {
                        resetpriority(p);
                        rel_mplock();
                }
        }
}

static
void
crit_panicints(void)
{
        int s;
        int cpri;

        s = splhigh();
        cpri = crit_panic_save();
        splx(safepri);
        crit_panic_restore(cpri);
        splx(s);
}