/*	$OpenBSD: sched_bsd.c,v 1.92 2024/05/29 18:55:45 claudio Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/clockintr.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <uvm/uvm_extern.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/smr.h>
#include <sys/tracepoint.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

uint64_t roundrobin_period;	/* [I] roundrobin period (ns) */
int	lbolt;			/* once a second sleep address */

struct mutex sched_lock;

void		update_loadavg(void *);
void		schedcpu(void *);
uint32_t	decay_aftersleep(uint32_t, uint32_t);

extern struct cpuset sched_idle_cpus;

/*
 * constants for averages over 1, 5, and 15 minutes when sampling at
 * 5 second intervals.
 */
static const fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

struct loadavg averunnable;
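/*
 * Illustrative note (not from the original sources): fixpt_t is a
 * fixed-point format with FSHIFT fractional bits, so a real value x is
 * stored as x * FSCALE, where FSCALE == (1 << FSHIFT).  Assuming the
 * usual FSHIFT of 11, FSCALE == 2048 and exp(-1/12) ~= 0.92004 above is
 * stored as 0.92004 * 2048 ~= 1884.  A product of two fixpt_t values
 * carries the FSCALE factor twice, hence the ">> FSHIFT" corrections in
 * the averaging code below.
 */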
/*
 * Force switch among equal priority processes every 100ms.
 */
void
roundrobin(struct clockrequest *cr, void *cf, void *arg)
{
	uint64_t count;
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	count = clockrequest_advance(cr, roundrobin_period);

	if (ci->ci_curproc != NULL) {
		if (spc->spc_schedflags & SPCF_SEENRR || count >= 2) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SEENRR | SPCF_SHOULDYIELD);
		} else {
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SEENRR);
		}
	}

	if (spc->spc_nrun || spc->spc_schedflags & SPCF_SHOULDYIELD)
		need_resched(ci);
}

/*
 * update_loadavg: compute a tenex style load average of a quantity on
 * 1, 5, and 15 minute intervals.
 */
void
update_loadavg(void *unused)
{
	static struct timeout to = TIMEOUT_INITIALIZER(update_loadavg, NULL);
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int i, nrun = 0;

	CPU_INFO_FOREACH(cii, ci) {
		if (!cpuset_isset(&sched_idle_cpus, ci))
			nrun++;
		nrun += ci->ci_schedstate.spc_nrun;
	}

	for (i = 0; i < 3; i++) {
		averunnable.ldavg[i] = (cexp[i] * averunnable.ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
	}

	timeout_add_sec(&to, 5);
}
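/*
 * Worked example (illustrative, not part of the original file): the
 * update above is the fixed-point form of
 *
 *	avg = avg * decay + nrun * (1 - decay),	decay = exp(-5s / T)
 *
 * for T = 1, 5 and 15 minutes.  Starting from ldavg[0] == 0 with one
 * runnable thread held constant, successive 5-second samples give
 * 0.080, 0.154, 0.221, ... converging on 1.0; after one minute the
 * 1-minute average has covered 1 - exp(-1) ~= 63% of the gap.
 */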
/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11

/*
 * Recompute process priorities, every second.
 */
void
schedcpu(void *unused)
{
	static struct timeout to = TIMEOUT_INITIALIZER(schedcpu, NULL);
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct proc *p;
	int s;
	unsigned int newcpu;

	LIST_FOREACH(p, &allproc, p_list) {
		/*
		 * Idle threads are never placed on the runqueue,
		 * therefore computing their priority is pointless.
		 */
		if (p->p_cpu != NULL &&
		    p->p_cpu->ci_schedstate.spc_idleproc == p)
			continue;
		/*
		 * Increment sleep time (if sleeping). We ignore overflow.
		 */
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		SCHED_LOCK(s);
		/*
		 * p_pctcpu is only for diagnostic tools such as ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (stathz == 100)?
		    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
		    100 * (((fixpt_t) p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / stathz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / stathz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu);
		setpriority(p, newcpu, p->p_p->ps_nice);

		if (p->p_stat == SRUN &&
		    (p->p_runpri / SCHED_PPQ) != (p->p_usrpri / SCHED_PPQ)) {
			remrunqueue(p);
			setrunqueue(p->p_cpu, p, p->p_usrpri);
		}
		SCHED_UNLOCK(s);
	}
	wakeup(&lbolt);
	timeout_add_sec(&to, 1);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
uint32_t
decay_aftersleep(uint32_t estcpu, uint32_t slptime)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	uint32_t newcpu;

	if (slptime > 5 * loadfac)
		newcpu = 0;
	else {
		newcpu = estcpu;
		slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --slptime)
			newcpu = decay_cpu(loadfac, newcpu);
	}

	return (newcpu);
}
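/*
 * Worked example (illustrative, not part of the original file): with a
 * steady load average of 1.0, loadfac == 2*FSCALE, so decay_cpu()
 * multiplies by 2*FSCALE / (2*FSCALE + FSCALE) == 2/3 each second.
 * Five seconds of decay leave (2/3)^5 ~= 0.13 of the original p_estcpu,
 * consistent with the "power: 5.68" entry in the table above, since
 * (2/3)^5.68 ~= 0.1.
 */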
/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.
 */
void
yield(void)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	setrunqueue(p->p_cpu, p, p->p_usrpri);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	setrunqueue(p->p_cpu, p, p->p_usrpri);
	p->p_ru.ru_nivcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}
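/*
 * The machine-independent heart of the context switch.  Called with the
 * scheduler lock held, mi_switch() charges the elapsed run time to the
 * outgoing thread, stops the thread's optional clock interrupts, picks
 * the next thread via sched_chooseproc(), and hands the CPU over with
 * cpu_switchto().  On return the thread may be running on a different
 * CPU, so the per-CPU state is refetched before the clocks are
 * restarted.
 */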
void
mi_switch(void)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct proc *p = curproc;
	struct proc *nextproc;
	struct process *pr = p->p_p;
	struct timespec ts;
	int oldipl, s;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT(p->p_stat != SONPROC);

	SCHED_ASSERT_LOCKED();

#ifdef MULTIPROCESSOR
	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 */
	if (_kernel_lock_held())
		hold_count = __mp_release_all(&kernel_lock);
	else
		hold_count = 0;
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	nanouptime(&ts);
	if (timespeccmp(&ts, &spc->spc_runtime, <)) {
#if 0
		printf("uptime is not monotonic! "
		    "ts=%lld.%09lu, runtime=%lld.%09lu\n",
		    (long long)ts.tv_sec, ts.tv_nsec,
		    (long long)spc->spc_runtime.tv_sec,
		    spc->spc_runtime.tv_nsec);
#endif
		timespecclear(&ts);
	} else {
		timespecsub(&ts, &spc->spc_runtime, &ts);
	}

	/* add the time counts for this thread to the process's total */
	tuagg_locked(pr, p, &ts);

	/* Stop any optional clock interrupts. */
	if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
		atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
		clockintr_cancel(&spc->spc_itimer);
	}
	if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
		atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
		clockintr_cancel(&spc->spc_profclock);
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR);

	nextproc = sched_chooseproc();

	/* preserve old IPL level so we can switch back to that */
	oldipl = MUTEX_OLDIPL(&sched_lock);

	if (p != nextproc) {
		uvmexp.swtch++;
		TRACEPOINT(sched, off__cpu, nextproc->p_tid + THREAD_PID_OFFSET,
		    nextproc->p_p->ps_pid);
		cpu_switchto(p, nextproc);
		TRACEPOINT(sched, on__cpu, NULL);
	} else {
		TRACEPOINT(sched, remain__cpu, NULL);
		p->p_stat = SONPROC;
	}

	clear_resched(curcpu());

	SCHED_ASSERT_LOCKED();

	/* Restore proc's IPL. */
	MUTEX_OLDIPL(&sched_lock) = oldipl;
	SCHED_UNLOCK(s);

	SCHED_ASSERT_UNLOCKED();

	assertwaitok();
	smr_idle();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so refetch the schedstate_percpu
	 * pointer.
	 */
	KASSERT(p->p_cpu == curcpu());
	spc = &p->p_cpu->ci_schedstate;

	/* Start any optional clock interrupts needed by the thread. */
	if (ISSET(p->p_p->ps_flags, PS_ITIMER)) {
		atomic_setbits_int(&spc->spc_schedflags, SPCF_ITIMER);
		clockintr_advance(&spc->spc_itimer, hardclock_period);
	}
	if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
		atomic_setbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
		clockintr_advance(&spc->spc_profclock, profclock_period);
	}

	nanouptime(&spc->spc_runtime);

#ifdef MULTIPROCESSOR
	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock and the scheduler lock.
	 */
	if (hold_count)
		__mp_acquire_count(&kernel_lock, hold_count);
#endif
	SCHED_LOCK(s);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue.
 */
void
setrunnable(struct proc *p)
{
	struct process *pr = p->p_p;
	u_char prio;

	SCHED_ASSERT_LOCKED();

	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SONPROC:
	case SDEAD:
	case SIDL:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((pr->ps_flags & PS_TRACED) != 0 && pr->ps_xsig != 0)
			atomic_setbits_int(&p->p_siglist, sigmask(pr->ps_xsig));
		prio = p->p_usrpri;
		setrunqueue(NULL, p, prio);
		break;
	case SSLEEP:
		prio = p->p_slppri;

		/* if not yet asleep, don't add to runqueue */
		if (ISSET(p->p_flag, P_WSLEEP))
			return;
		setrunqueue(NULL, p, prio);
		TRACEPOINT(sched, wakeup, p->p_tid + THREAD_PID_OFFSET,
		    p->p_p->ps_pid, CPU_INFO_UNIT(p->p_cpu));
		break;
	}
	if (p->p_slptime > 1) {
		uint32_t newcpu;

		newcpu = decay_aftersleep(p->p_estcpu, p->p_slptime);
		setpriority(p, newcpu, pr->ps_nice);
	}
	p->p_slptime = 0;
}

/*
 * Compute the priority of a process.
 */
void
setpriority(struct proc *p, uint32_t newcpu, uint8_t nice)
{
	unsigned int newprio;

	newprio = min((PUSER + newcpu + NICE_WEIGHT * (nice - NZERO)), MAXPRI);

	SCHED_ASSERT_LOCKED();
	p->p_estcpu = newcpu;
	p->p_usrpri = newprio;
}
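/*
 * Worked example (illustrative; PUSER, NICE_WEIGHT, NZERO and MAXPRI
 * are defined elsewhere in the kernel headers): at the default nice of
 * NZERO the nice term vanishes, so the priority is simply
 * PUSER + p_estcpu, clipped to MAXPRI.  A CPU hog's growing estcpu
 * drifts it toward MAXPRI (the worst priority), while "nice -n -20"
 * subtracts NICE_WEIGHT * 20 and holds the process nearer PUSER, the
 * best user priority.  Lower numeric values are better.
 */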
/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The cpu usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (see
 * setpriority() above) will compute a different value each time p_estcpu
 * increases.  This can cause a switch, but unless the priority crosses a
 * PPQ boundary the actual queue will not change.  The cpu usage estimator
 * ramps up quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when the
 * system is busy.  The basic principle is that the system will 90% forget
 * that the process used a lot of CPU time in 5 * loadav seconds.  This
 * causes the system to favor processes which haven't run much recently,
 * and to round-robin among other processes.
 */
void
schedclock(struct proc *p)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	uint32_t newcpu;
	int s;

	if (p == spc->spc_idleproc || spc->spc_spinning)
		return;

	SCHED_LOCK(s);
	newcpu = ESTCPULIM(p->p_estcpu + 1);
	setpriority(p, newcpu, p->p_p->ps_nice);
	SCHED_UNLOCK(s);
}

void (*cpu_setperf)(int);

#define PERFPOL_MANUAL	0
#define PERFPOL_AUTO	1
#define PERFPOL_HIGH	2
int	perflevel = 100;
int	perfpolicy = PERFPOL_AUTO;

#ifndef SMALL_KERNEL
/*
 * The code below handles CPU throttling.
 */
#include <sys/sysctl.h>

void setperf_auto(void *);
struct timeout setperf_to = TIMEOUT_INITIALIZER(setperf_auto, NULL);
extern int hw_power;

void
setperf_auto(void *v)
{
	static uint64_t *idleticks, *totalticks;
	static int downbeats;
	int i, j = 0;
	int speedup = 0;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	uint64_t idle, total, allidle = 0, alltotal = 0;

	if (perfpolicy != PERFPOL_AUTO)
		return;

	if (cpu_setperf == NULL)
		return;

	if (hw_power) {
		speedup = 1;
		goto faster;
	}

	if (!idleticks)
		if (!(idleticks = mallocarray(ncpusfound, sizeof(*idleticks),
		    M_DEVBUF, M_NOWAIT | M_ZERO)))
			return;
	if (!totalticks)
		if (!(totalticks = mallocarray(ncpusfound, sizeof(*totalticks),
		    M_DEVBUF, M_NOWAIT | M_ZERO))) {
			free(idleticks, M_DEVBUF,
			    sizeof(*idleticks) * ncpusfound);
			return;
		}
	CPU_INFO_FOREACH(cii, ci) {
		if (!cpu_is_online(ci))
			continue;
		total = 0;
		for (i = 0; i < CPUSTATES; i++) {
			total += ci->ci_schedstate.spc_cp_time[i];
		}
		total -= totalticks[j];
		idle = ci->ci_schedstate.spc_cp_time[CP_IDLE] - idleticks[j];
		if (idle < total / 3)
			speedup = 1;
		alltotal += total;
		allidle += idle;
		idleticks[j] += idle;
		totalticks[j] += total;
		j++;
	}
	if (allidle < alltotal / 2)
		speedup = 1;
	if (speedup && downbeats < 5)
		downbeats++;

	if (speedup && perflevel != 100) {
faster:
		perflevel = 100;
		cpu_setperf(perflevel);
	} else if (!speedup && perflevel != 0 && --downbeats <= 0) {
		perflevel = 0;
		cpu_setperf(perflevel);
	}

	timeout_add_msec(&setperf_to, 100);
}

int
sysctl_hwsetperf(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int err;

	if (!cpu_setperf)
		return EOPNOTSUPP;

	if (perfpolicy != PERFPOL_MANUAL)
		return sysctl_rdint(oldp, oldlenp, newp, perflevel);

	err = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
	    &perflevel, 0, 100);
	if (err)
		return err;

	if (newp != NULL)
		cpu_setperf(perflevel);

	return 0;
}
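/*
 * These handlers back the hw.setperf and hw.perfpolicy sysctl(2) knobs.
 * Illustrative summary of the auto policy above: every 100ms each online
 * CPU's tick deltas are inspected; if any CPU was idle for less than a
 * third of the interval, or the machine as a whole was idle for less
 * than half of it, the hardware is pushed to 100%.  Slowdowns only take
 * effect once the "downbeats" counter drains, so a brief lull does not
 * immediately drop the clock to 0%.
 */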
int
sysctl_hwperfpolicy(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	char policy[32];
	int err;

	if (!cpu_setperf)
		return EOPNOTSUPP;

	switch (perfpolicy) {
	case PERFPOL_MANUAL:
		strlcpy(policy, "manual", sizeof(policy));
		break;
	case PERFPOL_AUTO:
		strlcpy(policy, "auto", sizeof(policy));
		break;
	case PERFPOL_HIGH:
		strlcpy(policy, "high", sizeof(policy));
		break;
	default:
		strlcpy(policy, "unknown", sizeof(policy));
		break;
	}

	if (newp == NULL)
		return sysctl_rdstring(oldp, oldlenp, newp, policy);

	err = sysctl_string(oldp, oldlenp, newp, newlen, policy,
	    sizeof(policy));
	if (err)
		return err;
	if (strcmp(policy, "manual") == 0)
		perfpolicy = PERFPOL_MANUAL;
	else if (strcmp(policy, "auto") == 0)
		perfpolicy = PERFPOL_AUTO;
	else if (strcmp(policy, "high") == 0)
		perfpolicy = PERFPOL_HIGH;
	else
		return EINVAL;

	if (perfpolicy == PERFPOL_AUTO) {
		timeout_add_msec(&setperf_to, 200);
	} else if (perfpolicy == PERFPOL_HIGH) {
		perflevel = 100;
		cpu_setperf(perflevel);
	}
	return 0;
}
#endif

/*
 * Start the scheduler's periodic timeouts.
 */
void
scheduler_start(void)
{
	schedcpu(NULL);
	update_loadavg(NULL);

#ifndef SMALL_KERNEL
	if (perfpolicy == PERFPOL_AUTO)
		timeout_add_msec(&setperf_to, 200);
#endif
}