/*	$OpenBSD: sched_bsd.c,v 1.43 2016/03/09 13:38:50 mpi Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <uvm/uvm_extern.h>
#include <sys/sched.h>
#include <sys/timeout.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif


int	lbolt;			/* once a second sleep address */
int	rrticks_init;		/* # of hardclock ticks per roundrobin() */

#ifdef MULTIPROCESSOR
struct __mp_lock sched_lock;
#endif

void	schedcpu(void *);
void	updatepri(struct proc *);

void
scheduler_start(void)
{
	static struct timeout schedcpu_to;

	/*
	 * We avoid polluting the global namespace by keeping the scheduler
	 * timeouts static in this function.
	 * We set up the timeout here and kick schedcpu once to make it do
	 * its job.
	 */
	timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);

	rrticks_init = hz / 10;
	schedcpu(&schedcpu_to);
}

/*
 * Force switch among equal priority processes every 100ms.
 */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks_init;

	if (ci->ci_curproc != NULL) {
		if (spc->spc_schedflags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SHOULDYIELD);
		} else {
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SEENRR);
		}
	}

	if (spc->spc_nrun)
		need_resched(ci);
}
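
/*
 * Worked numbers (illustrative only): scheduler_start() arms schedcpu()
 * to re-run once per second via timeout_add_sec() and sets rrticks_init
 * to hz / 10.  With the common hz value of 100, roundrobin() therefore
 * reloads spc_rrticks with 10 hardclock ticks of 10ms each, i.e. the
 * 100ms quantum described above; a kernel running with hz == 1000 would
 * use 100 ticks of 1ms for the same quantum.
 */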

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
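
/*
 * Worked example of the macros above: with a load average of 1.0,
 * loadfactor() yields b == 2 (2 * FSCALE in fixed point) and decay_cpu()
 * multiplies p_estcpu by b / (b + 1) == 2/3 each second.  After five
 * seconds (2/3)^5 ~= 0.13, and using the exact power 5.68 from the table
 * above, (2/3)^5.68 ~= 0.10, i.e. roughly 90% of the estimate has been
 * forgotten, as intended.
 *
 * CCPU_SHIFT works because (1 - exp(-1/20)) / 100 ~= 0.0488 / 100 ~=
 * 1/2048 == 2^-11, so with phz == 100 the per-second p_pctcpu increment
 * in schedcpu() below reduces to p_cpticks << (FSHIFT - CCPU_SHIFT).
 */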

/*
 * Recompute process priorities, every second.
 */
void
schedcpu(void *arg)
{
	struct timeout *to = (struct timeout *)arg;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct proc *p;
	int s;
	unsigned int newcpu;
	int phz;

	/*
	 * If we have a statistics clock, use that to calculate CPU
	 * time, otherwise revert to using the profiling clock (which,
	 * in turn, defaults to hz if there is no separate profiling
	 * clock available)
	 */
	phz = stathz ? stathz : profhz;
	KASSERT(phz);

	LIST_FOREACH(p, &allproc, p_list) {
		/*
		 * Increment sleep time (if sleeping). We ignore overflow.
		 */
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		SCHED_LOCK(s);
		/*
		 * p_pctcpu is only for diagnostic tools such as ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (phz == 100)?
		    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
		    100 * (((fixpt_t) p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / phz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / phz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu);
		p->p_estcpu = newcpu;
		resetpriority(p);
		if (p->p_priority >= PUSER) {
			if (p->p_stat == SRUN &&
			    (p->p_priority / SCHED_PPQ) !=
			    (p->p_usrpri / SCHED_PPQ)) {
				remrunqueue(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		SCHED_UNLOCK(s);
	}
	uvm_meter();
	wakeup(&lbolt);
	timeout_add_sec(to, 1);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(struct proc *p)
{
	unsigned int newcpu = p->p_estcpu;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	SCHED_ASSERT_LOCKED();

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(p);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.
 */
void
yield(void)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.  If a process is supplied,
 * we switch to that process.  Otherwise, we use the normal process selection
 * criteria.
 */
void
preempt(struct proc *newp)
{
	struct proc *p = curproc;
	int s;

	/*
	 * XXX Switching to a specific process is not supported yet.
	 */
	if (newp != NULL)
		panic("preempt: cpu_preempt not yet implemented");

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	p->p_cpu = sched_choosecpu(p);
	setrunqueue(p);
	p->p_ru.ru_nivcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

void
mi_switch(void)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct proc *p = curproc;
	struct proc *nextproc;
	struct process *pr = p->p_p;
	struct rlimit *rlim;
	rlim_t secs;
	struct timespec ts;
#ifdef MULTIPROCESSOR
	int hold_count;
	int sched_count;
#endif

	assertwaitok();
	KASSERT(p->p_stat != SONPROC);

	SCHED_ASSERT_LOCKED();

#ifdef MULTIPROCESSOR
	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 */
	sched_count = __mp_release_all_but_one(&sched_lock);
	if (__mp_lock_held(&kernel_lock))
		hold_count = __mp_release_all(&kernel_lock);
	else
		hold_count = 0;
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	nanouptime(&ts);
	if (timespeccmp(&ts, &spc->spc_runtime, <)) {
#if 0
		printf("uptime is not monotonic! "
		    "ts=%lld.%09lu, runtime=%lld.%09lu\n",
		    (long long)ts.tv_sec, ts.tv_nsec,
		    (long long)spc->spc_runtime.tv_sec,
		    spc->spc_runtime.tv_nsec);
#endif
	} else {
		timespecsub(&ts, &spc->spc_runtime, &ts);
		timespecadd(&p->p_rtime, &ts, &p->p_rtime);
	}

	/* add the time counts for this thread to the process's total */
	tuagg_unlocked(pr, p);

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.
	 */
	rlim = &pr->ps_limit->pl_rlimit[RLIMIT_CPU];
	secs = pr->ps_tu.tu_runtime.tv_sec;
	if (secs >= rlim->rlim_cur) {
		if (secs >= rlim->rlim_max) {
			psignal(p, SIGKILL);
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR);

	nextproc = sched_chooseproc();

	if (p != nextproc) {
		uvmexp.swtch++;
		cpu_switchto(p, nextproc);
	} else {
		p->p_stat = SONPROC;
	}

	clear_resched(curcpu());

	SCHED_ASSERT_LOCKED();

	/*
	 * To preserve lock ordering, we need to release the sched lock
	 * and grab it after we grab the big lock.
	 * In the future, when the sched lock isn't recursive, we'll
	 * just release it here.
	 */
#ifdef MULTIPROCESSOR
	__mp_unlock(&sched_lock);
#endif

	SCHED_ASSERT_UNLOCKED();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KASSERT(p->p_cpu == curcpu());

	nanouptime(&p->p_cpu->ci_schedstate.spc_runtime);

#ifdef MULTIPROCESSOR
	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock and the scheduler lock.
	 */
	if (hold_count)
		__mp_acquire_count(&kernel_lock, hold_count);
	__mp_acquire_count(&sched_lock, sched_count + 1);
#endif
}

static __inline void
resched_proc(struct proc *p, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * This does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (p->p_cpu != NULL) ? p->p_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		need_resched(ci);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SONPROC:
	case SDEAD:
	case SIDL:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_p->ps_flags & PS_TRACED) != 0 && p->p_xstat != 0)
			atomic_setbits_int(&p->p_siglist, sigmask(p->p_xstat));
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;
	}
	p->p_stat = SRUN;
	p->p_cpu = sched_choosecpu(p);
	setrunqueue(p);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	resched_proc(p, p->p_priority);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	unsigned int newpriority;

	SCHED_ASSERT_LOCKED();

	newpriority = PUSER + p->p_estcpu +
	    NICE_WEIGHT * (p->p_p->ps_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	p->p_usrpri = newpriority;
	resched_proc(p, p->p_usrpri);
}
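
/*
 * Worked example of the formula above (assuming the usual values
 * PUSER == 50, NICE_WEIGHT == 2, NZERO == 20 and MAXPRI == 127 from
 * param.h/sched.h): a nice-0 process that has accumulated
 * p_estcpu == 36 gets p_usrpri == 50 + 36 + 0 == 86, while the same
 * process reniced to 10 gets 50 + 36 + 2 * 10 == 106.  The min()
 * against MAXPRI clamps heavily loaded or heavily niced processes to
 * the worst user priority instead of letting the sum run past it.
 */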

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The cpu usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in resetpriority()
 * above) will compute a different value each time p_estcpu increases. This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The cpu usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */
void
schedclock(struct proc *p)
{
	int s;

	SCHED_LOCK(s);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
	resetpriority(p);
	if (p->p_priority >= PUSER)
		p->p_priority = p->p_usrpri;
	SCHED_UNLOCK(s);
}

void (*cpu_setperf)(int);

#define PERFPOL_MANUAL 0
#define PERFPOL_AUTO 1
#define PERFPOL_HIGH 2
int perflevel = 100;
int perfpolicy = PERFPOL_MANUAL;

#ifndef SMALL_KERNEL
/*
 * The code below handles CPU throttling.
 */
#include <sys/sysctl.h>

void setperf_auto(void *);
struct timeout setperf_to = TIMEOUT_INITIALIZER(setperf_auto, NULL);

void
setperf_auto(void *v)
{
	static uint64_t *idleticks, *totalticks;
	static int downbeats;

	int i, j;
	int speedup;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	uint64_t idle, total, allidle, alltotal;

	if (perfpolicy != PERFPOL_AUTO)
		return;

	if (!idleticks)
		if (!(idleticks = mallocarray(ncpusfound, sizeof(*idleticks),
		    M_DEVBUF, M_NOWAIT | M_ZERO)))
			return;
	if (!totalticks)
		if (!(totalticks = mallocarray(ncpusfound, sizeof(*totalticks),
		    M_DEVBUF, M_NOWAIT | M_ZERO))) {
			free(idleticks, M_DEVBUF,
			    sizeof(*idleticks) * ncpusfound);
			return;
		}

	alltotal = allidle = 0;
	j = 0;
	speedup = 0;
	CPU_INFO_FOREACH(cii, ci) {
		total = 0;
		for (i = 0; i < CPUSTATES; i++) {
			total += ci->ci_schedstate.spc_cp_time[i];
		}
		total -= totalticks[j];
		idle = ci->ci_schedstate.spc_cp_time[CP_IDLE] - idleticks[j];
		if (idle < total / 3)
			speedup = 1;
		alltotal += total;
		allidle += idle;
		idleticks[j] += idle;
		totalticks[j] += total;
		j++;
	}
	if (allidle < alltotal / 2)
		speedup = 1;
	if (speedup)
		downbeats = 5;

	if (speedup && perflevel != 100) {
		perflevel = 100;
		cpu_setperf(perflevel);
	} else if (!speedup && perflevel != 0 && --downbeats <= 0) {
		perflevel = 0;
		cpu_setperf(perflevel);
	}

	timeout_add_msec(&setperf_to, 100);
}
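
/*
 * Timing of the auto policy, worked out from the code above: setperf_auto()
 * samples the spc_cp_time counters every 100ms.  A single sample in which
 * any one CPU was idle for less than a third of the interval, or the
 * machine as a whole for less than half of it, raises perflevel straight
 * to 100.  Dropping back to 0 requires five consecutive mostly-idle
 * samples (downbeats), i.e. roughly half a second of calm.
 */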

int
sysctl_hwsetperf(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int err, newperf;

	if (!cpu_setperf)
		return EOPNOTSUPP;

	if (perfpolicy != PERFPOL_MANUAL)
		return sysctl_rdint(oldp, oldlenp, newp, perflevel);

	newperf = perflevel;
	err = sysctl_int(oldp, oldlenp, newp, newlen, &newperf);
	if (err)
		return err;
	if (newperf > 100)
		newperf = 100;
	if (newperf < 0)
		newperf = 0;
	perflevel = newperf;
	cpu_setperf(perflevel);

	return 0;
}

int
sysctl_hwperfpolicy(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	char policy[32];
	int err;

	if (!cpu_setperf)
		return EOPNOTSUPP;

	switch (perfpolicy) {
	case PERFPOL_MANUAL:
		strlcpy(policy, "manual", sizeof(policy));
		break;
	case PERFPOL_AUTO:
		strlcpy(policy, "auto", sizeof(policy));
		break;
	case PERFPOL_HIGH:
		strlcpy(policy, "high", sizeof(policy));
		break;
	default:
		strlcpy(policy, "unknown", sizeof(policy));
		break;
	}

	if (newp == NULL)
		return sysctl_rdstring(oldp, oldlenp, newp, policy);

	err = sysctl_string(oldp, oldlenp, newp, newlen, policy, sizeof(policy));
	if (err)
		return err;
	if (strcmp(policy, "manual") == 0)
		perfpolicy = PERFPOL_MANUAL;
	else if (strcmp(policy, "auto") == 0)
		perfpolicy = PERFPOL_AUTO;
	else if (strcmp(policy, "high") == 0)
		perfpolicy = PERFPOL_HIGH;
	else
		return EINVAL;

	if (perfpolicy == PERFPOL_AUTO) {
		timeout_add_msec(&setperf_to, 200);
	} else if (perfpolicy == PERFPOL_HIGH) {
		perflevel = 100;
		cpu_setperf(perflevel);
	}
	return 0;
}
#endif
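
/*
 * Usage sketch (assumption: the MIB wiring lives in the generic sysctl
 * code, not in this file): sysctl_hwsetperf() and sysctl_hwperfpolicy()
 * back the hw.setperf and hw.perfpolicy knobs, e.g.
 *
 *	# sysctl hw.perfpolicy=auto
 *	# sysctl hw.setperf=75
 *
 * hw.setperf accepts writes only while the policy is "manual"; under
 * "auto" it is adjusted by setperf_auto(), and "high" pins it at 100.
 */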