/*	$OpenBSD: sched_bsd.c,v 1.39 2014/11/12 22:27:45 tedu Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <uvm/uvm_extern.h>
#include <sys/sched.h>
#include <sys/timeout.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif


int	lbolt;			/* once a second sleep address */
int	rrticks_init;		/* # of hardclock ticks per roundrobin() */

#ifdef MULTIPROCESSOR
struct __mp_lock sched_lock;
#endif

void	schedcpu(void *);

void
scheduler_start(void)
{
	static struct timeout schedcpu_to;

	/*
	 * We avoid polluting the global namespace by keeping the scheduler
	 * timeout static in this function.
	 * We set up the timeout here and kick schedcpu once to make it do
	 * its job; roundrobin() is driven from hardclock() every
	 * rrticks_init ticks.
	 */

	timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);

	rrticks_init = hz / 10;
	schedcpu(&schedcpu_to);
}
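
/*
 * The pattern above is a self-rearming timeout: the struct timeout is
 * handed to its own handler as the argument, so the handler can rearm
 * itself, as schedcpu() below does with timeout_add_sec().  A minimal
 * sketch of the pattern (illustrative only, kept under #if 0 so it is
 * never compiled; "example_to"/"example_tick" are hypothetical names):
 */
#if 0
static struct timeout example_to;

static void
example_tick(void *arg)
{
	struct timeout *to = arg;

	/* ... periodic work goes here ... */
	timeout_add_sec(to, 1);		/* rearm: run again in one second */
}

static void
example_start(void)
{
	timeout_set(&example_to, example_tick, &example_to);
	example_tick(&example_to);	/* kick it once, as scheduler_start() does */
}
#endif
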
/*
 * Force switch among equal priority processes every 100ms.
 */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks_init;

	if (ci->ci_curproc != NULL) {
		if (spc->spc_schedflags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SHOULDYIELD);
		} else {
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SEENRR);
		}
	}

	if (spc->spc_nrun)
		need_resched(ci);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_estcpu *= decay;
 * will compute
 *	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = x**0/0! + x**1/1! + x**2/2! + ... ,
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
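
/*
 * A worked check of the proof above (an illustrative userland sketch,
 * kept under #if 0 so it is never compiled into the kernel; FSHIFT = 11
 * and FSCALE = 2048 are assumed, the usual param.h values).
 * decay ** (5 * loadavg) lands near .1 for every load (0.13 at loadavg 1,
 * tightening as load grows), consistent with the exact powers tabulated
 * above, and one decay_cpu() step at loadavg 2 scales p_estcpu by 4/5.
 */
#if 0
#include <math.h>
#include <stdio.h>

#define FSHIFT	11		/* assumed, as in param.h */
#define FSCALE	(1 << FSHIFT)

int
main(void)
{
	double loadavg, decay;
	unsigned int loadfac, estcpu;

	for (loadavg = 1; loadavg <= 4; loadavg++) {
		decay = (2 * loadavg) / (2 * loadavg + 1);
		/* prints 0.132, 0.107, 0.099, 0.095: all close to .1 */
		printf("loadavg %g: decay^(5*loadavg) = %.3f\n",
		    loadavg, pow(decay, 5 * loadavg));
	}

	/* one fixed-point decay step, as decay_cpu() computes it */
	loadfac = 2 * (2 * FSCALE);	/* loadfactor() of a loadavg of 2 */
	estcpu = 100;
	estcpu = (loadfac * estcpu) / (loadfac + FSCALE);
	printf("p_estcpu 100 -> %u after one second\n", estcpu);	/* 80 */
	return 0;
}
#endif
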
/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11

/*
 * Recompute process priorities, every second.
 */
void
schedcpu(void *arg)
{
	struct timeout *to = (struct timeout *)arg;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct proc *p;
	int s;
	unsigned int newcpu;
	int phz;

	/*
	 * If we have a statistics clock, use that to calculate CPU
	 * time, otherwise revert to using the profiling clock (which,
	 * in turn, defaults to hz if there is no separate profiling
	 * clock available)
	 */
	phz = stathz ? stathz : profhz;
	KASSERT(phz);

	LIST_FOREACH(p, &allproc, p_list) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		SCHED_LOCK(s);
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (phz == 100) ?
		    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t) p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / phz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / phz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu);
		p->p_estcpu = newcpu;
		resetpriority(p);
		if (p->p_priority >= PUSER) {
			if (p->p_stat == SRUN &&
			    (p->p_priority / SCHED_PPQ) !=
			    (p->p_usrpri / SCHED_PPQ)) {
				remrunqueue(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		SCHED_UNLOCK(s);
	}
	uvm_meter();
	wakeup(&lbolt);
	timeout_add_sec(to, 1);
}
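
/*
 * A worked check of the ccpu decay performed above (illustrative only,
 * kept under #if 0 so it is never compiled): ccpu is exp(-1/20) in
 * fixed point, so after 60 one-second steps a quiescent process retains
 * exp(-60/20) =~ 5% of its p_pctcpu, i.e. 95% has decayed away, as the
 * comment before the ccpu definition promises.
 */
#if 0
#include <stdio.h>

#define FSHIFT	11		/* assumed, as in param.h */
#define FSCALE	(1 << FSHIFT)

int
main(void)
{
	unsigned int ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
	unsigned int pctcpu = FSCALE;	/* start at 100% CPU */
	int sec;

	for (sec = 1; sec <= 60; sec++)
		pctcpu = (pctcpu * ccpu) >> FSHIFT;	/* one schedcpu() step */
	/* prints roughly 5% */
	printf("%%cpu after 60 idle seconds: %.1f%%\n",
	    100.0 * pctcpu / FSCALE);
	return 0;
}
#endif
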
/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(struct proc *p)
{
	unsigned int newcpu = p->p_estcpu;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	SCHED_ASSERT_LOCKED();

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(p);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.
 */
void
yield(void)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.  If a process is supplied,
 * we switch to that process.  Otherwise, we use the normal process selection
 * criteria.
 */
void
preempt(struct proc *newp)
{
	struct proc *p = curproc;
	int s;

	/*
	 * XXX Switching to a specific process is not supported yet.
	 */
	if (newp != NULL)
		panic("preempt: cpu_preempt not yet implemented");

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	p->p_cpu = sched_choosecpu(p);
	setrunqueue(p);
	p->p_ru.ru_nivcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

void
mi_switch(void)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct proc *p = curproc;
	struct proc *nextproc;
	struct process *pr = p->p_p;
	struct rlimit *rlim;
	rlim_t secs;
	struct timespec ts;
#ifdef MULTIPROCESSOR
	int hold_count;
	int sched_count;
#endif

	assertwaitok();
	KASSERT(p->p_stat != SONPROC);

	SCHED_ASSERT_LOCKED();

#ifdef MULTIPROCESSOR
	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 */
	sched_count = __mp_release_all_but_one(&sched_lock);
	if (__mp_lock_held(&kernel_lock))
		hold_count = __mp_release_all(&kernel_lock);
	else
		hold_count = 0;
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	nanouptime(&ts);
	if (timespeccmp(&ts, &spc->spc_runtime, <)) {
#if 0
		printf("uptime is not monotonic! "
		    "ts=%lld.%09lu, runtime=%lld.%09lu\n",
		    (long long)ts.tv_sec, ts.tv_nsec,
		    (long long)spc->spc_runtime.tv_sec,
		    spc->spc_runtime.tv_nsec);
#endif
	} else {
		timespecsub(&ts, &spc->spc_runtime, &ts);
		timespecadd(&p->p_rtime, &ts, &p->p_rtime);
	}

	/* add the time counts for this thread to the process's total */
	tuagg_unlocked(pr, p);

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.
	 */
	rlim = &pr->ps_limit->pl_rlimit[RLIMIT_CPU];
	secs = pr->ps_tu.tu_runtime.tv_sec;
	if (secs >= rlim->rlim_cur) {
		if (secs >= rlim->rlim_max) {
			psignal(p, SIGKILL);
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR);

	nextproc = sched_chooseproc();

	if (p != nextproc) {
		uvmexp.swtch++;
		cpu_switchto(p, nextproc);
	} else {
		p->p_stat = SONPROC;
	}

	clear_resched(curcpu());

	SCHED_ASSERT_LOCKED();

	/*
	 * To preserve lock ordering, we need to release the sched lock
	 * and grab it after we grab the big lock.
	 * In the future, when the sched lock isn't recursive, we'll
	 * just release it here.
	 */
#ifdef MULTIPROCESSOR
	__mp_unlock(&sched_lock);
#endif

	SCHED_ASSERT_UNLOCKED();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KASSERT(p->p_cpu == curcpu());

	nanouptime(&p->p_cpu->ci_schedstate.spc_runtime);

#ifdef MULTIPROCESSOR
	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock and the scheduler lock.
	 */
	if (hold_count)
		__mp_acquire_count(&kernel_lock, hold_count);
	__mp_acquire_count(&sched_lock, sched_count + 1);
#endif
}
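
/*
 * The RLIMIT_CPU policy enforced in mi_switch() above is visible from
 * userland: past the soft CPU limit a process gets SIGXCPU (and the
 * kernel bumps the soft limit by 5 seconds, so the signal repeats);
 * past the hard limit it gets SIGKILL.  A minimal userland sketch of
 * observing this (illustrative only, kept under #if 0):
 */
#if 0
#include <sys/resource.h>
#include <signal.h>
#include <unistd.h>

static void
onxcpu(int sig)
{
	(void)sig;
	/* write(2) is async-signal-safe; printf(3) is not */
	write(STDOUT_FILENO, "SIGXCPU\n", 8);
}

int
main(void)
{
	struct rlimit rl = { 2, 10 };	/* 2s soft, 10s hard */

	signal(SIGXCPU, onxcpu);
	setrlimit(RLIMIT_CPU, &rl);
	for (;;)
		;	/* burn CPU: SIGXCPU at ~2s and ~7s, SIGKILL at ~10s */
}
#endif
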
static __inline void
resched_proc(struct proc *p, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * This does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (p->p_cpu != NULL) ? p->p_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		need_resched(ci);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SONPROC:
	case SDEAD:
	case SIDL:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_p->ps_flags & PS_TRACED) != 0 && p->p_xstat != 0)
			atomic_setbits_int(&p->p_siglist, sigmask(p->p_xstat));
		/* FALLTHROUGH */
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;
	}
	p->p_stat = SRUN;
	p->p_cpu = sched_choosecpu(p);
	setrunqueue(p);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	resched_proc(p, p->p_priority);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	unsigned int newpriority;

	SCHED_ASSERT_LOCKED();

	newpriority = PUSER + p->p_estcpu +
	    NICE_WEIGHT * (p->p_p->ps_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	p->p_usrpri = newpriority;
	resched_proc(p, p->p_usrpri);
}
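
/*
 * A worked example of the formula above (illustrative only, kept under
 * #if 0; PUSER = 50, NZERO = 20, NICE_WEIGHT = 2 and MAXPRI = 127 are
 * assumed, the usual header values; ps_nice stores nice + NZERO):
 */
#if 0
#include <stdio.h>

#define PUSER		50	/* assumed values, see above */
#define NZERO		20
#define NICE_WEIGHT	2
#define MAXPRI		127

static unsigned int
user_pri(unsigned int estcpu, int ps_nice)
{
	unsigned int pri = PUSER + estcpu + NICE_WEIGHT * (ps_nice - NZERO);

	return pri < MAXPRI ? pri : MAXPRI;
}

int
main(void)
{
	/* fresh nice-0 process: 50; one with estcpu 36: 86 */
	printf("%u %u\n", user_pri(0, 20), user_pri(36, 20));
	/* nice 20 with high estcpu clamps: 50+80+40 = 170 -> 127 */
	printf("%u\n", user_pri(80, 40));
	return 0;
}
#endif
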
/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The cpu usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (see
 * resetpriority() above) will compute a different value each time p_estcpu
 * increases.  This can cause a switch, but unless the priority crosses a PPQ
 * boundary the actual queue will not change.  The cpu usage estimator ramps
 * up quite quickly when the process is running (linearly), and decays away
 * exponentially, at a rate which is proportionally slower when the system is
 * busy.  The basic principle is that the system will 90% forget that the
 * process used a lot of CPU time in 5 * loadav seconds.  This causes the
 * system to favor processes which haven't run much recently, and to
 * round-robin among other processes.
 */
void
schedclock(struct proc *p)
{
	int s;

	SCHED_LOCK(s);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
	resetpriority(p);
	if (p->p_priority >= PUSER)
		p->p_priority = p->p_usrpri;
	SCHED_UNLOCK(s);
}

void (*cpu_setperf)(int);

#define PERFPOL_MANUAL 0
#define PERFPOL_AUTO 1
#define PERFPOL_HIGH 2
int perflevel = 100;
int perfpolicy = PERFPOL_MANUAL;

#ifndef SMALL_KERNEL
/*
 * The code below handles CPU throttling.
 */
#include <sys/sysctl.h>

struct timeout setperf_to;
void setperf_auto(void *);

void
setperf_auto(void *v)
{
	static uint64_t *idleticks, *totalticks;
	static int downbeats;

	int i, j;
	int speedup;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	uint64_t idle, total, allidle, alltotal;

	if (perfpolicy != PERFPOL_AUTO)
		return;

	if (!idleticks)
		if (!(idleticks = malloc(sizeof(*idleticks) * ncpusfound,
		    M_DEVBUF, M_NOWAIT | M_ZERO)))
			return;
	if (!totalticks)
		if (!(totalticks = malloc(sizeof(*totalticks) * ncpusfound,
		    M_DEVBUF, M_NOWAIT | M_ZERO))) {
			free(idleticks, M_DEVBUF,
			    sizeof(*idleticks) * ncpusfound);
			return;
		}

	alltotal = allidle = 0;
	j = 0;
	speedup = 0;
	CPU_INFO_FOREACH(cii, ci) {
		total = 0;
		for (i = 0; i < CPUSTATES; i++) {
			total += ci->ci_schedstate.spc_cp_time[i];
		}
		total -= totalticks[j];
		idle = ci->ci_schedstate.spc_cp_time[CP_IDLE] - idleticks[j];
		if (idle < total / 3)
			speedup = 1;
		alltotal += total;
		allidle += idle;
		idleticks[j] += idle;
		totalticks[j] += total;
		j++;
	}
	if (allidle < alltotal / 2)
		speedup = 1;
	if (speedup)
		downbeats = 5;

	if (speedup && perflevel != 100) {
		perflevel = 100;
		cpu_setperf(perflevel);
	} else if (!speedup && perflevel != 0 && --downbeats <= 0) {
		perflevel = 0;
		cpu_setperf(perflevel);
	}

	timeout_add_msec(&setperf_to, 100);
}
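
/*
 * The decision rule above, in short: go to full speed as soon as any
 * CPU was less than 1/3 idle, or the machine as a whole was less than
 * half idle, over the last 100ms sample; drop to the lowest level only
 * after five consecutive quiet samples ("downbeats").  A minimal
 * sketch of the same rule on made-up numbers (illustrative only, kept
 * under #if 0):
 */
#if 0
#include <stdio.h>

int
main(void)
{
	/* hypothetical per-CPU (idle, total) tick deltas for one sample */
	unsigned long long idle[2] = { 40, 90 }, total[2] = { 100, 100 };
	unsigned long long allidle = 0, alltotal = 0;
	int i, speedup = 0;

	for (i = 0; i < 2; i++) {
		if (idle[i] < total[i] / 3)	/* this CPU over 2/3 busy? */
			speedup = 1;
		allidle += idle[i];
		alltotal += total[i];
	}
	if (allidle < alltotal / 2)		/* machine over half busy? */
		speedup = 1;
	printf("speedup: %d\n", speedup);	/* 0: neither test triggers */
	return 0;
}
#endif
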
int
sysctl_hwsetperf(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int err, newperf;

	if (!cpu_setperf)
		return EOPNOTSUPP;

	if (perfpolicy != PERFPOL_MANUAL)
		return sysctl_rdint(oldp, oldlenp, newp, perflevel);

	newperf = perflevel;
	err = sysctl_int(oldp, oldlenp, newp, newlen, &newperf);
	if (err)
		return err;
	if (newperf > 100)
		newperf = 100;
	if (newperf < 0)
		newperf = 0;
	perflevel = newperf;
	cpu_setperf(perflevel);

	return 0;
}

int
sysctl_hwperfpolicy(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	char policy[32];
	int err;

	if (!cpu_setperf)
		return EOPNOTSUPP;

	switch (perfpolicy) {
	case PERFPOL_MANUAL:
		strlcpy(policy, "manual", sizeof(policy));
		break;
	case PERFPOL_AUTO:
		strlcpy(policy, "auto", sizeof(policy));
		break;
	case PERFPOL_HIGH:
		strlcpy(policy, "high", sizeof(policy));
		break;
	default:
		strlcpy(policy, "unknown", sizeof(policy));
		break;
	}

	if (newp == NULL)
		return sysctl_rdstring(oldp, oldlenp, newp, policy);

	err = sysctl_string(oldp, oldlenp, newp, newlen, policy,
	    sizeof(policy));
	if (err)
		return err;
	if (strcmp(policy, "manual") == 0)
		perfpolicy = PERFPOL_MANUAL;
	else if (strcmp(policy, "auto") == 0)
		perfpolicy = PERFPOL_AUTO;
	else if (strcmp(policy, "high") == 0)
		perfpolicy = PERFPOL_HIGH;
	else
		return EINVAL;

	if (perfpolicy == PERFPOL_AUTO) {
		timeout_add_msec(&setperf_to, 200);
	} else if (perfpolicy == PERFPOL_HIGH) {
		perflevel = 100;
		cpu_setperf(perflevel);
	}
	return 0;
}
#endif
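
/*
 * From userland these two handlers sit behind the hw.setperf and
 * hw.perfpolicy sysctl(3) nodes.  A minimal sketch of switching the
 * policy to "auto" (illustrative only, kept under #if 0; the
 * HW_PERFPOLICY mib name is an assumption inferred from the handler
 * above, not confirmed by this file):
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <string.h>

int
main(void)
{
	int mib[2] = { CTL_HW, HW_PERFPOLICY };
	char policy[] = "auto";

	/* the command-line equivalent is "sysctl hw.perfpolicy=auto" */
	return sysctl(mib, 2, NULL, NULL, policy, strlen(policy) + 1);
}
#endif
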