/*	$OpenBSD: sched_bsd.c,v 1.50 2019/02/26 14:24:21 visa Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <uvm/uvm_extern.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/smr.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif


int	lbolt;			/* once a second sleep address */
int	rrticks_init;		/* # of hardclock ticks per roundrobin() */

#ifdef MULTIPROCESSOR
struct __mp_lock sched_lock;
#endif

void	 schedcpu(void *);
void	 updatepri(struct proc *);

void
scheduler_start(void)
{
	static struct timeout schedcpu_to;

	/*
	 * We avoid polluting the global namespace by keeping the scheduler
	 * timeouts static in this function.
	 * We set up the timeout here and kick schedcpu once to make it do
	 * its job.
	 */
	timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);

	rrticks_init = hz / 10;
	schedcpu(&schedcpu_to);
}

/*
 * Force switch among equal priority processes every 100ms.
 */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks_init;

	if (ci->ci_curproc != NULL) {
		if (spc->spc_schedflags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SHOULDYIELD);
		} else {
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SEENRR);
		}
	}

	if (spc->spc_nrun)
		need_resched(ci);
}
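
/*
 * Illustrative sketch (not part of the kernel build): the SEENRR/
 * SHOULDYIELD pair above implements a two-strike rule.  A process seen
 * on the CPU by one roundrobin() pass is marked SEENRR; if it is still
 * there at the next pass (~100ms later, since rrticks_init = hz / 10)
 * it is marked SHOULDYIELD.  The stand-alone toy below replays that
 * escalation; the TOY_* flag values are illustrative, not the kernel's.
 */
#if 0
#include <stdio.h>

#define TOY_SEENRR	0x0001
#define TOY_SHOULDYIELD	0x0002

int
main(void)
{
	int flags = 0, pass;

	for (pass = 1; pass <= 3; pass++) {
		if (flags & TOY_SEENRR)
			flags |= TOY_SHOULDYIELD;	/* second strike */
		else
			flags |= TOY_SEENRR;		/* first strike */
		printf("pass %d: %s\n", pass,
		    (flags & TOY_SHOULDYIELD) ? "should yield" : "seen once");
	}
	return 0;
}
#endif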

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
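
/*
 * Illustrative sketch (not part of the kernel build): a quick numeric
 * check of the proof above, that decay = 2L / (2L + 1) gives
 * decay ** (5 * L) in the neighborhood of .1 for any load average L.
 * Plain doubles stand in for the kernel's fixpt_t.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double loadav, decay;

	for (loadav = 1.0; loadav <= 4.0; loadav += 1.0) {
		decay = (2.0 * loadav) / (2.0 * loadav + 1.0);
		/* Prints 0.13, 0.11, 0.10, 0.09: roughly .1 throughout. */
		printf("loadav %.0f: decay^(5*loadav) = %.2f\n",
		    loadav, pow(decay, 5.0 * loadav));
	}
	return 0;
}
#endif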

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
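
/*
 * Illustrative sketch (not part of the kernel build): multiplying by
 * ccpu = exp(-1/20) once per second leaves exp(-60/20) = exp(-3), about
 * 5%, after 60 seconds; that is the "decay 95% in 60 seconds" figure.
 * The TOY_FSHIFT/TOY_FSCALE values mirror <sys/param.h>, but treat them
 * as assumptions of this toy.
 */
#if 0
#include <stdio.h>

#define TOY_FSHIFT	11
#define TOY_FSCALE	(1 << TOY_FSHIFT)

int
main(void)
{
	unsigned int ccpu = 0.95122942450071400909 * TOY_FSCALE;
	unsigned int pctcpu = TOY_FSCALE;	/* start at 100% */
	int sec;

	for (sec = 0; sec < 60; sec++)
		pctcpu = (pctcpu * ccpu) >> TOY_FSHIFT;
	/* Prints roughly 5%; fixed-point truncation shaves off a little. */
	printf("after 60s: %.1f%%\n", 100.0 * pctcpu / TOY_FSCALE);
	return 0;
}
#endif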

/*
 * Recompute process priorities, every second.
 */
void
schedcpu(void *arg)
{
	struct timeout *to = (struct timeout *)arg;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct proc *p;
	int s;
	unsigned int newcpu;
	int phz;

	/*
	 * If we have a statistics clock, use that to calculate CPU
	 * time, otherwise revert to using the profiling clock (which,
	 * in turn, defaults to hz if there is no separate profiling
	 * clock available)
	 */
	phz = stathz ? stathz : profhz;
	KASSERT(phz);

	LIST_FOREACH(p, &allproc, p_list) {
		/*
		 * Idle threads are never placed on the runqueue,
		 * therefore computing their priority is pointless.
		 */
		if (p->p_cpu != NULL &&
		    p->p_cpu->ci_schedstate.spc_idleproc == p)
			continue;
		/*
		 * Increment sleep time (if sleeping). We ignore overflow.
		 */
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		SCHED_LOCK(s);
		/*
		 * p_pctcpu is only for diagnostic tools such as ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (phz == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / phz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / phz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu);
		p->p_estcpu = newcpu;
		resetpriority(p);
		if (p->p_priority >= PUSER) {
			if (p->p_stat == SRUN &&
			    (p->p_priority / SCHED_PPQ) !=
			    (p->p_usrpri / SCHED_PPQ)) {
				remrunqueue(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		SCHED_UNLOCK(s);
	}
	uvm_meter();
	wakeup(&lbolt);
	timeout_add_sec(to, 1);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(struct proc *p)
{
	unsigned int newcpu = p->p_estcpu;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	SCHED_ASSERT_LOCKED();

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int)decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(p);
}
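
/*
 * Illustrative sketch (not part of the kernel build): counting the
 * one-second decay steps needed to drive the maximum p_estcpu of 255
 * to zero.  At load average 1 the decay factor is 2/3, and the loop
 * below reports 13 steps, in line with the ~6 * loadfactor bound
 * claimed above (loadfactor = 2 * loadav).  Plain integers stand in
 * for the fixpt_t arithmetic of decay_cpu().
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned int estcpu = 255;
	int steps = 0;

	while (estcpu > 0) {
		estcpu = estcpu * 2 / 3;	/* decay_cpu() at loadav 1 */
		steps++;
	}
	printf("decayed to zero in %d steps\n", steps);
	return 0;
}
#endif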

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.
 */
void
yield(void)
{
	struct proc *p = curproc;
	int s;

	NET_ASSERT_UNLOCKED();

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_ru.ru_nivcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

void
mi_switch(void)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct proc *p = curproc;
	struct proc *nextproc;
	struct process *pr = p->p_p;
	struct timespec ts;
#ifdef MULTIPROCESSOR
	int hold_count;
	int sched_count;
#endif

	assertwaitok();
	KASSERT(p->p_stat != SONPROC);

	SCHED_ASSERT_LOCKED();

#ifdef MULTIPROCESSOR
	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 */
	sched_count = __mp_release_all_but_one(&sched_lock);
	if (_kernel_lock_held())
		hold_count = __mp_release_all(&kernel_lock);
	else
		hold_count = 0;
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	nanouptime(&ts);
	if (timespeccmp(&ts, &spc->spc_runtime, <)) {
#if 0
		printf("uptime is not monotonic! "
		    "ts=%lld.%09lu, runtime=%lld.%09lu\n",
		    (long long)ts.tv_sec, ts.tv_nsec,
		    (long long)spc->spc_runtime.tv_sec,
		    spc->spc_runtime.tv_nsec);
#endif
	} else {
		timespecsub(&ts, &spc->spc_runtime, &ts);
		timespecadd(&p->p_rtime, &ts, &p->p_rtime);
	}

	/* add the time counts for this thread to the process's total */
	tuagg_unlocked(pr, p);

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR);

	nextproc = sched_chooseproc();

	if (p != nextproc) {
		uvmexp.swtch++;
		cpu_switchto(p, nextproc);
	} else {
		p->p_stat = SONPROC;
	}

	clear_resched(curcpu());

	SCHED_ASSERT_LOCKED();

	/*
	 * To preserve lock ordering, we need to release the sched lock
	 * and grab it after we grab the big lock.
	 * In the future, when the sched lock isn't recursive, we'll
	 * just release it here.
	 */
#ifdef MULTIPROCESSOR
	__mp_unlock(&sched_lock);
#endif

	SCHED_ASSERT_UNLOCKED();

	smr_idle();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KASSERT(p->p_cpu == curcpu());

	nanouptime(&p->p_cpu->ci_schedstate.spc_runtime);

#ifdef MULTIPROCESSOR
	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock and the scheduler lock.
	 */
	if (hold_count)
		__mp_acquire_count(&kernel_lock, hold_count);
	__mp_acquire_count(&sched_lock, sched_count + 1);
#endif
}
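
/*
 * Illustrative sketch (not part of the kernel build): the run-time
 * accounting in mi_switch() is plain interval arithmetic: timestamp
 * going on the CPU, timestamp coming off, accumulate the difference
 * (spc_runtime/p_rtime above).  The timespec macros are the ones from
 * <sys/time.h>; clock_gettime(2) stands in for nanouptime(9).
 */
#if 0
#include <sys/time.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	struct timespec start, now, diff, total = { 0, 0 };

	clock_gettime(CLOCK_MONOTONIC, &start);	/* went on CPU */
	/* ... do some work ... */
	clock_gettime(CLOCK_MONOTONIC, &now);	/* coming off CPU */
	if (!timespeccmp(&now, &start, <)) {	/* monotonicity guard */
		timespecsub(&now, &start, &diff);
		timespecadd(&total, &diff, &total);
	}
	printf("accumulated %lld.%09ld seconds\n",
	    (long long)total.tv_sec, total.tv_nsec);
	return 0;
}
#endif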

static __inline void
resched_proc(struct proc *p, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * This does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (p->p_cpu != NULL) ? p->p_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		need_resched(ci);
}

/*
 * Change process state to be runnable and place it on the run queue.
 */
void
setrunnable(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SONPROC:
	case SDEAD:
	case SIDL:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_p->ps_flags & PS_TRACED) != 0 && p->p_xstat != 0)
			atomic_setbits_int(&p->p_siglist, sigmask(p->p_xstat));
		/* FALLTHROUGH */
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;
	}
	p->p_stat = SRUN;
	p->p_cpu = sched_choosecpu(p);
	setrunqueue(p);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	resched_proc(p, p->p_priority);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	unsigned int newpriority;

	SCHED_ASSERT_LOCKED();

	newpriority = PUSER + p->p_estcpu +
	    NICE_WEIGHT * (p->p_p->ps_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	p->p_usrpri = newpriority;
	resched_proc(p, p->p_usrpri);
}
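
/*
 * Illustrative sketch (not part of the kernel build): the user-mode
 * priority is linear in recent CPU use and in nice.  The TOY_* constants
 * below mirror PUSER, NICE_WEIGHT, NZERO and MAXPRI from the tree at the
 * time of writing, but treat them, and the hypothetical
 * toy_resetpriority() helper, as assumptions of this toy.
 */
#if 0
#include <stdio.h>

#define TOY_PUSER	50
#define TOY_NICE_WEIGHT	2
#define TOY_NZERO	20
#define TOY_MAXPRI	127

static unsigned int
toy_resetpriority(unsigned int estcpu, int ps_nice)
{
	unsigned int newpriority;

	/* ps_nice is the nice value biased by NZERO, as in the kernel. */
	newpriority = TOY_PUSER + estcpu +
	    TOY_NICE_WEIGHT * (ps_nice - TOY_NZERO);
	return newpriority < TOY_MAXPRI ? newpriority : TOY_MAXPRI;
}

int
main(void)
{
	/* A CPU hog at nice 0 vs. a fresh process reniced to -5. */
	printf("hog:   %u\n", toy_resetpriority(40, 20));	/* 90 */
	printf("fresh: %u\n", toy_resetpriority(0, 15));	/* 40 */
	return 0;
}
#endif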

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in
 * resetpriority() above) will compute a different value each time p_estcpu
 * increases.  This can cause a switch, but unless the priority crosses a
 * PPQ boundary the actual queue will not change.  The CPU usage estimator
 * ramps up quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when the
 * system is busy.  The basic principle is that the system will 90% forget
 * that the process used a lot of CPU time in 5 * loadav seconds.  This
 * causes the system to favor processes which haven't run much recently, and
 * to round-robin among other processes.
 */
void
schedclock(struct proc *p)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	int s;

	if (p == spc->spc_idleproc)
		return;

	SCHED_LOCK(s);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
	resetpriority(p);
	if (p->p_priority >= PUSER)
		p->p_priority = p->p_usrpri;
	SCHED_UNLOCK(s);
}

void (*cpu_setperf)(int);

#define PERFPOL_MANUAL 0
#define PERFPOL_AUTO 1
#define PERFPOL_HIGH 2
int perflevel = 100;
int perfpolicy = PERFPOL_MANUAL;

#ifndef SMALL_KERNEL
/*
 * The code below handles CPU throttling.
 */
#include <sys/sysctl.h>

void setperf_auto(void *);
struct timeout setperf_to = TIMEOUT_INITIALIZER(setperf_auto, NULL);

void
setperf_auto(void *v)
{
	static uint64_t *idleticks, *totalticks;
	static int downbeats;

	int i, j;
	int speedup;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	uint64_t idle, total, allidle, alltotal;

	if (perfpolicy != PERFPOL_AUTO)
		return;

	if (!idleticks)
		if (!(idleticks = mallocarray(ncpusfound, sizeof(*idleticks),
		    M_DEVBUF, M_NOWAIT | M_ZERO)))
			return;
	if (!totalticks)
		if (!(totalticks = mallocarray(ncpusfound, sizeof(*totalticks),
		    M_DEVBUF, M_NOWAIT | M_ZERO))) {
			free(idleticks, M_DEVBUF,
			    sizeof(*idleticks) * ncpusfound);
			return;
		}

	alltotal = allidle = 0;
	j = 0;
	speedup = 0;
	CPU_INFO_FOREACH(cii, ci) {
		total = 0;
		for (i = 0; i < CPUSTATES; i++) {
			total += ci->ci_schedstate.spc_cp_time[i];
		}
		total -= totalticks[j];
		idle = ci->ci_schedstate.spc_cp_time[CP_IDLE] - idleticks[j];
		if (idle < total / 3)
			speedup = 1;
		alltotal += total;
		allidle += idle;
		idleticks[j] += idle;
		totalticks[j] += total;
		j++;
	}
	if (allidle < alltotal / 2)
		speedup = 1;
	if (speedup)
		downbeats = 5;

	if (speedup && perflevel != 100) {
		perflevel = 100;
		cpu_setperf(perflevel);
	} else if (!speedup && perflevel != 0 && --downbeats <= 0) {
		perflevel = 0;
		cpu_setperf(perflevel);
	}

	timeout_add_msec(&setperf_to, 100);
}
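
/*
 * Illustrative sketch (not part of the kernel build): the policy above
 * speeds up immediately (any CPU less than 1/3 idle, or the whole
 * machine less than 1/2 idle, over the last beat) but throttles only
 * after five consecutive quiet 100ms beats, ~half a second of
 * hysteresis.  The toy below replays that rule on a busy burst
 * followed by idle beats.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int downbeats = 0, perflevel = 0, beat, speedup;

	for (beat = 0; beat < 8; beat++) {
		speedup = (beat < 2);		/* busy for two beats */

		if (speedup)
			downbeats = 5;		/* re-arm the hold-off */
		if (speedup && perflevel != 100)
			perflevel = 100;	/* speed up at once */
		else if (!speedup && perflevel != 0 && --downbeats <= 0)
			perflevel = 0;		/* throttle after 5 beats */
		printf("beat %d: perflevel %d\n", beat, perflevel);
	}
	return 0;
}
#endif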

int
sysctl_hwsetperf(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int err, newperf;

	if (!cpu_setperf)
		return EOPNOTSUPP;

	if (perfpolicy != PERFPOL_MANUAL)
		return sysctl_rdint(oldp, oldlenp, newp, perflevel);

	newperf = perflevel;
	err = sysctl_int(oldp, oldlenp, newp, newlen, &newperf);
	if (err)
		return err;
	if (newperf > 100)
		newperf = 100;
	if (newperf < 0)
		newperf = 0;
	perflevel = newperf;
	cpu_setperf(perflevel);

	return 0;
}

int
sysctl_hwperfpolicy(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	char policy[32];
	int err;

	if (!cpu_setperf)
		return EOPNOTSUPP;

	switch (perfpolicy) {
	case PERFPOL_MANUAL:
		strlcpy(policy, "manual", sizeof(policy));
		break;
	case PERFPOL_AUTO:
		strlcpy(policy, "auto", sizeof(policy));
		break;
	case PERFPOL_HIGH:
		strlcpy(policy, "high", sizeof(policy));
		break;
	default:
		strlcpy(policy, "unknown", sizeof(policy));
		break;
	}

	if (newp == NULL)
		return sysctl_rdstring(oldp, oldlenp, newp, policy);

	err = sysctl_string(oldp, oldlenp, newp, newlen, policy,
	    sizeof(policy));
	if (err)
		return err;
	if (strcmp(policy, "manual") == 0)
		perfpolicy = PERFPOL_MANUAL;
	else if (strcmp(policy, "auto") == 0)
		perfpolicy = PERFPOL_AUTO;
	else if (strcmp(policy, "high") == 0)
		perfpolicy = PERFPOL_HIGH;
	else
		return EINVAL;

	if (perfpolicy == PERFPOL_AUTO) {
		timeout_add_msec(&setperf_to, 200);
	} else if (perfpolicy == PERFPOL_HIGH) {
		perflevel = 100;
		cpu_setperf(perflevel);
	}
	return 0;
}
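
/*
 * Illustrative sketch (not part of the kernel build): these handlers
 * back the hw.setperf and hw.perfpolicy sysctls, normally driven with
 * sysctl(8), e.g. "sysctl hw.setperf=50" or "sysctl hw.perfpolicy=auto".
 * The userland fragment below does the same through sysctl(2);
 * CTL_HW and HW_SETPERF come from <sys/sysctl.h>.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int mib[2] = { CTL_HW, HW_SETPERF };
	int newperf = 50, oldperf;
	size_t oldlen = sizeof(oldperf);

	/* Read the old level and request a new one (needs root). */
	if (sysctl(mib, 2, &oldperf, &oldlen, &newperf,
	    sizeof(newperf)) == -1) {
		perror("sysctl");
		return 1;
	}
	printf("hw.setperf: %d -> %d\n", oldperf, newperf);
	return 0;
}
#endif
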
#endif
707