/*	$OpenBSD: sched_bsd.c,v 1.73 2022/12/05 23:18:37 deraadt Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <uvm/uvm_extern.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/smr.h>
#include <sys/tracepoint.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif


int	lbolt;			/* once a second sleep address */
int	rrticks_init;		/* # of hardclock ticks per roundrobin() */
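/* rrticks_init is set to hz / 10 by scheduler_start(), i.e. the 100ms quantum. */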

#ifdef MULTIPROCESSOR
struct __mp_lock sched_lock;
#endif

void			schedcpu(void *);
uint32_t		decay_aftersleep(uint32_t, uint32_t);

/*
 * Force switch among equal priority processes every 100ms.
 */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks_init;

	if (ci->ci_curproc != NULL) {
		if (spc->spc_schedflags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SHOULDYIELD);
		} else {
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SEENRR);
		}
	}

	if (spc->spc_nrun)
		need_resched(ci);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
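/*
 * Worked example (a sketch, assuming loadav = 1.0, i.e. ldavg[0] == FSCALE):
 * loadfactor() yields 2 * FSCALE, so each decay_cpu() step multiplies by
 * 2*FSCALE / (2*FSCALE + FSCALE) = 2/3.  Applied once per schedcpu() run for
 * 5 * loadav seconds, (2/3)^5 =~ 0.13, i.e. roughly the 90% decay derived
 * above.
 */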

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
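/*
 * In other words (a sketch, assuming FSHIFT == 11, i.e. FSCALE == 2048, and
 * phz == 100): each p_cpticks tick should add roughly
 * (1 - exp(-1/20)) * FSCALE / phz =~ 0.0488 * 2048 / 100 =~ 1 to p_pctcpu,
 * which the fast path in schedcpu() below approximates as
 * p_cpticks << (FSHIFT - CCPU_SHIFT), i.e. a shift by zero.
 */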

/*
 * Recompute process priorities, every second.
 */
void
schedcpu(void *arg)
{
	struct timeout *to = (struct timeout *)arg;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct proc *p;
	int s;
	unsigned int newcpu;
	int phz;

	/*
	 * If we have a statistics clock, use that to calculate CPU
	 * time, otherwise revert to using the profiling clock (which,
	 * in turn, defaults to hz if there is no separate profiling
	 * clock available)
	 */
	phz = stathz ? stathz : profhz;
	KASSERT(phz);

	LIST_FOREACH(p, &allproc, p_list) {
		/*
		 * Idle threads are never placed on the runqueue,
		 * therefore computing their priority is pointless.
		 */
		if (p->p_cpu != NULL &&
		    p->p_cpu->ci_schedstate.spc_idleproc == p)
			continue;
		/*
		 * Increment sleep time (if sleeping). We ignore overflow.
		 */
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		SCHED_LOCK(s);
		/*
		 * p_pctcpu is only for diagnostic tools such as ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (phz == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / phz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / phz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu);
		setpriority(p, newcpu, p->p_p->ps_nice);

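		/*
		 * Requeue only if the new priority falls into a different
		 * run queue than the old one (SCHED_PPQ priorities map to
		 * the same queue), to avoid pointless queue churn.
		 */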
		if (p->p_stat == SRUN &&
		    (p->p_runpri / SCHED_PPQ) != (p->p_usrpri / SCHED_PPQ)) {
			remrunqueue(p);
			setrunqueue(p->p_cpu, p, p->p_usrpri);
		}
		SCHED_UNLOCK(s);
	}
	uvm_meter();
	wakeup(&lbolt);
	timeout_add_sec(to, 1);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
uint32_t
decay_aftersleep(uint32_t estcpu, uint32_t slptime)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	uint32_t newcpu;

	if (slptime > 5 * loadfac)
		newcpu = 0;
	else {
		newcpu = estcpu;
		slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --slptime)
			newcpu = decay_cpu(loadfac, newcpu);

	}

	return (newcpu);
}
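/*
 * For example (a sketch, again assuming loadav = 1.0): each decay step above
 * multiplies estcpu by 2/3, so a thread that wakes up after sleeping for
 * 3 seconds gets one further decay step here; schedcpu() already applied one
 * step during the first second of the sleep.
 */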

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.
 */
void
yield(void)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	setrunqueue(p->p_cpu, p, p->p_usrpri);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	setrunqueue(p->p_cpu, p, p->p_usrpri);
	p->p_ru.ru_nivcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

void
mi_switch(void)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct proc *p = curproc;
	struct proc *nextproc;
	struct process *pr = p->p_p;
	struct timespec ts;
#ifdef MULTIPROCESSOR
	int hold_count;
	int sched_count;
#endif

	assertwaitok();
	KASSERT(p->p_stat != SONPROC);

	SCHED_ASSERT_LOCKED();

#ifdef MULTIPROCESSOR
	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 */
	sched_count = __mp_release_all_but_one(&sched_lock);
	if (_kernel_lock_held())
		hold_count = __mp_release_all(&kernel_lock);
	else
		hold_count = 0;
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	nanouptime(&ts);
	if (timespeccmp(&ts, &spc->spc_runtime, <)) {
#if 0
		printf("uptime is not monotonic! "
		    "ts=%lld.%09lu, runtime=%lld.%09lu\n",
		    (long long)ts.tv_sec, ts.tv_nsec,
		    (long long)spc->spc_runtime.tv_sec,
		    spc->spc_runtime.tv_nsec);
#endif
	} else {
		timespecsub(&ts, &spc->spc_runtime, &ts);
		timespecadd(&p->p_rtime, &ts, &p->p_rtime);
	}

	/* add the time counts for this thread to the process's total */
	tuagg_unlocked(pr, p);

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR);

	nextproc = sched_chooseproc();

	if (p != nextproc) {
		uvmexp.swtch++;
		TRACEPOINT(sched, off__cpu, nextproc->p_tid + THREAD_PID_OFFSET,
		    nextproc->p_p->ps_pid);
		cpu_switchto(p, nextproc);
		TRACEPOINT(sched, on__cpu, NULL);
	} else {
		TRACEPOINT(sched, remain__cpu, NULL);
		p->p_stat = SONPROC;
	}

	clear_resched(curcpu());

	SCHED_ASSERT_LOCKED();

	/*
	 * To preserve lock ordering, we need to release the sched lock
	 * and grab it after we grab the big lock.
	 * In the future, when the sched lock isn't recursive, we'll
	 * just release it here.
	 */
#ifdef MULTIPROCESSOR
	__mp_unlock(&sched_lock);
#endif

	SCHED_ASSERT_UNLOCKED();

	smr_idle();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KASSERT(p->p_cpu == curcpu());

	nanouptime(&p->p_cpu->ci_schedstate.spc_runtime);

#ifdef MULTIPROCESSOR
	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock and the scheduler lock.
	 */
	if (hold_count)
		__mp_acquire_count(&kernel_lock, hold_count);
	__mp_acquire_count(&sched_lock, sched_count + 1);
#endif
}

/*
 * Change process state to be runnable,
 * placing it on the run queue.
 */
void
setrunnable(struct proc *p)
{
	struct process *pr = p->p_p;
	u_char prio;

	SCHED_ASSERT_LOCKED();

	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SONPROC:
	case SDEAD:
	case SIDL:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((pr->ps_flags & PS_TRACED) != 0 && pr->ps_xsig != 0)
			atomic_setbits_int(&p->p_siglist, sigmask(pr->ps_xsig));
		prio = p->p_usrpri;
		unsleep(p);
		break;
	case SSLEEP:
		prio = p->p_slppri;
		unsleep(p);		/* e.g. when sending signals */
		break;
	}
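	/*
	 * A NULL cpu_info lets setrunqueue() pick a CPU for the thread
	 * (see sched_choosecpu() in kern_sched.c).
	 */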
	setrunqueue(NULL, p, prio);
	if (p->p_slptime > 1) {
		uint32_t newcpu;

		newcpu = decay_aftersleep(p->p_estcpu, p->p_slptime);
		setpriority(p, newcpu, pr->ps_nice);
	}
	p->p_slptime = 0;
}

/*
 * Compute the priority of a process.
 */
void
setpriority(struct proc *p, uint32_t newcpu, uint8_t nice)
{
	unsigned int newprio;

	newprio = min((PUSER + newcpu + NICE_WEIGHT * (nice - NZERO)), MAXPRI);

	SCHED_ASSERT_LOCKED();
	p->p_estcpu = newcpu;
	p->p_usrpri = newprio;
}
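/*
 * For example (a sketch, assuming the usual PUSER 50, NICE_WEIGHT 2,
 * NZERO 20 and MAXPRI 127): a nice-0 thread with newcpu 0 gets priority
 * PUSER; every unit of accumulated estcpu worsens (raises) the priority
 * by one and every nice level by NICE_WEIGHT, clamped at MAXPRI.
 */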

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The cpu usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (setpriority() above)
 * will compute a different value each time p_estcpu increases. This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The cpu usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */
void
schedclock(struct proc *p)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	uint32_t newcpu;
	int s;

	if (p == spc->spc_idleproc || spc->spc_spinning)
		return;

	SCHED_LOCK(s);
	newcpu = ESTCPULIM(p->p_estcpu + 1);
	setpriority(p, newcpu, p->p_p->ps_nice);
	SCHED_UNLOCK(s);
}

void (*cpu_setperf)(int);

#define PERFPOL_MANUAL 0
#define PERFPOL_AUTO 1
#define PERFPOL_HIGH 2
int perflevel = 100;
int perfpolicy = PERFPOL_AUTO;

#ifndef SMALL_KERNEL
/*
 * The code below handles CPU throttling.
 */
#include <sys/sysctl.h>

void setperf_auto(void *);
struct timeout setperf_to = TIMEOUT_INITIALIZER(setperf_auto, NULL);
extern int hw_power;

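/*
 * setperf_auto() runs every 100ms while hw.perfpolicy is "auto": it samples
 * the per-CPU idle vs. total ticks since its previous run and raises the CPU
 * to full speed when the machine is on wall power, when any CPU spent less
 * than a third of the interval idle, or when overall idle time was below
 * half.  It only drops back to the lowest level after several consecutive
 * "slow" beats (the downbeats counter), to avoid oscillating.
 */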
void
setperf_auto(void *v)
{
	static uint64_t *idleticks, *totalticks;
	static int downbeats;
	int i, j = 0;
	int speedup = 0;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	uint64_t idle, total, allidle = 0, alltotal = 0;

	if (perfpolicy != PERFPOL_AUTO)
		return;

	if (cpu_setperf == NULL)
		return;

	if (hw_power) {
		speedup = 1;
		goto faster;
	}

	if (!idleticks)
		if (!(idleticks = mallocarray(ncpusfound, sizeof(*idleticks),
		    M_DEVBUF, M_NOWAIT | M_ZERO)))
			return;
	if (!totalticks)
		if (!(totalticks = mallocarray(ncpusfound, sizeof(*totalticks),
		    M_DEVBUF, M_NOWAIT | M_ZERO))) {
			free(idleticks, M_DEVBUF,
			    sizeof(*idleticks) * ncpusfound);
			return;
		}
	CPU_INFO_FOREACH(cii, ci) {
		if (!cpu_is_online(ci))
			continue;
		total = 0;
		for (i = 0; i < CPUSTATES; i++) {
			total += ci->ci_schedstate.spc_cp_time[i];
		}
		total -= totalticks[j];
		idle = ci->ci_schedstate.spc_cp_time[CP_IDLE] - idleticks[j];
		if (idle < total / 3)
			speedup = 1;
		alltotal += total;
		allidle += idle;
		idleticks[j] += idle;
		totalticks[j] += total;
		j++;
	}
	if (allidle < alltotal / 2)
		speedup = 1;
	if (speedup && downbeats < 5)
		downbeats++;

	if (speedup && perflevel != 100) {
faster:
		perflevel = 100;
		cpu_setperf(perflevel);
	} else if (!speedup && perflevel != 0 && --downbeats <= 0) {
		perflevel = 0;
		cpu_setperf(perflevel);
	}

	timeout_add_msec(&setperf_to, 100);
}

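/*
 * sysctl_hwsetperf() backs the hw.setperf knob; for example
 * "sysctl hw.setperf=50" requests a 50% performance level, but writes are
 * only honored while hw.perfpolicy is "manual" (otherwise it is read-only).
 */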
int
sysctl_hwsetperf(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int err;

	if (!cpu_setperf)
		return EOPNOTSUPP;

	if (perfpolicy != PERFPOL_MANUAL)
		return sysctl_rdint(oldp, oldlenp, newp, perflevel);

	err = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
	    &perflevel, 0, 100);
	if (err)
		return err;

	if (newp != NULL)
		cpu_setperf(perflevel);

	return 0;
}

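/*
 * sysctl_hwperfpolicy() backs the hw.perfpolicy knob; for example
 * "sysctl hw.perfpolicy=manual" hands control to hw.setperf, while "auto"
 * re-arms the setperf_auto() timeout and "high" pins full speed.
 */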
int
sysctl_hwperfpolicy(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	char policy[32];
	int err;

	if (!cpu_setperf)
		return EOPNOTSUPP;

	switch (perfpolicy) {
	case PERFPOL_MANUAL:
		strlcpy(policy, "manual", sizeof(policy));
		break;
	case PERFPOL_AUTO:
		strlcpy(policy, "auto", sizeof(policy));
		break;
	case PERFPOL_HIGH:
		strlcpy(policy, "high", sizeof(policy));
		break;
	default:
		strlcpy(policy, "unknown", sizeof(policy));
		break;
	}

	if (newp == NULL)
		return sysctl_rdstring(oldp, oldlenp, newp, policy);

	err = sysctl_string(oldp, oldlenp, newp, newlen, policy, sizeof(policy));
	if (err)
		return err;
	if (strcmp(policy, "manual") == 0)
		perfpolicy = PERFPOL_MANUAL;
	else if (strcmp(policy, "auto") == 0)
		perfpolicy = PERFPOL_AUTO;
	else if (strcmp(policy, "high") == 0)
		perfpolicy = PERFPOL_HIGH;
	else
		return EINVAL;

	if (perfpolicy == PERFPOL_AUTO) {
		timeout_add_msec(&setperf_to, 200);
	} else if (perfpolicy == PERFPOL_HIGH) {
		perflevel = 100;
		cpu_setperf(perflevel);
	}
	return 0;
}
#endif

void
scheduler_start(void)
{
	static struct timeout schedcpu_to;

	/*
	 * We avoid polluting the global namespace by keeping the scheduler
	 * timeouts static in this function.
	 * We set up the timeout here and kick schedcpu once to make it do
	 * its job.
	 */
	timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);

	rrticks_init = hz / 10;
	schedcpu(&schedcpu_to);

#ifndef SMALL_KERNEL
	if (perfpolicy == PERFPOL_AUTO)
		timeout_add_msec(&setperf_to, 200);
#endif
}