/*	$OpenBSD: sched_bsd.c,v 1.82 2023/08/18 09:18:52 claudio Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/clockintr.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <uvm/uvm_extern.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/smr.h>
#include <sys/tracepoint.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

uint32_t roundrobin_period;	/* [I] roundrobin period (ns) */
int	lbolt;			/* once a second sleep address */

#ifdef MULTIPROCESSOR
struct __mp_lock sched_lock;
#endif

void			update_loadavg(void *);
void			schedcpu(void *);
uint32_t		decay_aftersleep(uint32_t, uint32_t);

extern struct cpuset sched_idle_cpus;

/*
 * constants for averages over 1, 5, and 15 minutes when sampling at
 * 5 second intervals.
 */
static const fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

struct loadavg averunnable;

/*
 * Force switch among equal priority processes every 100ms.
 */
void
roundrobin(struct clockintr *cl, void *cf)
{
	uint64_t count;
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	count = clockintr_advance(cl, roundrobin_period);

	if (ci->ci_curproc != NULL) {
		if (spc->spc_schedflags & SPCF_SEENRR || count >= 2) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SEENRR | SPCF_SHOULDYIELD);
		} else {
			atomic_setbits_int(&spc->spc_schedflags,
			    SPCF_SEENRR);
		}
	}

	if (spc->spc_nrun)
		need_resched(ci);
}
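
/*
 * The yield hint travels through spc_schedflags: the first tick that finds
 * a thread on the CPU marks it with SPCF_SEENRR; if the same thread is
 * still there on the next tick, or the interrupt was delayed by two or
 * more periods (count >= 2), SPCF_SHOULDYIELD is added as well.  Both
 * bits are cleared again once the thread finally passes through
 * mi_switch(), which clears SPCF_SWITCHCLEAR below (assumed here to
 * cover both round-robin bits).
 */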

/*
 * update_loadavg: compute a tenex style load average of a quantity on
 * 1, 5, and 15 minute intervals.
 */
void
update_loadavg(void *arg)
{
	struct timeout *to = (struct timeout *)arg;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int i, nrun = 0;

	CPU_INFO_FOREACH(cii, ci) {
		if (!cpuset_isset(&sched_idle_cpus, ci))
			nrun++;
		nrun += ci->ci_schedstate.spc_nrun;
	}

	for (i = 0; i < 3; i++) {
		averunnable.ldavg[i] = (cexp[i] * averunnable.ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
	}

	timeout_add_sec(to, 5);
}
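
/*
 * For a rough feel of the update above (assuming the usual FSHIFT of 11,
 * i.e. FSCALE = 2048): each 5 second sample computes, in fixed point,
 *
 *	ldavg' = d * ldavg + nrun * (1 - d) * FSCALE
 *
 * where d = cexp[i] / FSCALE.  With a steady nrun of 2, the 1 minute
 * average (d = exp(-1/12) ~= 0.92) converges on 2 * FSCALE = 4096, i.e.
 * a displayed load of 2.00; starting from 0 it reaches roughly
 * 2 * (1 - e^-1) ~= 1.26 after one minute (12 samples).  Illustrative
 * numbers only.
 */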

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
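
/*
 * Concretely (illustrative, with FSCALE = 2048): at a load average of
 * 1.00, ldavg[0] == FSCALE, so loadfactor() yields 2 * FSCALE and
 * decay_cpu() multiplies by 2*FSCALE / (2*FSCALE + FSCALE) = 2/3 each
 * second.  Five applications leave (2/3)^5 ~= 0.13 of the original
 * p_estcpu, close to the 10% target derived above; at a load of 2.00
 * the factor is 4/5 and (4/5)^10 ~= 0.11.
 */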

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
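
/*
 * Rough numbers (illustrative, assuming stathz = 100 and FSHIFT = 11, so
 * the shift in schedcpu() is zero): each second the old p_pctcpu is first
 * scaled by ccpu/FSCALE ~= 0.9512 and then the stat clock ticks charged
 * to the thread over that second are added.  A thread that monopolizes a
 * CPU therefore settles near 100 / (1 - 0.9512) ~= 2050 ~= FSCALE, which
 * ps(1) renders as roughly 100%, and 0.9512^60 ~= 0.05 is where the
 * "95% in 60 seconds" figure above comes from.
 */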

/*
 * Recompute process priorities, every second.
 */
void
schedcpu(void *arg)
{
	struct timeout *to = (struct timeout *)arg;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct proc *p;
	int s;
	unsigned int newcpu;

	LIST_FOREACH(p, &allproc, p_list) {
		/*
		 * Idle threads are never placed on the runqueue,
		 * therefore computing their priority is pointless.
		 */
		if (p->p_cpu != NULL &&
		    p->p_cpu->ci_schedstate.spc_idleproc == p)
			continue;
		/*
		 * Increment sleep time (if sleeping). We ignore overflow.
		 */
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		SCHED_LOCK(s);
		/*
		 * p_pctcpu is only for diagnostic tools such as ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (stathz == 100)?
		    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
		    100 * (((fixpt_t) p->p_cpticks)
			<< (FSHIFT - CCPU_SHIFT)) / stathz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / stathz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu);
		setpriority(p, newcpu, p->p_p->ps_nice);

		if (p->p_stat == SRUN &&
		    (p->p_runpri / SCHED_PPQ) != (p->p_usrpri / SCHED_PPQ)) {
			remrunqueue(p);
			setrunqueue(p->p_cpu, p, p->p_usrpri);
		}
		SCHED_UNLOCK(s);
	}
	wakeup(&lbolt);
	timeout_add_sec(to, 1);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
uint32_t
decay_aftersleep(uint32_t estcpu, uint32_t slptime)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	uint32_t newcpu;

	if (slptime > 5 * loadfac)
		newcpu = 0;
	else {
		newcpu = estcpu;
		slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --slptime)
			newcpu = decay_cpu(loadfac, newcpu);
	}

	return (newcpu);
}
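
/*
 * As an illustration: each pass of the loop above applies decay_cpu()
 * once per additional second spent asleep (the first second was already
 * handled by schedcpu()).  At a load average of 1.00 that multiplies
 * p_estcpu by roughly 2/3 per second, so even the maximal value of 255
 * is ground down to 0 after about 13 iterations, which roughly matches
 * the "six times the loadfactor" figure in the comment above.
 */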

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.
 */
void
yield(void)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	setrunqueue(p->p_cpu, p, p->p_usrpri);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.  The next process to run is
 * picked by the normal selection criteria in sched_chooseproc().
 */
void
preempt(void)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	setrunqueue(p->p_cpu, p, p->p_usrpri);
	p->p_ru.ru_nivcsw++;
	mi_switch();
	SCHED_UNLOCK(s);
}

void
mi_switch(void)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct proc *p = curproc;
	struct proc *nextproc;
	struct process *pr = p->p_p;
	struct timespec ts;
#ifdef MULTIPROCESSOR
	int hold_count;
	int sched_count;
#endif

	assertwaitok();
	KASSERT(p->p_stat != SONPROC);

	SCHED_ASSERT_LOCKED();

#ifdef MULTIPROCESSOR
	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 */
	sched_count = __mp_release_all_but_one(&sched_lock);
	if (_kernel_lock_held())
		hold_count = __mp_release_all(&kernel_lock);
	else
		hold_count = 0;
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	nanouptime(&ts);
	if (timespeccmp(&ts, &spc->spc_runtime, <)) {
#if 0
		printf("uptime is not monotonic! "
		    "ts=%lld.%09lu, runtime=%lld.%09lu\n",
		    (long long)ts.tv_sec, ts.tv_nsec,
		    (long long)spc->spc_runtime.tv_sec,
		    spc->spc_runtime.tv_nsec);
#endif
	} else {
		timespecsub(&ts, &spc->spc_runtime, &ts);
		timespecadd(&p->p_rtime, &ts, &p->p_rtime);
	}

	/* add the time counts for this thread to the process's total */
	tuagg_unlocked(pr, p);

	/* Stop any optional clock interrupts. */
	if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
		atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
		clockintr_cancel(spc->spc_itimer);
	}
	if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
		atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
		clockintr_cancel(spc->spc_profclock);
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR);

	nextproc = sched_chooseproc();

	if (p != nextproc) {
		uvmexp.swtch++;
		TRACEPOINT(sched, off__cpu, nextproc->p_tid + THREAD_PID_OFFSET,
		    nextproc->p_p->ps_pid);
		cpu_switchto(p, nextproc);
		TRACEPOINT(sched, on__cpu, NULL);
	} else {
		TRACEPOINT(sched, remain__cpu, NULL);
		p->p_stat = SONPROC;
	}

	clear_resched(curcpu());

	SCHED_ASSERT_LOCKED();

	/*
	 * To preserve lock ordering, we need to release the sched lock
	 * and grab it after we grab the big lock.
	 * In the future, when the sched lock isn't recursive, we'll
	 * just release it here.
	 */
#ifdef MULTIPROCESSOR
	__mp_unlock(&sched_lock);
#endif

	SCHED_ASSERT_UNLOCKED();

	smr_idle();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KASSERT(p->p_cpu == curcpu());

	/* Start any optional clock interrupts needed by the thread. */
	if (ISSET(p->p_p->ps_flags, PS_ITIMER)) {
		atomic_setbits_int(&p->p_cpu->ci_schedstate.spc_schedflags,
		    SPCF_ITIMER);
		clockintr_advance(p->p_cpu->ci_schedstate.spc_itimer,
		    hardclock_period);
	}
	if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
		atomic_setbits_int(&p->p_cpu->ci_schedstate.spc_schedflags,
		    SPCF_PROFCLOCK);
		clockintr_advance(p->p_cpu->ci_schedstate.spc_profclock,
		    profclock_period);
	}

	nanouptime(&p->p_cpu->ci_schedstate.spc_runtime);

#ifdef MULTIPROCESSOR
	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock and the scheduler lock.
	 */
	if (hold_count)
		__mp_acquire_count(&kernel_lock, hold_count);
	__mp_acquire_count(&sched_lock, sched_count + 1);
#endif
}

/*
 * Change process state to be runnable,
 * placing it on the run queue.
 */
void
setrunnable(struct proc *p)
{
	struct process *pr = p->p_p;
	u_char prio;

	SCHED_ASSERT_LOCKED();

	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SONPROC:
	case SDEAD:
	case SIDL:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((pr->ps_flags & PS_TRACED) != 0 && pr->ps_xsig != 0)
			atomic_setbits_int(&p->p_siglist, sigmask(pr->ps_xsig));
		prio = p->p_usrpri;
		unsleep(p);
		setrunqueue(NULL, p, prio);
		break;
	case SSLEEP:
		prio = p->p_slppri;
		unsleep(p);		/* e.g. when sending signals */

		/* if not yet asleep, don't add to runqueue */
		if (ISSET(p->p_flag, P_WSLEEP))
			return;
		setrunqueue(NULL, p, prio);
		TRACEPOINT(sched, wakeup, p->p_tid + THREAD_PID_OFFSET,
		    p->p_p->ps_pid, CPU_INFO_UNIT(p->p_cpu));
		break;
	}
	if (p->p_slptime > 1) {
		uint32_t newcpu;

		newcpu = decay_aftersleep(p->p_estcpu, p->p_slptime);
		setpriority(p, newcpu, pr->ps_nice);
	}
	p->p_slptime = 0;
}

/*
 * Compute the priority of a process.
 */
void
setpriority(struct proc *p, uint32_t newcpu, uint8_t nice)
{
	unsigned int newprio;

	newprio = min((PUSER + newcpu + NICE_WEIGHT * (nice - NZERO)), MAXPRI);

	SCHED_ASSERT_LOCKED();
	p->p_estcpu = newcpu;
	p->p_usrpri = newprio;
}
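
/*
 * For a feel of the formula (illustrative, assuming the customary values
 * PUSER 50, NICE_WEIGHT 2, NZERO 20 and MAXPRI 127 from param.h/sched.h):
 * a process with a zero p_estcpu at the default nice of 20 gets priority
 * 50; renicing it to +19 adds 2 * 19 = 38 for a priority of 88; every
 * point of accumulated p_estcpu worsens (raises) the value by one more,
 * and the result is clamped at MAXPRI.  Larger numbers mean a weaker
 * claim on the CPU.
 */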

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The cpu usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in setpriority()
 * above) will compute a different value each time p_estcpu increases. This
 * can cause a switch, but unless the priority crosses a PPQ boundary the
 * actual queue will not change.  The cpu usage estimator ramps up quite
 * quickly when the process is running (linearly), and decays away
 * exponentially, at a rate which is proportionally slower when the system is
 * busy.  The basic principle is that the system will 90% forget that the
 * process used a lot of CPU time in 5 * loadav seconds.  This causes the
 * system to favor processes which haven't run much recently, and to
 * round-robin among other processes.
 */
void
schedclock(struct proc *p)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	uint32_t newcpu;
	int s;

	if (p == spc->spc_idleproc || spc->spc_spinning)
		return;

	SCHED_LOCK(s);
	newcpu = ESTCPULIM(p->p_estcpu + 1);
	setpriority(p, newcpu, p->p_p->ps_nice);
	SCHED_UNLOCK(s);
}
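
/*
 * Back-of-the-envelope view of the ramp described above (illustrative;
 * the call rate R of schedclock() is set by the stat clock and is not
 * fixed here): while a thread stays on the CPU its p_estcpu climbs by
 * about R per second, and once a second schedcpu() scales it by
 * loadfac / (loadfac + FSCALE), i.e. 2/3 at a load of 1.00.  A permanent
 * CPU hog therefore levels off near 2 * R (subject to the ESTCPULIM cap),
 * while a thread that stops running loses roughly 90% of that within
 * about five seconds.
 */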

void (*cpu_setperf)(int);

#define PERFPOL_MANUAL 0
#define PERFPOL_AUTO 1
#define PERFPOL_HIGH 2
int perflevel = 100;
int perfpolicy = PERFPOL_AUTO;

#ifndef SMALL_KERNEL
/*
 * The code below handles CPU throttling.
 */
#include <sys/sysctl.h>

void setperf_auto(void *);
struct timeout setperf_to = TIMEOUT_INITIALIZER(setperf_auto, NULL);
extern int hw_power;

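/*
 * Adaptive policy, in brief: every 100ms beat, setperf_auto() compares the
 * idle and total ticks each online CPU accumulated since the previous beat.
 * It jumps straight to 100% when hw_power indicates external power, when
 * any single CPU was idle for less than a third of the interval, or when
 * the machine as a whole was idle less than half the time.  Dropping back
 * to perflevel 0 is deliberately sluggish: the downbeats counter must
 * first drain, so it takes roughly five consecutive quiet beats (about
 * half a second) after a busy spell before the clock is throttled down.
 */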
void
setperf_auto(void *v)
{
	static uint64_t *idleticks, *totalticks;
	static int downbeats;
	int i, j = 0;
	int speedup = 0;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	uint64_t idle, total, allidle = 0, alltotal = 0;

	if (perfpolicy != PERFPOL_AUTO)
		return;

	if (cpu_setperf == NULL)
		return;

	if (hw_power) {
		speedup = 1;
		goto faster;
	}

	if (!idleticks)
		if (!(idleticks = mallocarray(ncpusfound, sizeof(*idleticks),
		    M_DEVBUF, M_NOWAIT | M_ZERO)))
			return;
	if (!totalticks)
		if (!(totalticks = mallocarray(ncpusfound, sizeof(*totalticks),
		    M_DEVBUF, M_NOWAIT | M_ZERO))) {
			free(idleticks, M_DEVBUF,
			    sizeof(*idleticks) * ncpusfound);
			return;
		}
	CPU_INFO_FOREACH(cii, ci) {
		if (!cpu_is_online(ci))
			continue;
		total = 0;
		for (i = 0; i < CPUSTATES; i++) {
			total += ci->ci_schedstate.spc_cp_time[i];
		}
		total -= totalticks[j];
		idle = ci->ci_schedstate.spc_cp_time[CP_IDLE] - idleticks[j];
		if (idle < total / 3)
			speedup = 1;
		alltotal += total;
		allidle += idle;
		idleticks[j] += idle;
		totalticks[j] += total;
		j++;
	}
	if (allidle < alltotal / 2)
		speedup = 1;
	if (speedup && downbeats < 5)
		downbeats++;

	if (speedup && perflevel != 100) {
faster:
		perflevel = 100;
		cpu_setperf(perflevel);
	} else if (!speedup && perflevel != 0 && --downbeats <= 0) {
		perflevel = 0;
		cpu_setperf(perflevel);
	}

	timeout_add_msec(&setperf_to, 100);
}

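/*
 * sysctl(2) glue.  These handlers are presumed to sit behind the
 * hw.setperf and hw.perfpolicy knobs, e.g. "sysctl hw.perfpolicy=manual"
 * followed by "sysctl hw.setperf=50".  hw.setperf is read-only unless the
 * policy is manual; switching the policy to auto re-arms the setperf_to
 * timeout above, and "high" simply pins perflevel at 100.
 */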
int
sysctl_hwsetperf(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int err;

	if (!cpu_setperf)
		return EOPNOTSUPP;

	if (perfpolicy != PERFPOL_MANUAL)
		return sysctl_rdint(oldp, oldlenp, newp, perflevel);

	err = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
	    &perflevel, 0, 100);
	if (err)
		return err;

	if (newp != NULL)
		cpu_setperf(perflevel);

	return 0;
}

int
sysctl_hwperfpolicy(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	char policy[32];
	int err;

	if (!cpu_setperf)
		return EOPNOTSUPP;

	switch (perfpolicy) {
	case PERFPOL_MANUAL:
		strlcpy(policy, "manual", sizeof(policy));
		break;
	case PERFPOL_AUTO:
		strlcpy(policy, "auto", sizeof(policy));
		break;
	case PERFPOL_HIGH:
		strlcpy(policy, "high", sizeof(policy));
		break;
	default:
		strlcpy(policy, "unknown", sizeof(policy));
		break;
	}

	if (newp == NULL)
		return sysctl_rdstring(oldp, oldlenp, newp, policy);

	err = sysctl_string(oldp, oldlenp, newp, newlen, policy, sizeof(policy));
	if (err)
		return err;
	if (strcmp(policy, "manual") == 0)
		perfpolicy = PERFPOL_MANUAL;
	else if (strcmp(policy, "auto") == 0)
		perfpolicy = PERFPOL_AUTO;
	else if (strcmp(policy, "high") == 0)
		perfpolicy = PERFPOL_HIGH;
	else
		return EINVAL;

	if (perfpolicy == PERFPOL_AUTO) {
		timeout_add_msec(&setperf_to, 200);
	} else if (perfpolicy == PERFPOL_HIGH) {
		perflevel = 100;
		cpu_setperf(perflevel);
	}
	return 0;
}
#endif

void
scheduler_start(void)
{
	static struct timeout schedcpu_to;
	static struct timeout loadavg_to;

	/*
	 * We avoid polluting the global namespace by keeping the scheduler
	 * timeouts static in this function.
	 * We set up the timeouts here and kick schedcpu once to make it do
	 * its job.
	 */
	timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);
	timeout_set(&loadavg_to, update_loadavg, &loadavg_to);

	schedcpu(&schedcpu_to);
	update_loadavg(&loadavg_to);

#ifndef SMALL_KERNEL
	if (perfpolicy == PERFPOL_AUTO)
		timeout_add_msec(&setperf_to, 200);
#endif
}