/*	$NetBSD: sched_4bsd.c,v 1.22 2008/05/19 12:48:54 rmind Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.22 2008/05/19 12:48:54 rmind Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* Number of hardclock ticks per sched_tick() */
static int rrticks;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 *
 * There's no need to lock anywhere in this routine, as it's
 * CPU-local and runs at IPL_SCHED (called from clock interrupt).
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_ticks = rrticks;

	if (CURCPU_IDLE_P()) {
		cpu_need_resched(ci, 0);
		return;
	}
	if (curlwp->l_class == SCHED_FIFO) {
		return;
	}
	if (spc->spc_flags & SPCF_SEENRR) {
		/*
		 * The process has already been through a roundrobin
		 * without switching and may be hogging the CPU.
		 * Indicate that the process should yield.
		 */
		spc->spc_flags |= SPCF_SHOULDYIELD;
		cpu_need_resched(ci, 0);
	} else
		spc->spc_flags |= SPCF_SEENRR;
}
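
/*
 * An illustrative timeline, assuming the traditional hz = 100 (so
 * rrticks = 10 and sched_tick() runs every 100ms): a SCHED_OTHER LWP
 * still on the CPU at one sched_tick() is tagged SPCF_SEENRR; if it
 * is still there at the next one, 100ms later, SPCF_SHOULDYIELD is
 * set and cpu_need_resched() forces a switch among its equal-priority
 * peers at the next opportunity.
 */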

/*
 * Why PRIO_MAX - 2? From setpriority(2):
 *
 *	prio is a value in the range -20 to 20.  The default priority is
 *	0; lower priorities cause more favorable scheduling.  A value of
 *	19 or 20 will schedule a process only when nothing at priority <=
 *	0 is runnable.
 *
 * This gives estcpu influence over 18 priority levels, and leaves nice
 * with 40 levels.  One way to think about it is that nice has 20 levels
 * either side of estcpu's 18.
 */
#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((PRIO_MAX - 2) << ESTCPU_SHIFT)
#define	ESTCPU_ACCUM	(1 << (ESTCPU_SHIFT - 1))
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
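
/*
 * A worked instance of the scale above (derived from these macros and
 * the formula in resetpriority() below): ESTCPU_SHIFT = 11 gives
 * ESTCPU_MAX = 18 << 11 = 36864, so l_estcpu >> ESTCPU_SHIFT ranges
 * over 0..18 -- the 18 priority levels of estcpu influence -- while
 * the NZERO-biased p_nice subtracts up to 40 more, with the result
 * clamped at 0.
 */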

/*
 * Constants for digital decay and forget:
 *	90% of (l_estcpu) usage in 5 * loadav time
 *	95% of (l_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates l_estcpu and l_cpticks independently.
 *
 * We wish to decay away 90% of l_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		l_estcpu *= decay;
 * will compute
 * 	l_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
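
/*
 * A numeric spot-check of the derivation above, assuming a load
 * average of exactly 1.0 (i.e. ldavg == FSCALE): b = 2, so
 * decay = b / (b + 1) = 2/3, and
 *
 *	(2/3)**5 =~ 0.13	(2/3)**6 =~ 0.088
 *
 * so 90% of l_estcpu is indeed forgotten after a little more than
 * 5 seconds, matching the "power: 5.68" entry for loadav 1 in the
 * table above.
 */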

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* Avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}
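
/*
 * On 32-bit ports the guard above keeps the whole calculation in
 * fixpt_t: with estcpu bounded by ESTCPU_MAX, checking that
 * loadfac <= FIXPT_MAX / ESTCPU_MAX means estcpu * loadfac cannot
 * wrap, so the 64-bit fall-back is only needed for implausibly high
 * loads (with FSCALE == 2048, a load average of roughly 28 or more).
 */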

/*
 * For all load averages >= 1 and max l_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay l_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * Note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
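
/*
 * Spot-check of the short cut above, again assuming a load average of
 * 1.0: loadfac = 2 * FSCALE, so the early return fires once n reaches
 * 14.  And indeed (2/3)**14 =~ 0.0034, which takes even the historical
 * maximum of 255 << ESTCPU_SHIFT down to about 0.87 << ESTCPU_SHIFT,
 * i.e. below 1 << ESTCPU_SHIFT, so returning zero straight away loses
 * nothing.
 */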

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct lwp *l, int batch)
{
	fixpt_t loadfac;
	int sleeptm;

	/*
	 * If the LWP has slept an entire second, stop recalculating
	 * its priority until it wakes up.
	 */
	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED) {
		l->l_slptime++;
		sleeptm = 1;
	} else {
		sleeptm = 0x7fffffff;
	}

	if (l->l_slptime <= sleeptm) {
		loadfac = loadfactor(averunnable.ldavg[0]);
		l->l_estcpu = decay_cpu(loadfac, l->l_estcpu);
		resetpriority(l);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	fixpt_t loadfac;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in sched_pstats */
	l->l_estcpu = decay_cpu_batch(loadfac, l->l_estcpu, l->l_slptime);
	resetpriority(l);
}

void
sched_rqinit(void)
{

}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

void
sched_nice(struct proc *p, int n)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));

	p->p_nice = n;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * Recompute the priority of an LWP.  Arrange to reschedule if
 * the resulting priority is better than that of the current LWP.
 */
static void
resetpriority(struct lwp *l)
{
	pri_t pri;
	struct proc *p = l->l_proc;

	KASSERT(lwp_locked(l, NULL));

	if (l->l_class != SCHED_OTHER)
		return;

	/* See comments above ESTCPU_SHIFT definition. */
	pri = (PRI_KERNEL - 1) - (l->l_estcpu >> ESTCPU_SHIFT) - p->p_nice;
	pri = imax(pri, 0);
	if (pri != l->l_priority)
		lwp_changepri(l, pri);
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (l_estcpu)
 * is increased here.  The formula for computing priorities (see
 * resetpriority() above) will compute a different value each time l_estcpu
 * increases.  This can cause a switch, but unless the priority crosses a
 * PPQ boundary the actual queue will not change.  The CPU usage estimator
 * ramps up quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when the
 * system is busy.  The basic principle is that the system will 90% forget
 * that the process used a lot of CPU time in 5 * loadav seconds.  This
 * causes the system to favor processes which haven't run much recently,
 * and to round-robin among other processes.
 */
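
/*
 * Concretely (numbers follow from the macros above): each call below
 * adds ESTCPU_ACCUM = 1 << (ESTCPU_SHIFT - 1), i.e. half a priority
 * level, so a compute-bound LWP drops one level for roughly every two
 * schedclock ticks it runs, until ESTCPULIM caps l_estcpu or the
 * once-per-second decay in sched_pstats_hook() catches up with it.
 */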

void
sched_schedclock(struct lwp *l)
{

	if (l->l_class != SCHED_OTHER)
		return;

	KASSERT(!CURCPU_IDLE_P());
	l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
	lwp_lock(l);
	resetpriority(l);
	lwp_unlock(l);
}

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	lwp_t *pl;

	KASSERT(mutex_owned(parent->p_lock));

	pl = LIST_FIRST(&parent->p_lwps);
	child->p_estcpu_inherited = pl->l_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Charge back parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;
	lwp_t *pl, *cl;

	/* XXX Only if parent != init?? */

	mutex_enter(parent->p_lock);
	pl = LIST_FIRST(&parent->p_lwps);
	cl = LIST_FIRST(&child->p_lwps);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (cl->l_estcpu > estcpu) {
		lwp_lock(pl);
		pl->l_estcpu = ESTCPULIM(pl->l_estcpu + cl->l_estcpu - estcpu);
		lwp_unlock(pl);
	}
	mutex_exit(parent->p_lock);
}
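
/*
 * In words: the estcpu the child inherited at fork is decayed over
 * the child's lifetime, approximating the share of that history the
 * parent's own l_estcpu has already shed through normal decay; any
 * estcpu the child accumulated beyond that is charged back to the
 * parent here.
 */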

void
sched_wakeup(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	l2->l_estcpu = l1->l_estcpu;
}

void
sched_lwp_exit(struct lwp *l)
{

}

void
sched_lwp_collect(struct lwp *t)
{
	lwp_t *l;

	/* Absorb estcpu value of collected LWP. */
	l = curlwp;
	lwp_lock(l);
	l->l_estcpu += t->l_estcpu;
	lwp_unlock(l);
}

void
sched_oncpu(lwp_t *l)
{

}

void
sched_newts(lwp_t *l)
{

}

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_rtts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int rttsms = hztoms(rrticks);

	node = *rnode;
	node.sysctl_data = &rttsms;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}
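
/*
 * Given the nodes created below, the handler above should surface to
 * userland as the read-only kern.sched.rtts; since rrticks is fixed
 * at hz / 10, "sysctl kern.sched.rtts" should report 100 (ms)
 * regardless of hz.
 */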

SYSCTL_SETUP(sysctl_sched_4bsd_setup, "sysctl sched setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	rrticks = hz / 10;

	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("4.4BSD"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "rtts",
		SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
		sysctl_sched_rtts, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
}
531