/*	$NetBSD: sched_4bsd.c,v 1.35 2018/09/03 16:29:35 riastradh Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.35 2018/09/03 16:29:35 riastradh Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/lockdebug.h>
#include <sys/intr.h>

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* Number of hardclock ticks per sched_tick() */
static int rrticks __read_mostly;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 *
 * There's no need to lock anywhere in this routine, as it's
 * CPU-local and runs at IPL_SCHED (called from clock interrupt).
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	lwp_t *l;

	spc->spc_ticks = rrticks;

	if (CURCPU_IDLE_P()) {
		cpu_need_resched(ci, 0);
		return;
	}
	l = ci->ci_data.cpu_onproc;
	if (l == NULL) {
		return;
	}
	switch (l->l_class) {
	case SCHED_FIFO:
		/* No timeslicing for FIFO jobs. */
		break;
	case SCHED_RR:
		/* Force it into mi_switch() to look for other jobs to run. */
		cpu_need_resched(ci, RESCHED_KPREEMPT);
		break;
	default:
		if (spc->spc_flags & SPCF_SHOULDYIELD) {
			/*
			 * Process is stuck in kernel somewhere, probably
			 * due to buggy or inefficient code.  Force a
			 * kernel preemption.
			 */
			cpu_need_resched(ci, RESCHED_KPREEMPT);
		} else if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
			cpu_need_resched(ci, 0);
		} else {
			spc->spc_flags |= SPCF_SEENRR;
		}
		break;
	}
}

/*
 * Why PRIO_MAX - 2? From setpriority(2):
 *
 *	prio is a value in the range -20 to 20.  The default priority is
 *	0; lower priorities cause more favorable scheduling.  A value of
 *	19 or 20 will schedule a process only when nothing at priority <=
 *	0 is runnable.
 *
 * This gives estcpu influence over 18 priority levels, and leaves nice
 * with 40 levels.  One way to think about it is that nice has 20 levels
 * either side of estcpu's 18.
 */
#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((PRIO_MAX - 2) << ESTCPU_SHIFT)
#define	ESTCPU_ACCUM	(1 << (ESTCPU_SHIFT - 1))
#define	ESTCPULIM(e)	uimin((e), ESTCPU_MAX)
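
/*
 * A worked example of the scaling above (assuming NetBSD's usual values,
 * i.e. PRI_KERNEL == 64 and p_nice biased by NZERO so that the default
 * nice level is p_nice == 20): resetpriority() below computes
 *
 *     pri = (PRI_KERNEL - 1) - (l_estcpu >> ESTCPU_SHIFT) - p_nice
 *
 * so an LWP at the default nice level ranges from pri 43 (l_estcpu == 0)
 * down to pri 25 (l_estcpu == ESTCPU_MAX == 18 << ESTCPU_SHIFT): the 18
 * estcpu levels mentioned above.
 */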

/*
 * The main parameter used by this algorithm is 'l_estcpu'. It is an estimate
 * of the recent CPU utilization of the thread.
 *
 * l_estcpu is:
 *  - increased each time the hardclock ticks and the thread is found to
 *    be executing, in sched_schedclock() called from hardclock()
 *  - decreased (filtered) on each sched tick, in sched_pstats_hook()
 * If the lwp is sleeping for more than a second, we don't touch l_estcpu: it
 * will be updated in sched_setrunnable() when the lwp wakes up, in burst mode
 * (i.e., we apply the decay n times in one go).
 *
 * Note that hardclock updates l_estcpu and l_cpticks independently.
 *
 * -----------------------------------------------------------------------------
 *
 * Here we describe how l_estcpu is decreased.
 *
 * Constants for digital decay (filter):
 *     90% of l_estcpu usage in (5 * loadavg) seconds
 *
 * We wish to decay away 90% of l_estcpu in (5 * loadavg) seconds. That is, we
 * want to compute a value of decay such that the following loop:
 *     for (i = 0; i < (5 * loadavg); i++)
 *         l_estcpu *= decay;
 * will result in
 *     l_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 *     decay ** (5 * loadavg) =~ .1
 *
 * And finally, the corresponding value of decay we're using is:
 *     decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * -----------------------------------------------------------------------------
 *
 * Now, let's prove that the value of decay stated above will always fulfill
 * the equation:
 *     decay ** (5 * loadavg) =~ .1
 *
 * If we define b as:
 *     b = 2 * loadavg
 * then
 *     decay = b / (b + 1)
 *
 * We now need to prove two things:
 *     1) Given [factor ** (5 * loadavg) =~ .1], prove [factor == b/(b+1)].
 *     2) Given [(b/(b+1)) ** power =~ .1], prove [power == (5 * loadavg)].
 *
 * Facts:
 *   * For x real: exp(x) = x**0/0! + x**1/1! + x**2/2! + ...
 *     Therefore, for x close to zero, exp(x) =~ 1 + x.
 *     In turn, for b large enough, exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *
 *   * For b large enough, (b-1)/b =~ b/(b+1).
 *
 *   * For x in [-1, 1): ln(1-x) = - x - x**2/2 - x**3/3 - ...
 *     Therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *
 *   * ln(0.1) =~ -2.30
 *
 * Proof of (1):
 *     factor ** (5 * loadavg) =~ 0.1
 *  => ln(factor) =~ -2.30 / (5 * loadavg)
 *  => factor =~ exp(-1 / ((5 / 2.30) * loadavg))
 *            =~ exp(-1 / (2 * loadavg))
 *            =~ exp(-1 / b)
 *            =~ (b - 1) / b
 *            =~ b / (b + 1)
 *            =~ (2 * loadavg) / ((2 * loadavg) + 1)
 *
 * Proof of (2):
 *     (b / (b + 1)) ** power =~ .1
 *  => power * ln(b / (b + 1)) =~ -2.30
 *  => power * (-1 / (b + 1)) =~ -2.30
 *  => power =~ 2.30 * (b + 1)
 *  => power =~ 4.60 * loadavg + 2.30
 *  => power =~ 5 * loadavg
 *
 * Conclusion: decay = (2 * loadavg) / (2 * loadavg + 1)
 */
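
/*
 * A quick numeric check of the claim above (easy to verify by hand): with
 * loadavg = 2 we get b = 4 and decay = 4/5 = 0.8, and
 *
 *     0.8 ** (5 * 2) = 0.8 ** 10 =~ 0.107
 *
 * which is close to the target 0.1.  The approximation gets tighter as the
 * load average (and hence b) grows.
 */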

/* See calculations above */
#define	loadfactor(loadavg)  (2 * (loadavg))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* Avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}
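
/*
 * A note on the fixed-point arithmetic in decay_cpu(): loadfac is an
 * FSCALE-scaled fixed-point value, so loadfac / (loadfac + FSCALE) is the
 * plain ratio b / (b + 1).  For example, with a load average of 2.0 we
 * have loadfac == 4 * FSCALE, and the function computes
 *
 *     estcpu * (4 * FSCALE) / (4 * FSCALE + FSCALE) == estcpu * 4 / 5
 *
 * i.e. one application of the decay factor derived in the comment above.
 */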

static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	/*
	 * For all load averages >= 1 and max l_estcpu of (255 << ESTCPU_SHIFT),
	 * if we slept for at least seven times the loadfactor, we will decay
	 * l_estcpu to less than (1 << ESTCPU_SHIFT), and therefore we can
	 * return zero directly.
	 *
	 * Note that our ESTCPU_MAX is actually much smaller than
	 * (255 << ESTCPU_SHIFT).
	 */
	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
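
/*
 * A sketch of why the early return in decay_cpu_batch() is safe: with
 * loadfac == b * FSCALE (b = 2 * loadavg), the condition
 * (n << FSHIFT) >= 7 * loadfac is n >= 7 * b in real terms.  After that
 * many decay steps,
 *
 *     (b / (b + 1)) ** (7 * b) =~ exp(-7) =~ 0.0009 < 1/255
 *
 * so even a maximal l_estcpu of (255 << ESTCPU_SHIFT) ends up below
 * (1 << ESTCPU_SHIFT), which is as good as zero for priority purposes.
 */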

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct lwp *l, int batch)
{
	fixpt_t loadfac;

	/*
	 * If the LWP has slept an entire second, stop recalculating
	 * its priority until it wakes up.
	 */
	KASSERT(lwp_locked(l, NULL));
	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED) {
		if (l->l_slptime > 1) {
			return;
		}
	}

	loadfac = loadfactor(averunnable.ldavg[0]);
	l->l_estcpu = decay_cpu(loadfac, l->l_estcpu);
	resetpriority(l);
}

/*
 * Recalculate the priority of an LWP after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	fixpt_t loadfac;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in sched_pstats */
	l->l_estcpu = decay_cpu_batch(loadfac, l->l_estcpu, l->l_slptime);
	resetpriority(l);
}

void
sched_rqinit(void)
{

}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

void
sched_nice(struct proc *p, int n)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));

	p->p_nice = n;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * Recompute the priority of an LWP.  Arrange to reschedule if
 * the resulting priority is better than that of the current LWP.
 */
static void
resetpriority(struct lwp *l)
{
	pri_t pri;
	struct proc *p = l->l_proc;

	KASSERT(lwp_locked(l, NULL));

	if (l->l_class != SCHED_OTHER)
		return;

	/* See comments above ESTCPU_SHIFT definition. */
	pri = (PRI_KERNEL - 1) - (l->l_estcpu >> ESTCPU_SHIFT) - p->p_nice;
	pri = imax(pri, 0);
	if (pri != l->l_priority)
		lwp_changepri(l, pri);
}

/*
 * We adjust the priority of the current LWP.  The priority of an LWP
 * gets worse as it accumulates CPU time.  The CPU usage estimator (l_estcpu)
 * is increased here.  The formula for computing priorities will compute a
 * different value each time l_estcpu increases.  This can cause a switch,
 * but unless the priority crosses a PPQ boundary the actual queue will not
 * change.  The CPU usage estimator ramps up quite quickly when the process
 * is running (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle is
 * that the system will 90% forget that the process used a lot of CPU time
 * in (5 * loadavg) seconds.  This causes the system to favor processes which
 * haven't run much recently, and to round-robin among other processes.
 */
void
sched_schedclock(struct lwp *l)
{

	if (l->l_class != SCHED_OTHER)
		return;

	KASSERT(!CURCPU_IDLE_P());
	l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
	lwp_lock(l);
	resetpriority(l);
	lwp_unlock(l);
}
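
/*
 * To put numbers on the ramp-up described above: ESTCPU_ACCUM is
 * (1 << (ESTCPU_SHIFT - 1)), i.e. half of one priority level, so every
 * two calls to sched_schedclock() while the LWP runs cost it one level
 * of user priority in resetpriority(), until ESTCPULIM() caps l_estcpu
 * at ESTCPU_MAX (18 levels' worth).
 */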

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	lwp_t *pl;

	KASSERT(mutex_owned(parent->p_lock));

	pl = LIST_FIRST(&parent->p_lwps);
	child->p_estcpu_inherited = pl->l_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Chargeback parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;
	lwp_t *pl, *cl;

	/* XXX Only if parent != init?? */

	mutex_enter(parent->p_lock);
	pl = LIST_FIRST(&parent->p_lwps);
	cl = LIST_FIRST(&child->p_lwps);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (cl->l_estcpu > estcpu) {
		lwp_lock(pl);
		pl->l_estcpu = ESTCPULIM(pl->l_estcpu + cl->l_estcpu - estcpu);
		lwp_unlock(pl);
	}
	mutex_exit(parent->p_lock);
}
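
/*
 * In other words (an illustrative reading of the code above): at fork the
 * child recorded a snapshot of the parent's estcpu.  decay_cpu_batch()
 * computes what would remain of that snapshot had it merely decayed over
 * the child's lifetime; any excess in the child's final l_estcpu is CPU
 * time the child accumulated on its own, and that excess is charged back
 * to the parent so that forking work off does not launder away the
 * scheduling penalty.
 */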

void
sched_wakeup(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	l2->l_estcpu = l1->l_estcpu;
}

void
sched_lwp_collect(struct lwp *t)
{
	lwp_t *l;

	/* Absorb estcpu value of collected LWP. */
	l = curlwp;
	lwp_lock(l);
	l->l_estcpu += t->l_estcpu;
	lwp_unlock(l);
}

void
sched_oncpu(lwp_t *l)
{

}

void
sched_newts(lwp_t *l)
{

}

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_rtts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int rttsms = hztoms(rrticks);

	node = *rnode;
	node.sysctl_data = &rttsms;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}
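
/*
 * For example (assuming the common default of hz = 100): rrticks is set
 * to hz / 10 == 10 below, so reading the node created here with
 *
 *     $ sysctl kern.sched.rtts
 *     kern.sched.rtts = 100
 *
 * would report the 100 ms round-robin quantum, hztoms() having converted
 * the tick count back to milliseconds.
 */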

SYSCTL_SETUP(sysctl_sched_4bsd_setup, "sysctl sched setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	rrticks = hz / 10;

	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("4.4BSD"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "rtts",
		SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
		sysctl_sched_rtts, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
}
551