1 /*	$NetBSD: sched_4bsd.c,v 1.4 2007/08/04 11:03:02 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
10  * Daniel Sieger.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the NetBSD
23  *	Foundation, Inc. and its contributors.
24  * 4. Neither the name of The NetBSD Foundation nor the names of its
25  *    contributors may be used to endorse or promote products derived
26  *    from this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38  * POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 /*-
42  * Copyright (c) 1982, 1986, 1990, 1991, 1993
43  *	The Regents of the University of California.  All rights reserved.
44  * (c) UNIX System Laboratories, Inc.
45  * All or some portions of this file are derived from material licensed
46  * to the University of California by American Telephone and Telegraph
47  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
48  * the permission of UNIX System Laboratories, Inc.
49  *
50  * Redistribution and use in source and binary forms, with or without
51  * modification, are permitted provided that the following conditions
52  * are met:
53  * 1. Redistributions of source code must retain the above copyright
54  *    notice, this list of conditions and the following disclaimer.
55  * 2. Redistributions in binary form must reproduce the above copyright
56  *    notice, this list of conditions and the following disclaimer in the
57  *    documentation and/or other materials provided with the distribution.
58  * 3. Neither the name of the University nor the names of its contributors
59  *    may be used to endorse or promote products derived from this software
60  *    without specific prior written permission.
61  *
62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72  * SUCH DAMAGE.
73  *
74  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
75  */
76 
77 #include <sys/cdefs.h>
78 __KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.4 2007/08/04 11:03:02 ad Exp $");
79 
80 #include "opt_ddb.h"
81 #include "opt_lockdebug.h"
82 #include "opt_perfctrs.h"
83 
84 #define	__MUTEX_PRIVATE
85 
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/callout.h>
89 #include <sys/cpu.h>
90 #include <sys/proc.h>
91 #include <sys/kernel.h>
92 #include <sys/signalvar.h>
93 #include <sys/resourcevar.h>
94 #include <sys/sched.h>
95 #include <sys/sysctl.h>
96 #include <sys/kauth.h>
97 #include <sys/lockdebug.h>
98 #include <sys/kmem.h>
99 
100 #include <uvm/uvm_extern.h>
101 
102 /*
103  * Run queues.
104  *
105  * We have 32 run queues in descending order of priority, 0..31.  We
106  * maintain a bitmask of non-empty queues in order to speed up finding
107  * the first runnable LWP.  The bitmask is kept up to date by the enqueue
108  * and dequeue routines below, and the first non-empty queue is found with
109  * ffs(), so an efficient machine-specific instruction can be used.
110  */
111 
112 #define	RUNQUE_NQS		32      /* number of runqueues */
113 #define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
114 
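/*
 * Mapping sketch (derived from the enqueue/dequeue code below): an LWP with
 * effective priority "pri" lives on subqueue pri / PPQ, so with PPQ == 4
 * priorities 0-3 share subqueue 0, 4-7 share subqueue 1, ..., and 124-127
 * share subqueue 31.  Bit n of rq_bitmap is set while subqueue n is
 * non-empty.
 */
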
115 typedef struct subqueue {
116 	TAILQ_HEAD(, lwp) sq_queue;
117 } subqueue_t;
118 typedef struct runqueue {
119 	subqueue_t rq_subqueues[RUNQUE_NQS];	/* run queues */
120 	uint32_t rq_bitmap;	/* bitmap of non-empty queues */
121 } runqueue_t;
122 static runqueue_t global_queue;
123 
124 static void updatepri(struct lwp *);
125 static void resetpriority(struct lwp *);
126 static void resetprocpriority(struct proc *);
127 
128 extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */
129 
130 /* The global scheduler state */
131 kmutex_t sched_mutex;
132 
133 /* Number of hardclock ticks per sched_tick() */
134 int rrticks;
135 
136 /*
137  * Force a switch among equal-priority processes every 100ms.
138  * Called from hardclock() every hz/10 == rrticks hardclock ticks.
139  */
140 /* ARGSUSED */
141 void
142 sched_tick(struct cpu_info *ci)
143 {
144 	struct schedstate_percpu *spc = &ci->ci_schedstate;
145 
146 	spc->spc_ticks = rrticks;
147 
148 	spc_lock(ci);
149 	if (!CURCPU_IDLE_P()) {
150 		if (spc->spc_flags & SPCF_SEENRR) {
151 			/*
152 			 * The process has already been through a roundrobin
153 			 * without switching and may be hogging the CPU.
154 			 * Indicate that the process should yield.
155 			 */
156 			spc->spc_flags |= SPCF_SHOULDYIELD;
157 		} else
158 			spc->spc_flags |= SPCF_SEENRR;
159 	}
160 	cpu_need_resched(curcpu(), 0);
161 	spc_unlock(ci);
162 }
163 
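/*
 * Illustrative timeline (a sketch, assuming hz == 100 so that rrticks == 10,
 * i.e. sched_tick() runs every 100ms):
 *
 *	t =   0ms	LWP A starts running; SPCF_SEENRR is clear
 *	t = 100ms	sched_tick(): A still running, SPCF_SEENRR is set
 *	t = 200ms	sched_tick(): SPCF_SEENRR already set, so
 *			SPCF_SHOULDYIELD is set to ask A to yield to an
 *			equal-priority LWP
 *
 * cpu_need_resched() is requested on every tick; the switch itself happens
 * in the normal preemption path outside this file.
 */
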
164 #define	NICE_WEIGHT 2			/* priorities per nice level */
165 
166 #define	ESTCPU_SHIFT	11
167 #define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
168 #define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
169 
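/*
 * For concreteness (assuming the usual PRIO_MAX of 20 from the resource
 * headers; PPQ is 4 in this file): ESTCPU_MAX == (2 * 20 - 4) << 11 ==
 * 36 << 11 == 73728, i.e. at most 36 priority points once shifted back
 * down by ESTCPU_SHIFT in resetpriority().
 */
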
170 /*
171  * Constants for digital decay and forget:
172  *	90% of (p_estcpu) usage in 5 * loadav time
173  *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
174  *          Note that, as ps(1) mentions, this can let percentages
175  *          total over 100% (I've seen 137.9% for 3 processes).
176  *
177  * Note that hardclock updates p_estcpu and p_cpticks independently.
178  *
179  * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
180  * That is, the system wants to compute a value of decay such
181  * that the following for loop:
182  * 	for (i = 0; i < (5 * loadavg); i++)
183  * 		p_estcpu *= decay;
184  * will compute
185  * 	p_estcpu *= 0.1;
186  * for all values of loadavg:
187  *
188  * Mathematically this loop can be expressed by saying:
189  * 	decay ** (5 * loadavg) ~= .1
190  *
191  * The system computes decay as:
192  * 	decay = (2 * loadavg) / (2 * loadavg + 1)
193  *
194  * We wish to prove that the system's computation of decay
195  * will always fulfill the equation:
196  * 	decay ** (5 * loadavg) ~= .1
197  *
198  * If we compute b as:
199  * 	b = 2 * loadavg
200  * then
201  * 	decay = b / (b + 1)
202  *
203  * We now need to prove two things:
204  *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
205  *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
206  *
207  * Facts:
208  *         For x close to zero, exp(x) =~ 1 + x, since
209  *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... .
210  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
211  *         For x close to zero, ln(1+x) =~ x, since
212  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
213  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
214  *         ln(.1) =~ -2.30
215  *
216  * Proof of (1):
217  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
218  *	solving for factor,
219  *      ln(factor) =~ -2.30 / (5 * loadav), or
220  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
221  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
222  *
223  * Proof of (2):
224  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
225  *	solving for power,
226  *      power*ln(b/(b+1)) =~ -2.30, or
227  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
228  *
229  * Actual power values for the implemented algorithm are as follows:
230  *      loadav: 1       2       3       4
231  *      power:  5.68    10.32   14.94   19.55
232  */
233 
234 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
235 #define	loadfactor(loadav)	(2 * (loadav))
236 
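/*
 * Worked example (a sketch, with the FSCALE scaling factored out): for a
 * load average of 2, loadfactor() gives 4, so each decay step multiplies
 * p_estcpu by 4 / (4 + 1) == 0.8, and 0.8 ** 10.32 =~ 0.1, which matches
 * the "power" table above.
 */
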
237 static fixpt_t
238 decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
239 {
240 
241 	if (estcpu == 0) {
242 		return 0;
243 	}
244 
245 #if !defined(_LP64)
246 	/* avoid 64bit arithmetics. */
247 #define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
248 	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
249 		return estcpu * loadfac / (loadfac + FSCALE);
250 	}
251 #endif /* !defined(_LP64) */
252 
253 	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
254 }
255 
256 /*
257  * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
258  * sleeping for at least seven times the loadfactor will decay p_estcpu to
259  * less than (1 << ESTCPU_SHIFT).
260  *
261  * Note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
262  */
263 static fixpt_t
264 decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
265 {
266 
267 	if ((n << FSHIFT) >= 7 * loadfac) {
268 		return 0;
269 	}
270 
271 	while (estcpu != 0 && n > 1) {
272 		estcpu = decay_cpu(loadfac, estcpu);
273 		n--;
274 	}
275 
276 	return estcpu;
277 }
278 
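/*
 * Example (illustrative): with a load average of 1, loadfac is 2 (times
 * FSCALE), so the cutoff above is n >= 14 seconds of sleep, after which
 * p_estcpu is simply zeroed.  For shorter sleeps decay_cpu() is applied
 * while n > 1, the first second having already been charged by
 * sched_pstats().
 */
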
279 /*
280  * sched_pstats_hook:
281  *
282  * Periodically called from sched_pstats(); used to recalculate priorities.
283  */
284 void
285 sched_pstats_hook(struct proc *p, int minslp)
286 {
287 	struct lwp *l;
288 	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
289 
290 	/*
291 	 * If the process has slept the entire second,
292 	 * stop recalculating its priority until it wakes up.
293 	 */
294 	if (minslp <= 1) {
295 		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
296 
297 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
298 			if ((l->l_flag & LW_IDLE) != 0)
299 				continue;
300 			lwp_lock(l);
301 			if (l->l_slptime <= 1 && l->l_priority >= PUSER)
302 				resetpriority(l);
303 			lwp_unlock(l);
304 		}
305 	}
306 }
307 
308 /*
309  * Recalculate the priority of a process after it has slept for a while.
310  */
311 static void
312 updatepri(struct lwp *l)
313 {
314 	struct proc *p = l->l_proc;
315 	fixpt_t loadfac;
316 
317 	KASSERT(lwp_locked(l, NULL));
318 	KASSERT(l->l_slptime > 1);
319 
320 	loadfac = loadfactor(averunnable.ldavg[0]);
321 
322 	l->l_slptime--; /* the first time was done in sched_pstats */
323 	/* XXX NJWLWP */
324 	/* XXXSMP occasionally unlocked, should be per-LWP */
325 	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
326 	resetpriority(l);
327 }
328 
329 /*
330  * On some architectures, it's faster to use an MSB ordering for the
331  * priorities than the traditional LSB ordering.
332  */
333 #define	RQMASK(n) (0x00000001 << (n))
334 
335 /*
336  * The primitives that manipulate the run queues.  rq_bitmap tells which
337  * of the 32 subqueues have LWPs in them.  sched_enqueue() puts LWPs
338  * into queues, sched_dequeue() removes them from queues.  The running LWP
339  * is on no queue; other runnable LWPs are on a queue indexed by their
340  * effective priority divided by PPQ (4), which shrinks the 0-127 range of
341  * priorities into the 32 available queues.
342  */
343 #ifdef RQDEBUG
344 static void
345 runqueue_check(const runqueue_t *rq, int whichq, struct lwp *l)
346 {
347 	const subqueue_t * const sq = &rq->rq_subqueues[whichq];
348 	const uint32_t bitmap = rq->rq_bitmap;
349 	struct lwp *l2;
350 	int found = 0;
351 	int die = 0;
352 	int empty = 1;
353 
354 	TAILQ_FOREACH(l2, &sq->sq_queue, l_runq) {
355 		if (l2->l_stat != LSRUN) {
356 			printf("runqueue_check[%d]: lwp %p state (%d)"
357 			    " != LSRUN\n", whichq, l2, l2->l_stat);
358 		}
359 		if (l2 == l)
360 			found = 1;
361 		empty = 0;
362 	}
363 	if (empty && (bitmap & RQMASK(whichq)) != 0) {
364 		printf("runqueue_check[%d]: bit set for empty run-queue %p\n",
365 		    whichq, rq);
366 		die = 1;
367 	} else if (!empty && (bitmap & RQMASK(whichq)) == 0) {
368 		printf("runqueue_check[%d]: bit clear for non-empty "
369 		    "run-queue %p\n", whichq, rq);
370 		die = 1;
371 	}
372 	if (l != NULL && (bitmap & RQMASK(whichq)) == 0) {
373 		printf("runqueue_check[%d]: bit clear for active lwp %p\n",
374 		    whichq, l);
375 		die = 1;
376 	}
377 	if (l != NULL && empty) {
378 		printf("runqueue_check[%d]: empty run-queue %p with "
379 		    "active lwp %p\n", whichq, rq, l);
380 		die = 1;
381 	}
382 	if (l != NULL && !found) {
383 		printf("runqueue_check[%d]: lwp %p not in runqueue %p!\n",
384 		    whichq, l, rq);
385 		die = 1;
386 	}
387 	if (die)
388 		panic("runqueue_check: inconsistency found");
389 }
390 #else /* RQDEBUG */
391 #define	runqueue_check(a, b, c)	/* nothing */
392 #endif /* RQDEBUG */
393 
394 static void
395 runqueue_init(runqueue_t *rq)
396 {
397 	int i;
398 
399 	for (i = 0; i < RUNQUE_NQS; i++)
400 		TAILQ_INIT(&rq->rq_subqueues[i].sq_queue);
401 }
402 
403 static void
404 runqueue_enqueue(runqueue_t *rq, struct lwp *l)
405 {
406 	subqueue_t *sq;
407 	const int whichq = lwp_eprio(l) / PPQ;
408 
409 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
410 
411 	runqueue_check(rq, whichq, NULL);
412 	rq->rq_bitmap |= RQMASK(whichq);
413 	sq = &rq->rq_subqueues[whichq];
414 	TAILQ_INSERT_TAIL(&sq->sq_queue, l, l_runq);
415 	runqueue_check(rq, whichq, l);
416 }
417 
418 static void
419 runqueue_dequeue(runqueue_t *rq, struct lwp *l)
420 {
421 	subqueue_t *sq;
422 	const int whichq = lwp_eprio(l) / PPQ;
423 
424 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
425 
426 	runqueue_check(rq, whichq, l);
427 	KASSERT((rq->rq_bitmap & RQMASK(whichq)) != 0);
428 	sq = &rq->rq_subqueues[whichq];
429 	TAILQ_REMOVE(&sq->sq_queue, l, l_runq);
430 	if (TAILQ_EMPTY(&sq->sq_queue))
431 		rq->rq_bitmap &= ~RQMASK(whichq);
432 	runqueue_check(rq, whichq, NULL);
433 }
434 
435 static struct lwp *
436 runqueue_nextlwp(runqueue_t *rq)
437 {
438 	const uint32_t bitmap = rq->rq_bitmap;
439 	int whichq;
440 
441 	if (bitmap == 0) {
442 		return NULL;
443 	}
444 	whichq = ffs(bitmap) - 1;
445 	return TAILQ_FIRST(&rq->rq_subqueues[whichq].sq_queue);
446 }
447 
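/*
 * Note: ffs() returns the lowest set bit, and lower subqueue indices hold
 * numerically lower (i.e. better) priorities, so the lookup above finds the
 * head of the best non-empty subqueue with a single bit scan.
 */
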
448 #if defined(DDB)
449 static void
450 runqueue_print(const runqueue_t *rq, void (*pr)(const char *, ...))
451 {
452 	const uint32_t bitmap = rq->rq_bitmap;
453 	struct lwp *l;
454 	int i, first;
455 
456 	for (i = 0; i < RUNQUE_NQS; i++) {
457 		const subqueue_t *sq;
458 		first = 1;
459 		sq = &rq->rq_subqueues[i];
460 		TAILQ_FOREACH(l, &sq->sq_queue, l_runq) {
461 			if (first) {
462 				(*pr)("%c%d",
463 				    (bitmap & RQMASK(i)) ? ' ' : '!', i);
464 				first = 0;
465 			}
466 			(*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
467 			    l->l_proc->p_pid,
468 			    l->l_lid, l->l_proc->p_comm,
469 			    (int)l->l_priority, (int)l->l_usrpri);
470 		}
471 	}
472 }
473 #endif /* defined(DDB) */
474 #undef RQMASK
475 
476 /*
477  * Initialize the (doubly-linked) run queues
478  * to be empty.
479  */
480 void
481 sched_rqinit(void)
482 {
483 
484 	runqueue_init(&global_queue);
485 	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
486 	/* Initialize the lock pointer for lwp0 */
487 	lwp0.l_mutex = &curcpu()->ci_schedstate.spc_lwplock;
488 }
489 
490 void
491 sched_cpuattach(struct cpu_info *ci)
492 {
493 	runqueue_t *rq;
494 
495 	ci->ci_schedstate.spc_mutex = &sched_mutex;
496 	rq = kmem_zalloc(sizeof(*rq), KM_NOSLEEP);
497 	runqueue_init(rq);
498 	ci->ci_schedstate.spc_sched_info = rq;
499 }
500 
501 void
502 sched_setup(void)
503 {
504 
505 	rrticks = hz / 10;
506 }
507 
508 void
509 sched_setrunnable(struct lwp *l)
510 {
511 
512 	if (l->l_slptime > 1)
513 		updatepri(l);
514 }
515 
516 bool
517 sched_curcpu_runnable_p(void)
518 {
519 	struct schedstate_percpu *spc;
520 	runqueue_t *rq;
521 
522 	spc = &curcpu()->ci_schedstate;
523 	rq = spc->spc_sched_info;
524 
525 	if (__predict_true((spc->spc_flags & SPCF_OFFLINE) == 0))
526 		return (global_queue.rq_bitmap | rq->rq_bitmap) != 0;
527 	return rq->rq_bitmap != 0;
528 }
529 
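/*
 * Note (an inference from the checks above): a CPU marked SPCF_OFFLINE
 * ignores the global run queue and only reports work from its private
 * queue, which is where sched_enqueue() below places LWPs bound to it.
 */
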
530 void
531 sched_nice(struct proc *chgp, int n)
532 {
533 
534 	chgp->p_nice = n;
535 	resetprocpriority(chgp);
536 }
537 
538 /*
539  * Compute the priority of a process when running in user mode.
540  * Arrange to reschedule if the resulting priority is better
541  * than that of the current process.
542  */
543 static void
544 resetpriority(struct lwp *l)
545 {
546 	unsigned int newpriority;
547 	struct proc *p = l->l_proc;
548 
549 	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
550 	LOCK_ASSERT(lwp_locked(l, NULL));
551 
552 	if ((l->l_flag & LW_SYSTEM) != 0)
553 		return;
554 
555 	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
556 	    NICE_WEIGHT * (p->p_nice - NZERO);
557 	newpriority = min(newpriority, MAXPRI);
558 	lwp_changepri(l, newpriority);
559 }
560 
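/*
 * Worked example (assuming the traditional values PUSER == 50 and
 * NZERO == 20): a process at nice 0 (p_nice == 20) with
 * p_estcpu == 8 << ESTCPU_SHIFT yields newpriority == 50 + 8 + 0 == 58;
 * nice 5 would add NICE_WEIGHT * 5 == 10 more, and the result is always
 * clamped to MAXPRI.
 */
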
561 /*
562  * Recompute priority for all LWPs in a process.
563  */
564 static void
565 resetprocpriority(struct proc *p)
566 {
567 	struct lwp *l;
568 
569 	KASSERT(mutex_owned(&p->p_stmutex));
570 
571 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
572 		lwp_lock(l);
573 		resetpriority(l);
574 		lwp_unlock(l);
575 	}
576 }
577 
578 /*
579  * We adjust the priority of the current process.  The priority of a process
580  * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
581  * is increased here.  The formula for computing priorities (in kern_synch.c)
582  * is increased here.  The formula for computing priorities (resetpriority()
583  * above) will compute a different value each time p_estcpu increases.  This can
584  * queue will not change.  The CPU usage estimator ramps up quite quickly
585  * when the process is running (linearly), and decays away exponentially, at
586  * a rate which is proportionally slower when the system is busy.  The basic
587  * principle is that the system will 90% forget that the process used a lot
588  * of CPU time in 5 * loadav seconds.  This causes the system to favor
589  * processes which haven't run much recently, and to round-robin among other
590  * processes.
591  */
592 
593 void
594 sched_schedclock(struct lwp *l)
595 {
596 	struct proc *p = l->l_proc;
597 
598 	KASSERT(!CURCPU_IDLE_P());
599 	mutex_spin_enter(&p->p_stmutex);
600 	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
601 	lwp_lock(l);
602 	resetpriority(l);
603 	mutex_spin_exit(&p->p_stmutex);
604 	if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority >= PUSER)
605 		l->l_priority = l->l_usrpri;
606 	lwp_unlock(l);
607 }
608 
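/*
 * Each sched_schedclock() call above adds 1 << ESTCPU_SHIFT to p_estcpu,
 * which raises the (p_estcpu >> ESTCPU_SHIFT) term of the user priority
 * formula by exactly one until ESTCPULIM() caps it.
 */
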
609 /*
610  * sched_proc_fork:
611  *
612  *	Inherit the parent's scheduler history.
613  */
614 void
615 sched_proc_fork(struct proc *parent, struct proc *child)
616 {
617 
618 	KASSERT(mutex_owned(&parent->p_smutex));
619 
620 	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
621 	child->p_forktime = sched_pstats_ticks;
622 }
623 
624 /*
625  * sched_proc_exit:
626  *
627  *	Chargeback parents for the sins of their children.
628  */
629 void
630 sched_proc_exit(struct proc *parent, struct proc *child)
631 {
632 	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
633 	fixpt_t estcpu;
634 
635 	/* XXX Only if parent != init?? */
636 
637 	mutex_spin_enter(&parent->p_stmutex);
638 	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
639 	    sched_pstats_ticks - child->p_forktime);
640 	if (child->p_estcpu > estcpu)
641 		parent->p_estcpu =
642 		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
643 	mutex_spin_exit(&parent->p_stmutex);
644 }
645 
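/*
 * In other words, the parent is charged only for the CPU time the child
 * accumulated beyond what its inherited estimate would have decayed to over
 * the child's lifetime; if the child's final p_estcpu is below that decayed
 * figure, nothing is charged back.
 */
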
646 void
647 sched_enqueue(struct lwp *l, bool ctxswitch)
648 {
649 
650 	if ((l->l_flag & LW_BOUND) != 0)
651 		runqueue_enqueue(l->l_cpu->ci_schedstate.spc_sched_info, l);
652 	else
653 		runqueue_enqueue(&global_queue, l);
654 }
655 
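/*
 * A bound LWP (LW_BOUND) goes on its CPU's private run queue and is visible
 * only to that CPU; everything else goes on global_queue, where any online
 * CPU may pick it up in sched_nextlwp().
 */
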
656 /*
657  * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
658  * the drop of the effective priority level from kernel to user needs to be
659  * moved here from userret().  The assignment in userret() is currently
660  * done unlocked.
661  */
662 void
663 sched_dequeue(struct lwp *l)
664 {
665 
666 	if ((l->l_flag & LW_BOUND) != 0)
667 		runqueue_dequeue(l->l_cpu->ci_schedstate.spc_sched_info, l);
668 	else
669 		runqueue_dequeue(&global_queue, l);
670 }
671 
672 struct lwp *
673 sched_nextlwp(void)
674 {
675 	struct schedstate_percpu *spc;
676 	lwp_t *l1, *l2;
677 
678 	spc = &curcpu()->ci_schedstate;
679 
680 	/* For now, just pick the highest priority LWP. */
681 	l1 = runqueue_nextlwp(spc->spc_sched_info);
682 	if (__predict_false((spc->spc_flags & SPCF_OFFLINE) != 0))
683 		return l1;
684 	l2 = runqueue_nextlwp(&global_queue);
685 
686 	if (l1 == NULL)
687 		return l2;
688 	if (l2 == NULL)
689 		return l1;
690 	if (lwp_eprio(l2) < lwp_eprio(l1))
691 		return l2;
692 	else
693 		return l1;
694 }
695 
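/*
 * Note: smaller lwp_eprio() values are better, and a tie is resolved in
 * favour of l1, the LWP already queued on this CPU's private run queue.
 */
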
696 /* Dummy */
697 void
698 sched_lwp_fork(struct lwp *l)
699 {
700 
701 }
702 
703 void
704 sched_lwp_exit(struct lwp *l)
705 {
706 
707 }
708 
709 /* SysCtl */
710 
711 SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
712 {
713 	const struct sysctlnode *node = NULL;
714 
715 	sysctl_createv(clog, 0, NULL, NULL,
716 		CTLFLAG_PERMANENT,
717 		CTLTYPE_NODE, "kern", NULL,
718 		NULL, 0, NULL, 0,
719 		CTL_KERN, CTL_EOL);
720 	sysctl_createv(clog, 0, NULL, &node,
721 		CTLFLAG_PERMANENT,
722 		CTLTYPE_NODE, "sched",
723 		SYSCTL_DESCR("Scheduler options"),
724 		NULL, 0, NULL, 0,
725 		CTL_KERN, CTL_CREATE, CTL_EOL);
726 
727 	if (node != NULL) {
728 		sysctl_createv(clog, 0, &node, NULL,
729 			CTLFLAG_PERMANENT,
730 			CTLTYPE_STRING, "name", NULL,
731 			NULL, 0, __UNCONST("4.4BSD"), 0,
732 			CTL_CREATE, CTL_EOL);
733 	}
734 }
735 
736 #if defined(DDB)
737 void
738 sched_print_runqueue(void (*pr)(const char *, ...))
739 {
740 
741 	runqueue_print(&global_queue, pr);
742 }
743 #endif /* defined(DDB) */
744